Update K8S to include an auto generator of values template

This commit is contained in:
mr
2025-11-12 13:13:43 +01:00
parent 7ad4bf0b5d
commit 9f9b1849eb
141 changed files with 13168 additions and 211 deletions

3
.gitignore vendored
View File

@@ -1,2 +1,3 @@
opencloud/Chart.lock
deployed_config.sh
deployed_config.sh
kind-logs

View File

@@ -8,42 +8,63 @@ environment is a legitimate target.
# Locally built microservices deployment procedure
## Clone the repository
## Install OC-K8S
```
git clone https://cloud.o-forge.io/plm/oc-k8s.git
sudo cp oc-k8s.sh /usr/bin/oc-k8s
sudo chmod +x /usr/bin/oc-k8s
```
## Install kind
Follow instructions here https://kind.sigs.k8s.io/
or
```
go install sigs.k8s.io/kind@v0.30.0 && kind create cluster
```
## Install helm
Download suitable helm client here https://helm.sh/docs/intro/install/
## Resume for a first start
```
oc-k8s start
```
or
```
./oc-k8s.sh start
```
To stop :
```
oc-k8s stop
```
or
```
./oc-k8s.sh stop
```
## Fire up a kind cluster
Execute following script to create a single node development k8s cluster
```
create_kind_cluster.sh
```
WARNING APACHE & NGINX ARE NOT RUNNING:
- `sudo /etc/init.d/apache2 stop`
- `sudo nginx -s stop`
Execute following script to create a single node development k8s cluster
```
oc-k8s create cluster
```
or
```
./oc-k8s.sh create cluster
```
It will create a *opencloud* docker container running kubernetes services.
## Clone all the microservices repositories taking part in the opencloud environment
Execute following script to clone all the needed parts:
```
clone_opencloud_microservices.sh
```
## Build everything
You need to build and publish all the opencloud microservices images in the kind cluster before deploying the Helm package.
@@ -51,13 +72,21 @@ You need to build and publish all the opencloud microservices images in the kind
Proceed as following:
```
build_opencloud_microservices.sh
oc-k8s build services [branch(default:main)] [target(default:all)]
```
or
```
./oc-k8s.sh build services [branch(default:main)] [target(default:all)]
```
## Deploy the opencloud chart
```
install_development.sh
oc-k8s create helm [env(default:dev)]
```
or
```
./oc-k8s.sh create helm [env(default:dev)]
```
Feel free to modify/create a new opencloud/dev-values.yaml. Provided setup should work out of the box, but is not suitable for production usage.
@@ -74,9 +103,6 @@ Edit your /etc/hosts file, and add following line:
Everything should be operational now, go to http://beta.opencloud.com and enjoy the ride
# Prebuilt microservices deployment procedure
TODO
# First steps

View File

@@ -1,18 +0,0 @@
#!/bin/bash
# Get the target from the first argument or use "all" as default
TARGET=${1:-all}
find .. -mindepth 2 -maxdepth 2 -name 'Makefile' | while read -r makefile; do
dir=$(dirname "$makefile")
echo "Running 'make $TARGET' in $dir"
(
cd "$dir" && export HOST="${2:-http://beta.opencloud.com/}" && make "$TARGET"
)
if [ $? -ne 0 ]; then
echo "Error: make $TARGET failed in $dir"
exit 1
fi
done
echo "All make processes completed successfully."

View File

@@ -1,47 +0,0 @@
#!/bin/bash
REPOS=(
"oc-auth"
"oc-catalog"
"oc-datacenter"
"oc-front"
"oc-monitord"
"oc-peer"
"oc-shared"
"oc-scheduler"
"oc-schedulerd"
"oc-workflow"
"oc-workspace"
)
# Function to clone repositories
clone_repo() {
local branch=${2:-main}
local repo_url="https://cloud.o-forge.io/core/$1.git"
local repo_name=$(basename "$repo_url" .git)
echo "Processing repository: $repo_name"
if [ ! -d "$1" ]; then
echo "Cloning repository: $repo_name"
git clone "$repo_url"
if [ $? -ne 0 ]; then
echo "Error cloning $repo_url"
exit 1
fi
fi
echo "Repository '$repo_name' already exists. Pulling latest changes..."
cd "$repo_name" && git checkout $branch && git pull && cd ..
}
branch=${1:-main}
cd ..
# Iterate through each repository in the list
for repo in "${REPOS[@]}"; do
clone_repo "$repo" "$branch"
done
echo "All repositories processed successfully."

View File

@@ -1,32 +0,0 @@
#!/bin/bash
./delete_kind_cluster.sh | true
cat <<EOF | kind create cluster --name opencloud --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
kubeadmConfigPatches:
- |
kind: InitConfiguration
nodeRegistration:
kubeletExtraArgs:
node-labels: "ingress-ready=true"
extraPortMappings:
- containerPort: 30950
hostPort: 80
protocol: TCP
- containerPort: 30951
hostPort: 443
protocol: TCP
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."locahost:5000"]
endpoint = ["http://dev-docker-registry-ui-registry-server.opencloud.svc.cluster.local:5000"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."dev-docker-registry-ui-registry-server.opencloud.svc.cluster.local:5000"]
endpoint = ["http://dev-docker-registry-ui-registry-server.opencloud.svc.cluster.local:5000"]
[plugins."io.containerd.grpc.v1.cri".registry.configs."dev-docker-registry-ui-registry-server.opencloud.svc.cluster.local:5000".tls]
insecure_skip_verify = true
cert_file = ""
key_file = ""
ca_file = ""
EOF

View File

@@ -1,3 +0,0 @@
#!/bin/bash
kind delete cluster --name opencloud | true

View File

@@ -1,8 +1,8 @@
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCVENDQWUyZ0F3SUJBZ0lJS1huSGhrNXV6cjR3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TlRBME1ETXdOalUzTlRsYUZ3MHpOVEEwTURFd056QXlOVGxhTUJVeApFekFSQmdOVkJBTVRDbXQxWW1WeWJtVjBaWE13Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLCkFvSUJBUUNtcUM1VWwwYTloNEZLUmxsZ0hEaGJtbVlIOGdMVXNpMzRkVzRSZEY0N3pUWW43YklVYm5KaWxXenMKbFJVek9ZdytZUEE3cWdrOG56TUxaZUpTeXI3TnpDSmpYTm9tNFJGYi82RXFtNVRRUkdYQ0ZjNHJwZC9WcXYyQQpudkZxTFF2SXc2SGsrc1pVNS9PU21FSm9VZWdRbFp2NGxYNXZQeDdQbEVmR1Y1MG9RYlprNEUvc0tpcEQ3YlVrCnlKeUkyN054TUM0ZkRiK0FTNWZPbWtrNFlpdis4bG0zUEdCVXp1ZGMzdFFyd1NuNGtmWitYTnBkRk9KeXh2cEgKVjliSTdCb3NXbGthU3hTQWUxQllCMEpOdXVYSjFhZjMwcmk3TjF4MGFrc1JSdEYyUzVUeCtmNXhlRDNJTG1zbwp3QzdTTG82VjI1Um5oVnV6UGtiR0ZkbzJnQ2RQQWdNQkFBR2pXVEJYTUE0R0ExVWREd0VCL3dRRUF3SUNwREFQCkJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJSSzk1NEExY1ZGdktVUmxaMXI0ejUrOGZqdFVEQVYKQmdOVkhSRUVEakFNZ2dwcmRXSmxjbTVsZEdWek1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQXhzZFdha3FSWgpJUzBMUXM3RUFrdVZrUzRGdTRoY2R2UzA5K29OTHA0Z0dLN3JjRXRob1dQVjlzK01jL01ZRStqaXIzYWlsakZUCm9zK0JVNE1vMmtHQnFIbkRUY3dBTjFzeVpib2p1QzJGRlN4SFNxeEJSK1lUL1RmWm5Tdkd0NUNNVEpudG1iMk8KeTduUmx6TUpYaXo3THF0d0xVMlZONTgwdUNDRTYrNkZyb041bjZna1hlYjJ0dXpMMzN4OW1yUUN4biswMjBpRApVcUUwSTZqZHhIcGl6dDZaSzBKQnRMYlZYVHExbFBDN00rMnNyVXRVUlg1ckp1OE91RTZ0WUhrbTV4UnlVTnJUCmJiZUdQWHRBTGdLWStMMVgybSsvT0kzNEEzRWV6cy91dTBpRHVYTWVBM1Z3UWxUZlROY2NXS1Z6STRxQ0lidTcKQ3NFOTBCNDRETkx0Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
server: https://127.0.0.1:41449
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCVENDQWUyZ0F3SUJBZ0lJVTBLdTVWVUVnUDR3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TlRFeE1EWXdPVE0wTWpOYUZ3MHpOVEV4TURRd09UTTVNak5hTUJVeApFekFSQmdOVkJBTVRDbXQxWW1WeWJtVjBaWE13Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLCkFvSUJBUURMRVJ4QkdISURNVDJCL2dvQWovRUhxc0s0NVkrMW1jcVFDNUxON3J1N3dRV3ZvcHd1VmlkREh0Wm4KQUFRSGR3RDhCOENMeFhleHFJZU1pM3ppWm80UHBBQTRpVklubE5RVi81S3pKZUg1SFVwYXptUjJuTkhIUGFOcgpEY1BTM0VoejJSNDZYMzlsUEpld3doeXczNFZWUGR1NXA3QVd2UThtRjhxMDc1Mll0NVh0REpkWEhFVVNzR2gxClhpTGF5aVkxTDk3cnJyZGtQemluOGpWVnNFdWFqMXdyUGhuTzQ0elpmK3JpblYrVEM2MlRkdmZTQkJHQXQ1SmEKc2RnaVg4a1FVaDUzQ1J2bUVyRUxtZzVKVmVhNjc3TFFiVUZXNzFTRkRlZmFNaDBoNk1OVng2WkF2T1R0RGVFegp2SFhGYVN1WWU5NDdzYmtVVGEzZTB0MjdUTk10QWdNQkFBR2pXVEJYTUE0R0ExVWREd0VCL3dRRUF3SUNwREFQCkJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJUUGVTQThBTXhwaGR5U05Yd3RORXRvaXk2bUtEQVYKQmdOVkhSRUVEakFNZ2dwcmRXSmxjbTVsZEdWek1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQVJaenhteFowagpCTTFSSFlJNDZrYzRoUFg2QXRJd0xPaUgvUWpkYlUwWDdqZFQzZWVPdGtTMm9SQmR0WThYbzFGVVJkUmdBc0YrClI1VDdOL0RZbFJRaWZaWTdQbDE0bzJmQWkxRG40cEhMQ3pWVzJaVUFSdDExY1p0Q1hZa3NTczJXdXV2SjRCVmIKY3FJYTUza3kza0tidm9keDRmOWFJU3lvTmx3YXZmYlNzU1BOTHVOb2VLaWg3SjdOTStBaElXblpxQU5JYWtEVgpnK0RVUi9EL1RjdHVvV3RHbVVrVnkvQWZldUt4VnpqNXhjSENleVBBaGRHUlBjVThLQTQzMHV4bW9hY2d5MG13CmtlQTB6RjJmNCtPT1JFa291MXBwMzFjYWxvSndNaEIwcXliQmxwcVFoNTI2aGdBQW1hZkFVSWhvY2liZzlLRDQKa2tUVURRbkVCNFVvCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
server: https://127.0.0.1:42767
name: kind-opencloud
contexts:
- context:
@@ -11,10 +11,9 @@ contexts:
name: kind-opencloud
current-context: kind-opencloud
kind: Config
preferences: {}
users:
- name: kind-opencloud
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURLVENDQWhHZ0F3SUJBZ0lJV09YL1R4a255L1l3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TlRBME1ETXdOalUzTlRsYUZ3MHlOakEwTURNd056QXlOVGxhTUR3eApIekFkQmdOVkJBb1RGbXQxWW1WaFpHMDZZMngxYzNSbGNpMWhaRzFwYm5NeEdUQVhCZ05WQkFNVEVHdDFZbVZ5CmJtVjBaWE10WVdSdGFXNHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFEY3VLc3MKNDNTcFZpa0t6L0c5MU04K3RMbC9hL0FMMi9uckJkUVFWMVBadmJ2cUF3VWozbWdoenVTdTRXZ2laV2lteFZ6SQowRnNPbzhzVk9QRVF6SExOaVo4ejROZGRTN3NZSjY4clpsR1RZb3Zyb1BWQzJKcWdUOVpqZ1FWRDJqYUsyYWhkCmd3WmxIVUlIYW9meWl6UEQ0S1IzSWcwd1dWSHRDbnpob2kxUTJreXlrdFh5QkZrSzczTkJBZVpBNEtUdURQNUEKbWFJMXNBS2hoM25ranFPd2JubFhNdmV0b0ZjT0krYlpyUk04UDE5ZUR2QnB1OWNEcXRyUjg1dm40em5aaFNpYwpBamdtT29sNUZUdmNpYTdQTnV3RTVPTVptSWdieDR3TzFPZlcxZm0yVnJnMWJReGlVU04rOGxpdi82bHNna0h1CkNzYmxER1VuUmZQdWVNcXBBZ01CQUFHalZqQlVNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUsKQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CQWY4RUFqQUFNQjhHQTFVZEl3UVlNQmFBRkVyM25nRFZ4VVc4cFJHVgpuV3ZqUG43eCtPMVFNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUFTd0Q1YzVwK2pSMkFKUkEwY21jY3E5WXcrCjBSR0k5citvTDg0dEZPUDJrdTEzQTdwUHZ4UlUzTVlxclg1WWpvYmZBbGN5eG9jM3ZsU0U2ZGVHUDRNZ3hIdFkKWnJuSUg2SzA1eE1iaC9tc2I2QmZPQmNMWWNQaVRCNC92SjNMN0pRdDhjc0RZd0RZeXRpd1NTRFRBMXEyZjQvdQorZG5DRXczdXYvK0pVL2RyVDR5MDdTMHA5OHowelhZaENaVXN5a2d0ekFkZDlLYWNoeU8wbGU4WHg3VWx4SzF5CkZDb2RHYXdweW94SFpvT0dVZ0dQK2NZNWR1bys1Tk5QNE1WRHMvQlIyZWkxR1pkcDJMUERZeUxSdVB3YnVXS3AKS3Raa0dtMFFQL1I0WnNVVXhpOVdpaTJ0TEhEVGxuL1ZJZUdXeFgrRStDR1dzTkRacEhkUEVwNERFVHEyCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBM0xpckxPTjBxVllwQ3MveHZkVFBQclM1ZjJ2d0M5djU2d1hVRUZkVDJiMjc2Z01GCkk5NW9JYzdrcnVGb0ltVm9wc1ZjeU5CYkRxUExGVGp4RU14eXpZbWZNK0RYWFV1N0dDZXZLMlpSazJLTDY2RDEKUXRpYW9FL1dZNEVGUTlvMml0bW9YWU1HWlIxQ0IycUg4b3N6dytDa2R5SU5NRmxSN1FwODRhSXRVTnBNc3BMVgo4Z1JaQ3U5elFRSG1RT0NrN2d6K1FKbWlOYkFDb1lkNTVJNmpzRzU1VnpMM3JhQlhEaVBtMmEwVFBEOWZYZzd3CmFidlhBNnJhMGZPYjUrTTUyWVVvbkFJNEpqcUplUlU3M0ltdXp6YnNCT1RqR1ppSUc4ZU1EdFRuMXRYNXRsYTQKTlcwTVlsRWpmdkpZci8rcGJJSkI3Z3JHNVF4bEowWHo3bmpLcVFJREFRQUJBb0lCQUNEU3paSGxXV245U0NrMQpkMHUxcEFROU9BQzgrbnVwQU9kMGZHN1pyYnlKdkxJMk5NTjZ4WjJSZU95UTFMMll1eEZoOUVSZ3g4bkxqZDNnCkUrbGRuZ3oyMUZnUXJiWlFGc2ltemlQMEdWbURwQzhzUm96a1RVcGQxUklERTlkZFVoTWYveXU0Mm5DYkEwQzcKRlBCNnh2M3dJRVVmS1RyeGJYK0V1Y0dNTHBVQk92YjQ1VUE3OFovK3dteVFGcUR0OE9nRnRCRkI4Q3dmRDNaZApkdTA0NkNwbDVnUDVOSWpMUm5vTUM4MHBTYTNTK2tpUkxUZWJNaVRkTkdLdDNRV0xCdG8yVDJsQ2U2VFhBc1M3CmZQRHdLcWZYZ09ickxQS2Q5L0duaG5sSTRBQjB6bzNCOUJRRjNGcUVTNC9UaTJia01CMy9ROFg5b3AzOENndEQKQ09CMGNpRUNnWUVBNDhOQXZsK1BJUWxISXpTNGZ4VFE0TGY1T1Q0dzZhcGdaWGQ5TzN2QTh1WmdVZUYweWN0bwpKbEFDWE5IWXVsSXpza0NqaEw2OWpNVTFDNmZiYmF4RlQ0cm5PcHVVQXh2cWJXM2pHaDJXMTBaNWJ5REdqaXk0CkpRYzZlK0lCK3IxbWNtTkgxMWFTa1NKd2w3NW1JQzM2NU9NUFVmbkRsSmwvcUpMNS9maHF4YTBDZ1lFQStCWHYKNWtQak9yMW52S2VES05idzRXdmJQWDkrUm1Lclp3UXc0QW9rRXJ0S25EOUpBNlZqd3JOejZzUm1OcFNBbld0SwpMQWM3bXF3clNGMUJvMWo3ZC9XeXZrSmRLWTJVUWNVcEtESkk5TmtkRkxSUFp3Q29iRllUU1A3NmdOZjA0d2pkCncxakpqalJhNVhwSHZIWjR5aWM1MXFiMmlrellpd0ZVYWtraElHMENnWUFjVDE2eXl3bnRYMHZhVW1sRVVvQU0KRHBJSnpYTWkrQ3gzTUR6K3Nja3Y1L2E4OXJzZmdncjJDQ2ZqQmFwY3JtOENqd3BFSzdrLzBCdGgwWUZ5eG1EdgpIQ0xWcEJUbnRnYVRoMzhjOWVTVDZZeWFoWVpva2FKcDU5WWJJK0ovR2grcnhDOWYwc0R4RFVqVmM3TVorTjBnCnFpQnpYZ296WkVqNzJSajl1bXo3R1FLQmdFUUlJQVdSVUZyblhlOUtNSm1lZCtnWlg4SDVtSE5NMDV3b2RnNjYKT1RJNTBqZ08wc01CTTk1TXY5ajlpdG9rMXpCVUg3NzdMTWs1dFpUZW5nVmhmWFJrR0dROFpyZmliMm1wMjZOKwpScXQrYm84aTh6Tlg1dGZ1c3RNdHVSeGtIRysyR3JwYk9zcVlhT1ROSjZiekJpVEpwUDdNUW9laWt5OXJhZTdMCnB4M3hBb0dCQU5vZ1MwQTd4NnNtaUo0NVJ
1SGtBRmFjNlIveFNWd1ZqTDBBTVVqaHpkZUU4Ri9BK0VQSjJnaG0KdXhEM0p5YlI0dnVuL05DbDg2KzZMejhXZGpQdTcrenF4M0UxVXJ1ZVkzZklKS3VQQXQvYks4eWVFakNxeldYcAo3bkkrZUFNM0pyWlhrRzJiNVlLaWhHdU9ieDJnUGVRYk1Dcy9wQUg3NUM0TlJyd01FK0lNCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURLVENDQWhHZ0F3SUJBZ0lJU210V3NrK0RrcjB3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TlRFeE1EWXdPVE0wTWpOYUZ3MHlOakV4TURZd09UTTVNak5hTUR3eApIekFkQmdOVkJBb1RGbXQxWW1WaFpHMDZZMngxYzNSbGNpMWhaRzFwYm5NeEdUQVhCZ05WQkFNVEVHdDFZbVZ5CmJtVjBaWE10WVdSdGFXNHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDNHEwUWIKeWFod3hMbnFyV05vRzYxcmVuc0Z3dE1XNXkzckxkeENLa3krYndpeGRjYjREQTRxU21KMU53dHo1UHMreTV1Rwo0RVRJRW9vTnZsZEtvQjhuZlBEdW44ZGd1cXdkZEdwOTRpbGJFRldoaExEeDR3VnRLQ2Y3cVZMN0VpUnFBRHFtCmYrcVlOa2FFd08xMnZoeTdCODFTdlNzQTZwQ29yOGVYL2k2K0diZGlHMGkxWGtEcFNWSGxBbE1WUlZEdUFydGwKQnZDVUdCRE5VdTBuenNKS3dkTFNsUG1RaUY3VTNwb3U4WHFRaGhvWlhFSWc4UkVpZ2FCRlJWd1dzaTRsWTlYQwp4Ym04VlpRbnFYL3NDemVma28vREtzSXJuN3VDYXpqczBJa1JyMmdXbE8wRmpkaGFocTdjYW12ZDZIaHRBMk56CkdiWHEwU0o4MVduYk1FcWxBZ01CQUFHalZqQlVNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUsKQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CQWY4RUFqQUFNQjhHQTFVZEl3UVlNQmFBRk05NUlEd0F6R21GM0pJMQpmQzAwUzJpTExxWW9NQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUJZaWNTTkd0cG1vOFVzVGlDc1FUVGJwTXZECmQ5V3RLeUVMdWNSMHdUN29ISkVMNU56OW9idWdmaS93ZFFUTzVualg3OFVNcXBnMEJiZHU0OGZSMXl1SWhSOVEKWVpPbEYzREwyRFhXeVJ5VU41QnJWYWhhVCtVMysvcjNWNmNnRHlrbGZ3NzQ2cFJweGJCMDZ2UUdsV3l5Smt3OQpyUjU3OFM5WTdPekZBL1B4cEp0T3prcFhVMFoxRWZ6SGhMc3UzT0J0WDZDWTRIa0JYNEpYaUwveldHRFdqWUtCClhqY3RRSlE3MzJ2c3FHbEpvR3pRNWEzN2NqbEFMV1doUWVJZXRoQm5sMFcvZUwva3czSzhMUXI0WG9OMVhwa3kKQVJ3Y3c3VVJ5UkFPanJYZTdaWnFJRk44NXdxQ0F3WFNCcU90M0Nld0pUMGNtampKNnpQaEs4Qnl6VTNjCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBdUt0RUc4bW9jTVM1NnExamFCdXRhM3A3QmNMVEZ1Y3Q2eTNjUWlwTXZtOElzWFhHCitBd09La3BpZFRjTGMrVDdQc3ViaHVCRXlCS0tEYjVYU3FBZkozenc3cC9IWUxxc0hYUnFmZUlwV3hCVm9ZU3cKOGVNRmJTZ24rNmxTK3hJa2FnQTZwbi9xbURaR2hNRHRkcjRjdXdmTlVyMHJBT3FRcUsvSGwvNHV2aG0zWWh0SQp0VjVBNlVsUjVRSlRGVVZRN2dLN1pRYndsQmdRelZMdEo4N0NTc0hTMHBUNWtJaGUxTjZhTHZGNmtJWWFHVnhDCklQRVJJb0dnUlVWY0ZySXVKV1BWd3NXNXZGV1VKNmwvN0FzM241S1B3eXJDSzUrN2dtczQ3TkNKRWE5b0ZwVHQKQlkzWVdvYXUzR3ByM2VoNGJRTmpjeG0xNnRFaWZOVnAyekJLcFFJREFRQUJBb0lCQURzZzIxWHFZN3gzd08rYgpSTkQ0eTdZMktWM1cyNi9ocFpvL3ZONlNuSy84N1ZPazJtaGhzOGRtdWMrK0tEU2JoTGEraXJYTTZUa0dkcDJmCjdHTkFrYmtiRUNuWGp3Y3h2aHBRUkNpNldYT0c3MlFnRUdmaXdNSUtzWUtxM0o3M1dDRjRvVTdlZGtiMldlMzMKdUJEbEhsM2tTYUhNWUJDVENtNTRtZmZZc3k2NmthZnR5cVlSZEc2amUvWFN5YnZ0U2FRNU9CeGlWMUlKRjFCYgpoYVYwWFEyVVhKU21Ic1dCUncwcEtKYWdBR0xkQ0ROL0RFYnlJaGczNWltekpuNTdORXV3cUxadDRsOHpESzg5CndiUmZmd1UxODBKQUtUZHI0cnMrNFNVYnUrVzZ3dGFSYzZXVk10RStyQitKZkxJNkxYcHk1TmRQZnpQZk9VNEkKU0FvUWFxRUNnWUVBeTAwcm5FN3EzOGVPUCtRSGxTeG04NjMwV2F3eldwamhkZEk1dFlXMFlUSGFKcHNCcXBOVwpMUWhUUTFMSGwxY3VWZFBlWTFrRmYzcEJtRkVFdkIvOU9HWnNUL3JYZXlwQW1qRWlSSkprYlFYMVJ1Sklva0FvCkxsckV2ODQ5S0JkYlVOMEpIMWpnaTUrNUZacGNaMWUvbmpxQzIxc0N4a2QvdzVDMTA3Yk91dk1DZ1lFQTZJbXIKbmJhZ1YzZnBNOXE4ZXNjY2tGK0xwUmdOYXBnbGFXSWdxTHpTQjBpRC82YTA4QzFQdGhCR1hXYlg2czA4VGVEZwppM1RpTDlYTjNHZmo1M3UyWTltbCtYSW9zaW1WOUMxQVJEU2xyU3Bqc2k4MmdUcnlmN0pxVEpEbXR5T2cvYnhHCmUxQTl2WGw5NUtBckpkdDVYOW11NWovYlVsMi80dWlYS3ZOaG1nY0NnWUF3Unh3akcwNkZZN014RXVxR0R5eEgKazdpaUR0V0hIeVR6UUwvNWprMitnd01tQmN0SWdZS0dZU0s4cXd6QXNMQk5LMm4xTjYrcGpSUEVvd0MzMS9UZgpEdHVMeHFxakVlNlRieldQQ1NDV0QwTW0yUmVjaUJYQ3BFVnlzdndzNkJjRVJla2pxNHh2TzdmTkhJSHkvSkNCCmQ2MlpmNnhSLzZxa2UxbmljOWYxNXdLQmdHWXFwb1lpaVF3bEdRYTVZZm9ucFRzaFgzOW5zKzFUYjR4L21vM0kKZUJiUEczK2xSUFlKVW01UmREanhTSWhYSjltbnBjQmYyRWpnRFJSN2FMeDV0RWJtaDIrellvSDBsM2V5c2Z2RgpqYzBwUktneTZpV1pPcFkwVTFQVWppbUVzK1VIWllna1Z4djZWdit1QjRjWWlKaytVUVBFcHAzbnBya1JLK21wCkNRYmRBb0dBWG5tbGpwN1JjbFdiaENMaVd
oWGxKSndQYnBqZjRmY2twQ3BCbnN4RnRCVEUwQ2g1ZkkzVTQxWGYKUmd5d1QvZ01OSVBnakxodU95bWh5dEdtazVlMitGWk5VVU8rZG9lanRCQjVGVXN0Y2N4VUg5QkZndjNzV3I5RAp0V1NNbjVna3RrazVLZEdQU2p5OXorSlRiMDNMNGNXajhUS0c1amRVSHVKWjhWanZ4dWs9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==

View File

@@ -1,8 +0,0 @@
#!/bin/bash
./uninstall.sh | true
RELEASE_NAME=${1:-dev}
RELEASE_NAMESPACE=${1:-dev}
helm install ${RELEASE_NAME} opencloud -n ${RELEASE_NAMESPACE} --create-namespace -f opencloud/${RELEASE_NAME}-values.yaml
kind get kubeconfig --name opencloud > ./deployed_config

267
oc-k8s.sh Executable file
View File

@@ -0,0 +1,267 @@
#!/bin/bash
# oc-k8s: helper CLI for the OpenCloud development Kubernetes environment.
# Dispatches `oc-k8s <action> [service] [args...]` to main_<action> or
# main_<action>_<service> functions defined below (see dispatcher at bottom).

# Microservice repositories cloned/built from
# https://cloud.o-forge.io/core/<name>.git (consumed by main_build_services).
REPOS=(
"oc-auth"
"oc-catalog"
"oc-datacenter"
"oc-front"
"oc-monitord"
"oc-peer"
"oc-shared"
"oc-scheduler"
"oc-schedulerd"
"oc-workflow"
"oc-workspace"
)
# values template
# main_create_values RELEASE [ENV_FILE]
# Render ./opencloud/values.yaml.template into
# ./opencloud/values/<RELEASE>-values.yaml, expanding every ${VAR:-default}
# placeholder: the environment value wins, otherwise the inline default.
# ENV_FILE, when given and existing, is sourced first to seed the environment.
# Exits 1 (after printing help) when RELEASE is missing.
main_create_values() {
  # NOTE(review): this enables strict mode for the WHOLE script from here on,
  # not just this function — confirm that is intended.
  set -euo pipefail
  if [[ -z "${1:-}" ]]; then
    echo "Error: No RELEASE PROVIDED."
    main_help_values
    exit 1
  fi
  TEMPLATE_FILE=./opencloud/values.yaml.template
  ENV_FILE=${2:-}
  OUTPUT_FILE="./opencloud/values/$1-values.yaml"
  # Load environment variables from env file
  if [[ -f "$ENV_FILE" ]]; then
    set -a
    source "$ENV_FILE"
    set +a
  fi
  # BUG FIX: ensure the output directory exists; a fresh checkout has no
  # ./opencloud/values/ and the awk redirection below would fail.
  mkdir -p "$(dirname "$OUTPUT_FILE")"
  # Process the template. For each ${VAR:-default} match, shell out to bash
  # so normal parameter-expansion semantics resolve env-vs-default.
  # NOTE(review): the default value is spliced verbatim into a bash -c command
  # line, so defaults containing quotes or shell metacharacters are unsafe —
  # confirm the template only uses simple literal defaults.
  awk '
  {
    line = $0
    # match ${VAR:-default} patterns
    while (match(line, /\$\{([A-Za-z_][A-Za-z0-9_]*):-([^}]+)\}/, arr)) {
      varname = arr[1]
      defaultval = arr[2]
      # get environment value or default
      cmd = "bash -c '\''echo ${" varname ":-" defaultval "}'\''"
      cmd | getline value
      close(cmd)
      line = substr(line, 1, RSTART-1) value substr(line, RSTART+RLENGTH)
    }
    print line
  }' "$TEMPLATE_FILE" > "$OUTPUT_FILE"
  echo "Rendered $OUTPUT_FILE from $TEMPLATE_FILE using $ENV_FILE"
}
# HELM SERVICE
# main_create_helm [RELEASE]
# (Re)install the opencloud chart as Helm release RELEASE (default: dev) in
# namespace RELEASE, then dump the kind kubeconfig and cluster logs.
main_create_helm() {
  # BUG FIX: was `| true`, which pipes stdout into true; `|| true` is the
  # intended best-effort cleanup of any previous release.
  main_delete_helm "${1:-dev}" || true
  RELEASE_NAME=${1:-dev}
  RELEASE_NAMESPACE=${1:-dev}
  # NOTE(review): main_create_values renders opencloud/values/<release>-values.yaml,
  # but this reads opencloud/<release>-values.yaml — confirm which path is intended.
  helm install "${RELEASE_NAME}" opencloud -n "${RELEASE_NAMESPACE}" --create-namespace -f "opencloud/${RELEASE_NAME}-values.yaml"
  kind get kubeconfig --name opencloud > ./deployed_config
  kind export logs ./kind-logs
}
# main_upgrade_helm [RELEASE]
# Upgrade the existing Helm release RELEASE (default: dev) in namespace
# RELEASE with its per-release values file.
main_upgrade_helm() {
  RELEASE_NAME=${1:-dev}
  RELEASE_NAMESPACE=${1:-dev}
  # Quote expansions so a release name with unusual characters cannot split.
  helm upgrade "${RELEASE_NAME}" opencloud -n "${RELEASE_NAMESPACE}" --create-namespace -f "opencloud/${RELEASE_NAME}-values.yaml"
}
# main_delete_helm [RELEASE]
# Uninstall Helm release RELEASE (default: dev) from namespace RELEASE and
# point KUBECONFIG back at the user's default config file.
main_delete_helm() {
  RELEASE_NAME=${1:-dev}
  RELEASE_NAMESPACE=${1:-dev}
  helm uninstall "${RELEASE_NAME}" -n "${RELEASE_NAMESPACE}"
  export KUBECONFIG="$(realpath ~/.kube/config)"
}
# CLUSTER SERVICE
# build_service NAME [BRANCH] [TARGET] [HOST]
# Clone https://cloud.o-forge.io/core/NAME.git if absent, check out BRANCH
# (default: main), pull, then run `make TARGET` (default: all) with HOST
# exported (default: http://beta.opencloud.com/). Exits 1 on clone or make
# failure. Leaves the working directory where it started on success.
build_service() {
  local repo_url="https://cloud.o-forge.io/core/$1.git"
  local branch=${2:-main}
  local target=${3:-all}
  local repo_name=$(basename "$repo_url" .git)
  echo "Processing repository: $repo_name"
  if [ ! -d "$1" ]; then
    echo "Cloning repository: $repo_name"
    git clone "$repo_url"
    if [ $? -ne 0 ]; then
      echo "Error cloning $repo_url"
      exit 1
    fi
  fi
  echo "Repository '$repo_name' now exists. Pulling latest changes..."
  cd "$repo_name" && git checkout $branch && git pull
  echo "Running 'make $target' in $repo_name"
  # BUG FIX: HOST previously defaulted from "$2" (the BRANCH argument, a
  # leftover from the old standalone script), so the branch name was exported
  # as HOST; take it from an optional 4th argument instead.
  export HOST="${4:-http://beta.opencloud.com/}" && make "$target"
  if [ $? -ne 0 ]; then
    # BUG FIX: the message referenced $dir, which is never set here.
    echo "Error: make $target failed in $repo_name"
    exit 1
  fi
  cd ..
}
# main_build_services [BRANCH] [TARGET]
# Build every OpenCloud microservice listed in REPOS, forwarding the git
# branch (default: main) and make target (default: all) to build_service.
main_build_services() {
  branch=${1:-main}
  target=${2:-all}
  cd ..
  local svc
  # Walk the repository list and build each one in turn.
  for svc in "${REPOS[@]}"; do
    build_service "$svc" "$branch" "$target"
  done
  echo "All repositories processed successfully."
}
# CLUSTER CONTROLLER
# main_delete_cluster: delete the 'opencloud' kind cluster, ignoring errors
# (e.g. when no such cluster exists).
main_delete_cluster() {
  # BUG FIX: was `| true` (pipes stdout to true); `|| true` ignores failure.
  kind delete cluster --name opencloud || true
}
# main_create_cluster
# Recreate the single-node 'opencloud' kind cluster: ingress-ready node,
# host ports 80/443 mapped to NodePorts 30950/30951, and an insecure
# in-cluster registry mirror configured in containerd. Finally merge the new
# cluster's kubeconfig into ~/.kube/config and select its context.
main_create_cluster() {
  main_delete_cluster || true
  # BUG FIX below: the mirror key was "locahost:5000" (typo), which can never
  # match an image reference, so localhost pulls bypassed the mirror.
  cat <<EOF | kind create cluster --name opencloud --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  kubeadmConfigPatches:
  - |
    kind: InitConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        node-labels: "ingress-ready=true"
  extraPortMappings:
  - containerPort: 30950
    hostPort: 80
    protocol: TCP
  - containerPort: 30951
    hostPort: 443
    protocol: TCP
containerdConfigPatches:
- |-
  [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:5000"]
    endpoint = ["http://dev-docker-registry-ui-registry-server.opencloud.svc.cluster.local:5000"]
  [plugins."io.containerd.grpc.v1.cri".registry.mirrors."dev-docker-registry-ui-registry-server.opencloud.svc.cluster.local:5000"]
    endpoint = ["http://dev-docker-registry-ui-registry-server.opencloud.svc.cluster.local:5000"]
  [plugins."io.containerd.grpc.v1.cri".registry.configs."dev-docker-registry-ui-registry-server.opencloud.svc.cluster.local:5000".tls]
    insecure_skip_verify = true
    cert_file = ""
    key_file = ""
    ca_file = ""
EOF
  echo "[WARNING] New cluster should be merged into your current config !"
  kind get kubeconfig --name opencloud > /tmp/kind-opencloud.kubeconfig
  # BUG FIX: redirecting `kubectl config view --flatten` straight into
  # ~/.kube/config truncates the file before kubectl reads it, destroying the
  # existing config. Write to a temp file first, then move it into place.
  KUBECONFIG=~/.kube/config:/tmp/kind-opencloud.kubeconfig kubectl config view --flatten > /tmp/kind-merged.kubeconfig && mv /tmp/kind-merged.kubeconfig ~/.kube/config
  kubectl config get-contexts
  kubectl config use-context kind-opencloud
}
# main_help_values: print usage for the 'values' subcommands.
# (Spelling fix: "optionnal" -> "optional".)
main_help_values() {
echo "
Cluster commands: oc-k8s <action> values
create - Create a new values release yaml
help - Show this help message
Usage:
oc-k8s create values [release] [env_file (optional)]
release - Release values name (required)
env_file - env to map (optional)
oc-k8s help values
"
}
main_help_cluster() {
# Print usage for the 'cluster' subcommands (create/delete the kind cluster).
echo "
Cluster commands: oc-k8s <action> cluster
create - Create a new kind cluster named 'opencloud'
delete - Delete the kind cluster named 'opencloud'
help - Show this help message
Usage:
oc-k8s create cluster
oc-k8s delete cluster
oc-k8s help cluster
"
}
main_help_services() {
# Print usage for the 'services' subcommands (building all microservices).
echo "
Service commands: oc-k8s <action> services
build - Build all opencloud services
help - Show this help message
Usage:
oc-k8s build services [branch] [target]
branch - Git branch to build (default: main)
target - make target (default: all)
oc-k8s help services
"
}
# main_help_helm: print usage for the 'helm' subcommands.
# (Fixes: "environnement" -> "environment", "oc-k8sh" -> "oc-k8s".)
main_help_helm() {
echo "
Helm commands: oc-k8s <action> helm
create - Install a helm release for the given environment (default: dev)
delete - Uninstall a helm release for the given environment (default: dev)
help - Show this help message
Usage:
oc-k8s create helm [env]
dev - environment selected (default: dev)
oc-k8s upgrade helm [env]
dev - environment selected (default: dev)
oc-k8s delete helm [env]
dev - environment selected (default: dev)
oc-k8s help helm
"
}
main_help_all() {
# Print the top-level usage summary, then each subcommand group's help.
echo "
Main commands: oc-k8s <action>
start - Start opencloud k8s
stop - Stop opencloud k8s
Usage:
oc-k8s start [args]
oc-k8s stop [args]
"
main_help_cluster
main_help_services
main_help_helm
main_help_values
}
main_start() {
# Full bring-up: stop host web servers that would occupy ports 80/443, then
# create the kind cluster, build every microservice, and install the chart.
# NOTE(review): the apache2/nginx stops fail (non-fatally, since strict mode
# is not yet active) on hosts where those services are absent — confirm that
# is acceptable on all dev machines.
sudo /etc/init.d/apache2 stop
sudo nginx -s stop
main_create_cluster "${@:1}"
main_build_services "${@:1}"
main_create_helm "${@:1}"
}
# main_stop: best-effort teardown — uninstall the Helm release, then delete
# the kind cluster; each step is allowed to fail independently.
main_stop() {
  # BUG FIX: was `| true` (pipes stdout to true); `|| true` ignores failure.
  main_delete_helm "${@:1}" || true
  main_delete_cluster "${@:1}" || true
}
# Dispatch: try main_<action> first (e.g. main_start), then
# main_<action>_<service> (e.g. main_create_cluster); otherwise show help.
# ${1:-}/${2:-} guard against unset positionals, and quoting keeps
# `declare -f` safe if arguments contain whitespace.
if declare -f "main_${1:-}" > /dev/null; then
  "main_${1}" "${@:2}"
elif declare -f "main_${1:-}_${2:-}" > /dev/null; then
  "main_${1}_${2}" "${@:3}"
else
  echo "Function does not exist"
  main_help_all
fi

View File

@@ -50,3 +50,7 @@ dependencies:
version: 1.1.3
repository: "https://helm.joxit.dev/"
condition: docker-registry-ui.enabled
- name: prometheus
version: "27.45.0"
repository: "https://prometheus-community.github.io/helm-charts"
condition: prometheus.enabled

129
opencloud/README.md Normal file
View File

@@ -0,0 +1,129 @@
# HOW TO MAKE YOUR PROPER VALUES.YAML
Use command :
```
oc-k8s create values [release] [env_file (optional)]
```
or
```
./oc-k8s.sh create values [release] [env_file (optional)]
```
In an env file, set any variable you wish to override, then pass that file's path as env_file.
## ENV VARIABLE
| Variable | Default | Purpose / Explanation |
| -------------- | ---------------------- | --------------------------------------------------------------------------------- |
| `HOST` | `exemple.com` | Domain for reverse proxy rules (Traefik). |
| `REGISTRY_HOST` | `registry.exemple.com` | Docker registry URL for reverse proxy and pull secrets. |
## MONGO VARIABLE
| Variable | Default | Purpose / Explanation |
| ------------------- | ----------- | ------------------------------------------ |
| `OC_MONGO_ENABLED` | `true` | Enable/disable MongoDB deployment. |
| `OC_MONGO_ADMIN` | `admin` | Root username for MongoDB. |
| `OC_MONGO_PWD` | `admin` | Root password. |
| `OC_MONGO_DATABASE` | `opencloud` | Default database to create. |
| `OC_MONGO_SIZE` | `5000Mi` | Persistent storage size for MongoDB. |
## MONGO EXPRESS VARIABLE
| Variable | Default | Purpose / Explanation |
| ----------------------------------------- | -------------------------------------------------------- | ------------------------------------------- |
| `OC_MONGOEXPRESS_ENABLED` | `true` | Enable mongo-express UI. |
| `OC_MONGOEXPRESS_ADMIN` | `${OC_MONGO_ADMIN:-admin}` | Admin username to connect to MongoDB. |
| `OC_MONGOEXPRESS_PWD` | `${OC_MONGO_PWD:-admin}` | Admin password. |
## NATS VARIABLE
| Variable | Default | Explanation |
| ----------------- | ------- | -------------------------------------- |
| `OC_NATS_ENABLED` | `true` | Deploy NATS cluster. |
| `OC_NATS_SIZE` | `20Mi` | Storage size for JetStream file store. |
## OpenLDAP VARIABLE
| Variable | Default | Explanation |
| --------------------------- | ---------------------------- | -------------------------------------------------------- |
| `OC_LDAP_ENABLED` | `true` | Deploy OpenLDAP server. |
| `OC_LDAP_IMAGE` | `osixia/openldap` | Docker image for OpenLDAP. |
| `OC_LDAP_ORGANISATION` | `Opencloud` | LDAP organization name. |
| `OC_LDAP_DOMAIN` | `opencloud.com` | LDAP domain. |
| `OC_LDAP_TLS` | `false` | Enable TLS for LDAP. |
| `OC_LDAP_ADMIN_PWD` | `admin` | LDAP admin password. |
| `OC_LDAP_CONFIG_PWD` | `config` | Password for configuration account. |
| `OC_LDAP_EXTERNAL` | `false` | Connect to external LDAP instead of internal deployment. |
| `OC_LDAP_EXTERNAL_ENDPOINT` | (none) | LDAP server URL. |
| `OC_LDAP_EXTERNAL_DN` | `cn=admin,dc=example,dc=com` | Bind DN for external LDAP. |
| `OC_LDAP_EXTERNAL_PWD` | `admin` | Bind password for external LDAP. |
## Prometheus VARIABLE
| Variable | Default | Explanation |
| ------------------------------- | ------- | ------------------------- |
| `OC_PROMETHEUS_ENABLED` | `true` | Enable Prometheus server. |
| `OC_PROMETHEUS_SIZE` | `5Gi` | Persistent volume size. |
| `OC_PROMETHEUS_LIMITS_CPU` | `500m` | CPU limit. |
| `OC_PROMETHEUS_LIMITS_MEMORY` | `512Mi` | Memory limit. |
| `OC_PROMETHEUS_REQUESTS_CPU` | `128m` | CPU request. |
| `OC_PROMETHEUS_REQUESTS_MEMORY` | `256Mi` | Memory request. |
## Grafana VARIABLE
| VARIABLE | DEFAULT | DESCRIPTION |
| -------------------------------------- | ----------- | ------------------------------------------------------------------------------------ |
| `OC_GRAFANA_ENABLED` | `true` | Enable or disable Grafana deployment. |
| `OC_GRAFANA_ADMIN_USER` | `admin` | Username for the Grafana admin account. |
| `OC_GRAFANA_ADMIN_PWD` | `admin` | Password for the Grafana admin account. |
| `OC_GRAFANA_SIZE` | `1Gi` | Size of the persistent volume for Grafana. |
## Traefik VARIABLE
| Variable | Default | Explanation |
| ------------------------------- | ------- | ------------------------- |
| `OC_TRAEFIK_ENABLED` | `true` | Enable Traefik server. |
## Hydra VARIABLE
| Variable | Default | Explanation |
| ------------------ | ------------------------------------------------------------------------ | ------------------------------------------- |
| `OC_HYDRA_ENABLED` | `true` | Deploy Hydra (OAuth2). |
## Keto VARIABLE
| VARIABLE | DEFAULT | DESCRIPTION |
| ---------------------------- | --------------------------------- | ---------------------------------------------------------------------------------------------- |
| `OC_KETO_ENABLED` | `true` | Enable or disable Keto deployment. |
## Loki VARIABLE
| VARIABLE | DEFAULT | DESCRIPTION |
| ---------------------------- | --------------------------------- | ---------------------------------------------------------------------------------------------- |
| `OC_LOKI_ENABLED` | `true` | Enable or disable Loki deployment. |
| `OC_LOKI_SIZE` | `1Gi` | Resource allowed. |
## Minio VARIABLE
| VARIABLE | DEFAULT | DESCRIPTION |
| ---------------------------- | --------------------------------- | ---------------------------------------------------------------------------------------------- |
| `OC_MINIO_ENABLED` | `true` | Enable or disable Minio deployment. |
## Argo VARIABLE
| VARIABLE | DEFAULT | DESCRIPTION |
| ------------------------------------------ | --------------- | ---------------------------------------------- |
| `OC_ARGO_ENABLED` | `false` | Enable or disable Argo Workflows deployment. |
## OC API VARIABLE
| Variable | Default | Explanation |
| ------------------------------------ | ------------ | --------------------------------- |
| `OC_<APP>_ENABLED` | true | Deploy the service. |
| `OC_<APP>_IMAGE` | registry URL | Docker image. |
| `OC_<APP>_LIMITS_CPU/MEMORY` | 128m / 256Mi | Resource limits. |
| `OC_<APP>_REQUESTS_CPU/MEMORY` | 128m / 256Mi | Resource requests. |
| `OC_<APP>_REPLICAS_ENABLED` | true | Enable Horizontal Pod Autoscaler. |
| `OC_<APP>_REPLICAS_MAX` | 5 | Max replicas. |
| `OC_<APP>_REPLICAS_USAGE` | 80 | HPA target CPU usage (%). |

View File

@@ -0,0 +1,24 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
ci/
OWNERS

View File

@@ -0,0 +1,15 @@
dependencies:
- name: alertmanager
repository: https://prometheus-community.github.io/helm-charts
version: 1.28.0
- name: kube-state-metrics
repository: https://prometheus-community.github.io/helm-charts
version: 6.4.1
- name: prometheus-node-exporter
repository: https://prometheus-community.github.io/helm-charts
version: 4.49.1
- name: prometheus-pushgateway
repository: https://prometheus-community.github.io/helm-charts
version: 3.4.2
digest: sha256:fedbc59b33be92cc31268269ffcd55336a21d62e3b9ae3874e99f4ca63479991
generated: "2025-11-04T21:27:16.796331119Z"

View File

@@ -0,0 +1,58 @@
annotations:
artifacthub.io/license: Apache-2.0
artifacthub.io/links: |
- name: Chart Source
url: https://github.com/prometheus-community/helm-charts
- name: Upstream Project
url: https://github.com/prometheus/prometheus
apiVersion: v2
appVersion: v3.7.3
dependencies:
- condition: alertmanager.enabled
name: alertmanager
repository: https://prometheus-community.github.io/helm-charts
version: 1.28.*
- condition: kube-state-metrics.enabled
name: kube-state-metrics
repository: https://prometheus-community.github.io/helm-charts
version: 6.4.*
- condition: prometheus-node-exporter.enabled
name: prometheus-node-exporter
repository: https://prometheus-community.github.io/helm-charts
version: 4.49.*
- condition: prometheus-pushgateway.enabled
name: prometheus-pushgateway
repository: https://prometheus-community.github.io/helm-charts
version: 3.4.*
description: Prometheus is a monitoring system and time series database.
home: https://prometheus.io/
icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
keywords:
- monitoring
- prometheus
kubeVersion: '>=1.19.0-0'
maintainers:
- email: gianrubio@gmail.com
name: gianrubio
url: https://github.com/gianrubio
- email: zanhsieh@gmail.com
name: zanhsieh
url: https://github.com/zanhsieh
- email: miroslav.hadzhiev@gmail.com
name: Xtigyro
url: https://github.com/Xtigyro
- email: naseem@transit.app
name: naseemkullah
url: https://github.com/naseemkullah
- email: rootsandtrees@posteo.de
name: zeritti
url: https://github.com/zeritti
name: prometheus
sources:
- https://github.com/prometheus/alertmanager
- https://github.com/prometheus/prometheus
- https://github.com/prometheus/pushgateway
- https://github.com/prometheus/node_exporter
- https://github.com/kubernetes/kube-state-metrics
type: application
version: 27.45.0

View File

@@ -0,0 +1,392 @@
# Prometheus
[Prometheus](https://prometheus.io/), a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. It collects metrics from configured targets at given intervals, evaluates rule expressions, displays the results, and can trigger alerts if some condition is observed to be true.
This chart bootstraps a [Prometheus](https://prometheus.io/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.19+
- Helm 3.7+
## Usage
The chart is distributed as an [OCI Artifact](https://helm.sh/docs/topics/registries/) as well as via a traditional [Helm Repository](https://helm.sh/docs/topics/chart_repository/).
- OCI Artifact: `oci://ghcr.io/prometheus-community/charts/prometheus`
- Helm Repository: `https://prometheus-community.github.io/helm-charts` with chart `prometheus`
The installation instructions use the OCI registry. Refer to the [`helm repo`](https://helm.sh/docs/helm/helm_repo/) command documentation for information on installing charts via the traditional repository.
### Install Chart
Starting with version 16.0, the Prometheus chart requires Helm 3.7+ in order to install successfully. Please check your `helm` release before installation.
```console
helm install [RELEASE_NAME] oci://ghcr.io/prometheus-community/charts/prometheus
```
_See [configuration](#configuration) below._
_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
### Dependencies
By default this chart installs additional, dependent charts:
- [alertmanager](https://github.com/prometheus-community/helm-charts/tree/main/charts/alertmanager)
- [kube-state-metrics](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics)
- [prometheus-node-exporter](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter)
- [prometheus-pushgateway](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-pushgateway)
To disable the dependency during installation, set `alertmanager.enabled`, `kube-state-metrics.enabled`, `prometheus-node-exporter.enabled` and `prometheus-pushgateway.enabled` to `false`.
_See [helm dependency](https://helm.sh/docs/helm/helm_dependency/) for command documentation._
### Uninstall Chart
```console
helm uninstall [RELEASE_NAME]
```
This removes all the Kubernetes components associated with the chart and deletes the release.
_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
### Updating values.schema.json
A [`values.schema.json`](https://helm.sh/docs/topics/charts/#schema-files) file has been added to validate chart values. When `values.yaml` file has a structure change (i.e. add a new field, change value type, etc.), modify `values.schema.json` file manually or run `helm schema-gen values.yaml > values.schema.json` to ensure the schema is aligned with the latest values. Refer to [helm plugin `helm-schema-gen`](https://github.com/karuppiah7890/helm-schema-gen) for plugin installation instructions.
### Upgrading Chart
```console
helm upgrade [RELEASE_NAME] oci://ghcr.io/prometheus-community/charts/prometheus --install
```
_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
#### To 27.0
Prometheus' configuration parameter `insecure_skip_verify` in scrape configs `serverFiles."prometheus.yml".scrape_configs` has been commented out keeping thus the default Prometheus' value.
If certificate verification must be skipped, please, uncomment the line before upgrading.
#### To 26.0
This release changes the default version of Prometheus to v3.0.0. See the official [migration guide](https://prometheus.io/docs/prometheus/latest/migration/#prometheus-3-0-migration-guide) and [release notes](https://github.com/prometheus/prometheus/releases/tag/v3.0.0) for more details.
#### To 25.0
The `server.remoteRead[].url` and `server.remoteWrite[].url` fields now support templating. Allowing for `url` values such as `https://{{ .Release.Name }}.example.com`.
Any entries in these which previously included `{{` or `}}` must be escaped with `{{ "{{" }}` and `{{ "}}" }}` respectively. Entries which did not previously include the template-like syntax will not be affected.
#### To 24.0
Require Kubernetes 1.19+
Release 1.0.0 of the _alertmanager_ replaced [configmap-reload](https://github.com/jimmidyson/configmap-reload) with [prometheus-config-reloader](https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader).
Extra command-line arguments specified via `configmapReload.prometheus.extraArgs` are not compatible and will break with the new prometheus-config-reloader. Please, refer to the [sources](https://github.com/prometheus-operator/prometheus-operator/blob/main/cmd/prometheus-config-reloader/main.go) in order to make the appropriate adjustment to the extra command-line arguments.
#### To 23.0
Release 5.0.0 of the _kube-state-metrics_ chart introduced a separation of the `image.repository` value in two distinct values:
```console
image:
registry: registry.k8s.io
repository: kube-state-metrics/kube-state-metrics
```
If a custom values file or CLI flags set `kube-state-metrics.image.repository`, please set the new values accordingly.
If you are upgrading _prometheus-pushgateway_ with the chart and _prometheus-pushgateway_ has been deployed as a statefulset with a persistent volume, the statefulset must be deleted before upgrading the chart, e.g.:
```bash
kubectl delete sts -l app.kubernetes.io/name=prometheus-pushgateway -n monitoring --cascade=orphan
```
Users are advised to review changes in the corresponding chart releases before upgrading.
#### To 22.0
The `app.kubernetes.io/version` label has been removed from the pod selector.
Therefore, you must delete the previous StatefulSet or Deployment before upgrading. Performing this operation will cause **Prometheus to stop functioning** until the upgrade is complete.
```console
kubectl delete deploy,sts -l app.kubernetes.io/name=prometheus
```
#### To 21.0
The Kubernetes labels have been updated to follow [Helm 3 label and annotation best practices](https://helm.sh/docs/chart_best_practices/labels/).
Specifically, labels mapping is listed below:
| OLD | NEW |
|--------------------|------------------------------|
|heritage | app.kubernetes.io/managed-by |
|chart | helm.sh/chart |
|[container version] | app.kubernetes.io/version |
|app | app.kubernetes.io/name |
|release | app.kubernetes.io/instance |
Therefore, depending on the way you've configured the chart, the previous StatefulSet or Deployment need to be deleted before upgrade.
If `runAsStatefulSet: false` (this is the default):
```console
kubectl delete deploy -l app=prometheus
```
If `runAsStatefulSet: true`:
```console
kubectl delete sts -l app=prometheus
```
After that do the actual upgrade:
```console
helm upgrade -i prometheus prometheus-community/prometheus
```
#### To 20.0
The [configmap-reload](https://github.com/jimmidyson/configmap-reload) container was replaced by the [prometheus-config-reloader](https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader).
Extra command-line arguments specified via configmapReload.prometheus.extraArgs are not compatible and will break with the new prometheus-config-reloader, refer to the [sources](https://github.com/prometheus-operator/prometheus-operator/blob/main/cmd/prometheus-config-reloader/main.go) in order to make the appropriate adjustment to the extra command-line arguments.
#### To 19.0
Prometheus has been updated to version v2.40.5.
Prometheus-pushgateway was updated to version 2.0.0 which adapted [Helm label and annotation best practices](https://helm.sh/docs/chart_best_practices/labels/).
See the [upgrade docs of the prometheus-pushgateway chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-pushgateway#to-200) to see what to do before you upgrade Prometheus.
The condition in Chart.yaml to disable kube-state-metrics has been changed from `kubeStateMetrics.enabled` to `kube-state-metrics.enabled`
The Docker image tag is used from appVersion field in Chart.yaml by default.
Unused subchart configs has been removed and subchart config is now on the bottom of the config file.
If Prometheus is used as deployment the updatestrategy has been changed to "Recreate" by default, so Helm updates work out of the box.
`.Values.server.extraTemplates` & `.Values.server.extraObjects` has been removed in favour of `.Values.extraManifests`, which can do the same.
`.Values.server.enabled` has been removed as it's useless now that all components are created by subcharts.
All files in `templates/server` directory has been moved to `templates` directory.
```bash
helm upgrade [RELEASE_NAME] prometheus-community/prometheus --version 19.0.0
```
#### To 18.0
Version 18.0.0 uses alertmanager service from the [alertmanager chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/alertmanager). If you've made some config changes, please check the old `alertmanager` and the new `alertmanager` configuration section in values.yaml for differences.
Note that the `configmapReload` section for `alertmanager` was moved out of dedicated section (`configmapReload.alertmanager`) to alertmanager embedded (`alertmanager.configmapReload`).
Before you update, please scale down the `prometheus-server` deployment to `0` then perform upgrade:
```bash
# In 17.x
kubectl scale deploy prometheus-server --replicas=0
# Upgrade
helm upgrade [RELEASE_NAME] prometheus-community/prometheus --version 18.0.0
```
#### To 17.0
Version 17.0.0 uses pushgateway service from the [prometheus-pushgateway chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-pushgateway). If you've made some config changes, please check the old `pushgateway` and the new `prometheus-pushgateway` configuration section in values.yaml for differences.
Before you update, please scale down the `prometheus-server` deployment to `0` then perform upgrade:
```bash
# In 16.x
kubectl scale deploy prometheus-server --replicas=0
# Upgrade
helm upgrade [RELEASE_NAME] prometheus-community/prometheus --version 17.0.0
```
#### To 16.0
Starting from version 16.0 embedded services (like alertmanager, node-exporter etc.) are moved out of Prometheus chart and the respecting charts from this repository are used as dependencies. Version 16.0.0 moves node-exporter service to [prometheus-node-exporter chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter). If you've made some config changes, please check the old `nodeExporter` and the new `prometheus-node-exporter` configuration section in values.yaml for differences.
Before you update, please scale down the `prometheus-server` deployment to `0` then perform upgrade:
```bash
# In 15.x
kubectl scale deploy prometheus-server --replicas=0
# Upgrade
helm upgrade [RELEASE_NAME] prometheus-community/prometheus --version 16.0.0
```
#### To 15.0
Version 15.0.0 changes the relabeling config, aligning it with the [Prometheus community conventions](https://github.com/prometheus/prometheus/pull/9832). If you've made manual changes to the relabeling config, you have to adapt your changes.
Before you update please execute the following command, to be able to update kube-state-metrics:
```bash
kubectl delete deployments.apps -l app.kubernetes.io/instance=prometheus,app.kubernetes.io/name=kube-state-metrics --cascade=orphan
```
#### To 9.0
Version 9.0 adds a new option to enable or disable the Prometheus Server. This supports the use case of running a Prometheus server in one k8s cluster and scraping exporters in another cluster while using the same chart for each deployment. To install the server `server.enabled` must be set to `true`.
#### To 5.0
As of version 5.0, this chart uses Prometheus 2.x. This version of prometheus introduces a new data format and is not compatible with prometheus 1.x. It is recommended to install this as a new release, as updating existing releases will not work. See the [prometheus docs](https://prometheus.io/docs/prometheus/latest/migration/#storage) for instructions on retaining your old data.
Prometheus version 2.x has made changes to alertmanager, storage and recording rules. Check out [the 2.x migration guide](https://prometheus.io/docs/prometheus/2.55/migration/).
Users of this chart will need to update their alerting rules to the new format before they can upgrade.
#### Example Migration
Assuming you have an existing release of the prometheus chart, named `prometheus-old`. In order to update to prometheus 2.x while keeping your old data do the following:
1. Update the `prometheus-old` release. Disable scraping on every component besides the prometheus server, similar to the configuration below:
```yaml
alertmanager:
enabled: false
alertmanagerFiles:
alertmanager.yml: ""
kubeStateMetrics:
enabled: false
nodeExporter:
enabled: false
pushgateway:
enabled: false
server:
extraArgs:
storage.local.retention: 720h
serverFiles:
alerts: ""
prometheus.yml: ""
rules: ""
```
1. Deploy a new release of the chart with version 5.0+ using prometheus 2.x. In the values.yaml set the scrape config as usual, and also add the `prometheus-old` instance as a remote-read target.
```yaml
prometheus.yml:
...
remote_read:
- url: http://prometheus-old/api/v1/read
...
```
Old data will be available when you query the new prometheus instance.
## Configuration
See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands:
```console
helm show values oci://ghcr.io/prometheus-community/charts/prometheus
```
You may similarly use the above configuration commands on each chart [dependency](#dependencies) to see its configurations.
### Scraping Pod Metrics via Annotations
This chart uses a default configuration that causes prometheus to scrape a variety of kubernetes resource types, provided they have the correct annotations. In this section we describe how to configure pods to be scraped; for information on how other resource types can be scraped you can do a `helm template` to get the kubernetes resource definitions, and then reference the prometheus configuration in the ConfigMap against the prometheus documentation for [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) and [kubernetes_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config).
In order to get prometheus to scrape pods, you must add annotations to the pods as below:
```yaml
metadata:
annotations:
prometheus.io/scrape: "true"
prometheus.io/path: /metrics
prometheus.io/port: "8080"
```
You should adjust `prometheus.io/path` based on the URL that your pod serves metrics from. `prometheus.io/port` should be set to the port that your pod serves metrics from. Note that the values for `prometheus.io/scrape` and `prometheus.io/port` must be enclosed in double quotes.
### Sharing Alerts Between Services
Note that when [installing](#install-chart) or [upgrading](#upgrading-chart) you may use multiple values override files. This is particularly useful when you have alerts belonging to multiple services in the cluster. For example,
```yaml
# values.yaml
# ...
# service1-alert.yaml
serverFiles:
alerts:
service1:
- alert: anAlert
# ...
# service2-alert.yaml
serverFiles:
alerts:
service2:
- alert: anAlert
# ...
```
```console
helm install [RELEASE_NAME] oci://ghcr.io/prometheus-community/charts/prometheus -f values.yaml -f service1-alert.yaml -f service2-alert.yaml
```
### RBAC Configuration
Roles and RoleBindings resources will be created automatically for `server` service.
To manually setup RBAC you need to set the parameter `rbac.create=false` and specify the service account to be used for each service by setting the parameters: `serviceAccounts.{{ component }}.create` to `false` and `serviceAccounts.{{ component }}.name` to the name of a pre-existing service account.
> **Tip**: You can refer to the default `*-clusterrole.yaml` and `*-clusterrolebinding.yaml` files in [templates](templates/) to customize your own.
### ConfigMap Files
AlertManager is configured through [alertmanager.yml](https://prometheus.io/docs/alerting/configuration/). This file (and any others listed in `alertmanagerFiles`) will be mounted into the `alertmanager` pod.
Prometheus is configured through [prometheus.yml](https://prometheus.io/docs/operating/configuration/). This file (and any others listed in `serverFiles`) will be mounted into the `server` pod.
### Ingress TLS
If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [cert-manager](https://github.com/jetstack/cert-manager)), please refer to the documentation for that mechanism.
To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret in the namespace:
```console
kubectl create secret tls prometheus-server-tls --cert=path/to/tls.cert --key=path/to/tls.key
```
Include the secret's name, along with the desired hostnames, in the alertmanager/server Ingress TLS section of your custom `values.yaml` file:
```yaml
server:
ingress:
## If true, Prometheus server Ingress will be created
##
enabled: true
## Prometheus server Ingress hostnames
## Must be provided if Ingress is enabled
##
hosts:
- prometheus.domain.com
## Prometheus server Ingress TLS configuration
## Secrets must be manually created in the namespace
##
tls:
- secretName: prometheus-server-tls
hosts:
- prometheus.domain.com
```
### NetworkPolicy
Enabling Network Policy for Prometheus will secure connections to Alert Manager and Kube State Metrics by only accepting connections from Prometheus Server. All inbound connections to Prometheus Server are still allowed.
To enable network policy for Prometheus, install a networking plugin that implements the Kubernetes NetworkPolicy spec, and set `networkPolicy.enabled` to true.
If NetworkPolicy is enabled for Prometheus' scrape targets, you may also need to manually create a networkpolicy which allows it.

View File

@@ -0,0 +1,26 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
# Chart test fixtures (not needed in the packaged chart)
unittests/
ci/

View File

@@ -0,0 +1,26 @@
# Chart.yaml for the alertmanager sub-chart (vendored from prometheus-community/helm-charts).
annotations:
artifacthub.io/license: Apache-2.0
artifacthub.io/links: |
- name: Chart Source
url: https://github.com/prometheus-community/helm-charts
apiVersion: v2
# appVersion tracks the bundled Alertmanager image tag.
appVersion: v0.29.0
description: The Alertmanager handles alerts sent by client applications such as the
Prometheus server.
home: https://prometheus.io/
icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
keywords:
- monitoring
kubeVersion: '>=1.25.0-0'
maintainers:
- email: monotek23@gmail.com
name: monotek
url: https://github.com/monotek
- email: naseem@transit.app
name: naseemkullah
url: https://github.com/naseemkullah
name: alertmanager
sources:
- https://github.com/prometheus/alertmanager
type: application
# Chart version (independent of appVersion above).
version: 1.28.0

View File

@@ -0,0 +1,62 @@
# Alertmanager
As per [prometheus.io documentation](https://prometheus.io/docs/alerting/latest/alertmanager/):
> The Alertmanager handles alerts sent by client applications such as the
> Prometheus server. It takes care of deduplicating, grouping, and routing them
> to the correct receiver integration such as email, PagerDuty, or OpsGenie. It
> also takes care of silencing and inhibition of alerts.
## Prerequisites
Kubernetes 1.25+
## Usage
The chart is distributed as an [OCI Artifact](https://helm.sh/docs/topics/registries/) as well as via a traditional [Helm Repository](https://helm.sh/docs/topics/chart_repository/).
- OCI Artifact: `oci://ghcr.io/prometheus-community/charts/alertmanager`
- Helm Repository: `https://prometheus-community.github.io/helm-charts` with chart `alertmanager`
The installation instructions use the OCI registry. Refer to the [`helm repo`](https://helm.sh/docs/helm/helm_repo/) command documentation for information on installing charts via the traditional repository.
### Install Chart
```console
helm install [RELEASE_NAME] oci://ghcr.io/prometheus-community/charts/alertmanager
```
_See [configuration](#configuration) below._
_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
### Uninstall Chart
```console
helm uninstall [RELEASE_NAME]
```
This removes all the Kubernetes components associated with the chart and deletes the release.
_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
### Upgrading Chart
```console
helm upgrade [RELEASE_NAME] [CHART] --install
```
_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
### To 1.0
The [configmap-reload](https://github.com/jimmidyson/configmap-reload) container was replaced by the [prometheus-config-reloader](https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader).
Extra command-line arguments specified via configmapReload.prometheus.extraArgs are not compatible and will break with the new prometheus-config-reloader, refer to the [sources](https://github.com/prometheus-operator/prometheus-operator/blob/main/cmd/prometheus-config-reloader/main.go) in order to make the appropriate adjustment to the extra command-line arguments.
The `networking.k8s.io/v1beta1` API is no longer supported. Use [`networking.k8s.io/v1`](https://kubernetes.io/docs/reference/using-api/deprecation-guide/#ingressclass-v122).
## Configuration
See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands:
```console
helm show values oci://ghcr.io/prometheus-community/charts/alertmanager
```

View File

@@ -0,0 +1,21 @@
{{- /*
NOTES.txt: post-install text printed by Helm, telling the user how to reach
the Alertmanager UI for whichever exposure is configured (Ingress, NodePort,
LoadBalancer, or ClusterIP via port-forward). The trailing "-}}" trims this
comment's newline so rendered output is unchanged.
*/ -}}
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ include "alertmanager.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "alertmanager.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ include "alertmanager.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of it by running 'kubectl get --namespace {{ include "alertmanager.namespace" . }} svc -w {{ include "alertmanager.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ include "alertmanager.namespace" . }} {{ include "alertmanager.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ include "alertmanager.namespace" . }} -l "app.kubernetes.io/name={{ include "alertmanager.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:{{ .Values.service.port }} to use your application"
kubectl --namespace {{ include "alertmanager.namespace" . }} port-forward $POD_NAME {{ .Values.service.port }}:80
{{- end }}

View File

@@ -0,0 +1,81 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
Honours .Values.nameOverride when set; truncated to 63 characters to fit
the Kubernetes DNS-1123 label length limit.
*/}}
{{- define "alertmanager.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
.Values.fullnameOverride, when set, takes precedence over the computed name.
*/}}
{{- define "alertmanager.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the helm.sh/chart label.
"+" is not a valid label character, so it is replaced by "_".
*/}}
{{- define "alertmanager.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels applied to every resource of this chart:
the selector labels plus chart/version/managed-by metadata.
*/}}
{{- define "alertmanager.labels" -}}
helm.sh/chart: {{ include "alertmanager.chart" . }}
{{ include "alertmanager.selectorLabels" . }}
{{- with .Chart.AppVersion }}
app.kubernetes.io/version: {{ . | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels: the stable subset of labels used in pod/service selectors.
*/}}
{{- define "alertmanager.selectorLabels" -}}
app.kubernetes.io/name: {{ include "alertmanager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use.
Defaults to the chart fullname when the chart creates the account,
or to "default" when it does not.
*/}}
{{- define "alertmanager.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "alertmanager.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Define Ingress apiVersion (only networking.k8s.io/v1 is emitted).
*/}}
{{- define "alertmanager.ingress.apiVersion" -}}
{{- printf "networking.k8s.io/v1" }}
{{- end }}
{{/*
Allow overriding alertmanager namespace via .Values.namespaceOverride;
falls back to the release namespace.
*/}}
{{- define "alertmanager.namespace" -}}
{{- if .Values.namespaceOverride -}}
{{- .Values.namespaceOverride -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,21 @@
{{/*
ConfigMap holding alertmanager.yml (rendered from .Values.config with the
"enabled" flag omitted) plus any extra template files from .Values.templates.
Only rendered when .Values.config.enabled is true; the following "{{-" trims
this comment's newline so output is unchanged.
*/}}
{{- if .Values.config.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "alertmanager.fullname" . }}
labels:
{{- include "alertmanager.labels" . | nindent 4 }}
{{- with .Values.configAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
namespace: {{ include "alertmanager.namespace" . }}
data:
alertmanager.yml: |
{{- $config := omit .Values.config "enabled" }}
{{- toYaml $config | default "{}" | nindent 4 }}
{{- range $key, $value := .Values.templates }}
{{ $key }}: |-
{{- $value | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,47 @@
{{/*
Ingress for the Alertmanager service (networking.k8s.io/v1): optional
ingressClassName, optional TLS stanza, and one rule per entry in
.Values.ingress.hosts. Only rendered when .Values.ingress.enabled is true.
*/}}
{{- if .Values.ingress.enabled }}
{{- $fullName := include "alertmanager.fullname" . }}
{{- $svcPort := .Values.service.port }}
apiVersion: {{ include "alertmanager.ingress.apiVersion" . }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "alertmanager.labels" . | nindent 4 }}
{{- if .Values.ingress.labels }}
{{- toYaml .Values.ingress.labels | nindent 4 }}
{{- end }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
namespace: {{ include "alertmanager.namespace" . }}
spec:
{{- if .Values.ingress.className }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
pathType: {{ .pathType }}
backend:
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,56 @@
{{/*
Renders a v1 List with one Ingress per Alertmanager replica, each routing
host "<hostPrefix>-<i>.<hostDomain>" to the matching per-replica Service
(see serviceperreplica.yaml). Requires both servicePerReplica.enabled and
ingressPerReplica.enabled. TLS can use one shared secret (tlsSecretName)
or one secret per replica (tlsSecretPerReplica).
*/}}
{{- if and .Values.servicePerReplica.enabled .Values.ingressPerReplica.enabled }}
{{- $pathType := .Values.ingressPerReplica.pathType }}
{{- $count := .Values.replicaCount | int -}}
{{- $servicePort := .Values.service.port -}}
{{- $ingressValues := .Values.ingressPerReplica -}}
{{- $fullName := include "alertmanager.fullname" . }}
apiVersion: v1
kind: List
metadata:
name: {{ $fullName }}-ingressperreplica
namespace: {{ include "alertmanager.namespace" . }}
items:
{{- range $i, $e := until $count }}
- kind: Ingress
apiVersion: {{ include "alertmanager.ingress.apiVersion" $ }}
metadata:
name: {{ $fullName }}-{{ $i }}
namespace: {{ include "alertmanager.namespace" $ }}
labels:
{{- include "alertmanager.labels" $ | nindent 8 }}
{{- if $ingressValues.labels }}
{{ toYaml $ingressValues.labels | indent 8 }}
{{- end }}
{{- if $ingressValues.annotations }}
annotations:
{{ toYaml $ingressValues.annotations | indent 8 }}
{{- end }}
spec:
{{- if $ingressValues.className }}
ingressClassName: {{ $ingressValues.className }}
{{- end }}
rules:
- host: {{ $ingressValues.hostPrefix }}-{{ $i }}.{{ $ingressValues.hostDomain }}
http:
paths:
{{- range $p := $ingressValues.paths }}
- path: {{ tpl $p $ }}
pathType: {{ $pathType }}
backend:
service:
name: {{ $fullName }}-{{ $i }}
port:
name: http
{{- end -}}
{{- if or $ingressValues.tlsSecretName $ingressValues.tlsSecretPerReplica.enabled }}
tls:
- hosts:
- {{ $ingressValues.hostPrefix }}-{{ $i }}.{{ $ingressValues.hostDomain }}
{{- if $ingressValues.tlsSecretPerReplica.enabled }}
secretName: {{ $ingressValues.tlsSecretPerReplica.prefix }}-{{ $i }}
{{- else }}
secretName: {{ $ingressValues.tlsSecretName }}
{{- end }}
{{- end }}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,14 @@
{{/*
PodDisruptionBudget for Alertmanager. The budget fields (minAvailable /
maxUnavailable) are taken verbatim from .Values.podDisruptionBudget, which
also gates rendering: an empty/unset value skips the resource entirely.
*/}}
{{- if .Values.podDisruptionBudget }}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: {{ include "alertmanager.fullname" . }}
labels:
{{- include "alertmanager.labels" . | nindent 4 }}
namespace: {{ include "alertmanager.namespace" . }}
spec:
selector:
matchLabels:
{{- include "alertmanager.selectorLabels" . | nindent 6 }}
{{- toYaml .Values.podDisruptionBudget | nindent 2 }}
{{- end }}

View File

@@ -0,0 +1,14 @@
{{/*
ServiceAccount for the Alertmanager pods, rendered only when
.Values.serviceAccount.create is true. Token automounting is controlled
explicitly via .Values.automountServiceAccountToken.
*/}}
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "alertmanager.serviceAccountName" . }}
labels:
{{- include "alertmanager.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
namespace: {{ include "alertmanager.namespace" . }}
automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
{{- end }}

View File

@@ -0,0 +1,44 @@
{{/*
Renders a v1 List containing one Service per Alertmanager replica, each
pinned to a single StatefulSet pod via the
statefulset.kubernetes.io/pod-name selector label. Enabled via
.Values.servicePerReplica.enabled; used together with ingressPerReplica
to expose each replica individually.
*/}}
{{- /* fix: single-argument "and" was a no-op; test the flag directly */}}
{{- if .Values.servicePerReplica.enabled }}
{{- $count := .Values.replicaCount | int -}}
{{- $serviceValues := .Values.servicePerReplica -}}
apiVersion: v1
kind: List
metadata:
name: {{ include "alertmanager.fullname" . }}-serviceperreplica
namespace: {{ include "alertmanager.namespace" . }}
items:
{{- range $i, $e := until $count }}
- apiVersion: v1
kind: Service
metadata:
name: {{ include "alertmanager.fullname" $ }}-{{ $i }}
namespace: {{ include "alertmanager.namespace" $ }}
labels:
{{- include "alertmanager.labels" $ | nindent 8 }}
{{- if $serviceValues.annotations }}
annotations:
{{ toYaml $serviceValues.annotations | indent 8 }}
{{- end }}
spec:
{{- if $serviceValues.clusterIP }}
clusterIP: {{ $serviceValues.clusterIP }}
{{- end }}
{{- if $serviceValues.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{- range $cidr := $serviceValues.loadBalancerSourceRanges }}
- {{ $cidr }}
{{- end }}
{{- end }}
{{- if ne $serviceValues.type "ClusterIP" }}
externalTrafficPolicy: {{ $serviceValues.externalTrafficPolicy }}
{{- end }}
ports:
- name: http
port: {{ $.Values.service.port }}
targetPort: {{ $.Values.containerPortName }}
selector:
{{- include "alertmanager.selectorLabels" $ | nindent 8 }}
statefulset.kubernetes.io/pod-name: {{ include "alertmanager.fullname" $ }}-{{ $i }}
type: "{{ $serviceValues.type }}"
{{- end }}
{{- end }}

View File

@@ -0,0 +1,75 @@
{{- /*
Client-facing Service plus a headless companion for the Alertmanager
StatefulSet. The headless Service provides the stable per-pod DNS names used
for gossip cluster peering; the first Service is what users and Prometheus
talk to.
*/ -}}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "alertmanager.fullname" . }}
  labels:
    {{- include "alertmanager.labels" . | nindent 4 }}
    {{- with .Values.service.labels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  {{- with .Values.service.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  namespace: {{ include "alertmanager.namespace" . }}
spec:
  {{- if .Values.service.ipDualStack.enabled }}
  ipFamilies: {{ toYaml .Values.service.ipDualStack.ipFamilies | nindent 4 }}
  ipFamilyPolicy: {{ .Values.service.ipDualStack.ipFamilyPolicy }}
  {{- end }}
  type: {{ .Values.service.type }}
  {{- with .Values.service.loadBalancerIP }}
  loadBalancerIP: {{ . }}
  {{- end }}
  {{- with .Values.service.loadBalancerSourceRanges }}
  loadBalancerSourceRanges:
    {{- range $cidr := . }}
    - {{ $cidr }}
    {{- end }}
  {{- end }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: {{ .Values.containerPortName }}
      protocol: TCP
      name: http
      {{- /* A fixed nodePort is honoured only for NodePort services. */}}
      {{- if (and (eq .Values.service.type "NodePort") .Values.service.nodePort) }}
      nodePort: {{ .Values.service.nodePort }}
      {{- end }}
    {{- with .Values.service.extraPorts }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  selector:
    {{- include "alertmanager.selectorLabels" . | nindent 4 }}
---
{{- /* Headless Service backing the StatefulSet's serviceName. */}}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "alertmanager.fullname" . }}-headless
  labels:
    {{- include "alertmanager.labels" . | nindent 4 }}
    {{- with .Values.service.labels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  namespace: {{ include "alertmanager.namespace" . }}
spec:
  clusterIP: None
  ports:
    - port: {{ .Values.service.port }}
      targetPort: {{ .Values.containerPortName }}
      protocol: TCP
      name: http
    {{- /* Gossip ports are only exposed when clustering is in play (HA or external peers). */}}
    {{- if or (gt (int .Values.replicaCount) 1) (.Values.additionalPeers) }}
    - port: {{ .Values.service.clusterPort }}
      targetPort: clusterpeer-tcp
      protocol: TCP
      name: cluster-tcp
    - port: {{ .Values.service.clusterPort }}
      targetPort: clusterpeer-udp
      protocol: UDP
      name: cluster-udp
    {{- end }}
    {{- with .Values.service.extraPorts }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  selector:
    {{- include "alertmanager.selectorLabels" . | nindent 4 }}

View File

@@ -0,0 +1,280 @@
{{- /*
StatefulSet running Alertmanager, with an optional configmap-reload sidecar
and optional persistent storage. Cluster (gossip) mode is switched on when
replicaCount > 1 or additionalPeers is set.
*/}}
{{- $svcClusterPort := .Values.service.clusterPort }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ include "alertmanager.fullname" . }}
  labels:
    {{- include "alertmanager.labels" . | nindent 4 }}
  {{- with .Values.statefulSet.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  namespace: {{ include "alertmanager.namespace" . }}
spec:
  replicas: {{ .Values.replicaCount }}
  minReadySeconds: {{ .Values.minReadySeconds }}
  revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
  selector:
    matchLabels:
      {{- include "alertmanager.selectorLabels" . | nindent 6 }}
  serviceName: {{ include "alertmanager.fullname" . }}-headless
  template:
    metadata:
      labels:
        {{- include "alertmanager.selectorLabels" . | nindent 8 }}
        {{- with .Values.podLabels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      annotations:
        {{- /* Roll pods on config change only when the reload sidecar is disabled;
             with the sidecar enabled, config changes are hot-reloaded instead. */}}
        {{- if not .Values.configmapReload.enabled }}
        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
        {{- end }}
        {{- with .Values.podAnnotations }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
    spec:
      {{- /* NOTE(review): emitted only when hostUsers is truthy; with the values
           default of false nothing is rendered and the pod keeps the Kubernetes
           default (hostUsers: true). Confirm whether opting INTO user namespaces
           (hostUsers: false) was meant to be expressible here. */}}
      {{- if .Values.hostUsers }}
      hostUsers: true
      {{- end }}
      automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "alertmanager.serviceAccountName" . }}
      {{- with .Values.dnsConfig }}
      dnsConfig:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.hostAliases }}
      hostAliases:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.schedulerName }}
      schedulerName: {{ . }}
      {{- end }}
      {{- /* The "affinity:" key is opened once and then filled by the user-supplied
           affinity and/or the generated podAntiAffinity branch below. */}}
      {{- if or .Values.podAntiAffinity .Values.affinity }}
      affinity:
      {{- end }}
      {{- with .Values.affinity }}
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- if eq .Values.podAntiAffinity "hard" }}
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - topologyKey: {{ .Values.podAntiAffinityTopologyKey }}
              labelSelector:
                matchExpressions:
                  - {key: app.kubernetes.io/name, operator: In, values: [{{ include "alertmanager.name" . }}]}
      {{- else if eq .Values.podAntiAffinity "soft" }}
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                topologyKey: {{ .Values.podAntiAffinityTopologyKey }}
                labelSelector:
                  matchExpressions:
                    - {key: app.kubernetes.io/name, operator: In, values: [{{ include "alertmanager.name" . }}]}
      {{- end }}
      {{- with .Values.priorityClassName }}
      priorityClassName: {{ . }}
      {{- end }}
      {{- with .Values.topologySpreadConstraints }}
      topologySpreadConstraints:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      {{- with .Values.extraInitContainers }}
      initContainers:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      containers:
        {{- /* Optional sidecar: watches the config dir and POSTs the reload URL. */}}
        {{- if .Values.configmapReload.enabled }}
        - name: {{ .Chart.Name }}-{{ .Values.configmapReload.name }}
          image: "{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}"
          imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}"
          {{- with .Values.configmapReload.extraEnv }}
          env:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          args:
            {{- /* Provide defaults only when the user has not overridden them via extraArgs. */}}
            {{- if and (hasKey .Values.configmapReload.extraArgs "config-file" | not) (hasKey .Values.configmapReload.extraArgs "watched-dir" | not) }}
            - --watched-dir=/etc/alertmanager
            {{- end }}
            {{- if not (hasKey .Values.configmapReload.extraArgs "reload-url") }}
            - --reload-url=http://127.0.0.1:9093/-/reload
            {{- end }}
            {{- range $key, $value := .Values.configmapReload.extraArgs }}
            - --{{ $key }}={{ $value }}
            {{- end }}
          resources:
            {{- toYaml .Values.configmapReload.resources | nindent 12 }}
          {{- with .Values.configmapReload.containerPort }}
          ports:
            - containerPort: {{ . }}
          {{- end }}
          {{- with .Values.configmapReload.livenessProbe }}
          livenessProbe:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- with .Values.configmapReload.readinessProbe }}
          readinessProbe:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- with .Values.configmapReload.securityContext }}
          securityContext:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          volumeMounts:
            {{- if .Values.config.enabled }}
            - name: config
              mountPath: /etc/alertmanager
            {{- end }}
            {{- if .Values.configmapReload.extraVolumeMounts }}
            {{- toYaml .Values.configmapReload.extraVolumeMounts | nindent 12 }}
            {{- end }}
        {{- end }}
        {{- /* Main Alertmanager container. */}}
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          env:
            {{- /* POD_IP feeds --cluster.advertise-address below. */}}
            - name: POD_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
            {{- if .Values.extraEnv }}
            {{- toYaml .Values.extraEnv | nindent 12 }}
            {{- end }}
          {{- with .Values.command }}
          command:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          args:
            - --storage.path=/alertmanager
            {{- if not (hasKey .Values.extraArgs "config.file") }}
            - --config.file=/etc/alertmanager/alertmanager.yml
            {{- end }}
            {{- /* Gossip clustering: only when HA replicas or external peers exist. */}}
            {{- if or (gt (int .Values.replicaCount) 1) (.Values.additionalPeers) }}
            - --cluster.advertise-address=[$(POD_IP)]:{{ $svcClusterPort }}
            - --cluster.listen-address=0.0.0.0:{{ $svcClusterPort }}
            {{- end }}
            {{- /* Peer each replica via its stable headless-service DNS name. */}}
            {{- if gt (int .Values.replicaCount) 1}}
            {{- $fullName := include "alertmanager.fullname" . }}
            {{- range $i := until (int .Values.replicaCount) }}
            - --cluster.peer={{ $fullName }}-{{ $i }}.{{ $fullName }}-headless:{{ $svcClusterPort }}
            {{- end }}
            {{- end }}
            {{- if .Values.additionalPeers }}
            {{- range $item := .Values.additionalPeers }}
            - --cluster.peer={{ $item }}
            {{- end }}
            {{- end }}
            {{- range $key, $value := .Values.extraArgs }}
            - --{{ $key }}={{ $value }}
            {{- end }}
            {{- if .Values.baseURL }}
            - --web.external-url={{ .Values.baseURL }}
            {{- end }}
          ports:
            - name: {{ .Values.containerPortName }}
              containerPort: 9093
              protocol: TCP
            {{- if or (gt (int .Values.replicaCount) 1) (.Values.additionalPeers) }}
            - name: clusterpeer-tcp
              containerPort: {{ $svcClusterPort }}
              protocol: TCP
            - name: clusterpeer-udp
              containerPort: {{ $svcClusterPort }}
              protocol: UDP
            {{- end }}
          livenessProbe:
            {{- toYaml .Values.livenessProbe | nindent 12 }}
          readinessProbe:
            {{- toYaml .Values.readinessProbe | nindent 12 }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          volumeMounts:
            {{- if .Values.config.enabled }}
            - name: config
              mountPath: /etc/alertmanager
            {{- end }}
            {{- range .Values.extraSecretMounts }}
            - name: {{ .name }}
              mountPath: {{ .mountPath }}
              subPath: {{ .subPath }}
              readOnly: {{ .readOnly }}
            {{- end }}
            - name: storage
              mountPath: /alertmanager
            {{- if .Values.extraVolumeMounts }}
            {{- toYaml .Values.extraVolumeMounts | nindent 12 }}
            {{- end }}
        {{- with .Values.extraContainers }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      volumes:
        {{- if .Values.config.enabled }}
        - name: config
          configMap:
            name: {{ include "alertmanager.fullname" . }}
        {{- end }}
        {{- range .Values.extraSecretMounts }}
        - name: {{ .name }}
          secret:
            secretName: {{ .secretName }}
            {{- with .optional }}
            optional: {{ . }}
            {{- end }}
        {{- end }}
        {{- if .Values.extraVolumes }}
        {{- toYaml .Values.extraVolumes | nindent 8 }}
        {{- end }}
      {{- if .Values.extraPodConfigs }}
      {{- toYaml .Values.extraPodConfigs | nindent 6 }}
      {{- end }}
  {{- /* Storage: a PVC template when persistence is on, otherwise an emptyDir
       item appended to the volumes list above.
       NOTE(review): the emptyDir branch renders after the extraPodConfigs block;
       if extraPodConfigs is set while persistence is disabled, the rendered list
       item lands after pod-level keys — verify that combination renders valid YAML. */}}
  {{- if .Values.persistence.enabled }}
  volumeClaimTemplates:
    - metadata:
        name: storage
        {{- with .Values.persistence.annotations }}
        annotations:
          {{ toYaml . | nindent 14 }}
        {{- end }}
        {{- with .Values.persistence.labels }}
        labels:
          {{ toYaml . | nindent 14 }}
        {{- end }}
      spec:
        accessModes:
          {{- toYaml .Values.persistence.accessModes | nindent 10 }}
        resources:
          requests:
            storage: {{ .Values.persistence.size }}
        {{- if .Values.persistence.storageClass }}
        {{- /* The conventional "-" sentinel disables dynamic provisioning. */}}
        {{- if (eq "-" .Values.persistence.storageClass) }}
        storageClassName: ""
        {{- else }}
        storageClassName: {{ .Values.persistence.storageClass }}
        {{- end }}
        {{- end }}
  {{- else }}
        - name: storage
          {{- with .Values.persistence.emptyDir }}
          emptyDir:
            {{- toYaml . | nindent 12 }}
          {{- else }}
          emptyDir: {}
          {{- end -}}
  {{- end }}

View File

@@ -0,0 +1,20 @@
{{- /*
Helm test pod: wget's the Alertmanager Service once and exits; success means
the Service answers HTTP. Hook annotations (e.g. "helm.sh/hook": test) are
expected to arrive via testFramework.annotations.
NOTE(review): the busybox image is unpinned (no tag/digest) — consider pinning
for reproducible test runs.
*/}}
{{- if .Values.testFramework.enabled }}
apiVersion: v1
kind: Pod
metadata:
  name: "{{ include "alertmanager.fullname" . }}-test-connection"
  labels:
    {{- include "alertmanager.labels" . | nindent 4 }}
  {{- with .Values.testFramework.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  namespace: {{ include "alertmanager.namespace" . }}
spec:
  containers:
    - name: wget
      image: busybox
      command: ['wget']
      args: ['{{ include "alertmanager.fullname" . }}:{{ .Values.service.port }}']
  restartPolicy: Never
{{- end }}

View File

@@ -0,0 +1,26 @@
{{- /*
VerticalPodAutoscaler targeting the Alertmanager StatefulSet.
Fix: the namespace previously used .Release.Namespace directly, ignoring
values.namespaceOverride; it now goes through the "alertmanager.namespace"
helper like every other template in this chart, so all resources land in the
same namespace.
*/}}
{{- if .Values.verticalPodAutoscaler.enabled }}
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: {{ include "alertmanager.fullname" . }}
  namespace: {{ include "alertmanager.namespace" . }}
spec:
  {{- if .Values.verticalPodAutoscaler.recommenders }}
  recommenders:
    {{- range .Values.verticalPodAutoscaler.recommenders }}
    - name: {{ .name }}
    {{- end }}
  {{- end }}
  targetRef:
    apiVersion: apps/v1
    kind: StatefulSet
    name: {{ include "alertmanager.fullname" . }}
  {{- if .Values.verticalPodAutoscaler.updatePolicy }}
  updatePolicy:
    {{- toYaml .Values.verticalPodAutoscaler.updatePolicy | nindent 4 }}
  {{- end }}
  {{- if .Values.verticalPodAutoscaler.resourcePolicy }}
  resourcePolicy:
    {{- toYaml .Values.verticalPodAutoscaler.resourcePolicy | nindent 4 }}
  {{- end }}
{{- end }}

View File

@@ -0,0 +1,972 @@
{
"$schema": "http://json-schema.org/draft-07/schema",
"title": "alertmanager",
"description": "The Alertmanager handles alerts sent by client applications such as the Prometheus server.",
"type": "object",
"required": [
"replicaCount",
"image",
"serviceAccount",
"service",
"persistence",
"config"
],
"definitions": {
"image": {
"description": "Container image parameters.",
"type": "object",
"required": ["repository"],
"additionalProperties": false,
"properties": {
"repository": {
"description": "Image repository. Path to the image with registry(quay.io) or without(prometheus/alertmanager) for docker.io.",
"type": "string"
},
"pullPolicy": {
"description": "Image pull policy. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated.",
"type": "string",
"enum": [
"Never",
"IfNotPresent",
"Always"
],
"default": "IfNotPresent"
},
"tag": {
"description": "Use chart appVersion by default.",
"type": "string",
"default": ""
}
}
},
"resources": {
"description": "Resource limits and requests for the Container.",
"type": "object",
"properties": {
"limits": {
"description": "Resource limits for the Container.",
"type": "object",
"properties": {
"cpu": {
"description": "CPU request for the Container.",
"type": "string"
},
"memory": {
"description": "Memory request for the Container.",
"type": "string"
}
}
},
"requests": {
"description": "Resource requests for the Container.",
"type": "object",
"properties": {
"cpu": {
"description": "CPU request for the Container.",
"type": "string"
},
"memory": {
"description": "Memory request for the Container.",
"type": "string"
}
}
}
}
},
"securityContext": {
"description": "Security context for the container.",
"type": "object",
"properties": {
"capabilities": {
"description": "Specifies the capabilities to be dropped by the container.",
"type": "object",
"properties": {
"drop": {
"description": "List of capabilities to be dropped.",
"type": "array",
"items": {
"type": "string"
}
}
}
},
"readOnlyRootFilesystem": {
"description": "Specifies whether the root file system should be mounted as read-only.",
"type": "boolean"
},
"runAsUser": {
"description": "Specifies the UID (User ID) to run the container as.",
"type": "integer"
},
"runAsNonRoot": {
"description": "Specifies whether to run the container as a non-root user.",
"type": "boolean"
},
"runAsGroup": {
"description": "Specifies the GID (Group ID) to run the container as.",
"type": "integer"
}
}
},
"volumeMounts": {
"description": "List of volume mounts for the Container.",
"type": "array",
"items": {
"description": "Volume mounts for the Container.",
"type": "object",
"required": ["name", "mountPath"],
"properties": {
"name": {
"description": "The name of the volume to mount.",
"type": "string"
},
"mountPath": {
"description": "The mount path for the volume.",
"type": "string"
},
"readOnly": {
"description": "Specifies if the volume should be mounted in read-only mode.",
"type": "boolean"
}
}
}
},
"env": {
"description": "List of environment variables for the Container.",
"type": "array",
"items": {
"description": "Environment variables for the Container.",
"type": "object",
"required": ["name"],
"properties": {
"name": {
"description": "The name of the environment variable.",
"type": "string"
},
"value": {
"description": "The value of the environment variable.",
"type": "string"
}
}
}
},
"config": {
"description": "https://prometheus.io/docs/alerting/latest/configuration/",
"duration": {
"type": "string",
"pattern": "^((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0)$"
},
"labelname": {
"type": "string",
"pattern": "^[a-zA-Z_][a-zA-Z0-9_]*$|^...$"
},
"route": {
"description": "Alert routing configuration.",
"type": "object",
"properties": {
"receiver": {
"description": "The default receiver to send alerts to.",
"type": "string"
},
"group_by": {
"description": "The labels by which incoming alerts are grouped together.",
"type": "array",
"items": {
"type": "string",
"$ref": "#/definitions/config/labelname"
}
},
"continue": {
"description": "Whether an alert should continue matching subsequent sibling nodes.",
"type": "boolean",
"default": false
},
"matchers": {
"description": "A list of matchers that an alert has to fulfill to match the node.",
"type": "array",
"items": {
"type": "string"
}
},
"group_wait": {
"description": "How long to initially wait to send a notification for a group of alerts.",
"$ref": "#/definitions/config/duration"
},
"group_interval": {
"description": "How long to wait before sending a notification about new alerts that are added to a group of alerts for which an initial notification has already been sent.",
"$ref": "#/definitions/config/duration"
},
"repeat_interval": {
"description": "How long to wait before sending a notification again if it has already been sent successfully for an alert.",
"$ref": "#/definitions/config/duration"
},
"mute_time_intervals": {
"description": "Times when the route should be muted.",
"type": "array",
"items": {
"type": "string"
}
},
"active_time_intervals": {
"description": "Times when the route should be active.",
"type": "array",
"items": {
"type": "string"
}
},
"routes": {
"description": "Zero or more child routes.",
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/config/route"
}
}
}
}
}
},
"properties": {
"replicaCount": {
"description": "Number of desired pods.",
"type": "integer",
"default": 1,
"minimum": 0
},
"image": {
"description": "Container image parameters.",
"$ref": "#/definitions/image"
},
"baseURL": {
"description": "External URL where alertmanager is reachable.",
"type": "string",
"default": "",
"examples": [
"https://alertmanager.example.com"
]
},
"extraArgs": {
"description": "Additional alertmanager container arguments. Use args without '--', only 'key: value' syntax.",
"type": "object",
"default": {}
},
"extraSecretMounts": {
"description": "Additional Alertmanager Secret mounts.",
"type": "array",
"default": [],
"items": {
"type": "object",
"required": ["name", "mountPath", "secretName"],
"properties": {
"name": {
"type": "string"
},
"mountPath": {
"type": "string"
},
"subPath": {
"type": "string",
"default": ""
},
"secretName": {
"type": "string"
},
"readOnly": {
"type": "boolean",
"default": false
}
}
}
},
"imagePullSecrets": {
"description": "The property allows you to configure multiple image pull secrets.",
"type": "array",
"default": [],
"items": {
"type": "object",
"required": ["name"],
"properties": {
"name": {
"description": "Specifies the Secret name of the image pull secret.",
"type": "string"
}
}
}
},
"nameOverride": {
"description": "Override value for the name of the Helm chart.",
"type": "string",
"default": ""
},
"fullnameOverride": {
"description": "Override value for the fully qualified app name.",
"type": "string",
"default": ""
},
"namespaceOverride": {
"description": "Override deployment namespace.",
"type": "string",
"default": ""
},
"automountServiceAccountToken": {
"description": "Specifies whether to automatically mount the ServiceAccount token into the Pod's filesystem.",
"type": "boolean",
"default": true
},
"hostUsers": {
"description": "Running within a user namespace, where the user IDs inside the container are mapped to different, usually unprivileged, user IDs on the host system.",
"type": "boolean",
"default": false
},
"serviceAccount": {
"description": "Contains properties related to the service account configuration.",
"type": "object",
"required": ["create"],
"properties": {
"create": {
"description": "Specifies whether a service account should be created.",
"type": "boolean",
"default": true
},
"annotations": {
"description": "Annotations to add to the service account.",
"type": "object",
"default": {}
},
"name": {
"description": "The name of the service account to use. If not set and create is true, a name is generated using the fullname template.",
"type": "string",
"default": ""
}
}
},
"schedulerName": {
"description": "Sets the schedulerName in the alertmanager pod.",
"type": "string",
"default": ""
},
"priorityClassName": {
"description": "Sets the priorityClassName in the alertmanager pod.",
"type": "string",
"default": ""
},
"podSecurityContext": {
"description": "Pod security context configuration.",
"type": "object",
"properties": {
"fsGroup": {
"description": "The fsGroup value for the pod's security context.",
"type": "integer",
"default": 65534
},
"runAsUser": {
"description": "The UID to run the pod's containers as.",
"type": "integer"
},
"runAsGroup": {
"description": "The GID to run the pod's containers as.",
"type": "integer"
}
}
},
"dnsConfig": {
"description": "DNS configuration for the pod.",
"type": "object",
"properties": {
"nameservers": {
"description": "List of DNS server IP addresses.",
"type": "array",
"items": {
"type": "string"
}
},
"searches": {
"description": "List of DNS search domains.",
"type": "array",
"items": {
"type": "string"
}
},
"options": {
"description": "List of DNS options.",
"type": "array",
"items": {
"description": "DNS options.",
"type": "object",
"required": ["name"],
"properties": {
"name": {
"description": "The name of the DNS option.",
"type": "string"
},
"value": {
"description": "The value of the DNS option.",
"type": "string"
}
}
}
}
}
},
"hostAliases": {
"description": "List of host aliases.",
"type": "array",
"items": {
"description": "Host aliases configuration.",
"type": "object",
"required": ["ip", "hostnames"],
"properties": {
"ip": {
"description": "IP address associated with the host alias.",
"type": "string"
},
"hostnames": {
"description": "List of hostnames associated with the IP address.",
"type": "array",
"items": {
"type": "string"
}
}
}
}
},
"securityContext": {
"description": "Security context for the container.",
"$ref": "#/definitions/securityContext"
},
"additionalPeers": {
"description": "Additional peers for a alertmanager.",
"type": "array",
"items": {
"type": "string"
}
},
"extraInitContainers": {
"description": "Additional InitContainers to initialize the pod.",
"type": "array",
"default": [],
"items": {
"required": ["name", "image"],
"properties": {
"name": {
"description": "The name of the InitContainer.",
"type": "string"
},
"image": {
"description": "The container image to use for the InitContainer.",
"type": "string"
},
"pullPolicy": {
"description": "Image pull policy. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated.",
"type": "string",
"enum": [
"Never",
"IfNotPresent",
"Always"
],
"default": "IfNotPresent"
},
"command": {
"description": "The command to run in the InitContainer.",
"type": "array",
"items": {
"type": "string"
}
},
"args": {
"description": "Additional command arguments for the InitContainer.",
"type": "array",
"items": {
"type": "string"
}
},
"ports": {
"description": "List of ports to expose from the container.",
"type": "array",
"items": {
"type": "object"
}
},
"env": {
"description": "List of environment variables for the InitContainer.",
"$ref": "#/definitions/env"
},
"envFrom": {
"description": "List of sources to populate environment variables in the container.",
"type": "array",
"items": {
"type": "object"
}
},
"volumeMounts": {
"description": "List of volume mounts for the InitContainer.",
"$ref": "#/definitions/volumeMounts"
},
"resources": {
"description": "Resource requirements for the InitContainer.",
"$ref": "#/definitions/resources"
},
"securityContext": {
"$ref": "#/definitions/securityContext",
"description": "The security context for the InitContainer."
}
}
}
},
"extraContainers": {
"description": "Additional containers to add to the stateful set.",
"type": "array",
"default": [],
"items": {
"required": ["name", "image"],
"properties": {
"name": {
"description": "The name of the InitContainer.",
"type": "string"
},
"image": {
"description": "The container image to use for the InitContainer.",
"type": "string"
},
"pullPolicy": {
"description": "Image pull policy. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated.",
"type": "string",
"enum": [
"Never",
"IfNotPresent",
"Always"
],
"default": "IfNotPresent"
},
"command": {
"description": "The command to run in the InitContainer.",
"type": "array",
"items": {
"type": "string"
}
},
"args": {
"description": "Additional command arguments for the InitContainer.",
"type": "array",
"items": {
"type": "string"
}
},
"ports": {
"description": "List of ports to expose from the container.",
"type": "array",
"items": {
"type": "object"
}
},
"env": {
"description": "List of environment variables for the InitContainer.",
"$ref": "#/definitions/env"
},
"envFrom": {
"description": "List of sources to populate environment variables in the container.",
"type": "array",
"items": {
"type": "object"
}
},
"volumeMounts": {
"description": "List of volume mounts for the InitContainer.",
"$ref": "#/definitions/volumeMounts"
},
"resources": {
"description": "Resource requirements for the InitContainer.",
"$ref": "#/definitions/resources"
},
"securityContext": {
"$ref": "#/definitions/securityContext",
"description": "The security context for the InitContainer."
}
}
}
},
"resources": {
"description": "Resource limits and requests for the pod.",
"$ref": "#/definitions/resources"
},
"containerPortName": {
"description": "Name of the port for the main container.",
"type": "string",
"default": "http"
},
"livenessProbe": {
"description": "Liveness probe configuration.",
"type": "object"
},
"readinessProbe": {
"description": "Readiness probe configuration.",
"type": "object"
},
"service": {
"description": "Service configuration.",
"type": "object",
"required": ["type", "port"],
"properties": {
"annotations": {
"description": "Annotations to add to the service.",
"type": "object"
},
"type": {
"description": "Service type.",
"type": "string"
},
"port": {
"description": "Port number for the service.",
"type": "integer"
},
"clusterPort": {
"description": "Port number for the cluster.",
"type": "integer"
},
"loadBalancerIP": {
"description": "External IP to assign when the service type is LoadBalancer.",
"type": "string"
},
"loadBalancerSourceRanges": {
"description": "IP ranges to allow access to the loadBalancerIP.",
"type": "array",
"items": {
"type": "string"
}
},
"nodePort": {
"description": "Specific nodePort to force when service type is NodePort.",
"type": "integer"
}
}
},
"ingress": {
"description": "Ingress configuration.",
"type": "object",
"properties": {
"enabled": {
"description": "Indicates if Ingress is enabled.",
"type": "boolean"
},
"className": {
"description": "Ingress class name.",
"type": "string"
},
"annotations": {
"description": "Annotations to add to the Ingress.",
"type": "object"
},
"hosts": {
"description": "Host and path configuration for the Ingress.",
"type": "array",
"items": {
"type": "object",
"properties": {
"host": {
"description": "Host name for the Ingress.",
"type": "string"
},
"paths": {
"description": "Path configuration for the Ingress.",
"type": "array",
"items": {
"type": "object",
"properties": {
"path": {
"description": "Path for the Ingress.",
"type": "string"
},
"pathType": {
"description": "Path type for the Ingress.",
"type": "string"
}
}
}
}
}
}
},
"tls": {
"description": "TLS configuration for the Ingress.",
"type": "array",
"items": {
"type": "object",
"properties": {
"secretName": {
"description": "Name of the secret for TLS.",
"type": "string"
},
"hosts": {
"description": "Host names for the TLS configuration.",
"type": "array",
"items": {
"type": "string"
}
}
}
}
}
}
},
"nodeSelector": {
"description": "Node selector for pod assignment.",
"type": "object"
},
"tolerations": {
"description": "Tolerations for pod assignment.",
"type": "array"
},
"affinity": {
"description": "Affinity rules for pod assignment.",
"type": "object"
},
"podAntiAffinity": {
"description": "Pod anti-affinity configuration.",
"type": "string",
"enum": ["", "soft", "hard"],
"default": ""
},
"podAntiAffinityTopologyKey": {
"description": "Topology key to use for pod anti-affinity.",
"type": "string"
},
"topologySpreadConstraints": {
"description": "Topology spread constraints for pod assignment.",
"type": "array",
"items": {
"type": "object",
"required": ["maxSkew", "topologyKey", "whenUnsatisfiable", "labelSelector"],
"properties": {
"maxSkew": {
"type": "integer"
},
"topologyKey": {
"type": "string"
},
"whenUnsatisfiable": {
"type": "string",
"enum": ["DoNotSchedule", "ScheduleAnyway"]
},
"labelSelector": {
"type": "object",
"required": ["matchLabels"],
"properties": {
"matchLabels": {
"type": "object"
}
}
}
}
}
},
"statefulSet": {
"description": "StatefulSet configuration for managing pods.",
"type": "object",
"properties": {
"annotations": {
"type": "object"
}
}
},
"podAnnotations": {
"description": "Annotations to add to the pods.",
"type": "object"
},
"podLabels": {
"description": "Labels to add to the pods.",
"type": "object"
},
"podDisruptionBudget": {
"description": "Pod disruption budget configuration.",
"type": "object",
"properties": {
"maxUnavailable": {
"type": "integer"
},
"minAvailable": {
"type": "integer"
}
}
},
"command": {
"description": "The command to be executed in the container.",
"type": "array",
"items": {
"type": "string"
}
},
"persistence": {
"description": "Persistence configuration for storing data.",
"type": "object",
"required": ["enabled", "size"],
"properties": {
"enabled": {
"type": "boolean"
},
"storageClass": {
"type": "string"
},
"accessModes": {
"type": "array",
"items": {
"type": "string"
}
},
"size": {
"type": "string"
},
"annotations": {
"type": "object",
"description": "Custom annotations to apply to the PersistentVolumeClaim created by the Alertmanager StatefulSet.",
"additionalProperties": {
"type": "string"
},
"default": {}
},
"labels": {
"type": "object",
"description": "Custom labels to apply to the PersistentVolumeClaim created by the Alertmanager StatefulSet.",
"additionalProperties": {
"type": "string"
},
"default": {}
}
}
},
"configAnnotations": {
"description": "Annotations to be added to the Alertmanager configuration.",
"type": "object"
},
"config": {
"description": "Alertmanager configuration.",
"type": "object",
"properties": {
"enabled": {
"description": "Whether to create alermanager configmap or not.",
"type": "boolean"
},
"global": {
"description": "Global configuration options.",
"type": "object"
},
"templates": {
"description": "Alertmanager template files.",
"type": "array",
"items": {
"type": "string"
}
},
"receivers": {
"description": "Alert receivers configuration.",
"type": "array",
"items": {
"type": "object",
"required": ["name"],
"properties": {
"name": {
"description": "The unique name of the receiver.",
"type": "string"
}
}
}
},
"route": {
"description": "Alert routing configuration.",
"type": "object",
"$ref": "#/definitions/config/route"
}
}
},
"configmapReload": {
"description": "Monitors ConfigMap changes and POSTs to a URL.",
"type": "object",
"properties": {
"enabled": {
"description": "Specifies whether the configmap-reload container should be deployed.",
"type": "boolean",
"default": false
},
"name": {
"description": "The name of the configmap-reload container.",
"type": "string"
},
"image": {
"description": "The container image for the configmap-reload container.",
"$ref": "#/definitions/image"
},
"containerPort": {
"description": "Port number for the configmap-reload container.",
"type": "integer"
},
"resources": {
"description": "Resource requests and limits for the configmap-reload container.",
"$ref": "#/definitions/resources"
}
}
},
"templates": {
"description": "Custom templates used by Alertmanager.",
"type": "object"
},
"extraVolumeMounts": {
"description": "List of volume mounts for the Container.",
"$ref": "#/definitions/volumeMounts"
},
"extraVolumes": {
"description": "Additional volumes to be mounted in the Alertmanager pod.",
"type": "array",
"default": [],
"items": {
"type": "object",
"required": ["name"],
"properties": {
"name": {
"type": "string"
}
}
}
},
"extraEnv": {
"description": "List of environment variables for the Container.",
"$ref": "#/definitions/env"
},
"testFramework": {
"description": "Configuration for the test Pod.",
"type": "object",
"properties": {
"enabled": {
"description": "Specifies whether the test Pod is enabled.",
"type": "boolean",
"default": false
},
"annotations": {
"description": "Annotations to be added to the test Pod.",
"type": "object"
}
}
},
"verticalPodAutoscaler": {
"description": "Vertical Pod Autoscaling configuration.",
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"default": false
},
"recommenders": {
"type": "array"
},
"updatePolicy": {
"type": "object"
},
"resourcePolicy": {
"type": "object"
}
}
},
"extraPodConfigs": {
"description": "Object to allow users to add additional Pod configuration like dnsPolicy or hostNetwork",
"type": "object"
}
}
}

View File

@@ -0,0 +1,440 @@
# yaml-language-server: $schema=values.schema.json
# Default values for alertmanager.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
# Number of old history to retain to allow rollback
# Default Kubernetes value is set to 10
revisionHistoryLimit: 10
image:
  repository: quay.io/prometheus/alertmanager
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""
# Full external URL where alertmanager is reachable, used for backlinks.
baseURL: ""
extraArgs: {}
## Additional Alertmanager Secret mounts
# Defines additional mounts with secrets. Secrets must be manually created in the namespace.
extraSecretMounts: []
  # - name: secret-files
  #   mountPath: /etc/secrets
  #   subPath: ""
  #   secretName: alertmanager-secret-files
  #   readOnly: true
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
## namespaceOverride overrides the namespace which the resources will be deployed in
namespaceOverride: ""
automountServiceAccountToken: true
## Running within a user namespace.
# Kubernetes server must be at or later than version v1.25.
# Kubernetes v1.25 through to v1.27 recognise UserNamespacesStatelessPodsSupport.
# Kubernetes v1.28 through to v1.32 need to enable the UserNamespacesSupport feature gate.
hostUsers: false
serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""
# Sets priorityClassName in alertmanager pod
priorityClassName: ""
# Sets schedulerName in alertmanager pod
schedulerName: ""
podSecurityContext:
  fsGroup: 65534
dnsConfig: {}
  # nameservers:
  #   - 1.2.3.4
  # searches:
  #   - ns1.svc.cluster-domain.example
  #   - my.dns.search.suffix
  # options:
  #   - name: ndots
  #     value: "2"
  #   - name: edns0
hostAliases: []
  # - ip: "127.0.0.1"
  #   hostnames:
  #     - "foo.local"
  #     - "bar.local"
  # - ip: "10.1.2.3"
  #   hostnames:
  #     - "foo.remote"
  #     - "bar.remote"
securityContext:
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  runAsUser: 65534
  runAsNonRoot: true
  runAsGroup: 65534
additionalPeers: []
## Additional InitContainers to initialize the pod
##
extraInitContainers: []
## Additional containers to add to the stateful set. This will allow to setup sidecarContainers like a proxy to integrate
## alertmanager with an external tool like teams that has not direct integration.
##
extraContainers: []
containerPortName: &containerPortName http
livenessProbe:
  httpGet:
    path: /
    port: *containerPortName
readinessProbe:
  httpGet:
    path: /
    port: *containerPortName
service:
  annotations: {}
  labels: {}
  type: ClusterIP
  port: 9093
  clusterPort: 9094
  loadBalancerIP: ""  # Assign ext IP when Service type is LoadBalancer
  loadBalancerSourceRanges: []  # Only allow access to loadBalancerIP from these IPs
  # if you want to force a specific nodePort. Must be use with service.type=NodePort
  # nodePort:
  # Optionally specify extra list of additional ports exposed on both services
  extraPorts: []
  # ip dual stack
  ipDualStack:
    enabled: false
    ipFamilies: ["IPv6", "IPv4"]
    ipFamilyPolicy: "PreferDualStack"
# Configuration for creating a separate Service for each statefulset Alertmanager replica
#
servicePerReplica:
  enabled: false
  annotations: {}
  # Loadbalancer source IP ranges
  # Only used if servicePerReplica.type is "LoadBalancer"
  loadBalancerSourceRanges: []
  # Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
  #
  externalTrafficPolicy: Cluster
  # Service type
  #
  type: ClusterIP
ingress:
  enabled: false
  className: ""
  labels: {}
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: alertmanager.domain.com
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - alertmanager.domain.com
# Configuration for creating an Ingress that will map to each Alertmanager replica service
# alertmanager.servicePerReplica must be enabled
#
ingressPerReplica:
  enabled: false
  # className for the ingresses
  #
  className: ""
  annotations: {}
  labels: {}
  # Final form of the hostname for each per replica ingress is
  # {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
  #
  # Prefix for the per replica ingress that will have `-$replicaNumber`
  # appended to the end
  hostPrefix: "alertmanager"
  # Domain that will be used for the per replica ingress
  hostDomain: "domain.com"
  # Paths to use for ingress rules
  #
  paths:
    - /
  # PathType for ingress rules
  #
  pathType: ImplementationSpecific
  # Secret name containing the TLS certificate for alertmanager per replica ingress
  # Secret must be manually created in the namespace
  tlsSecretName: ""
  # Separated secret for each per replica Ingress. Can be used together with cert-manager
  #
  tlsSecretPerReplica:
    enabled: false
    # Final form of the secret for each per replica ingress is
    # {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }}
    #
    prefix: "alertmanager"
resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 10m
  #   memory: 32Mi
nodeSelector: {}
tolerations: []
affinity: {}
## Pod anti-affinity can prevent the scheduler from placing Alertmanager replicas on the same node.
## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
##
podAntiAffinity: ""
## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
##
podAntiAffinityTopologyKey: kubernetes.io/hostname
## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in.
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: failure-domain.beta.kubernetes.io/zone
  #   whenUnsatisfiable: DoNotSchedule
  #   labelSelector:
  #     matchLabels:
  #       app.kubernetes.io/instance: alertmanager
statefulSet:
  annotations: {}
## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to
## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
## This is an alpha field from kubernetes 1.22 until 1.24 which requires enabling the StatefulSetMinReadySeconds
## feature gate.
## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#minimum-ready-seconds
minReadySeconds: 0
podAnnotations: {}
podLabels: {}
# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
podDisruptionBudget: {}
  # maxUnavailable: 1
  # minAvailable: 1
command: []
persistence:
  ## If true, storage will create or use Persistence Volume
  ## If false, storage will use emptyDir
  ##
  enabled: true
  ## Custom annotations for the PVC created by the alertmanager StatefulSet.
  ## Useful for configuring storage provider options such as disk type, KMS encryption keys, or custom volume name prefixes.
  annotations: {}
  ## Custom labels for the PVC created by the alertmanager StatefulSet.
  ## Useful for selecting, grouping, and organizing so that they can be queried or targeted in deployments, policies, etc.
  labels: {}
  ## Persistent Volume Storage Class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ## set, choosing the default provisioner.
  ##
  # storageClass: "-"
  accessModes:
    - ReadWriteOnce
  size: 50Mi
  ## Configure emptyDir volume
  ##
  emptyDir: {}
configAnnotations: {}
  ## For example if you want to provide private data from a secret vault
  ## https://github.com/banzaicloud/bank-vaults/tree/main/charts/vault-secrets-webhook
  ## P.s.: Add option `configMapMutation: true` for vault-secrets-webhook
  # vault.security.banzaicloud.io/vault-role: "admin"
  # vault.security.banzaicloud.io/vault-addr: "https://vault.vault.svc.cluster.local:8200"
  # vault.security.banzaicloud.io/vault-skip-verify: "true"
  # vault.security.banzaicloud.io/vault-path: "kubernetes"
  ## Example for inject secret
  # slack_api_url: '${vault:secret/data/slack-hook-alerts#URL}'
config:
  enabled: true
  global: {}
    # slack_api_url: ''
  templates:
    - '/etc/alertmanager/*.tmpl'
  receivers:
    - name: default-receiver
      # slack_configs:
      #   - channel: '@you'
      #     send_resolved: true
  route:
    group_wait: 10s
    group_interval: 5m
    receiver: default-receiver
    repeat_interval: 3h
## Monitors ConfigMap changes and POSTs to a URL
## Ref: https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader
##
configmapReload:
  ## If false, the configmap-reload container will not be deployed
  ##
  enabled: false
  ## configmap-reload container name
  ##
  name: configmap-reload
  ## configmap-reload container image
  ##
  image:
    repository: quay.io/prometheus-operator/prometheus-config-reloader
    tag: v0.86.1
    pullPolicy: IfNotPresent
  # containerPort: 9533
  ## configmap-reload resource requests and limits
  ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
  ##
  resources: {}
  livenessProbe: {}
    # httpGet:
    #   path: /healthz
    #   port: 8080
    #   scheme: HTTP
  readinessProbe: {}
    # httpGet:
    #   path: /healthz
    #   port: 8080
    #   scheme: HTTP
  extraArgs: {}
  ## Optionally specify extra list of additional volumeMounts
  extraVolumeMounts: []
    # - name: extras
    #   mountPath: /usr/share/extras
    #   readOnly: true
  ## Optionally specify extra environment variables to add to alertmanager container
  extraEnv: []
    # - name: FOO
    #   value: BAR
  securityContext: {}
    # capabilities:
    #   drop:
    #     - ALL
    # readOnlyRootFilesystem: true
    # runAsUser: 65534
    # runAsNonRoot: true
    # runAsGroup: 65534
templates: {}
  # alertmanager.tmpl: |-
## Optionally specify extra list of additional volumeMounts
extraVolumeMounts: []
  # - name: extras
  #   mountPath: /usr/share/extras
  #   readOnly: true
## Optionally specify extra list of additional volumes
extraVolumes: []
  # - name: extras
  #   emptyDir: {}
## Optionally specify extra environment variables to add to alertmanager container
extraEnv: []
  # - name: FOO
  #   value: BAR
testFramework:
  enabled: false
  annotations:
    "helm.sh/hook": test-success
    # "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
# --- Vertical Pod Autoscaler
verticalPodAutoscaler:
  # -- Use VPA for alertmanager
  enabled: false
  # recommenders:
  #   - name: 'alternative'
  # updatePolicy:
  #   updateMode: "Auto"
  #   minReplicas: 1
  # resourcePolicy:
  #   containerPolicies:
  #     - containerName: '*'
  #       minAllowed:
  #         cpu: 100m
  #         memory: 128Mi
  #       maxAllowed:
  #         cpu: 1
  #         memory: 500Mi
  #       controlledResources: ["cpu", "memory"]
# --- Extra Pod Configs
extraPodConfigs: {}
  # dnsPolicy: ClusterFirstWithHostNet
  # hostNetwork: true

View File

@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View File

@@ -0,0 +1,29 @@
annotations:
  artifacthub.io/license: Apache-2.0
  artifacthub.io/links: |
    - name: Chart Source
      url: https://github.com/prometheus-community/helm-charts
apiVersion: v2
appVersion: 2.17.0
description: Install kube-state-metrics to generate and expose cluster-level metrics
home: https://github.com/kubernetes/kube-state-metrics/
keywords:
  - metric
  - monitoring
  - prometheus
  - kubernetes
maintainers:
  - email: tariq.ibrahim@mulesoft.com
    name: tariq1890
    url: https://github.com/tariq1890
  - email: manuel@rueg.eu
    name: mrueg
    url: https://github.com/mrueg
  - email: david@0xdc.me
    name: dotdc
    url: https://github.com/dotdc
name: kube-state-metrics
sources:
  - https://github.com/kubernetes/kube-state-metrics/
type: application
version: 6.4.1

View File

@@ -0,0 +1,87 @@
# kube-state-metrics Helm Chart
Installs the [kube-state-metrics agent](https://github.com/kubernetes/kube-state-metrics).
## Usage
The chart is distributed as an [OCI Artifact](https://helm.sh/docs/topics/registries/) as well as via a traditional [Helm Repository](https://helm.sh/docs/topics/chart_repository/).
- OCI Artifact: `oci://ghcr.io/prometheus-community/charts/kube-state-metrics`
- Helm Repository: `https://prometheus-community.github.io/helm-charts` with chart `kube-state-metrics`
The installation instructions use the OCI registry. Refer to the [`helm repo`](https://helm.sh/docs/helm/helm_repo/) command documentation for information on installing charts via the traditional repository.
### Install Chart
```console
helm install [RELEASE_NAME] oci://ghcr.io/prometheus-community/charts/kube-state-metrics [flags]
```
_See [configuration](#configuration) below._
_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
### Uninstall Chart
```console
helm uninstall [RELEASE_NAME]
```
This removes all the Kubernetes components associated with the chart and deletes the release.
_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
### Upgrading Chart
```console
helm upgrade [RELEASE_NAME] oci://ghcr.io/prometheus-community/charts/kube-state-metrics [flags]
```
_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
#### Migrating from stable/kube-state-metrics and kubernetes/kube-state-metrics
You can upgrade in-place:
1. [upgrade](#upgrading-chart) your existing release name using the new chart repository
## Upgrading to v6.0.0
This version drops support for deprecated Pod Security Policy resources.
## Upgrading to v3.0.0
v3.0.0 includes kube-state-metrics v2.0, see the [changelog](https://github.com/kubernetes/kube-state-metrics/blob/release-2.0/CHANGELOG.md) for major changes on the application-side.
The upgraded chart now has the following changes:
- Dropped support for helm v2 (helm v3 or later is required)
- collectors key was renamed to resources
- namespace key was renamed to namespaces
## Configuration
See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments:
```console
helm show values oci://ghcr.io/prometheus-community/charts/kube-state-metrics
```
### kube-rbac-proxy
You can enable `kube-state-metrics` endpoint protection using `kube-rbac-proxy`. By setting `kubeRBACProxy.enabled: true`, this chart will deploy one RBAC proxy container per endpoint (metrics & telemetry).
To authorize access, authenticate your requests (via a `ServiceAccount` for example) with a `ClusterRole` attached such as:
```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kube-state-metrics-read
rules:
- apiGroups: [ "" ]
resources: ["services/kube-state-metrics"]
verbs:
- get
```
See [kube-rbac-proxy examples](https://github.com/brancz/kube-rbac-proxy/tree/master/examples/resource-attributes) for more details.

View File

@@ -0,0 +1,23 @@
kube-state-metrics is a simple service that listens to the Kubernetes API server and generates metrics about the state of the objects.
The exposed metrics can be found here:
https://github.com/kubernetes/kube-state-metrics/blob/master/docs/README.md#exposed-metrics
The metrics are exported on the HTTP endpoint /metrics on the listening port.
In your case, {{ template "kube-state-metrics.fullname" . }}.{{ template "kube-state-metrics.namespace" . }}.svc.cluster.local:{{ .Values.service.port }}/metrics
They are served either as plaintext or protobuf depending on the Accept header.
They are designed to be consumed either by Prometheus itself or by a scraper that is compatible with scraping a Prometheus client endpoint.
{{- if .Values.kubeRBACProxy.enabled}}
kube-rbac-proxy endpoint protection is enabled:
- Metrics endpoints are now HTTPS
- Ensure that the client authenticates the requests (e.g. via service account) with the following role permissions:
```
rules:
- apiGroups: [ "" ]
resources: ["services/{{ template "kube-state-metrics.fullname" . }}"]
verbs:
- get
```
{{- end }}

View File

@@ -0,0 +1,186 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "kube-state-metrics.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kube-state-metrics.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create the name of the service account to use
*/}}
{{- define "kube-state-metrics.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "kube-state-metrics.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "kube-state-metrics.namespace" -}}
{{- if .Values.namespaceOverride -}}
{{- .Values.namespaceOverride -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "kube-state-metrics.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Generate basic labels
*/}}
{{- define "kube-state-metrics.labels" }}
helm.sh/chart: {{ template "kube-state-metrics.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: metrics
app.kubernetes.io/part-of: {{ template "kube-state-metrics.name" . }}
{{- include "kube-state-metrics.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
{{- if .Values.customLabels }}
{{ tpl (toYaml .Values.customLabels) . }}
{{- end }}
{{- if .Values.releaseLabel }}
release: {{ .Release.Name }}
{{- end }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "kube-state-metrics.selectorLabels" }}
{{- if .Values.selectorOverride }}
{{ toYaml .Values.selectorOverride }}
{{- else }}
app.kubernetes.io/name: {{ include "kube-state-metrics.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{- end }}

{{/* Sets default scrape limits for servicemonitor */}}
{{- define "servicemonitor.scrapeLimits" -}}
{{- with .sampleLimit }}
sampleLimit: {{ . }}
{{- end }}
{{- with .targetLimit }}
targetLimit: {{ . }}
{{- end }}
{{- with .labelLimit }}
labelLimit: {{ . }}
{{- end }}
{{- with .labelNameLengthLimit }}
labelNameLengthLimit: {{ . }}
{{- end }}
{{- with .labelValueLengthLimit }}
labelValueLengthLimit: {{ . }}
{{- end }}
{{- end -}}

{{/* Sets default scrape limits for scrapeconfig */}}
{{- define "scrapeconfig.scrapeLimits" -}}
{{- with .sampleLimit }}
sampleLimit: {{ . }}
{{- end }}
{{- with .targetLimit }}
targetLimit: {{ . }}
{{- end }}
{{- with .labelLimit }}
labelLimit: {{ . }}
{{- end }}
{{- with .labelNameLengthLimit }}
labelNameLengthLimit: {{ . }}
{{- end }}
{{- with .labelValueLengthLimit }}
labelValueLengthLimit: {{ . }}
{{- end }}
{{- end -}}

{{/*
Formats imagePullSecrets. Input is (dict "Values" .Values "imagePullSecrets" .{specific imagePullSecrets})
Entries may be either maps (rendered verbatim) or bare strings (wrapped as "name:").
*/}}
{{- define "kube-state-metrics.imagePullSecrets" -}}
{{- range (concat .Values.global.imagePullSecrets .imagePullSecrets) }}
{{- if eq (typeOf .) "map[string]interface {}" }}
- {{ toYaml . | trim }}
{{- else }}
- name: {{ . }}
{{- end }}
{{- end }}
{{- end -}}

{{/*
The image to use for kube-state-metrics
Precedence: global.imageRegistry over image.registry; tag defaults to "v<appVersion>"; sha appended as digest when set.
*/}}
{{- define "kube-state-metrics.image" -}}
{{- if .Values.image.sha }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s@%s" .Values.global.imageRegistry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.sha }}
{{- else }}
{{- printf "%s/%s:%s@%s" .Values.image.registry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.sha }}
{{- end }}
{{- else }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }}
{{- else }}
{{- printf "%s/%s:%s" .Values.image.registry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }}
{{- end }}
{{- end }}
{{- end }}

{{/*
The image to use for kubeRBACProxy
Same precedence rules as "kube-state-metrics.image", applied to .Values.kubeRBACProxy.image.
*/}}
{{- define "kubeRBACProxy.image" -}}
{{- if .Values.kubeRBACProxy.image.sha }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s@%s" .Values.global.imageRegistry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) .Values.kubeRBACProxy.image.sha }}
{{- else }}
{{- printf "%s/%s:%s@%s" .Values.kubeRBACProxy.image.registry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) .Values.kubeRBACProxy.image.sha }}
{{- end }}
{{- else }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) }}
{{- else }}
{{- printf "%s/%s:%s" .Values.kubeRBACProxy.image.registry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) }}
{{- end }}
{{- end }}
{{- end }}

{{/*
The name of the ConfigMap for the customResourceState config.
Uses .Values.customResourceState.name when non-empty, otherwise "<fullname>-customresourcestate-config".
*/}}
{{- define "kube-state-metrics.crsConfigMapName" -}}
{{- if ne .Values.customResourceState.name "" }}
{{- .Values.customResourceState.name }}
{{- else }}
{{- template "kube-state-metrics.fullname" . }}-customresourcestate-config
{{- end }}
{{- end }}

View File

@@ -0,0 +1,33 @@
{{- if and .Values.networkPolicy.enabled (eq .Values.networkPolicy.flavor "cilium") }}
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  {{- if .Values.annotations }}
  annotations:
    {{ toYaml .Values.annotations | nindent 4 }}
  {{- end }}
  labels:
    {{- include "kube-state-metrics.labels" . | indent 4 }}
  name: {{ template "kube-state-metrics.fullname" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
spec:
  endpointSelector:
    matchLabels:
      {{- include "kube-state-metrics.selectorLabels" . | indent 6 }}
  egress:
    {{- if and .Values.networkPolicy.cilium .Values.networkPolicy.cilium.kubeApiServerSelector }}
    {{ toYaml .Values.networkPolicy.cilium.kubeApiServerSelector | nindent 6 }}
    {{- else }}
    - toEntities:
      - kube-apiserver
    {{- end }}
  ingress:
  - toPorts:
    - ports:
      - port: {{ .Values.service.port | quote }}
        protocol: TCP
      {{- if .Values.selfMonitor.enabled }}
      - port: {{ .Values.selfMonitor.telemetryPort | default 8081 | quote }}
        protocol: TCP
      {{ end }}
{{ end }}

View File

@@ -0,0 +1,20 @@
{{- if and .Values.rbac.create .Values.rbac.useClusterRole -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    {{- include "kube-state-metrics.labels" . | indent 4 }}
  name: {{ template "kube-state-metrics.fullname" . }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
{{- if .Values.rbac.useExistingRole }}
  name: {{ .Values.rbac.useExistingRole }}
{{- else }}
  name: {{ template "kube-state-metrics.fullname" . }}
{{- end }}
subjects:
- kind: ServiceAccount
  name: {{ template "kube-state-metrics.serviceAccountName" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
{{- end -}}

View File

@@ -0,0 +1,16 @@
{{- if and .Values.customResourceState.enabled .Values.customResourceState.create }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "kube-state-metrics.crsConfigMapName" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
  labels:
    {{- include "kube-state-metrics.labels" . | indent 4 }}
  {{- if .Values.annotations }}
  annotations:
    {{ toYaml .Values.annotations | nindent 4 }}
  {{- end }}
data:
  {{ .Values.customResourceState.key }}: |
    {{- toYaml .Values.customResourceState.config | nindent 4 }}
{{- end }}

View File

@@ -0,0 +1,379 @@
apiVersion: apps/v1
{{- if .Values.autosharding.enabled }}
kind: StatefulSet
{{- else }}
kind: Deployment
{{- end }}
metadata:
name: {{ template "kube-state-metrics.fullname" . }}
namespace: {{ template "kube-state-metrics.namespace" . }}
labels:
{{- include "kube-state-metrics.labels" . | indent 4 }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels:
{{- include "kube-state-metrics.selectorLabels" . | indent 6 }}
replicas: {{ .Values.replicas }}
{{- if not .Values.autosharding.enabled }}
strategy:
type: {{ .Values.updateStrategy | default "RollingUpdate" }}
{{- end }}
revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
{{- if .Values.autosharding.enabled }}
serviceName: {{ template "kube-state-metrics.fullname" . }}
volumeClaimTemplates: []
{{- end }}
template:
metadata:
labels:
{{- include "kube-state-metrics.labels" . | indent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.podAnnotations }}
annotations:
{{ toYaml .Values.podAnnotations | nindent 8 }}
{{- end }}
spec:
automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
hostNetwork: {{ .Values.hostNetwork }}
serviceAccountName: {{ template "kube-state-metrics.serviceAccountName" . }}
{{- if .Values.securityContext.enabled }}
securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
{{- with .Values.initContainers }}
initContainers:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- if .Values.dnsConfig }}
dnsConfig: {{ toYaml .Values.dnsConfig | nindent 8 }}
{{- end }}
dnsPolicy: {{ .Values.dnsPolicy }}
containers:
{{- $servicePort := ternary .Values.kubeRBACProxy.port (.Values.service.port | default 8080) .Values.kubeRBACProxy.enabled}}
{{- $telemetryPort := ternary 9091 (.Values.selfMonitor.telemetryPort | default 8081) .Values.kubeRBACProxy.enabled}}
- name: {{ template "kube-state-metrics.name" . }}
{{- if .Values.autosharding.enabled }}
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- if .Values.env }}
{{- toYaml .Values.env | nindent 8 }}
{{- end }}
{{ else }}
{{- if .Values.env }}
env:
{{- toYaml .Values.env | nindent 8 }}
{{- end }}
{{- end }}
args:
{{- if .Values.extraArgs }}
{{- .Values.extraArgs | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.kubeRBACProxy.enabled }}
- --host=127.0.0.1
{{- end }}
- --port={{ $servicePort }}
{{- if .Values.collectors }}
- --resources={{ .Values.collectors | join "," }}
{{- end }}
{{- if .Values.metricLabelsAllowlist }}
- --metric-labels-allowlist={{ .Values.metricLabelsAllowlist | join "," }}
{{- end }}
{{- if .Values.metricAnnotationsAllowList }}
- --metric-annotations-allowlist={{ .Values.metricAnnotationsAllowList | join "," }}
{{- end }}
{{- if .Values.metricAllowlist }}
- --metric-allowlist={{ .Values.metricAllowlist | join "," }}
{{- end }}
{{- if .Values.metricDenylist }}
- --metric-denylist={{ .Values.metricDenylist | join "," }}
{{- end }}
{{- $namespaces := list }}
{{- if .Values.namespaces }}
{{- range $ns := join "," .Values.namespaces | split "," }}
{{- $namespaces = append $namespaces (tpl $ns $) }}
{{- end }}
{{- end }}
{{- if .Values.releaseNamespace }}
{{- $namespaces = append $namespaces ( include "kube-state-metrics.namespace" . ) }}
{{- end }}
{{- if $namespaces }}
- --namespaces={{ $namespaces | mustUniq | join "," }}
{{- end }}
{{- if .Values.namespacesDenylist }}
- --namespaces-denylist={{ tpl (.Values.namespacesDenylist | join ",") $ }}
{{- end }}
{{- if .Values.autosharding.enabled }}
- --pod=$(POD_NAME)
- --pod-namespace=$(POD_NAMESPACE)
{{- end }}
{{- if .Values.kubeconfig.enabled }}
- --kubeconfig=/opt/k8s/.kube/config
{{- end }}
{{- if .Values.kubeRBACProxy.enabled }}
- --telemetry-host=127.0.0.1
- --telemetry-port={{ $telemetryPort }}
{{- else }}
{{- if .Values.selfMonitor.telemetryHost }}
- --telemetry-host={{ .Values.selfMonitor.telemetryHost }}
{{- end }}
{{- if .Values.selfMonitor.telemetryPort }}
- --telemetry-port={{ $telemetryPort }}
{{- end }}
{{- end }}
{{- if .Values.customResourceState.enabled }}
- --custom-resource-state-config-file=/etc/customresourcestate/{{ .Values.customResourceState.key }}
{{- end }}
{{- if or (.Values.kubeconfig.enabled) (.Values.customResourceState.enabled) (.Values.volumeMounts) }}
volumeMounts:
{{- if .Values.kubeconfig.enabled }}
- name: kubeconfig
mountPath: /opt/k8s/.kube/
readOnly: true
{{- end }}
{{- if .Values.customResourceState.enabled }}
- name: customresourcestate-config
mountPath: /etc/customresourcestate
readOnly: true
{{- end }}
{{- if .Values.volumeMounts }}
{{ toYaml .Values.volumeMounts | indent 8 }}
{{- end }}
{{- end }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
image: {{ include "kube-state-metrics.image" . }}
{{- if eq .Values.kubeRBACProxy.enabled false }}
ports:
- containerPort: {{ .Values.service.port | default 8080}}
name: http
{{- if .Values.selfMonitor.enabled }}
- containerPort: {{ $telemetryPort }}
name: metrics
{{- end }}
{{- end }}
{{- if .Values.startupProbe.enabled }}
startupProbe:
failureThreshold: {{ .Values.startupProbe.failureThreshold }}
httpGet:
{{- if .Values.hostNetwork }}
host: 127.0.0.1
{{- end }}
httpHeaders:
{{- range $_, $header := .Values.startupProbe.httpGet.httpHeaders }}
- name: {{ $header.name }}
value: {{ $header.value }}
{{- end }}
path: /healthz
{{- if .Values.kubeRBACProxy.enabled }}
port: http
scheme: HTTPS
{{- else }}
port: {{ $servicePort }}
scheme: {{ upper .Values.startupProbe.httpGet.scheme }}
{{- end }}
initialDelaySeconds: {{ .Values.startupProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.startupProbe.periodSeconds }}
successThreshold: {{ .Values.startupProbe.successThreshold }}
timeoutSeconds: {{ .Values.startupProbe.timeoutSeconds }}
{{- end }}
livenessProbe:
failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
httpGet:
{{- if .Values.hostNetwork }}
host: 127.0.0.1
{{- end }}
httpHeaders:
{{- range $_, $header := .Values.livenessProbe.httpGet.httpHeaders }}
- name: {{ $header.name }}
value: {{ $header.value }}
{{- end }}
path: /livez
{{- if .Values.kubeRBACProxy.enabled }}
port: http
scheme: HTTPS
{{- else }}
port: {{ $servicePort }}
scheme: {{ upper .Values.livenessProbe.httpGet.scheme }}
{{- end }}
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
successThreshold: {{ .Values.livenessProbe.successThreshold }}
timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
readinessProbe:
failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
httpGet:
{{- if .Values.hostNetwork }}
host: 127.0.0.1
{{- end }}
httpHeaders:
{{- range $_, $header := .Values.readinessProbe.httpGet.httpHeaders }}
- name: {{ $header.name }}
value: {{ $header.value }}
{{- end }}
path: /readyz
{{- if .Values.kubeRBACProxy.enabled }}
port: metrics
scheme: HTTPS
{{- else }}
port: {{ $telemetryPort }}
scheme: {{ upper .Values.readinessProbe.httpGet.scheme }}
{{- end }}
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
successThreshold: {{ .Values.readinessProbe.successThreshold }}
timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
resources:
{{ toYaml .Values.resources | indent 10 }}
{{- if .Values.containerSecurityContext }}
securityContext:
{{ toYaml .Values.containerSecurityContext | indent 10 }}
{{- end }}
{{- if .Values.kubeRBACProxy.enabled }}
- name: kube-rbac-proxy-http
args:
{{- if .Values.kubeRBACProxy.extraArgs }}
{{- .Values.kubeRBACProxy.extraArgs | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.kubeRBACProxy.ignoreProbePaths }}
- --ignore-paths=/livez,/readyz
{{- end }}
- --secure-listen-address=:{{ .Values.service.port | default 8080}}
- --upstream=http://127.0.0.1:{{ $servicePort }}/
- --proxy-endpoints-port={{ .Values.kubeRBACProxy.proxyEndpointsPort | default 8888 }}
- --config-file=/etc/kube-rbac-proxy-config/config-file.yaml
volumeMounts:
- name: kube-rbac-proxy-config
mountPath: /etc/kube-rbac-proxy-config
{{- with .Values.kubeRBACProxy.volumeMounts }}
{{- toYaml . | nindent 10 }}
{{- end }}
imagePullPolicy: {{ .Values.kubeRBACProxy.image.pullPolicy }}
image: {{ include "kubeRBACProxy.image" . }}
ports:
- containerPort: {{ .Values.service.port | default 8080}}
name: http
- containerPort: {{ .Values.kubeRBACProxy.proxyEndpointsPort | default 8888 }}
name: http-healthz
readinessProbe:
httpGet:
scheme: HTTPS
port: http-healthz
path: /healthz
initialDelaySeconds: 5
timeoutSeconds: 5
{{- if .Values.kubeRBACProxy.resources }}
resources:
{{ toYaml .Values.kubeRBACProxy.resources | indent 10 }}
{{- end }}
{{- if .Values.kubeRBACProxy.containerSecurityContext }}
securityContext:
{{ toYaml .Values.kubeRBACProxy.containerSecurityContext | indent 10 }}
{{- end }}
{{- if .Values.selfMonitor.enabled }}
- name: kube-rbac-proxy-telemetry
args:
{{- if .Values.kubeRBACProxy.extraArgs }}
{{- .Values.kubeRBACProxy.extraArgs | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.kubeRBACProxy.ignoreProbePaths }}
- --ignore-paths=/livez,/readyz
{{- end }}
- --secure-listen-address=:{{ .Values.selfMonitor.telemetryPort | default 8081 }}
- --upstream=http://127.0.0.1:{{ $telemetryPort }}/
- --proxy-endpoints-port=8889
- --config-file=/etc/kube-rbac-proxy-config/config-file.yaml
volumeMounts:
- name: kube-rbac-proxy-config
mountPath: /etc/kube-rbac-proxy-config
{{- with .Values.kubeRBACProxy.volumeMounts }}
{{- toYaml . | nindent 10 }}
{{- end }}
imagePullPolicy: {{ .Values.kubeRBACProxy.image.pullPolicy }}
image: {{ include "kubeRBACProxy.image" . }}
ports:
- containerPort: {{ .Values.selfMonitor.telemetryPort | default 8081 }}
name: metrics
- containerPort: 8889
name: metrics-healthz
readinessProbe:
httpGet:
scheme: HTTPS
port: 8889
path: healthz
initialDelaySeconds: 5
timeoutSeconds: 5
{{- if .Values.kubeRBACProxy.resources }}
resources:
{{ toYaml .Values.kubeRBACProxy.resources | indent 10 }}
{{- end }}
{{- if .Values.kubeRBACProxy.containerSecurityContext }}
securityContext:
{{ toYaml .Values.kubeRBACProxy.containerSecurityContext | indent 10 }}
{{- end }}
{{- end }}
{{- end }}
{{- with .Values.containers }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- include "kube-state-metrics.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.imagePullSecrets) | indent 8 }}
{{- end }}
{{- if .Values.affinity }}
affinity:
{{- if kindIs "map" .Values.affinity }}
{{- toYaml .Values.affinity | nindent 8 }}
{{- else }}
{{- tpl .Values.affinity $ | nindent 8 }}
{{- end }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ tpl (toYaml .) $ | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ tpl (toYaml .) $ | indent 8 }}
{{- end }}
{{- if .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{ toYaml .Values.topologySpreadConstraints | indent 8 }}
{{- end }}
{{- if or (.Values.kubeconfig.enabled) (.Values.customResourceState.enabled) (.Values.volumes) (.Values.kubeRBACProxy.enabled) }}
volumes:
{{- if .Values.kubeconfig.enabled}}
- name: kubeconfig
secret:
secretName: {{ template "kube-state-metrics.fullname" . }}-kubeconfig
{{- end }}
{{- if .Values.kubeRBACProxy.enabled}}
- name: kube-rbac-proxy-config
configMap:
name: {{ template "kube-state-metrics.fullname" . }}-rbac-config
{{- end }}
{{- if .Values.customResourceState.enabled}}
- name: customresourcestate-config
configMap:
name: {{ template "kube-state-metrics.crsConfigMapName" . }}
{{- end }}
{{- if .Values.volumes }}
{{ toYaml .Values.volumes | indent 8 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,4 @@
{{- /*
extra-manifests: render every entry of .Values.extraManifests as its own
YAML document. Each entry is passed through tpl, so entries may reference
chart values and helpers.
*/}}
{{ range .Values.extraManifests }}
---
{{ tpl (toYaml .) $ }}
{{ end }}

View File

@@ -0,0 +1,12 @@
{{- /*
Secret holding an explicit kubeconfig for kube-state-metrics, created only
when .Values.kubeconfig.enabled. The deployment mounts it when enabled.
*/}}
{{- if .Values.kubeconfig.enabled -}}
apiVersion: v1
kind: Secret
metadata:
  name: {{ template "kube-state-metrics.fullname" . }}-kubeconfig
  namespace: {{ template "kube-state-metrics.namespace" . }}
  labels:
    {{- include "kube-state-metrics.labels" . | indent 4 }}
type: Opaque
data:
  # NOTE(review): placed under `data` without b64enc, so the value in
  # .Values.kubeconfig.secret is presumably already base64-encoded — confirm
  # against the values file documentation.
  config: '{{ .Values.kubeconfig.secret }}'
{{- end -}}

View File

@@ -0,0 +1,43 @@
{{- /*
NetworkPolicy for the "kubernetes" flavor only (other flavors are rendered
elsewhere). policyTypes always lists both Ingress and Egress: with no egress
rules supplied, all egress is denied by default.
*/}}
{{- if and .Values.networkPolicy.enabled (eq .Values.networkPolicy.flavor "kubernetes") }}
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  {{- if .Values.annotations }}
  annotations:
    {{ toYaml .Values.annotations | nindent 4 }}
  {{- end }}
  labels:
    {{- include "kube-state-metrics.labels" . | indent 4 }}
  name: {{ template "kube-state-metrics.fullname" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
spec:
  {{- if .Values.networkPolicy.egress }}
  ## Deny all egress by default
  egress:
    {{- toYaml .Values.networkPolicy.egress | nindent 4 }}
  {{- end }}
  ingress:
    {{- if .Values.networkPolicy.ingress }}
    {{- toYaml .Values.networkPolicy.ingress | nindent 4 }}
    {{- else }}
    ## Allow ingress on default ports by default
    - ports:
      - port: http
        protocol: TCP
      {{- if .Values.selfMonitor.enabled }}
      {{- /* With kube-rbac-proxy the telemetry port is fixed at 9091; otherwise the configured/default 8081. */}}
      {{- $telemetryPort := ternary 9091 (.Values.selfMonitor.telemetryPort | default 8081) .Values.kubeRBACProxy.enabled}}
      - port: {{ $telemetryPort }}
        protocol: TCP
      {{- end }}
    {{- end }}
  podSelector:
    {{- if .Values.networkPolicy.podSelector }}
    {{- toYaml .Values.networkPolicy.podSelector | nindent 4 }}
    {{- else }}
    matchLabels:
      {{- include "kube-state-metrics.selectorLabels" . | indent 6 }}
    {{- end }}
  policyTypes:
    - Ingress
    - Egress
{{- end }}

View File

@@ -0,0 +1,14 @@
{{- /*
PodDisruptionBudget, rendered only when .Values.podDisruptionBudget is set.
The whole value map (e.g. maxUnavailable/minAvailable) is inlined into spec.
*/}}
{{- if .Values.podDisruptionBudget -}}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: {{ template "kube-state-metrics.fullname" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
  labels:
    {{- include "kube-state-metrics.labels" . | indent 4 }}
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
{{ toYaml .Values.podDisruptionBudget | indent 2 }}
{{- end -}}

View File

@@ -0,0 +1,22 @@
{{- /*
ConfigMap with the kube-rbac-proxy authorization config. Scrapers are
authorized via a SubjectAccessReview against the per-release service
subresource defined below; mounted by both proxy sidecars.
*/}}
{{- if .Values.kubeRBACProxy.enabled}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "kube-state-metrics.fullname" . }}-rbac-config
  namespace: {{ template "kube-state-metrics.namespace" . }}
  labels:
    {{- include "kube-state-metrics.labels" . | indent 4 }}
  {{- if .Values.annotations }}
  annotations:
    {{ toYaml .Values.annotations | nindent 4 }}
  {{- end }}
data:
  config-file.yaml: |+
    authorization:
      resourceAttributes:
        namespace: {{ template "kube-state-metrics.namespace" . }}
        apiVersion: v1
        resource: services
        subresource: {{ template "kube-state-metrics.fullname" . }}
        name: {{ template "kube-state-metrics.fullname" . }}
{{- end }}

View File

@@ -0,0 +1,236 @@
{{- /*
Role/ClusterRole for kube-state-metrics. When rbac.useClusterRole is false a
Role is rendered per namespace in .Values.namespaces (the range below);
otherwise one ClusterRole is rendered (range over a single empty string).
Each collector enabled in .Values.collectors contributes one list/watch rule.
*/}}
{{- if and (eq .Values.rbac.create true) (not .Values.rbac.useExistingRole) -}}
{{- range (ternary (join "," .Values.namespaces | split "," ) (list "") (eq $.Values.rbac.useClusterRole false)) }}
---
apiVersion: rbac.authorization.k8s.io/v1
{{- if eq $.Values.rbac.useClusterRole false }}
kind: Role
{{- else }}
kind: ClusterRole
{{- end }}
metadata:
  labels:
    {{- include "kube-state-metrics.labels" $ | indent 4 }}
  name: {{ template "kube-state-metrics.fullname" $ }}
{{- if eq $.Values.rbac.useClusterRole false }}
  namespace: {{ . }}
{{- end }}
rules:
{{ if has "certificatesigningrequests" $.Values.collectors }}
- apiGroups: ["certificates.k8s.io"]
  resources:
  - certificatesigningrequests
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "configmaps" $.Values.collectors }}
- apiGroups: [""]
  resources:
  - configmaps
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "cronjobs" $.Values.collectors }}
- apiGroups: ["batch"]
  resources:
  - cronjobs
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "daemonsets" $.Values.collectors }}
- apiGroups: ["apps"]
  resources:
  - daemonsets
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "deployments" $.Values.collectors }}
- apiGroups: ["apps"]
  resources:
  - deployments
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "endpoints" $.Values.collectors }}
- apiGroups: [""]
  resources:
  - endpoints
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "endpointslices" $.Values.collectors }}
- apiGroups: ["discovery.k8s.io"]
  resources:
  - endpointslices
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "horizontalpodautoscalers" $.Values.collectors }}
- apiGroups: ["autoscaling"]
  resources:
  - horizontalpodautoscalers
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "ingresses" $.Values.collectors }}
- apiGroups: ["networking.k8s.io"]
  resources:
  - ingresses
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "jobs" $.Values.collectors }}
- apiGroups: ["batch"]
  resources:
  - jobs
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "leases" $.Values.collectors }}
- apiGroups: ["coordination.k8s.io"]
  resources:
  - leases
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "limitranges" $.Values.collectors }}
- apiGroups: [""]
  resources:
  - limitranges
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "mutatingwebhookconfigurations" $.Values.collectors }}
- apiGroups: ["admissionregistration.k8s.io"]
  resources:
  - mutatingwebhookconfigurations
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "namespaces" $.Values.collectors }}
- apiGroups: [""]
  resources:
  - namespaces
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "networkpolicies" $.Values.collectors }}
- apiGroups: ["networking.k8s.io"]
  resources:
  - networkpolicies
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "ingressclasses" $.Values.collectors }}
- apiGroups: ["networking.k8s.io"]
  resources:
  - ingressclasses
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "clusterrolebindings" $.Values.collectors }}
- apiGroups: ["rbac.authorization.k8s.io"]
  resources:
  - clusterrolebindings
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "clusterroles" $.Values.collectors }}
- apiGroups: ["rbac.authorization.k8s.io"]
  resources:
  - clusterroles
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "roles" $.Values.collectors }}
- apiGroups: ["rbac.authorization.k8s.io"]
  resources:
  - roles
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "nodes" $.Values.collectors }}
- apiGroups: [""]
  resources:
  - nodes
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "persistentvolumeclaims" $.Values.collectors }}
- apiGroups: [""]
  resources:
  - persistentvolumeclaims
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "persistentvolumes" $.Values.collectors }}
- apiGroups: [""]
  resources:
  - persistentvolumes
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "poddisruptionbudgets" $.Values.collectors }}
- apiGroups: ["policy"]
  resources:
  - poddisruptionbudgets
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "pods" $.Values.collectors }}
- apiGroups: [""]
  resources:
  - pods
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "replicasets" $.Values.collectors }}
- apiGroups: ["apps"]
  resources:
  - replicasets
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "replicationcontrollers" $.Values.collectors }}
- apiGroups: [""]
  resources:
  - replicationcontrollers
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "resourcequotas" $.Values.collectors }}
- apiGroups: [""]
  resources:
  - resourcequotas
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "secrets" $.Values.collectors }}
- apiGroups: [""]
  resources:
  - secrets
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "services" $.Values.collectors }}
- apiGroups: [""]
  resources:
  - services
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "statefulsets" $.Values.collectors }}
- apiGroups: ["apps"]
  resources:
  - statefulsets
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "storageclasses" $.Values.collectors }}
- apiGroups: ["storage.k8s.io"]
  resources:
  - storageclasses
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "validatingwebhookconfigurations" $.Values.collectors }}
- apiGroups: ["admissionregistration.k8s.io"]
  resources:
  - validatingwebhookconfigurations
  verbs: ["list", "watch"]
{{ end -}}
{{ if has "volumeattachments" $.Values.collectors }}
- apiGroups: ["storage.k8s.io"]
  resources:
  - volumeattachments
  verbs: ["list", "watch"]
{{ end -}}
{{- /* kube-rbac-proxy authorizes scrapers via TokenReview + SubjectAccessReview. */}}
{{- if $.Values.kubeRBACProxy.enabled }}
- apiGroups: ["authentication.k8s.io"]
  resources:
  - tokenreviews
  verbs: ["create"]
- apiGroups: ["authorization.k8s.io"]
  resources:
  - subjectaccessreviews
  verbs: ["create"]
{{- end }}
{{- if $.Values.customResourceState.enabled }}
- apiGroups: ["apiextensions.k8s.io"]
  resources:
  - customresourcedefinitions
  verbs: ["list", "watch"]
{{- end }}
{{ if $.Values.rbac.extraRules }}
{{ toYaml $.Values.rbac.extraRules }}
{{ end }}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,24 @@
{{- /*
One RoleBinding per namespace in .Values.namespaces, used only in namespaced
(non-ClusterRole) mode. Binds either the chart-managed Role or an existing
one named by rbac.useExistingRole to the chart's ServiceAccount.
*/}}
{{- if and (eq .Values.rbac.create true) (eq .Values.rbac.useClusterRole false) -}}
{{- range (join "," $.Values.namespaces) | split "," }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    {{- include "kube-state-metrics.labels" $ | indent 4 }}
  name: {{ template "kube-state-metrics.fullname" $ }}
  namespace: {{ . }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
{{- if (not $.Values.rbac.useExistingRole) }}
  name: {{ template "kube-state-metrics.fullname" $ }}
{{- else }}
  name: {{ $.Values.rbac.useExistingRole }}
{{- end }}
subjects:
- kind: ServiceAccount
  name: {{ template "kube-state-metrics.serviceAccountName" $ }}
  namespace: {{ template "kube-state-metrics.namespace" $ }}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,60 @@
{{- /*
Prometheus-operator ScrapeConfig pointing at this release's Service DNS name.
Optional scrape tuning fields are copied through one by one from
.Values.prometheus.scrapeconfig.
*/}}
{{- if .Values.prometheus.scrapeconfig.enabled }}
apiVersion: monitoring.coreos.com/v1alpha1
kind: ScrapeConfig
metadata:
  name: {{ template "kube-state-metrics.fullname" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
  labels:
    {{- include "kube-state-metrics.labels" . | indent 4 }}
    {{- with .Values.prometheus.scrapeconfig.additionalLabels }}
    {{- tpl (toYaml . | nindent 4) $ }}
    {{- end }}
  {{- with .Values.prometheus.scrapeconfig.annotations }}
  annotations:
    {{- tpl (toYaml . | nindent 4) $ }}
  {{- end }}
spec:
  {{- include "scrapeconfig.scrapeLimits" .Values.prometheus.scrapeconfig | indent 2 }}
  staticConfigs:
    - targets:
        {{- /* Fixed: apply the same "| default 8080" fallback used by the
               deployment and service templates, so an unset service.port
               does not render a target with an empty port. */}}
        - {{ template "kube-state-metrics.fullname" . }}.{{ template "kube-state-metrics.namespace" . }}.svc:{{ .Values.service.port | default 8080 }}
      {{- if .Values.prometheus.scrapeconfig.staticConfigLabels}}
      labels:
        {{- with .Values.prometheus.scrapeconfig.staticConfigLabels }}
        {{- tpl (toYaml . | nindent 8) $ }}
        {{- end }}
      {{- end }}
  {{- if .Values.prometheus.scrapeconfig.jobName }}
  jobName: {{ .Values.prometheus.scrapeconfig.jobName }}
  {{- end }}
  {{- if .Values.prometheus.scrapeconfig.honorLabels }}
  honorLabels: true
  {{- end }}
  {{- if .Values.prometheus.scrapeconfig.scrapeInterval }}
  scrapeInterval: {{ .Values.prometheus.scrapeconfig.scrapeInterval }}
  {{- end }}
  {{- if .Values.prometheus.scrapeconfig.scrapeTimeout }}
  scrapeTimeout: {{ .Values.prometheus.scrapeconfig.scrapeTimeout }}
  {{- end }}
  {{- if .Values.prometheus.scrapeconfig.proxyUrl }}
  proxyUrl: {{ .Values.prometheus.scrapeconfig.proxyUrl }}
  {{- end }}
  {{- if .Values.prometheus.scrapeconfig.enableHttp2 }}
  enableHttp2: {{ .Values.prometheus.scrapeconfig.enableHttp2 }}
  {{- end }}
  {{- if .Values.prometheus.scrapeconfig.metricRelabelings }}
  metricRelabelings:
    {{- toYaml .Values.prometheus.scrapeconfig.metricRelabelings | nindent 4 }}
  {{- end }}
  {{- if .Values.prometheus.scrapeconfig.relabelings }}
  relabelings:
    {{- toYaml .Values.prometheus.scrapeconfig.relabelings | nindent 4 }}
  {{- end }}
  {{- if .Values.prometheus.scrapeconfig.scheme }}
  scheme: {{ .Values.prometheus.scrapeconfig.scheme }}
  {{- end }}
  {{- if .Values.prometheus.scrapeconfig.tlsConfig }}
  tlsConfig:
    {{- toYaml (.Values.prometheus.scrapeconfig.tlsConfig ) | nindent 4 }}
  {{- end }}
{{- end }}

View File

@@ -0,0 +1,53 @@
{{- /*
Service exposing the http metrics port and, when self-monitoring is on, the
telemetry port. With autosharding the service is headless (clusterIP: None)
so the StatefulSet pods are individually addressable.
*/}}
apiVersion: v1
kind: Service
metadata:
  name: {{ template "kube-state-metrics.fullname" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
  labels:
    {{- include "kube-state-metrics.labels" . | indent 4 }}
  annotations:
    {{- if .Values.prometheusScrape }}
    prometheus.io/scrape: '{{ .Values.prometheusScrape }}'
    {{- end }}
    {{- if .Values.service.annotations }}
    {{- toYaml .Values.service.annotations | nindent 4 }}
    {{- end }}
spec:
  type: "{{ .Values.service.type }}"
  {{- if .Values.service.ipDualStack.enabled }}
  ipFamilies: {{ toYaml .Values.service.ipDualStack.ipFamilies | nindent 4 }}
  ipFamilyPolicy: {{ .Values.service.ipDualStack.ipFamilyPolicy }}
  {{- end }}
  ports:
  - name: http
    protocol: TCP
    port: {{ .Values.service.port | default 8080}}
    {{- if ( and (eq .Values.service.type "NodePort" ) (not (empty .Values.service.nodePort)) ) }}
    nodePort: {{ .Values.service.nodePort }}
    {{- end }}
    targetPort: http
  {{ if .Values.selfMonitor.enabled }}
  - name: metrics
    protocol: TCP
    port: {{ .Values.selfMonitor.telemetryPort | default 8081 }}
    targetPort: metrics
    {{- /* Fixed: guard on telemetryNodePort (the value actually emitted).
           Previously this tested service.nodePort, so setting only the http
           nodePort rendered an invalid empty "nodePort:" here. */}}
    {{- if ( and (eq .Values.service.type "NodePort" ) (not (empty .Values.selfMonitor.telemetryNodePort)) ) }}
    nodePort: {{ .Values.selfMonitor.telemetryNodePort }}
    {{- end }}
  {{ end }}
  {{- if .Values.service.loadBalancerIP }}
  loadBalancerIP: "{{ .Values.service.loadBalancerIP }}"
  {{- end }}
  {{- if .Values.service.loadBalancerSourceRanges }}
  loadBalancerSourceRanges:
  {{- range $cidr := .Values.service.loadBalancerSourceRanges }}
    - {{ $cidr }}
  {{- end }}
  {{- end }}
  {{- if .Values.autosharding.enabled }}
  clusterIP: None
  {{- else if .Values.service.clusterIP }}
  clusterIP: "{{ .Values.service.clusterIP }}"
  {{- end }}
  selector:
    {{- include "kube-state-metrics.selectorLabels" . | indent 4 }}

View File

@@ -0,0 +1,18 @@
{{- /*
ServiceAccount for kube-state-metrics. imagePullSecrets accepts either the
account-level or the chart-global list via the shared helper.
*/}}
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
metadata:
  labels:
    {{- include "kube-state-metrics.labels" . | indent 4 }}
  name: {{ template "kube-state-metrics.serviceAccountName" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
  {{- if .Values.serviceAccount.annotations }}
  annotations:
    {{ toYaml .Values.serviceAccount.annotations | indent 4 }}
  {{- end }}
{{- if or .Values.serviceAccount.imagePullSecrets .Values.global.imagePullSecrets }}
imagePullSecrets:
  {{- include "kube-state-metrics.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.serviceAccount.imagePullSecrets) | indent 2 }}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,120 @@
{{- /*
Prometheus-operator ServiceMonitor. Two endpoints are rendered: "http"
(always) and "metrics" (only with selfMonitor.enabled). For each scrape
option, a per-endpoint value under prometheus.monitor.http / .metrics takes
precedence, falling back to the shared prometheus.monitor.* value.
*/}}
{{- if .Values.prometheus.monitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ template "kube-state-metrics.fullname" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
  labels:
    {{- include "kube-state-metrics.labels" . | indent 4 }}
    {{- with .Values.prometheus.monitor.additionalLabels }}
    {{- tpl (toYaml . | nindent 4) $ }}
    {{- end }}
  {{- with .Values.prometheus.monitor.annotations }}
  annotations:
    {{- tpl (toYaml . | nindent 4) $ }}
  {{- end }}
spec:
  jobLabel: {{ default "app.kubernetes.io/name" .Values.prometheus.monitor.jobLabel }}
  {{- with .Values.prometheus.monitor.targetLabels }}
  targetLabels:
    {{- toYaml . | trim | nindent 4 }}
  {{- end }}
  {{- with .Values.prometheus.monitor.podTargetLabels }}
  podTargetLabels:
    {{- toYaml . | trim | nindent 4 }}
  {{- end }}
  {{- include "servicemonitor.scrapeLimits" .Values.prometheus.monitor | indent 2 }}
  {{- if .Values.prometheus.monitor.namespaceSelector }}
  namespaceSelector:
    matchNames:
    {{- with .Values.prometheus.monitor.namespaceSelector }}
      {{- toYaml . | nindent 6 }}
    {{- end }}
  {{- end }}
  selector:
    matchLabels:
    {{- with .Values.prometheus.monitor.selectorOverride }}
      {{- toYaml . | nindent 6 }}
    {{- else }}
      {{- include "kube-state-metrics.selectorLabels" . | indent 6 }}
    {{- end }}
  endpoints:
    - port: http
      {{- if or .Values.prometheus.monitor.http.interval .Values.prometheus.monitor.interval }}
      interval: {{ .Values.prometheus.monitor.http.interval | default .Values.prometheus.monitor.interval }}
      {{- end }}
      {{- if or .Values.prometheus.monitor.http.scrapeTimeout .Values.prometheus.monitor.scrapeTimeout }}
      scrapeTimeout: {{ .Values.prometheus.monitor.http.scrapeTimeout | default .Values.prometheus.monitor.scrapeTimeout }}
      {{- end }}
      {{- if or .Values.prometheus.monitor.http.proxyUrl .Values.prometheus.monitor.proxyUrl }}
      proxyUrl: {{ .Values.prometheus.monitor.http.proxyUrl | default .Values.prometheus.monitor.proxyUrl }}
      {{- end }}
      {{- if or .Values.prometheus.monitor.http.enableHttp2 .Values.prometheus.monitor.enableHttp2 }}
      enableHttp2: {{ .Values.prometheus.monitor.http.enableHttp2 | default .Values.prometheus.monitor.enableHttp2 }}
      {{- end }}
      {{- if or .Values.prometheus.monitor.http.honorLabels .Values.prometheus.monitor.honorLabels }}
      honorLabels: true
      {{- end }}
      {{- if or .Values.prometheus.monitor.http.metricRelabelings .Values.prometheus.monitor.metricRelabelings }}
      metricRelabelings:
        {{- toYaml (.Values.prometheus.monitor.http.metricRelabelings | default .Values.prometheus.monitor.metricRelabelings) | nindent 8 }}
      {{- end }}
      {{- if or .Values.prometheus.monitor.http.relabelings .Values.prometheus.monitor.relabelings }}
      relabelings:
        {{- toYaml (.Values.prometheus.monitor.http.relabelings | default .Values.prometheus.monitor.relabelings) | nindent 8 }}
      {{- end }}
      {{- if or .Values.prometheus.monitor.http.scheme .Values.prometheus.monitor.scheme }}
      scheme: {{ .Values.prometheus.monitor.http.scheme | default .Values.prometheus.monitor.scheme }}
      {{- end }}
      {{- if or .Values.prometheus.monitor.http.tlsConfig .Values.prometheus.monitor.tlsConfig }}
      tlsConfig:
        {{- toYaml (.Values.prometheus.monitor.http.tlsConfig | default .Values.prometheus.monitor.tlsConfig) | nindent 8 }}
      {{- end }}
      {{- if or .Values.prometheus.monitor.http.bearerTokenFile .Values.prometheus.monitor.bearerTokenFile }}
      bearerTokenFile: {{ .Values.prometheus.monitor.http.bearerTokenFile | default .Values.prometheus.monitor.bearerTokenFile }}
      {{- end }}
      {{- with (.Values.prometheus.monitor.http.bearerTokenSecret | default .Values.prometheus.monitor.bearerTokenSecret) }}
      bearerTokenSecret:
        {{- toYaml . | nindent 8 }}
      {{- end }}
    {{- if .Values.selfMonitor.enabled }}
    - port: metrics
      {{- if or .Values.prometheus.monitor.metrics.interval .Values.prometheus.monitor.interval }}
      interval: {{ .Values.prometheus.monitor.metrics.interval | default .Values.prometheus.monitor.interval }}
      {{- end }}
      {{- if or .Values.prometheus.monitor.metrics.scrapeTimeout .Values.prometheus.monitor.scrapeTimeout }}
      scrapeTimeout: {{ .Values.prometheus.monitor.metrics.scrapeTimeout | default .Values.prometheus.monitor.scrapeTimeout }}
      {{- end }}
      {{- if or .Values.prometheus.monitor.metrics.proxyUrl .Values.prometheus.monitor.proxyUrl }}
      proxyUrl: {{ .Values.prometheus.monitor.metrics.proxyUrl | default .Values.prometheus.monitor.proxyUrl }}
      {{- end }}
      {{- if or .Values.prometheus.monitor.metrics.enableHttp2 .Values.prometheus.monitor.enableHttp2 }}
      enableHttp2: {{ .Values.prometheus.monitor.metrics.enableHttp2 | default .Values.prometheus.monitor.enableHttp2 }}
      {{- end }}
      {{- if or .Values.prometheus.monitor.metrics.honorLabels .Values.prometheus.monitor.honorLabels }}
      honorLabels: true
      {{- end }}
      {{- if or .Values.prometheus.monitor.metrics.metricRelabelings .Values.prometheus.monitor.metricRelabelings }}
      metricRelabelings:
        {{- toYaml (.Values.prometheus.monitor.metrics.metricRelabelings | default .Values.prometheus.monitor.metricRelabelings) | nindent 8 }}
      {{- end }}
      {{- if or .Values.prometheus.monitor.metrics.relabelings .Values.prometheus.monitor.relabelings }}
      relabelings:
        {{- toYaml (.Values.prometheus.monitor.metrics.relabelings | default .Values.prometheus.monitor.relabelings) | nindent 8 }}
      {{- end }}
      {{- if or .Values.prometheus.monitor.metrics.scheme .Values.prometheus.monitor.scheme }}
      scheme: {{ .Values.prometheus.monitor.metrics.scheme | default .Values.prometheus.monitor.scheme }}
      {{- end }}
      {{- if or .Values.prometheus.monitor.metrics.tlsConfig .Values.prometheus.monitor.tlsConfig }}
      tlsConfig:
        {{- toYaml (.Values.prometheus.monitor.metrics.tlsConfig | default .Values.prometheus.monitor.tlsConfig) | nindent 8 }}
      {{- end }}
      {{- if or .Values.prometheus.monitor.metrics.bearerTokenFile .Values.prometheus.monitor.bearerTokenFile }}
      bearerTokenFile: {{ .Values.prometheus.monitor.metrics.bearerTokenFile | default .Values.prometheus.monitor.bearerTokenFile }}
      {{- end }}
      {{- with (.Values.prometheus.monitor.metrics.bearerTokenSecret | default .Values.prometheus.monitor.bearerTokenSecret) }}
      bearerTokenSecret:
        {{- toYaml . | nindent 8 }}
      {{- end }}
    {{- end }}
{{- end }}

View File

@@ -0,0 +1,26 @@
{{- /*
Role used by autosharding: each pod reads its own Pod object and watches the
owning StatefulSet to discover its shard ordinal and the total replica count.
*/}}
{{- if and .Values.autosharding.enabled .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
  labels:
    {{- include "kube-state-metrics.labels" . | indent 4 }}
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - apps
  resourceNames:
  - {{ template "kube-state-metrics.fullname" . }}
  resources:
  - statefulsets
  verbs:
  - get
  - list
  - watch
{{- end }}

View File

@@ -0,0 +1,17 @@
{{- /*
Binds the stsdiscovery Role (autosharding self-discovery) to the chart's
ServiceAccount; rendered only when autosharding and rbac.create are on.
*/}}
{{- if and .Values.autosharding.enabled .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
  labels:
    {{- include "kube-state-metrics.labels" . | indent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }}
subjects:
  - kind: ServiceAccount
    name: {{ template "kube-state-metrics.serviceAccountName" . }}
    namespace: {{ template "kube-state-metrics.namespace" . }}
{{- end }}

View File

@@ -0,0 +1,44 @@
{{- /*
VerticalPodAutoscaler, rendered only when the VPA API is served by the
cluster AND verticalPodAutoscaler.enabled. Targets the StatefulSet in
autosharding mode, otherwise the Deployment.
*/}}
{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1") (.Values.verticalPodAutoscaler.enabled) }}
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: {{ template "kube-state-metrics.fullname" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
  labels:
    {{- include "kube-state-metrics.labels" . | indent 4 }}
spec:
  {{- with .Values.verticalPodAutoscaler.recommenders }}
  recommenders:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  resourcePolicy:
    containerPolicies:
    - containerName: {{ template "kube-state-metrics.name" . }}
      {{- with .Values.verticalPodAutoscaler.controlledResources }}
      controlledResources:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- if .Values.verticalPodAutoscaler.controlledValues }}
      controlledValues: {{ .Values.verticalPodAutoscaler.controlledValues }}
      {{- end }}
      {{- if .Values.verticalPodAutoscaler.maxAllowed }}
      maxAllowed:
        {{ toYaml .Values.verticalPodAutoscaler.maxAllowed | nindent 8 }}
      {{- end }}
      {{- if .Values.verticalPodAutoscaler.minAllowed }}
      minAllowed:
        {{ toYaml .Values.verticalPodAutoscaler.minAllowed | nindent 8 }}
      {{- end }}
  targetRef:
    apiVersion: apps/v1
    {{- if .Values.autosharding.enabled }}
    kind: StatefulSet
    {{- else }}
    kind: Deployment
    {{- end }}
    name: {{ template "kube-state-metrics.fullname" . }}
  {{- with .Values.verticalPodAutoscaler.updatePolicy }}
  updatePolicy:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}

View File

@@ -0,0 +1,611 @@
# Default values for kube-state-metrics.
prometheusScrape: true
image:
registry: registry.k8s.io
repository: kube-state-metrics/kube-state-metrics
# If unset use v + .Charts.appVersion
tag: ""
sha: ""
pullPolicy: IfNotPresent
imagePullSecrets: []
# - name: "image-pull-secret"
global:
# To help compatibility with other charts which use global.imagePullSecrets.
# Allow either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style).
# global:
# imagePullSecrets:
# - name: pullSecret1
# - name: pullSecret2
# or
# global:
# imagePullSecrets:
# - pullSecret1
# - pullSecret2
imagePullSecrets: []
#
# Allow parent charts to override registry hostname
imageRegistry: ""
# If set to true, this will deploy kube-state-metrics as a StatefulSet and the data
# will be automatically sharded across <.Values.replicas> pods using the built-in
# autodiscovery feature: https://github.com/kubernetes/kube-state-metrics#automated-sharding
# This is an experimental feature and there are no stability guarantees.
autosharding:
enabled: false
replicas: 1
# Change the deployment strategy when autosharding is disabled.
# ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
# The default is "RollingUpdate" as per Kubernetes defaults.
# During a release, 'RollingUpdate' can lead to two running instances for a short period of time while 'Recreate' can create a small gap in data.
# updateStrategy: Recreate
# Number of old history to retain to allow rollback
# Default Kubernetes value is set to 10
revisionHistoryLimit: 10
# List of additional cli arguments to configure kube-state-metrics
# for example: --enable-gzip-encoding, --log-file, etc.
# all the possible args can be found here: https://github.com/kubernetes/kube-state-metrics/blob/main/docs/developer/cli-arguments.md
extraArgs: []
# If false then the user will opt out of automounting API credentials.
automountServiceAccountToken: true
service:
port: 8080
# Default to clusterIP for backward compatibility
type: ClusterIP
ipDualStack:
enabled: false
ipFamilies: ["IPv6", "IPv4"]
ipFamilyPolicy: "PreferDualStack"
nodePort: 0
loadBalancerIP: ""
# Only allow access to the loadBalancerIP from these IPs
loadBalancerSourceRanges: []
clusterIP: ""
annotations: {}
## Additional labels to add to all resources
customLabels: {}
# app: kube-state-metrics
## Override selector labels
selectorOverride: {}
## set to true to add the release label so scraping of the servicemonitor with kube-prometheus-stack works out of the box
releaseLabel: false
hostNetwork: false
rbac:
# If true, create & use RBAC resources
create: true
# Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to it, rolename set here.
# useExistingRole: your-existing-role
# If set to false - run without cluster-admin privileges - ONLY works if namespace is also set (if useExistingRole is set this name is used as the ClusterRole or Role to bind to)
useClusterRole: true
# Add permissions for CustomResources' apiGroups in Role/ClusterRole. Should be used in conjunction with Custom Resource State Metrics configuration
# Example:
# - apiGroups: ["monitoring.coreos.com"]
# resources: ["prometheuses"]
# verbs: ["list", "watch"]
extraRules: []
# Configure kube-rbac-proxy. When enabled, creates one kube-rbac-proxy container per exposed HTTP endpoint (metrics and telemetry if enabled).
# The requests are served through the same service but requests are then HTTPS.
kubeRBACProxy:
enabled: false
image:
registry: quay.io
repository: brancz/kube-rbac-proxy
tag: v0.20.0
sha: ""
pullPolicy: IfNotPresent
# This set --ignore-paths=/livez,/readyz to kubeRBACProxy container args
# to allow the pod probes working properly with kubeRBACProxy enabled.
ignoreProbePaths: true
  # List of additional cli arguments to configure kube-rbac-proxy
# for example: --tls-cipher-suites, --log-file, etc.
# all the possible args can be found here: https://github.com/brancz/kube-rbac-proxy#usage
extraArgs: []
## Specify security settings for a Container
## Allows overrides and additional options compared to (Pod) securityContext
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
containerSecurityContext:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
# Configure specific upstream port for kube-state-metrics container
port: 9090
# Configure specific proxy endpoints port
# This port is for healthz on readinessProbe kube-rbac-proxy-http container
proxyEndpointsPort: 8888
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 64Mi
# requests:
# cpu: 10m
# memory: 32Mi
## volumeMounts enables mounting custom volumes in rbac-proxy containers
## Useful for TLS certificates and keys
volumeMounts: []
# - mountPath: /etc/tls
# name: kube-rbac-proxy-tls
# readOnly: true
serviceAccount:
# Specifies whether a ServiceAccount should be created, require rbac true
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# Reference to one or more secrets to be used when pulling images
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# ServiceAccount annotations.
# Use case: AWS EKS IAM roles for service accounts
# ref: https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html
annotations: {}
# If false then the user will opt out of automounting API credentials.
automountServiceAccountToken: true
# Additional Environment variables
env: []
# - name: GOMAXPROCS
# valueFrom:
# resourceFieldRef:
# resource: limits.cpu
prometheus:
monitor:
enabled: false
annotations: {}
additionalLabels: {}
namespace: ""
namespaceSelector: []
jobLabel: ""
targetLabels: []
podTargetLabels: []
## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
##
sampleLimit: 0
## TargetLimit defines a limit on the number of scraped targets that will be accepted.
##
targetLimit: 0
## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
##
labelLimit: 0
## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
##
labelNameLengthLimit: 0
## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
##
labelValueLengthLimit: 0
selectorOverride: {}
## kube-state-metrics endpoint
http:
interval: ""
scrapeTimeout: ""
proxyUrl: ""
## Whether to enable HTTP2 for servicemonitor
enableHttp2: false
honorLabels: false
metricRelabelings: []
relabelings: []
scheme: ""
## File to read bearer token for scraping targets
bearerTokenFile: ""
## Secret to mount to read bearer token for scraping targets. The secret needs
## to be in the same namespace as the service monitor and accessible by the
## Prometheus Operator
bearerTokenSecret: {}
# name: secret-name
# key: key-name
tlsConfig: {}
## selfMonitor endpoint
metrics:
interval: ""
scrapeTimeout: ""
proxyUrl: ""
## Whether to enable HTTP2 for servicemonitor
enableHttp2: false
honorLabels: false
metricRelabelings: []
relabelings: []
scheme: ""
## File to read bearer token for scraping targets
bearerTokenFile: ""
## Secret to mount to read bearer token for scraping targets. The secret needs
## to be in the same namespace as the service monitor and accessible by the
## Prometheus Operator
bearerTokenSecret: {}
# name: secret-name
# key: key-name
tlsConfig: {}
## Create a scrapeConfig resource for scraping the kube-state-metrics service. Use this instead of serviceMonitor
  ## to scrape multiple instances of kube-state-metrics safely.
scrapeconfig:
## To avoid duplicate metrics, first disable the serviceMonitor creation via prometheus.monitor.enabled=false
enabled: false
annotations: {}
additionalLabels: {}
jobName: kube-state-metrics
## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
##
sampleLimit: 0
## TargetLimit defines a limit on the number of scraped targets that will be accepted.
##
targetLimit: 0
## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
##
labelLimit: 0
## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
##
labelNameLengthLimit: 0
## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
##
labelValueLengthLimit: 0
## StaticConfigLabels defines the labels to be used in the Prometheus static configuration for scraping.
staticConfigLabels: {}
scrapeInterval: ""
scrapeTimeout: ""
proxyUrl: ""
## Whether to enable HTTP2 for scrapeconfig
enableHttp2: false
honorLabels: true
metricRelabelings: []
relabelings: []
scheme: ""
tlsConfig: {}
## Configure network policy for kube-state-metrics
networkPolicy:
enabled: false
# networkPolicy.flavor -- Flavor of the network policy to use.
# Can be:
# * kubernetes for networking.k8s.io/v1/NetworkPolicy
# * cilium for cilium.io/v2/CiliumNetworkPolicy
flavor: kubernetes
## Configure the cilium network policy kube-apiserver selector
# cilium:
# kubeApiServerSelector:
# - toEntities:
# - kube-apiserver
# egress:
# - {}
# ingress:
# - {}
# podSelector:
# matchLabels:
# app.kubernetes.io/name: kube-state-metrics
securityContext:
enabled: true
runAsGroup: 65534
runAsUser: 65534
fsGroup: 65534
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
## Specify security settings for a Container
## Allows overrides and additional options compared to (Pod) securityContext
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
containerSecurityContext:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
## Affinity settings for pod assignment
## Can be defined as either a dict or string. String is useful for `tpl` templating.
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
affinity: {}
# affinity: |
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchLabels:
# {{- include "kube-state-metrics.selectorLabels" . | indent 10 }}
# topologyKey: kubernetes.io/hostname
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
## Topology spread constraints for pod assignment
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
topologySpreadConstraints: []
# Annotations to be added to the deployment/statefulset
annotations: {}
# Labels to be added to the deployment/statefulset
labels: {}
# Annotations to be added to the pod
podAnnotations: {}
# Labels to be added to the pod
podLabels: {}
## Assign a PriorityClassName to pods if set
# priorityClassName: ""
# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
podDisruptionBudget: {}
# Comma-separated list of metrics to be exposed.
# This list comprises of exact metric names and/or regex patterns.
# The allowlist and denylist are mutually exclusive.
metricAllowlist: []
# Comma-separated list of metrics not to be enabled.
# This list comprises of exact metric names and/or regex patterns.
# The allowlist and denylist are mutually exclusive.
metricDenylist: []
# Comma-separated list of additional Kubernetes label keys that will be used in the resource's
# labels metric. By default the metric contains only name and namespace labels.
# To include additional labels, provide a list of resource names in their plural form and Kubernetes
# label keys you would like to allow for them (Example: '=namespaces=[k8s-label-1,k8s-label-n,...],pods=[app],...)'.
# A single '*' can be provided per resource instead to allow any labels, but that has
# severe performance implications (Example: '=pods=[*]').
metricLabelsAllowlist: []
# - namespaces=[k8s-label-1,k8s-label-n]
# Comma-separated list of Kubernetes annotation keys that will be used in the resource's
# labels metric. By default the metric contains only name and namespace labels.
# To include additional annotations provide a list of resource names in their plural form and Kubernetes
# annotation keys you would like to allow for them (Example: '=namespaces=[kubernetes.io/team,...],pods=[kubernetes.io/team],...)'.
# A single '*' can be provided per resource instead to allow any annotations, but that has
# severe performance implications (Example: '=pods=[*]').
metricAnnotationsAllowList: []
# - pods=[k8s-annotation-1,k8s-annotation-n]
# Available collectors for kube-state-metrics.
# By default, all available resources are enabled, comment out to disable.
collectors:
- certificatesigningrequests
- configmaps
- cronjobs
- daemonsets
- deployments
- endpoints
- horizontalpodautoscalers
- ingresses
- jobs
- leases
- limitranges
- mutatingwebhookconfigurations
- namespaces
- networkpolicies
- nodes
- persistentvolumeclaims
- persistentvolumes
- poddisruptionbudgets
- pods
- replicasets
- replicationcontrollers
- resourcequotas
- secrets
- services
- statefulsets
- storageclasses
- validatingwebhookconfigurations
- volumeattachments
# - ingressclasses
# - clusterrolebindings
# - clusterroles
# - roles
# Enabling kubeconfig will pass the --kubeconfig argument to the container
kubeconfig:
enabled: false
# base64 encoded kube-config file
secret:
# Enabling support for customResourceState, will create a configMap including your config that will be read from kube-state-metrics
customResourceState:
# Whether to enable support for CustomResourceStateMetrics.
enabled: false
# Whether to create the ConfigMap that holds the config.
create: true
# Name of the ConfigMap that holds the config. If empty, name will be generated based on the release name.
name: ""
# ConfigMap key that holds the config.
key: config.yaml
# Definition of the CustomResourceStateMetrics. Add (Cluster)Role permissions to list/watch the resources defined in the config to rbac.extraRules.
config: {}
# Enable only the release namespace for collecting resources. By default all namespaces are collected.
# If releaseNamespace and namespaces are both set a merged list will be collected.
releaseNamespace: false
# Comma-separated list(string) or yaml list of namespaces to be enabled for collecting resources. By default all namespaces are collected.
namespaces: ""
# Comma-separated list of namespaces not to be enabled. If namespaces and namespaces-denylist are both set,
# only namespaces that are excluded in namespaces-denylist will be used.
namespacesDenylist: ""
## Override the deployment namespace
##
namespaceOverride: ""
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 64Mi
# requests:
# cpu: 10m
# memory: 32Mi
# Enable self metrics configuration for service and Service Monitor
# Default values for telemetry configuration can be overridden
# If you set telemetryNodePort, you must also set service.type to NodePort
selfMonitor:
enabled: false
# telemetryHost: 0.0.0.0
# telemetryPort: 8081
# telemetryNodePort: 0
# Enable vertical pod autoscaler support for kube-state-metrics
verticalPodAutoscaler:
enabled: false
# Recommender responsible for generating recommendation for the object.
# List should be empty (then the default recommender will generate the recommendation)
# or contain exactly one recommender.
# recommenders: []
# - name: custom-recommender-performance
# List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
controlledResources: []
# Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits.
# controlledValues: RequestsAndLimits
# Define the max allowed resources for the pod
maxAllowed: {}
# cpu: 200m
# memory: 100Mi
# Define the min allowed resources for the pod
minAllowed: {}
# cpu: 200m
# memory: 100Mi
# updatePolicy:
# Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction
# minReplicas: 1
# Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
# are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
# updateMode: Auto
# volumeMounts are used to add custom volume mounts to deployment.
# See example below
volumeMounts: []
# - mountPath: /etc/config
# name: config-volume
# volumes are used to add custom volumes to deployment
# See example below
volumes: []
# - configMap:
# name: cm-for-volume
# name: config-volume
# Extra manifests to deploy as an array
extraManifests: []
# - apiVersion: v1
# kind: ConfigMap
# metadata:
# labels:
# name: prometheus-extra
# data:
# extra-data: "value"
## Containers allows injecting additional containers.
containers: []
# - name: crd-init
# image: kiwigrid/k8s-sidecar:latest
## InitContainers allows injecting additional initContainers.
initContainers: []
# - name: crd-sidecar
# image: kiwigrid/k8s-sidecar:latest
## dnsPolicy allows to change the default DNS configuration for the pod
## Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
dnsPolicy: ClusterFirst
## dnsConfig allows setting up specific DNS configuration for the pod
## Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config
dnsConfig: {}
## Settings for startup, liveness and readiness probes
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
##
## Startup probe can optionally be enabled.
##
startupProbe:
enabled: false
failureThreshold: 3
httpGet:
httpHeaders: []
scheme: http
initialDelaySeconds: 0
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
## Liveness probe
##
livenessProbe:
failureThreshold: 3
httpGet:
httpHeaders: []
scheme: http
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
## Readiness probe
##
readinessProbe:
failureThreshold: 3
httpGet:
httpHeaders: []
scheme: http
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
ci/

View File

@@ -0,0 +1,28 @@
annotations:
artifacthub.io/license: Apache-2.0
artifacthub.io/links: |
- name: Chart Source
url: https://github.com/prometheus-community/helm-charts
apiVersion: v2
appVersion: 1.10.2
description: A Helm chart for prometheus node-exporter
home: https://github.com/prometheus/node_exporter/
keywords:
- node-exporter
- prometheus
- exporter
maintainers:
- email: gianrubio@gmail.com
name: gianrubio
url: https://github.com/gianrubio
- email: zanhsieh@gmail.com
name: zanhsieh
url: https://github.com/zanhsieh
- email: rootsandtrees@posteo.de
name: zeritti
url: https://github.com/zeritti
name: prometheus-node-exporter
sources:
- https://github.com/prometheus/node_exporter/
type: application
version: 4.49.1

View File

@@ -0,0 +1,96 @@
# Prometheus Node Exporter
Prometheus exporter for hardware and OS metrics exposed by *NIX kernels, written in Go with pluggable metric collectors.
This chart bootstraps a Prometheus [Node Exporter](http://github.com/prometheus/node_exporter) daemonset on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Usage
The chart is distributed as an [OCI Artifact](https://helm.sh/docs/topics/registries/) as well as via a traditional [Helm Repository](https://helm.sh/docs/topics/chart_repository/).
- OCI Artifact: `oci://ghcr.io/prometheus-community/charts/prometheus-node-exporter`
- Helm Repository: `https://prometheus-community.github.io/helm-charts` with chart `prometheus-node-exporter`
The installation instructions use the OCI registry. Refer to the [`helm repo`](https://helm.sh/docs/helm/helm_repo/) command documentation for information on installing charts via the traditional repository.
### Install Chart
```console
helm install [RELEASE_NAME] oci://ghcr.io/prometheus-community/charts/prometheus-node-exporter
```
_See [configuration](#configuring) below._
_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
### Uninstall Chart
```console
helm uninstall [RELEASE_NAME]
```
This removes all the Kubernetes components associated with the chart and deletes the release.
_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
### Upgrading Chart
```console
helm upgrade [RELEASE_NAME] oci://ghcr.io/prometheus-community/charts/prometheus-node-exporter --install
```
_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
#### From 3.x to 4.x
Starting from version 4.0.0, the `node exporter` chart is using the [Kubernetes recommended labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/). Therefore you have to delete the daemonset before you upgrade.
```console
kubectl delete daemonset -l app=prometheus-node-exporter
helm upgrade -i prometheus-node-exporter prometheus-community/prometheus-node-exporter
```
If you use your own custom [ServiceMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor) or [PodMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#podmonitor), please ensure to upgrade their `selector` fields accordingly to the new labels.
#### From 2.x to 3.x
Change the following:
```yaml
hostRootFsMount: true
```
to:
```yaml
hostRootFsMount:
enabled: true
mountPropagation: HostToContainer
```
## Configuring
See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands:
```console
helm show values oci://ghcr.io/prometheus-community/charts/prometheus-node-exporter
```
### kube-rbac-proxy
You can enable `prometheus-node-exporter` endpoint protection using `kube-rbac-proxy`. By setting `kubeRBACProxy.enabled: true`, this chart will deploy a RBAC proxy container protecting the node-exporter endpoint.
To authorize access, authenticate your requests (via a `ServiceAccount` for example) with a `ClusterRole` attached such as:
```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: prometheus-node-exporter-read
rules:
- apiGroups: [ "" ]
resources: ["services/node-exporter-prometheus-node-exporter"]
verbs:
- get
```
See [kube-rbac-proxy examples](https://github.com/brancz/kube-rbac-proxy/tree/master/examples/resource-attributes) for more details.

View File

@@ -0,0 +1,29 @@
{{- /*
NOTES.txt - post-install instructions printed to the user by `helm install`.
Branches on .Values.service.type to print the correct way to reach the exporter,
and, when the kube-rbac-proxy sidecar is enabled, documents the extra RBAC a
scraping client needs. Fixes grammar of the user-facing messages
("watch the status of by running", "endpoint protections is", "endpoints is").
*/ -}}
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ template "prometheus-node-exporter.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus-node-exporter.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ template "prometheus-node-exporter.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
           You can watch its status by running 'kubectl get svc -w {{ template "prometheus-node-exporter.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ template "prometheus-node-exporter.namespace" . }} {{ template "prometheus-node-exporter.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ template "prometheus-node-exporter.namespace" . }} -l "app.kubernetes.io/name={{ template "prometheus-node-exporter.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:9100 to use your application"
  kubectl port-forward --namespace {{ template "prometheus-node-exporter.namespace" . }} $POD_NAME 9100
{{- end }}
{{- if .Values.kubeRBACProxy.enabled}}
kube-rbac-proxy endpoint protection is enabled:
- The metrics endpoint is now served over HTTPS
- Ensure that the client authenticates the requests (e.g. via service account) with the following role permissions:
```
rules:
  - apiGroups: [ "" ]
    resources: ["services/{{ template "prometheus-node-exporter.fullname" . }}"]
    verbs:
      - get
```
{{- end }}

View File

@@ -0,0 +1,237 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "prometheus-node-exporter.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "prometheus-node-exporter.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "prometheus-node-exporter.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "prometheus-node-exporter.labels" -}}
helm.sh/chart: {{ include "prometheus-node-exporter.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: metrics
app.kubernetes.io/part-of: {{ include "prometheus-node-exporter.name" . }}
{{ include "prometheus-node-exporter.selectorLabels" . }}
{{- with .Chart.AppVersion }}
app.kubernetes.io/version: {{ . | quote }}
{{- end }}
{{- with .Values.commonLabels }}
{{ tpl (toYaml .) $ }}
{{- end }}
{{- if .Values.releaseLabel }}
release: {{ .Release.Name }}
{{- end }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "prometheus-node-exporter.selectorLabels" -}}
app.kubernetes.io/name: {{ include "prometheus-node-exporter.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "prometheus-node-exporter.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "prometheus-node-exporter.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
The image to use
*/}}
{{- define "prometheus-node-exporter.image" -}}
{{- if .Values.image.sha }}
{{- fail "image.sha forbidden. Use image.digest instead" }}
{{- else if .Values.image.digest }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s@%s" .Values.global.imageRegistry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.digest }}
{{- else }}
{{- printf "%s/%s:%s@%s" .Values.image.registry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.digest }}
{{- end }}
{{- else }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }}
{{- else }}
{{- printf "%s/%s:%s" .Values.image.registry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "prometheus-node-exporter.namespace" -}}
{{- if .Values.namespaceOverride }}
{{- .Values.namespaceOverride }}
{{- else }}
{{- .Release.Namespace }}
{{- end }}
{{- end }}
{{/*
Create the namespace name of the service monitor
*/}}
{{- define "prometheus-node-exporter.monitor-namespace" -}}
{{- if .Values.namespaceOverride }}
{{- .Values.namespaceOverride }}
{{- else }}
{{- if .Values.prometheus.monitor.namespace }}
{{- .Values.prometheus.monitor.namespace }}
{{- else }}
{{- .Release.Namespace }}
{{- end }}
{{- end }}
{{- end }}
{{/* Sets default scrape limits for servicemonitor */}}
{{- define "servicemonitor.scrapeLimits" -}}
{{- with .sampleLimit }}
sampleLimit: {{ . }}
{{- end }}
{{- with .targetLimit }}
targetLimit: {{ . }}
{{- end }}
{{- with .labelLimit }}
labelLimit: {{ . }}
{{- end }}
{{- with .labelNameLengthLimit }}
labelNameLengthLimit: {{ . }}
{{- end }}
{{- with .labelValueLengthLimit }}
labelValueLengthLimit: {{ . }}
{{- end }}
{{- end }}
{{/*
Formats imagePullSecrets. Input is (dict "Values" .Values "imagePullSecrets" .{specific imagePullSecrets})
*/}}
{{- define "prometheus-node-exporter.imagePullSecrets" -}}
{{- range (concat .Values.global.imagePullSecrets .imagePullSecrets) }}
{{- if eq (typeOf .) "map[string]interface {}" }}
- {{ toYaml . | trim }}
{{- else }}
- name: {{ . }}
{{- end }}
{{- end }}
{{- end -}}
{{/*
Create the namespace name of the pod monitor
*/}}
{{- define "prometheus-node-exporter.podmonitor-namespace" -}}
{{- if .Values.namespaceOverride }}
{{- .Values.namespaceOverride }}
{{- else }}
{{- if .Values.prometheus.podMonitor.namespace }}
{{- .Values.prometheus.podMonitor.namespace }}
{{- else }}
{{- .Release.Namespace }}
{{- end }}
{{- end }}
{{- end }}
{{/* Sets default scrape limits for podmonitor */}}
{{- define "podmonitor.scrapeLimits" -}}
{{- with .sampleLimit }}
sampleLimit: {{ . }}
{{- end }}
{{- with .targetLimit }}
targetLimit: {{ . }}
{{- end }}
{{- with .labelLimit }}
labelLimit: {{ . }}
{{- end }}
{{- with .labelNameLengthLimit }}
labelNameLengthLimit: {{ . }}
{{- end }}
{{- with .labelValueLengthLimit }}
labelValueLengthLimit: {{ . }}
{{- end }}
{{- end }}
{{/* Sets sidecar volumeMounts */}}
{{- define "prometheus-node-exporter.sidecarVolumeMounts" -}}
{{- range $_, $mount := $.Values.sidecarVolumeMount }}
- name: {{ $mount.name }}
mountPath: {{ $mount.mountPath }}
readOnly: {{ $mount.readOnly }}
{{- end }}
{{- range $_, $mount := $.Values.sidecarHostVolumeMounts }}
- name: {{ $mount.name }}
mountPath: {{ $mount.mountPath }}
readOnly: {{ $mount.readOnly }}
{{- if $mount.mountPropagation }}
mountPropagation: {{ $mount.mountPropagation }}
{{- end }}
{{- end }}
{{- end }}
{{/*
The default node affinity to exclude
- AWS Fargate (nodes labelled eks.amazonaws.com/compute-type=fargate)
- Azure virtual nodes (nodes labelled type=virtual-kubelet)
NOTE(review): presumably excluded because these virtual node types cannot host
the exporter DaemonSet - confirm against upstream chart rationale.
*/}}
{{- define "prometheus-node-exporter.defaultAffinity" -}}
nodeAffinity:
  requiredDuringSchedulingIgnoredDuringExecution:
    nodeSelectorTerms:
    - matchExpressions:
      - key: eks.amazonaws.com/compute-type
        operator: NotIn
        values:
        - fargate
      - key: type
        operator: NotIn
        values:
        - virtual-kubelet
{{- end -}}

{{/*
Merge user-supplied .Values.affinity into the default affinity above and emit
the result as YAML:
- nodeAffinity: deep-merged with the defaults via mergeOverwrite, so keys set
  by the user override the chart defaults
- podAffinity / podAntiAffinity: taken verbatim from user values when present
  (the defaults define neither, so there is nothing to merge)
*/}}
{{- define "prometheus-node-exporter.mergedAffinities" -}}
{{- /* Round-trip the default through fromYaml to get a mutable dict */}}
{{- $defaultAffinity := include "prometheus-node-exporter.defaultAffinity" . | fromYaml -}}
{{- with .Values.affinity -}}
{{- if .nodeAffinity -}}
{{- $_ := set $defaultAffinity "nodeAffinity" (mergeOverwrite $defaultAffinity.nodeAffinity .nodeAffinity) -}}
{{- end -}}
{{- if .podAffinity -}}
{{- $_ := set $defaultAffinity "podAffinity" .podAffinity -}}
{{- end -}}
{{- if .podAntiAffinity -}}
{{- $_ := set $defaultAffinity "podAntiAffinity" .podAntiAffinity -}}
{{- end -}}
{{- end -}}
{{- toYaml $defaultAffinity -}}
{{- end -}}

View File

@@ -0,0 +1,19 @@
{{/*
ClusterRole granting kube-rbac-proxy the ability to create TokenReviews and
SubjectAccessReviews (needed to authenticate/authorize scrape requests).
Rendered only when both rbac.create and kubeRBACProxy.enabled are true.
*/ -}}
{{- if and (eq .Values.rbac.create true) (eq .Values.kubeRBACProxy.enabled true) -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "prometheus-node-exporter.fullname" . }}
  labels:
    {{- include "prometheus-node-exporter.labels" . | nindent 4 }}
rules:
  {{- /* The file-level guard above already requires kubeRBACProxy.enabled,
         so the rules are emitted unconditionally here; the former inner
         `if $.Values.kubeRBACProxy.enabled` wrapper was redundant. */}}
  - apiGroups: [ "authentication.k8s.io" ]
    resources:
      - tokenreviews
    verbs: [ "create" ]
  - apiGroups: [ "authorization.k8s.io" ]
    resources:
      - subjectaccessreviews
    verbs: [ "create" ]
{{- end -}}

View File

@@ -0,0 +1,20 @@
{{/*
ClusterRoleBinding tying the chart's ServiceAccount to the kube-rbac-proxy
ClusterRole, or to a pre-existing role when rbac.useExistingRole is set.
Rendered only when both rbac.create and kubeRBACProxy.enabled are true.
*/}}
{{- if and (eq .Values.rbac.create true) (eq .Values.kubeRBACProxy.enabled true) -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    {{- include "prometheus-node-exporter.labels" . | nindent 4 }}
  name: {{ template "prometheus-node-exporter.fullname" . }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
{{- /* Prefer a user-supplied existing role over the chart-managed one. */}}
{{- if .Values.rbac.useExistingRole }}
  name: {{ .Values.rbac.useExistingRole }}
{{- else }}
  name: {{ template "prometheus-node-exporter.fullname" . }}
{{- end }}
subjects:
- kind: ServiceAccount
  name: {{ template "prometheus-node-exporter.serviceAccountName" . }}
  namespace: {{ template "prometheus-node-exporter.namespace" . }}
{{- end -}}

View File

@@ -0,0 +1,349 @@
{{/*
DaemonSet running one node-exporter pod per (eligible) node. The exporter
mounts the host's /proc and /sys (and optionally /) read-only. When
kubeRBACProxy.enabled is set, the exporter binds to 127.0.0.1 and a
kube-rbac-proxy sidecar terminates TLS/authz on service.port instead.
*/ -}}
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: {{ include "prometheus-node-exporter.fullname" . }}
  namespace: {{ include "prometheus-node-exporter.namespace" . }}
  labels:
    {{- include "prometheus-node-exporter.labels" . | nindent 4 }}
  {{- with .Values.daemonsetAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  selector:
    matchLabels:
      {{- include "prometheus-node-exporter.selectorLabels" . | nindent 6 }}
  revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
  {{- with .Values.updateStrategy }}
  updateStrategy:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "prometheus-node-exporter.labels" . | nindent 8 }}
        {{- with .Values.podLabels }}
        {{- tpl (toYaml .) $ | nindent 8 }}
        {{- end }}
    spec:
      {{- /* kube-rbac-proxy needs a ServiceAccount token to call the
             TokenReview/SubjectAccessReview APIs, so enabling it forces
             token automount on. */}}
      automountServiceAccountToken: {{ ternary true false (or .Values.serviceAccount.automountServiceAccountToken .Values.kubeRBACProxy.enabled) }}
      {{- with .Values.securityContext }}
      securityContext:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.priorityClassName }}
      priorityClassName: {{ . }}
      {{- end }}
      {{- with .Values.extraInitContainers }}
      initContainers:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "prometheus-node-exporter.serviceAccountName" . }}
      {{- with .Values.terminationGracePeriodSeconds }}
      terminationGracePeriodSeconds: {{ . }}
      {{- end }}
      containers:
        {{- /* With the RBAC proxy on, the exporter listens on the internal
               kubeRBACProxy.port and probes address it by name/number via
               $servicePortReference; otherwise the plain service port is
               used directly. */}}
        {{- $servicePort := ternary .Values.kubeRBACProxy.port .Values.service.port .Values.kubeRBACProxy.enabled }}
        {{- $servicePortReference := ternary .Values.kubeRBACProxy.port .Values.service.portName .Values.kubeRBACProxy.enabled }}
        - name: node-exporter
          image: {{ include "prometheus-node-exporter.image" . }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          args:
            - --path.procfs=/host/proc
            - --path.sysfs=/host/sys
            {{- if .Values.hostRootFsMount.enabled }}
            - --path.rootfs=/host/root
            {{- if semverCompare ">=1.4.0-0" (coalesce .Values.version .Values.image.tag .Chart.AppVersion) }}
            - --path.udev.data=/host/root/run/udev/data
            {{- end }}
            {{- end }}
            - --web.listen-address=[$(HOST_IP)]:{{ $servicePort }}
            {{- with .Values.extraArgs }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          {{- with .Values.containerSecurityContext }}
          securityContext:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          env:
            - name: HOST_IP
              {{- if .Values.kubeRBACProxy.enabled }}
              value: 127.0.0.1
              {{- else if .Values.service.listenOnAllInterfaces }}
              value: 0.0.0.0
              {{- else }}
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.hostIP
              {{- end }}
            {{- range $key, $value := .Values.env }}
            - name: {{ $key }}
              value: {{ $value | quote }}
            {{- end }}
          {{- if eq .Values.kubeRBACProxy.enabled false }}
          ports:
            - name: {{ .Values.service.portName }}
              containerPort: {{ .Values.service.port }}
              protocol: TCP
          {{- end }}
          livenessProbe:
            failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
            httpGet:
              {{- if .Values.kubeRBACProxy.enabled }}
              host: 127.0.0.1
              {{- end }}
              httpHeaders:
              {{- range $_, $header := .Values.livenessProbe.httpGet.httpHeaders }}
              - name: {{ $header.name }}
                value: {{ $header.value }}
              {{- end }}
              path: /
              port: {{ $servicePortReference }}
              scheme: {{ upper .Values.livenessProbe.httpGet.scheme }}
            initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
            periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
            successThreshold: {{ .Values.livenessProbe.successThreshold }}
            timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
          readinessProbe:
            failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
            httpGet:
              {{- if .Values.kubeRBACProxy.enabled }}
              host: 127.0.0.1
              {{- end }}
              httpHeaders:
              {{- range $_, $header := .Values.readinessProbe.httpGet.httpHeaders }}
              - name: {{ $header.name }}
                value: {{ $header.value }}
              {{- end }}
              path: /
              port: {{ $servicePortReference }}
              scheme: {{ upper .Values.readinessProbe.httpGet.scheme }}
            initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
            periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
            successThreshold: {{ .Values.readinessProbe.successThreshold }}
            timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
          {{- with .Values.resources }}
          resources:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- if .Values.terminationMessageParams.enabled }}
          {{- with .Values.terminationMessageParams }}
          terminationMessagePath: {{ .terminationMessagePath }}
          terminationMessagePolicy: {{ .terminationMessagePolicy }}
          {{- end }}
          {{- end }}
          volumeMounts:
            - name: proc
              mountPath: /host/proc
              {{- with .Values.hostProcFsMount.mountPropagation }}
              mountPropagation: {{ . }}
              {{- end }}
              readOnly: true
            - name: sys
              mountPath: /host/sys
              {{- with .Values.hostSysFsMount.mountPropagation }}
              mountPropagation: {{ . }}
              {{- end }}
              readOnly: true
            {{- if .Values.hostRootFsMount.enabled }}
            - name: root
              mountPath: /host/root
              {{- with .Values.hostRootFsMount.mountPropagation }}
              mountPropagation: {{ . }}
              {{- end }}
              readOnly: true
            {{- end }}
            {{- range $_, $mount := .Values.extraHostVolumeMounts }}
            - name: {{ $mount.name }}
              mountPath: {{ $mount.mountPath }}
              readOnly: {{ $mount.readOnly }}
              {{- with $mount.mountPropagation }}
              mountPropagation: {{ . }}
              {{- end }}
            {{- end }}
            {{- range $_, $mount := .Values.sidecarVolumeMount }}
            - name: {{ $mount.name }}
              mountPath: {{ $mount.mountPath }}
              readOnly: true
            {{- end }}
            {{- range $_, $mount := .Values.configmaps }}
            - name: {{ $mount.name }}
              mountPath: {{ $mount.mountPath }}
            {{- end }}
            {{- range $_, $mount := .Values.secrets }}
            - name: {{ .name }}
              mountPath: {{ .mountPath }}
            {{- end }}
            {{- with .Values.extraVolumeMounts }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
        {{- /* User-defined sidecars: each entry is merged with chart
               defaults (image, securityContext, pullPolicy) and has the
               shared sidecar volumeMounts prepended to its own. */}}
        {{- range .Values.sidecars }}
        {{- $overwrites := dict "volumeMounts" (concat (include "prometheus-node-exporter.sidecarVolumeMounts" $ | fromYamlArray) (.volumeMounts | default list) | default list) }}
        {{- $defaults := dict "image" (include "prometheus-node-exporter.image" $) "securityContext" $.Values.containerSecurityContext "imagePullPolicy" $.Values.image.pullPolicy }}
        - {{- toYaml (merge $overwrites . $defaults) | nindent 10 }}
        {{- end }}
        {{- /* Optional kube-rbac-proxy sidecar: exposes the exporter on
               service.port over HTTPS/authz, proxying to 127.0.0.1. */}}
        {{- if .Values.kubeRBACProxy.enabled }}
        - name: kube-rbac-proxy
          args:
            {{- if .Values.kubeRBACProxy.extraArgs }}
            {{- .Values.kubeRBACProxy.extraArgs | toYaml | nindent 12 }}
            {{- end }}
            - --secure-listen-address=:{{ .Values.service.port}}
            - --upstream=http://127.0.0.1:{{ $servicePort }}/
            - --proxy-endpoints-port={{ .Values.kubeRBACProxy.proxyEndpointsPort }}
            - --config-file=/etc/kube-rbac-proxy-config/config-file.yaml
            {{- if and .Values.kubeRBACProxy.tls.enabled .Values.tlsSecret.enabled }}
            - --tls-cert-file=/tls/private/{{ .Values.tlsSecret.certItem }}
            - --tls-private-key-file=/tls/private/{{ .Values.tlsSecret.keyItem }}
            {{- if and .Values.kubeRBACProxy.tls.tlsClientAuth .Values.tlsSecret.caItem }}
            - --client-ca-file=/tls/private/{{ .Values.tlsSecret.caItem }}
            {{- end }}
            {{- end }}
          volumeMounts:
            - name: kube-rbac-proxy-config
              mountPath: /etc/kube-rbac-proxy-config
            {{- if and .Values.kubeRBACProxy.tls.enabled .Values.tlsSecret.enabled }}
            - name: {{ tpl .Values.tlsSecret.volumeName . | quote }}
              mountPath: /tls/private
              readOnly: true
            {{- end }}
            {{- with .Values.kubeRBACProxy.extraVolumeMounts }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          imagePullPolicy: {{ .Values.kubeRBACProxy.image.pullPolicy }}
          {{- if .Values.kubeRBACProxy.image.sha }}
          image: "{{ .Values.global.imageRegistry | default .Values.kubeRBACProxy.image.registry}}/{{ .Values.kubeRBACProxy.image.repository }}:{{ .Values.kubeRBACProxy.image.tag }}@sha256:{{ .Values.kubeRBACProxy.image.sha }}"
          {{- else }}
          image: "{{ .Values.global.imageRegistry | default .Values.kubeRBACProxy.image.registry}}/{{ .Values.kubeRBACProxy.image.repository }}:{{ .Values.kubeRBACProxy.image.tag }}"
          {{- end }}
          ports:
            - containerPort: {{ .Values.service.port}}
              name: {{ .Values.kubeRBACProxy.portName }}
              {{- if .Values.kubeRBACProxy.enableHostPort }}
              hostPort: {{ .Values.service.port }}
              {{- end }}
            - containerPort: {{ .Values.kubeRBACProxy.proxyEndpointsPort }}
              {{- if .Values.kubeRBACProxy.enableProxyEndpointsHostPort }}
              hostPort: {{ .Values.kubeRBACProxy.proxyEndpointsPort }}
              {{- end }}
              name: "http-healthz"
          readinessProbe:
            httpGet:
              scheme: HTTPS
              port: {{ .Values.kubeRBACProxy.proxyEndpointsPort }}
              path: healthz
            initialDelaySeconds: 5
            timeoutSeconds: 5
          {{- if .Values.kubeRBACProxy.resources }}
          resources:
            {{- toYaml .Values.kubeRBACProxy.resources | nindent 12 }}
          {{- end }}
          {{- if .Values.terminationMessageParams.enabled }}
          {{- with .Values.terminationMessageParams }}
          terminationMessagePath: {{ .terminationMessagePath }}
          terminationMessagePolicy: {{ .terminationMessagePolicy }}
          {{- end }}
          {{- end }}
          {{- with .Values.kubeRBACProxy.env }}
          env:
            {{- range $key, $value := $.Values.kubeRBACProxy.env }}
            - name: {{ $key }}
              value: {{ $value | quote }}
            {{- end }}
          {{- end }}
          {{- if .Values.kubeRBACProxy.containerSecurityContext }}
          securityContext:
            {{ toYaml .Values.kubeRBACProxy.containerSecurityContext | nindent 12 }}
          {{- end }}
        {{- end }}
      {{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }}
      imagePullSecrets:
        {{- include "prometheus-node-exporter.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.imagePullSecrets) | indent 8 }}
      {{- end }}
      hostNetwork: {{ .Values.hostNetwork }}
      hostPID: {{ .Values.hostPID }}
      hostIPC: {{ .Values.hostIPC }}
      affinity:
        {{- include "prometheus-node-exporter.mergedAffinities" . | nindent 8 }}
      {{- with .Values.dnsConfig }}
      dnsConfig:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.restartPolicy }}
      restartPolicy: {{ . }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- /* Volume sources backing the mounts declared above, in matching
             order: host paths, sidecar emptyDirs, configmaps/secrets, then
             the RBAC-proxy config and optional TLS secret. */}}
      volumes:
        - name: proc
          hostPath:
            path: /proc
        - name: sys
          hostPath:
            path: /sys
        {{- if .Values.hostRootFsMount.enabled }}
        - name: root
          hostPath:
            path: /
        {{- end }}
        {{- range $_, $mount := .Values.extraHostVolumeMounts }}
        - name: {{ $mount.name }}
          hostPath:
            path: {{ $mount.hostPath }}
            {{- with $mount.type }}
            type: {{ . }}
            {{- end }}
        {{- end }}
        {{- range $_, $mount := .Values.sidecarVolumeMount }}
        - name: {{ $mount.name }}
          emptyDir:
            medium: Memory
        {{- end }}
        {{- range $_, $mount := .Values.sidecarHostVolumeMounts }}
        - name: {{ $mount.name }}
          hostPath:
            path: {{ $mount.hostPath }}
        {{- end }}
        {{- range $_, $mount := .Values.configmaps }}
        - name: {{ $mount.name }}
          configMap:
            name: {{ $mount.name }}
        {{- end }}
        {{- range $_, $mount := .Values.secrets }}
        - name: {{ $mount.name }}
          secret:
            secretName: {{ $mount.name }}
        {{- end }}
        {{- if .Values.kubeRBACProxy.enabled }}
        - name: kube-rbac-proxy-config
          configMap:
            name: {{ template "prometheus-node-exporter.fullname" . }}-rbac-config
        {{- end }}
        {{- if .Values.tlsSecret.enabled }}
        - name: {{ tpl .Values.tlsSecret.volumeName . | quote }}
          secret:
            secretName: {{ tpl .Values.tlsSecret.secretName . | quote }}
            items:
              - key: {{ required "Value tlsSecret.certItem must be set." .Values.tlsSecret.certItem | quote }}
                path: {{ .Values.tlsSecret.certItem | quote }}
              - key: {{ required "Value tlsSecret.keyItem must be set." .Values.tlsSecret.keyItem | quote }}
                path: {{ .Values.tlsSecret.keyItem | quote }}
              {{- if .Values.tlsSecret.caItem }}
              - key: {{ .Values.tlsSecret.caItem | quote }}
                path: {{ .Values.tlsSecret.caItem | quote }}
              {{- end }}
        {{- end }}
        {{- with .Values.extraVolumes }}
        {{- toYaml . | nindent 8 }}
        {{- end }}

View File

@@ -0,0 +1,18 @@
{{/*
Static Endpoints object for scraping node exporters running outside the
cluster: one address entry is created per IP listed in .Values.endpoints.
*/}}
{{- if .Values.endpoints }}
apiVersion: v1
kind: Endpoints
metadata:
  name: {{ include "prometheus-node-exporter.fullname" . }}
  namespace: {{ include "prometheus-node-exporter.namespace" . }}
  labels:
    {{- include "prometheus-node-exporter.labels" . | nindent 4 }}
subsets:
  - addresses:
    {{- range .Values.endpoints }}
      - ip: {{ . }}
    {{- end }}
    ports:
      - name: {{ .Values.service.portName }}
        {{- /* NOTE(review): port is hard-coded to 9100 and does not track
               .Values.service.port — confirm external exporters always
               listen on 9100 or parameterize this. */}}
        port: 9100
        protocol: TCP
{{- end }}

View File

@@ -0,0 +1,4 @@
{{/*
Renders each user-supplied entry of .Values.extraManifests as its own YAML
document; entries are passed through tpl so they may contain template
expressions evaluated against the root context.
*/ -}}
{{ range .Values.extraManifests }}
---
{{ tpl . $ }}
{{ end }}

View File

@@ -0,0 +1,27 @@
{{/*
Optional NetworkPolicy for the exporter pods. Ingress defaults to allowing
only the service port unless custom rules are supplied. policyTypes lists
Egress with no egress rules, so this policy blocks all egress from the
selected pods by design.
*/}}
{{- if .Values.networkPolicy.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: {{ include "prometheus-node-exporter.fullname" . }}
  namespace: {{ include "prometheus-node-exporter.namespace" . }}
  labels:
    {{- include "prometheus-node-exporter.labels" $ | nindent 4 }}
  {{- with .Values.service.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  ingress:
    {{- if .Values.networkPolicy.ingress }}
    {{- toYaml .Values.networkPolicy.ingress | nindent 4 }}
    {{- else }}
    - ports:
        - port: {{ .Values.service.port }}
    {{- end }}
  policyTypes:
    - Egress
    - Ingress
  podSelector:
    matchLabels:
      {{- include "prometheus-node-exporter.selectorLabels" . | nindent 6 }}
{{- end }}

View File

@@ -0,0 +1,91 @@
{{/*
PodMonitor for Prometheus Operator, scraping exporter pods directly instead
of going through the Service (see values.yaml for when to prefer this over
the ServiceMonitor). Most endpoint fields are optional pass-throughs from
.Values.prometheus.podMonitor.
*/}}
{{- if .Values.prometheus.podMonitor.enabled }}
apiVersion: {{ .Values.prometheus.podMonitor.apiVersion | default "monitoring.coreos.com/v1" }}
kind: PodMonitor
metadata:
  name: {{ include "prometheus-node-exporter.fullname" . }}
  namespace: {{ include "prometheus-node-exporter.podmonitor-namespace" . }}
  labels:
    {{- include "prometheus-node-exporter.labels" . | nindent 4 }}
    {{- with .Values.prometheus.podMonitor.additionalLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
spec:
  jobLabel: {{ default "app.kubernetes.io/name" .Values.prometheus.podMonitor.jobLabel }}
  {{- include "podmonitor.scrapeLimits" .Values.prometheus.podMonitor | nindent 2 }}
  selector:
    matchLabels:
      {{- /* with/else: use the user's selector override when given,
             otherwise the chart's standard selector labels. */}}
      {{- with .Values.prometheus.podMonitor.selectorOverride }}
      {{- toYaml . | nindent 6 }}
      {{- else }}
      {{- include "prometheus-node-exporter.selectorLabels" . | nindent 6 }}
      {{- end }}
  namespaceSelector:
    matchNames:
      - {{ include "prometheus-node-exporter.namespace" . }}
  {{- with .Values.prometheus.podMonitor.attachMetadata }}
  attachMetadata:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  {{- with .Values.prometheus.podMonitor.podTargetLabels }}
  podTargetLabels:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  podMetricsEndpoints:
    - port: {{ .Values.service.portName }}
      {{- with .Values.prometheus.podMonitor.scheme }}
      scheme: {{ . }}
      {{- end }}
      {{- with .Values.prometheus.podMonitor.path }}
      path: {{ . }}
      {{- end }}
      {{- with .Values.prometheus.podMonitor.basicAuth }}
      basicAuth:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.prometheus.podMonitor.bearerTokenSecret }}
      bearerTokenSecret:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.prometheus.podMonitor.tlsConfig }}
      tlsConfig:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.prometheus.podMonitor.authorization }}
      authorization:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.prometheus.podMonitor.oauth2 }}
      oauth2:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.prometheus.podMonitor.proxyUrl }}
      proxyUrl: {{ . }}
      {{- end }}
      {{- with .Values.prometheus.podMonitor.interval }}
      interval: {{ . }}
      {{- end }}
      {{- with .Values.prometheus.podMonitor.honorTimestamps }}
      honorTimestamps: {{ . }}
      {{- end }}
      {{- with .Values.prometheus.podMonitor.honorLabels }}
      honorLabels: {{ . }}
      {{- end }}
      {{- with .Values.prometheus.podMonitor.scrapeTimeout }}
      scrapeTimeout: {{ . }}
      {{- end }}
      {{- with .Values.prometheus.podMonitor.relabelings }}
      relabelings:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.prometheus.podMonitor.metricRelabelings }}
      metricRelabelings:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      enableHttp2: {{ default false .Values.prometheus.podMonitor.enableHttp2 }}
      filterRunning: {{ default true .Values.prometheus.podMonitor.filterRunning }}
      followRedirects: {{ default false .Values.prometheus.podMonitor.followRedirects }}
      {{- with .Values.prometheus.podMonitor.params }}
      params:
        {{- toYaml . | nindent 8 }}
      {{- end }}
{{- end }}

View File

@@ -0,0 +1,16 @@
{{/*
ConfigMap holding the kube-rbac-proxy authorization config, mounted by the
sidecar at /etc/kube-rbac-proxy-config (see the DaemonSet). The resource
attributes restrict access to the chart's Service in its namespace. The
config body is a literal block scalar (|+), so its content is emitted
verbatim with trailing newlines preserved.
*/}}
{{- if .Values.kubeRBACProxy.enabled}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "prometheus-node-exporter.fullname" . }}-rbac-config
  namespace: {{ include "prometheus-node-exporter.namespace" . }}
data:
  config-file.yaml: |+
    authorization:
      resourceAttributes:
        namespace: {{ template "prometheus-node-exporter.namespace" . }}
        apiVersion: v1
        resource: services
        subresource: {{ template "prometheus-node-exporter.fullname" . }}
        name: {{ template "prometheus-node-exporter.fullname" . }}
{{- end }}

View File

@@ -0,0 +1,41 @@
{{/*
Service exposing the exporter (or the kube-rbac-proxy when enabled, since
both listen on the same configured port). Supports dual-stack, traffic
policies, a fixed clusterIP and a NodePort.
*/}}
{{- if .Values.service.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "prometheus-node-exporter.fullname" . }}
  namespace: {{ include "prometheus-node-exporter.namespace" . }}
  labels:
    {{- include "prometheus-node-exporter.labels" $ | nindent 4 }}
    {{- with .Values.service.labels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  {{- with .Values.service.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.service.ipDualStack.enabled }}
  ipFamilies: {{ toYaml .Values.service.ipDualStack.ipFamilies | nindent 4 }}
  ipFamilyPolicy: {{ .Values.service.ipDualStack.ipFamilyPolicy }}
  {{- end }}
  {{- if .Values.service.externalTrafficPolicy }}
  externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
  {{- end }}
  {{- if .Values.service.internalTrafficPolicy }}
  internalTrafficPolicy: {{ .Values.service.internalTrafficPolicy }}
  {{- end }}
  type: {{ .Values.service.type }}
  {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }}
  clusterIP: "{{ .Values.service.clusterIP }}"
  {{- end }}
  {{- /* service.servicePort (if set) decouples the exposed port from the
         container port; otherwise service.port is used for both. */}}
  ports:
    - port: {{ .Values.service.servicePort | default .Values.service.port }}
      {{- if ( and (eq .Values.service.type "NodePort" ) (not (empty .Values.service.nodePort)) ) }}
      nodePort: {{ .Values.service.nodePort }}
      {{- end }}
      targetPort: {{ .Values.service.targetPort }}
      protocol: TCP
      name: {{ .Values.service.portName }}
  selector:
    {{- include "prometheus-node-exporter.selectorLabels" . | nindent 4 }}
{{- end }}

View File

@@ -0,0 +1,18 @@
{{/*
ServiceAccount used by the DaemonSet pods; created only when both
rbac.create and serviceAccount.create are true. imagePullSecrets are merged
from the serviceAccount-level and global values via the chart helper.
*/}}
{{- if and .Values.rbac.create .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "prometheus-node-exporter.serviceAccountName" . }}
  namespace: {{ include "prometheus-node-exporter.namespace" . }}
  labels:
    {{- include "prometheus-node-exporter.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
{{- if or .Values.serviceAccount.imagePullSecrets .Values.global.imagePullSecrets }}
imagePullSecrets:
  {{- include "prometheus-node-exporter.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.serviceAccount.imagePullSecrets) | indent 2 }}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,65 @@
{{/*
ServiceMonitor for Prometheus Operator, scraping the exporter through its
Service. Endpoint options are optional pass-throughs from
.Values.prometheus.monitor.
*/}}
{{- if .Values.prometheus.monitor.enabled }}
apiVersion: {{ .Values.prometheus.monitor.apiVersion | default "monitoring.coreos.com/v1" }}
kind: ServiceMonitor
metadata:
  name: {{ include "prometheus-node-exporter.fullname" . }}
  namespace: {{ include "prometheus-node-exporter.monitor-namespace" . }}
  labels:
    {{- include "prometheus-node-exporter.labels" . | nindent 4 }}
    {{- with .Values.prometheus.monitor.additionalLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
spec:
  jobLabel: {{ default "app.kubernetes.io/name" .Values.prometheus.monitor.jobLabel }}
  {{- include "servicemonitor.scrapeLimits" .Values.prometheus.monitor | nindent 2 }}
  {{- with .Values.prometheus.monitor.podTargetLabels }}
  podTargetLabels:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  {{- with .Values.prometheus.monitor.targetLabels }}
  targetLabels:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  selector:
    matchLabels:
      {{- /* with/else: use the user's selector override when given,
             otherwise the chart's standard selector labels. */}}
      {{- with .Values.prometheus.monitor.selectorOverride }}
      {{- toYaml . | nindent 6 }}
      {{- else }}
      {{- include "prometheus-node-exporter.selectorLabels" . | nindent 6 }}
      {{- end }}
  {{- with .Values.prometheus.monitor.attachMetadata }}
  attachMetadata:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  endpoints:
    - port: {{ .Values.service.portName }}
      scheme: {{ .Values.prometheus.monitor.scheme }}
      {{- with .Values.prometheus.monitor.basicAuth }}
      basicAuth:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.prometheus.monitor.bearerTokenFile }}
      bearerTokenFile: {{ . }}
      {{- end }}
      {{- with .Values.prometheus.monitor.tlsConfig }}
      tlsConfig:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.prometheus.monitor.proxyUrl }}
      proxyUrl: {{ . }}
      {{- end }}
      {{- with .Values.prometheus.monitor.interval }}
      interval: {{ . }}
      {{- end }}
      {{- with .Values.prometheus.monitor.scrapeTimeout }}
      scrapeTimeout: {{ . }}
      {{- end }}
      {{- with .Values.prometheus.monitor.relabelings }}
      relabelings:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.prometheus.monitor.metricRelabelings }}
      metricRelabelings:
        {{- toYaml . | nindent 8 }}
      {{- end }}
{{- end }}

View File

@@ -0,0 +1,40 @@
{{/*
VerticalPodAutoscaler targeting the exporter DaemonSet. Rendered only when
the cluster advertises the autoscaling.k8s.io/v1 API (VPA CRDs installed)
AND verticalPodAutoscaler.enabled is set; the container policy applies to
the node-exporter container only.
*/}}
{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1") (.Values.verticalPodAutoscaler.enabled) }}
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: {{ include "prometheus-node-exporter.fullname" . }}
  namespace: {{ include "prometheus-node-exporter.namespace" . }}
  labels:
    {{- include "prometheus-node-exporter.labels" . | nindent 4 }}
spec:
  {{- with .Values.verticalPodAutoscaler.recommenders }}
  recommenders:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  resourcePolicy:
    containerPolicies:
      - containerName: node-exporter
        {{- with .Values.verticalPodAutoscaler.controlledResources }}
        controlledResources:
          {{- toYaml . | nindent 8 }}
        {{- end }}
        {{- with .Values.verticalPodAutoscaler.controlledValues }}
        controlledValues: {{ . }}
        {{- end }}
        {{- with .Values.verticalPodAutoscaler.maxAllowed }}
        maxAllowed:
          {{- toYaml . | nindent 8 }}
        {{- end }}
        {{- with .Values.verticalPodAutoscaler.minAllowed }}
        minAllowed:
          {{- toYaml . | nindent 8 }}
        {{- end }}
  targetRef:
    apiVersion: apps/v1
    kind: DaemonSet
    name: {{ include "prometheus-node-exporter.fullname" . }}
  {{- with .Values.verticalPodAutoscaler.updatePolicy }}
  updatePolicy:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}

View File

@@ -0,0 +1,616 @@
# Default values for prometheus-node-exporter.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
registry: quay.io
repository: prometheus/node-exporter
# Overrides the image tag whose default is {{ printf "v%s" .Chart.AppVersion }}
tag: ""
pullPolicy: IfNotPresent
digest: ""
imagePullSecrets: []
# - name: "image-pull-secret"
nameOverride: ""
fullnameOverride: ""
# Number of old history to retain to allow rollback
# Default Kubernetes value is set to 10
revisionHistoryLimit: 10
global:
# To help compatibility with other charts which use global.imagePullSecrets.
# Allow either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style).
# global:
# imagePullSecrets:
# - name: pullSecret1
# - name: pullSecret2
# or
# global:
# imagePullSecrets:
# - pullSecret1
# - pullSecret2
imagePullSecrets: []
#
# Allow parent charts to override registry hostname
imageRegistry: ""
# Configure kube-rbac-proxy. When enabled, creates a kube-rbac-proxy to protect the node-exporter http endpoint.
# The requests are served through the same service but requests are HTTPS.
kubeRBACProxy:
enabled: false
## Set environment variables as name/value pairs
env: {}
# VARIABLE: value
image:
registry: quay.io
repository: brancz/kube-rbac-proxy
tag: v0.20.0
sha: ""
pullPolicy: IfNotPresent
# List of additional cli arguments to configure kube-rbac-proxy
# for example: --tls-cipher-suites, --log-file, etc.
# all the possible args can be found here: https://github.com/brancz/kube-rbac-proxy#usage
extraArgs: []
## Specify security settings for a Container
## Allows overrides and additional options compared to (Pod) securityContext
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
containerSecurityContext: {}
# Specify the port used for the Node exporter container (upstream port)
port: 8100
# Specify the name of the container port
portName: http
# Configure a hostPort. If true, hostPort will be enabled in the container and set to service.port.
enableHostPort: false
# Configure Proxy Endpoints Port
# This is the port being probed for readiness
proxyEndpointsPort: 8888
# Configure a hostPort. If true, hostPort will be enabled in the container and set to proxyEndpointsPort.
enableProxyEndpointsHostPort: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 64Mi
# requests:
# cpu: 10m
# memory: 32Mi
## Additional volume mounts in the kube-rbac-proxy container
## See extraVolumes below
extraVolumeMounts: []
# - name: extra-volume
# mountPath: /extra
# readOnly: true
## tls enables using TLS resources from a volume on secret referred to in tlsSecret below.
## When enabling tlsClientAuth, client CA certificate must be set in tlsSecret.caItem.
## Ref. https://github.com/brancz/kube-rbac-proxy/issues/187
tls:
enabled: false
tlsClientAuth: false
## tlsSecret refers to an existing secret holding TLS items: client CA certificate, private key and certificate.
## secretName and volumeName can be templated.
## If enabled, volume volumeName gets created on secret secretName.
## The volume's resources will be used by kube-rbac-proxy if kubeRBACProxy.tls.enabled is set.
tlsSecret:
enabled: false
## Key with client CA certificate (optional)
caItem: ""
## Key with certificate
certItem: tls.crt
## Key with private key
keyItem: tls.key
## Name of an existing secret
secretName: prometheus-node-exporter-tls
## Name of the volume to be created
volumeName: prometheus-node-exporter-tls
## Service configuration
service:
## Creating a service is enabled by default
enabled: true
## Service type
type: ClusterIP
## IP address for type ClusterIP
clusterIP: ""
## Default service port. Sets the port of the exposed container as well (NE or kubeRBACProxy).
## Use "servicePort" below if changing the service port only is desired.
port: 9100
## Service port. Use this field if you wish to set a different service port
## without changing the container port ("port" above).
servicePort: ""
## Targeted port in the pod. Must refer to an open container port ("port" or "portName").
## (IntOrString)
targetPort: 9100
## Name of the service port. Sets the port name of the main container (NE) as well.
portName: metrics
## Port number for service type NodePort
nodePort: null
## If true, node exporter will listen on all interfaces
listenOnAllInterfaces: true
## Additional annotations and labels for the service
annotations:
prometheus.io/scrape: "true"
labels: {}
## Dual stack settings for the service
## https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services
ipDualStack:
enabled: false
ipFamilies: ["IPv6", "IPv4"]
ipFamilyPolicy: "PreferDualStack"
## External/Internal traffic policy setting (Cluster, Local)
## https://kubernetes.io/docs/reference/networking/virtual-ips/#traffic-policies
externalTrafficPolicy: ""
internalTrafficPolicy: ""
# Set a NetworkPolicy with:
# ingress only on service.port or custom policy
# no egress permitted
networkPolicy:
enabled: false
# ingress:
# - {}
# Additional environment variables that will be passed to the daemonset
env: {}
## env:
## VARIABLE: value
prometheus:
monitor:
enabled: false
additionalLabels: {}
namespace: ""
jobLabel: ""
# List of pod labels to add to node exporter metrics
# https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
podTargetLabels: []
# List of target labels to add to node exporter metrics
# https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
targetLabels: []
scheme: http
basicAuth: {}
bearerTokenFile:
tlsConfig: {}
## proxyUrl: URL of a proxy that should be used for scraping.
##
proxyUrl: ""
## Override serviceMonitor selector
##
selectorOverride: {}
## Attach node metadata to discovered targets. Requires Prometheus v2.35.0 and above.
##
attachMetadata:
node: false
relabelings: []
metricRelabelings: []
interval: ""
scrapeTimeout: 10s
## prometheus.monitor.apiVersion ApiVersion for the serviceMonitor Resource(defaults to "monitoring.coreos.com/v1")
apiVersion: ""
## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
##
sampleLimit: 0
## TargetLimit defines a limit on the number of scraped targets that will be accepted.
##
targetLimit: 0
## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
##
labelLimit: 0
## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
##
labelNameLengthLimit: 0
## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
##
labelValueLengthLimit: 0
# PodMonitor defines monitoring for a set of pods.
# ref. https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#podmonitor
# Using a PodMonitor may be preferred in some environments where there is very large number
# of Node Exporter endpoints (1000+) behind a single service.
# The PodMonitor is disabled by default. When switching from ServiceMonitor to PodMonitor,
# the time series resulting from the configuration through PodMonitor may have different labels.
# For instance, there will not be the service label any longer which might
# affect PromQL queries selecting that label.
podMonitor:
enabled: false
# Namespace in which to deploy the pod monitor. Defaults to the release namespace.
namespace: ""
# Additional labels, e.g. setting a label for pod monitor selector as set in prometheus
additionalLabels: {}
# release: kube-prometheus-stack
# PodTargetLabels transfers labels of the Kubernetes Pod onto the target.
podTargetLabels: []
# apiVersion defaults to monitoring.coreos.com/v1.
apiVersion: ""
# Override pod selector to select pod objects.
selectorOverride: {}
# Attach node metadata to discovered targets. Requires Prometheus v2.35.0 and above.
attachMetadata:
node: false
# The label to use to retrieve the job name from. Defaults to label app.kubernetes.io/name.
jobLabel: ""
# Scheme/protocol to use for scraping.
scheme: "http"
# Path to scrape metrics at.
path: "/metrics"
# BasicAuth allow an endpoint to authenticate over basic authentication.
# More info: https://prometheus.io/docs/operating/configuration/#endpoint
basicAuth: {}
# Secret to mount to read bearer token for scraping targets.
# The secret needs to be in the same namespace as the pod monitor and accessible by the Prometheus Operator.
# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#secretkeyselector-v1-core
bearerTokenSecret: {}
# TLS configuration to use when scraping the endpoint.
tlsConfig: {}
# Authorization section for this endpoint.
# https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#safeauthorization
authorization: {}
# OAuth2 for the URL. Only valid in Prometheus versions 2.27.0 and newer.
# https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#oauth2
oauth2: {}
# ProxyURL eg http://proxyserver:2195. Directs scrapes through proxy to this endpoint.
proxyUrl: ""
# Interval at which endpoints should be scraped. If not specified Prometheus' global scrape interval is used.
interval: ""
  # Timeout after which the scrape is ended. If not specified, the Prometheus global scrape timeout is used.
scrapeTimeout: ""
# HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data.
honorTimestamps: true
# HonorLabels chooses the metric's labels on collisions with target labels.
honorLabels: true
# Whether to enable HTTP2. Default false.
enableHttp2: ""
# Drop pods that are not running. (Failed, Succeeded).
# Enabled by default. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase
filterRunning: ""
# FollowRedirects configures whether scrape requests follow HTTP 3xx redirects. Default false.
followRedirects: ""
# Optional HTTP URL parameters
params: {}
# RelabelConfigs to apply to samples before scraping. Prometheus Operator automatically adds
# relabelings for a few standard Kubernetes fields. The original scrape job's name
# is available via the __tmp_prometheus_job_name label.
# More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
relabelings: []
# MetricRelabelConfigs to apply to samples before ingestion.
metricRelabelings: []
# SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
sampleLimit: 0
# TargetLimit defines a limit on the number of scraped targets that will be accepted.
targetLimit: 0
# Per-scrape limit on number of labels that will be accepted for a sample.
# Only valid in Prometheus versions 2.27.0 and newer.
labelLimit: 0
# Per-scrape limit on length of labels name that will be accepted for a sample.
# Only valid in Prometheus versions 2.27.0 and newer.
labelNameLengthLimit: 0
# Per-scrape limit on length of labels value that will be accepted for a sample.
# Only valid in Prometheus versions 2.27.0 and newer.
labelValueLengthLimit: 0
## Customize the updateStrategy if set
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 200m
# memory: 50Mi
# requests:
# cpu: 100m
# memory: 30Mi
# Specify the container restart policy passed to the Node Export container
# Possible Values: Always (default)|OnFailure|Never
restartPolicy: null
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
annotations: {}
imagePullSecrets: []
automountServiceAccountToken: false
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
containerSecurityContext:
readOnlyRootFilesystem: true
# capabilities:
# add:
# - SYS_TIME
rbac:
## If true, create & use RBAC resources
##
create: true
# for deployments that have node_exporter deployed outside of the cluster, list
# their addresses here
endpoints: []
# Expose the service to the host network
hostNetwork: true
# Share the host process ID namespace
hostPID: true
# Share the host ipc namespace
hostIPC: false
# Mount the node's root file system (/) at /host/root in the container
hostRootFsMount:
enabled: true
# Defines how new mounts in existing mounts on the node or in the container
# are propagated to the container or node, respectively. Possible values are
# None, HostToContainer, and Bidirectional. If this field is omitted, then
# None is used. More information on:
# https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation
mountPropagation: HostToContainer
# Mount the node's proc file system (/proc) at /host/proc in the container
hostProcFsMount:
# Possible values are None, HostToContainer, and Bidirectional
mountPropagation: ""
# Mount the node's sys file system (/sys) at /host/sys in the container
hostSysFsMount:
# Possible values are None, HostToContainer, and Bidirectional
mountPropagation: ""
## Assign a group of affinity scheduling rules
## The default nodeAffinity excludes Fargate nodes and virtual kubelets from scheduling
## unless overridden by hard node affinity set in the field.
affinity: {}
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchFields:
# - key: metadata.name
# operator: In
# values:
# - target-host-name
# Annotations to be added to node exporter pods
podAnnotations:
# Fix for very slow GKE cluster upgrades
cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
# Extra labels to add to node exporter pods (can be templated)
podLabels: {}
## Extra labels to attach to all resources (can be templated)
commonLabels: {}
# Annotations to be added to node exporter daemonset
daemonsetAnnotations: {}
## set to true to add the release label so scraping of the servicemonitor with kube-prometheus-stack works out of the box
releaseLabel: false
# Custom DNS configuration to be added to prometheus-node-exporter pods
dnsConfig: {}
# nameservers:
# - 1.2.3.4
# searches:
# - ns1.svc.cluster-domain.example
# - my.dns.search.suffix
# options:
# - name: ndots
# value: "2"
# - name: edns0
## Assign a nodeSelector if operating a hybrid cluster
##
nodeSelector:
kubernetes.io/os: linux
# kubernetes.io/arch: amd64
# Specify grace period for graceful termination of pods. Defaults to 30 if null or not specified
terminationGracePeriodSeconds: null
tolerations:
- effect: NoSchedule
operator: Exists
# Enable or disable container termination message settings
# https://kubernetes.io/docs/tasks/debug/debug-application/determine-reason-pod-failure/
terminationMessageParams:
enabled: false
# If enabled, specify the path for termination messages
terminationMessagePath: /dev/termination-log
# If enabled, specify the policy for termination messages
terminationMessagePolicy: File
## Assign a PriorityClassName to pods if set
# priorityClassName: ""
## Additional container arguments
##
extraArgs: []
# - --collector.diskstats.ignored-devices=^(ram|loop|fd|(h|s|v)d[a-z]|nvme\\d+n\\d+p)\\d+$
# - --collector.textfile.directory=/run/prometheus
## Additional mounts from the host to node-exporter container
##
extraHostVolumeMounts: []
# - name: <mountName>
# hostPath: <hostPath>
# https://kubernetes.io/docs/concepts/storage/volumes/#hostpath-volume-types
# type: "" (Default)|DirectoryOrCreate|Directory|FileOrCreate|File|Socket|CharDevice|BlockDevice
# mountPath: <mountPath>
# readOnly: true|false
# mountPropagation: None|HostToContainer|Bidirectional
## Additional configmaps to be mounted.
##
configmaps: []
# - name: <configMapName>
# mountPath: <mountPath>
secrets: []
# - name: <secretName>
# mountPath: <mountPatch>
## Override the deployment namespace
##
namespaceOverride: ""
## Additional containers for export metrics to text file; fields image,imagePullPolicy,securityContext take default value from main container
##
sidecars: []
# - name: nvidia-dcgm-exporter
# image: nvidia/dcgm-exporter:1.4.3
# volumeMounts:
# - name: tmp
# mountPath: /tmp
## Volume for sidecar containers
##
sidecarVolumeMount: []
# - name: collector-textfiles
# mountPath: /run/prometheus
# readOnly: false
## Additional mounts from the host to sidecar containers
##
sidecarHostVolumeMounts: []
# - name: <mountName>
# hostPath: <hostPath>
# mountPath: <mountPath>
# readOnly: true|false
# mountPropagation: None|HostToContainer|Bidirectional
## Additional InitContainers to initialize the pod
##
extraInitContainers: []
## Liveness probe
##
livenessProbe:
failureThreshold: 3
httpGet:
httpHeaders: []
scheme: http
initialDelaySeconds: 0
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
## Readiness probe
##
readinessProbe:
failureThreshold: 3
httpGet:
httpHeaders: []
scheme: http
initialDelaySeconds: 0
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
# Enable vertical pod autoscaler support for prometheus-node-exporter
verticalPodAutoscaler:
enabled: false
# Recommender responsible for generating recommendation for the object.
# List should be empty (then the default recommender will generate the recommendation)
# or contain exactly one recommender.
# recommenders:
# - name: custom-recommender-performance
# List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
controlledResources: []
# Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits.
# controlledValues: RequestsAndLimits
# Define the max allowed resources for the pod
maxAllowed: {}
# cpu: 200m
# memory: 100Mi
# Define the min allowed resources for the pod
minAllowed: {}
# cpu: 200m
# memory: 100Mi
# updatePolicy:
# Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction
# minReplicas: 1
# Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
# are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
# updateMode: Auto
# Extra manifests to deploy as an array
extraManifests: []
# - |
# apiVersion: v1
# kind: ConfigMap
# metadata:
# name: prometheus-extra
# data:
# extra-data: "value"
## Extra volumes to become available in the pod
extraVolumes: []
# - name: extra-volume
# secret:
# defaultMode: 420
# optional: false
# secretName: node-exporter-secret
## Extra volume mounts in the node-exporter container
extraVolumeMounts: []
# - name: extra-volume
# mountPath: /extra
# readOnly: true
# Override version of app, required if image.tag is defined and does not follow semver
version: ""

View File

@@ -0,0 +1,26 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS
ci/

View File

@@ -0,0 +1,27 @@
annotations:
artifacthub.io/license: Apache-2.0
artifacthub.io/links: |
- name: Chart Source
url: https://github.com/prometheus-community/helm-charts
apiVersion: v2
appVersion: v1.11.2
description: A Helm chart for prometheus pushgateway
home: https://github.com/prometheus/pushgateway
keywords:
- pushgateway
- prometheus
maintainers:
- email: gianrubio@gmail.com
name: gianrubio
url: https://github.com/gianrubio
- email: christian.staude@staffbase.com
name: cstaud
url: https://github.com/cstaud
- email: rootsandtrees@posteo.de
name: zeritti
url: https://github.com/zeritti
name: prometheus-pushgateway
sources:
- https://github.com/prometheus/pushgateway
type: application
version: 3.4.2

View File

@@ -0,0 +1,101 @@
# Prometheus Pushgateway
This chart bootstraps a Prometheus [Pushgateway](http://github.com/prometheus/pushgateway) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
An optional prometheus `ServiceMonitor` can be enabled, should you wish to use this gateway with [Prometheus Operator](https://github.com/coreos/prometheus-operator).
## Usage
The chart is distributed as an [OCI Artifact](https://helm.sh/docs/topics/registries/) as well as via a traditional [Helm Repository](https://helm.sh/docs/topics/chart_repository/).
- OCI Artifact: `oci://ghcr.io/prometheus-community/charts/prometheus-pushgateway`
- Helm Repository: `https://prometheus-community.github.io/helm-charts` with chart `prometheus-pushgateway`
The installation instructions use the OCI registry. Refer to the [`helm repo`](https://helm.sh/docs/helm/helm_repo/) command documentation for information on installing charts via the traditional repository.
### Install Chart
```console
helm install [RELEASE_NAME] oci://ghcr.io/prometheus-community/charts/prometheus-pushgateway
```
_See [configuration](#configuration) below._
_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
### Uninstall Chart
```console
helm uninstall [RELEASE_NAME]
```
This removes all the Kubernetes components associated with the chart and deletes the release.
_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
### Upgrading Chart
```console
helm upgrade [RELEASE_NAME] oci://ghcr.io/prometheus-community/charts/prometheus-pushgateway --install
```
_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
#### To 3.0.0
Previously, as dynamic labels were also set on the statefulset's volume claim template, it was not possible
to upgrade a chart release in a usual manner whilst using a statefulset and persistence due to the volume claim template's fields being immutable.
This release removes the dynamic labels from the statefulset's volume claim template.
If you have configured a statefulset with persistent storage,
please, delete the statefulset before upgrading:
```console
kubectl delete sts -l app.kubernetes.io/name=prometheus-pushgateway --cascade=orphan
```
#### To 2.0.0
Chart API version has been upgraded to v2 so Helm 3 is needed from now on.
Docker image tag is used from Chart.yaml appVersion field by default now.
Version 2.0.0 also adapted [Helm label and annotation best practices](https://helm.sh/docs/chart_best_practices/labels/). Specifically, labels mapping is listed below:
```console
OLD => NEW
----------------------------------------
heritage => app.kubernetes.io/managed-by
chart => helm.sh/chart
[container version] => app.kubernetes.io/version
app => app.kubernetes.io/name
release => app.kubernetes.io/instance
```
Therefore, depending on the way you've configured the chart, the previous StatefulSet or Deployment need to be deleted before upgrade.
If `runAsStatefulSet: false` (this is the default):
```console
kubectl delete deploy -l app=prometheus-pushgateway
```
If `runAsStatefulSet: true`:
```console
kubectl delete sts -l app=prometheus-pushgateway
```
After that do the actual upgrade:
```console
helm upgrade -i prometheus-pushgateway prometheus-community/prometheus-pushgateway
```
## Configuration
See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands:
```console
helm show values oci://ghcr.io/prometheus-community/charts/prometheus-pushgateway
```

View File

@@ -0,0 +1,19 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ template "prometheus-pushgateway.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus-pushgateway.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ template "prometheus-pushgateway.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get svc -w {{ template "prometheus-pushgateway.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ template "prometheus-pushgateway.namespace" . }} {{ template "prometheus-pushgateway.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ template "prometheus-pushgateway.namespace" . }} -l "app.kubernetes.io/name={{ template "prometheus-pushgateway.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
kubectl port-forward $POD_NAME 9091
echo "Visit http://127.0.0.1:9091 to use your application"
{{- end }}

View File

@@ -0,0 +1,297 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "prometheus-pushgateway.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Namespace to set on the resources
*/}}
{{- define "prometheus-pushgateway.namespace" -}}
{{/* Prefer an explicit .Values.namespaceOverride; an empty override falls back to the release namespace. */}}
{{- default .Release.Namespace .Values.namespaceOverride -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "prometheus-pushgateway.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "prometheus-pushgateway.chart" -}}
{{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "prometheus-pushgateway.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "prometheus-pushgateway.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Create default labels
*/}}
{{- define "prometheus-pushgateway.defaultLabels" -}}
helm.sh/chart: {{ include "prometheus-pushgateway.chart" . }}
{{ include "prometheus-pushgateway.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.podLabels }}
{{ toYaml . }}
{{- end }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "prometheus-pushgateway.selectorLabels" -}}
app.kubernetes.io/name: {{ include "prometheus-pushgateway.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Define webConfiguration
*/}}
{{- define "prometheus-pushgateway.webConfiguration" -}}
{{/* Render a web-config basic_auth_users map; each password is bcrypt-hashed via Sprig htpasswd (user prefix stripped). */}}
basic_auth_users:
{{- range $user, $pass := .Values.webConfiguration.basicAuthUsers }}
{{ $user }}: {{ htpasswd "" $pass | trimPrefix ":" }}
{{- end }}
{{- end }}
{{/*
Define Authorization
*/}}
{{- define "prometheus-pushgateway.Authorization" -}}
{{/* Emit "user:password" for the first configured basic-auth user (used to build probe Authorization headers). */}}
{{- $allUsers := keys .Values.webConfiguration.basicAuthUsers -}}
{{- $firstUser := first $allUsers -}}
{{- $firstUser }}:{{ index .Values.webConfiguration.basicAuthUsers $firstUser }}
{{- end }}
{{/*
Define basicAuth
*/}}
{{- define "prometheus-pushgateway.basicAuth" -}}
{{- $users := keys .Values.webConfiguration.basicAuthUsers }}
{{- $user := first $users }}
{{- $password := index .Values.webConfiguration.basicAuthUsers $user -}}
user: {{ $user | b64enc | quote }}
password: {{ $password | b64enc | quote }}
{{- end }}
{{/*
Set the image with or without the registry
*/}}
{{- define "prometheus-pushgateway.image" -}}
{{- $registry := default .Values.image.registry (.Values.global).imageRegistry }}
{{- $repository := .Values.image.repository }}
{{- $tag := default .Chart.AppVersion .Values.image.tag }}
{{- if $registry }}
{{- printf "%s/%s:%s" $registry $repository $tag -}}
{{- else -}}
{{- printf "%s:%s" $repository $tag -}}
{{- end -}}
{{- end -}}
{{/*
Returns pod spec
*/}}
{{- define "prometheus-pushgateway.podSpec" -}}
serviceAccountName: {{ include "prometheus-pushgateway.serviceAccountName" . }}
automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
{{- with .Values.priorityClassName }}
priorityClassName: {{ . | quote }}
{{- end }}
{{- with .Values.hostAliases }}
hostAliases:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with (.Values.global).imagePullSecrets | default .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.extraInitContainers }}
initContainers:
{{- toYaml . | nindent 2 }}
{{- end }}
containers:
{{- with .Values.extraContainers }}
{{- toYaml . | nindent 2 }}
{{- end }}
- name: pushgateway
image: {{ include "prometheus-pushgateway.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- with .Values.extraVars }}
env:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- if or .Values.extraArgs .Values.webConfiguration }}
args:
{{- with .Values.extraArgs }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- if .Values.webConfiguration }}
- --web.config.file=/etc/config/web-config.yaml
{{- end }}
{{- end }}
ports:
- name: metrics
containerPort: 9091
protocol: TCP
{{- if .Values.liveness.enabled }}
{{- $livenessCommon := omit .Values.liveness.probe "httpGet" }}
livenessProbe:
{{- with .Values.liveness.probe }}
httpGet:
path: {{ .httpGet.path }}
port: {{ .httpGet.port }}
{{- if or .httpGet.httpHeaders $.Values.webConfiguration.basicAuthUsers }}
httpHeaders:
{{- if $.Values.webConfiguration.basicAuthUsers }}
- name: Authorization
value: Basic {{ include "prometheus-pushgateway.Authorization" $ | b64enc }}
{{- end }}
{{- with .httpGet.httpHeaders }}
{{- toYaml . | nindent 10 }}
{{- end }}
{{- end }}
{{- toYaml $livenessCommon | nindent 6 }}
{{- end }}
{{- end }}
{{- if .Values.readiness.enabled }}
{{- $readinessCommon := omit .Values.readiness.probe "httpGet" }}
readinessProbe:
{{- with .Values.readiness.probe }}
httpGet:
path: {{ .httpGet.path }}
port: {{ .httpGet.port }}
{{- if or .httpGet.httpHeaders $.Values.webConfiguration.basicAuthUsers }}
httpHeaders:
{{- if $.Values.webConfiguration.basicAuthUsers }}
- name: Authorization
value: Basic {{ include "prometheus-pushgateway.Authorization" $ | b64enc }}
{{- end }}
{{- with .httpGet.httpHeaders }}
{{- toYaml . | nindent 10 }}
{{- end }}
{{- end }}
{{- toYaml $readinessCommon | nindent 6 }}
{{- end }}
{{- end }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.containerSecurityContext }}
securityContext:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.lifecycle }}
lifecycle: {{ toYaml . | nindent 6 }}
{{- end }}
volumeMounts:
- name: storage-volume
mountPath: "{{ .Values.persistentVolume.mountPath }}"
subPath: "{{ .Values.persistentVolume.subPath }}"
{{- if .Values.webConfiguration }}
- name: web-config
mountPath: "/etc/config"
{{- end }}
{{- with .Values.extraVolumeMounts }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- if or .Values.podAntiAffinity .Values.affinity }}
affinity:
{{- end }}
{{- with .Values.affinity }}
{{- toYaml . | nindent 2 }}
{{- end }}
{{- if eq .Values.podAntiAffinity "hard" }}
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: {{ .Values.podAntiAffinityTopologyKey }}
labelSelector:
matchExpressions:
- {key: app.kubernetes.io/name, operator: In, values: [{{ include "prometheus-pushgateway.name" . }}]}
{{- else if eq .Values.podAntiAffinity "soft" }}
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
topologyKey: {{ .Values.podAntiAffinityTopologyKey }}
labelSelector:
matchExpressions:
- {key: app.kubernetes.io/name, operator: In, values: [{{ include "prometheus-pushgateway.name" . }}]}
{{- end }}
{{- with .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 2 }}
{{- end }}
volumes:
{{- $storageVolumeAsPVCTemplate := and .Values.runAsStatefulSet .Values.persistentVolume.enabled -}}
{{- if not $storageVolumeAsPVCTemplate }}
- name: storage-volume
{{- if .Values.persistentVolume.enabled }}
persistentVolumeClaim:
claimName: {{ if .Values.persistentVolume.existingClaim }}{{ .Values.persistentVolume.existingClaim }}{{- else }}{{ include "prometheus-pushgateway.fullname" . }}{{- end }}
{{- else }}
emptyDir: {}
{{- end }}
{{- if .Values.webConfiguration }}
- name: web-config
secret:
secretName: {{ include "prometheus-pushgateway.fullname" . }}
{{- end }}
{{- end }}
{{- if .Values.extraVolumes }}
{{- toYaml .Values.extraVolumes | nindent 2 }}
{{- else if $storageVolumeAsPVCTemplate }}
{{- if .Values.webConfiguration }}
- name: web-config
secret:
secretName: {{ include "prometheus-pushgateway.fullname" . }}
{{- else }}
[]
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,32 @@
{{- if not .Values.runAsStatefulSet }}
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
{{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }}
{{- with .Values.deploymentAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "prometheus-pushgateway.fullname" . }}
namespace: {{ template "prometheus-pushgateway.namespace" . }}
spec:
replicas: {{ .Values.replicaCount }}
{{- with .Values.strategy }}
strategy:
{{- toYaml . | nindent 4 }}
{{- end }}
selector:
matchLabels:
{{- include "prometheus-pushgateway.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "prometheus-pushgateway.defaultLabels" . | nindent 8 }}
spec:
{{- include "prometheus-pushgateway.podSpec" . | nindent 6 }}
{{- end }}

View File

@@ -0,0 +1,8 @@
{{- range .Values.extraManifests }}
---
{{- if typeIs "string" . }}
{{- tpl . $ }}
{{- else }}
{{- tpl (. | toYaml | nindent 0) $ }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,41 @@
{{- if .Values.ingress.enabled }}
{{- $serviceName := include "prometheus-pushgateway.fullname" . }}
{{- $servicePort := .Values.service.port }}
{{- $ingressPath := .Values.ingress.path }}
{{- $ingressClassName := .Values.ingress.className }}
{{- $ingressPathType := .Values.ingress.pathType }}
{{- $extraPaths := .Values.ingress.extraPaths }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }}
name: {{ include "prometheus-pushgateway.fullname" . }}
namespace: {{ template "prometheus-pushgateway.namespace" . }}
spec:
ingressClassName: {{ $ingressClassName }}
rules:
{{- range $host := .Values.ingress.hosts }}
- host: {{ $host }}
http:
paths:
{{- with $extraPaths }}
{{- toYaml . | nindent 10 }}
{{- end }}
- path: {{ $ingressPath }}
pathType: {{ $ingressPathType }}
backend:
service:
name: {{ $serviceName }}
port:
number: {{ $servicePort }}
{{- end -}}
{{- with .Values.ingress.tls }}
tls:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,26 @@
{{- if .Values.networkPolicy }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
{{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }}
{{- if .Values.networkPolicy.customSelectors }}
name: ingress-allow-customselector-{{ template "prometheus-pushgateway.name" . }}
{{- else if .Values.networkPolicy.allowAll }}
name: ingress-allow-all-{{ template "prometheus-pushgateway.name" . }}
{{- else -}}
{{- fail "One of `allowAll` or `customSelectors` must be specified." }}
{{- end }}
namespace: {{ template "prometheus-pushgateway.namespace" . }}
spec:
podSelector:
matchLabels:
{{- include "prometheus-pushgateway.selectorLabels" . | nindent 6 }}
ingress:
- ports:
- port: {{ .Values.service.targetPort }}
{{- with .Values.networkPolicy.customSelectors }}
from:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,14 @@
{{- if .Values.podDisruptionBudget }}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
labels:
{{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }}
name: {{ include "prometheus-pushgateway.fullname" . }}
namespace: {{ template "prometheus-pushgateway.namespace" . }}
spec:
selector:
matchLabels:
{{- include "prometheus-pushgateway.selectorLabels" . | nindent 6 }}
{{- toYaml .Values.podDisruptionBudget | nindent 2 }}
{{- end }}

View File

@@ -0,0 +1,29 @@
{{- if and (not .Values.runAsStatefulSet) .Values.persistentVolume.enabled (not .Values.persistentVolume.existingClaim) }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
{{- with .Values.persistentVolume.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }}
{{- with .Values.persistentVolumeLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "prometheus-pushgateway.fullname" . }}
namespace: {{ template "prometheus-pushgateway.namespace" . }}
spec:
accessModes:
{{- toYaml .Values.persistentVolume.accessModes | nindent 4 }}
{{- if .Values.persistentVolume.storageClass }}
{{- if (eq "-" .Values.persistentVolume.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistentVolume.storageClass }}"
{{- end }}
{{- end }}
resources:
requests:
storage: "{{ .Values.persistentVolume.size }}"
{{- end }}

View File

@@ -0,0 +1,24 @@
{{- if .Values.webConfiguration }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "prometheus-pushgateway.fullname" . }}
namespace: {{ include "prometheus-pushgateway.namespace" . }}
labels:
{{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }}
data:
web-config.yaml: {{ include "prometheus-pushgateway.webConfiguration" . | b64enc}}
{{- end }}
---
{{- if and .Values.webConfiguration .Values.serviceMonitor.enabled (empty .Values.serviceMonitor.basicAuth) }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "prometheus-pushgateway.fullname" . }}-basic-auth
namespace: {{ default (include "prometheus-pushgateway.namespace" .) .Values.serviceMonitor.namespace }}
labels:
{{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }}
data:
{{- include "prometheus-pushgateway.basicAuth" . | nindent 2 }}
type: Opaque
{{- end }}

View File

@@ -0,0 +1,45 @@
{{- $stsNoHeadlessSvcTypes := list "LoadBalancer" "NodePort" -}}
apiVersion: v1
kind: Service
metadata:
{{- with .Values.serviceAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }}
{{- with .Values.serviceLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "prometheus-pushgateway.fullname" . }}
namespace: {{ template "prometheus-pushgateway.namespace" . }}
spec:
{{- if .Values.service.clusterIP }}
clusterIP: {{ .Values.service.clusterIP }}
{{ else if and .Values.runAsStatefulSet (not (has .Values.service.type $stsNoHeadlessSvcTypes)) }}
clusterIP: None # Headless service
{{- end }}
{{- if .Values.service.ipDualStack.enabled }}
ipFamilies: {{ toYaml .Values.service.ipDualStack.ipFamilies | nindent 4 }}
ipFamilyPolicy: {{ .Values.service.ipDualStack.ipFamilyPolicy }}
{{- end }}
type: {{ .Values.service.type }}
{{- with .Values.service.loadBalancerIP }}
loadBalancerIP: {{ . }}
{{- end }}
{{- if .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{- range $cidr := .Values.service.loadBalancerSourceRanges }}
- {{ $cidr }}
{{- end }}
{{- end }}
ports:
- port: {{ .Values.service.port }}
targetPort: {{ .Values.service.targetPort }}
{{- if and (eq .Values.service.type "NodePort") .Values.service.nodePort }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
protocol: TCP
name: {{ .Values.service.portName }}
selector:
{{- include "prometheus-pushgateway.selectorLabels" . | nindent 4 }}

View File

@@ -0,0 +1,17 @@
{{/*
ServiceAccount for the pushgateway pods; only created when
serviceAccount.create is true. Token automount is controlled by the
top-level automountServiceAccountToken value.
*/}}
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  labels:
    {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }}
    {{- with .Values.serviceAccountLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  name: {{ include "prometheus-pushgateway.serviceAccountName" . }}
  namespace: {{ template "prometheus-pushgateway.namespace" . }}
automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
{{- end }}

View File

@@ -0,0 +1,58 @@
{{/*
ServiceMonitor (Prometheus Operator) scraping the pushgateway Service.
basicAuth resolution: an explicit serviceMonitor.basicAuth value wins;
otherwise, when webConfiguration is set, the endpoint references the
generated <fullname>-basic-auth Secret (keys "user" and "password").
The namespaceSelector pins scraping to the chart's namespace.
*/}}
{{- if .Values.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
    {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }}
    {{- if .Values.serviceMonitor.additionalLabels }}
    {{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }}
    {{- end }}
  name: {{ include "prometheus-pushgateway.fullname" . }}
  namespace: {{ default (include "prometheus-pushgateway.namespace" .) .Values.serviceMonitor.namespace }}
spec:
  endpoints:
  - port: {{ .Values.service.portName }}
    {{- with .Values.serviceMonitor.interval }}
    interval: {{ . }}
    {{- end }}
    {{- with .Values.serviceMonitor.scheme }}
    scheme: {{ . }}
    {{- end }}
    {{- if and .Values.webConfiguration (empty .Values.serviceMonitor.basicAuth) }}
    basicAuth:
      password:
        name: {{ include "prometheus-pushgateway.fullname" . }}-basic-auth
        key: password
      username:
        name: {{ include "prometheus-pushgateway.fullname" . }}-basic-auth
        key: user
    {{- else if not (empty .Values.serviceMonitor.basicAuth) }}
    basicAuth: {{ toYaml .Values.serviceMonitor.basicAuth | nindent 6 }}
    {{- end }}
    {{- with .Values.serviceMonitor.bearerTokenFile }}
    bearerTokenFile: {{ . }}
    {{- end }}
    {{- with .Values.serviceMonitor.tlsConfig }}
    tlsConfig:
      {{- toYaml .| nindent 6 }}
    {{- end }}
    {{- with .Values.serviceMonitor.scrapeTimeout }}
    scrapeTimeout: {{ . }}
    {{- end }}
    path: {{ .Values.serviceMonitor.telemetryPath }}
    honorLabels: {{ .Values.serviceMonitor.honorLabels }}
    {{- with .Values.serviceMonitor.metricRelabelings }}
    metricRelabelings:
      {{- tpl (toYaml . | nindent 6) $ }}
    {{- end }}
    {{- with .Values.serviceMonitor.relabelings }}
    relabelings:
      {{- toYaml . | nindent 6 }}
    {{- end }}
  namespaceSelector:
    matchNames:
      - {{ template "prometheus-pushgateway.namespace" . }}
  selector:
    matchLabels:
      {{- include "prometheus-pushgateway.selectorLabels" . | nindent 6 }}
{{- end -}}

View File

@@ -0,0 +1,51 @@
{{/*
StatefulSet variant of the pushgateway, rendered when runAsStatefulSet is
true. The pod spec is shared with the Deployment via the
"prometheus-pushgateway.podSpec" helper. When persistentVolume.enabled is
set, a volumeClaimTemplate named "storage-volume" is added.
NOTE: accessModes is rendered with "nindent 10"; the previous bare
"{{ toYaml ... }}" only indented the first list item, producing invalid
YAML whenever more than one access mode was configured.
*/}}
{{- if .Values.runAsStatefulSet }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }}
  name: {{ include "prometheus-pushgateway.fullname" . }}
  namespace: {{ template "prometheus-pushgateway.namespace" . }}
spec:
  replicas: {{ .Values.replicaCount }}
  serviceName: {{ include "prometheus-pushgateway.fullname" . }}
  selector:
    matchLabels:
      {{- include "prometheus-pushgateway.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "prometheus-pushgateway.defaultLabels" . | nindent 8 }}
    spec:
      {{- include "prometheus-pushgateway.podSpec" . | nindent 6 }}
  {{- if .Values.persistentVolume.enabled }}
  volumeClaimTemplates:
    - metadata:
        {{- with .Values.persistentVolume.annotations }}
        annotations:
          {{- toYaml . | nindent 10 }}
        {{- end }}
        {{- with .Values.persistentVolumeLabels }}
        labels:
          {{- toYaml . | nindent 10 }}
        {{- end }}
        name: storage-volume
      spec:
        accessModes:
          {{- toYaml .Values.persistentVolume.accessModes | nindent 10 }}
        {{- if .Values.persistentVolume.storageClass }}
        {{- if (eq "-" .Values.persistentVolume.storageClass) }}
        storageClassName: ""
        {{- else }}
        storageClassName: "{{ .Values.persistentVolume.storageClass }}"
        {{- end }}
        {{- end }}
        resources:
          requests:
            storage: "{{ .Values.persistentVolume.size }}"
  {{- end }}
{{- end }}

View File

@@ -0,0 +1,393 @@
# Default values for prometheus-pushgateway.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
imageRegistry: ""
imagePullSecrets: []
# Provide a name in place of prometheus-pushgateway for `app:` labels
nameOverride: ""
# Provide a name to substitute for the full names of resources
fullnameOverride: ""
# Provide a namespace to substitute for the namespace on resources
namespaceOverride: ""
image:
registry: ""
repository: quay.io/prometheus/pushgateway
# if not set appVersion field from Chart.yaml is used
tag: ""
pullPolicy: IfNotPresent
# Optional pod imagePullSecrets
imagePullSecrets: []
service:
type: ClusterIP
port: 9091
targetPort: 9091
# nodePort: 32100
portName: http
# Optional - Can be used for headless if value is "None"
clusterIP: ""
ipDualStack:
enabled: false
ipFamilies: ["IPv6", "IPv4"]
ipFamilyPolicy: "PreferDualStack"
loadBalancerIP: ""
loadBalancerSourceRanges: []
# Whether to automatically mount a service account token into the pod
automountServiceAccountToken: true
# Optional deployment annotations
deploymentAnnotations: {}
# Optional pod annotations
podAnnotations: {}
# Optional pod labels
podLabels: {}
# Optional service annotations
serviceAnnotations: {}
# Optional service labels
serviceLabels: {}
# Optional serviceAccount labels
serviceAccountLabels: {}
# Optional persistentVolume labels
persistentVolumeLabels: {}
# Optional additional environment variables
extraVars: []
## Additional pushgateway container arguments
##
## example:
## extraArgs:
## - --persistence.file=/data/pushgateway.data
## - --persistence.interval=5m
extraArgs: []
## Additional InitContainers to initialize the pod
##
extraInitContainers: []
# Optional additional containers (sidecar)
extraContainers: []
# - name: oAuth2-proxy
# args:
# - -https-address=:9092
# - -upstream=http://localhost:9091
# - -skip-auth-regex=^/metrics
# - -openshift-delegate-urls={"/":{"group":"monitoring.coreos.com","resource":"prometheuses","verb":"get"}}
# image: openshift/oauth-proxy:v1.1.0
# ports:
# - containerPort: 9092
# name: proxy
# resources:
# limits:
# memory: 16Mi
# requests:
# memory: 4Mi
# cpu: 20m
# volumeMounts:
# - mountPath: /etc/prometheus/secrets/pushgateway-tls
# name: secret-pushgateway-tls
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 200m
# memory: 50Mi
# requests:
# cpu: 100m
# memory: 30Mi
# -- Sets web configuration
# To enable basic authentication, provide basicAuthUsers as a map
# If serviceMonitor.enabled is set, a secret with these credentials will be created
# and configured in serviceMonitor. serviceMonitor.basicAuth overrides this secret.
webConfiguration: {}
# basicAuthUsers:
# username: password
liveness:
enabled: true
probe:
httpGet:
path: /-/healthy
port: 9091
initialDelaySeconds: 10
timeoutSeconds: 10
readiness:
enabled: true
probe:
httpGet:
path: /-/ready
port: 9091
initialDelaySeconds: 10
timeoutSeconds: 10
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
## Configure ingress resource that allow you to access the
## pushgateway installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## Enable Ingress.
##
enabled: false
# AWS ALB requires path of /*
className: ""
path: /
pathType: ImplementationSpecific
## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
extraPaths: []
# - path: /*
# backend:
# serviceName: ssl-redirect
# servicePort: use-annotation
## Annotations.
##
# annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: 'true'
## Hostnames.
## Must be provided if Ingress is enabled.
##
# hosts:
# - pushgateway.domain.com
## TLS configuration.
## Secrets must be manually created in the namespace.
##
# tls:
# - secretName: pushgateway-tls
# hosts:
# - pushgateway.domain.com
tolerations: []
# - effect: NoSchedule
# operator: Exists
## Node labels for pushgateway pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
replicaCount: 1
hostAliases: []
# - ip: "127.0.0.1"
# hostnames:
# - "foo.local"
# - "bar.local"
# - ip: "10.1.2.3"
# hostnames:
# - "foo.remote"
# - "bar.remote"
## When running more than one replica alongside with persistence, different volumes are needed
## per replica, since sharing a `persistence.file` across replicas does not keep metrics synced.
## For this purpose, you can enable the `runAsStatefulSet` to deploy the pushgateway as a
## StatefulSet instead of as a Deployment.
runAsStatefulSet: false
## Security context to be added to push-gateway pods
##
securityContext:
fsGroup: 65534
runAsUser: 65534
runAsNonRoot: true
## Security context to be added to push-gateway containers
## Having a separate variable as securityContext differs for pods and containers.
containerSecurityContext: {}
# allowPrivilegeEscalation: false
# readOnlyRootFilesystem: true
# runAsUser: 65534
# runAsNonRoot: true
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
## Pod anti-affinity can prevent the scheduler from placing pushgateway replicas on the same node.
## The value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
## The default value "" will disable pod anti-affinity so that no anti-affinity rules will be configured (unless set in `affinity`).
##
podAntiAffinity: ""
## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
##
podAntiAffinityTopologyKey: kubernetes.io/hostname
## Topology spread constraints for pods
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
topologySpreadConstraints: []
# Enable this if you're using https://github.com/coreos/prometheus-operator
serviceMonitor:
enabled: false
namespace: monitoring
# telemetryPath: HTTP resource path from which to fetch metrics.
# Telemetry path, default /metrics, has to be prefixed accordingly if pushgateway sets a route prefix at start-up.
#
telemetryPath: "/metrics"
# Fallback to the prometheus default unless specified
interval: ""
## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
scheme: ""
## Basic authentication
basicAuth: {}
## Bearer token file
bearerTokenFile: ""
## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
tlsConfig: {}
# bearerTokenFile:
# Fallback to the prometheus default unless specified
scrapeTimeout: ""
## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
additionalLabels: {}
# Retain the job and instance labels of the metrics pushed to the Pushgateway
# [Scraping Pushgateway](https://github.com/prometheus/pushgateway#configure-the-pushgateway-as-a-target-to-scrape)
honorLabels: true
## Metric relabel configs to apply to samples before ingestion.
## [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs)
metricRelabelings: []
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]
## Relabel configs to apply to samples before ingestion.
## [Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config)
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
# The values to set in the PodDisruptionBudget spec (minAvailable/maxUnavailable)
# If not set then a PodDisruptionBudget will not be created
podDisruptionBudget: {}
priorityClassName:
# Deployment Strategy type
strategy:
type: Recreate
persistentVolume:
## If true, pushgateway will create/use a Persistent Volume Claim
## If false, use emptyDir
##
enabled: false
## pushgateway data Persistent Volume access modes
## Must match those of existing PV or dynamic provisioner
## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
accessModes:
- ReadWriteOnce
## pushgateway data Persistent Volume Claim annotations
##
annotations: {}
## pushgateway data Persistent Volume existing claim name
## Requires pushgateway.persistentVolume.enabled: true
## If defined, PVC must be created manually before volume will be bound
existingClaim: ""
## pushgateway data Persistent Volume mount root path
##
mountPath: /data
## pushgateway data Persistent Volume size
##
size: 2Gi
## pushgateway data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
## Subdirectory of pushgateway data Persistent Volume to mount
## Useful if the volume's root directory is not empty
##
subPath: ""
extraVolumes: []
# - name: extra
# emptyDir: {}
extraVolumeMounts: []
# - name: extra
# mountPath: /usr/share/extras
# readOnly: true
# Configuration for clusters with restrictive network policies in place:
# - allowAll allows access to the PushGateway from any namespace
# - customSelector is a list of pod/namespaceSelectors to allow access from
# These options are mutually exclusive and the latter will take precedence.
networkPolicy: {}
# allowAll: true
# customSelectors:
# - namespaceSelector:
# matchLabels:
# type: admin
# - podSelector:
# matchLabels:
# app: myapp
# Array of extra K8s objects to deploy (evaluated as a template)
# The value can hold an array of strings as well as objects
extraManifests: []
# Lifecycle hooks configuration
lifecycle: {}
# preStop:
# exec:
# command: ["/bin/sh", "-c", "sleep 30"]

View File

@@ -0,0 +1,118 @@
The Prometheus server can be accessed via port {{ .Values.server.service.servicePort }} on the following DNS name from within your cluster:
{{ template "prometheus.server.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
{{ if .Values.server.ingress.enabled -}}
For access from outside the cluster, the server URL(s) are:
{{- range .Values.server.ingress.hosts }}
http://{{ tpl . $ }}
{{- end }}
{{- else if .Values.server.route.main.enabled }}
For access from outside the cluster, the server URL(s) are:
{{- range .Values.server.route.main.hostnames }}
http://{{ tpl . $ }}
{{- end }}
{{- else }}
Get the Prometheus server URL by running these commands in the same shell:
{{- if contains "NodePort" .Values.server.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.server.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.server.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
  You can watch the status of it by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.server.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.server.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.server.service.servicePort }}
{{- else if contains "ClusterIP" .Values.server.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "prometheus.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME {{ include "prometheus.port" . }}
{{- end }}
{{- if .Values.server.persistentVolume.enabled }}
{{- else }}
#################################################################################
###### WARNING: Persistence is disabled!!! You will lose your data when #####
###### the Server pod is terminated. #####
#################################################################################
{{- end }}
{{- end }}
{{ if .Values.alertmanager.enabled }}
The Prometheus alertmanager can be accessed via port {{ .Values.alertmanager.service.port }} on the following DNS name from within your cluster:
{{ template "prometheus.alertmanager.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
{{ if .Values.alertmanager.ingress.enabled -}}
From outside the cluster, the alertmanager URL(s) are:
{{- range .Values.alertmanager.ingress.hosts }}
http://{{ . }}
{{- end }}
{{- else }}
Get the Alertmanager URL by running these commands in the same shell:
{{- if contains "NodePort" .Values.alertmanager.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.alertmanager.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.alertmanager.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
  You can watch the status of it by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.alertmanager.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.alertmanager.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.alertmanager.service.servicePort }}
{{- else if contains "ClusterIP" .Values.alertmanager.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "alertmanager.name" .Subcharts.alertmanager }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9093
{{- end }}
{{- end }}
{{- if .Values.alertmanager.persistence.enabled }}
{{- else }}
#################################################################################
###### WARNING: Persistence is disabled!!! You will lose your data when #####
###### the AlertManager pod is terminated. #####
#################################################################################
{{- end }}
{{- end }}
{{- if (index .Values "prometheus-node-exporter" "enabled") }}
#################################################################################
###### WARNING: Pod Security Policy has been disabled by default since #####
######   it was deprecated after k8s 1.25+. Use                            #####
###### (index .Values "prometheus-node-exporter" "rbac" #####
###### . "pspEnabled") with (index .Values #####
###### "prometheus-node-exporter" "rbac" "pspAnnotations") #####
###### in case you still need it. #####
#################################################################################
{{- end }}
{{ if (index .Values "prometheus-pushgateway" "enabled") }}
The Prometheus PushGateway can be accessed via port {{ index .Values "prometheus-pushgateway" "service" "port" }} on the following DNS name from within your cluster:
{{ include "prometheus-pushgateway.fullname" (index .Subcharts "prometheus-pushgateway") }}.{{ .Release.Namespace }}.svc.cluster.local
{{ if (index .Values "prometheus-pushgateway" "ingress" "enabled") -}}
From outside the cluster, the pushgateway URL(s) are:
{{- range (index .Values "prometheus-pushgateway" "ingress" "hosts") }}
http://{{ . }}
{{- end }}
{{- else }}
Get the PushGateway URL by running these commands in the same shell:
{{- $pushgateway_svc_type := index .Values "prometheus-pushgateway" "service" "type" -}}
{{- if contains "NodePort" $pushgateway_svc_type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "prometheus-pushgateway.fullname" (index .Subcharts "prometheus-pushgateway") }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" $pushgateway_svc_type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
  You can watch the status of it by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "prometheus-pushgateway.fullname" (index .Subcharts "prometheus-pushgateway") }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "prometheus-pushgateway.fullname" (index .Subcharts "prometheus-pushgateway") }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ index .Values "prometheus-pushgateway" "service" "port" }}
{{- else if contains "ClusterIP" $pushgateway_svc_type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "prometheus.name" (index .Subcharts "prometheus-pushgateway") }},component=pushgateway" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9091
{{- end }}
{{- end }}
{{- end }}
For more information on running Prometheus, visit:
https://prometheus.io/

View File

@@ -0,0 +1,180 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "prometheus.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Return the port for prometheus.
*/}}
{{- define "prometheus.port" -}}
9090
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "prometheus.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create labels for prometheus
*/}}
{{- define "prometheus.common.matchLabels" -}}
app.kubernetes.io/name: {{ include "prometheus.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create unified labels for prometheus components
*/}}
{{- define "prometheus.common.metaLabels" -}}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
helm.sh/chart: {{ include "prometheus.chart" . }}
app.kubernetes.io/part-of: {{ include "prometheus.name" . }}
{{- with .Values.commonMetaLabels}}
{{ toYaml . }}
{{- end }}
{{- end -}}
{{/*
Create unified labels for the Prometheus server component
(match labels plus the common meta labels).
*/}}
{{- define "prometheus.server.labels" -}}
{{ include "prometheus.server.matchLabels" . }}
{{ include "prometheus.common.metaLabels" . }}
{{- end -}}
{{/*
Create selector/match labels for the Prometheus server component.
*/}}
{{- define "prometheus.server.matchLabels" -}}
app.kubernetes.io/component: {{ .Values.server.name }}
{{ include "prometheus.common.matchLabels" . }}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "prometheus.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create a fully qualified ClusterRole name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "prometheus.clusterRoleName" -}}
{{- if .Values.server.clusterRoleNameOverride -}}
{{ .Values.server.clusterRoleNameOverride | trunc 63 | trimSuffix "-" }}
{{- else -}}
{{ include "prometheus.server.fullname" . }}
{{- end -}}
{{- end -}}
{{/*
Create a fully qualified alertmanager name for communicating and check to ensure that `alertmanager` exists before trying to use it with the user via NOTES.txt
*/}}
{{- define "prometheus.alertmanager.fullname" -}}
{{- if .Subcharts.alertmanager -}}
{{- template "alertmanager.fullname" .Subcharts.alertmanager -}}
{{- else -}}
{{- "alertmanager not found" -}}
{{- end -}}
{{- end -}}
{{/*
Create a fully qualified Prometheus server name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "prometheus.server.fullname" -}}
{{- if .Values.server.fullnameOverride -}}
{{- .Values.server.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.server.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.server.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Get KubeVersion removing pre-release information.
*/}}
{{- define "prometheus.kubeVersion" -}}
{{- default .Capabilities.KubeVersion.Version (regexFind "v[0-9]+\\.[0-9]+\\.[0-9]+" .Capabilities.KubeVersion.Version) -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for networkpolicy.
*/}}
{{- define "prometheus.networkPolicy.apiVersion" -}}
{{- print "networking.k8s.io/v1" -}}
{{- end -}}
{{/*
Create the name of the service account to use for the server component
*/}}
{{- define "prometheus.serviceAccountName.server" -}}
{{- if .Values.serviceAccounts.server.create -}}
{{ default (include "prometheus.server.fullname" .) .Values.serviceAccounts.server.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.server.name }}
{{- end -}}
{{- end -}}
{{/*
Define the prometheus.namespace template if set with forceNamespace or .Release.Namespace is set
*/}}
{{- define "prometheus.namespace" -}}
{{- default .Release.Namespace .Values.forceNamespace -}}
{{- end }}
{{/*
Define template prometheus.namespaces producing a list of namespaces to monitor
*/}}
{{- define "prometheus.namespaces" -}}
{{- $namespaces := list }}
{{- if and .Values.rbac.create .Values.server.useExistingClusterRoleName }}
{{- if .Values.server.namespaces -}}
{{- range $ns := join "," .Values.server.namespaces | split "," }}
{{- $namespaces = append $namespaces (tpl $ns $) }}
{{- end -}}
{{- end -}}
{{- if .Values.server.releaseNamespace -}}
{{- $namespaces = append $namespaces (include "prometheus.namespace" .) }}
{{- end -}}
{{- end -}}
{{ mustToJson $namespaces }}
{{- end -}}
{{/*
Define prometheus.server.remoteWrite producing a list of remoteWrite configurations with URL templating
*/}}
{{- define "prometheus.server.remoteWrite" -}}
{{- $remoteWrites := list }}
{{- range $remoteWrite := .Values.server.remoteWrite }}
{{- $remoteWrites = tpl $remoteWrite.url $ | set $remoteWrite "url" | append $remoteWrites }}
{{- end -}}
{{ toYaml $remoteWrites }}
{{- end -}}
{{/*
Define prometheus.server.remoteRead producing a list of remoteRead configurations with URL templating
*/}}
{{- define "prometheus.server.remoteRead" -}}
{{- $remoteReads := list }}
{{- range $remoteRead := .Values.server.remoteRead }}
{{- $remoteReads = tpl $remoteRead.url $ | set $remoteRead "url" | append $remoteReads }}
{{- end -}}
{{ toYaml $remoteReads }}
{{- end -}}

View File

@@ -0,0 +1,45 @@
{{/*
ClusterRole granting the Prometheus server read access (get/list/watch) to
nodes, services, endpoints/endpointslices, pods, ingresses, configmaps and
the /metrics non-resource URL. Skipped when RBAC is disabled or when an
existing ClusterRole is supplied via server.useExistingClusterRoleName.
NOTE(review): "ingresses" under the core ("") apiGroup appears to be a
legacy leftover alongside the networking.k8s.io rule — confirm before
removing.
*/}}
{{- if and .Values.rbac.create (empty .Values.server.useExistingClusterRoleName) -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    {{- include "prometheus.server.labels" . | nindent 4 }}
  name: {{ include "prometheus.clusterRoleName" . }}
rules:
  - apiGroups:
      - ""
    resources:
      - nodes
      - nodes/proxy
      - nodes/metrics
      - services
      - endpoints
      - pods
      - ingresses
      - configmaps
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "networking.k8s.io"
    resources:
      - ingresses/status
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "discovery.k8s.io"
    resources:
      - endpointslices
    verbs:
      - get
      - list
      - watch
  - nonResourceURLs:
      - "/metrics"
    verbs:
      - get
{{- end }}

View File

@@ -0,0 +1,16 @@
{{/*
ClusterRoleBinding tying the server ServiceAccount to the chart's
ClusterRole. Only created when RBAC is enabled, server.namespaces is empty
(no namespace-scoped monitoring requested) and no existing ClusterRole name
is supplied via server.useExistingClusterRoleName.
*/}}
{{- if and .Values.rbac.create (empty .Values.server.namespaces) (empty .Values.server.useExistingClusterRoleName) -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    {{- include "prometheus.server.labels" . | nindent 4 }}
  name: {{ include "prometheus.clusterRoleName" . }}
subjects:
  - kind: ServiceAccount
    name: {{ template "prometheus.serviceAccountName.server" . }}
    namespace: {{ include "prometheus.namespace" . }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ include "prometheus.clusterRoleName" . }}
{{- end }}

View File

@@ -0,0 +1,107 @@
{{/*
ConfigMap holding the Prometheus server configuration. Skipped when the
configuration comes from an override ConfigMap (server.configMapOverrideName)
or from a Secret (server.configFromSecret).
Contents:
  - one entry per .Values.ruleFiles key (verbatim)
  - one entry per .Values.serverFiles key; for "prometheus.yml" the template
    injects global settings, remote_write/remote_read, storage (tsdb /
    exemplars), otlp, scrape_config_files, extraScrapeConfigs and an
    alerting section — either the user-supplied server.alertmanagers or a
    pod-SD config that keeps pods labelled as this release's alertmanager
    on port 9093; for "alerts" a flat map is wrapped into rule groups.
NOTE(review): "allow-snippet-annotations" looks unrelated to Prometheus
config (it is an ingress-nginx setting) — confirm it is intentional here.
*/}}
{{- if and (empty .Values.server.configMapOverrideName) (empty .Values.server.configFromSecret) -}}
apiVersion: v1
kind: ConfigMap
metadata:
  {{- with .Values.server.configMapAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  labels:
    {{- include "prometheus.server.labels" . | nindent 4 }}
    {{- with .Values.server.extraConfigmapLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  name: {{ template "prometheus.server.fullname" . }}
  namespace: {{ include "prometheus.namespace" . }}
data:
  allow-snippet-annotations: "false"
  {{- $root := . -}}
  {{- range $key, $value := .Values.ruleFiles }}
  {{ $key }}: {{- toYaml $value | indent 2 }}
  {{- end }}
  {{- range $key, $value := .Values.serverFiles }}
  {{ $key }}: |
    {{- if eq $key "prometheus.yml" }}
    global:
      {{ $root.Values.server.global | toYaml | trimSuffix "\n" | indent 6 }}
    {{- if $root.Values.server.remoteWrite }}
    remote_write:
    {{- include "prometheus.server.remoteWrite" $root | nindent 4 }}
    {{- end }}
    {{- if $root.Values.server.remoteRead }}
    remote_read:
    {{- include "prometheus.server.remoteRead" $root | nindent 4 }}
    {{- end }}
    {{- if or $root.Values.server.tsdb $root.Values.server.exemplars }}
    storage:
      {{- if $root.Values.server.tsdb }}
      tsdb:
        {{ $root.Values.server.tsdb | toYaml | indent 8 }}
      {{- end }}
      {{- if $root.Values.server.exemplars }}
      exemplars:
        {{ $root.Values.server.exemplars | toYaml | indent 8 }}
      {{- end }}
    {{- end }}
    {{- if $root.Values.server.otlp }}
    otlp:
      {{ $root.Values.server.otlp | toYaml | indent 8 }}
    {{- end }}
    {{- if $root.Values.scrapeConfigFiles }}
    scrape_config_files:
    {{ toYaml $root.Values.scrapeConfigFiles | indent 4 }}
    {{- end }}
    {{- end }}
    {{- if eq $key "alerts" }}
    {{- if and (not (empty $value)) (empty $value.groups) }}
    groups:
      {{- range $ruleKey, $ruleValue := $value }}
      - name: {{ $ruleKey -}}.rules
        rules:
          {{ $ruleValue | toYaml | trimSuffix "\n" | indent 6 }}
      {{- end }}
    {{- else }}
    {{ toYaml $value | indent 4 }}
    {{- end }}
    {{- else }}
    {{ toYaml $value | default "{}" | indent 4 }}
    {{- end }}
    {{- if eq $key "prometheus.yml" -}}
    {{- if $root.Values.extraScrapeConfigs }}
    {{ tpl $root.Values.extraScrapeConfigs $root | indent 4 }}
    {{- end -}}
    {{- if or ($root.Values.alertmanager.enabled) ($root.Values.server.alertmanagers) }}
    alerting:
      {{- if $root.Values.alertRelabelConfigs }}
      {{ $root.Values.alertRelabelConfigs | toYaml | trimSuffix "\n" | indent 6 }}
      {{- end }}
      alertmanagers:
        {{- if $root.Values.server.alertmanagers }}
        {{ toYaml $root.Values.server.alertmanagers | indent 8 }}
        {{- else }}
        - kubernetes_sd_configs:
            - role: pod
          tls_config:
            ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
          bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
          {{- if $root.Values.alertmanager.prefixURL }}
          path_prefix: {{ $root.Values.alertmanager.prefixURL }}
          {{- end }}
          relabel_configs:
            - source_labels: [__meta_kubernetes_namespace]
              regex: {{ $root.Release.Namespace }}
              action: keep
            - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance]
              regex: {{ $root.Release.Name }}
              action: keep
            - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name]
              regex: {{ default "alertmanager" $root.Values.alertmanager.nameOverride | trunc 63 | trimSuffix "-" }}
              action: keep
            - source_labels: [__meta_kubernetes_pod_container_port_number]
              regex: "9093"
              action: keep
        {{- end -}}
    {{- end -}}
  {{- end -}}
  {{- end -}}
{{- end -}}

View File

@@ -0,0 +1,488 @@
apiVersion: apps/v1
{{- /* Workload kind is selected from values, in priority order:
       server.statefulSet.enabled > server.daemonSet.enabled > Deployment (default).
       Each branch emits its own metadata block; name/namespace below are shared. */}}
{{- if .Values.server.statefulSet.enabled }}
kind: StatefulSet
metadata:
  {{- if .Values.server.deploymentAnnotations }}
  {{- /* NOTE(review): the StatefulSet branch reuses server.deploymentAnnotations
         (there is no dedicated statefulSet annotations value here) — confirm intended. */}}
  annotations:
{{ toYaml .Values.server.deploymentAnnotations | nindent 4 }}
  {{- end }}
  labels:
    {{- include "prometheus.server.labels" . | nindent 4 }}
    {{- with .Values.server.statefulSet.labels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
{{- else if .Values.server.daemonSet.enabled }}
kind: DaemonSet
metadata:
  labels:
    {{- include "prometheus.server.labels" . | nindent 4 }}
    {{- with .Values.server.daemonSet.labels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
{{- else }}
kind: Deployment
metadata:
  labels:
    {{- include "prometheus.server.labels" . | nindent 4 }}
  {{- with .Values.server.deploymentAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}
  name: {{ template "prometheus.server.fullname" . }}
  namespace: {{ include "prometheus.namespace" . }}
spec:
  {{- if .Values.server.statefulSet.enabled }}
  {{- /* PVC retention policy requires Kubernetes >= 1.27 (gated by the chart's
         kubeVersion helper); ternary maps the boolean values to Delete/Retain. */}}
  {{- if semverCompare ">= 1.27.x" (include "prometheus.kubeVersion" .) }}
  persistentVolumeClaimRetentionPolicy:
    whenDeleted: {{ ternary "Delete" "Retain" .Values.server.statefulSet.pvcDeleteOnStsDelete }}
    whenScaled: {{ ternary "Delete" "Retain" .Values.server.statefulSet.pvcDeleteOnStsScale }}
  {{- end }}
  podManagementPolicy: {{ .Values.server.statefulSet.podManagementPolicy }}
  serviceName: {{ template "prometheus.server.fullname" . }}-headless
  {{- with .Values.server.statefulSet.updateStrategy }}
  updateStrategy:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  {{- else if .Values.server.daemonSet.enabled }}
  {{- with .Values.server.daemonSet.updateStrategy }}
  updateStrategy:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  {{- else }}
  {{- with .Values.server.strategy }}
  strategy:
    {{- toYaml . | nindent 4 }}
    {{- /* A Recreate strategy must not carry rollingUpdate parameters, so they
           are explicitly nulled when strategy.type is "Recreate". */}}
    {{ if eq .type "Recreate" }}rollingUpdate: null{{ end }}
  {{- end }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "prometheus.server.matchLabels" . | nindent 6 }}
  {{- /* DaemonSets schedule one pod per node and reject a replicas field. */}}
  {{- if not .Values.server.daemonSet.enabled }}
  replicas: {{ .Values.server.replicaCount }}
  {{- end }}
  revisionHistoryLimit: {{ .Values.server.revisionHistoryLimit }}
  template:
    metadata:
      {{- if .Values.server.podAnnotations }}
      annotations:
{{ toYaml .Values.server.podAnnotations | nindent 8 }}
      {{- end }}
      labels:
        {{- include "prometheus.server.labels" . | nindent 8 }}
        {{- if .Values.server.podLabels}}
{{ toYaml .Values.server.podLabels | nindent 8 }}
        {{- end}}
    spec:
      {{- if .Values.server.priorityClassName }}
      priorityClassName: "{{ .Values.server.priorityClassName }}"
      {{- end }}
      {{- if .Values.server.runtimeClassName }}
      runtimeClassName: "{{ .Values.server.runtimeClassName }}"
      {{- end }}
      {{- if .Values.server.schedulerName }}
      schedulerName: "{{ .Values.server.schedulerName }}"
      {{- end }}
      {{- /* enableServiceLinks exists only on k8s >= 1.13; an unset value
             (toString "<nil>") is treated the same as true. */}}
      {{- if semverCompare ">=1.13-0" .Capabilities.KubeVersion.GitVersion }}
      {{- if or (.Values.server.enableServiceLinks) (eq (.Values.server.enableServiceLinks | toString) "<nil>") }}
      enableServiceLinks: true
      {{- else }}
      enableServiceLinks: false
      {{- end }}
      {{- end }}
      serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }}
      {{- /* Only rendered when explicitly set to a boolean, so an unset value
             falls back to the cluster default instead of forcing false. */}}
      {{- if kindIs "bool" .Values.server.automountServiceAccountToken }}
      automountServiceAccountToken: {{ .Values.server.automountServiceAccountToken }}
      {{- end }}
      {{- if .Values.server.extraInitContainers }}
      initContainers:
{{ toYaml .Values.server.extraInitContainers | indent 8 }}
      {{- end }}
containers:
{{- if .Values.configmapReload.prometheus.enabled }}
- name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }}
{{- if .Values.configmapReload.prometheus.image.digest }}
image: "{{ tpl .Values.configmapReload.prometheus.image.repository . }}@{{ tpl .Values.configmapReload.prometheus.image.digest . }}"
{{- else }}
image: "{{ tpl .Values.configmapReload.prometheus.image.repository . }}:{{ tpl .Values.configmapReload.prometheus.image.tag . }}"
{{- end }}
imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}"
{{- with .Values.configmapReload.prometheus.containerSecurityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
args:
- --watched-dir=/etc/config
{{- $default_url := (printf "http://127.0.0.1:%s/-/reload" (include "prometheus.port" .)) }}
{{- with .Values.server.prefixURL }}
{{- $default_url = printf "http://127.0.0.1:%s%s/-/reload" (include "prometheus.port" .) . }}
{{- end }}
{{- if .Values.configmapReload.prometheus.containerPort }}
- --listen-address=0.0.0.0:{{ .Values.configmapReload.prometheus.containerPort }}
{{- end }}
- --reload-url={{ default $default_url .Values.configmapReload.reloadUrl }}
{{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }}
{{- if $value }}
- --{{ $key }}={{ $value }}
{{- else }}
- --{{ $key }}
{{- end }}
{{- end }}
{{- range .Values.configmapReload.prometheus.extraVolumeDirs }}
- --watched-dir={{ . }}
{{- end }}
{{- with .Values.configmapReload.env }}
env:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.configmapReload.prometheus.containerPort }}
ports:
- containerPort: {{ .Values.configmapReload.prometheus.containerPort }}
{{- if .Values.configmapReload.prometheus.containerPortName }}
name: {{ .Values.configmapReload.prometheus.containerPortName }}
{{- end }}
{{- end }}
{{- with .Values.configmapReload.prometheus.livenessProbe }}
livenessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.configmapReload.prometheus.readinessProbe }}
readinessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.configmapReload.prometheus.startupProbe.enabled }}
{{- $startupProbe := omit .Values.configmapReload.prometheus.startupProbe "enabled" }}
startupProbe:
{{- toYaml $startupProbe | nindent 12 }}
{{- end }}
{{- with .Values.configmapReload.prometheus.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
volumeMounts:
- name: config-volume
mountPath: /etc/config
readOnly: true
{{- range .Values.configmapReload.prometheus.extraConfigmapMounts }}
- name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }}
mountPath: {{ .mountPath }}
subPath: {{ .subPath }}
readOnly: {{ .readOnly }}
{{- end }}
{{- with .Values.configmapReload.prometheus.extraVolumeMounts }}
{{ toYaml . | nindent 12 }}
{{- end }}
{{- end }}
        {{- /* Main Prometheus server container. Flags are either taken wholesale
               from server.defaultFlagsOverride or assembled from individual values. */}}
        - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}
          {{- if .Values.server.image.digest }}
          image: "{{ tpl .Values.server.image.repository . }}@{{ tpl .Values.server.image.digest . }}"
          {{- else }}
          image: "{{ tpl .Values.server.image.repository . }}:{{ tpl .Values.server.image.tag . | default .Chart.AppVersion}}"
          {{- end }}
          imagePullPolicy: "{{ .Values.server.image.pullPolicy }}"
          {{- with .Values.server.command }}
          command:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- if .Values.server.env }}
          env:
{{ toYaml .Values.server.env | indent 12}}
          {{- end }}
          args:
          {{- if .Values.server.defaultFlagsOverride }}
          {{ toYaml .Values.server.defaultFlagsOverride | nindent 12}}
          {{- else }}
          {{- if .Values.server.retention }}
            - --storage.tsdb.retention.time={{ .Values.server.retention }}
          {{- end }}
          {{- if .Values.server.retentionSize }}
            - --storage.tsdb.retention.size={{ .Values.server.retentionSize }}
          {{- end }}
            - --config.file={{ .Values.server.configPath }}
          {{- /* storagePath overrides the PV mount path as the TSDB location. */}}
          {{- if .Values.server.storagePath }}
            - --storage.tsdb.path={{ .Values.server.storagePath }}
          {{- else }}
            - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }}
          {{- end }}
            - --web.console.libraries=/etc/prometheus/console_libraries
            - --web.console.templates=/etc/prometheus/consoles
          {{- /* extraFlags are bare switches; extraArgs are key[=value] pairs. */}}
          {{- range .Values.server.extraFlags }}
            - --{{ . }}
          {{- end }}
          {{- range $key, $value := .Values.server.extraArgs }}
          {{- if $value }}
            - --{{ $key }}={{ $value }}
          {{- else }}
            - --{{ $key }}
          {{- end }}
          {{- end }}
          {{- if .Values.server.prefixURL }}
            - --web.route-prefix={{ .Values.server.prefixURL }}
          {{- end }}
          {{- if .Values.server.baseURL }}
            - --web.external-url={{ .Values.server.baseURL }}
          {{- end }}
          {{- end }}
          ports:
            - containerPort: {{ include "prometheus.port" . }}
              {{- if .Values.server.portName }}
              name: {{ .Values.server.portName }}
              {{- end }}
              {{- if .Values.server.hostPort }}
              hostPort: {{ .Values.server.hostPort }}
              {{- end }}
          {{- /* Probes use Prometheus' /-/ready and /-/healthy HTTP endpoints,
                 or a plain TCP check when tcpSocketProbeEnabled is set. */}}
          readinessProbe:
            {{- if not .Values.server.tcpSocketProbeEnabled }}
            httpGet:
              path: {{ .Values.server.prefixURL }}/-/ready
              port: {{ default (include "prometheus.port" .) .Values.server.portName }}
              scheme: {{ .Values.server.probeScheme }}
              {{- with .Values.server.probeHeaders }}
              httpHeaders:
                {{- toYaml . | nindent 14 }}
              {{- end }}
            {{- else }}
            tcpSocket:
              port: {{ default (include "prometheus.port" .) .Values.server.portName }}
            {{- end }}
            initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }}
            periodSeconds: {{ .Values.server.readinessProbePeriodSeconds }}
            timeoutSeconds: {{ .Values.server.readinessProbeTimeout }}
            failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }}
            successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }}
          livenessProbe:
            {{- if not .Values.server.tcpSocketProbeEnabled }}
            httpGet:
              path: {{ .Values.server.prefixURL }}/-/healthy
              port: {{ default (include "prometheus.port" .) .Values.server.portName }}
              scheme: {{ .Values.server.probeScheme }}
              {{- with .Values.server.probeHeaders }}
              httpHeaders:
                {{- toYaml . | nindent 14 }}
              {{- end }}
            {{- else }}
            tcpSocket:
              port: {{ default (include "prometheus.port" .) .Values.server.portName }}
            {{- end }}
            initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }}
            periodSeconds: {{ .Values.server.livenessProbePeriodSeconds }}
            timeoutSeconds: {{ .Values.server.livenessProbeTimeout }}
            failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }}
            successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }}
          {{- if .Values.server.startupProbe.enabled }}
          startupProbe:
            {{- if not .Values.server.tcpSocketProbeEnabled }}
            httpGet:
              path: {{ .Values.server.prefixURL }}/-/healthy
              port: {{ default (include "prometheus.port" .) .Values.server.portName }}
              scheme: {{ .Values.server.probeScheme }}
              {{- /* NOTE(review): unlike the other probes (toYaml), headers here are
                     rendered field-by-field — equivalent output, just inconsistent style. */}}
              {{- if .Values.server.probeHeaders }}
              httpHeaders:
                {{- range .Values.server.probeHeaders}}
                - name: {{ .name }}
                  value: {{ .value }}
                {{- end }}
              {{- end }}
            {{- else }}
            tcpSocket:
              port: {{ default (include "prometheus.port" .) .Values.server.portName }}
            {{- end }}
            failureThreshold: {{ .Values.server.startupProbe.failureThreshold }}
            periodSeconds: {{ .Values.server.startupProbe.periodSeconds }}
            timeoutSeconds: {{ .Values.server.startupProbe.timeoutSeconds }}
          {{- end }}
          {{- with .Values.server.resources }}
          resources:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- /* Storage volume is named "storage-volume" unless this is a StatefulSet
                 with persistence enabled AND statefulSetNameOverride set, in which
                 case the override is used (must match the PVC template name below). */}}
          {{- $storageVolumeName := ternary .Values.server.persistentVolume.statefulSetNameOverride "storage-volume" (and .Values.server.persistentVolume.enabled .Values.server.statefulSet.enabled (not (empty .Values.server.persistentVolume.statefulSetNameOverride))) }}
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config
            - name: {{ $storageVolumeName }}
              mountPath: {{ .Values.server.persistentVolume.mountPath }}
              subPath: "{{ .Values.server.persistentVolume.subPath }}"
            {{- range .Values.server.extraHostPathMounts }}
            - name: {{ .name }}
              mountPath: {{ .mountPath }}
              subPath: {{ .subPath }}
              readOnly: {{ .readOnly }}
            {{- end }}
            {{- range .Values.server.extraConfigmapMounts }}
            - name: {{ $.Values.server.name }}-{{ .name }}
              mountPath: {{ .mountPath }}
              subPath: {{ .subPath }}
              readOnly: {{ .readOnly }}
            {{- end }}
            {{- range .Values.server.extraSecretMounts }}
            - name: {{ .name }}
              mountPath: {{ .mountPath }}
              subPath: {{ .subPath }}
              readOnly: {{ .readOnly }}
            {{- end }}
            {{- if .Values.server.extraVolumeMounts }}
{{ toYaml .Values.server.extraVolumeMounts | nindent 12 }}
            {{- end }}
          {{- with .Values.server.containerSecurityContext }}
          securityContext:
            {{- toYaml . | nindent 12 }}
          {{- end }}
        {{- /* Sidecars may be given as a templated string (rendered via tpl) or
               as a structured container spec (rendered via toYaml). */}}
        {{- if .Values.server.sidecarContainers }}
        {{- range $name, $spec := .Values.server.sidecarContainers }}
        - name: {{ $name }}
          {{- if kindIs "string" $spec }}
          {{- tpl $spec $ | nindent 10 }}
          {{- else }}
          {{- toYaml $spec | nindent 10 }}
          {{- end }}
        {{- end }}
        {{- end }}
      {{- /* hostNetwork forces ClusterFirstWithHostNet so in-cluster DNS still works. */}}
      {{- if .Values.server.hostNetwork }}
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      {{- else }}
      dnsPolicy: {{ .Values.server.dnsPolicy }}
      {{- end }}
      {{- if .Values.imagePullSecrets }}
      imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
      {{- end }}
      {{- if .Values.server.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.server.nodeSelector | indent 8 }}
      {{- end }}
      {{- if .Values.server.hostAliases }}
      hostAliases:
{{ toYaml .Values.server.hostAliases | indent 8 }}
      {{- end }}
      {{- if .Values.server.dnsConfig }}
      dnsConfig:
{{ toYaml .Values.server.dnsConfig | indent 8 }}
      {{- end }}
      {{- with .Values.server.securityContext }}
      securityContext:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- if .Values.server.tolerations }}
      tolerations:
{{ toYaml .Values.server.tolerations | indent 8 }}
      {{- end }}
      {{- /* The affinity: key is emitted once, then filled from server.affinity
             and/or the hard/soft podAntiAffinity presets keyed on app name. */}}
      {{- if or .Values.server.affinity .Values.server.podAntiAffinity }}
      affinity:
      {{- end }}
      {{- with .Values.server.affinity }}
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- if eq .Values.server.podAntiAffinity "hard" }}
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - topologyKey: {{ .Values.server.podAntiAffinityTopologyKey }}
              labelSelector:
                matchExpressions:
                  - {key: app.kubernetes.io/name, operator: In, values: [{{ template "prometheus.name" . }}]}
      {{- else if eq .Values.server.podAntiAffinity "soft" }}
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                topologyKey: {{ .Values.server.podAntiAffinityTopologyKey }}
                labelSelector:
                  matchExpressions:
                    - {key: app.kubernetes.io/name, operator: In, values: [{{ template "prometheus.name" . }}]}
      {{- end }}
      {{- with .Values.server.topologySpreadConstraints }}
      topologySpreadConstraints:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }}
      volumes:
        {{- /* Config comes from the chart's ConfigMap (optionally overridden by
               configMapOverrideName) or, if configFromSecret is set, a Secret. */}}
        - name: config-volume
          {{- if empty .Values.server.configFromSecret }}
          configMap:
            name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }}
          {{- else }}
          secret:
            secretName: {{ .Values.server.configFromSecret }}
          {{- end }}
        {{- range .Values.server.extraHostPathMounts }}
        - name: {{ .name }}
          hostPath:
            path: {{ .hostPath }}
        {{- end }}
        {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }}
        - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }}
          configMap:
            name: {{ .configMap }}
        {{- end }}
        {{- range .Values.server.extraConfigmapMounts }}
        - name: {{ $.Values.server.name }}-{{ .name }}
          configMap:
            name: {{ .configMap }}
        {{- end }}
        {{- range .Values.server.extraSecretMounts }}
        - name: {{ .name }}
          secret:
            secretName: {{ .secretName }}
            {{- with .optional }}
            optional: {{ . }}
            {{- end }}
        {{- end }}
        {{- /* NOTE(review): this second range over the same
               configmapReload.prometheus.extraConfigmapMounts list emits additional
               volumes named {{ .name }} (unprefixed) that no mount above references —
               presumably redundant; verify before removing. */}}
        {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }}
        - name: {{ .name }}
          configMap:
            name: {{ .configMap }}
            {{- with .optional }}
            optional: {{ . }}
            {{- end }}
        {{- end }}
        {{- if .Values.server.extraVolumes }}
{{ toYaml .Values.server.extraVolumes | indent 8}}
        {{- end }}
        {{- /* Non-StatefulSet persistence uses a PVC volume (existing claim or the
               chart-managed one); without persistence, an emptyDir is used instead.
               StatefulSets get their storage from volumeClaimTemplates below. */}}
        {{- if and .Values.server.persistentVolume.enabled (not .Values.server.statefulSet.enabled) }}
        - name: {{ $storageVolumeName }}
          persistentVolumeClaim:
            claimName: {{ if .Values.server.persistentVolume.existingClaim }}{{ .Values.server.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }}
        {{- else if not .Values.server.persistentVolume.enabled }}
        - name: {{ $storageVolumeName }}
          emptyDir:
          {{- if or .Values.server.emptyDir.sizeLimit .Values.server.emptyDir.medium }}
            {{- if .Values.server.emptyDir.medium }}
            medium: {{ .Values.server.emptyDir.medium }}
            {{- end }}
            {{- if .Values.server.emptyDir.sizeLimit }}
            sizeLimit: {{ .Values.server.emptyDir.sizeLimit }}
            {{- end }}
          {{- else }}
            {}
          {{- end -}}
        {{- end -}}
  {{- if and .Values.server.statefulSet.enabled .Values.server.persistentVolume.enabled }}
  volumeClaimTemplates:
    - apiVersion: v1
      kind: PersistentVolumeClaim
      metadata:
        name: {{ $storageVolumeName }}
        {{- with .Values.server.persistentVolume.annotations }}
        annotations:
          {{- toYaml . | nindent 10 }}
        {{- end }}
        {{- with .Values.server.persistentVolume.labels }}
        labels:
          {{- toYaml . | nindent 10 }}
        {{- end }}
      spec:
        accessModes:
          {{- toYaml .Values.server.persistentVolume.accessModes | nindent 10 }}
        resources:
          requests:
            storage: "{{ .Values.server.persistentVolume.size }}"
        {{- /* storageClass "-" means "no storage class" (empty string), which
               disables dynamic provisioning; unset means cluster default. */}}
        {{- if .Values.server.persistentVolume.storageClass }}
        {{- if (eq "-" .Values.server.persistentVolume.storageClass) }}
        storageClassName: ""
        {{- else }}
        storageClassName: "{{ .Values.server.persistentVolume.storageClass }}"
        {{- end }}
        {{- end }}
  {{- end }}

View File

@@ -0,0 +1,4 @@
{{ range .Values.extraManifests }}
{{- /* Each extraManifests entry is rendered via tpl as its own YAML document;
       entries are therefore expected to be template strings. */}}
---
{{ tpl . $ }}
{{ end }}

View File

@@ -0,0 +1,32 @@
{{/* Headless Service (clusterIP: None) backing the server StatefulSet; its name
     must match the StatefulSet's serviceName ("<fullname>-headless"). Only
     rendered when server.statefulSet.enabled. */}}
{{- if .Values.server.statefulSet.enabled -}}
apiVersion: v1
kind: Service
metadata:
  {{- if .Values.server.statefulSet.headless.annotations }}
  annotations:
{{ toYaml .Values.server.statefulSet.headless.annotations | indent 4 }}
  {{- end }}
  labels:
    {{- include "prometheus.server.labels" . | nindent 4 }}
    {{- if .Values.server.statefulSet.headless.labels }}
{{ toYaml .Values.server.statefulSet.headless.labels | indent 4 }}
    {{- end }}
  name: {{ template "prometheus.server.fullname" . }}-headless
  namespace: {{ include "prometheus.namespace" . }}
spec:
  clusterIP: None
  ports:
    - name: http
      port: {{ .Values.server.statefulSet.headless.servicePort }}
      protocol: TCP
      targetPort: {{ default (include "prometheus.port" .) .Values.server.portName }}
    {{- /* Optional gRPC port; targetPort 10901 is the conventional Thanos sidecar
           gRPC port — confirm against the deployed sidecar. */}}
    {{- if .Values.server.statefulSet.headless.gRPC.enabled }}
    - name: grpc
      port: {{ .Values.server.statefulSet.headless.gRPC.servicePort }}
      protocol: TCP
      targetPort: 10901
    {{- end }}
  selector:
    {{- include "prometheus.server.matchLabels" . | nindent 4 }}
{{- end -}}

View File

@@ -0,0 +1,45 @@
{{/* Renders one Gateway API route (HTTPRoute by default) per enabled entry in
     .Values.server.route. NOTE(review): metadata.name is the same server
     fullname for every entry — with more than one enabled route the documents
     collide; confirm whether $name should be part of the name. */}}
{{- range $name, $route := .Values.server.route }}
{{- if $route.enabled }}
---
apiVersion: {{ $route.apiVersion | default "gateway.networking.k8s.io/v1" }}
kind: {{ $route.kind | default "HTTPRoute" }}
metadata:
  {{- with $route.annotations }}
  annotations: {{ toYaml . | nindent 4 }}
  {{- end }}
  name: {{ include "prometheus.server.fullname" $ }}
  namespace: {{ include "prometheus.namespace" $ }}
  labels: {{ include "prometheus.server.labels" $ | nindent 4 }}
    {{- with $route.labels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
spec:
  {{- with $route.parentRefs }}
  parentRefs: {{ toYaml . | nindent 4 }}
  {{- end }}
  {{- /* hostnames go through tpl so values may contain template expressions. */}}
  {{- with $route.hostnames }}
  hostnames: {{ tpl (toYaml .) $ | nindent 4 }}
  {{- end }}
  rules:
    {{- with $route.additionalRules }}
    {{- tpl (toYaml .) $ | nindent 4 }}
    {{- end }}
    {{- /* httpsRedirect replaces the backend rule with a 301 redirect filter. */}}
    {{- if $route.httpsRedirect }}
    - filters:
        - type: RequestRedirect
          requestRedirect:
            scheme: https
            statusCode: 301
    {{- else }}
    - backendRefs:
        - name: {{ include "prometheus.server.fullname" $ }}
          port: {{ $.Values.server.service.servicePort }}
      {{- with $route.filters }}
      filters: {{ toYaml . | nindent 8 }}
      {{- end }}
      {{- with $route.matches }}
      matches: {{ toYaml . | nindent 8 }}
      {{- end }}
    {{- end }}
{{- end }}
{{- end }}

Some files were not shown because too many files have changed in this diff Show More