Adding openldap + ldap user manager

This commit is contained in:
plm 2024-12-02 13:57:37 +01:00
parent 519fb80ee7
commit ba9a971964
38 changed files with 1984 additions and 29 deletions

View File

@ -7,10 +7,10 @@ appVersion: "0.0.1"
# TODO: ldap, ory hydra, keto
dependencies:
- name: openldap-stack-ha
version: "4.3.1"
repository: "https://jp-gouin.github.io/helm-openldap/"
condition: openldap-stack-ha.enabled
- name: openldap
repository: https://jp-gouin.github.io/helm-openldap/
version: "2.0.4"
condition: openldap.enabled
- name: traefik
version: "33.0.0"
repository: "https://helm.traefik.io/traefik"

View File

@ -0,0 +1,133 @@
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: openldap-qualif-
spec:
entrypoint: test-deployment
arguments:
parameters:
- name: namespace
value: openldap-qualif
- name: app
value: openldap-qualif
# This spec contains five templates: test-deployment (the entrypoint), wait-upgrade, test-openldap-upgrade, apply-chaos-test and pause-chaos-test
templates:
- name: test-deployment
parallelism: 1
# Instead of just running a container,
# this template runs a sequence of steps
steps:
- - name: wait-upgrade # runs before the following steps
template: wait-upgrade
arguments:
parameters:
- name: time
value: 10
- name: type
value: sts
- - name: test-openldap-upgrade # double dash => run after previous step
template: test-openldap-upgrade
arguments:
parameters:
- name: url
value: "{{workflow.parameters.app}}.{{workflow.parameters.namespace}}"
- name: password
value: "Not@SecurePassw0rd"
- name: user
value: "cn=admin,dc=example,dc=org"
- name: occurrence
value: "{{item}}"
withSequence:
count: "1"
- - name: apply-chaos-test # double dash => run after previous step
template: apply-chaos-test
- - name: test-openldap # double dash => run after previous step
template: test-openldap-upgrade
arguments:
parameters:
- name: url
value: "{{workflow.parameters.app}}.{{workflow.parameters.namespace}}"
- name: password
value: "Not@SecurePassw0rd"
- name: user
value: "cn=admin,dc=example,dc=org"
- name: occurrence
value: "{{item}}"
withSequence:
count: "60"
- - name: cleanup # double dash => run after previous step
template: pause-chaos-test
# Waits for the given time, then checks the rollout status of the upgraded resources
- name: wait-upgrade
serviceAccountName: argo-workflow-invocator
inputs:
parameters:
- name: time
- name: type # type of resource to wait for (deployment or sts)
script:
image: bitnami/kubectl:1.18.13
command: [/bin/bash]
source: |
sleep {{inputs.parameters.time}}
kubectl rollout status -n {{workflow.parameters.namespace}} {{inputs.parameters.type}} {{workflow.parameters.app}}
- name: test-openldap-upgrade
serviceAccountName: argo-workflow-invocator
inputs:
parameters:
- name: url
- name: password
- name: user
- name: occurrence
script:
image: alpine
command: [sh]
source: | # Contents of the here-script
apk add openldap-clients
echo "run ldap commands (add, search, modify...)"
LDAPTLS_REQCERT=never ldapsearch -x -D '{{inputs.parameters.user}}' -w {{inputs.parameters.password}} -H ldaps://{{inputs.parameters.url}} -b 'dc=example,dc=org'
sleep 60
- name: apply-chaos-test
serviceAccountName: argo-workflow-invocator
resource: # indicates that this is a resource template
action: apply # can be any kubectl action (e.g. create, delete, apply, patch)
manifest: | #put your kubernetes spec here
apiVersion: chaos-mesh.org/v1alpha1
kind: PodChaos
metadata:
name: pod-failure-openldap
namespace: openldap-qualif
annotations:
experiment.chaos-mesh.org/pause: "false"
spec:
action: pod-failure
mode: random-max-percent
value: "100"
duration: "15s"
selector:
labelSelectors:
"app": "openldap-qualif"
scheduler:
cron: "@every 2m"
- name: pause-chaos-test
serviceAccountName: argo-workflow-invocator
resource: # indicates that this is a resource template
action: apply # can be any kubectl action (e.g. create, delete, apply, patch)
manifest: | #put your kubernetes spec here
apiVersion: chaos-mesh.org/v1alpha1
kind: PodChaos
metadata:
name: pod-failure-openldap
namespace: openldap-qualif
annotations:
experiment.chaos-mesh.org/pause: "true"
spec:
action: pod-failure
mode: random-max-percent
value: "100"
duration: "15s"
selector:
labelSelectors:
"app": "openldap-qualif"
scheduler:
cron: "@every 2m"

View File

@ -0,0 +1,5 @@
.git
.github
.chaos
.argo-workflow

View File

@ -0,0 +1,24 @@
apiVersion: v2
appVersion: 2.4.47
dependencies:
- condition: ltb-passwd.enabled
name: ltb-passwd
repository: ""
version: 0.1.x
- condition: phpldapadmin.enabled
name: phpldapadmin
repository: ""
version: 0.1.x
description: Community developed LDAP software
home: https://www.openldap.org
icon: http://www.openldap.org/images/headers/LDAPworm.gif
keywords:
- ldap
- openldap
maintainers:
- email: jp-gouin@hotmail.fr
name: Jean-Philippe Gouin
name: openldap
sources:
- https://github.com/kubernetes/charts
version: 2.0.4

View File

@ -0,0 +1,167 @@
# OpenLDAP Helm Chart
## Prerequisites Details
* Kubernetes 1.8+
* PV support on the underlying infrastructure
## Chart Details
This chart will do the following:
* Instantiate 3 instances of the OpenLDAP server with multi-master replication
* Deploy phpldapadmin to administer the OpenLDAP server
* Deploy ltb-passwd for self-service password changes
## Installing the Chart
To install the chart with the release name `my-release`:
```bash
$ git clone https://github.com/jp-gouin/helm-openldap.git
$ cd helm-openldap
$ helm install openldap .
```
## Configuration
We use the docker images provided by https://github.com/osixia/docker-openldap. The docker image is highly configurable and well documented. Please consult the docker image's documentation for more information.
The following table lists the configurable parameters of the openldap chart and their default values.
| Parameter | Description | Default |
| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ------------------- |
| `replicaCount` | Number of replicas | `3` |
| `strategy` | Deployment strategy | `{}` |
| `image.repository` | Container image repository | `osixia/openldap` |
| `image.tag` | Container image tag | `1.1.10` |
| `image.pullPolicy` | Container pull policy | `IfNotPresent` |
| `extraLabels` | Labels to add to the Resources | `{}` |
| `podAnnotations` | Annotations to add to the pod | `{}` |
| `existingSecret` | Use an existing secret for admin and config user passwords | `""` |
| `service.annotations` | Annotations to add to the service | `{}` |
| `service.externalIPs` | Service external IP addresses | `[]` |
| `service.ldapPort` | External service port for LDAP | `389` |
| `service.ldapPortNodePort` | NodePort for the LDAP port if `service.type` is NodePort | `nil` |
| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` |
| `service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | `[]` |
| `service.sslLdapPort` | External service port for SSL+LDAP | `636` |
| `service.sslLdapPortNodePort` | NodePort for the SSL LDAP port if `service.type` is NodePort | `nil` |
| `service.type` | Service type can be ClusterIP, NodePort, LoadBalancer | `ClusterIP` |
| `env` | List of key value pairs as env variables to be sent to the docker image. See https://github.com/osixia/docker-openldap for available ones | `[see values.yaml]` |
| `logLevel` | Set the container log level. Valid values: `none`, `error`, `warning`, `info`, `debug`, `trace` | `info` |
| `tls.enabled` | Set to enable TLS/LDAPS with custom certificate - should also set `tls.secret` | `false` |
| `tls.secret` | Secret containing TLS cert and key (eg, generated via cert-manager) | `""` |
| `tls.CA.enabled` | Set to enable custom CA crt file - should also set `tls.CA.secret` | `false` |
| `tls.CA.secret` | Secret containing CA certificate (ca.crt) | `""` |
| `adminPassword` | Password for admin user. Unset to auto-generate the password | None |
| `configPassword` | Password for config user. Unset to auto-generate the password | None |
| `customLdifFiles` | Custom ldif files to seed the LDAP server. List of filename -> data pairs | None |
| `persistence.enabled` | Whether to use PersistentVolumes or not | `false` |
| `persistence.storageClass` | Storage class for PersistentVolumes. | `<unset>` |
| `persistence.accessMode` | Access mode for PersistentVolumes | `ReadWriteOnce` |
| `persistence.size` | PersistentVolumeClaim storage size | `8Gi` |
| `resources` | Container resource requests and limits in yaml | `{}` |
| `test.enabled` | Conditionally provision test resources | `false` |
| `test.image.repository` | Test container image requires bats framework | `dduportal/bats` |
| `test.image.tag` | Test container tag | `0.4.0` |
| `replication.enabled` | Enable the multi-master replication | `true` |
| `replication.retry` | Retry period for replication, in seconds | `60` |
| `replication.timeout` | Timeout for replication, in seconds | `1` |
| `replication.starttls` | StartTLS setting for replication | `critical` |
| `replication.tls_reqcert` | TLS certificate validation for replication | `never` |
| `replication.interval` | Interval for replication | `00:00:00:10` |
| `replication.clusterName` | Set the cluster name for replication | `cluster.local` |
| `phpldapadmin.enabled` | Enable the deployment of PhpLdapAdmin | `true` |
| `phpldapadmin.ingress` | Ingress of PhpLdapAdmin | `{}` |
| `phpldapadmin.env` | Environment variables for PhpLdapAdmin | `{}` |
| `ltb-passwd.enabled` | Enable the deployment of Ltb-Passwd | `true` |
| `ltb-passwd.ingress` | Ingress of the Ltb-Passwd service | `{}` |
| `ltb-passwd.ldap` | LDAP configuration for the Ltb-Passwd service | `{}` |
| `ltb-passwd.env` | Environment variables for Ltb-Passwd | `{}` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```bash
$ helm install --name my-release -f values.yaml .
```
> **Tip**: You can use the default [values.yaml](values.yaml)
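For example, to enable LDAPS with a custom certificate, a minimal override might look like this (a sketch; `openldap-tls` is a hypothetical `kubernetes.io/tls` secret, e.g. one issued by cert-manager):
```bash
# Write a small values override enabling TLS with a pre-created secret
cat > my-values.yaml <<'EOF'
tls:
  enabled: true
  secret: openldap-tls   # hypothetical kubernetes.io/tls secret name
env:
  LDAP_TLS_ENFORCE: "false"
EOF
# Install the chart from the cloned repository with the override
helm install openldap -f my-values.yaml .
```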
## PhpLdapAdmin
To enable PhpLdapAdmin set `phpldapadmin.enabled` to `true`
Ingress can be configured if you want to expose the service.
Set up the `env` section of the configuration so the webapp can reach the OpenLDAP server.
**Note**: the LDAP host should match `namespace.Appfullname`.
Example:
```
phpldapadmin:
enabled: true
ingress:
enabled: true
annotations: {}
path: /
## Ingress Host
hosts:
- phpldapadmin.local
env:
PHPLDAPADMIN_LDAP_HOSTS: openldap.openldap
```
## Self-service-password
To enable Self-service-password set `ltb-passwd.enabled` to `true`
Ingress can be configured if you want to expose the service.
Set up the `ldap` section with the connection details of the OpenLDAP server.
Set `bindDN` according to your LDAP domain.
**Note**: the LDAP server host should match `ldap://namespace.Appfullname`.
Example:
```
ltb-passwd:
enabled : true
ingress:
enabled: true
annotations: {}
host: "ssl-ldap2.local"
ldap:
server: ldap://openldap.openldap
searchBase: dc=example,dc=org
bindDN: cn=admin,dc=example,dc=org
bindPWKey: LDAP_ADMIN_PASSWORD
```
## Cleanup orphaned Persistent Volumes
Deleting the chart release will not delete the associated Persistent Volumes if persistence is enabled.
Run the following after deleting the release to clean up orphaned Persistent Volumes.
```bash
$ kubectl delete pvc -l release=${RELEASE_NAME}
```
## Custom Secret
`existingSecret` can be used to override the default `secret.yaml` provided by the chart.
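For example (a sketch; the secret name `my-openldap-secret` is arbitrary), the secret must provide the `LDAP_ADMIN_PASSWORD` and `LDAP_CONFIG_PASSWORD` keys that the chart's templates consume:
```bash
# Create a secret carrying the two keys the chart expects
kubectl create secret generic my-openldap-secret \
  --from-literal=LDAP_ADMIN_PASSWORD='Not@SecurePassw0rd' \
  --from-literal=LDAP_CONFIG_PASSWORD='Not@SecurePassw0rd'
# Point the chart at it instead of the generated secret
helm install openldap . --set existingSecret=my-openldap-secret
```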
## Testing
Helm tests are included; they confirm that a connection to slapd can be established.
```bash
helm install . --set test.enabled=true
helm test <RELEASE_NAME>
RUNNING: foolish-mouse-openldap-service-test-akmms
PASSED: foolish-mouse-openldap-service-test-akmms
```
The test confirms that an ldapsearch with the default credentials succeeds.
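To run the same query by hand (a sketch: assumes a release named `openldap` and the default credentials):
```bash
# Forward the LDAP port locally, then search the directory
kubectl port-forward svc/openldap 3890:389 &
ldapsearch -x -H ldap://127.0.0.1:3890 -b 'dc=example,dc=org' \
  -D 'cn=admin,dc=example,dc=org' -w Not@SecurePassw0rd
```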

View File

@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,5 @@
apiVersion: v2
appVersion: "1.3"
description: LTB Project Password self service
name: ltb-passwd
version: 0.1.0

View File

@ -0,0 +1,63 @@
# LTB Password Self Service Helm Chart
This repository contains the helm chart for the LTB password change webapp.
It is based on several other projects, namely:
- [LTB Self-Service Password](https://ltb-project.org/documentation/self-service-password)
- [LTB Self-Service Password Github Repo](https://github.com/ltb-project/self-service-password)
- [tiredofit Docker Image for the LTB repo](https://github.com/tiredofit/docker-self-service-password)
## Prerequisites
- Kubernetes 1.8+
## Chart Details
This chart will do the following:
- Instantiate an instance of the LTB LDAP Self-Service Password webapp.
## Installing the Chart
To install the chart with the release name `my-release`:
```bash
$ helm install --name my-release $PATH_TO_THIS_REPO
```
## Configuration
We use this image as the base image; please refer to its documentation for specific options.
- [tiredofit Docker Image for the LTB repo](https://github.com/tiredofit/docker-self-service-password)
Configuration is done within `values.yaml`:
| Parameter | Description | Default |
| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------- |
| `ldap.server` | LDAP Server URL, should be of the form: `ldap://ldap.svc:389` | ` ` |
| `ldap.searchBase` | LDAP Search Base for the users | ` ` |
| `ldap.binduserSecret` | Name of an **existing** secret to fetch the credentials for the bind user from. Needs keys `BINDDN` and `BINDPW` | ` ` |
| `env` | List of key value pairs as env variables to be sent to the docker image. See https://github.com/tiredofit/docker-self-service-password for available ones | `[see values.yaml]`|
| `replicaCount` | Number of replicas | `1` |
| `image.repository` | Container image repository | ` tiredofit/self-service-password` |
| `image.tag` | Container image tag | `latest` |
| `image.pullPolicy` | Container pull policy | `Always` |
| `service.port` | External port for the WebApp | `80` |
| `service.type` | Service type | `ClusterIP` |
| `ingress.enabled` | Whether to generate ingress resources | `false` |
| `ingress.annotations` | Annotations to add to the ingress | `{}` |
| `ingress.hosts` | Hostnames to redirect to the webapp | `[]` |
| `ingress.tls` | TLS Configuration | `[]` |
| `resources` | Container resource requests and limits in yaml | `{}` |
| `nodeSelector` | NodeSelector to run the image on | `{}` |
| `tolerations` | Tolerations for the service pod | `[]` |
| `affinity` | Affinity settings for the service pod | `{}` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```bash
$ helm install --name my-release -f values.yaml $PATH_TO_THIS_REPO
```
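For example, to point the webapp at an OpenLDAP instance (a sketch using the defaults shipped in `values.yaml`):
```bash
cat > ssp-values.yaml <<'EOF'
ldap:
  server: ldap://openldap.openldap
  searchBase: dc=example,dc=org
  bindDN: cn=admin,dc=example,dc=org
  bindPWKey: BINDPW
EOF
helm install --name my-release -f ssp-values.yaml $PATH_TO_THIS_REPO
```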

View File

@ -0,0 +1 @@
Happy password changing :)

View File

@ -0,0 +1,51 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "ltb-passwd.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "ltb-passwd.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "ltb-passwd.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "ltb-passwd.labels" -}}
app.kubernetes.io/name: {{ include "ltb-passwd.name" . }}
helm.sh/chart: {{ include "ltb-passwd.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Generate chart secret name
*/}}
{{- define "ltb-passwd.secretName" -}}
{{ default (include "ltb-passwd.fullname" .) .Values.existingSecret }}
{{- end -}}

View File

@ -0,0 +1,69 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "ltb-passwd.fullname" . }}
labels:
{{ include "ltb-passwd.labels" . | indent 4 }}
spec:
replicas: {{ default 1 .Values.replicaCount }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "ltb-passwd.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "ltb-passwd.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: LDAP_SERVER
value: {{ .Values.ldap.server | quote }}
- name: LDAP_BINDDN
value: {{ .Values.ldap.bindDN | quote }}
- name: LDAP_BINDPASS
valueFrom:
secretKeyRef:
name: {{ template "ltb-passwd.secretName" . }}
key: {{ .Values.ldap.bindPWKey }}
- name: LDAP_STARTTLS
value: "false"
- name: LDAP_BASE_SEARCH
value: {{ .Values.ldap.searchBase | quote }}
{{- with .Values.env }}
{{- toYaml . | nindent 10 }}
{{- end }}
ports:
- name: http
containerPort: 80
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@ -0,0 +1,38 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "ltb-passwd.fullname" . -}}
{{- $ingressPath := .Values.ingress.path -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
app: {{ template "ltb-passwd.name" . }}
chart: {{ template "ltb-passwd.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- with .Values.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ . }}
http:
paths:
- path: {{ $ingressPath }}
backend:
serviceName: {{ $fullName }}
servicePort: http
{{- end }}
{{- end }}

View File

@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "ltb-passwd.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "ltb-passwd.name" . }}
helm.sh/chart: {{ include "ltb-passwd.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: {{ include "ltb-passwd.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}

View File

@ -0,0 +1,51 @@
# Default values for ltb-passwd.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: tiredofit/self-service-password
tag: latest
pullPolicy: Always
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
service:
type: ClusterIP
port: 80
## Configure Ingress based on the documentation here: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
enabled: false
annotations: {}
path: /
## Ingress Host
# hosts:
# - ssl-ldap.local
#
tls: []
# tls:
# - secretName: ssl-ldap-dedicated-tls
# hosts:
# - ssl-ldap.local
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
ldap:
server: ldap://openldap.openldap
searchBase: dc=example,dc=org
# existingSecret: ssp-ldap
bindDN: cn=admin,dc=example,dc=org
bindPWKey: BINDPW
env:
- name: SECRETEKEY
value: "password"
- name: LDAP_LOGIN_ATTRIBUTE
value: "cn"

View File

@ -0,0 +1,13 @@
apiVersion: v1
appVersion: 0.7.1
description: Web-based LDAP browser to manage your LDAP server
home: http://phpldapadmin.sourceforge.net
icon: http://phpldapadmin.sourceforge.net/wiki/images/d/d4/Logo.jpg
keywords:
- phpldapadmin
- openldap
- userrights
maintainers:
- name: Jean-Philippe Gouin
name: phpldapadmin
version: 0.1.2

View File

@ -0,0 +1,107 @@
# Helm Chart for phpLDAPadmin
[![CircleCI](https://circleci.com/gh/cetic/helm-phpLDAPadmin.svg?style=svg)](https://circleci.com/gh/cetic/helm-phpLDAPadmin/tree/master) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) ![version](https://img.shields.io/github/tag/cetic/helm-phpLDAPadmin.svg?label=release)
## Introduction
This [Helm](https://github.com/kubernetes/helm) chart installs [phpLDAPadmin](http://phpldapadmin.sourceforge.net/wiki/index.php/Main_Page) in a Kubernetes cluster.
## Prerequisites
- Kubernetes cluster 1.10+
- Helm 2.8.0+
- PV provisioner support in the underlying infrastructure.
## Installation
### Add Helm repository
```bash
helm repo add cetic https://cetic.github.io/helm-charts
helm repo update
```
### Configure the chart
The following items can be set via `--set` flag during installation or configured by editing the `values.yaml` directly (you need to download the chart first).
#### Configure how to expose the phpLDAPadmin service:
- **Ingress**: The ingress controller must be installed in the Kubernetes cluster.
- **ClusterIP**: Exposes the service on a cluster-internal IP. Choosing this value makes the service only reachable from within the cluster.
- **NodePort**: Exposes the service on each node's IP at a static port (the NodePort). You'll be able to reach the service from outside the cluster at `NodeIP:NodePort`.
- **LoadBalancer**: Exposes the service externally using a cloud provider's load balancer.
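For instance, a NodePort exposure can be selected at install time (a sketch; `openldap.openldap` assumes an OpenLDAP service named `openldap` in the `openldap` namespace):
```bash
helm install --name my-release cetic/phpldapadmin \
  --set service.type=NodePort \
  --set env.PHPLDAPADMIN_LDAP_HOSTS=openldap.openldap
```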
#### Configure how to persist data (TODO):
- **Disable**: The data does not survive the termination of a pod.
- **Persistent Volume Claim (default)**: A default `StorageClass` is needed in the Kubernetes cluster to dynamically provision the volumes. Specify another StorageClass in `storageClass`, or set `existingClaim` if you have existing persistent volumes to use.
### Install the chart
Install the phpLDAPadmin helm chart with a release name `my-release`:
```bash
helm install --name my-release cetic/phpldapadmin
```
## Uninstallation
To uninstall/delete the `my-release` deployment:
```bash
helm delete --purge my-release
```
## Configuration
The following table lists the configurable parameters of the phpLDAPadmin chart and the default values.
| Parameter | Description | Default |
| --------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------| ------------------------------- |
| **ReplicaCount** |
| `replicaCount` | number of phpLDAPadmin images | `1` |
| **Env** |
| `env` | See values.yaml | `nil` |
| **Image** |
| `image.repository` | phpldapadmin Image name | `osixia/phpldapadmin` |
| `image.tag` | phpldapadmin Image tag | `0.7.1` |
| `image.pullPolicy` | phpldapadmin Image pull policy | `IfNotPresent` |
| **Service** |
| `service.type` | Type of service for phpldapadmin frontend | `LoadBalancer` |
| `service.port` | Port to expose service | `80` |
| `service.loadBalancerIP` | LoadBalancerIP if service type is `LoadBalancer` | `nil` |
| `service.loadBalancerSourceRanges` | LoadBalancerSourceRanges | `nil` |
| `service.annotations` | Service annotations | `{}` |
| **Ingress** |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.path` | Path to access frontend | `/` |
| `ingress.hosts` | Ingress hosts | `nil` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| **ReadinessProbe** |
| `readinessProbe` | Readiness probe settings | `{ "httpGet": { "path": "/", "port": http }}`|
| **LivenessProbe** |
| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/", "port": http }}`|
| **Resources** |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| **nodeSelector** |
| `nodeSelector` | nodeSelector | `{}` |
| **tolerations** |
| `tolerations` | tolerations | `[]` |
| **affinity** |
| `affinity` | affinity | `{}` |
## Credits
Initially inspired by https://github.com/gengen1988/helm-phpldapadmin.
## Contributing
Feel free to contribute by making a [pull request](https://github.com/cetic/helm-phpLDAPadmin/pull/new/master).
Please read the official [Contribution Guide](https://github.com/helm/charts/blob/master/CONTRIBUTING.md) from Helm for more information on how you can contribute to this Chart.
## License
[Apache License 2.0](/LICENSE)

View File

@ -0,0 +1,84 @@
#!/bin/sh
set -e
set -o pipefail
WORKING_DIRECTORY="$PWD"
[ "$GITHUB_PAGES_REPO" ] || {
echo "ERROR: Environment variable GITHUB_PAGES_REPO is required"
exit 1
}
[ "$HELM_CHART" ] || {
echo "ERROR: Environment variable HELM_CHART is required"
exit 1
}
[ -z "$GITHUB_PAGES_BRANCH" ] && GITHUB_PAGES_BRANCH=gh-pages
[ -z "$HELM_CHARTS_SOURCE" ] && HELM_CHARTS_SOURCE="$WORKING_DIRECTORY/$HELM_CHART"
[ -d "$WORKING_DIRECTORY" ] || {
echo "ERROR: Could not find Helm charts in $WORKING_DIRECTORY"
exit 1
}
[ -z "$HELM_VERSION" ] && HELM_VERSION=2.8.1
[ "$CIRCLE_BRANCH" ] || {
echo "ERROR: Environment variable CIRCLE_BRANCH is required"
exit 1
}
echo "GITHUB_PAGES_REPO=$GITHUB_PAGES_REPO"
echo "GITHUB_PAGES_BRANCH=$GITHUB_PAGES_BRANCH"
echo "HELM_CHARTS_SOURCE=$HELM_CHARTS_SOURCE"
echo "HELM_VERSION=$HELM_VERSION"
echo "CIRCLE_BRANCH=$CIRCLE_BRANCH"
echo ">>> Create Chart Directory"
mkdir -p "$HELM_CHARTS_SOURCE/"
mkdir -p /tmp/helm-tmp/
mv "$WORKING_DIRECTORY"/* /tmp/helm-tmp/
mv /tmp/helm-tmp/ "$HELM_CHARTS_SOURCE/"
echo '>> Prepare...'
mkdir -p /tmp/helm/bin
mkdir -p /tmp/helm/publish
apk update
apk add ca-certificates git openssh
echo '>> Installing Helm...'
cd /tmp/helm/bin
wget "https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz"
tar -zxf "helm-v${HELM_VERSION}-linux-amd64.tar.gz"
chmod +x linux-amd64/helm
alias helm=/tmp/helm/bin/linux-amd64/helm
helm version -c
helm init -c
echo ">> Checking out $GITHUB_PAGES_BRANCH branch from $GITHUB_PAGES_REPO"
cd /tmp/helm/publish
mkdir -p "$HOME/.ssh"
ssh-keyscan -H github.com >> "$HOME/.ssh/known_hosts"
git clone -b "$GITHUB_PAGES_BRANCH" "git@github.com:$GITHUB_PAGES_REPO.git" .
echo '>> Building chart...'
echo ">>> helm lint $HELM_CHARTS_SOURCE"
helm lint "$HELM_CHARTS_SOURCE"
echo ">>> helm package -d $HELM_CHART $HELM_CHARTS_SOURCE"
mkdir -p "$HELM_CHART"
helm package -d "$HELM_CHART" "$HELM_CHARTS_SOURCE"
echo '>>> helm repo index'
helm repo index .
if [ "$CIRCLE_BRANCH" != "master" ]; then
echo "Current branch is not master and do not publish"
exit 0
fi
echo ">> Publishing to $GITHUB_PAGES_BRANCH branch of $GITHUB_PAGES_REPO"
git config user.email "$CIRCLE_USERNAME@users.noreply.github.com"
git config user.name CircleCI
git add .
git status
git commit -m "Published by CircleCI $CIRCLE_BUILD_URL"
git push origin "$GITHUB_PAGES_BRANCH"

View File

@ -0,0 +1,26 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
You should be able to access your new phpLDAPadmin installation through
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
Find out your cluster IP address by running:
$ kubectl cluster-info
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "phpldapadmin.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of it by running 'kubectl get svc -w {{ template "phpldapadmin.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "phpldapadmin.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "phpldapadmin.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward $POD_NAME 8080:80
{{- end }}
** Please be patient while the chart is being deployed **

View File

@ -0,0 +1,32 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "phpldapadmin.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "phpldapadmin.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "phpldapadmin.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

View File

@ -0,0 +1,14 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "phpldapadmin.fullname" . }}
labels:
app: {{ template "phpldapadmin.name" . }}
chart: {{ template "phpldapadmin.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.extraLabels }}
{{ toYaml .Values.extraLabels | indent 4 }}
{{- end }}
data:
{{ toYaml .Values.env | indent 2 }}

View File

@ -0,0 +1,52 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "phpldapadmin.fullname" . }}
labels:
app: {{ template "phpldapadmin.name" . }}
chart: {{ template "phpldapadmin.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: {{ template "phpldapadmin.name" . }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "phpldapadmin.name" . }}
release: {{ .Release.Name }}
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 80
protocol: TCP
envFrom:
- configMapRef:
name: {{ template "phpldapadmin.fullname" . }}
livenessProbe:
{{ toYaml .Values.livenessProbe | indent 12 }}
readinessProbe:
{{ toYaml .Values.readinessProbe | indent 12 }}
resources:
{{ toYaml .Values.resources | indent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}

View File

@ -0,0 +1,38 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "phpldapadmin.fullname" . -}}
{{- $ingressPath := .Values.ingress.path -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
app: {{ template "phpldapadmin.name" . }}
chart: {{ template "phpldapadmin.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- with .Values.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ . }}
http:
paths:
- path: {{ $ingressPath }}
backend:
serviceName: {{ $fullName }}
servicePort: http
{{- end }}
{{- end }}

View File

@ -0,0 +1,32 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "phpldapadmin.fullname" . }}
labels:
app: {{ template "phpldapadmin.name" . }}
chart: {{ template "phpldapadmin.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.service.annotations }}
annotations:
{{ toYaml .Values.service.annotations | indent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
{{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
{{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{ with .Values.service.loadBalancerSourceRanges }}
{{ toYaml . | indent 4 }}
{{- end }}
{{- end }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
app: {{ template "phpldapadmin.name" . }}
release: {{ .Release.Name }}

View File

@ -0,0 +1,94 @@
---
# Default values for phpldapadmin.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
## TODO: add this in the deployment.yaml
env:
# PHPLDAPADMIN_LDAP_HOSTS: ...
PHPLDAPADMIN_HTTPS: "false"
PHPLDAPADMIN_TRUST_PROXY_SSL: "true"
## Number of phpLDAPadmin images
replicaCount: 1
## Set default image, imageTag, and imagePullPolicy.
##
image:
repository: osixia/phpldapadmin
tag: 0.9.0
pullPolicy: IfNotPresent
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
## TODO persistence
## Expose the phpLDAPadmin service to be accessed from outside the cluster (LoadBalancer service).
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
type: ClusterIP
## name: phpldapadmin
port: 80
annotations: {}
## Set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
# loadBalancerIP:
## Load Balancer sources
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
##
# loadBalancerSourceRanges:
# - 10.10.10.0/24
## Configure Ingress based on the documentation here: https://kubernetes.io/docs/concepts/services-networking/ingress/
##
ingress:
enabled: false
annotations: {}
path: /
## Ingress Host
# hosts:
# - phpldapadmin.example.org
#
tls: []
# tls:
# - secretName: phpldapadmin-dedicated-tls
# hosts:
# - phpldapadmin.example.org
## Configure liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
##
readinessProbe:
httpGet:
path: /
port: http
livenessProbe:
httpGet:
path: /
port: http
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}

View File

@ -0,0 +1,20 @@
OpenLDAP has been installed. You can access the server from within the k8s cluster using:
{{ template "openldap.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.ldapPort }}
You can access the LDAP adminPassword and configPassword using:
kubectl get secret --namespace {{ .Release.Namespace }} {{ template "openldap.secretName" . }} -o jsonpath="{.data.LDAP_ADMIN_PASSWORD}" | base64 --decode; echo
kubectl get secret --namespace {{ .Release.Namespace }} {{ template "openldap.secretName" . }} -o jsonpath="{.data.LDAP_CONFIG_PASSWORD}" | base64 --decode; echo
You can access the LDAP service from within the cluster (or with kubectl port-forward) with a command like the following (replace password and domain):
ldapsearch -x -H ldap://{{ template "openldap.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.ldapPort }} -b dc=example,dc=org -D "cn=admin,dc=example,dc=org" -w $LDAP_ADMIN_PASSWORD
Test server health using Helm test:
helm test {{ .Release.Name }}
You can also consider installing the phpldapadmin helm chart to manage this OpenLDAP instance, or installing Apache Directory Studio and connecting using kubectl port-forward.

View File

@ -0,0 +1,74 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "openldap.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for statefulset.
*/}}
{{- define "statefulset.apiVersion" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
{{- print "apps/v1beta1" -}}
{{- else -}}
{{- print "apps/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "openldap.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "openldap.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Generate chart secret name
*/}}
{{- define "openldap.secretName" -}}
{{ default (include "openldap.fullname" .) .Values.existingSecret }}
{{- end -}}
{{/*
Generate replication services list
*/}}
{{- define "replicalist" -}}
{{- $name := (include "openldap.fullname" .) }}
{{- $namespace := .Release.Namespace }}
{{- $cluster := .Values.replication.clusterName }}
{{- $nodeCount := .Values.replicaCount | int }}
{{- range $index0 := until $nodeCount -}}
{{- $index1 := $index0 | add1 -}}
'ldap://{{ $name }}-{{ $index0 }}.{{ $name }}-headless.{{ $namespace }}.svc.{{ $cluster }}'{{ if ne $index1 $nodeCount }},{{ end }}
{{- end -}}
{{- end -}}
{{/*
Renders a value that contains template.
Usage:
{{ include "openldap.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }}
*/}}
{{- define "openldap.tplValue" -}}
{{- if typeIs "string" .value }}
{{- tpl .value .context }}
{{- else }}
{{- tpl (.value | toYaml) .context }}
{{- end }}
{{- end -}}

View File

@ -0,0 +1,23 @@
#
# A ConfigMap spec for openldap slapd that maps directly to files under
# /container/service/slapd/assets/config/bootstrap/ldif/custom
#
{{- if .Values.customLdifFiles }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "openldap.fullname" . }}-customldif
labels:
app: {{ template "openldap.name" . }}
chart: {{ template "openldap.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.extraLabels }}
{{ toYaml .Values.extraLabels | indent 4 }}
{{- end }}
data:
{{- range $key, $val := .Values.customLdifFiles }}
{{ $key }}: |-
{{ $val | indent 4}}
{{- end }}
{{- end }}

View File

@ -0,0 +1,26 @@
#
# A ConfigMap spec for openldap slapd that maps directly to env variables in the Pod.
# The list of supported environment variables comes from the docker image:
# https://github.com/osixia/docker-openldap#beginner-guide
# Note that passwords are defined as secrets
#
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "openldap.fullname" . }}-env
labels:
app: {{ template "openldap.name" . }}
chart: {{ template "openldap.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.extraLabels }}
{{ toYaml .Values.extraLabels | indent 4 }}
{{- end }}
data:
{{ toYaml .Values.env | indent 2 }}
{{- if .Values.replication.enabled }}
LDAP_REPLICATION: "true"
LDAP_REPLICATION_CONFIG_SYNCPROV: "binddn=\"cn=admin,cn=config\" bindmethod=simple credentials=$LDAP_CONFIG_PASSWORD searchbase=\"cn=config\" type=refreshAndPersist retry=\"{{.Values.replication.retry }} +\" timeout={{.Values.replication.timeout }} starttls={{.Values.replication.starttls }} tls_reqcert={{.Values.replication.tls_reqcert }}"
LDAP_REPLICATION_DB_SYNCPROV: "binddn=\"cn=admin,$LDAP_BASE_DN\" bindmethod=simple credentials=$LDAP_ADMIN_PASSWORD searchbase=\"$LDAP_BASE_DN\" type=refreshAndPersist interval={{.Values.replication.interval }} retry=\"{{.Values.replication.retry }} +\" timeout={{.Values.replication.timeout }} starttls={{.Values.replication.starttls }} tls_reqcert={{.Values.replication.tls_reqcert }}"
LDAP_REPLICATION_HOSTS: "#PYTHON2BASH:[{{ template "replicalist" . }}]"
{{- end }}

View File

@ -0,0 +1,17 @@
{{ if not .Values.existingSecret }}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "openldap.fullname" . }}-ltb-passwd
labels:
app: {{ template "openldap.name" . }}
chart: {{ template "openldap.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.extraLabels }}
{{ toYaml .Values.extraLabels | indent 4 }}
{{- end }}
type: Opaque
data:
LDAP_ADMIN_PASSWORD: {{ .Values.adminPassword | b64enc | quote }}
{{ end }}

View File

@ -0,0 +1,18 @@
{{ if not .Values.existingSecret }}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "openldap.fullname" . }}
labels:
app: {{ template "openldap.name" . }}
chart: {{ template "openldap.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.extraLabels }}
{{ toYaml .Values.extraLabels | indent 4 }}
{{- end }}
type: Opaque
data:
LDAP_ADMIN_PASSWORD: {{ .Values.adminPassword | b64enc | quote }}
LDAP_CONFIG_PASSWORD: {{ .Values.configPassword | b64enc | quote }}
{{ end }}

View File

@ -0,0 +1,47 @@
apiVersion: v1
kind: Service
metadata:
{{- if .Values.service.annotations }}
annotations:
{{ toYaml .Values.service.annotations | indent 4 }}
{{- end }}
name: {{ template "openldap.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "openldap.fullname" . }}
chart: {{ template "openldap.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.extraLabels }}
{{ toYaml .Values.extraLabels | indent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
{{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
{{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges: {{ toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }}
{{- end }}
ports:
- name: ldap-port
protocol: TCP
port: {{ .Values.service.ldapPort }}
targetPort: ldap-port
{{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.ldapPortNodePort)) }}
nodePort: {{ .Values.service.ldapPortNodePort }}
{{- else if eq .Values.service.type "ClusterIP" }}
nodePort: null
{{- end }}
- name: ssl-ldap-port
protocol: TCP
port: {{ .Values.service.sslLdapPort }}
targetPort: ssl-ldap-port
{{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.sslLdapPortNodePort)) }}
nodePort: {{ .Values.service.sslLdapPortNodePort }}
{{- else if eq .Values.service.type "ClusterIP" }}
nodePort: null
{{- end }}
selector:
app: {{ template "openldap.fullname" . }}
release: {{ .Release.Name }}

View File

@ -0,0 +1,153 @@
apiVersion: {{ template "statefulset.apiVersion" . }}
kind: StatefulSet
metadata:
name: {{ template "openldap.fullname" . }}
labels:
app: {{ template "openldap.fullname" . }}
chart: {{ template "openldap.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.extraLabels }}
{{ toYaml .Values.extraLabels | indent 4 }}
{{- end }}
spec:
replicas: {{ .Values.replicaCount }}
{{- if .Values.strategy }}
strategy:
{{ toYaml .Values.strategy | indent 4 }}
{{- end }}
selector:
matchLabels:
app: {{ template "openldap.fullname" . }}
release: {{ .Release.Name }}
serviceName: {{ template "openldap.fullname" . }}-headless
template:
metadata:
annotations:
checksum/configmap-env: {{ include (print $.Template.BasePath "/configmap-env.yaml") . | sha256sum }}
{{- if .Values.customLdifFiles}}
checksum/configmap-customldif: {{ include (print $.Template.BasePath "/configmap-customldif.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.podAnnotations}}
{{ toYaml .Values.podAnnotations | indent 8}}
{{- end }}
labels:
app: {{ template "openldap.fullname" . }}
release: {{ .Release.Name }}
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- -l
- {{ .Values.logLevel }}
{{- if .Values.customLdifFiles }}
- --copy-service
{{- end }}
ports:
- name: ldap-port
containerPort: 389
- name: ssl-ldap-port
containerPort: 636
envFrom:
- configMapRef:
name: {{ template "openldap.fullname" . }}-env
- secretRef:
name: {{ template "openldap.secretName" . }}
volumeMounts:
- name: data
mountPath: /var/lib/ldap
subPath: data
- name: data
mountPath: /etc/ldap/slapd.d
subPath: config-data
- name: data
mountPath: /container/service/slapd/assets/certs
{{- if .Values.customLdifFiles }}
- name: custom-ldif-files
mountPath: /container/service/slapd/assets/config/bootstrap/ldif/custom
{{- end }}
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
#- name: HOSTNAME
# value: $(POD_NAME).{{ template "openldap.fullname" . }}-headless
{{- if .Values.tls.enabled }}
- name: LDAP_TLS_CRT_FILENAME
value: tls.crt
- name: LDAP_TLS_KEY_FILENAME
value: tls.key
{{- if .Values.tls.CA.enabled }}
- name: LDAP_TLS_CA_CRT_FILENAME
value: ca.crt
{{- end }}
{{- end }}
livenessProbe:
tcpSocket:
port: ldap-port
initialDelaySeconds: 20
periodSeconds: 10
failureThreshold: 10
readinessProbe:
tcpSocket:
port: ldap-port
initialDelaySeconds: 20
periodSeconds: 10
failureThreshold: 10
resources:
{{ toYaml .Values.resources | indent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
app: {{ template "openldap.fullname" . }}
release: {{ .Release.Name }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
imagePullSecrets:
- name: {{ .Values.image.pullSecret }}
{{- if .Values.customLdifFiles }}
volumes:
- name: custom-ldif-files
configMap:
name: {{ template "openldap.fullname" . }}-customldif
{{- end }}
{{- if .Values.persistence.enabled }}
volumeClaimTemplates:
- metadata:
name: data
annotations:
{{- range $key, $value := .Values.persistence.annotations }}
{{ $key }}: {{ $value }}
{{- end }}
spec:
accessModes:
{{- range .Values.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.storageClass }}
{{- if (eq "-" .Values.persistence.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistence.storageClass }}"
{{- end }}
{{- end }}
{{- else }}
- name: data
emptyDir: {}
{{- end }}

View File

@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "openldap.fullname" . }}-headless
labels:
app: {{ template "openldap.fullname" . }}
chart: {{ template "openldap.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
ports:
- port: {{ .Values.service.ldapPort }}
name: ldap-port
targetPort: ldap-port
clusterIP: None
selector:
app: {{ template "openldap.fullname" . }}
release: {{ .Release.Name }}
type: ClusterIP
sessionAffinity: None

View File

@ -0,0 +1,50 @@
{{- if .Values.test.enabled -}}
apiVersion: v1
kind: Pod
metadata:
name: "{{ template "openldap.fullname" . }}-test-{{ randAlphaNum 5 | lower }}"
labels:
app: {{ template "openldap.name" . }}
chart: {{ template "openldap.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.extraLabels }}
{{ toYaml .Values.extraLabels | indent 4 }}
{{- end }}
annotations:
"helm.sh/hook": test-success
spec:
initContainers:
- name: test-framework
image: {{ .Values.test.image.repository }}:{{ .Values.test.image.tag }}
command:
- "bash"
- "-c"
- |
set -ex
# copy bats to tools dir
cp -R /usr/local/libexec/ /tools/bats/
volumeMounts:
- mountPath: /tools
name: tools
containers:
- name: {{ .Release.Name }}-test
image: {{ .Values.test.image.repository }}:{{ .Values.test.image.tag }}
envFrom:
- secretRef:
name: {{ template "openldap.secretName" . }}
command: ["/tools/bats/bats", "-t", "/tests/run.sh"]
volumeMounts:
- mountPath: /tests
name: tests
readOnly: true
- mountPath: /tools
name: tools
volumes:
- name: tests
configMap:
name: {{ template "openldap.fullname" . }}-tests
- name: tools
emptyDir: {}
restartPolicy: Never
{{- end -}}

View File

@ -0,0 +1,22 @@
{{- if .Values.test.enabled -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "openldap.fullname" . }}-tests
labels:
app: {{ template "openldap.name" . }}
chart: {{ template "openldap.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.extraLabels }}
{{ toYaml .Values.extraLabels | indent 4 }}
{{- end }}
data:
run.sh: |-
@test "Testing connecting to slapd server" {
# Ideally, this should be in the docker image, but there is no generic image
# with bats and ldap-utils installed we can use. It is not worth pushing a dedicated image for this for now.
apt-get update && apt-get install -y ldap-utils
ldapsearch -x -H ldap://{{ template "openldap.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.ldapPort }} -b "dc=example,dc=org" -D "cn=admin,dc=example,dc=org" -w $LDAP_ADMIN_PASSWORD
}
{{- end -}}

View File

@ -0,0 +1,179 @@
# Default values for openldap.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 3
# Define deployment strategy - IMPORTANT: use rollingUpdate: null when using the Recreate strategy.
# It prevents merging with existing map keys, which is forbidden.
strategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 1
# maxUnavailable: 0
#
# or
#
# type: Recreate
# rollingUpdate: null
image:
# From repository https://github.com/osixia/docker-openldap
repository: osixia/openldap
tag: 1.4.0
pullPolicy: Always
pullSecret: harbor
# Set the container log level
# Valid log levels: none, error, warning, info (default), debug, trace
logLevel: info
# Specifies an existing secret to be used for admin and config user passwords
existingSecret: ""
# settings for enabling TLS with custom certificate
tls:
enabled: true
secret: "" # The name of a kubernetes.io/tls type secret to use for TLS
CA:
enabled: false
secret: "" # The name of a generic secret to use for custom CA certificate (ca.crt)
## Add additional labels to all resources
extraLabels: {}
## Add additional annotations to pods
podAnnotations: {}
service:
annotations: {}
ldapPort: 389
sslLdapPort: 636
## If service type NodePort, define the value here
#ldapPortNodePort:
#sslLdapPortNodePort:
## List of IP addresses at which the service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
#loadBalancerIP:
#loadBalancerSourceRanges: []
type: ClusterIP
# Default configuration for openldap as environment variables. These get injected directly in the container.
# Use the env variables from https://github.com/osixia/docker-openldap#beginner-guide
env:
LDAP_LOG_LEVEL: "256"
LDAP_ORGANISATION: "Example Inc."
LDAP_DOMAIN: "example.org"
LDAP_READONLY_USER: "false"
LDAP_READONLY_USER_USERNAME: "readonly"
LDAP_READONLY_USER_PASSWORD: "readonly"
LDAP_RFC2307BIS_SCHEMA: "false"
LDAP_BACKEND: "mdb"
LDAP_TLS: "true"
LDAP_TLS_CRT_FILENAME: "ldap.crt"
LDAP_TLS_KEY_FILENAME: "ldap.key"
LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
LDAP_TLS_ENFORCE: "false"
CONTAINER_LOG_LEVEL: "4"
LDAP_TLS_REQCERT: "never"
KEEP_EXISTING_CONFIG: "false"
LDAP_REMOVE_CONFIG_AFTER_SETUP: "true"
LDAP_SSL_HELPER_PREFIX: "ldap"
LDAP_TLS_VERIFY_CLIENT: "never"
LDAP_TLS_PROTOCOL_MIN: "3.0"
LDAP_TLS_CIPHER_SUITE: "NORMAL"
# Default Passwords to use, stored as a secret.
# You can override these at install time with
# helm install openldap --set openldap.adminPassword=<passwd>,openldap.configPassword=<passwd>
adminPassword: Not@SecurePassw0rd
configPassword: Not@SecurePassw0rd
# Custom openldap configuration files used to override default settings
# customLdifFiles:
# 01-default-users.ldif: |-
# Predefine users here
replication:
enabled: true
# Enter the name of your cluster, defaults to "cluster.local"
clusterName: "cluster.local"
retry: 60
timeout: 1
interval: 00:00:00:10
starttls: "critical"
tls_reqcert: "never"
## Persist data to a persistent volume
persistence:
enabled: true
## database data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "standard-singlewriter"
accessModes:
- ReadWriteOnce
size: 8Gi
resources: {}
# requests:
# cpu: "100m"
# memory: "256Mi"
# limits:
# cpu: "500m"
# memory: "512Mi"
nodeSelector: {}
tolerations: []
## test container details
test:
enabled: false
image:
repository: dduportal/bats
tag: 0.4.0
ltb-passwd:
enabled : true
ingress:
enabled: true
annotations: {}
path: /
## Ingress Host
hosts:
- "ssl-ldap2.example"
ldap:
server: ldap://openldap
searchBase: dc=example,dc=org
# existingSecret: openldaptest
bindDN: cn=admin,dc=example,dc=org
bindPWKey: LDAP_ADMIN_PASSWORD
phpldapadmin:
enabled: true
ingress:
enabled: true
annotations: {}
path: /
## Ingress Host
hosts:
- phpldapadmin.example
env:
PHPLDAPADMIN_LDAP_HOSTS: openldap
# TODO: make it work
# "#PYTHON2BASH:
# [{'openldap.openldap':
# [{'server': [
# {'tls': False},
# {'port':636}
# ]},
# {'login':
# [{'bind_id': 'cn=admin,dc=example,dc=org'}]
# }]
# }]"

View File

@ -55,47 +55,100 @@ nats:
storageClassName: kind-sc
openldap-stack-ha:
enabled: false
global:
ldapDomain: "opencloud.acme.com"
adminUser: "admin"
adminPassword: "acmeOpenCloudAdmin"
configUser: "admin"
configPassword: "acmeOpenCloudConfig"
openldap:
enabled: true
test:
enabled: false
ltb-passwd:
enabled: false
replicaCount: 1
image:
repository: osixia/openldap
tag: 1.5.0
tls:
enabled: false
env:
LDAP_ORGANISATION: "Acme opencloud"
LDAP_DOMAIN: "acme.com"
LDAP_BACKEND: "mdb"
LDAP_TLS: "false"
LDAP_TLS_ENFORCE: "false"
LDAP_REMOVE_CONFIG_AFTER_SETUP: "true"
adminPassword: "goaChai9"
configPassword: "xaidee2M"
phpldapadmin:
enabled: false
persistence:
enabled: true
accessMode: ReadWriteOnce
size: 20Mi
size: 10Mi
storageClass: kind-sc
ltb-passwd:
enabled : false
env:
LDAP_REQUIRE_TLS: "false"
LDAP_ENABLE_TLS: "yes"
LDAP_TLS_ENFORCE: "false"
phpldapadmin:
enabled: false
replication:
enabled: false
replicaCount: 1
customLdifFiles:
01-schema.ldif: |-
dn: ou=groups,dc=acme,dc=com
objectClass: organizationalUnit
ou: groups
dn: ou=users,dc=acme,dc=com
objectClass: organizationalUnit
ou: users
dn: cn=lastGID,dc=acme,dc=com
objectClass: device
objectClass: top
description: Records the last GID used to create a Posix group. This prevents the re-use of a GID from a deleted group.
cn: lastGID
serialNumber: 2001
dn: cn=lastUID,dc=acme,dc=com
objectClass: device
objectClass: top
serialNumber: 2001
description: Records the last UID used to create a Posix account. This prevents the re-use of a UID from a deleted account.
cn: lastUID
02-ldapadmin.ldif : |-
dn: cn=ldapadmin,ou=groups,dc=acme,dc=com
objectClass: top
objectClass: posixGroup
cn: ldapadmin
memberUid: acme.ldapadmin
gidNumber: 2001
dn: uid=acme.ldapadmin,ou=users,dc=acme,dc=com
givenName: ldapadmin
sn: ldapadmin
uid: acme.ldapadmin
cn: acmeldapadmin
objectClass: person
objectClass: inetOrgPerson
objectClass: posixAccount
userPassword:: e0NSWVBUfSQ2JDhycFZxbk5NJHNmWVhBYUNYUzdZbXFhR1VWTjdJa20wT2hXLmVtT3oua2x5L3V5YUdjNE81MDVEalU0R2ZMb0hTaFVwNUkvVUxFT0JubWJ2d29meFNFcXIuaFRVMm0u
uidNumber: 2001
gidNumber: 2001
loginShell: /bin/bash
homeDirectory: /home/acme.ldapadmin
#acme.ldapadmin ia3Bahr3
# ldap user manager configuration
ldapUserManager:
enabled: false
version: v1.11
enabled: true
env:
SERVER_HOSTNAME: "opencloud.acme.com"
LDAP_BASE_DN: "dc=opencloud,dc=acme,dc=com"
SERVER_HOSTNAME: "users.acme.com"
LDAP_BASE_DN: "dc=acme,dc=com"
LDAP_REQUIRE_STARTTLS: "false"
LDAP_ADMINS_GROUP: "ldapadmin"
LDAP_ADMIN_BIND_DN: "cn=admin,dc=opencloud,dc=acme,dc=com"
LDAP_ADMIN_BIND_PWD: "acmeOpenCloudAdmin"
LDAP_ADMIN_BIND_DN: "cn=admin,dc=acme,dc=com"
LDAP_ADMIN_BIND_PWD: "goaChai9"
LDAP_IGNORE_CERT_ERRORS: "true"
EMAIL_DOMAIN: ""
NO_HTTPS: "true"
SERVER_PATH: "/users"
ORGANISATION_NAME: "Opencloud Acme"
ORGANISATION_NAME: "Acme"
LDAP_USER_OU: "users"
LDAP_GROUP_OU: "groups"
ACCEPT_WEAK_PASSWORDS: "true"

View File

@ -0,0 +1,113 @@
{{- if .Values.ldapUserManager.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: ldap-user-manager
name: {{ .Release.Name }}-ldap-user-manager
spec:
replicas: 1
selector:
matchLabels:
app: ldap-user-manager
strategy: {}
template:
metadata:
labels:
app: ldap-user-manager
spec:
containers:
- image: wheelybird/ldap-user-manager:v1.8
name: ldap-user-manager
env:
- name: SERVER_HOSTNAME
value: "{{ .Values.ldapUserManager.env.SERVER_HOSTNAME }}"
- name: LDAP_URI
value: "ldap://{{ .Release.Name }}-openldap.{{ .Release.Namespace }}.svc.cluster.local"
- name: LDAP_BASE_DN
value: "{{ .Values.ldapUserManager.env.LDAP_BASE_DN }}"
- name: LDAP_REQUIRE_STARTTLS
value: "{{ .Values.ldapUserManager.env.LDAP_REQUIRE_STARTTLS }}"
- name: LDAP_ADMINS_GROUP
value: "{{ .Values.ldapUserManager.env.LDAP_ADMINS_GROUP }}"
- name: LDAP_ADMIN_BIND_DN
value: "{{ .Values.ldapUserManager.env.LDAP_ADMIN_BIND_DN }}"
- name: LDAP_ADMIN_BIND_PWD
value: "{{ .Values.ldapUserManager.env.LDAP_ADMIN_BIND_PWD }}"
- name: LDAP_IGNORE_CERT_ERRORS
value: "{{ .Values.ldapUserManager.env.LDAP_IGNORE_CERT_ERRORS }}"
- name: NO_HTTPS
value: "{{ .Values.ldapUserManager.env.NO_HTTPS }}"
- name: EMAIL_DOMAIN
value: "{{ .Values.ldapUserManager.env.EMAIL_DOMAIN }}"
- name: ORGANISATION_NAME
value: "{{ .Values.ldapUserManager.env.ORGANISATION_NAME }}"
- name: LDAP_USER_OU
value: "{{ .Values.ldapUserManager.env.LDAP_USER_OU }}"
- name: LDAP_GROUP_OU
value: "{{ .Values.ldapUserManager.env.LDAP_GROUP_OU }}"
- name: SERVER_PATH
value: "{{ .Values.ldapUserManager.env.SERVER_PATH }}"
- name: LDAP_ACCOUNT_ADDITIONAL_OBJECTCLASSES
value: "{{ .Values.ldapUserManager.env.LDAP_ACCOUNT_ADDITIONAL_OBJECTCLASSES }}"
- name: LDAP_ACCOUNT_ADDITIONAL_ATTRIBUTES
value: "{{ .Values.ldapUserManager.env.LDAP_ACCOUNT_ADDITIONAL_ATTRIBUTES }}"
- name: LDAP_GROUP_ADDITIONAL_OBJECTCLASSES
value: "{{ .Values.ldapUserManager.env.LDAP_GROUP_ADDITIONAL_OBJECTCLASSES }}"
- name: LDAP_GROUP_ADDITIONAL_ATTRIBUTES
value: "{{ .Values.ldapUserManager.env.LDAP_GROUP_ADDITIONAL_ATTRIBUTES }}"
- name: ACCEPT_WEAK_PASSWORDS
value: "{{ .Values.ldapUserManager.env.ACCEPT_WEAK_PASSWORDS }}"
ports:
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
resources:
limits:
cpu: "{{ .Values.ldapUserManager.resources.limits.cpu }}"
memory: "{{ .Values.ldapUserManager.resources.limits.memory }}"
requests:
cpu: "{{ .Values.ldapUserManager.resources.requests.cpu }}"
memory: "{{ .Values.ldapUserManager.resources.requests.memory }}"
---
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-ldap-user-manager-svc
labels:
app: ldap-user-manager-svc
spec:
ports:
- name: http
port: 8080
protocol: TCP
targetPort: 80
- name: https
port: 8443
protocol: TCP
targetPort: 443
selector:
app: ldap-user-manager
type: ClusterIP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: ldap-user-manager-ingress
spec:
entryPoints:
- web
routes:
- kind: Rule
match: Host(`{{ .Values.host }}`) && PathPrefix(`/users`)
priority: 10
services:
- kind: Service
name: {{ .Release.Name }}-ldap-user-manager-svc
passHostHeader: true
port: 8080
{{- end }}