oc-deploy vanilla k8s docker

mr 2025-03-27 13:21:52 +01:00
parent 3b7c3a9526
commit 626a1b1f22
94 changed files with 864 additions and 1647 deletions

View File

@ -0,0 +1,50 @@
#!/bin/bash
REPOS=(
"oc-auth"
"oc-catalog"
"oc-datacenter"
"oc-front"
"oc-monitord"
"oc-peer"
"oc-shared"
"oc-scheduler"
"oc-schedulerd"
"oc-workflow"
"oc-workspace"
)
# Function to clone repositories
clone_repo() {
local repo_url="https://cloud.o-forge.io/core/$1.git"
local repo_name=$(basename "$repo_url" .git)
local branch=$2
echo "Processing repository: $repo_name"
if [ ! -d "$repo_name" ]; then
echo "Cloning repository: $repo_name"
git clone "$repo_url"
if [ $? -ne 0 ]; then
echo "Error cloning $repo_url"
exit 1
fi
else
echo "Repository '$repo_name' already exists. Pulling latest changes..."
fi
cd "$repo_name" && git pull origin "$branch" && cd ..
}
cd ..
# Iterate through each repository in the list
branch="main"
if [ -n "$1" ]; then
branch="$1"
fi
for repo in "${REPOS[@]}"; do
clone_repo "$repo" "$branch"
done
echo "All repositories processed successfully."

View File

@ -1,88 +0,0 @@
version: '3.8'
services:
traefik:
image: traefik:latest
command:
- "--api.insecure=true"
- "--providers.docker=true"
- "--entrypoints.web.address=:80"
ports:
- "80:80"
- "8080:8080"
volumes:
- /var/run/docker.sock:/var/run/docker.sock
mongo:
image: mongo:latest
ports:
- "27017:27017"
volumes:
- mongo-data:/data/db
labels:
- "traefik.enable=true"
- "traefik.http.routers.mongo.rule=PathPrefix(`/mongo`)"
- "traefik.http.services.mongo.loadbalancer.server.port=27017"
nats:
image: nats:latest
ports:
- "4222:4222"
labels:
- "traefik.enable=true"
- "traefik.http.routers.nats.rule=PathPrefix(`/nats`)"
- "traefik.http.services.nats.loadbalancer.server.port=4222"
zinc:
image: public.ecr.aws/zinclabs/zincsearch:latest
ports:
- "4080:4080"
labels:
- "traefik.enable=true"
- "traefik.http.routers.zinc.rule=PathPrefix(`/zinc`)"
- "traefik.http.services.zinc.loadbalancer.server.port=4080"
dex:
image: quay.io/dexidp/dex:latest
ports:
- "5556:5556"
volumes:
- ./dex/config.yaml:/etc/dex/cfg/config.yaml
command: ["dex", "serve", "/etc/dex/cfg/config.yaml"]
labels:
- "traefik.enable=true"
- "traefik.http.routers.dex.rule=PathPrefix(`/dex`)"
- "traefik.http.services.dex.loadbalancer.server.port=5556"
ldap:
image: bitnami/openldap
ports:
- "389:389"
environment:
- LDAP_ADMIN_USERNAME=admin
- LDAP_ADMIN_PASSWORD=adminpassword
- LDAP_USERS=user01,user02
- LDAP_PASSWORDS=password1,password2
grafana:
image: grafana/grafana:latest
ports:
- "3000:3000"
environment:
GF_SECURITY_ADMIN_PASSWORD: "admin"
labels:
- "traefik.enable=true"
- "traefik.http.routers.grafana.rule=PathPrefix(`/grafana`)"
- "traefik.http.services.grafana.loadbalancer.server.port=3000"
loki:
image: grafana/loki:latest
ports:
- "3100:3100"
labels:
- "traefik.enable=true"
- "traefik.http.routers.loki.rule=PathPrefix(`/loki`)"
- "traefik.http.services.loki.loadbalancer.server.port=3100"
volumes:
mongo-data:

31
docker/start.sh Executable file
View File

@ -0,0 +1,31 @@
#!/bin/bash
docker network create oc || true
docker compose down
cd ./tools && docker compose -f ./docker-compose.dev.yml up --force-recreate -d && cd ..
cd ./tools && docker compose -f ./docker-compose.traefik.yml up --force-recreate -d && cd ..
cd ../..
REPOS=(
"oc-auth"
"oc-catalog"
"oc-datacenter"
"oc-monitord"
"oc-peer"
"oc-shared"
"oc-scheduler"
"oc-schedulerd"
"oc-workflow"
"oc-workspace"
"oc-front"
)
for i in "${REPOS[@]}"
do
echo "Building $i"
docker kill "$i" || true
docker rm "$i" || true
cd ./$i
docker build . -t $i && docker compose up -d
cd ..
done

48
docker/stop.sh Executable file
View File

@ -0,0 +1,48 @@
#!/bin/bash
docker network rm oc || true
docker compose -f ./tools/docker-compose.traefik.yml down
TOOLS=(
"mongo"
"mongo-express"
"nats"
"loki"
"grafana"
"hydra-client"
"hydra"
"keto"
"ldap"
)
for i in "${TOOLS[@]}"
do
echo "kill $i"
docker kill "$i" || true
docker rm "$i" || true
done
cd ../..
REPOS=(
"oc-auth"
"oc-catalog"
"oc-datacenter"
"oc-monitord"
"oc-peer"
"oc-shared"
"oc-scheduler"
"oc-schedulerd"
"oc-workflow"
"oc-workspace"
"oc-front"
)
for i in "${REPOS[@]}"
do
echo "Kill $i"
cd ./$i
docker kill "$i" || true
docker rm "$i" || true
make purge || true
cd ..
done

View File

@ -0,0 +1,162 @@
version: '3.4'
services:
mongo:
image: 'mongo:latest'
networks:
- oc
ports:
- 27017:27017
container_name: mongo
volumes:
- oc-data:/data/db
- oc-data:/data/configdb
mongo-express:
image: "mongo-express:latest"
restart: always
depends_on:
- mongo
networks:
- oc
ports:
- 8081:8081
environment:
- ME_CONFIG_BASICAUTH_USERNAME=test
- ME_CONFIG_BASICAUTH_PASSWORD=test
nats:
image: 'nats:latest'
container_name: nats
ports:
- 4222:4222
command:
- "--debug"
networks:
- oc
loki:
image: 'grafana/loki'
container_name: loki
labels:
- "traefik.enable=true"
- "traefik.http.routers.loki.entrypoints=web"
- "traefik.http.routers.loki.rule=PathPrefix(`/tools/loki`)"
- "traefik.http.services.loki.loadbalancer.server.port=3100"
- "traefik.http.middlewares.loki-stripprefix.stripprefix.prefixes=/tools/loki"
- "traefik.http.routers.loki.middlewares=loki-stripprefix"
- "traefik.http.middlewares.loki.forwardauth.address=http://oc-auth:8080/oc/forward"
ports:
- "3100:3100"
networks:
- oc
grafana:
image: 'grafana/grafana'
container_name: grafana
ports:
- '3000:3000'
labels:
- "traefik.enable=true"
- "traefik.http.routers.grafana.entrypoints=web"
- "traefik.http.routers.grafana.rule=PathPrefix(`/tools/grafana`)"
- "traefik.http.services.grafana.loadbalancer.server.port=3000"
- "traefik.http.middlewares.grafana-stripprefix.stripprefix.prefixes=/tools/grafana"
- "traefik.http.routers.grafana.middlewares=grafana-stripprefix"
- "traefik.http.middlewares.grafana.forwardauth.address=http://oc-auth:8080/oc/forward"
networks:
- oc
volumes:
- ./conf/grafana_data_source.yml:/etc/grafana/provisioning/datasources/datasource.yml
environment:
- GF_SECURITY_ADMIN_PASSWORD=pfnirt # Change this to anything but admin to not have a password change page at startup
- GF_SECURITY_ADMIN_USER=admin
- GF_SECURITY_DISABLE_INITIAL_ADMIN_PASSWORD_CHANGE=true
hydra-client:
image: oryd/hydra:v2.2.0
container_name: hydra-client
environment:
HYDRA_ADMIN_URL: http://hydra:4445
ORY_SDK_URL: http://hydra:4445
command:
- create
- oauth2-client
- --skip-tls-verify
- --name
- test-client
- --secret
- oc-auth-got-secret
- --response-type
- id_token,token,code
- --grant-type
- implicit,refresh_token,authorization_code,client_credentials
- --scope
- openid,profile,email,roles
- --token-endpoint-auth-method
- client_secret_post
- --redirect-uri
- http://localhost:3000
networks:
- oc
deploy:
restart_policy:
condition: none
depends_on:
- hydra
healthcheck:
test: ["CMD", "curl", "-f", "http://hydra:4445"]
interval: 10s
timeout: 10s
retries: 10
hydra:
container_name: hydra
image: oryd/hydra:v2.2.0
environment:
SECRETS_SYSTEM: oc-auth-got-secret
LOG_LEAK_SENSITIVE_VALUES: true
# OAUTH2_TOKEN_HOOK_URL: http://oc-auth:8080/oc/claims
URLS_SELF_ISSUER: http://hydra:4444
URLS_SELF_PUBLIC: http://hydra:4444
WEBFINGER_OIDC_DISCOVERY_SUPPORTED_SCOPES: profile,email,phone,roles
WEBFINGER_OIDC_DISCOVERY_SUPPORTED_CLAIMS: name,family_name,given_name,nickname,email,phone_number
DSN: memory
command: serve all --dev
networks:
- oc
ports:
- "4444:4444"
- "4445:4445"
deploy:
restart_policy:
condition: on-failure
ldap:
image: pgarrett/ldap-alpine
container_name: ldap
volumes:
- "./ldap.ldif:/ldif/ldap.ldif"
networks:
- oc
ports:
- "390:389"
deploy:
restart_policy:
condition: on-failure
keto:
image: oryd/keto:v0.7.0-alpha.1-sqlite
ports:
- "4466:4466"
- "4467:4467"
command: serve -c /home/ory/keto.yml
restart: on-failure
volumes:
- type: bind
source: .
target: /home/ory
container_name: keto
networks:
- oc
volumes:
oc-data:
networks:
oc:
external: true

View File

@ -0,0 +1,24 @@
version: '3.4'
services:
traefik:
image: traefik:v2.10.4
container_name: traefik
restart: unless-stopped
networks:
- oc
command:
- "--api.insecure=true"
- "--providers.docker=true"
- "--entrypoints.web.address=:80"
ports:
- "80:8000" # Expose Traefik on port 8000
volumes:
- /var/run/docker.sock:/var/run/docker.sock
volumes:
oc-data:
networks:
oc:
external: true

18
docker/tools/keto.yml Normal file
View File

@ -0,0 +1,18 @@
version: v0.6.0-alpha.1
log:
level: debug
namespaces:
- id: 0
name: open-cloud
dsn: memory
serve:
read:
host: 0.0.0.0
port: 4466
write:
host: 0.0.0.0
port: 4467

24
docker/tools/ldap.ldif Normal file
View File

@ -0,0 +1,24 @@
dn: uid=admin,ou=Users,dc=example,dc=com
objectClass: inetOrgPerson
cn: Admin
sn: Istrator
uid: admin
userPassword: admin
mail: admin@example.com
ou: Users

dn: ou=AppRoles,dc=example,dc=com
objectClass: organizationalunit
ou: AppRoles
description: AppRoles

dn: ou=App1,ou=AppRoles,dc=example,dc=com
objectClass: organizationalunit
ou: App1
description: App1

dn: cn=traveler,ou=App1,ou=AppRoles,dc=example,dc=com
objectClass: groupofnames
cn: traveler
description: traveler
member: uid=admin,ou=Users,dc=example,dc=com

View File

@ -1,5 +0,0 @@
apiVersion: v2
name: oc-catalog
description: A Helm chart for deploying the oc-catalog application
version: 0.1.0
appVersion: "1.0"

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-oc-catalog
spec:
selector:
app: {{ .Chart.Name }}
ports:
- protocol: TCP
port: {{ .Values.service.port }}
targetPort: {{ .Values.service.targetPort }}
type: {{ .Values.service.type }}

View File

@ -1,33 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ .Release.Name }}-oc-catalog
labels:
app: oc-catalog
spec:
serviceName: "{{ .Release.Name }}-oc-catalog"
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: oc-catalog
template:
metadata:
labels:
app: oc-catalog
spec:
containers:
- name: oc-catalog
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
ports:
- containerPort: 8080
env:
- name: MONGO_DATABASE
value: "DC_myDC"
- name: MONGO_URI
value: "mongodb://mongo:27017"
imagePullSecrets:
{{- if .Values.imagePullSecrets }}
{{- range .Values.imagePullSecrets }}
- name: {{ .name }}
{{- end }}
{{- end }}

View File

@ -1,19 +0,0 @@
replicaCount: 1
image:
repository: registry.dev.svc.cluster.local:5000/oc-catalog
tag: latest
pullPolicy: IfNotPresent
service:
type: NodePort
port: 8087
targetPort: 8080
mongo:
database: DC_myDC
uri: mongodb://mongo:27017
imagePullSecrets:
- name: regcred

View File

@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -1,12 +0,0 @@
dependencies:
- name: oc-mongo
repository: file://../oc-mongo
version: 0.1.0
- name: oc-mongo-express
repository: file://../oc-mongo-express
version: 0.1.0
- name: oc-catalog
repository: file://../oc-catalog
version: 0.1.0
digest: sha256:036af8acf7fe0a73f039776d13f63aeb7530e7a8b0febb49fd5e8415ac6672c6
generated: "2024-08-27T14:34:41.6038407+02:00"

View File

@ -1,14 +0,0 @@
apiVersion: v2
name: oc-deploy
description: A Helm chart to deploy oc-mongo, oc-mongo-express, and oc-catalog together
version: 0.1.0
dependencies:
- name: oc-mongo
version: 0.1.0
repository: "file://../oc-mongo"
- name: oc-mongo-express
version: 0.1.0
repository: "file://../oc-mongo-express"
- name: oc-catalog
version: 0.1.0
repository: "file://../oc-catalog"

View File

@ -1,5 +0,0 @@
apiVersion: v2
name: oc-catalog
description: A Helm chart for deploying the oc-catalog application
version: 0.1.0
appVersion: "1.0"

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: oc-catalog
spec:
selector:
app: {{ .Chart.Name }}
ports:
- protocol: TCP
port: {{ .Values.service.port }}
targetPort: {{ .Values.service.targetPort }}
type: {{ .Values.service.type }}

View File

@ -1,33 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ .Release.Name }}-oc-catalog
labels:
app: oc-catalog
spec:
serviceName: "oc-catalog"
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: oc-catalog
template:
metadata:
labels:
app: oc-catalog
spec:
containers:
- name: oc-catalog
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
ports:
- containerPort: 8080
env:
- name: MONGO_DATABASE
value: "DC_myDC"
- name: MONGO_URI
value: "mongodb://{{ .Release.Name }}-mongo:27017"
imagePullSecrets:
{{- if .Values.imagePullSecrets }}
{{- range .Values.imagePullSecrets }}
- name: {{ .name }}
{{- end }}
{{- end }}

View File

@ -1,19 +0,0 @@
replicaCount: 1
image:
repository: registry.dev.svc.cluster.local:5000/oc-catalog
tag: latest
pullPolicy: IfNotPresent
service:
type: NodePort
port: 8087
targetPort: 8080
mongo:
database: DC_myDC
uri: mongodb://oc-deploy-mongo:27017
imagePullSecrets:
- name: regcred

View File

@ -1,5 +0,0 @@
apiVersion: v2
name: oc-mongo-express
description: A Helm chart for deploying mongo-express
version: 0.1.0
appVersion: "1.0"

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: mongo-express
spec:
selector:
app: mongo-express
ports:
- protocol: TCP
port: {{ .Values.service.port }}
targetPort: {{ .Values.service.targetPort }}
type: {{ .Values.service.type }}

View File

@ -1,39 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ .Release.Name }}-mongo-express
labels:
app: mongo-express
spec:
serviceName: "mongo-express"
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: mongo-express
template:
metadata:
labels:
app: mongo-express
spec:
containers:
- name: mongo-express
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
ports:
- containerPort: {{ .Values.service.targetPort }}
env:
- name: ME_CONFIG_BASICAUTH_USERNAME
valueFrom:
secretKeyRef:
name: mongo-secret
key: {{ .Values.secret.usernameKey }}
- name: ME_CONFIG_BASICAUTH_PASSWORD
valueFrom:
secretKeyRef:
name: mongo-secret
key: {{ .Values.secret.passwordKey }}
imagePullSecrets:
{{- if .Values.imagePullSecrets }}
{{- range .Values.imagePullSecrets }}
- name: {{ .name }}
{{- end }}
{{- end }}

View File

@ -1,18 +0,0 @@
replicaCount: 1
image:
repository: mongo-express
tag: latest
pullPolicy: IfNotPresent
service:
port: 8081
targetPort: 8081
type: NodePort
imagePullSecrets:
- name: my-registry-key
secret:
usernameKey: mongo-username
passwordKey: mongo-password

View File

@ -1,5 +0,0 @@
apiVersion: v2
name: oc-mongo
description: A Helm chart for deploying the oc-mongo component
version: 0.1.0
appVersion: "1.0"

View File

@ -1,10 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Values.persistence.name }}
spec:
accessModes:
- {{ .Values.persistence.accessMode }}
resources:
requests:
storage: {{ .Values.persistence.size }}

View File

@ -1,8 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-mongo-secret
type: Opaque
data:
username: {{ .Values.secret.username }}
password: {{ .Values.secret.password }}

View File

@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: mongo
spec:
selector:
app: mongo
ports:
- protocol: TCP
port: {{ .Values.service.port }}
targetPort: {{ .Values.service.port }}

View File

@ -1,31 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ .Release.Name }}-mongo
labels:
app: mongo
spec:
serviceName: "mongo"
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: mongo
template:
metadata:
labels:
app: mongo
spec:
containers:
- name: mongo
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
ports:
- containerPort: 27017
volumeMounts:
- name: mongo-persistent-storage
mountPath: /data/db
- name: mongo-persistent-storage
mountPath: /data/configdb
volumes:
- name: mongo-persistent-storage
persistentVolumeClaim:
claimName: {{ .Values.persistence.name }}

View File

@ -1,19 +0,0 @@
replicaCount: 1
image:
repository: mongo
tag: latest
pullPolicy: IfNotPresent
service:
port: 27017
persistence:
name: mongo-pvc-helm
enabled: true
accessMode: ReadWriteOnce
size: 1Gi
secret:
username: dGVzdA== # base64 encoding of 'test'
password: dGVzdA== # base64 encoding of 'test'

View File

@ -1,48 +0,0 @@
oc-mongo:
replicaCount: 1
image:
repository: registry.dev.svc.cluster.local:5000/mongo
tag: latest
pullPolicy: IfNotPresent
service:
port: 27017
persistence:
name: mongo-pvc-helm
enabled: true
accessMode: ReadWriteOnce
size: 1Gi
secret:
username: dGVzdA== # base64 encoding of 'test'
password: dGVzdA== # base64 encoding of 'test'
oc-mongo-express:
replicaCount: 1
image:
repository: registry.dev.svc.cluster.local:5000/mongo-express
tag: latest
pullPolicy: IfNotPresent
service:
port: 8081
targetPort: 8081
type: NodePort
imagePullSecrets:
- name: regcred
secret:
usernameKey: mongo-username
passwordKey: mongo-password
oc-catalog:
replicaCount: 1
image:
repository: registry.dev.svc.cluster.local:5000/oc-catalog
tag: latest
pullPolicy: IfNotPresent
service:
type: NodePort
port: 8087
targetPort: 8080
mongo:
database: DC_myDC
uri: mongodb://oc-catalog-mongo:27017
imagePullSecrets:
- name: regcred

View File

@ -1,5 +0,0 @@
apiVersion: v2
name: oc-mongo-express
description: A Helm chart for deploying mongo-express
version: 0.1.0
appVersion: "1.0"

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-mongo-express
spec:
selector:
app: mongo-express
ports:
- protocol: TCP
port: {{ .Values.service.port }}
targetPort: {{ .Values.service.targetPort }}
type: {{ .Values.service.type }}

View File

@ -1,39 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ .Release.Name }}-mongo-express
labels:
app: mongo-express
spec:
serviceName: "{{ .Release.Name }}-mongo-express"
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: mongo-express
template:
metadata:
labels:
app: mongo-express
spec:
containers:
- name: mongo-express
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
ports:
- containerPort: {{ .Values.service.targetPort }}
env:
- name: ME_CONFIG_BASICAUTH_USERNAME
valueFrom:
secretKeyRef:
name: mongo-secret
key: {{ .Values.secret.usernameKey }}
- name: ME_CONFIG_BASICAUTH_PASSWORD
valueFrom:
secretKeyRef:
name: mongo-secret
key: {{ .Values.secret.passwordKey }}
imagePullSecrets:
{{- if .Values.imagePullSecrets }}
{{- range .Values.imagePullSecrets }}
- name: {{ .name }}
{{- end }}
{{- end }}

View File

@ -1,18 +0,0 @@
replicaCount: 1
image:
repository: mongo-express
tag: latest
pullPolicy: IfNotPresent
service:
port: 8081
targetPort: 8081
type: NodePort
imagePullSecrets:
- name: my-registry-key
secret:
usernameKey: mongo-username
passwordKey: mongo-password

View File

@ -1,5 +0,0 @@
apiVersion: v2
name: oc-mongo
description: A Helm chart for deploying the oc-mongo component
version: 0.1.0
appVersion: "1.0"

View File

@ -1,10 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Values.persistence.name }}
spec:
accessModes:
- {{ .Values.persistence.accessMode }}
resources:
requests:
storage: {{ .Values.persistence.size }}

View File

@ -1,8 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-mongo-secret
type: Opaque
data:
username: {{ .Values.secret.username }}
password: {{ .Values.secret.password }}

View File

@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: mongo
spec:
selector:
app: mongo
ports:
- protocol: TCP
port: {{ .Values.service.port }}
targetPort: {{ .Values.service.port }}

View File

@ -1,31 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ .Release.Name }}-mongo
labels:
app: mongo
spec:
serviceName: "{{ .Release.Name }}-mongo"
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: mongo
template:
metadata:
labels:
app: mongo
spec:
containers:
- name: mongo
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
ports:
- containerPort: 27017
volumeMounts:
- name: mongo-persistent-storage
mountPath: /data/db
- name: mongo-persistent-storage
mountPath: /data/configdb
volumes:
- name: mongo-persistent-storage
persistentVolumeClaim:
claimName: {{ .Values.persistence.name }}

View File

@ -1,19 +0,0 @@
replicaCount: 1
image:
repository: mongo
tag: latest
pullPolicy: IfNotPresent
service:
port: 27017
persistence:
name: mongo-pvc-helm
enabled: true
accessMode: ReadWriteOnce
size: 1Gi
secret:
username: dGVzdA== # base64 encoding of 'test'
password: dGVzdA== # base64 encoding of 'test'

View File

@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -1,24 +0,0 @@
apiVersion: v2
name: occhart
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@ -1,32 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: dex
labels:
app: dex
spec:
replicas: 1
selector:
matchLabels:
app: dex
template:
metadata:
labels:
app: dex
spec:
containers:
- name: dex
image: quay.io/dexidp/dex:v2.27.0
ports:
- containerPort: 5556
args:
- serve
- /etc/dex/cfg/config.yaml
volumeMounts:
- mountPath: /etc/dex/cfg
name: config
volumes:
- name: config
configMap:
name: dex-config

View File

@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: dex
labels:
app: dex
spec:
ports:
- port: 5556
selector:
app: dex

View File

@ -1,21 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: grafana
labels:
app: grafana
spec:
replicas: 1
selector:
matchLabels:
app: grafana
template:
metadata:
labels:
app: grafana
spec:
containers:
- name: grafana
image: grafana/grafana:7.5.0
ports:
- containerPort: 3000

View File

@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: grafana
labels:
app: grafana
spec:
ports:
- port: 3000
selector:
app: grafana

View File

@ -1,21 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: ldap
labels:
app: ldap
spec:
replicas: 1
selector:
matchLabels:
app: ldap
template:
metadata:
labels:
app: ldap
spec:
containers:
- name: ldap
image: osixia/openldap:1.5.0
ports:
- containerPort: 389

View File

@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: ldap
labels:
app: ldap
spec:
ports:
- port: 389
selector:
app: ldap

View File

@ -1,21 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: loki
labels:
app: loki
spec:
replicas: 1
selector:
matchLabels:
app: loki
template:
metadata:
labels:
app: loki
spec:
containers:
- name: loki
image: grafana/loki:2.2.0
ports:
- containerPort: 3100

View File

@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: loki
labels:
app: loki
spec:
ports:
- port: 3100
selector:
app: loki

View File

@ -1,21 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mongo
labels:
app: mongo
spec:
replicas: 1
selector:
matchLabels:
app: mongo
template:
metadata:
labels:
app: mongo
spec:
containers:
- name: mongo
image: mongo:4.4
ports:
- containerPort: 27017

View File

@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: mongo
labels:
app: mongo
spec:
ports:
- port: 27017
selector:
app: mongo

View File

@ -1,21 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nats
labels:
app: nats
spec:
replicas: 1
selector:
matchLabels:
app: nats
template:
metadata:
labels:
app: nats
spec:
containers:
- name: nats
image: nats:2.1.9
ports:
- containerPort: 4222

View File

@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: nats
labels:
app: nats
spec:
ports:
- port: 4222
selector:
app: nats

View File

@ -1,37 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: traefik
labels:
app: traefik
spec:
replicas: 1
selector:
matchLabels:
app: traefik
template:
metadata:
labels:
app: traefik
spec:
containers:
- name: traefik
image: traefik:v2.4
ports:
- name: web
containerPort: 80
- name: admin
containerPort: 8080
args:
- --entrypoints.web.address=:80
- --entrypoints.websecure.address=:443
- --providers.kubernetescrd
- --api
volumeMounts:
- mountPath: /etc/traefik
name: traefik-config
volumes:
- name: traefik-config
configMap:
name: traefik-config

View File

@ -1,81 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: traefik-ingress
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: web
spec:
rules:
- host: <your-domain>
http:
paths:
- path: /front
pathType: Prefix
backend:
service:
name: front-service
port:
number: 80
- path: /back1
pathType: Prefix
backend:
service:
name: back1-service
port:
number: 80
- path: /back2
pathType: Prefix
backend:
service:
name: back2-service
port:
number: 80
- path: /mongo
pathType: Prefix
backend:
service:
name: mongo
port:
number: 27017
- path: /nats
pathType: Prefix
backend:
service:
name: nats
port:
number: 4222
- path: /zinc
pathType: Prefix
backend:
service:
name: zinc
port:
number: 4080
- path: /dex
pathType: Prefix
backend:
service:
name: dex
port:
number: 5556
- path: /ldap
pathType: Prefix
backend:
service:
name: ldap
port:
number: 389
- path: /grafana
pathType: Prefix
backend:
service:
name: grafana
port:
number: 3000
- path: /loki
pathType: Prefix
backend:
service:
name: loki
port:
number: 3100

View File

@ -1,17 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: traefik
labels:
app: traefik
spec:
type: LoadBalancer
ports:
- port: 80
name: web
targetPort: 80
- port: 8080
name: admin
targetPort: 8080
selector:
app: traefik

View File

@ -1,21 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: zinc
labels:
app: zinc
spec:
replicas: 1
selector:
matchLabels:
app: zinc
template:
metadata:
labels:
app: zinc
spec:
containers:
- name: zinc
image: public.ecr.aws/zinclabs/zinc:latest
ports:
- containerPort: 4080

View File

@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: zinc
labels:
app: zinc
spec:
ports:
- port: 4080
selector:
app: zinc

View File

@ -1,22 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "occhart.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "occhart.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "occhart.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "occhart.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}

View File

@ -1,62 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "occhart.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "occhart.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "occhart.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "occhart.labels" -}}
helm.sh/chart: {{ include "occhart.chart" . }}
{{ include "occhart.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "occhart.selectorLabels" -}}
app.kubernetes.io/name: {{ include "occhart.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "occhart.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "occhart.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@ -1,68 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "occhart.fullname" . }}
labels:
{{- include "occhart.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "occhart.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "occhart.labels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "occhart.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: {{ .Values.service.port }}
protocol: TCP
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 12 }}
readinessProbe:
{{- toYaml .Values.readinessProbe | nindent 12 }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@ -1,32 +0,0 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "occhart.fullname" . }}
labels:
{{- include "occhart.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "occhart.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}

View File

@ -1,61 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "occhart.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
{{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "occhart.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "occhart.fullname" . }}
labels:
{{- include "occhart.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "occhart.selectorLabels" . | nindent 4 }}

View File

@ -1,13 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "occhart.serviceAccountName" . }}
labels:
{{- include "occhart.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}

View File

@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "occhart.fullname" . }}-test-connection"
labels:
{{- include "occhart.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "occhart.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never

View File

@ -1,107 +0,0 @@
# Default values for occhart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: nginx
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Automatically mount a ServiceAccount's API credentials?
automount: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
# secret:
# secretName: mysecret
# optional: false
# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
# mountPath: "/etc/foo"
# readOnly: true
nodeSelector: {}
tolerations: []
affinity: {}

46
k8s/README.md Normal file
View File

@ -0,0 +1,46 @@
## Deploy the opencloud chart
```
./start.sh <mode: dev|prod, default: dev> <branch, default: main>
```
Feel free to modify or create a new opencloud/dev-values.yaml. The provided setup should work out of the box, but it is not suitable for production use.
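For instance, a minimal override sketch (the key names below are illustrative assumptions, not necessarily the chart's actual schema; check the provided opencloud/dev-values.yaml for the real keys):
```
# hypothetical dev overrides -- adapt the key names to the actual chart values
replicaCount: 1
image:
  tag: latest
  pullPolicy: IfNotPresent
service:
  type: NodePort
```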
## Hostname settings
Edit your /etc/hosts file and add the following line:
```
127.0.0.1 beta.opencloud.com
```
## Done
Everything should be operational now. Go to http://beta.opencloud.com and enjoy the ride.
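A quick sanity check from the host, once the hosts entry above is in place (illustrative, assuming the front is served through Traefik on port 80):
```
curl -I http://beta.opencloud.com
```
A 200 or a redirect response means Traefik is routing requests to the front service.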
## Prebuilt microservices deployment procedure
TODO
## First steps
1. Go to http://beta.opencloud.com/users
2. Log in using the default user/password combo ldapadmin/ldapadmin
3. Create a new user, or change the default one
4. Go to http://beta.opencloud.com
5. Log in using your fresh credentials
6. Do stuff

You can go to http://beta.opencloud.com/mongoexpress for mongo-express web client access (default login/password is test/testme).
You can go to http://localhost/dashboard/ for access to the Traefik reverse proxy dashboard.

21
k8s/start.sh Executable file
View File

@ -0,0 +1,21 @@
#!/bin/bash
mode=${1:-dev}
branch=${2:-main}
cd ../..
if [ ! -d "oc-k8s" ]; then
echo "Cloning repository: oc-k8s"
git clone "https://cloud.o-forge.io/core/oc-k8s.git"
if [ $? -ne 0 ]; then
echo "Error cloning oc-k8s"
exit 1
fi
else
echo "Repository 'oc-k8s' already exists. Pulling latest changes..."
fi
cd "oc-k8s" && git checkout "$branch" && git pull
./create_kind_cluster.sh
./clone_opencloud_microservices.sh $branch
./build_opencloud_microservices.sh
./install.sh $mode

20
k8s/stop.sh Executable file
View File

@ -0,0 +1,20 @@
#!/bin/bash
mode=${1:-dev}
branch=${2:-main}
cd ../..
if [ ! -d "oc-k8s" ]; then
echo "Cloning repository: oc-k8s"
git clone "https://cloud.o-forge.io/core/oc-k8s.git"
if [ $? -ne 0 ]; then
echo "Error cloning oc-k8s"
exit 1
fi
else
echo "Repository 'oc-k8s' already exists. Pulling latest changes..."
fi
cd "oc-k8s" && git checkout "$branch" && git pull
./uninstall.sh $mode
./delete_kind_cluster.sh

View File

@ -1,29 +0,0 @@
# oc-catalog-deployment.yml
apiVersion: apps/v1
kind: Deployment
metadata:
name: oc-catalog
labels:
app: oc-catalog
spec:
replicas: 1
selector:
matchLabels:
app: oc-catalog
template:
metadata:
labels:
app: oc-catalog
spec:
containers:
- name: oc-catalog
image: registry.dev.svc.cluster.local:5000/oc-catalog:latest
ports:
- containerPort: 8080
env:
- name: MONGO_DATABASE
value: "DC_myDC"
- name: MONGO_URI
value: "mongodb://mongo:27017"
imagePullSecrets:
- name: regcred

View File

@ -1,13 +0,0 @@
# oc-catalog-service.yml
apiVersion: v1
kind: Service
metadata:
name: oc-catalog
spec:
selector:
app: oc-catalog
ports:
- protocol: TCP
port: 8087
targetPort: 8080
type: NodePort # Optional, useful for accessing via Minikube IP and NodePort

View File

@ -1,32 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mongo-express
spec:
replicas: 1
selector:
matchLabels:
app: mongo-express
template:
metadata:
labels:
app: mongo-express
spec:
containers:
- name: mongo-express
image: mongo-express:latest
ports:
- containerPort: 8081
env:
- name: ME_CONFIG_BASICAUTH_USERNAME
valueFrom:
secretKeyRef:
name: mongo-secret
key: mongo-username
- name: ME_CONFIG_BASICAUTH_PASSWORD
valueFrom:
secretKeyRef:
name: mongo-secret
key: mongo-password
imagePullSecrets:
- name: my-registry-key

View File

@ -1,13 +0,0 @@
# mongo-express-service.yml
apiVersion: v1
kind: Service
metadata:
name: mongo-express
spec:
selector:
app: mongo-express
ports:
- protocol: TCP
port: 8081
targetPort: 8081
type: NodePort # Optional, useful for accessing via Minikube IP and NodePort

View File

@ -1,42 +0,0 @@
# mongo-deployment.yml
apiVersion: apps/v1
kind: Deployment
metadata:
name: mongo
labels:
app: mongo
spec:
replicas: 1
selector:
matchLabels:
app: mongo
template:
metadata:
labels:
app: mongo
spec:
containers:
- name: mongo
image: mongo:latest
ports:
- containerPort: 27017
volumeMounts:
- name: mongo-persistent-storage
mountPath: /data/db
- name: mongo-persistent-storage
mountPath: /data/configdb
volumes:
- name: mongo-persistent-storage
persistentVolumeClaim:
claimName: mongo-pvc
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mongo-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi

View File

@ -1,9 +0,0 @@
# mongo-secret.yml
apiVersion: v1
kind: Secret
metadata:
name: mongo-secret
type: Opaque
data:
username: dGVzdA== # base64 encoding of 'test'
password: dGVzdA== # base64 encoding of 'test'

View File

@ -1,12 +0,0 @@
# mongo-service.yml
apiVersion: v1
kind: Service
metadata:
name: mongo
spec:
selector:
app: mongo
ports:
- protocol: TCP
port: 27017
targetPort: 27017

View File

@ -1,11 +0,0 @@
#!/bin/bash
branch = "main"
if [ -n "$1" ]; then
branch = $1
fi
arr=("oc-catalog" "oc-datacenter" "oc-peer" "oc-scheduler" "oc-shared" "oc-workflow" "oc-workspace" "oc-auth" "oc-schedulerd" "oc-front")
for i in "${arr[@]}"
do
git pull origin $branch
done

View File

@ -1,14 +0,0 @@
#!/bin/bash
docker network create catalog | true
arr=("oc-catalog" "oc-datacenter" "oc-peer" "oc-scheduler" "oc-shared" "oc-workflow" "oc-workspace" "oc-auth" "oc-schedulerd" "oc-front")
for i in "${arr[@]}"
do
echo $i
docker kill $i | true
cd ../$i && make dev | true
done
docker compose -f ./traefik-dev-reverse/docker-compose.yml up -d

View File

7
vanilla/README.md Normal file
View File

@ -0,0 +1,7 @@
# RUN
- `./start.sh <YOUR INTERNET IP>`
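- For example, with an illustrative LAN IP: `./start.sh 192.168.1.42`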
Then open http://localhost:8000 in your browser.
# STOP
- `./stop.sh`

38
vanilla/start.sh Executable file
View File

@ -0,0 +1,38 @@
#!/bin/bash
if [ -z "$1" ]; then
echo "Usage: ./start.sh <YOUR INTERNET IP>"
exit 1
fi
echo "Stopping all services..."
./stop.sh > /dev/null 2>&1
echo "Starting all services"
cp ./traefik-dev-reverse/template_dynamic.yml ./traefik-dev-reverse/dynamic.yml
sed -i "s/localhost/$1/g" ./traefik-dev-reverse/dynamic.yml
docker network create oc || true
cd ./tools && docker compose -f ./docker-compose.dev.yml up --force-recreate -d
cd .. && docker compose -f ./traefik-dev-reverse/docker-compose.yml up --force-recreate -d
cd ../..
REPOS=(
"oc-auth"
"oc-catalog"
"oc-datacenter"
"oc-monitord"
"oc-peer"
"oc-shared"
"oc-scheduler"
"oc-schedulerd"
"oc-workflow"
"oc-workspace"
"oc-front"
)
for i in "${REPOS[@]}"
do
echo "Building $i"
cd ./$i
make dev &
cd ..
done

47
vanilla/stop.sh Executable file
View File

@ -0,0 +1,47 @@
#!/bin/bash
docker network rm oc || true
docker compose -f ./traefik-dev-reverse/docker-compose.yml rm -s -v -f
TOOLS=(
"mongo"
"mongo-express"
"nats"
"loki"
"grafana"
"keto"
"ldap"
)
for i in "${TOOLS[@]}"
do
echo "kill $i"
docker kill "$i" || true
docker rm "$i" || true
done
cd ../..
REPOS=(
"oc-auth"
"oc-catalog"
"oc-datacenter"
"oc-monitord"
"oc-peer"
"oc-shared"
"oc-scheduler"
"oc-schedulerd"
"oc-workflow"
"oc-workspace"
"oc-front"
)
for i in "${REPOS[@]}"
do
echo "kill $i"
docker kill "$i" || true
docker rm "$i" || true
cd "./$i"
make purge || true
cd ..
done

View File

@ -0,0 +1,104 @@
version: '3.4'
services:
mongo:
image: 'mongo:latest'
networks:
- oc
ports:
- 27017:27017
container_name: mongo
volumes:
- oc-data:/data/db
- oc-data:/data/configdb
mongo-express:
image: "mongo-express:latest"
restart: always
depends_on:
- mongo
networks:
- oc
ports:
- 8081:8081
environment:
- ME_CONFIG_BASICAUTH_USERNAME=test
- ME_CONFIG_BASICAUTH_PASSWORD=test
nats:
image: 'nats:latest'
container_name: nats
ports:
- 4222:4222
command:
- "--debug"
networks:
- oc
loki:
image: 'grafana/loki'
container_name: loki
labels:
- "traefik.enable=true"
- "traefik.http.routers.loki.entrypoints=web"
- "traefik.http.routers.loki.rule=PathPrefix(`/tools/loki`)"
- "traefik.http.services.loki.loadbalancer.server.port=3100"
- "traefik.http.middlewares.loki-stripprefix.stripprefix.prefixes=/tools/loki"
- "traefik.http.routers.loki.middlewares=loki-stripprefix"
- "traefik.http.middlewares.loki.forwardauth.address=http://localhost:8094/oc/forward"
ports:
- "3100:3100"
networks:
- oc
grafana:
image: 'grafana/grafana'
container_name: grafana
ports:
- '3000:3000'
labels:
- "traefik.enable=true"
- "traefik.http.routers.grafana.entrypoints=web"
- "traefik.http.routers.grafana.rule=PathPrefix(`/tools/grafana`)"
- "traefik.http.services.grafana.loadbalancer.server.port=3000"
- "traefik.http.middlewares.grafana-stripprefix.stripprefix.prefixes=/tools/grafana"
- "traefik.http.routers.grafana.middlewares=grafana-stripprefix"
- "traefik.http.middlewares.grafana.forwardauth.address=http://localhost:8094/oc/forward"
networks:
- oc
volumes:
- ./conf/grafana_data_source.yml:/etc/grafana/provisioning/datasources/datasource.yml
environment:
- GF_SECURITY_ADMIN_PASSWORD=pfnirt # Change this to anything but admin to not have a password change page at startup
- GF_SECURITY_ADMIN_USER=admin
- GF_SECURITY_DISABLE_INITIAL_ADMIN_PASSWORD_CHANGE=true
ldap:
image: pgarrett/ldap-alpine
container_name: ldap
volumes:
- "./ldap.ldif:/ldif/ldap.ldif"
networks:
- oc
ports:
- "390:389"
deploy:
restart_policy:
condition: on-failure
keto:
image: oryd/keto:v0.7.0-alpha.1-sqlite
ports:
- "4466:4466"
- "4467:4467"
command: serve -c /home/ory/keto.yml
restart: on-failure
volumes:
- type: bind
source: .
target: /home/ory
container_name: keto
networks:
- oc
volumes:
oc-data:
networks:
oc:
external: true

18
vanilla/tools/keto.yml Normal file
View File

@ -0,0 +1,18 @@
version: v0.6.0-alpha.1
log:
level: debug
namespaces:
- id: 0
name: open-cloud
dsn: memory
serve:
read:
host: 0.0.0.0
port: 4466
write:
host: 0.0.0.0
port: 4467

24
vanilla/tools/ldap.ldif Normal file
View File

@ -0,0 +1,24 @@
dn: uid=admin,ou=Users,dc=example,dc=com
objectClass: inetOrgPerson
cn: Admin
sn: Istrator
uid: admin
userPassword: admin
mail: admin@example.com
ou: Users

dn: ou=AppRoles,dc=example,dc=com
objectClass: organizationalunit
ou: AppRoles
description: AppRoles

dn: ou=App1,ou=AppRoles,dc=example,dc=com
objectClass: organizationalunit
ou: App1
description: App1

dn: cn=traveler,ou=App1,ou=AppRoles,dc=example,dc=com
objectClass: groupofnames
cn: traveler
description: traveler
member: uid=admin,ou=Users,dc=example,dc=com

View File

@ -68,75 +68,72 @@ http:
workspace-service:
loadBalancer:
servers:
- url: "http://localhost:8089"
- url: "http://192.168.1.169:8089"
workflow-service:
loadBalancer:
servers:
- url: "http://localhost:8088"
- url: "http://192.168.1.169:8088"
shared-service:
loadBalancer:
servers:
- url: "http://localhost:8091"
- url: "http://192.168.1.169:8091"
scheduler-service:
loadBalancer:
servers:
- url: "http://localhost:8090"
- url: "http://192.168.1.169:8090"
peer-service:
loadBalancer:
servers:
- url: "http://localhost:8093"
- url: "http://192.168.1.169:8093"
datacenter-service:
loadBalancer:
servers:
- url: "http://localhost:8092"
- url: "http://192.168.1.169:8092"
catalog-service:
loadBalancer:
servers:
- url: "http://localhost:8087"
- url: "http://192.168.1.169:8087"
auth-service:
loadBalancer:
servers:
- url: "http://localhost:8094"
- url: "http://192.168.1.169:8094"
front-service:
loadBalancer:
servers:
- url: "http://localhost:8080"
- url: "http://192.168.1.169:8080"
middlewares:
workspace:
forwardauth:
address: "http://localhost:8094/oc/forward"
address: "http://192.168.1.169:8094/oc/forward"
workflow:
forwardauth:
address: "http://localhost:8094/oc/forward"
address: "http://192.168.1.169:8094/oc/forward"
shared:
forwardauth:
address: "http://localhost:8094/oc/forward"
address: "http://192.168.1.169:8094/oc/forward"
scheduler:
forwardauth:
address: "http://localhost:8094/oc/forward"
address: "http://192.168.1.169:8094/oc/forward"
peer:
forwardauth:
address: "http://localhost:8094/oc/forward"
address: "http://192.168.1.169:8094/oc/forward"
datacenter:
forwardauth:
address: "http://localhost:8094/oc/forward"
address: "http://192.168.1.169:8094/oc/forward"
catalog:
forwardauth:
address: "http://localhost:8094/oc/forward"
address: "http://192.168.1.169:8094/oc/forward"
auth:
forwardauth:
address: "http://localhost:8094/oc/forward"
auth:
forwardauth:
address: "http://localhost:8094/oc/forward"
address: "http://192.168.1.169:8094/oc/forward"
replace-workspace:
replacePathRegex:
regex: "^/workspace(.*)"
replacement: "/oc$1"
replace-workflow:
replacePathRegex:
regex: "^/workspace(.*)"
regex: "^/workflow(.*)"
replacement: "/oc$1"
replace-shared:
replacePathRegex:

View File

@ -0,0 +1,164 @@
http:
routers:
workspace-router:
rule: "PathPrefix(`/workspace`)"
entryPoints:
- "web"
service: workspace-service
middlewares:
- replace-workspace
workflow-router:
rule: "PathPrefix(`/workflow`)"
entryPoints:
- "web"
service: workflow-service
middlewares:
- replace-workflow
shared-router:
rule: "PathPrefix(`/shared`)"
entryPoints:
- "web"
service: shared-service
middlewares:
- replace-shared
scheduler-router:
rule: "PathPrefix(`/scheduler`)"
entryPoints:
- "web"
service: scheduler-service
middlewares:
- replace-scheduler
peer-router:
rule: "PathPrefix(`/peer`)"
entryPoints:
- "web"
service: peer-service
middlewares:
- replace-peer
datacenter-router:
rule: "PathPrefix(`/datacenter`)"
entryPoints:
- "web"
service: datacenter-service
middlewares:
- replace-datacenter
catalog-router:
rule: "PathPrefix(`/catalog`)"
entryPoints:
- "web"
service: catalog-service
middlewares:
- replace-catalog
auth-router:
rule: "PathPrefix(`/auth`)"
entryPoints:
- "web"
service: auth-service
middlewares:
- replace-auth
front-router:
rule: "PathPrefix(`/`)"
entryPoints:
- "web"
service: front-service
middlewares:
- replace-front
services:
workspace-service:
loadBalancer:
servers:
- url: "http://localhost:8089"
workflow-service:
loadBalancer:
servers:
- url: "http://localhost:8088"
shared-service:
loadBalancer:
servers:
- url: "http://localhost:8091"
scheduler-service:
loadBalancer:
servers:
- url: "http://localhost:8090"
peer-service:
loadBalancer:
servers:
- url: "http://localhost:8093"
datacenter-service:
loadBalancer:
servers:
- url: "http://localhost:8092"
catalog-service:
loadBalancer:
servers:
- url: "http://localhost:8087"
auth-service:
loadBalancer:
servers:
- url: "http://localhost:8094"
front-service:
loadBalancer:
servers:
- url: "http://localhost:8080"
middlewares:
workspace:
forwardauth:
address: "http://localhost:8094/oc/forward"
workflow:
forwardauth:
address: "http://localhost:8094/oc/forward"
shared:
forwardauth:
address: "http://localhost:8094/oc/forward"
scheduler:
forwardauth:
address: "http://localhost:8094/oc/forward"
peer:
forwardauth:
address: "http://localhost:8094/oc/forward"
datacenter:
forwardauth:
address: "http://localhost:8094/oc/forward"
catalog:
forwardauth:
address: "http://localhost:8094/oc/forward"
auth:
forwardauth:
address: "http://localhost:8094/oc/forward"
replace-workspace:
replacePathRegex:
regex: "^/workspace(.*)"
replacement: "/oc$1"
replace-workflow:
replacePathRegex:
regex: "^/workflow(.*)"
replacement: "/oc$1"
replace-shared:
replacePathRegex:
regex: "^/shared(.*)"
replacement: "/oc$1"
replace-scheduler:
replacePathRegex:
regex: "^/scheduler(.*)"
replacement: "/oc$1"
replace-peer:
replacePathRegex:
regex: "^/peer(.*)"
replacement: "/oc$1"
replace-datacenter:
replacePathRegex:
regex: "^/datacenter(.*)"
replacement: "/oc$1"
replace-catalog:
replacePathRegex:
regex: "^/catalog(.*)"
replacement: "/oc$1"
replace-auth:
replacePathRegex:
regex: "^/auth(.*)"
replacement: "/oc$1"
replace-front:
stripprefix:
prefixes: "/"