Compare commits: main...feat/chart
2 commits: 864ff3baba, d2f602bf79
.gitignore (vendored) — 1 line

@@ -1 +0,0 @@
k8s/deployed_config
@@ -1,50 +0,0 @@
#!/bin/bash

REPOS=(
  "oc-auth"
  "oc-catalog"
  "oc-datacenter"
  "oc-front"
  "oc-monitord"
  "oc-peer"
  "oc-shared"
  "oc-scheduler"
  "oc-schedulerd"
  "oc-workflow"
  "oc-workspace"
)

# Function to clone repositories
clone_repo() {
  local repo_url="https://cloud.o-forge.io/core/$1.git"
  local repo_name=$(basename "$repo_url" .git)
  local branch=$2
  echo "Processing repository: $repo_name"

  if [ ! -d "$repo_name" ]; then
    echo "Cloning repository: $repo_name"
    git clone "$repo_url"
    if [ $? -ne 0 ]; then
      echo "Error cloning $repo_url"
      exit 1
    fi
  fi
  echo "Repository '$repo_name' already exists. Pulling latest changes..."
  cd "$repo_name" && git pull origin $branch && cd ..
}

cd ..
# Iterate through each repository in the list
branch="main"
if [ -n "$1" ]; then
  branch=$1
fi
for repo in "${REPOS[@]}"; do
  clone_repo $repo $branch
done

echo "All repositories processed successfully."
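The removed helper above takes a single optional argument, the branch to pull; a minimal invocation sketch (the file name `clone_opencloud_microservices.sh` comes from `k8s/start.sh` later in this diff):

```
# Clone or update every micro-service on the default branch (main)
./clone_opencloud_microservices.sh

# Clone or update every micro-service on a specific branch
./clone_opencloud_microservices.sh feat/chart
```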
@@ -1,9 +0,0 @@
#!/bin/bash

docker cp ./datas mongo:.

for i in $(ls ./datas); do
  firstString=$i
  echo "ADD file $i in collection ${i/.json/}"
  docker exec -it mongo sh -c "mongoimport --jsonArray --db DC_myDC --collection ${i/.json/} --file ./datas/$i"
done
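The loop above derives each MongoDB collection name from the JSON file name; for a single file the equivalent manual command looks like this (a sketch; `collaborative_area.json` is a hypothetical file under `./datas`):

```
docker cp ./datas mongo:.
docker exec -it mongo sh -c \
  "mongoimport --jsonArray --db DC_myDC --collection collaborative_area --file ./datas/collaborative_area.json"
```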
@@ -1 +0,0 @@
[{"_id":"0b6a375f-be3e-49a9-9827-3c2d5eddb057","abstractobject":{"id":"0b6a375f-be3e-49a9-9827-3c2d5eddb057","name":"test","is_draft":false,"creator_id":"c0cece97-7730-4c2a-8c20-a30944564106","creation_date":{"$date":"2025-01-27T10:41:47.741Z"},"update_date":{"$date":"2025-01-27T10:41:47.741Z"},"updater_id":"c0cece97-7730-4c2a-8c20-a30944564106","access_mode":0},"description":"Proto Collaborative area example","collaborative_area":{},"workflows":["58314c99-c595-4ca2-8b5e-822a6774efed"],"allowed_peers_group":{"c0cece97-7730-4c2a-8c20-a30944564106":["*"]},"workspaces":[]}]
3 file diffs suppressed because one or more lines are too long
@@ -1 +0,0 @@
[{"_id":"04bc70b5-8d7b-44e6-9015-fadfa0fb102d","abstractinstanciatedresource":{"abstractresource":{"type":"storage","abstractobject":{"id":"04bc70b5-8d7b-44e6-9015-fadfa0fb102d","name":"IRT risk database","is_draft":false,"creator_id":"c0cece97-7730-4c2a-8c20-a30944564106","creation_date":"2021-09-30T14:00:00.000Z","update_date":"2021-09-30T14:00:00.000Z","updater_id":"c0cece97-7730-4c2a-8c20-a30944564106","access_mode":1},"logo":"https://cloud.o-forge.io/core/deperecated-oc-catalog/raw/branch/main/scripts/local_imgs/IRT risk database.png","description":"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.","short_description":"S3 compliant IRT file storage","owners":[{"name":"IRT"}]},"instances":[{"env":[{"attr":"source","readonly":true}],"resourceinstance":{"abstractobject":{"id":"7fdccb9c-7090-40a5-bacd-7435bc56c90d","name":"IRT local file storage Marseille"},"location":{"latitude":50.62925,"longitude":3.057256},"country":250,"partnerships":[{"resourcepartnership":{"namespace":"default","peer_groups":{"c0cece97-7730-4c2a-8c20-a30944564106":["*"]},"pricing_profiles":[{"pricing":{"price":50,"currency":"EUR","buying_strategy":0,"time_pricing_strategy":0}}]}}]},"source":"/mnt/vol","local":false,"security_level":"public","size":50,"size_type":3,"redundancy":"RAID5","throughput":"r:200,w:150"}]},"storage_type":5,"acronym":"DC_myDC"},{"_id":"e726020a-b68e-4abc-ab36-c3640ea3f557","abstractinstanciatedresource":{"abstractresource":{"type":"storage","abstractobject":{"id":"e726020a-b68e-4abc-ab36-c3640ea3f557","name":"IRT local file storage","is_draft":false,"creator_id":"c0cece97-7730-4c2a-8c20-a30944564106","creation_date":"2021-09-30T14:00:00.000Z","update_date":"2021-09-30T14:00:00.000Z","updater_id":"c0cece97-7730-4c2a-8c20-a30944564106","access_mode":1},"logo":"https://cloud.o-forge.io/core/deperecated-oc-catalog/raw/branch/main/scripts/local_imgs/IRT local file storage.png","description":"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.","short_description":"S3 compliant IRT file storage","owners":[{"name":"IRT"}]},"instances":[{"resourceinstance":{"env":[{"attr":"source","readonly":true}],"abstractobject":{"id":"7fdccb9c-7090-40a5-bacd-7435bc56c90d","name":"IRT local file storage Marseille"},"location":{"latitude":50.62925,"longitude":3.057256},"country":250,"partnerships":[{"resourcepartnership":{"namespace":"default","peer_groups":{"c0cece97-7730-4c2a-8c20-a30944564106":["*"]},"pricing_profiles":[{"pricing":{"price":50,"currency":"EUR","buying_strategy":0,"time_pricing_strategy":0}}]}}]},"source":"/mnt/vol","local":true,"security_level":"public","size":500,"size_type":0,"encryption":true,"redundancy":"RAID5S","throughput":"r:300,w:350"}]},"storage_type":5,"acronym":"DC_myDC"}]
File diff suppressed because one or more lines are too long
@@ -1,31 +0,0 @@
#!/bin/bash
docker network create oc || true

docker compose down
cd ./tools && docker compose -f ./docker-compose.dev.yml up --force-recreate -d && cd ..
cd ./tools && docker compose -f ./docker-compose.traefik.yml up --force-recreate -d && cd ..

cd ../..

REPOS=(
  "oc-auth"
  "oc-catalog"
  "oc-datacenter"
  "oc-monitord"
  "oc-peer"
  "oc-shared"
  "oc-scheduler"
  "oc-schedulerd"
  "oc-workflow"
  "oc-workspace"
  "oc-front"
)
for i in "${REPOS[@]}"
do
  echo "Building $i"
  docker kill $i || true
  docker rm $i || true
  cd ./$i
  docker build . -t $i && docker compose up -d
  cd ..
done
@@ -1,48 +0,0 @@
#!/bin/bash
docker network rm oc || true

docker compose -f ./tools/docker-compose.traefik.yml down

TOOLS=(
  "mongo"
  "mongo-express"
  "nats"
  "loki"
  "grafana"
  "hydra-client"
  "hydra"
  "keto"
  "ldap"
)

for i in "${TOOLS[@]}"
do
  echo "kill $i"
  docker kill $i || true
  docker rm $i || true
done

cd ../..

REPOS=(
  "oc-auth"
  "oc-catalog"
  "oc-datacenter"
  "oc-monitord"
  "oc-peer"
  "oc-shared"
  "oc-scheduler"
  "oc-schedulerd"
  "oc-workflow"
  "oc-workspace"
  "oc-front"
)
for i in "${REPOS[@]}"
do
  echo "Kill $i"
  cd ./$i
  docker kill $i || true
  docker rm $i || true
  make purge || true
  cd ..
done
@@ -1,8 +0,0 @@
datasources:
  - name: Loki
    type: loki
    access: proxy
    url: http://loki:3100
    isDefault: true
    jsonData:
      httpMethod: POST
@@ -1,162 +0,0 @@
version: '3.4'

services:
  mongo:
    image: 'mongo:latest'
    networks:
      - oc
    ports:
      - 27017:27017
    container_name: mongo
    volumes:
      - oc-data:/data/db
      - oc-data:/data/configdb

  mongo-express:
    image: "mongo-express:latest"
    restart: always
    depends_on:
      - mongo
    networks:
      - oc
    ports:
      - 8081:8081
    environment:
      - ME_CONFIG_BASICAUTH_USERNAME=test
      - ME_CONFIG_BASICAUTH_PASSWORD=test
  nats:
    image: 'nats:latest'
    container_name: nats
    ports:
      - 4222:4222
    command:
      - "--debug"
    networks:
      - oc
  loki:
    image: 'grafana/loki'
    container_name: loki
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.loki.entrypoints=web"
      - "traefik.http.routers.loki.rule=PathPrefix(`/tools/loki`)"
      - "traefik.http.services.loki.loadbalancer.server.port=3100"
      - "traefik.http.middlewares.loki-stripprefix.stripprefix.prefixes=/tools/loki"
      - "traefik.http.routers.loki.middlewares=loki-stripprefix"
      - "traefik.http.middlewares.loki.forwardauth.address=http://oc-auth:8080/oc/forward"
    ports:
      - "3100:3100"
    networks:
      - oc
  grafana:
    image: 'grafana/grafana'
    container_name: grafana
    ports:
      - '3000:3000'
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.grafana.entrypoints=web"
      - "traefik.http.routers.grafana.rule=PathPrefix(`/tools/grafana`)"
      - "traefik.http.services.grafana.loadbalancer.server.port=3000"
      - "traefik.http.middlewares.grafana-stripprefix.stripprefix.prefixes=/tools/grafana"
      - "traefik.http.routers.grafana.middlewares=grafana-stripprefix"
      - "traefik.http.middlewares.grafana.forwardauth.address=http://oc-auth:8080/oc/forward"
    networks:
      - oc
    volumes:
      - ./conf/grafana_data_source.yml:/etc/grafana/provisioning/datasources/datasource.yml
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=pfnirt # Set this to anything but "admin" to skip the password-change page at startup
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_DISABLE_INITIAL_ADMIN_PASSWORD_CHANGE=true
  hydra-client:
    image: oryd/hydra:v2.2.0
    container_name: hydra-client
    environment:
      HYDRA_ADMIN_URL: http://hydra:4445
      ORY_SDK_URL: http://hydra:4445
    command:
      - create
      - oauth2-client
      - --skip-tls-verify
      - --name
      - test-client
      - --secret
      - oc-auth-got-secret
      - --response-type
      - id_token,token,code
      - --grant-type
      - implicit,refresh_token,authorization_code,client_credentials
      - --scope
      - openid,profile,email,roles
      - --token-endpoint-auth-method
      - client_secret_post
      - --redirect-uri
      - http://localhost:3000
    networks:
      - oc
    deploy:
      restart_policy:
        condition: none
    depends_on:
      - hydra
    healthcheck:
      test: ["CMD", "curl", "-f", "http://hydra:4445"]
      interval: 10s
      timeout: 10s
      retries: 10
  hydra:
    container_name: hydra
    image: oryd/hydra:v2.2.0
    environment:
      SECRETS_SYSTEM: oc-auth-got-secret
      LOG_LEAK_SENSITIVE_VALUES: true
      # OAUTH2_TOKEN_HOOK_URL: http://oc-auth:8080/oc/claims
      URLS_SELF_ISSUER: http://hydra:4444
      URLS_SELF_PUBLIC: http://hydra:4444
      WEBFINGER_OIDC_DISCOVERY_SUPPORTED_SCOPES: profile,email,phone,roles
      WEBFINGER_OIDC_DISCOVERY_SUPPORTED_CLAIMS: name,family_name,given_name,nickname,email,phone_number
      DSN: memory
    command: serve all --dev
    networks:
      - oc
    ports:
      - "4444:4444"
      - "4445:4445"
    deploy:
      restart_policy:
        condition: on-failure
  ldap:
    image: pgarrett/ldap-alpine
    container_name: ldap
    volumes:
      - "./ldap.ldif:/ldif/ldap.ldif"
    networks:
      - oc
    ports:
      - "390:389"
    deploy:
      restart_policy:
        condition: on-failure
  keto:
    image: oryd/keto:v0.7.0-alpha.1-sqlite
    ports:
      - "4466:4466"
      - "4467:4467"
    command: serve -c /home/ory/keto.yml
    restart: on-failure
    volumes:
      - type: bind
        source: .
        target: /home/ory
    container_name: keto
    networks:
      - oc

volumes:
  oc-data:

networks:
  oc:
    external: true
@@ -1,24 +0,0 @@
version: '3.4'

services:
  traefik:
    image: traefik:v2.10.4
    container_name: traefik
    restart: unless-stopped
    networks:
      - oc
    command:
      - "--api.insecure=true"
      - "--providers.docker=true"
      - "--entrypoints.web.address=:80"
    ports:
      - "80:80" # Expose Traefik on port 80
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock

volumes:
  oc-data:

networks:
  oc:
    external: true
@@ -1,18 +0,0 @@
version: v0.6.0-alpha.1

log:
  level: debug

namespaces:
  - id: 0
    name: open-cloud

dsn: memory

serve:
  read:
    host: 0.0.0.0
    port: 4466
  write:
    host: 0.0.0.0
    port: 4467
@@ -1,24 +0,0 @@
dn: uid=admin,ou=Users,dc=example,dc=com
objectClass: inetOrgPerson
cn: Admin
sn: Istrator
uid: admin
userPassword: admin
mail: admin@example.com
ou: Users

dn: ou=AppRoles,dc=example,dc=com
objectClass: organizationalunit
ou: AppRoles
description: AppRoles

dn: ou=App1,ou=AppRoles,dc=example,dc=com
objectClass: organizationalunit
ou: App1
description: App1

dn: cn=traveler,ou=App1,ou=AppRoles,dc=example,dc=com
objectClass: groupofnames
cn: traveler
description: traveler
member: uid=admin,ou=Users,dc=example,dc=com
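For reference, the directory defined above is reachable through the port published by the `ldap` container (390:389 in docker-compose.dev.yml); a query sketch, assuming the server accepts a simple bind as the admin entry it loads:

```
ldapsearch -x -H ldap://localhost:390 \
  -D "uid=admin,ou=Users,dc=example,dc=com" -w admin \
  -b "dc=example,dc=com" "(objectClass=*)"
```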
docs/diagrams/src/oc-kube.puml — new file, 207 lines

@@ -0,0 +1,207 @@
@startuml
skinparam componentStyle rectangle

node "Kubernetes Cluster" {

  cloud "Service: oc-catalog" as oc_catalog_service {
    oc_catalog_service : Type: NodePort
    oc_catalog_service : External NodePort: 8087 # Exposed NodePort for external access
    oc_catalog_service : Internal TargetPort: 8080
  }

  ' Deployment for oc-catalog managing the pods
  node "Deployment: oc-catalog" as oc_catalog_deployment {
    oc_catalog_deployment : Replicas: {{ .Values.replicaCount }}
    oc_catalog_deployment : Image: registry.dev.svc.cluster.local:5000/oc-catalog:latest
    oc_catalog_deployment : PullPolicy: IfNotPresent
    oc_catalog_deployment : TargetPort: 8080

    node "Pod: oc-catalog-1" as catalog_1 {
      component "Container: oc-catalog" as oc_catalog_container1 {
        oc_catalog_container1 : Internal Port: 8080
        oc_catalog_container1 : MONGO_DATABASE=DC_myDC
        oc_catalog_container1 : MONGO_URI=mongodb://mongo:27017
      }
    }
  }

  oc_catalog_service --> oc_catalog_deployment : Routes traffic to Deployment
  oc_catalog_deployment --> catalog_1 : Manages Pods

  ' MongoDB service and statefulset

  cloud "Service: mongo" as mongo_service {
    mongo_service : Type: ClusterIP
    mongo_service : Internal Port: 27017
  }

  catalog_1 --> mongo_service : Connects to MongoDB

  cloud "Service: oc-shared" as oc_shared_service {
    oc_shared_service : Type: NodePort
    oc_shared_service : External NodePort: 8091 # Exposed NodePort for external access
    oc_shared_service : Internal TargetPort: 8080
  }

  ' Deployment for oc-shared managing the pods
  node "Deployment: oc-shared" as oc_shared_deployment {
    oc_shared_deployment : Replicas: {{ .Values.replicaCount }}
    oc_shared_deployment : Image: registry.dev.svc.cluster.local:5000/oc-shared:latest
    oc_shared_deployment : PullPolicy: IfNotPresent
    oc_shared_deployment : TargetPort: 8080

    node "Pod: oc-shared-1" as shared_1 {
      component "Container: oc-shared" as oc_shared_container1 {
        oc_shared_container1 : Internal Port: 8080
        oc_shared_container1 : MONGO_DATABASE=DC_myDC
        oc_shared_container1 : MONGO_URI=mongodb://mongo:27017
      }
    }
  }

  oc_shared_service --> oc_shared_deployment : Routes traffic to Deployment
  oc_shared_deployment --> shared_1 : Manages Pods

  ' MongoDB service and statefulset

  cloud "Service: mongo" as mongo_service {
    mongo_service : Type: ClusterIP
    mongo_service : Internal Port: 27017
  }

  shared_1 --> mongo_service : Connects to MongoDB

  cloud "Service: oc-workflow" as oc_workflow_service {
    oc_workflow_service : Type: NodePort
    oc_workflow_service : External NodePort: 8088 # Exposed NodePort for external access
    oc_workflow_service : Internal TargetPort: 8080
  }

  ' Deployment for oc-workflow managing the pods
  node "Deployment: oc-workflow" as oc_workflow_deployment {
    oc_workflow_deployment : Replicas: {{ .Values.replicaCount }}
    oc_workflow_deployment : Image: registry.dev.svc.cluster.local:5000/oc-workflow:latest
    oc_workflow_deployment : PullPolicy: IfNotPresent
    oc_workflow_deployment : TargetPort: 8080

    node "Pod: oc-workflow-1" as workflow_1 {
      component "Container: oc-workflow" as oc_workflow_container1 {
        oc_workflow_container1 : Internal Port: 8080
        oc_workflow_container1 : MONGO_DATABASE=DC_myDC
        oc_workflow_container1 : MONGO_URI=mongodb://mongo:27017
      }
    }
  }

  oc_workflow_service --> oc_workflow_deployment : Routes traffic to Deployment
  oc_workflow_deployment --> workflow_1 : Manages Pods

  ' MongoDB service and statefulset

  cloud "Service: mongo" as mongo_service {
    mongo_service : Type: ClusterIP
    mongo_service : Internal Port: 27017
  }

  workflow_1 --> mongo_service : Connects to MongoDB

  cloud "Service: oc-workspace" as oc_workspace_service {
    oc_workspace_service : Type: NodePort
    oc_workspace_service : External NodePort: 8089 # Exposed NodePort for external access
    oc_workspace_service : Internal TargetPort: 8080
  }

  ' Deployment for oc-workspace managing the pods
  node "Deployment: oc-workspace" as oc_workspace_deployment {
    oc_workspace_deployment : Replicas: {{ .Values.replicaCount }}
    oc_workspace_deployment : Image: registry.dev.svc.cluster.local:5000/oc-workspace:latest
    oc_workspace_deployment : PullPolicy: IfNotPresent
    oc_workspace_deployment : TargetPort: 8080

    node "Pod: oc-workspace-1" as workspace_1 {
      component "Container: oc-workspace" as oc_workspace_container1 {
        oc_workspace_container1 : Internal Port: 8080
        oc_workspace_container1 : MONGO_DATABASE=DC_myDC
        oc_workspace_container1 : MONGO_URI=mongodb://mongo:27017
      }
    }
  }

  oc_workspace_service --> oc_workspace_deployment : Routes traffic to Deployment
  oc_workspace_deployment --> workspace_1 : Manages Pods

  cloud "Service: oc-scheduler" as oc_scheduler_service {
    oc_scheduler_service : Type: NodePort
    oc_scheduler_service : External NodePort: 8090 # Exposed NodePort for external access
    oc_scheduler_service : Internal TargetPort: 8080
  }

  ' Deployment for oc-scheduler managing the pods
  node "Deployment: oc-scheduler" as oc_scheduler_deployment {
    oc_scheduler_deployment : Replicas: {{ .Values.replicaCount }}
    oc_scheduler_deployment : Image: registry.dev.svc.cluster.local:5000/oc-scheduler:latest
    oc_scheduler_deployment : PullPolicy: IfNotPresent
    oc_scheduler_deployment : TargetPort: 8080

    node "Pod: oc-scheduler-1" as scheduler_1 {
      component "Container: oc-scheduler" as oc_scheduler_container1 {
        oc_scheduler_container1 : Internal Port: 8080
        oc_scheduler_container1 : MONGO_DATABASE=DC_myDC
        oc_scheduler_container1 : MONGO_URI=mongodb://mongo:27017
      }
    }
  }

  oc_scheduler_service --> oc_scheduler_deployment : Routes traffic to Deployment
  oc_scheduler_deployment --> scheduler_1 : Manages Pods

  ' MongoDB service and statefulset

  cloud "Service: mongo" as mongo_service {
    mongo_service : Type: ClusterIP
    mongo_service : Internal Port: 27017
  }

  scheduler_1 --> mongo_service : Connects to MongoDB

  ' MongoDB service and statefulset

  cloud "Service: mongo" as mongo_service {
    mongo_service : Type: ClusterIP
    mongo_service : Internal Port: 27017
  }

  ' MongoDB StatefulSet with PVC and PV

  node "StatefulSet: MongoDB" as mongo_statefulset {
    component "Pod: MongoDB" as mongo_pod {
      component "Container: MongoDB" as mongo_container {
        mongo_container : Image: mongo:latest
        mongo_container : PullPolicy: IfNotPresent
        mongo_container : Exposed Port: 27017
        mongo_container : Volume Mount: /data/db
        mongo_container : Volume Mount: /data/configdb
        mongo_container : Secret: username, password (base64)
      }
    }

    storage "PersistentVolumeClaim: mongo-pvc" as mongo_pvc {
      mongo_pvc : Access Mode: ReadWriteOnce
      mongo_pvc : Size: 1Gi
      mongo_pvc : Storage Class: {{ .Values.persistence.storageClass }}
    }
  }

  storage "PersistentVolume: PV" as mongo_pv {
    mongo_pv : Bound to PVC: mongo-pvc
  }
  mongo_service --> mongo_statefulset : Routes traffic to MongoDB StatefulSet
  mongo_pod --> mongo_pvc : Mounted Persistent Volume Claim
  mongo_pvc --> mongo_pv : Bound Persistent Volume
  workspace_1 --> mongo_service : Connects to MongoDB

}
@enduml
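To preview the added diagram locally, it can be rendered with the PlantUML CLI (a sketch, assuming `plantuml` is installed; the Helm placeholders such as `{{ .Values.replicaCount }}` simply render as literal text):

```
plantuml -tsvg docs/diagrams/src/oc-kube.puml   # writes oc-kube.svg next to the source file
```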
@@ -1,46 +0,0 @@

## Deploy the opencloud chart

```
./start.sh <mode: dev|prod, default: dev> <branch, default: main>
```

Feel free to modify or create a new opencloud/dev-values.yaml. The provided setup should work out of the box, but it is not suitable for production use.

## Hostname settings

Edit your /etc/hosts file and add the following line:

```
127.0.0.1 beta.opencloud.com
```

## Done

Everything should now be operational; go to http://beta.opencloud.com and enjoy the ride.

# Prebuilt microservices deployment procedure

TODO

# First steps

Go to http://beta.opencloud.com/users

Log in using the default user/password combo ldapadmin/ldapadmin.

Create a new user, or change the default one.

Go to http://beta.opencloud.com

Log in using your fresh credentials.

Do stuff.

You can go to http://beta.opencloud.com/mongoexpress

... for Mongo Express web client access (default login/password is test/testme).

You can go to http://localhost/dashboard/

... for access to the Traefik reverse proxy front-end.
k8s/start.sh — 21 lines

@@ -1,21 +0,0 @@
#!/bin/bash
mode=${1:-dev}
branch=${2:-main}

cd ../..

if [ ! -d "oc-k8s" ]; then
  echo "Cloning repository: oc-k8s"
  git clone "https://cloud.o-forge.io/core/oc-k8s.git"
  if [ $? -ne 0 ]; then
    echo "Error cloning oc-k8s"
    exit 1
  fi
fi
echo "Repository 'oc-k8s' already exists. Pulling latest changes..."
cd "oc-k8s" && git checkout $branch && git pull

./create_kind_cluster.sh
./clone_opencloud_microservices.sh $branch
./build_opencloud_microservices.sh
./install.sh $mode
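The script takes the deployment mode and branch as positional arguments, defaulting to `dev` and `main`; typical invocations (a sketch):

```
./start.sh                  # dev mode, main branch
./start.sh dev feat/chart   # dev mode, feature branch
./start.sh prod             # prod mode, main branch
```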
k8s/stop.sh — 20 lines

@@ -1,20 +0,0 @@
#!/bin/bash
mode=${1:-dev}
branch=${2:-main}

cd ../..

if [ ! -d "oc-k8s" ]; then
  echo "Cloning repository: oc-k8s"
  git clone "https://cloud.o-forge.io/core/oc-k8s.git"
  if [ $? -ne 0 ]; then
    echo "Error cloning oc-k8s"
    exit 1
  fi
fi
echo "Repository 'oc-k8s' already exists. Pulling latest changes..."
cd "oc-k8s" && git checkout $branch && git pull

./uninstall.sh $mode
./delete_kind_cluster.sh
@@ -1,7 +0,0 @@
# RUN
- `./start.sh <YOUR INTERNET IP>`
Now reach localhost:8000

# STOP
- `./stop.sh`
@@ -1,38 +0,0 @@
#!/bin/bash

echo "Stopping all services..."

./stop.sh > /dev/null 2>&1

echo "Starting all services"

cp ./traefik-dev-reverse/template_dynamic.yml ./traefik-dev-reverse/dynamic.yml
sed -i "s/localhost/$1/g" ./traefik-dev-reverse/dynamic.yml

docker network create oc || true
cd ./tools && docker compose -f ./docker-compose.dev.yml up --force-recreate -d
cd .. && docker compose -f ./traefik-dev-reverse/docker-compose.yml up --force-recreate -d

cd ../..

REPOS=(
  "oc-auth"
  "oc-catalog"
  "oc-datacenter"
  "oc-monitord"
  "oc-peer"
  "oc-shared"
  "oc-scheduler"
  "oc-schedulerd"
  "oc-workflow"
  "oc-workspace"
  "oc-front"
)
for i in "${REPOS[@]}"
do
  echo "Building $i"
  cd ./$i
  make dev HOST="${2:-http://localhost:8000}" &
  cd ..
done
|
||||
#!/bin/bash
|
||||
docker network delete oc | true
|
||||
|
||||
docker compose -f ./traefik-dev-reverse/docker-compose.yml rm -s -v -f
|
||||
|
||||
TOOLS=(
|
||||
"mongo"
|
||||
"mongo-express"
|
||||
"nats"
|
||||
"loki"
|
||||
"grafana"
|
||||
"keto"
|
||||
"ldap"
|
||||
)
|
||||
|
||||
for i in "${TOOLS[@]}"
|
||||
do
|
||||
echo "kill $i"
|
||||
docker kill $i | true
|
||||
docker rm $i | true
|
||||
done
|
||||
|
||||
cd ../..
|
||||
|
||||
REPOS=(
|
||||
"oc-auth"
|
||||
"oc-catalog"
|
||||
"oc-datacenter"
|
||||
"oc-monitord"
|
||||
"oc-peer"
|
||||
"oc-shared"
|
||||
"oc-scheduler"
|
||||
"oc-schedulerd"
|
||||
"oc-workflow"
|
||||
"oc-workspace"
|
||||
"oc-front"
|
||||
)
|
||||
for i in "${REPOS[@]}"
|
||||
do
|
||||
echo "kill $i"
|
||||
docker kill $i | true
|
||||
docker rm $i | true
|
||||
cd ./$i
|
||||
make purge | true
|
||||
cd ..
|
||||
done
|
||||
|
@@ -1,104 +0,0 @@
version: '3.4'

services:
  mongo:
    image: 'mongo:latest'
    networks:
      - oc
    ports:
      - 27017:27017
    container_name: mongo
    volumes:
      - oc-data:/data/db
      - oc-data:/data/configdb

  mongo-express:
    image: "mongo-express:latest"
    restart: always
    depends_on:
      - mongo
    networks:
      - oc
    ports:
      - 8081:8081
    environment:
      - ME_CONFIG_BASICAUTH_USERNAME=test
      - ME_CONFIG_BASICAUTH_PASSWORD=test
  nats:
    image: 'nats:latest'
    container_name: nats
    ports:
      - 4222:4222
    command:
      - "--debug"
    networks:
      - oc
  loki:
    image: 'grafana/loki'
    container_name: loki
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.loki.entrypoints=web"
      - "traefik.http.routers.loki.rule=PathPrefix(`/tools/loki`)"
      - "traefik.http.services.loki.loadbalancer.server.port=3100"
      - "traefik.http.middlewares.loki-stripprefix.stripprefix.prefixes=/tools/loki"
      - "traefik.http.routers.loki.middlewares=loki-stripprefix"
      - "traefik.http.middlewares.loki.forwardauth.address=http://localhost:8094/oc/forward"
    ports:
      - "3100:3100"
    networks:
      - oc
  grafana:
    image: 'grafana/grafana'
    container_name: grafana
    ports:
      - '3000:3000'
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.grafana.entrypoints=web"
      - "traefik.http.routers.grafana.rule=PathPrefix(`/tools/grafana`)"
      - "traefik.http.services.grafana.loadbalancer.server.port=3000"
      - "traefik.http.middlewares.grafana-stripprefix.stripprefix.prefixes=/tools/grafana"
      - "traefik.http.routers.grafana.middlewares=grafana-stripprefix"
      - "traefik.http.middlewares.grafana.forwardauth.address=http://localhost:8094/oc/forward"
    networks:
      - oc
    volumes:
      - ./conf/grafana_data_source.yml:/etc/grafana/provisioning/datasources/datasource.yml
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=pfnirt # Set this to anything but "admin" to skip the password-change page at startup
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_DISABLE_INITIAL_ADMIN_PASSWORD_CHANGE=true
  ldap:
    image: pgarrett/ldap-alpine
    container_name: ldap
    volumes:
      - "./ldap.ldif:/ldif/ldap.ldif"
    networks:
      - oc
    ports:
      - "390:389"
    deploy:
      restart_policy:
        condition: on-failure
  keto:
    image: oryd/keto:v0.7.0-alpha.1-sqlite
    ports:
      - "4466:4466"
      - "4467:4467"
    command: serve -c /home/ory/keto.yml
    restart: on-failure
    volumes:
      - type: bind
        source: .
        target: /home/ory
    container_name: keto
    networks:
      - oc

volumes:
  oc-data:

networks:
  oc:
    external: true
@@ -1,18 +0,0 @@
version: v0.6.0-alpha.1

log:
  level: debug

namespaces:
  - id: 0
    name: open-cloud

dsn: memory

serve:
  read:
    host: 0.0.0.0
    port: 4466
  write:
    host: 0.0.0.0
    port: 4467
@@ -1,24 +0,0 @@
dn: uid=admin,ou=Users,dc=example,dc=com
objectClass: inetOrgPerson
cn: Admin
sn: Istrator
uid: admin
userPassword: admin
mail: admin@example.com
ou: Users

dn: ou=AppRoles,dc=example,dc=com
objectClass: organizationalunit
ou: AppRoles
description: AppRoles

dn: ou=App1,ou=AppRoles,dc=example,dc=com
objectClass: organizationalunit
ou: App1
description: App1

dn: cn=traveler,ou=App1,ou=AppRoles,dc=example,dc=com
objectClass: groupofnames
cn: traveler
description: traveler
member: uid=admin,ou=Users,dc=example,dc=com
@@ -1,13 +0,0 @@
version: '3.8'

services:
  traefik:
    image: traefik:v3.0
    container_name: traefik
    restart: always
    ports:
      - "8000:8000" # Expose Traefik on port 8000
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"
      - "./traefik.yml:/etc/traefik/traefik.yml"
      - "./dynamic.yml:/etc/traefik/dynamic.yml"
@@ -1,164 +0,0 @@
http:
  routers:
    workspace-router:
      rule: "PathPrefix(`/workspace`)"
      entryPoints:
        - "web"
      service: workspace-service
      middlewares:
        - replace-workspace
    workflow-router:
      rule: "PathPrefix(`/workflow`)"
      entryPoints:
        - "web"
      service: workflow-service
      middlewares:
        - replace-workflow
    shared-router:
      rule: "PathPrefix(`/shared`)"
      entryPoints:
        - "web"
      service: shared-service
      middlewares:
        - replace-shared
    scheduler-router:
      rule: "PathPrefix(`/scheduler`)"
      entryPoints:
        - "web"
      service: scheduler-service
      middlewares:
        - replace-scheduler
    peer-router:
      rule: "PathPrefix(`/peer`)"
      entryPoints:
        - "web"
      service: peer-service
      middlewares:
        - replace-peer
    datacenter-router:
      rule: "PathPrefix(`/datacenter`)"
      entryPoints:
        - "web"
      service: datacenter-service
      middlewares:
        - replace-datacenter
    catalog-router:
      rule: "PathPrefix(`/catalog`)"
      entryPoints:
        - "web"
      service: catalog-service
      middlewares:
        - replace-catalog
    auth-router:
      rule: "PathPrefix(`/auth`)"
      entryPoints:
        - "web"
      service: auth-service
      middlewares:
        - replace-auth
    front-router:
      rule: "PathPrefix(`/`)"
      entryPoints:
        - "web"
      service: front-service
      middlewares:
        - replace-front

  services:
    workspace-service:
      loadBalancer:
        servers:
          - url: "http://192.168.1.169:8089"
    workflow-service:
      loadBalancer:
        servers:
          - url: "http://192.168.1.169:8088"
    shared-service:
      loadBalancer:
        servers:
          - url: "http://192.168.1.169:8091"
    scheduler-service:
      loadBalancer:
        servers:
          - url: "http://192.168.1.169:8090"
    peer-service:
      loadBalancer:
        servers:
          - url: "http://192.168.1.169:8093"
    datacenter-service:
      loadBalancer:
        servers:
          - url: "http://192.168.1.169:8092"
    catalog-service:
      loadBalancer:
        servers:
          - url: "http://192.168.1.169:8087"
    auth-service:
      loadBalancer:
        servers:
          - url: "http://192.168.1.169:8094"
    front-service:
      loadBalancer:
        servers:
          - url: "http://192.168.1.169:8080"

  middlewares:
    workspace:
      forwardauth:
        address: "http://192.168.1.169:8094/oc/forward"
    workflow:
      forwardauth:
        address: "http://192.168.1.169:8094/oc/forward"
    shared:
      forwardauth:
        address: "http://192.168.1.169:8094/oc/forward"
    scheduler:
      forwardauth:
        address: "http://192.168.1.169:8094/oc/forward"
    peer:
      forwardauth:
        address: "http://192.168.1.169:8094/oc/forward"
    datacenter:
      forwardauth:
        address: "http://192.168.1.169:8094/oc/forward"
    catalog:
      forwardauth:
        address: "http://192.168.1.169:8094/oc/forward"
    auth:
      forwardauth:
        address: "http://192.168.1.169:8094/oc/forward"
    replace-workspace:
      replacePathRegex:
        regex: "^/workspace(.*)"
        replacement: "/oc$1"
    replace-workflow:
      replacePathRegex:
        regex: "^/workflow(.*)"
        replacement: "/oc$1"
    replace-shared:
      replacePathRegex:
        regex: "^/shared(.*)"
        replacement: "/oc$1"
    replace-scheduler:
      replacePathRegex:
        regex: "^/scheduler(.*)"
        replacement: "/oc$1"
    replace-peer:
      replacePathRegex:
        regex: "^/peer(.*)"
        replacement: "/oc$1"
    replace-datacenter:
      replacePathRegex:
        regex: "^/datacenter(.*)"
        replacement: "/oc$1"
    replace-catalog:
      replacePathRegex:
        regex: "^/catalog(.*)"
        replacement: "/oc$1"
    replace-auth:
      replacePathRegex:
        regex: "^/auth(.*)"
        replacement: "/oc$1"
    replace-front:
      stripprefix:
        prefixes: "/"
@@ -1,164 +0,0 @@
http:
  routers:
    workspace-router:
      rule: "PathPrefix(`/workspace`)"
      entryPoints:
        - "web"
      service: workspace-service
      middlewares:
        - replace-workspace
    workflow-router:
      rule: "PathPrefix(`/workflow`)"
      entryPoints:
        - "web"
      service: workflow-service
      middlewares:
        - replace-workflow
    shared-router:
      rule: "PathPrefix(`/shared`)"
      entryPoints:
        - "web"
      service: shared-service
      middlewares:
        - replace-shared
    scheduler-router:
      rule: "PathPrefix(`/scheduler`)"
      entryPoints:
        - "web"
      service: scheduler-service
      middlewares:
        - replace-scheduler
    peer-router:
      rule: "PathPrefix(`/peer`)"
      entryPoints:
        - "web"
      service: peer-service
      middlewares:
        - replace-peer
    datacenter-router:
      rule: "PathPrefix(`/datacenter`)"
      entryPoints:
        - "web"
      service: datacenter-service
      middlewares:
        - replace-datacenter
    catalog-router:
      rule: "PathPrefix(`/catalog`)"
      entryPoints:
        - "web"
      service: catalog-service
      middlewares:
        - replace-catalog
    auth-router:
      rule: "PathPrefix(`/auth`)"
      entryPoints:
        - "web"
      service: auth-service
      middlewares:
        - replace-auth
    front-router:
      rule: "PathPrefix(`/`)"
      entryPoints:
        - "web"
      service: front-service
      middlewares:
        - replace-front

  services:
    workspace-service:
      loadBalancer:
        servers:
          - url: "http://localhost:8089"
    workflow-service:
      loadBalancer:
        servers:
          - url: "http://localhost:8088"
    shared-service:
      loadBalancer:
        servers:
          - url: "http://localhost:8091"
    scheduler-service:
      loadBalancer:
        servers:
          - url: "http://localhost:8090"
    peer-service:
      loadBalancer:
        servers:
          - url: "http://localhost:8093"
    datacenter-service:
      loadBalancer:
        servers:
          - url: "http://localhost:8092"
    catalog-service:
      loadBalancer:
        servers:
          - url: "http://localhost:8087"
    auth-service:
      loadBalancer:
        servers:
          - url: "http://localhost:8094"
    front-service:
      loadBalancer:
        servers:
          - url: "http://localhost:8080"

  middlewares:
    workspace:
      forwardauth:
        address: "http://localhost:8094/oc/forward"
    workflow:
      forwardauth:
        address: "http://localhost:8094/oc/forward"
    shared:
      forwardauth:
        address: "http://localhost:8094/oc/forward"
    scheduler:
      forwardauth:
        address: "http://localhost:8094/oc/forward"
    peer:
      forwardauth:
        address: "http://localhost:8094/oc/forward"
    datacenter:
      forwardauth:
        address: "http://localhost:8094/oc/forward"
    catalog:
      forwardauth:
        address: "http://localhost:8094/oc/forward"
    auth:
      forwardauth:
        address: "http://localhost:8094/oc/forward"
    replace-workspace:
      replacePathRegex:
        regex: "^/workspace(.*)"
        replacement: "/oc$1"
    replace-workflow:
      replacePathRegex:
        regex: "^/workflow(.*)"
        replacement: "/oc$1"
    replace-shared:
      replacePathRegex:
        regex: "^/shared(.*)"
        replacement: "/oc$1"
    replace-scheduler:
      replacePathRegex:
        regex: "^/scheduler(.*)"
        replacement: "/oc$1"
    replace-peer:
      replacePathRegex:
        regex: "^/peer(.*)"
        replacement: "/oc$1"
    replace-datacenter:
      replacePathRegex:
        regex: "^/datacenter(.*)"
        replacement: "/oc$1"
    replace-catalog:
      replacePathRegex:
        regex: "^/catalog(.*)"
        replacement: "/oc$1"
    replace-auth:
      replacePathRegex:
        regex: "^/auth(.*)"
        replacement: "/oc$1"
    replace-front:
      stripprefix:
        prefixes: "/"
@@ -1,8 +0,0 @@
entryPoints:
  web:
    address: ":8000" # Single entry point for all requests

providers:
  file:
    filename: "/etc/traefik/dynamic.yml"
    watch: true
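Taken together, these removed files had Traefik listen on :8000, rewrite each `/service` prefix to `/oc`, and proxy to the matching backend port; a request sketch (the `/version` endpoint is hypothetical):

```
# "/catalog/version" is rewritten to "/oc/version" and proxied to the catalog backend on port 8087
curl http://localhost:8000/catalog/version
```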