Compare commits

13 commits on branch feat/chart (author: pierre):

8a05b145f4
88576db39f
8b077b2eff
d1fbd0fac4
cdd8dd8a03
3892692a07
7ec310f161
ced5e55698
7cdb02b677
82aed0fdb6
626a1b1f22
3b7c3a9526
0a96827200
.gitignore (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
k8s/deployed_config
docker/build_logs/
clone_opencloud_microservices.sh (new executable file, 50 lines)
@@ -0,0 +1,50 @@
#!/bin/bash

REPOS=(
    "oc-auth"
    "oc-catalog"
    "oc-datacenter"
    "oc-front"
    "oc-monitord"
    "oc-peer"
    "oc-shared"
    "oc-scheduler"
    "oc-schedulerd"
    "oc-workflow"
    "oc-workspace"
)

# Function to clone a repository, or pull it if it is already present
clone_repo() {
    local repo_url="https://cloud.o-forge.io/core/$1.git"
    local repo_name=$(basename "$repo_url" .git)
    local branch=$2
    echo "Processing repository: $repo_name"

    if [ ! -d "$repo_name" ]; then
        echo "Cloning repository: $repo_name"
        git clone "$repo_url"
        if [ $? -ne 0 ]; then
            echo "Error cloning $repo_url"
            exit 1
        fi
    else
        echo "Repository '$repo_name' already exists. Pulling latest changes..."
        cd "$repo_name" && git pull origin "$branch" && cd ..
    fi
}

cd ..

# Branch to use; defaults to main, can be overridden by the first argument
# (note: bash assignments must not have spaces around '=')
branch="main"
if [ -n "$1" ]; then
    branch=$1
fi

# Iterate through each repository in the list
for repo in "${REPOS[@]}"; do
    clone_repo "$repo" "$branch"
done

echo "All repositories processed successfully."
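A typical invocation, cloning or updating every microservice on a given branch (the branch name is just an example; "main" is also the default):

```bash
./clone_opencloud_microservices.sh main
```

Note that the script changes to the parent directory first, so the repositories end up next to oc-deploy.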
docker/README.md (new file, 41 lines)
@@ -0,0 +1,41 @@
# A guide to effective Docker deployment for Open Cloud

We need a reliable process, using the existing tools and creating new ones, to get a consistent deployment of Open Cloud when each service runs in a Docker container.

This document aims at addressing:

- The existing tools used
- How these tools work
- The needed improvements (bugs/new features)
- The required configuration for each service

## Steps

- Downloading the repos: `oc-deploy/download_oc.py` uses the interactivity offered by Python libraries to select the repos and follow their cloning from the forge; `oc-deploy/clone_opencloud_microservices.sh` is more straightforward, using bash.

- Selecting the services to launch: `build_containers.sh` asks the user which services need to be launched. The user can choose non-essential services (oc-front, oc-monitord and oc-shared) to be added to the minimum set of services required to run Open Cloud (auth, catalog, datacenter, peer, workspace, workflow, scheduler, schedulerd).

- Verifying whether the service really needs a `docker build`: this operation is time and resource consuming, so we need to check:
    - is a container already running
    - does an image already exist

  and prompt the user whether to proceed with the build, start a container from the existing image, or let the current container run.

- Filling the configuration file for the service to be built:
    - We could create a script that interacts with the user, displaying each key from the default config file and asking for the value to use (see the sketch after this list).

- Building the image
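A minimal sketch of such an interactive config-filling script, assuming each service keeps a flat JSON default config and that `jq` is available (`default_config.json` and `config.json` are placeholder names, not part of the current tooling):

```bash
#!/bin/bash
# Sketch: walk the keys of a default JSON config and prompt the user for each value.
DEFAULT_CONFIG="default_config.json"   # hypothetical default config shipped with the service
TARGET_CONFIG="config.json"            # hypothetical file consumed at build/run time

cp "$DEFAULT_CONFIG" "$TARGET_CONFIG"
for key in $(jq -r 'keys[]' "$DEFAULT_CONFIG"); do
    default_value=$(jq -r --arg k "$key" '.[$k]' "$DEFAULT_CONFIG")
    read -p "$key [$default_value]: " value
    value=${value:-$default_value}   # keep the default when the user just presses Enter
    # Note: values are written back as JSON strings in this sketch
    jq --arg k "$key" --arg v "$value" '.[$k] = $v' "$TARGET_CONFIG" > tmp.json && mv tmp.json "$TARGET_CONFIG"
done
echo "Wrote $TARGET_CONFIG"
```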
## Todo

- Implement a script that interacts with the user to fill the configuration JSON file

- Remove the filled JSON file from the forge, so that data from other devs is not stored and used during the build, which would lead to misconfigured services
    - We could keep some generic values, like ports, container addresses...

- Download and generate swagger for beego services

# Known errors and solutions

## Missing claims in the token

oc-auth uses NATS to communicate with the other services in order to retrieve claims/permissions. If you notice that the claims for a route on a specific service are absent from the token, check the service container's logs to see if it managed to connect to the NATS container.
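For example, to check whether a given service (here `oc-datacenter`, just as an illustration) and the NATS broker can see each other:

```bash
docker logs oc-datacenter 2>&1 | grep -i nats
docker logs nats 2>&1 | tail -n 50
```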
docker/build_containers.sh (new executable file, 129 lines)
@@ -0,0 +1,129 @@
|
||||
#!/bin/bash
|
||||
|
||||
# List of services to build
|
||||
|
||||
MINIMUM_REPOS=(
|
||||
"oc-auth"
|
||||
"oc-catalog"
|
||||
"oc-datacenter"
|
||||
"oc-peer"
|
||||
"oc-scheduler"
|
||||
"oc-schedulerd"
|
||||
"oc-workflow"
|
||||
"oc-workspace"
|
||||
)
|
||||
|
||||
EXTRA_REPOS=(
|
||||
"oc-front"
|
||||
"oc-shared"
|
||||
"oc-monitord"
|
||||
)
|
||||
|
||||
REPOS=("${MINIMUM_REPOS[@]}") # Start with minimum repos
|
||||
|
||||
OC_ROOT="$(realpath ../..)"
|
||||
DOCKER_BUILD="$(pwd)"
|
||||
LOG_DIR="$DOCKER_BUILD/build_logs"
|
||||
mkdir -p "$LOG_DIR"
|
||||
|
||||
cd "$OC_ROOT" || exit 1
|
||||
|
||||
# Function to build a service
|
||||
build_service() {
|
||||
local service=$1
|
||||
local logfile="$LOG_DIR/$service.log"
|
||||
|
||||
echo "[START] Building $service..."
|
||||
|
||||
# Run the build in the foreground of this function call: the caller already
# backgrounds build_service with '&' and records $!, so an inner '&' here
# would detach the build and make the later 'wait' return immediately.
docker build -t "$service" "$OC_ROOT/$service" > "$logfile" 2>&1
|
||||
}
|
||||
|
||||
# Track running builds
|
||||
declare -A pids
|
||||
declare -a active_services=()
|
||||
|
||||
|
||||
# Select services to build
|
||||
echo "🔧 Optional extra services:"
|
||||
for i in "${!EXTRA_REPOS[@]}"; do
|
||||
echo " [$((i+1))] ${EXTRA_REPOS[$i]}"
|
||||
done
|
||||
|
||||
read -p "🟡 Do you want to add any extra services? Enter numbers separated by space (e.g., 1 3), or press Enter to skip: " -a selected
|
||||
|
||||
for index in "${selected[@]}"; do
|
||||
if [[ "$index" =~ ^[0-9]+$ ]] && (( index >= 1 && index <= ${#EXTRA_REPOS[@]} )); then
|
||||
REPOS+=("${EXTRA_REPOS[$((index-1))]}")
|
||||
else
|
||||
echo "⚠️ Invalid selection: $index"
|
||||
fi
|
||||
done
|
||||
|
||||
echo "✅ Selected services:"
|
||||
for repo in "${REPOS[@]}"; do
|
||||
echo " - $repo"
|
||||
done
|
||||
|
||||
# Launch builds
|
||||
for service in "${REPOS[@]}"; do
|
||||
IMAGE_NAME="$service"
|
||||
|
||||
# Check if the image exists locally
|
||||
if docker image inspect "$IMAGE_NAME" >/dev/null 2>&1; then
|
||||
read -p "🟡 Image '$IMAGE_NAME' already exists. Rebuild? (y/N): " rebuild
|
||||
if [[ "$rebuild" =~ ^[Yy]$ ]]; then
|
||||
echo "🔄 Rebuilding image for '$IMAGE_NAME'..."
|
||||
else
|
||||
echo "⏭️ Skipping build for '$IMAGE_NAME'."
|
||||
continue
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check if a container is already running from this image
|
||||
if docker ps --filter "ancestor=$IMAGE_NAME" --format '{{.ID}}' | grep -q .; then
|
||||
echo "✅ A container from image '$IMAGE_NAME' is already running. Skipping build."
|
||||
else
|
||||
SERVICE_PATH="$OC_ROOT/$service"
|
||||
if [ -d "$SERVICE_PATH" ]; then
|
||||
build_service "$service" &
|
||||
pids["$service"]=$!
|
||||
active_services+=("$service")
|
||||
else
|
||||
echo "⚠️ Directory not found for $service. Skipping."
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
|
||||
echo "========================"
|
||||
echo "Building: ${active_services[*]}"
|
||||
echo "========================"
|
||||
|
||||
# Monitor logs for each build in parallel
|
||||
for service in "${active_services[@]}"; do
|
||||
logfile="$LOG_DIR/$service.log"
|
||||
|
||||
(
|
||||
tail -n 0 -f "$logfile" | while IFS= read -r line; do
|
||||
# Highlight docker build steps
|
||||
if [[ "$line" =~ Step\ ([0-9]+/[0-9]+) ]]; then
|
||||
echo -e "[$service] 🚧 ${BASH_REMATCH[0]}: $line"
|
||||
else
|
||||
echo "[$service] $line"
|
||||
fi
|
||||
done
|
||||
) &
|
||||
done
|
||||
|
||||
# Wait for all builds to complete
|
||||
for pid in "${pids[@]}"; do
|
||||
wait "$pid"
|
||||
done
|
||||
|
||||
for service in "${active_services[@]}"; do
|
||||
cd "$OC_ROOT/$service"
|
||||
docker compose up -d
|
||||
done
|
||||
|
||||
echo "✅ All builds completed."
|
docker/db/add.sh (new executable file, 9 lines)
@@ -0,0 +1,9 @@
#!/bin/bash

# Copy the seed data into the mongo container, then import each JSON file
# into a collection named after the file (minus the .json extension).
docker cp ./datas mongo:.

for i in $(ls ./datas); do
    firstString=$i
    echo "ADD file $i in collection ${i/.json/}"
    docker exec -it mongo sh -c "mongoimport --jsonArray --db DC_myDC --collection ${i/.json/} --file ./datas/$i"
done
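To check that the import worked, one can list the collections of the target database (assuming a recent mongo image that ships mongosh):

```bash
docker exec -it mongo mongosh DC_myDC --eval 'db.getCollectionNames()'
```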
docker/db/datas/collaborative_area.json (new file, 1 line)
@@ -0,0 +1 @@
[{"_id":"0b6a375f-be3e-49a9-9827-3c2d5eddb057","abstractobject":{"id":"0b6a375f-be3e-49a9-9827-3c2d5eddb057","name":"test","is_draft":false,"creator_id":"c0cece97-7730-4c2a-8c20-a30944564106","creation_date":{"$date":"2025-01-27T10:41:47.741Z"},"update_date":{"$date":"2025-01-27T10:41:47.741Z"},"updater_id":"c0cece97-7730-4c2a-8c20-a30944564106","access_mode":0},"description":"Proto Collaborative area example","collaborative_area":{},"workflows":["58314c99-c595-4ca2-8b5e-822a6774efed"],"allowed_peers_group":{"c0cece97-7730-4c2a-8c20-a30944564106":["*"]},"workspaces":[]}]
docker/db/datas/compute_resource.json (new file, 1 line)
File diff suppressed because one or more lines are too long

docker/db/datas/data_resource.json (new file, 1 line)
File diff suppressed because one or more lines are too long

docker/db/datas/processing_resource.json (new file, 1 line)
File diff suppressed because one or more lines are too long
docker/db/datas/storage_resource.json (new file, 1 line)
@@ -0,0 +1 @@
[{"_id":"04bc70b5-8d7b-44e6-9015-fadfa0fb102d","abstractinstanciatedresource":{"abstractresource":{"type":"storage","abstractobject":{"id":"04bc70b5-8d7b-44e6-9015-fadfa0fb102d","name":"IRT risk database","is_draft":false,"creator_id":"c0cece97-7730-4c2a-8c20-a30944564106","creation_date":"2021-09-30T14:00:00.000Z","update_date":"2021-09-30T14:00:00.000Z","updater_id":"c0cece97-7730-4c2a-8c20-a30944564106","access_mode":1},"logo":"https://cloud.o-forge.io/core/deperecated-oc-catalog/raw/branch/main/scripts/local_imgs/IRT risk database.png","description":"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.","short_description":"S3 compliant IRT file storage","owners":[{"name":"IRT"}]},"instances":[{"env":[{"attr":"source","readonly":true}],"resourceinstance":{"abstractobject":{"id":"7fdccb9c-7090-40a5-bacd-7435bc56c90d","name":"IRT local file storage Marseille"},"location":{"latitude":50.62925,"longitude":3.057256},"country":250,"partnerships":[{"resourcepartnership":{"namespace":"default","peer_groups":{"c0cece97-7730-4c2a-8c20-a30944564106":["*"]},"pricing_profiles":[{"pricing":{"price":50,"currency":"EUR","buying_strategy":0,"time_pricing_strategy":0}}]}}]},"source":"/mnt/vol","local":false,"security_level":"public","size":50,"size_type":3,"redundancy":"RAID5","throughput":"r:200,w:150"}]},"storage_type":5,"acronym":"DC_myDC"},{"_id":"e726020a-b68e-4abc-ab36-c3640ea3f557","abstractinstanciatedresource":{"abstractresource":{"type":"storage","abstractobject":{"id":"e726020a-b68e-4abc-ab36-c3640ea3f557","name":"IRT local file storage","is_draft":false,"creator_id":"c0cece97-7730-4c2a-8c20-a30944564106","creation_date":"2021-09-30T14:00:00.000Z","update_date":"2021-09-30T14:00:00.000Z","updater_id":"c0cece97-7730-4c2a-8c20-a30944564106","access_mode":1},"logo":"https://cloud.o-forge.io/core/deperecated-oc-catalog/raw/branch/main/scripts/local_imgs/IRT local file storage.png","description":"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.","short_description":"S3 compliant IRT file storage","owners":[{"name":"IRT"}]},"instances":[{"resourceinstance":{"env":[{"attr":"source","readonly":true}],"abstractobject":{"id":"7fdccb9c-7090-40a5-bacd-7435bc56c90d","name":"IRT local file storage Marseille"},"location":{"latitude":50.62925,"longitude":3.057256},"country":250,"partnerships":[{"resourcepartnership":{"namespace":"default","peer_groups":{"c0cece97-7730-4c2a-8c20-a30944564106":["*"]},"pricing_profiles":[{"pricing":{"price":50,"currency":"EUR","buying_strategy":0,"time_pricing_strategy":0}}]}}]},"source":"/mnt/vol","local":true,"security_level":"public","size":500,"size_type":0,"encryption":true,"redundancy":"RAID5S","throughput":"r:300,w:350"}]},"storage_type":5,"acronym":"DC_myDC"}]
docker/db/datas/workflow.json (new file, 1 line)
File diff suppressed because one or more lines are too long
docker/start.sh (new executable file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
#!/bin/bash
|
||||
docker network create oc || true  # ignore the error if the network already exists
|
||||
|
||||
docker compose down
|
||||
cd ./tools && docker compose -f ./docker-compose.dev.yml up --force-recreate -d && cd ..
|
||||
cd ./tools && docker compose -f ./docker-compose.traefik.yml up --force-recreate -d && cd ..
|
||||
|
||||
cd ../..
|
||||
|
||||
REPOS=(
|
||||
"oc-auth"
|
||||
"oc-catalog"
|
||||
"oc-datacenter"
|
||||
"oc-monitord"
|
||||
"oc-peer"
|
||||
"oc-shared"
|
||||
"oc-scheduler"
|
||||
"oc-schedulerd"
|
||||
"oc-workflow"
|
||||
"oc-workspace"
|
||||
"oc-front"
|
||||
)
|
||||
for i in "${REPOS[@]}"
|
||||
do
|
||||
echo "Building $i"
|
||||
docker kill $i || true
docker rm $i || true
|
||||
cd ./$i
|
||||
docker build . -t $i && docker compose up -d
|
||||
cd ..
|
||||
done
|
docker/stop.sh (new executable file, 48 lines)
@@ -0,0 +1,48 @@
|
||||
#!/bin/bash
|
||||
docker network rm oc || true  # 'rm' is the docker subcommand for removing a network
|
||||
|
||||
docker compose -f ./tools/docker-compose.traefik.yml down
|
||||
|
||||
TOOLS=(
|
||||
"mongo"
|
||||
"mongo-express"
|
||||
"nats"
|
||||
"loki"
|
||||
"grafana"
|
||||
"hydra-client"
|
||||
"hydra"
|
||||
"keto"
|
||||
"ldap"
|
||||
)
|
||||
|
||||
for i in "${TOOLS[@]}"
|
||||
do
|
||||
echo "kill $i"
|
||||
docker kill $i || true
docker rm $i || true
|
||||
done
|
||||
|
||||
cd ../..
|
||||
|
||||
REPOS=(
|
||||
"oc-auth"
|
||||
"oc-catalog"
|
||||
"oc-datacenter"
|
||||
"oc-monitord"
|
||||
"oc-peer"
|
||||
"oc-shared"
|
||||
"oc-scheduler"
|
||||
"oc-schedulerd"
|
||||
"oc-workflow"
|
||||
"oc-workspace"
|
||||
"oc-front"
|
||||
)
|
||||
for i in "${REPOS[@]}"
|
||||
do
|
||||
echo "Kill $i"
|
||||
cd ./$i
|
||||
docker kill $i || true
docker rm $i || true
make purge || true
|
||||
cd ..
|
||||
done
|
docker/tools/conf/grafana_data_source.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
datasources:
  - name: Loki
    type: loki
    access: proxy
    url: http://loki:3100
    isDefault: true
    jsonData:
      httpMethod: POST
docker/tools/docker-compose.dev.yml (new file, 162 lines)
@@ -0,0 +1,162 @@
|
||||
version: '3.4'
|
||||
|
||||
services:
|
||||
mongo:
|
||||
image: 'mongo:latest'
|
||||
networks:
|
||||
- oc
|
||||
ports:
|
||||
- 27017:27017
|
||||
container_name: mongo
|
||||
volumes:
|
||||
- oc-data:/data/db
|
||||
- oc-data:/data/configdb
|
||||
|
||||
mongo-express:
|
||||
image: "mongo-express:latest"
|
||||
restart: always
|
||||
depends_on:
|
||||
- mongo
|
||||
networks:
|
||||
- oc
|
||||
ports:
|
||||
- 8081:8081
|
||||
environment:
|
||||
- ME_CONFIG_BASICAUTH_USERNAME=test
|
||||
- ME_CONFIG_BASICAUTH_PASSWORD=test
|
||||
nats:
|
||||
image: 'nats:latest'
|
||||
container_name: nats
|
||||
ports:
|
||||
- 4222:4222
|
||||
command:
|
||||
- "--debug"
|
||||
networks:
|
||||
- oc
|
||||
loki:
|
||||
image: 'grafana/loki'
|
||||
container_name: loki
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.loki.entrypoints=web"
|
||||
- "traefik.http.routers.loki.rule=PathPrefix(`/tools/loki`)"
|
||||
- "traefik.http.services.loki.loadbalancer.server.port=3100"
|
||||
- "traefik.http.middlewares.loki-stripprefix.stripprefix.prefixes=/tools/loki"
|
||||
- "traefik.http.routers.loki.middlewares=loki-stripprefix"
|
||||
- "traefik.http.middlewares.loki.forwardauth.address=http://oc-auth:8080/oc/forward"
|
||||
ports :
|
||||
- "3100:3100"
|
||||
networks:
|
||||
- oc
|
||||
grafana:
|
||||
image: 'grafana/grafana'
|
||||
container_name: grafana
|
||||
ports:
|
||||
- '3000:3000'
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.grafana.entrypoints=web"
|
||||
- "traefik.http.routers.grafana.rule=PathPrefix(`/tools/grafana`)"
|
||||
- "traefik.http.services.grafana.loadbalancer.server.port=3000"
|
||||
- "traefik.http.middlewares.grafana-stripprefix.stripprefix.prefixes=/tools/grafana"
|
||||
- "traefik.http.routers.grafana.middlewares=grafana-stripprefix"
|
||||
- "traefik.http.middlewares.grafana.forwardauth.address=http://oc-auth:8080/oc/forward"
|
||||
networks:
|
||||
- oc
|
||||
volumes:
|
||||
- ./conf/grafana_data_source.yml:/etc/grafana/provisioning/datasources/datasource.yml
|
||||
environment:
|
||||
- GF_SECURITY_ADMIN_PASSWORD=pfnirt # Change this to anything but admin to not have a password change page at startup
|
||||
- GF_SECURITY_ADMIN_USER=admin
|
||||
- GF_SECURITY_DISABLE_INITIAL_ADMIN_PASSWORD_CHANGE=true
|
||||
hydra-client:
|
||||
image: oryd/hydra:v2.2.0
|
||||
container_name: hydra-client
|
||||
environment:
|
||||
HYDRA_ADMIN_URL: http://hydra:4445
|
||||
ORY_SDK_URL: http://hydra:4445
|
||||
command:
|
||||
- create
|
||||
- oauth2-client
|
||||
- --skip-tls-verify
|
||||
- --name
|
||||
- test-client
|
||||
- --secret
|
||||
- oc-auth-got-secret
|
||||
- --response-type
|
||||
- id_token,token,code
|
||||
- --grant-type
|
||||
- implicit,refresh_token,authorization_code,client_credentials
|
||||
- --scope
|
||||
- openid,profile,email,roles
|
||||
- --token-endpoint-auth-method
|
||||
- client_secret_post
|
||||
- --redirect-uri
|
||||
- http://localhost:3000
|
||||
|
||||
networks:
|
||||
- oc
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: none
|
||||
depends_on:
|
||||
- hydra
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://hydra:4445"]
|
||||
interval: 10s
|
||||
timeout: 10s
|
||||
retries: 10
|
||||
hydra:
|
||||
container_name: hydra
|
||||
image: oryd/hydra:v2.2.0
|
||||
environment:
|
||||
SECRETS_SYSTEM: oc-auth-got-secret
|
||||
LOG_LEAK_SENSITIVE_VALUES: true
|
||||
# OAUTH2_TOKEN_HOOK_URL: http://oc-auth:8080/oc/claims
|
||||
URLS_SELF_ISSUER: http://hydra:4444
|
||||
URLS_SELF_PUBLIC: http://hydra:4444
|
||||
WEBFINGER_OIDC_DISCOVERY_SUPPORTED_SCOPES: profile,email,phone,roles
|
||||
WEBFINGER_OIDC_DISCOVERY_SUPPORTED_CLAIMS: name,family_name,given_name,nickname,email,phone_number
|
||||
DSN: memory
|
||||
command: serve all --dev
|
||||
networks:
|
||||
- oc
|
||||
ports:
|
||||
- "4444:4444"
|
||||
- "4445:4445"
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
ldap:
|
||||
image: pgarrett/ldap-alpine
|
||||
container_name: ldap
|
||||
volumes:
|
||||
- "./ldap.ldif:/ldif/ldap.ldif"
|
||||
networks:
|
||||
- oc
|
||||
ports:
|
||||
- "390:389"
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
keto:
|
||||
image: oryd/keto:v0.7.0-alpha.1-sqlite
|
||||
ports:
|
||||
- "4466:4466"
|
||||
- "4467:4467"
|
||||
command: serve -c /home/ory/keto.yml
|
||||
restart: on-failure
|
||||
volumes:
|
||||
- type: bind
|
||||
source: .
|
||||
target: /home/ory
|
||||
container_name: keto
|
||||
networks:
|
||||
- oc
|
||||
|
||||
volumes:
|
||||
oc-data:
|
||||
|
||||
networks:
|
||||
oc:
|
||||
external: true
|
docker/tools/docker-compose.traefik.yml (new file, 26 lines)
@@ -0,0 +1,26 @@
|
||||
version: '3.4'
|
||||
|
||||
services:
|
||||
traefik:
|
||||
image: traefik:v2.10.4
|
||||
container_name: traefik
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- oc
|
||||
command:
|
||||
- "--api.insecure=true"
|
||||
- "--providers.docker=true"
|
||||
- "--entrypoints.web.address=:8000"
|
||||
- "--api.dashboard=true"
|
||||
ports:
|
||||
- "8000:8000" # Expose Traefik on port 80
|
||||
- "8080:8080"
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
|
||||
volumes:
|
||||
oc-data:
|
||||
|
||||
networks:
|
||||
oc:
|
||||
external: true
|
docker/tools/keto.yml (new file, 18 lines)
@@ -0,0 +1,18 @@
|
||||
version: v0.6.0-alpha.1
|
||||
|
||||
log:
|
||||
level: debug
|
||||
|
||||
namespaces:
|
||||
- id: 0
|
||||
name: open-cloud
|
||||
|
||||
dsn: memory
|
||||
|
||||
serve:
|
||||
read:
|
||||
host: 0.0.0.0
|
||||
port: 4466
|
||||
write:
|
||||
host: 0.0.0.0
|
||||
port: 4467
|
docker/tools/ldap.ldif (new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
dn: uid=admin,ou=Users,dc=example,dc=com
|
||||
objectClass: inetOrgPerson
|
||||
cn: Admin
|
||||
sn: Istrator
|
||||
uid: admin
|
||||
userPassword: admin
|
||||
mail: admin@example.com
|
||||
ou: Users
|
||||
|
||||
dn: ou=AppRoles,dc=example,dc=com
|
||||
objectClass: organizationalunit
|
||||
ou: AppRoles
|
||||
description: AppRoles
|
||||
|
||||
dn: ou=App1,ou=AppRoles,dc=example,dc=com
|
||||
objectClass: organizationalunit
|
||||
ou: App1
|
||||
description: App1
|
||||
|
||||
dn: cn=traveler,ou=App1,ou=AppRoles,dc=example,dc=com
|
||||
objectClass: groupofnames
|
||||
cn: traveler
|
||||
description: traveler
|
||||
member: uid=admin,ou=Users,dc=example,dc=com
|
@@ -1,207 +0,0 @@
|
||||
@startuml
|
||||
skinparam componentStyle rectangle
|
||||
|
||||
node "Kubernetes Cluster" {
|
||||
|
||||
cloud "Service: oc-catalog" as oc_catalog_service {
|
||||
oc_catalog_service : Type: NodePort
|
||||
oc_catalog_service : External NodePort: 8087 # Exposed NodePort for external access
|
||||
oc_catalog_service : Internal TargetPort: 8080
|
||||
}
|
||||
|
||||
' Deployment for oc-catalog managing the pods
|
||||
node "Deployment: oc-catalog" as oc_catalog_deployment {
|
||||
oc_catalog_deployment : Replicas: {{ .Values.replicaCount }}
|
||||
oc_catalog_deployment : Image: registry.dev.svc.cluster.local:5000/oc-catalog:latest
|
||||
oc_catalog_deployment : PullPolicy: IfNotPresent
|
||||
oc_catalog_deployment : TargetPort: 8080
|
||||
|
||||
node "Pod: oc-catalog-1" as catalog_1 {
|
||||
component "Container: oc-catalog" as oc_catalog_container1 {
|
||||
oc_catalog_container1 : Internal Port: 8080
|
||||
oc_catalog_container1 : MONGO_DATABASE=DC_myDC
|
||||
oc_catalog_container1 : MONGO_URI=mongodb://mongo:27017
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
oc_catalog_service --> oc_catalog_deployment : Routes traffic to Deployment
|
||||
oc_catalog_deployment --> catalog_1 : Manages Pods
|
||||
|
||||
' MongoDB service and statefulset
|
||||
|
||||
cloud "Service: mongo" as mongo_service {
|
||||
mongo_service : Type: ClusterIP
|
||||
mongo_service : Internal Port: 27017
|
||||
}
|
||||
|
||||
catalog_1 --> mongo_service : Connects to MongoDB
|
||||
|
||||
cloud "Service: oc-shared" as oc_shared_service {
|
||||
oc_shared_service : Type: NodePort
|
||||
oc_shared_service : External NodePort: 8091 # Exposed NodePort for external access
|
||||
oc_shared_service : Internal TargetPort: 8080
|
||||
}
|
||||
|
||||
' Deployment for oc-shared managing the pods
|
||||
node "Deployment: oc-shared" as oc_shared_deployment {
|
||||
oc_shared_deployment : Replicas: {{ .Values.replicaCount }}
|
||||
oc_shared_deployment : Image: registry.dev.svc.cluster.local:5000/oc-shared:latest
|
||||
oc_shared_deployment : PullPolicy: IfNotPresent
|
||||
oc_shared_deployment : TargetPort: 8080
|
||||
|
||||
node "Pod: oc-shared-1" as shared_1 {
|
||||
component "Container: oc-shared" as oc_shared_container1 {
|
||||
oc_shared_container1 : Internal Port: 8080
|
||||
oc_shared_container1 : MONGO_DATABASE=DC_myDC
|
||||
oc_shared_container1 : MONGO_URI=mongodb://mongo:27017
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
oc_shared_service --> oc_shared_deployment : Routes traffic to Deployment
|
||||
oc_shared_deployment --> shared_1 : Manages Pods
|
||||
|
||||
' MongoDB service and statefulset
|
||||
|
||||
cloud "Service: mongo" as mongo_service {
|
||||
mongo_service : Type: ClusterIP
|
||||
mongo_service : Internal Port: 27017
|
||||
}
|
||||
|
||||
shared_1 --> mongo_service : Connects to MongoDB
|
||||
|
||||
cloud "Service: oc-workflow" as oc_workflow_service {
|
||||
oc_workflow_service : Type: NodePort
|
||||
oc_workflow_service : External NodePort: 8088 # Exposed NodePort for external access
|
||||
oc_workflow_service : Internal TargetPort: 8080
|
||||
}
|
||||
|
||||
' Deployment for oc-workflow managing the pods
|
||||
node "Deployment: oc-workflow" as oc_workflow_deployment {
|
||||
oc_workflow_deployment : Replicas: {{ .Values.replicaCount }}
|
||||
oc_workflow_deployment : Image: registry.dev.svc.cluster.local:5000/oc-workflow:latest
|
||||
oc_workflow_deployment : PullPolicy: IfNotPresent
|
||||
oc_workflow_deployment : TargetPort: 8080
|
||||
|
||||
node "Pod: oc-workflow-1" as workflow_1 {
|
||||
component "Container: oc-workflow" as oc_workflow_container1 {
|
||||
oc_workflow_container1 : Internal Port: 8080
|
||||
oc_workflow_container1 : MONGO_DATABASE=DC_myDC
|
||||
oc_workflow_container1 : MONGO_URI=mongodb://mongo:27017
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
oc_workflow_service --> oc_workflow_deployment : Routes traffic to Deployment
|
||||
oc_workflow_deployment --> workflow_1 : Manages Pods
|
||||
|
||||
' MongoDB service and statefulset
|
||||
|
||||
cloud "Service: mongo" as mongo_service {
|
||||
mongo_service : Type: ClusterIP
|
||||
mongo_service : Internal Port: 27017
|
||||
}
|
||||
|
||||
workflow_1 --> mongo_service : Connects to MongoDB
|
||||
|
||||
|
||||
cloud "Service: oc-workspace" as oc_workspace_service {
|
||||
oc_workspace_service : Type: NodePort
|
||||
oc_workspace_service : External NodePort: 8089 # Exposed NodePort for external access
|
||||
oc_workspace_service : Internal TargetPort: 8080
|
||||
}
|
||||
|
||||
' Deployment for oc-workspace managing the pods
|
||||
node "Deployment: oc-workspace" as oc_workspace_deployment {
|
||||
oc_workspace_deployment : Replicas: {{ .Values.replicaCount }}
|
||||
oc_workspace_deployment : Image: registry.dev.svc.cluster.local:5000/oc-workspace:latest
|
||||
oc_workspace_deployment : PullPolicy: IfNotPresent
|
||||
oc_workspace_deployment : TargetPort: 8080
|
||||
|
||||
node "Pod: oc-workspace-1" as workspace_1 {
|
||||
component "Container: oc-workspace" as oc_workspace_container1 {
|
||||
oc_workspace_container1 : Internal Port: 8080
|
||||
oc_workspace_container1 : MONGO_DATABASE=DC_myDC
|
||||
oc_workspace_container1 : MONGO_URI=mongodb://mongo:27017
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
oc_workspace_service --> oc_workspace_deployment : Routes traffic to Deployment
|
||||
oc_workspace_deployment --> workspace_1 : Manages Pods
|
||||
|
||||
|
||||
|
||||
cloud "Service: oc-scheduler" as oc_scheduler_service {
|
||||
oc_scheduler_service : Type: NodePort
|
||||
oc_scheduler_service : External NodePort: 8090 # Exposed NodePort for external access
|
||||
oc_scheduler_service : Internal TargetPort: 8080
|
||||
}
|
||||
|
||||
' Deployment for oc-scheduler managing the pods
|
||||
node "Deployment: oc-scheduler" as oc_scheduler_deployment {
|
||||
oc_scheduler_deployment : Replicas: {{ .Values.replicaCount }}
|
||||
oc_scheduler_deployment : Image: registry.dev.svc.cluster.local:5000/oc-scheduler:latest
|
||||
oc_scheduler_deployment : PullPolicy: IfNotPresent
|
||||
oc_scheduler_deployment : TargetPort: 8080
|
||||
|
||||
node "Pod: oc-scheduler-1" as scheduler_1 {
|
||||
component "Container: oc-scheduler" as oc_scheduler_container1 {
|
||||
oc_scheduler_container1 : Internal Port: 8080
|
||||
oc_scheduler_container1 : MONGO_DATABASE=DC_myDC
|
||||
oc_scheduler_container1 : MONGO_URI=mongodb://mongo:27017
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
oc_scheduler_service --> oc_scheduler_deployment : Routes traffic to Deployment
|
||||
oc_scheduler_deployment --> scheduler_1 : Manages Pods
|
||||
|
||||
' MongoDB service and statefulset
|
||||
|
||||
cloud "Service: mongo" as mongo_service {
|
||||
mongo_service : Type: ClusterIP
|
||||
mongo_service : Internal Port: 27017
|
||||
}
|
||||
|
||||
scheduler_1 --> mongo_service : Connects to MongoDB
|
||||
|
||||
' MongoDB service and statefulset
|
||||
|
||||
cloud "Service: mongo" as mongo_service {
|
||||
mongo_service : Type: ClusterIP
|
||||
mongo_service : Internal Port: 27017
|
||||
}
|
||||
|
||||
' MongoDB StatefulSet with PVC and PV
|
||||
|
||||
node "StatefulSet: MongoDB" as mongo_statefulset {
|
||||
component " Pod: MongoDB" as mongo_pod{
|
||||
component "Container: MongoDB" as mongo_container {
|
||||
mongo_container : Image: mongo:latest
|
||||
mongo_container : PullPolicy: IfNotPresent
|
||||
mongo_container : Exposed Port: 27017
|
||||
mongo_container : Volume Mount: /data/db
|
||||
mongo_container : Volume Mount: /data/configdb
|
||||
mongo_container : Secret: username, password (base64)
|
||||
}
|
||||
}
|
||||
|
||||
storage "PersistentVolumeClaim: mongo-pvc" as mongo_PVC {
|
||||
mongo_pvc : Access Mode: ReadWriteOnce
|
||||
mongo_pvc : Size: 1Gi
|
||||
mongo_pvc : Storage Class: {{ .Values.persistence.storageClass }}
|
||||
}
|
||||
}
|
||||
|
||||
storage "PersistentVolume: PV" as mongo_PV {
|
||||
mongo_pv : Bound to PVC: mongo-pvc
|
||||
}
|
||||
mongo_service --> mongo_statefulset : Routes traffic to MongoDB StatefulSet
|
||||
mongo_pod --> mongo_PVC : Mounted Persistent Volume Claim
|
||||
mongo_pvc --> mongo_PV : Bound Persistent Volume
|
||||
workspace_1 --> mongo_service : Connects to MongoDB
|
||||
|
||||
}
|
||||
@enduml
|
download_oc.py (new executable file, 78 lines)
@@ -0,0 +1,78 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
import inquirer
|
||||
import os
|
||||
import subprocess
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
from rich.live import Live
|
||||
|
||||
# URLs of the pages to scan for repository names
|
||||
urls = [
|
||||
'https://cloud.o-forge.io/explore/repos?page=1',
|
||||
'https://cloud.o-forge.io/explore/repos?page=2'
|
||||
]
|
||||
|
||||
def get_all_repo(urls):
|
||||
repositories = []
|
||||
|
||||
for url in urls:
|
||||
response = requests.get(url)
|
||||
response.raise_for_status()  # Check that the request succeeded
|
||||
|
||||
soup = BeautifulSoup(response.text, 'html.parser')
|
||||
|
||||
titles = soup.find_all(class_='flex-item-title')
|
||||
|
||||
for title in titles:
|
||||
repo_name = title.get_text(strip=True)
|
||||
if repo_name.startswith('core/'):
|
||||
repositories.append(repo_name.split("core/")[1])
|
||||
return repositories
|
||||
|
||||
def git_clone_repo(repo: str, dir: str, status: dict):
|
||||
status[repo] = "⏳ Cloning..."
|
||||
|
||||
try:
|
||||
if os.path.exists(f"{dir}/{repo}") :
|
||||
subprocess.run(["git", "-C", f"{dir}/{repo}","pull"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
else:
|
||||
base_url = f"https://cloud.o-forge.io/core/{repo}.git"
|
||||
subprocess.run(["git", "clone", base_url, f"{dir}/{repo}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
||||
status[repo] = "✅ Done"
|
||||
except Exception as e:
|
||||
status[repo] = f"❌ Failed: {e}"
|
||||
|
||||
def display_status(status):
|
||||
table = Table(title="Repository Cloning Status")
|
||||
table.add_column("Repository", justify="left")
|
||||
table.add_column("Status", justify="right")
|
||||
|
||||
for repo, state in status.items():
|
||||
table.add_row(repo, state)
|
||||
|
||||
return table
|
||||
|
||||
repositories = get_all_repo(urls)
|
||||
cwd = os.getcwd()
|
||||
|
||||
questions = [
|
||||
inquirer.Checkbox('repo_choice',
|
||||
message=f"Which Open Cloud repo do you want to download ? (Will be download in {cwd})?",
|
||||
choices=repositories,
|
||||
),
|
||||
]
|
||||
|
||||
selected_repo = inquirer.prompt(questions)
|
||||
status = {repo: "Waiting" for repo in selected_repo["repo_choice"]}
|
||||
|
||||
with ThreadPoolExecutor() as executor:
|
||||
futures = {executor.submit(git_clone_repo, repo, cwd, status): repo for repo in selected_repo["repo_choice"]}
|
||||
|
||||
with Live(display_status(status), refresh_per_second=2) as live:
|
||||
for future in as_completed(futures):
|
||||
live.update(display_status(status))
|
k8s/README.md (new file, 46 lines)
@@ -0,0 +1,46 @@

## Deploy the opencloud chart

```
./start.sh <mode: dev|prod, default: dev> <branch, default: main>
```
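For example, to deploy the dev setup from the feat/chart branch (the branch name here is just an illustration):

```
./start.sh dev feat/chart
```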
Feel free to modify/create a new opencloud/dev-values.yaml. The provided setup should work out of the box, but is not suitable for production usage.

## Hostname settings

Edit your /etc/hosts file and add the following line:

```
127.0.0.1 beta.opencloud.com
```

## Done

Everything should be operational now; go to http://beta.opencloud.com and enjoy the ride.

# Prebuilt microservices deployment procedure

TODO

# First steps

Go to http://beta.opencloud.com/users

Log in using the default user/password combo ldapadmin/ldapadmin

Create a new user, or change the default one

Go to http://beta.opencloud.com

Log in using your fresh credentials

Do stuff

You can go to http://beta.opencloud.com/mongoexpress

... for mongo express web client access (default login/password is test/testme)

You can go to http://localhost/dashboard/

... for access to the Traefik reverse proxy front-end
k8s/start.sh (new executable file, 21 lines)
@@ -0,0 +1,21 @@
#!/bin/bash
mode=${1:-dev}
branch=${2:-main}

cd ../..

if [ ! -d "oc-k8s" ]; then
    echo "Cloning repository: oc-k8s"
    git clone "https://cloud.o-forge.io/core/oc-k8s.git"
    if [ $? -ne 0 ]; then
        echo "Error cloning oc-k8s"
        exit 1
    fi
else
    echo "Repository 'oc-k8s' already exists. Pulling latest changes..."
fi
cd "oc-k8s" && git checkout $branch && git pull

./create_kind_cluster.sh
./clone_opencloud_microservices.sh $branch
./build_opencloud_microservices.sh
./install.sh $mode
k8s/stop.sh (new executable file, 20 lines)
@@ -0,0 +1,20 @@
#!/bin/bash
mode=${1:-dev}
branch=${2:-main}

cd ../..

if [ ! -d "oc-k8s" ]; then
    echo "Cloning repository: oc-k8s"
    git clone "https://cloud.o-forge.io/core/oc-k8s.git"
    if [ $? -ne 0 ]; then
        echo "Error cloning oc-k8s"
        exit 1
    fi
else
    echo "Repository 'oc-k8s' already exists. Pulling latest changes..."
fi
cd "oc-k8s" && git checkout $branch && git pull

./uninstall.sh $mode
./delete_kind_cluster.sh
update_opencloud_oc_lib.sh (new empty file, 0 lines)
vanilla/README.md (new file, 7 lines)
@@ -0,0 +1,7 @@
# RUN

- `./start.sh <YOUR INTERNET IP>`

Now reach localhost:8000
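For example, with the LAN address used elsewhere in this deployment (substitute your own IP):

```
./start.sh 192.168.1.169
```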
# STOP

- `./stop.sh`
vanilla/start.sh (new executable file, 38 lines)
@@ -0,0 +1,38 @@
|
||||
#!/bin/bash
|
||||
|
||||
echo "Stopping all services..."
|
||||
|
||||
./stop.sh > /dev/null 2>&1
|
||||
|
||||
echo "Starting all services"
|
||||
|
||||
cp ./traefik-dev-reverse/template_dynamic.yml ./traefik-dev-reverse/dynamic.yml
|
||||
sed -i "s/localhost/$1/g" ./traefik-dev-reverse/dynamic.yml
|
||||
|
||||
docker network create oc || true  # ignore the error if the network already exists
|
||||
cd ./tools && docker compose -f ./docker-compose.dev.yml up --force-recreate -d
|
||||
cd .. && docker compose -f ./traefik-dev-reverse/docker-compose.yml up --force-recreate -d
|
||||
|
||||
cd ../..
|
||||
|
||||
REPOS=(
|
||||
"oc-auth"
|
||||
"oc-catalog"
|
||||
"oc-datacenter"
|
||||
"oc-monitord"
|
||||
"oc-peer"
|
||||
"oc-shared"
|
||||
"oc-scheduler"
|
||||
"oc-schedulerd"
|
||||
"oc-workflow"
|
||||
"oc-workspace"
|
||||
"oc-front"
|
||||
)
|
||||
for i in "${REPOS[@]}"
|
||||
do
|
||||
echo "Building $i"
|
||||
cd ./$i
|
||||
make dev HOST="${2:-http://localhost:8000}" &
|
||||
cd ..
|
||||
done
|
||||
|
vanilla/stop.sh (new executable file, 47 lines)
@@ -0,0 +1,47 @@
|
||||
#!/bin/bash
|
||||
docker network rm oc || true  # 'rm' is the docker subcommand for removing a network
|
||||
|
||||
docker compose -f ./traefik-dev-reverse/docker-compose.yml rm -s -v -f
|
||||
|
||||
TOOLS=(
|
||||
"mongo"
|
||||
"mongo-express"
|
||||
"nats"
|
||||
"loki"
|
||||
"grafana"
|
||||
"keto"
|
||||
"ldap"
|
||||
)
|
||||
|
||||
for i in "${TOOLS[@]}"
|
||||
do
|
||||
echo "kill $i"
|
||||
docker kill $i || true
docker rm $i || true
|
||||
done
|
||||
|
||||
cd ../..
|
||||
|
||||
REPOS=(
|
||||
"oc-auth"
|
||||
"oc-catalog"
|
||||
"oc-datacenter"
|
||||
"oc-monitord"
|
||||
"oc-peer"
|
||||
"oc-shared"
|
||||
"oc-scheduler"
|
||||
"oc-schedulerd"
|
||||
"oc-workflow"
|
||||
"oc-workspace"
|
||||
"oc-front"
|
||||
)
|
||||
for i in "${REPOS[@]}"
|
||||
do
|
||||
echo "kill $i"
|
||||
docker kill $i || true
docker rm $i || true
cd ./$i
make purge || true
|
||||
cd ..
|
||||
done
|
||||
|
vanilla/tools/docker-compose.dev.yml (new file, 104 lines)
@@ -0,0 +1,104 @@
|
||||
version: '3.4'
|
||||
|
||||
services:
|
||||
mongo:
|
||||
image: 'mongo:latest'
|
||||
networks:
|
||||
- oc
|
||||
ports:
|
||||
- 27017:27017
|
||||
container_name: mongo
|
||||
volumes:
|
||||
- oc-data:/data/db
|
||||
- oc-data:/data/configdb
|
||||
|
||||
mongo-express:
|
||||
image: "mongo-express:latest"
|
||||
restart: always
|
||||
depends_on:
|
||||
- mongo
|
||||
networks:
|
||||
- oc
|
||||
ports:
|
||||
- 8081:8081
|
||||
environment:
|
||||
- ME_CONFIG_BASICAUTH_USERNAME=test
|
||||
- ME_CONFIG_BASICAUTH_PASSWORD=test
|
||||
nats:
|
||||
image: 'nats:latest'
|
||||
container_name: nats
|
||||
ports:
|
||||
- 4222:4222
|
||||
command:
|
||||
- "--debug"
|
||||
networks:
|
||||
- oc
|
||||
loki:
|
||||
image: 'grafana/loki'
|
||||
container_name: loki
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.loki.entrypoints=web"
|
||||
- "traefik.http.routers.loki.rule=PathPrefix(`/tools/loki`)"
|
||||
- "traefik.http.services.loki.loadbalancer.server.port=3100"
|
||||
- "traefik.http.middlewares.loki-stripprefix.stripprefix.prefixes=/tools/loki"
|
||||
- "traefik.http.routers.loki.middlewares=loki-stripprefix"
|
||||
- "traefik.http.middlewares.loki.forwardauth.address=http://localhost:8094/oc/forward"
|
||||
ports :
|
||||
- "3100:3100"
|
||||
networks:
|
||||
- oc
|
||||
grafana:
|
||||
image: 'grafana/grafana'
|
||||
container_name: grafana
|
||||
ports:
|
||||
- '3000:3000'
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.grafana.entrypoints=web"
|
||||
- "traefik.http.routers.grafana.rule=PathPrefix(`/tools/grafana`)"
|
||||
- "traefik.http.services.grafana.loadbalancer.server.port=3000"
|
||||
- "traefik.http.middlewares.grafana-stripprefix.stripprefix.prefixes=/tools/grafana"
|
||||
- "traefik.http.routers.grafana.middlewares=grafana-stripprefix"
|
||||
- "traefik.http.middlewares.grafana.forwardauth.address=http://localhost:8094/oc/forward"
|
||||
networks:
|
||||
- oc
|
||||
volumes:
|
||||
- ./conf/grafana_data_source.yml:/etc/grafana/provisioning/datasources/datasource.yml
|
||||
environment:
|
||||
- GF_SECURITY_ADMIN_PASSWORD=pfnirt # Change this to anything but admin to not have a password change page at startup
|
||||
- GF_SECURITY_ADMIN_USER=admin
|
||||
- GF_SECURITY_DISABLE_INITIAL_ADMIN_PASSWORD_CHANGE=true
|
||||
ldap:
|
||||
image: pgarrett/ldap-alpine
|
||||
container_name: ldap
|
||||
volumes:
|
||||
- "./ldap.ldif:/ldif/ldap.ldif"
|
||||
networks:
|
||||
- oc
|
||||
ports:
|
||||
- "390:389"
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
keto:
|
||||
image: oryd/keto:v0.7.0-alpha.1-sqlite
|
||||
ports:
|
||||
- "4466:4466"
|
||||
- "4467:4467"
|
||||
command: serve -c /home/ory/keto.yml
|
||||
restart: on-failure
|
||||
volumes:
|
||||
- type: bind
|
||||
source: .
|
||||
target: /home/ory
|
||||
container_name: keto
|
||||
networks:
|
||||
- oc
|
||||
|
||||
volumes:
|
||||
oc-data:
|
||||
|
||||
networks:
|
||||
oc:
|
||||
external: true
|
vanilla/tools/keto.yml (new file, 18 lines)
@@ -0,0 +1,18 @@
|
||||
version: v0.6.0-alpha.1
|
||||
|
||||
log:
|
||||
level: debug
|
||||
|
||||
namespaces:
|
||||
- id: 0
|
||||
name: open-cloud
|
||||
|
||||
dsn: memory
|
||||
|
||||
serve:
|
||||
read:
|
||||
host: 0.0.0.0
|
||||
port: 4466
|
||||
write:
|
||||
host: 0.0.0.0
|
||||
port: 4467
|
vanilla/tools/ldap.ldif (new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
dn: uid=admin,ou=Users,dc=example,dc=com
|
||||
objectClass: inetOrgPerson
|
||||
cn: Admin
|
||||
sn: Istrator
|
||||
uid: admin
|
||||
userPassword: admin
|
||||
mail: admin@example.com
|
||||
ou: Users
|
||||
|
||||
dn: ou=AppRoles,dc=example,dc=com
|
||||
objectClass: organizationalunit
|
||||
ou: AppRoles
|
||||
description: AppRoles
|
||||
|
||||
dn: ou=App1,ou=AppRoles,dc=example,dc=com
|
||||
objectClass: organizationalunit
|
||||
ou: App1
|
||||
description: App1
|
||||
|
||||
dn: cn=traveler,ou=App1,ou=AppRoles,dc=example,dc=com
|
||||
objectClass: groupofnames
|
||||
cn: traveler
|
||||
description: traveler
|
||||
member: uid=admin,ou=Users,dc=example,dc=com
|
vanilla/traefik-dev-reverse/docker-compose.yml (new file, 13 lines)
@@ -0,0 +1,13 @@
|
||||
version: '3.8'
|
||||
|
||||
services:
|
||||
traefik:
|
||||
image: traefik:v3.0
|
||||
container_name: traefik
|
||||
restart: always
|
||||
ports:
|
||||
- "8000:8000" # Expose Traefik on port 8000
|
||||
volumes:
|
||||
- "/var/run/docker.sock:/var/run/docker.sock"
|
||||
- "./traefik.yml:/etc/traefik/traefik.yml"
|
||||
- "./dynamic.yml:/etc/traefik/dynamic.yml"
|
vanilla/traefik-dev-reverse/dynamic.yml (new file, 164 lines)
@@ -0,0 +1,164 @@
|
||||
http:
|
||||
routers:
|
||||
workspace-router:
|
||||
rule: "PathPrefix(`/workspace`)"
|
||||
entryPoints:
|
||||
- "web"
|
||||
service: workspace-service
|
||||
middlewares:
|
||||
- replace-workspace
|
||||
workflow-router:
|
||||
rule: "PathPrefix(`/workflow`)"
|
||||
entryPoints:
|
||||
- "web"
|
||||
service: workflow-service
|
||||
middlewares:
|
||||
- replace-workflow
|
||||
shared-router:
|
||||
rule: "PathPrefix(`/shared`)"
|
||||
entryPoints:
|
||||
- "web"
|
||||
service: shared-service
|
||||
middlewares:
|
||||
- replace-shared
|
||||
scheduler-router:
|
||||
rule: "PathPrefix(`/scheduler`)"
|
||||
entryPoints:
|
||||
- "web"
|
||||
service: scheduler-service
|
||||
middlewares:
|
||||
- replace-scheduler
|
||||
peer-router:
|
||||
rule: "PathPrefix(`/peer`)"
|
||||
entryPoints:
|
||||
- "web"
|
||||
service: peer-service
|
||||
middlewares:
|
||||
- replace-peer
|
||||
datacenter-router:
|
||||
rule: "PathPrefix(`/datacenter`)"
|
||||
entryPoints:
|
||||
- "web"
|
||||
service: datacenter-service
|
||||
middlewares:
|
||||
- replace-datacenter
|
||||
catalog-router:
|
||||
rule: "PathPrefix(`/catalog`)"
|
||||
entryPoints:
|
||||
- "web"
|
||||
service: catalog-service
|
||||
middlewares:
|
||||
- replace-catalog
|
||||
auth-router:
|
||||
rule: "PathPrefix(`/auth`)"
|
||||
entryPoints:
|
||||
- "web"
|
||||
service: auth-service
|
||||
middlewares:
|
||||
- replace-auth
|
||||
front-router:
|
||||
rule: "PathPrefix(`/`)"
|
||||
entryPoints:
|
||||
- "web"
|
||||
service: front-service
|
||||
middlewares:
|
||||
- replace-front
|
||||
|
||||
services:
|
||||
workspace-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://192.168.1.169:8089"
|
||||
workflow-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://192.168.1.169:8088"
|
||||
shared-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://192.168.1.169:8091"
|
||||
scheduler-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://192.168.1.169:8090"
|
||||
peer-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://192.168.1.169:8093"
|
||||
datacenter-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://192.168.1.169:8092"
|
||||
catalog-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://192.168.1.169:8087"
|
||||
auth-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://192.168.1.169:8094"
|
||||
front-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://192.168.1.169:8080"
|
||||
|
||||
middlewares:
|
||||
workspace:
|
||||
forwardauth:
|
||||
address: "http://192.168.1.169:8094/oc/forward"
|
||||
workflow:
|
||||
forwardauth:
|
||||
address: "http://192.168.1.169:8094/oc/forward"
|
||||
shared:
|
||||
forwardauth:
|
||||
address: "http://192.168.1.169:8094/oc/forward"
|
||||
scheduler:
|
||||
forwardauth:
|
||||
address: "http://192.168.1.169:8094/oc/forward"
|
||||
peer:
|
||||
forwardauth:
|
||||
address: "http://192.168.1.169:8094/oc/forward"
|
||||
datacenter:
|
||||
forwardauth:
|
||||
address: "http://192.168.1.169:8094/oc/forward"
|
||||
catalog:
|
||||
forwardauth:
|
||||
address: "http://192.168.1.169:8094/oc/forward"
|
||||
auth:
|
||||
forwardauth:
|
||||
address: "http://192.168.1.169:8094/oc/forward"
|
||||
replace-workspace:
|
||||
replacePathRegex:
|
||||
regex: "^/workspace(.*)"
|
||||
replacement: "/oc$1"
|
||||
replace-workflow:
|
||||
replacePathRegex:
|
||||
regex: "^/workflow(.*)"
|
||||
replacement: "/oc$1"
|
||||
replace-shared:
|
||||
replacePathRegex:
|
||||
regex: "^/shared(.*)"
|
||||
replacement: "/oc$1"
|
||||
replace-scheduler:
|
||||
replacePathRegex:
|
||||
regex: "^/scheduler(.*)"
|
||||
replacement: "/oc$1"
|
||||
replace-peer:
|
||||
replacePathRegex:
|
||||
regex: "^/peer(.*)"
|
||||
replacement: "/oc$1"
|
||||
replace-datacenter:
|
||||
replacePathRegex:
|
||||
regex: "^/datacenter(.*)"
|
||||
replacement: "/oc$1"
|
||||
replace-catalog:
|
||||
replacePathRegex:
|
||||
regex: "^/catalog(.*)"
|
||||
replacement: "/oc$1"
|
||||
replace-auth:
|
||||
replacePathRegex:
|
||||
regex: "^/auth(.*)"
|
||||
replacement: "/oc$1"
|
||||
replace-front:
|
||||
stripprefix:
|
||||
prefixes: "/"
|
vanilla/traefik-dev-reverse/template_dynamic.yml (new file, 164 lines)
@@ -0,0 +1,164 @@
|
||||
http:
|
||||
routers:
|
||||
workspace-router:
|
||||
rule: "PathPrefix(`/workspace`)"
|
||||
entryPoints:
|
||||
- "web"
|
||||
service: workspace-service
|
||||
middlewares:
|
||||
- replace-workspace
|
||||
workflow-router:
|
||||
rule: "PathPrefix(`/workflow`)"
|
||||
entryPoints:
|
||||
- "web"
|
||||
service: workflow-service
|
||||
middlewares:
|
||||
- replace-workflow
|
||||
shared-router:
|
||||
rule: "PathPrefix(`/shared`)"
|
||||
entryPoints:
|
||||
- "web"
|
||||
service: shared-service
|
||||
middlewares:
|
||||
- replace-shared
|
||||
scheduler-router:
|
||||
rule: "PathPrefix(`/scheduler`)"
|
||||
entryPoints:
|
||||
- "web"
|
||||
service: scheduler-service
|
||||
middlewares:
|
||||
- replace-scheduler
|
||||
peer-router:
|
||||
rule: "PathPrefix(`/peer`)"
|
||||
entryPoints:
|
||||
- "web"
|
||||
service: peer-service
|
||||
middlewares:
|
||||
- replace-peer
|
||||
datacenter-router:
|
||||
rule: "PathPrefix(`/datacenter`)"
|
||||
entryPoints:
|
||||
- "web"
|
||||
service: datacenter-service
|
||||
middlewares:
|
||||
- replace-datacenter
|
||||
catalog-router:
|
||||
rule: "PathPrefix(`/catalog`)"
|
||||
entryPoints:
|
||||
- "web"
|
||||
service: catalog-service
|
||||
middlewares:
|
||||
- replace-catalog
|
||||
auth-router:
|
||||
rule: "PathPrefix(`/auth`)"
|
||||
entryPoints:
|
||||
- "web"
|
||||
service: auth-service
|
||||
middlewares:
|
||||
- replace-auth
|
||||
front-router:
|
||||
rule: "PathPrefix(`/`)"
|
||||
entryPoints:
|
||||
- "web"
|
||||
service: front-service
|
||||
middlewares:
|
||||
- replace-front
|
||||
|
||||
services:
|
||||
workspace-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://localhost:8089"
|
||||
workflow-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://localhost:8088"
|
||||
shared-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://localhost:8091"
|
||||
scheduler-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://localhost:8090"
|
||||
peer-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://localhost:8093"
|
||||
datacenter-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://localhost:8092"
|
||||
catalog-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://localhost:8087"
|
||||
auth-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://localhost:8094"
|
||||
front-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://localhost:8080"
|
||||
|
||||
middlewares:
|
||||
workspace:
|
||||
forwardauth:
|
||||
address: "http://localhost:8094/oc/forward"
|
||||
workflow:
|
||||
forwardauth:
|
||||
address: "http://localhost:8094/oc/forward"
|
||||
shared:
|
||||
forwardauth:
|
||||
address: "http://localhost:8094/oc/forward"
|
||||
scheduler:
|
||||
forwardauth:
|
||||
address: "http://localhost:8094/oc/forward"
|
||||
peer:
|
||||
forwardauth:
|
||||
address: "http://localhost:8094/oc/forward"
|
||||
datacenter:
|
||||
forwardauth:
|
||||
address: "http://localhost:8094/oc/forward"
|
||||
catalog:
|
||||
forwardauth:
|
||||
address: "http://localhost:8094/oc/forward"
|
||||
auth:
|
||||
forwardauth:
|
||||
address: "http://localhost:8094/oc/forward"
|
||||
replace-workspace:
|
||||
replacePathRegex:
|
||||
regex: "^/workspace(.*)"
|
||||
replacement: "/oc$1"
|
||||
replace-workflow:
|
||||
replacePathRegex:
|
||||
regex: "^/workflow(.*)"
|
||||
replacement: "/oc$1"
|
||||
replace-shared:
|
||||
replacePathRegex:
|
||||
regex: "^/shared(.*)"
|
||||
replacement: "/oc$1"
|
||||
replace-scheduler:
|
||||
replacePathRegex:
|
||||
regex: "^/scheduler(.*)"
|
||||
replacement: "/oc$1"
|
||||
replace-peer:
|
||||
replacePathRegex:
|
||||
regex: "^/peer(.*)"
|
||||
replacement: "/oc$1"
|
||||
replace-datacenter:
|
||||
replacePathRegex:
|
||||
regex: "^/datacenter(.*)"
|
||||
replacement: "/oc$1"
|
||||
replace-catalog:
|
||||
replacePathRegex:
|
||||
regex: "^/catalog(.*)"
|
||||
replacement: "/oc$1"
|
||||
replace-auth:
|
||||
replacePathRegex:
|
||||
regex: "^/auth(.*)"
|
||||
replacement: "/oc$1"
|
||||
replace-front:
|
||||
stripprefix:
|
||||
prefixes: "/"
|
vanilla/traefik-dev-reverse/traefik.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
entryPoints:
  web:
    address: ":8000" # Single entry point for all requests

providers:
  file:
    filename: "/etc/traefik/dynamic.yml"
    watch: true