Compare commits

..

4 Commits
pierre ... main

Author SHA1 Message Date
mr
90cc774341 clone debugged 2025-04-29 10:30:28 +02:00
mr
db10baf460 update 2025-04-28 14:11:18 +02:00
mr
53fca60178 Merge branch 'main' of https://cloud.o-forge.io/core/oc-deploy into main 2025-04-28 09:46:44 +02:00
mr
8b53c2e70e update oc-deploy 2025-04-28 09:45:54 +02:00
11 changed files with 58 additions and 250 deletions

3
.gitignore vendored
View File

@ -1,2 +1 @@
k8s/deployed_config
docker/build_logs/
k8s/deployed_config

View File

@ -1,6 +1,5 @@
#!/bin/bash
REPOS=(
"oc-auth"
"oc-catalog"
@ -19,7 +18,7 @@ REPOS=(
clone_repo() {
local repo_url="https://cloud.o-forge.io/core/$1.git"
local repo_name=$(basename "$repo_url" .git)
local branch=$2
local branche=$2
echo "Processing repository: $repo_name"
if [ ! -d "$repo_name" ]; then
@ -31,17 +30,18 @@ clone_repo() {
fi
fi
echo "Repository '$repo_name' already exists. Pulling latest changes..."
cd "$repo_name" && git pull origin $branch && cd ..
cd "$repo_name" && git checkout $branche && git pull && cd ..
}
cd ..
# Iterate through each repository in the list
branch = "main"
branche = "main"
if [ -n "$1" ]; then
branch = $1
branche = $1
fi
for repo in "${REPOS[@]}"; do
clone_repo $repo $branch
clone_repo $repo $branche
done
echo "All repositories processed successfully."

View File

@ -1,30 +0,0 @@
# A guide to effective docker deployment for Open Cloud
We need a reliable process using the existing tools and the creation of new ones to have a consistent deployment of Open Cloud when each service is running in a docker container.
This document aims at addressing :
- The existing tools used
- The functioning of said tools
- The needed improvement (bugs/new features)
- The required configuration for each service
## Steps
- Downloading the repos : `oc-deploy/download_oc.py` uses the interactivity offered by python's library to select and follow the cloning of the repos on the forge, `oc-deploy/clone_opencloud_microservices.sh` is more straightforward using bash.
- Selecting the services to launch : `build_containers.sh` asks the user for the services that need to be launched. The user can choose non essential services (in front, monitord and shared) to be added to the list of minimum services to run open cloud (auth, catalog, datacenter, peer, workspace, workflow, scheduler, schedulerd)
- Verify if the service really needs a `docker build` : this operation is time and resource consuming, so we need to check :
- is a container already running
- does an image already exist
and prompt the user whether to proceed with the build, or just start a container with the existing image, or let the current container run.
- Fill the configuration file for each service selected to be built.
## Todo
- Implement a script that interacts with the user to fill the configuration json file
- Remove the filled json file from the forge to prevent data from other devs from being stored and used during build, which would lead the services to be misconfigured
- We could let some generic value, like ports, container addresses...

View File

@ -1,129 +0,0 @@
#!/bin/bash
# Build selected Open Cloud service images in parallel, one log file per build.
# List of services to build
MINIMUM_REPOS=(
"oc-auth"
"oc-catalog"
"oc-datacenter"
"oc-peer"
"oc-scheduler"
"oc-schedulerd"
"oc-workflow"
"oc-workspace"
)
# Optional services the user may add interactively below.
EXTRA_REPOS=(
"oc-front"
"oc-shared"
"oc-monitord"
)
REPOS=("${MINIMUM_REPOS[@]}") # Start with minimum repos
# Service checkouts are expected two directories above this script's cwd.
OC_ROOT="$(realpath ../..)"
DOCKER_BUILD="$(pwd)"
LOG_DIR="$DOCKER_BUILD/build_logs" # per-service docker build logs live here
mkdir -p "$LOG_DIR"
cd "$OC_ROOT" || exit 1
# Function to build a service
# Build one service image in the background.
# Arguments: $1 - service/repo name (also used as the docker image tag)
# Outputs:   prints the backgrounded docker-build PID to stdout
# NOTE(review): the launch loop below invokes this function with a trailing
# '&' as well, so the PID echoed here is discarded and the PID the caller
# records via $! is that of the wrapping subshell, not the docker build —
# the later 'wait' therefore does not track the actual build. Confirm intent.
build_service() {
local service=$1
local logfile="$LOG_DIR/$service.log"
echo "[START] Building $service..."
# All build output is captured in the per-service log file.
docker build -t "$service" "$OC_ROOT/$service" > "$logfile" 2>&1 &
echo $! # Return PID
}
# Track running builds: service name -> PID, plus ordered list of launched ones.
declare -A pids
declare -a active_services=()
# Select services to build
echo "🔧 Optional extra services:"
for i in "${!EXTRA_REPOS[@]}"; do
echo " [$((i+1))] ${EXTRA_REPOS[$i]}"
done
read -p "🟡 Do you want to add any extra services? Enter numbers separated by space (e.g., 1 3), or press Enter to skip: " -a selected
# Validate each typed index before appending the matching extra repo.
for index in "${selected[@]}"; do
if [[ "$index" =~ ^[0-9]+$ ]] && (( index >= 1 && index <= ${#EXTRA_REPOS[@]} )); then
REPOS+=("${EXTRA_REPOS[$((index-1))]}")
else
echo "⚠️ Invalid selection: $index"
fi
done
echo "✅ Selected services:"
for repo in "${REPOS[@]}"; do
echo " - $repo"
done
# Launch builds
for service in "${REPOS[@]}"; do
IMAGE_NAME="$service"
# Check if the image exists locally
if docker image inspect "$IMAGE_NAME" >/dev/null 2>&1; then
read -p "🟡 Image '$IMAGE_NAME' already exists. Rebuild? (y/N): " rebuild
if [[ "$rebuild" =~ ^[Yy]$ ]]; then
echo "🔄 Rebuilding image for '$IMAGE_NAME'..."
else
echo "⏭️ Skipping build for '$IMAGE_NAME'."
continue
fi
fi
# Check if a container is already running from this image
if docker ps --filter "ancestor=$IMAGE_NAME" --format '{{.ID}}' | grep -q .; then
echo "✅ A container from image '$IMAGE_NAME' is already running. Skipping build."
else
SERVICE_PATH="$OC_ROOT/$service"
if [ -d "$SERVICE_PATH" ]; then
# NOTE(review): build_service already backgrounds the docker build and
# echoes its PID; running it with '&' here makes $! the subshell's PID
# instead, so the 'wait' further down returns before builds finish —
# confirm whether the echoed PID should be captured instead.
build_service "$service" &
pids["$service"]=$!
active_services+=("$service")
else
echo "⚠️ Directory not found for $service. Skipping."
fi
fi
done
echo "========================"
echo "Building: ${active_services[*]}"
echo "========================"
# Monitor logs for each build in parallel: one background tail per service.
for service in "${active_services[@]}"; do
logfile="$LOG_DIR/$service.log"
(
tail -n 0 -f "$logfile" | while IFS= read -r line; do
# Highlight docker build steps
if [[ "$line" =~ Step\ ([0-9]+/[0-9]+) ]]; then
echo -e "[$service] 🚧 ${BASH_REMATCH[0]}: $line"
else
echo "[$service] $line"
fi
done
) &
done
# Wait for all builds to complete
for pid in "${pids[@]}"; do
wait "$pid"
done
# Bring each freshly built service up with its own compose file.
for service in "${active_services[@]}"; do
# BUG FIX: was 'cd $OC_ROOT/service' — literal "service" (missing '$'),
# so every iteration tried to enter a non-existent directory and
# 'docker compose up' ran from whatever directory the script was in.
cd "$OC_ROOT/$service" || { echo "⚠️ Cannot enter $OC_ROOT/$service. Skipping."; continue; }
docker compose up -d
done
echo "✅ All builds completed."

View File

@ -0,0 +1 @@
[{"_id":"c0cece97-7730-4c2a-8c20-a30944564106","failed_execution":null,"abstractobject":{"update_date":{"$date":"2025-03-27T09:13:13.230Z"},"access_mode":0,"id":"c0cece97-7730-4c2a-8c20-a30944564106","name":"local","is_draft":false,"creation_date":{"$date":"2025-03-27T09:13:13.230Z"}},"url":"http://localhost:8000","wallet_address":"my-wallet","public_key":"-----BEGIN RSA PUBLIC KEY-----\nMIICCgKCAgEAw2pdG6wMtuLcP0+k1LFvIb0DQo/oHW2uNJaEJK74plXqp4ztz2dR\nb+RQHFLeLuqk4i/zc3b4K3fKPXSlwnVPJCwzPrnyT8jYGOZVlWlETiV9xeJhu6s/\nBh6g1PWz75XjjwV50iv/CEiLNBT23f/3J44wrQzygqNQCiQSALdxWLAEl4l5kHSa\n9oMyV70/Uql94/ayMARZsHgp9ZvqQKbkZPw6yzVMfCBxQozlNlo315OHevudhnhp\nDRjN5I7zWmqYt6rbXJJC7Y3Izdvzn7QI88RqjSRST5I/7Kz3ndCqrOnI+OQUE5NT\nREyQebphvQfTDTKlRPXkdyktdK2DH28Zj6ZF3yjQvN35Q4zhOzlq77dO5IhhopI7\nct8dZH1T1nYkvdyCA/EVMtQsASmBOitH0Y0ACoXQK5Kb6nm/TcM/9ZSJUNiEMuy5\ngBZ3YKE9oa4cpTpPXwcA+S/cU7HPNnQAsvD3iJi8GTW9uJs84pn4/WhpQqmXd4rv\nhKWECCN3fHy01fUs/U0PaSj2jDY/kQVeXoikNMzPUjdZd9m816TIBh3v3aVXCH/0\niTHHAxctvDgMRb2fpvRJ/wwnYjFG9RpamVFDMvC9NffuYzWAA9IRIY4cqgerfHrV\nZ2HHiPTDDvDAIsvImXZc/h7mXN6m3RCQ4Qywy993wd9gUdgg/qnynHcCAwEAAQ==\n-----END RSA PUBLIC KEY-----\n","state":1}]

4
docker/kube.exemple.env Normal file
View File

@ -0,0 +1,4 @@
KUBERNETES_SERVICE_HOST=192.168.1.169
KUBE_CA="LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTWpNeE1USXdNell3SGhjTk1qUXdPREE0TVRBeE16VTJXaGNOTXpRd09EQTJNVEF4TXpVMgpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTWpNeE1USXdNell3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFTVlk3ZHZhNEdYTVdkMy9jMlhLN3JLYjlnWXgyNSthaEE0NmkyNVBkSFAKRktQL2UxSVMyWVF0dzNYZW1TTUQxaStZdzJSaVppNUQrSVZUamNtNHdhcnFvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVWtlUVJpNFJiODduME5yRnZaWjZHClc2SU55NnN3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUlnRXA5ck04WmdNclRZSHYxZjNzOW5DZXZZeWVVa3lZUk4KWjUzazdoaytJS1FDSVFDbk05TnVGKzlTakIzNDFacGZ5ays2NEpWdkpSM3BhcmVaejdMd2lhNm9kdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"
KUBE_CERT="LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrVENDQVRlZ0F3SUJBZ0lJWUxWNkFPQkdrU1F3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOekl6TVRFeU1ETTJNQjRYRFRJME1EZ3dPREV3TVRNMU5sb1hEVEkxTURndwpPREV3TVRNMU5sb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJGQ2Q1MFdPeWdlQ2syQzcKV2FrOWY4MVAvSkJieVRIajRWOXBsTEo0ck5HeHFtSjJOb2xROFYxdUx5RjBtOTQ2Nkc0RmRDQ2dqaXFVSk92Swp3NVRPNnd5alNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCVFJkOFI5cXVWK2pjeUVmL0ovT1hQSzMyS09XekFLQmdncWhrak9QUVFEQWdOSUFEQkYKQWlFQTArbThqTDBJVldvUTZ0dnB4cFo4NVlMalF1SmpwdXM0aDdnSXRxS3NmUVVDSUI2M2ZNdzFBMm5OVWU1TgpIUGZOcEQwSEtwcVN0Wnk4djIyVzliYlJUNklZCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlRENDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUzTWpNeE1USXdNell3SGhjTk1qUXdPREE0TVRBeE16VTJXaGNOTXpRd09EQTJNVEF4TXpVMgpXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUzTWpNeE1USXdNell3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFRc3hXWk9pbnIrcVp4TmFEQjVGMGsvTDF5cE01VHAxOFRaeU92ektJazQKRTFsZWVqUm9STW0zNmhPeVljbnN3d3JoNnhSUnBpMW5RdGhyMzg0S0Z6MlBvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVTBYZkVmYXJsZm8zTWhIL3lmemx6Cnl0OWlqbHN3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQUxJL2dNYnNMT3MvUUpJa3U2WHVpRVMwTEE2cEJHMXgKcnBlTnpGdlZOekZsQWlFQW1wdjBubjZqN3M0MVI0QzFNMEpSL0djNE53MHdldlFmZWdEVGF1R2p3cFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"
KUBE_DATA="LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSU5ZS1BFb1dhd1NKUzJlRW5oWmlYMk5VZlY1ZlhKV2krSVNnV09TNFE5VTlvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFVUozblJZN0tCNEtUWUx0WnFUMS96VS84a0Z2Sk1lUGhYMm1Vc25pczBiR3FZblkyaVZEeApYVzR2SVhTYjNqcm9iZ1YwSUtDT0twUWs2OHJEbE03ckRBPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo="

36
docker/start-demo.sh Executable file
View File

@ -0,0 +1,36 @@
#!/bin/bash
# Start the Open Cloud demo: bring up tooling and traefik, seed the database,
# then build and run every service container.
# Arguments: $1 - kubernetes env file copied into each service (default ./kube.exemple.env)
#            $2 - host URL passed as a build arg (default http://localhost:8000)
KUBERNETES_ENV_FILE=$(realpath "${1:-./kube.exemple.env}")
HOST=${2:-"http://localhost:8000"}
# BUG FIX: was 'docker network create oc | true' — piping to 'true' discards
# the command's output; '|| true' only neutralizes the exit status when the
# network already exists.
docker network create oc || true
docker compose down
cd ./tools && docker compose -f ./docker-compose.dev.yml up --force-recreate -d
docker compose -f ./docker-compose.traefik.yml up --force-recreate -d && cd ..
# Seed the database.
cd ./db && ./add.sh && cd ..
cd ../..
# Services to (re)build and start.
REPOS=(
"oc-auth"
"oc-catalog"
"oc-datacenter"
"oc-monitord"
"oc-peer"
"oc-shared"
"oc-scheduler"
"oc-schedulerd"
"oc-workflow"
"oc-workspace"
"oc-front"
)
for i in "${REPOS[@]}"
do
echo "Building $i"
# Stop and remove any previous container for this service; failure is fine.
docker kill "$i" || true
docker rm "$i" || true
# BUG FIX: unchecked 'cd ./$i' let cp/build run in the wrong directory when
# a checkout was missing, and the trailing 'cd ..' then walked up too far.
cd "./$i" || { echo "⚠️ Missing directory ./$i. Skipping."; continue; }
cp "$KUBERNETES_ENV_FILE" ./env.env
docker build . -t "$i" --build-arg=HOST="$HOST" && docker compose up -d
cd ..
done

View File

@ -1,9 +1,11 @@
#!/bin/bash
KUBERNETES_ENV_FILE=$(realpath ${1:-"./kube.exemple.env"})
HOST=${2:-"http://localhost:8000"}
docker network create oc | true
docker compose down
cd ./tools && docker compose -f ./docker-compose.dev.yml up --force-recreate -d && cd ..
cd ./tools && docker compose -f ./docker-compose.traefik.yml up --force-recreate -d && cd ..
cd ./tools && docker compose -f ./docker-compose.dev.yml up --force-recreate -d
docker compose -f ./docker-compose.traefik.yml up --force-recreate -d && cd ..
cd ../..
@ -26,6 +28,7 @@ do
docker kill $i | true
docker rm $i | true
cd ./$i
docker build . -t $i && docker compose up -d
cp $KUBERNETES_ENV_FILE ./env.env
docker build . -t $i --build-arg=HOST=$HOST && docker compose up -d
cd ..
done

View File

@ -22,6 +22,8 @@ do
docker rm $i | true
done
docker volume rm tools_oc-data
cd ../..
REPOS=(

View File

@ -10,9 +10,9 @@ services:
command:
- "--api.insecure=true"
- "--providers.docker=true"
- "--entrypoints.web.address=:80"
- "--entrypoints.web.address=:8000"
ports:
- "80:80" # Expose Traefik on port 80
- "8000:8000" # Expose Traefik on port 8000
volumes:
- /var/run/docker.sock:/var/run/docker.sock

View File

@ -1,78 +0,0 @@
#!/usr/bin/env python3
# BUG FIX: shebang was '#/bin/python3' (missing '!'), which is just a comment —
# the kernel could not execute the script directly.
# Interactive downloader for Open Cloud repositories hosted on the forge.
import requests
from bs4 import BeautifulSoup
import inquirer
import os
import subprocess
from concurrent.futures import ThreadPoolExecutor, as_completed
from rich.console import Console
from rich.table import Table
from rich.live import Live

# Forge listing pages to scrape for repository names
urls = [
    'https://cloud.o-forge.io/explore/repos?page=1',
    'https://cloud.o-forge.io/explore/repos?page=2'
]
def get_all_repo(urls):
    """Scrape the given forge listing pages and return the names of all
    repositories that live under the 'core/' organisation (prefix stripped)."""
    found = []
    for page_url in urls:
        resp = requests.get(page_url)
        resp.raise_for_status()  # abort on HTTP errors
        page = BeautifulSoup(resp.text, 'html.parser')
        for entry in page.find_all(class_='flex-item-title'):
            name = entry.get_text(strip=True)
            if name.startswith('core/'):
                found.append(name.split("core/")[1])
    return found
def git_clone_repo(repo: str, dir: str, status: dict):
    """Clone (or update) one repository and record progress in `status`.

    Args:
        repo:   repository name under the forge's 'core/' organisation.
        dir:    parent directory that receives the checkout.
        status: shared repo -> human-readable state dict, read by the live view.
    """
    status[repo] = "⏳ Cloning..."
    try:
        if os.path.exists(f"{dir}/{repo}"):
            # Existing checkout: fast-forward it instead of re-cloning.
            result = subprocess.run(["git", "-C", f"{dir}/{repo}", "pull"],
                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        else:
            base_url = f"https://cloud.o-forge.io/core/{repo}.git"
            result = subprocess.run(["git", "clone", base_url, f"{dir}/{repo}"],
                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # BUG FIX: the original ignored git's exit code, so failed clones or
        # pulls were still reported as "✅ Done".
        if result.returncode == 0:
            status[repo] = "✅ Done"
        else:
            status[repo] = f"❌ Failed: {result.stderr.decode(errors='replace').strip()}"
    except Exception as e:
        status[repo] = f"❌ Failed: {e}"
def display_status(status):
    """Build and return a rich Table snapshot of the cloning status dict."""
    view = Table(title="Repository Cloning Status")
    view.add_column("Repository", justify="left")
    view.add_column("Status", justify="right")
    for name, state in status.items():
        view.add_row(name, state)
    return view
# --- interactive entry point ---------------------------------------------
# Fetch the repo list from the forge, let the user tick a subset, then clone
# them in parallel while a live table shows per-repo progress.
repositories = get_all_repo(urls)
cwd = os.getcwd()
questions = [
    inquirer.Checkbox('repo_choice',
        message=f"Which Open Cloud repo do you want to download ? (Will be download in {cwd})?",
        choices=repositories,
    ),
]
selected_repo = inquirer.prompt(questions)
# Shared mutable dict: worker threads write to it, the Live view reads it.
status = {repo: "Waiting" for repo in selected_repo["repo_choice"]}
with ThreadPoolExecutor() as executor:
    futures = {executor.submit(git_clone_repo, repo, cwd, status): repo for repo in selected_repo["repo_choice"]}
    # Refresh the table as each clone finishes.
    with Live(display_status(status), refresh_per_second=2) as live:
        for future in as_completed(futures):
            live.update(display_status(status))