12 Commits

Author  SHA1        Message                                             Date
pb      47570d9423  change to fit update to oclib                       2024-09-04 17:34:05 +02:00
pb      2bc6e4327e  change to fit update to oclib                       2024-09-04 17:29:00 +02:00
pb      a69ecc4ab5  debug                                               2024-08-28 14:03:48 +02:00
mr      7206de35a8  Scheduler deleted                                   2024-08-22 10:51:07 +02:00
mr      20b5955ba9  Oclib major new version                             2024-08-21 14:20:13 +02:00
mr      826650487b  debug multiple                                      2024-08-20 16:14:10 +02:00
mr      c5d15d32da  debug                                               2024-08-20 15:24:46 +02:00
mr      825c18b6d6  simplify                                            2024-08-20 09:23:05 +02:00
mr      e5cfd6f4fb  minimize code + schedulerd naming + docker          2024-08-19 11:42:26 +02:00
pb      c710469881  Added grafana to compose and conf for easier setup  2024-08-13 11:15:18 +02:00
pb      41f93a292c  don't commit binary                                 2024-08-12 16:19:27 +02:00
pb      5b626dcb21  for future k8s exec                                 2024-08-12 16:11:13 +02:00
24 changed files with 291 additions and 531 deletions

.vscode/launch.json (new file, 18 lines)

@@ -0,0 +1,18 @@
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Launch Package",
            "type": "go",
            "request": "launch",
            "mode": "auto",
            "program": "${fileDirname}",
            "env": {
                "MONITOR_METHOD" : "local"
            }
        }
    ]
}


@@ -4,15 +4,22 @@ ENV DOCKER_ENVIRONMENT=true
WORKDIR /app
COPY . .
COPY conf/docker_scheduler.json /etc/oc/scheduler.json
RUN go build .
FROM golang:alpine
FROM oc-monitord:latest AS monitord
FROM argoproj/argocd:latest
ENV MONITORD_PATH = "./oc-monitord"
WORKDIR /app
COPY --from=builder /app/oc-scheduler .
COPY conf/docker_scheduler.json /etc/oc/scheduler.json
COPY conf/docker_schedulerd.json /etc/oc/schedulerd.json
ENTRYPOINT ["/app/oc-scheduler"]
COPY --from=monitord /app/oc-monitord .
COPY --from=builder /app/oc-schedulerd .
COPY conf/docker_schedulerd.json /etc/oc/schedulerd.json
ENTRYPOINT ["/app/oc-schedulerd"]


@@ -8,54 +8,41 @@ import (
)
type Config struct {
OcCatalogUrl string
MongoUrl string
DBName string
Logs string
LokiUrl string
NatsUrl string
MonitorPath string
MongoUrl string
DBName string
Logs string
LokiUrl string
NatsUrl string
}
var instance *Config
var once sync.Once
const defaultConfigFile = "/etc/oc/scheduler.json"
const localConfigFile = "./conf/local_scheduler.json"
const defaultConfigFile = "/etc/oc/schedulerd.json"
func init(){
func init() {
configFile := ""
var o *onion.Onion
l3 := onion.NewEnvLayerPrefix("_", "OCSCHEDULER_")
l3 := onion.NewEnvLayerPrefix("_", "OCSCHEDULERD_")
l2, err := onion.NewFileLayer(defaultConfigFile, nil)
if err == nil {
logs.Info("Config file found : " + defaultConfigFile)
configFile = defaultConfigFile
}
l1, err := onion.NewFileLayer(localConfigFile, nil)
if err == nil {
logs.Info("Local config file found " + localConfigFile + ", overriding default file")
configFile = localConfigFile
}
if configFile == "" {
if configFile == "" || l2 == nil {
logs.Info("No config file found, using env")
o = onion.New(l3)
} else if l1 == nil && l2 == nil {
o = onion.New(l1, l2, l3)
} else if l1 == nil {
} else {
o = onion.New(l2, l3)
} else if l2 == nil {
o = onion.New(l1, l3)
}
GetConfig().OcCatalogUrl = o.GetStringDefault("oc-catalog", "https://localhost:49618")
GetConfig().Logs = o.GetStringDefault("loglevel", "info")
GetConfig().LokiUrl = o.GetStringDefault("loki_url","http://127.0.0.1:3100")
GetConfig().NatsUrl = o.GetStringDefault("nats_url","http://127.0.0.1:4222")
GetConfig().MongoUrl = o.GetStringDefault("mongo_url","mongodb://127.0.0.1:27017")
GetConfig().DBName = o.GetStringDefault("database_name","DC_myDC")
GetConfig().MonitorPath = o.GetStringDefault("MONITORD_PATH", "../oc-monitord/oc-monitord")
GetConfig().Logs = o.GetStringDefault("LOG_LEVEL", "info")
GetConfig().LokiUrl = o.GetStringDefault("LOKI_URL", "http://127.0.0.1:3100")
GetConfig().NatsUrl = o.GetStringDefault("NATS_URL", "http://127.0.0.1:4222")
GetConfig().MongoUrl = o.GetStringDefault("MONGO_URL", "mongodb://127.0.0.1:27017")
GetConfig().DBName = o.GetStringDefault("MONGO_DATABASE", "DC_myDC")
}
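
A minimal sketch (not part of this changeset) of how the new OCSCHEDULERD_ environment layer can override a file-based value. It assumes onion maps the prefixed variable OCSCHEDULERD_MONGO_URL onto the "MONGO_URL" key through the "_" separator used above; the URL is a placeholder.

package main

import (
	"fmt"
	"os"

	"github.com/goraz/onion"
)

func main() {
	// Placeholder value; in a real deployment this would come from the container environment.
	os.Setenv("OCSCHEDULERD_MONGO_URL", "mongodb://example-host:27017")
	o := onion.New(onion.NewEnvLayerPrefix("_", "OCSCHEDULERD_"))
	// Same lookup pattern as conf.go: the prefixed env var should win over the default.
	fmt.Println(o.GetStringDefault("MONGO_URL", "mongodb://127.0.0.1:27017"))
}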


@@ -1,4 +0,0 @@
{
    "oc-catalog" : "http://oc-catalog:49618/",
    "loki_url" : "http://192.168.1.18:3100"
}


@@ -0,0 +1,6 @@
{
    "LOKI_URL" : "http://loki:3100",
    "MONGO_URL": "mongodb://mongo:27017/",
    "MONGO_DATABASE": "DC_myDC",
    "NATS_URL": "nats://nats:4222"
}


@@ -0,0 +1,8 @@
datasources:
  - name: Loki
    type: loki
    access: proxy
    url: http://loki:3100
    isDefault: true
    jsonData:
      httpMethod: POST


@@ -1,5 +0,0 @@
{
    "oc-catalog" : "http://localhost:49618/",
    "logs" : "",
    "mongo_url": "mongodb://127.0.0.1:27017"
}


@@ -1,47 +1,48 @@
package daemons
import (
"oc-scheduler/conf"
"oc-scheduler/logger"
"fmt"
"oc-schedulerd/conf"
"os/exec"
"github.com/rs/zerolog"
)
type LocalMonitor struct{
LokiURL string
KubeURL string
WorkflowName string
type LocalMonitor struct {
LokiURL string
KubeURL string
ExecutionID string
Duration int
Logger zerolog.Logger
}
func (lm *LocalMonitor) LaunchLocalMonitor (){
if (lm.LokiURL == "" || lm.KubeURL == "" || lm.WorkflowName == ""){
logger.Logger.Error().Msg("Missing parameter in LocalMonitor")
func (lm *LocalMonitor) LaunchLocalMonitor() {
if lm.LokiURL == "" || lm.KubeURL == "" || lm.ExecutionID == "" {
lm.Logger.Error().Msg("Missing parameter in LocalMonitor")
}
// For dev purposes, in prod KubeURL must be a kube API's URL
if(lm.KubeURL == "localhost"){
lm.execLocalKube()
} else{
if lm.KubeURL != "localhost" {
lm.execRemoteKube()
} else {
lm.execLocalKube()
}
}
func (lm *LocalMonitor) execLocalKube (){
// kube_url := ""
cmd := exec.Command("../oc-monitor/oc-monitor", "-w",lm.WorkflowName, "-u", lm.LokiURL, "-m", conf.GetConfig().MongoUrl,"-d", conf.GetConfig().DBName)
// cmd_ls := exec.Command("ls", "../oc-monitor")
func (lm *LocalMonitor) execLocalKube() {
args := []string{"-e", lm.ExecutionID, "-u", lm.LokiURL, "-m", conf.GetConfig().MongoUrl, "-d", conf.GetConfig().DBName}
if lm.Duration > 0 {
args = append(args, "-t", fmt.Sprintf("%d", lm.Duration))
}
cmd := exec.Command(conf.GetConfig().MonitorPath, args...)
fmt.Println("CMD", cmd)
err := cmd.Start()
// output, err := cmd_ls.CombinedOutput()
if err !=nil {
logger.Logger.Error().Msg("Could not start oc-monitor for " + lm.WorkflowName + " : " + err.Error())
if err != nil {
lm.Logger.Error().Msg("Could not start oc-monitor for " + lm.ExecutionID + " : " + err.Error())
}
}
func (lm *LocalMonitor) execRemoteKube (){
// TODO : implement this
func (lm *LocalMonitor) execRemoteKube() {
}
func (lm *LocalMonitor) todo (){
}
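
A minimal sketch (not from this changeset) of driving the reworked LocalMonitor directly. It assumes an oc-monitord binary is reachable at the configured MONITORD_PATH, and the execution ID is a placeholder.

package main

import (
	"os"

	"oc-schedulerd/conf"
	"oc-schedulerd/daemons"

	"github.com/rs/zerolog"
)

func main() {
	monitor := daemons.LocalMonitor{
		Logger:      zerolog.New(os.Stdout),
		Duration:    30, // seconds, forwarded to oc-monitord as -t
		LokiURL:     conf.GetConfig().LokiUrl,
		KubeURL:     "localhost", // anything else takes the (still empty) remote-kube path
		ExecutionID: "example-exec-id", // placeholder, not a real execution ID
	}
	monitor.LaunchLocalMonitor()
}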


@@ -1,72 +1,60 @@
package daemons
import (
"oc-scheduler/conf"
"oc-scheduler/logger"
"oc-scheduler/models"
"oc-schedulerd/conf"
"os"
"time"
oclib "cloud.o-forge.io/core/oc-lib"
workflow_execution "cloud.o-forge.io/core/oc-lib/models/workflow_execution"
)
type ExecutionManager struct {
bookings *models.ScheduledBooking
executions []models.Booking
}
var Bookings = ScheduledBooking{Bookings: []*workflow_execution.WorkflowExecution{}}
func (em *ExecutionManager) SetBookings(b *models.ScheduledBooking){
em.bookings = b
}
type ExecutionManager struct{}
// Loop every second on the booking's list and move the booking that must start to a new list
// that will be looped over to start them
func (em *ExecutionManager) RetrieveNextExecutions(){
if(em.bookings == nil){
logger.Logger.Error().Msg("booking has not been set in the exection manager")
return
}
for(true){
logger.Logger.Debug().Msg("New loop")
em.bookings.Mu.Lock()
bookings := em.bookings.Bookings
if (len(bookings) > 0){
for i := len( bookings) - 1 ; i >= 0 ; i--{
logger.Logger.Debug().Msg("It should start at " + bookings[i].Start.String() + " and it is now " + time.Now().UTC() .String())
if (bookings[i].Start.Before(time.Now().UTC())){
logger.Logger.Info().Msg("Will execute " + bookings[i].Workflow + " soon")
func (em *ExecutionManager) RetrieveNextExecutions() {
logger := oclib.GetLogger()
for {
logger.Debug().Msg("New loop")
Bookings.Mu.Lock()
if len(Bookings.Bookings) > 0 {
bookings := Bookings.Bookings
for i := len(bookings) - 1; i >= 0; i-- {
if bookings[i].ExecDate.Before(time.Now().UTC()) {
logger.Info().Msg("Will execute " + bookings[i].UUID + " soon")
go em.executeBooking(bookings[i])
bookings = append(bookings[:i], bookings[i+1:]...)
em.bookings.Bookings = bookings
Bookings.Bookings = append(bookings[:i], bookings[i+1:]...)
}
}
}
em.bookings.Mu.Unlock()
}
Bookings.Mu.Unlock()
time.Sleep(time.Second)
}
}
func (em *ExecutionManager) executeBooking(booking models.Booking){
// start execution
func (em *ExecutionManager) executeBooking(booking *workflow_execution.WorkflowExecution) {
// start execution
// create the yaml that describes the pod : filename, path/url to Loki
exec_method := os.Getenv("MONITOR_METHOD")
if exec_method == "local"{
logger.Logger.Debug().Msg("Executing oc-monitor localy")
monitor := LocalMonitor{LokiURL: conf.GetConfig().LokiUrl,KubeURL: "localhost",WorkflowName: booking.Workflow,}
logger := oclib.GetLogger()
if exec_method == "k8s" {
logger.Error().Msg("TODO : executing oc-monitor in a k8s")
} else {
logger.Debug().Msg("Executing oc-monitor localy")
duration := 0
if booking.EndDate != nil && booking.ExecDate != nil {
duration = int(booking.EndDate.Sub(*booking.ExecDate).Seconds())
}
monitor := LocalMonitor{
Logger: logger,
Duration: duration,
LokiURL: conf.GetConfig().LokiUrl,
KubeURL: "localhost",
ExecutionID: booking.UUID,
}
monitor.LaunchLocalMonitor()
}else{
logger.Logger.Error().Msg("TODO : executing oc-monitor in a k8s")
}
}
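
Note that executeBooking only branches on MONITOR_METHOD == "k8s" (still a TODO); any other value, including the "local" set in .vscode/launch.json above, falls through to the local oc-monitord launch.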


@@ -3,152 +3,132 @@ package daemons
import (
"encoding/json"
"fmt"
"oc-schedulerd/conf"
"sync"
"time"
"oc-scheduler/logger"
"oc-scheduler/models"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
"cloud.o-forge.io/core/oc-lib/tools"
"github.com/nats-io/nats.go"
"github.com/rs/zerolog"
"go.mongodb.org/mongo-driver/bson/primitive"
)
type ScheduledBooking struct {
Bookings []*workflow_execution.WorkflowExecution
Mu sync.Mutex
}
func (sb *ScheduledBooking) DeleteSchedules(workflow_id string) {
toNotDelete := []*workflow_execution.WorkflowExecution{}
for _, b := range sb.Bookings {
if b.WorkflowID != workflow_id {
toNotDelete = append(toNotDelete, b)
}
}
Bookings.Mu.Lock()
defer Bookings.Mu.Unlock()
sb.Bookings = toNotDelete
}
func (sb *ScheduledBooking) AddSchedules(new_bookings []*workflow_execution.WorkflowExecution, logger zerolog.Logger) {
Bookings.Mu.Lock()
defer Bookings.Mu.Unlock()
for _, exec := range new_bookings {
sb.Bookings = append(sb.Bookings , exec)
}
}
// NATS daemon listens to subject " workflowsUpdate "
// workflowsUpdate messages must be formatted following this pattern '{"workflow" : "", "start_date" : "", "stop_date" : "" }'
type ScheduleManager struct {
Api_url string
bookings *models.ScheduledBooking
ws models.HttpQuery
Logger zerolog.Logger
}
// Goroutine listening to a NATS server for updates
// on workflows' scheduling. Messages must contain
// workflow execution ID, to allow retrieval of execution infos
func (s *ScheduleManager) ListenNATS() {
nc, err := nats.Connect(conf.GetConfig().NatsUrl)
if err != nil {
s.Logger.Error().Msg("Could not connect to NATS")
return
}
defer nc.Close()
var wg sync.WaitGroup
wg.Add(2)
go s.listenForChange(nc, tools.REMOVE.GenerateKey(oclib.WORKFLOW.String()), true, wg)
go s.listenForChange(nc, tools.CREATE.GenerateKey(oclib.WORKFLOW.String()), false, wg)
wg.Wait()
func (s *ScheduleManager) SetBookings(b *models.ScheduledBooking){
s.bookings = b
}
// Goroutine listening to a NATS server for updates
// on workflows' scheduling. Messages must contain
// on workflows' scheduling. Messages must contain
// workflow execution ID, to allow retrieval of execution infos
func (s *ScheduleManager) ListenForWorkflowSubmissions(){
if(s.bookings == nil){
logger.Logger.Error().Msg("booking has not been set in the schedule manager")
}
nc, err := nats.Connect(nats.DefaultURL)
if err != nil {
logger.Logger.Error().Msg("Could not connect to NATS")
return
}
defer nc.Close()
func (s *ScheduleManager) listenForChange(nc *nats.Conn, chanName string, delete bool, wg sync.WaitGroup) {
defer wg.Done()
ch := make(chan *nats.Msg, 64)
subs , err := nc.ChanSubscribe("workflowsUpdate", ch)
fmt.Println("Listening to " + chanName)
subs, err := nc.ChanSubscribe(chanName, ch)
if err != nil {
logger.Logger.Error().Msg("Error listening to NATS")
s.Logger.Error().Msg("Error listening to NATS : " + err.Error())
}
defer subs.Unsubscribe()
for msg := range(ch){
fmt.Println("Waiting...")
map_mess := retrieveMapFromSub(msg.Data)
s.bookings.Mu.Lock()
wf_exec := getWorkflowExecution(map_mess["workflow"])
s.bookings.AddSchedule(models.Booking{Workflow: map_mess["workflow"], Start: *wf_exec.ExecDate, Stop: *wf_exec.EndDate })
s.bookings.Mu.Unlock()
for msg := range ch {
map_mess := map[string]string{}
json.Unmarshal(msg.Data, &map_mess)
str := "new"
if delete {
str = "deleted"
}
fmt.Println("Catching " + str + " workflow... " + map_mess["id"])
if delete {
Bookings.DeleteSchedules(map_mess["id"])
} else {
s.getNextScheduledWorkflows(1)
}
}
}
func getWorkflowExecution(exec_id string) *workflow_execution.WorkflowExecution {
res := oclib.LoadOne(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION),exec_id)
if res.Code != 200 {
logger.Logger.Error().Msg("Could not retrieve workflow ID from execution ID " + exec_id)
return nil
}
wf_exec := res.ToWorkflowExecution()
return wf_exec
}
// At the moment very simplistic, but could be useful if we send bigger messages
func retrieveMapFromSub(message []byte) (result_map map[string]string) {
json.Unmarshal(message, &result_map)
return
}
// Used at launch of the component to retrieve the next scheduled workflows
// and then every X minutes in case some workflows were scheduled before launch
func (s *ScheduleManager) SchedulePolling (){
func (s *ScheduleManager) SchedulePolling() {
var sleep_time float64 = 1
for(true){
s.getNextScheduledWorkflows(3)
logger.Logger.Info().Msg("Current list of schedules")
fmt.Println(s.bookings.Bookings)
for {
s.getNextScheduledWorkflows(1)
s.Logger.Info().Msg("Current list of schedules -------> " + fmt.Sprintf("%v", len(Bookings.Bookings)))
time.Sleep(time.Minute * time.Duration(sleep_time))
}
}
func (s *ScheduleManager) getWorfklowExecution(from time.Time, to time.Time) (exec_list []workflow_execution.WorkflowExecution, err error) {
func (s *ScheduleManager) getExecution(from time.Time, to time.Time) (exec_list []*workflow_execution.WorkflowExecution, err error) {
fmt.Printf("Getting workflows execution from %s to %s \n", from.String(), to.String())
f := dbs.Filters{
And: map[string][]dbs.Filter{
"execution_date" : {{Operator : dbs.GTE.String(), Value: primitive.NewDateTimeFromTime(from)}, {Operator: dbs.LTE.String(), Value: primitive.NewDateTimeFromTime(to)}},
"state": {{Operator: dbs.EQUAL.String(), Value: 1}},
"execution_date": {{Operator: dbs.GTE.String(), Value: primitive.NewDateTimeFromTime(from)}, {Operator: dbs.LTE.String(), Value: primitive.NewDateTimeFromTime(to)}},
"state": {{Operator: dbs.EQUAL.String(), Value: 1}},
},
}
res := oclib.Search(&f,"",oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION))
res := oclib.Search(&f, "", oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION))
if res.Code != 200 {
logger.Logger.Error().Msg("Error loading")
return nil, nil
}
for _, exec := range(res.Data){
lib_data := oclib.LoadOne(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION),exec.GetID())
exec_obj := lib_data.ToWorkflowExecution()
exec_list = append(exec_list, *exec_obj)
}
return exec_list, nil
}
// TODO : refactor to implement oclib.Search
func (s *ScheduleManager) getNextScheduledWorkflows(minutes float64) {
start := time.Now().UTC()
end := start.Add(time.Minute * time.Duration(minutes)).UTC()
fmt.Printf("Getting workflows execution from %s to %s \n", start.String(), end.String())
next_wf_exec, err := s.getWorfklowExecution(start,end)
if err != nil {
logger.Logger.Error().Msg("Could not retrieve next schedules")
s.Logger.Error().Msg("Error loading")
return
}
s.bookings.Mu.Lock()
defer s.bookings.Mu.Unlock()
for _, exec := range(next_wf_exec){
exec_start := exec.ExecDate
exec_stop := exec.EndDate
s.bookings.AddSchedule(models.Booking{Workflow: exec.UUID, Start: *exec_start, Stop: *exec_stop})
for _, exec := range res.Data {
exec_list = append(exec_list, exec.(*workflow_execution.WorkflowExecution))
}
return
}
func (s *ScheduleManager) getNextScheduledWorkflows(minutes float64) {
start := time.Now().UTC()
if next_wf_exec, err := s.getExecution(
start.Add(time.Second * time.Duration(-1)).UTC(),
start.Add(time.Minute * time.Duration(minutes)).UTC(),
); err != nil {
s.Logger.Error().Msg("Could not retrieve next schedules")
} else {
Bookings.AddSchedules(next_wf_exec, s.Logger)
}
}
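
A rough sketch (not part of this changeset) of a publisher whose message the new listenForChange goroutine would pick up, assuming the oc-lib subject helpers behave as they are used above; the workflow ID is a placeholder.

package main

import (
	"oc-schedulerd/conf"

	oclib "cloud.o-forge.io/core/oc-lib"
	"cloud.o-forge.io/core/oc-lib/tools"
	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(conf.GetConfig().NatsUrl)
	if err != nil {
		panic(err)
	}
	defer nc.Close()
	// listenForChange unmarshals the payload into a map[string]string and reads the "id" key.
	subject := tools.REMOVE.GenerateKey(oclib.WORKFLOW.String())
	nc.Publish(subject, []byte(`{"id":"00000000-0000-0000-0000-000000000000"}`))
	nc.Flush()
}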

docker-compose.tools.yml (new file, 39 lines)

@@ -0,0 +1,39 @@
version: '3.4'
services:
  nats:
    image: 'nats:latest'
    container_name: nats
    ports:
      - 4222:4222
    command:
      - "--debug"
    networks:
      - scheduler
      - catalog
  loki:
    image: 'grafana/loki'
    container_name: loki
    ports :
      - "3100:3100"
    networks:
      - scheduler
  grafana:
    image: 'grafana/grafana'
    container_name: grafana
    ports:
      - '3000:3000'
    networks:
      - scheduler
    volumes:
      - ./conf/grafana_data_source.yml:/etc/grafana/provisioning/datasources/datasource.yml
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=pfnirt # Change this to anything but admin to not have a password change page at startup
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_DISABLE_INITIAL_ADMIN_PASSWORD_CHANGE=true
networks:
  scheduler:
    external: true
  catalog:
    external: true
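
Both networks are declared external, so they presumably have to exist before this file is brought up (for example with docker network create scheduler and docker network create catalog).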


@@ -1,23 +1,16 @@
version: '3.4'
services:
  nats:
    image: 'nats:latest'
    container_name: nats
  oc-schedulerd:
    environment:
      - MONGO_DATABASE=DC_myDC
    image: 'oc-schedulerd:latest'
    ports:
      - 4222:4222
    command:
      - "--debug"
      - 9001:8080
    container_name: oc-schedulerd
    networks:
      - scheduler
  loki:
    image: 'grafana/loki'
    container_name: loki
    ports :
      - "3100:3100"
    networks:
      - scheduler
      - catalog
networks:
  scheduler:
  catalog:
    external: true


@@ -1,14 +0,0 @@
package main
import (
"oc-scheduler/daemons"
"oc-scheduler/models"
"testing"
)
func TestCreateManifest(t *testing.T){
em := daemons.ExecutionManager{}
em.CreateManifest(models.Booking{},"fessity-chlics_23_07_2024_154326")
}

go.mod (8 changed lines)

@@ -1,4 +1,4 @@
module oc-scheduler
module oc-schedulerd
go 1.22.0
@@ -8,7 +8,7 @@ require (
github.com/beego/beego v1.12.12
github.com/beego/beego/v2 v2.2.2
github.com/goraz/onion v0.1.3
github.com/nats-io/nats.go v1.9.1
github.com/nats-io/nats.go v1.37.0
github.com/nwtgck/go-fakelish v0.1.3
github.com/rs/zerolog v1.33.0
github.com/tidwall/gjson v1.17.1
@@ -17,7 +17,7 @@ require (
)
require (
cloud.o-forge.io/core/oc-lib v0.0.0-20240812075555-6e3069068ce4 // indirect
cloud.o-forge.io/core/oc-lib v0.0.0-20240826103423-aff9304b1a71 // indirect
github.com/Klathmon/StructToMap v0.0.0-20140724123129-3d0229e2dce7 // indirect
github.com/antihax/optional v1.0.0 // indirect
github.com/aws/aws-sdk-go v1.36.29 // indirect
@@ -56,7 +56,7 @@ require (
github.com/montanaflynn/stats v0.7.1 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nats-io/jwt v0.3.2 // indirect
github.com/nats-io/nkeys v0.1.3 // indirect
github.com/nats-io/nkeys v0.4.7 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.19.0 // indirect

go.sum (12 changed lines)

@@ -4,6 +4,14 @@ cloud.o-forge.io/core/oc-lib v0.0.0-20240808075405-f45ad91687c4 h1:3xqz2s6r/PONq
cloud.o-forge.io/core/oc-lib v0.0.0-20240808075405-f45ad91687c4/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo=
cloud.o-forge.io/core/oc-lib v0.0.0-20240812075555-6e3069068ce4 h1:fdxRsT4eR4v1DM3FpTPi9AKxB5oIw3XgLu9ByNipj4I=
cloud.o-forge.io/core/oc-lib v0.0.0-20240812075555-6e3069068ce4/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo=
cloud.o-forge.io/core/oc-lib v0.0.0-20240821093044-f64563c9ff06 h1:sYveE1C/0mpSr+ZmOYxuZ3fTWID7mr5hPiq0jQenv3Q=
cloud.o-forge.io/core/oc-lib v0.0.0-20240821093044-f64563c9ff06/go.mod h1:1hhYh5QWAbYw9cKplQ0ZD9PMgU8t6gPqiYF8sldv1HU=
cloud.o-forge.io/core/oc-lib v0.0.0-20240826085916-d0e1474f8f34 h1:40XQgwR9HxXSnouY+ZqE/xYCM4qa+U+RLA5GA5JSNyQ=
cloud.o-forge.io/core/oc-lib v0.0.0-20240826085916-d0e1474f8f34/go.mod h1:1hhYh5QWAbYw9cKplQ0ZD9PMgU8t6gPqiYF8sldv1HU=
cloud.o-forge.io/core/oc-lib v0.0.0-20240826094730-73dc43b329d7 h1:WrlURBiciau4p6iU3v0nKcQYjBqW8e9Uc2soDDXll28=
cloud.o-forge.io/core/oc-lib v0.0.0-20240826094730-73dc43b329d7/go.mod h1:1hhYh5QWAbYw9cKplQ0ZD9PMgU8t6gPqiYF8sldv1HU=
cloud.o-forge.io/core/oc-lib v0.0.0-20240826103423-aff9304b1a71 h1:GodGMXVFgSdd5R1FoUjFAloOS+zOd3j66Wa+jcEPa4c=
cloud.o-forge.io/core/oc-lib v0.0.0-20240826103423-aff9304b1a71/go.mod h1:1hhYh5QWAbYw9cKplQ0ZD9PMgU8t6gPqiYF8sldv1HU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Klathmon/StructToMap v0.0.0-20140724123129-3d0229e2dce7 h1:n0MD6UkwbgGHtXsmfgVzC2+ZbHzIsScpbq9ZGI18074=
github.com/Klathmon/StructToMap v0.0.0-20140724123129-3d0229e2dce7/go.mod h1:xdrQDwHlKUmv8yiElMx6W0W10cLkqpeSEUUib8KGtv4=
@@ -370,9 +378,13 @@ github.com/nats-io/nats-server/v2 v2.1.2 h1:i2Ly0B+1+rzNZHHWtD4ZwKi+OU5l+uQo1iDH
github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
github.com/nats-io/nats.go v1.9.1 h1:ik3HbLhZ0YABLto7iX80pZLPw/6dx3T+++MZJwLnMrQ=
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
github.com/nats-io/nats.go v1.37.0 h1:07rauXbVnnJvv1gfIyghFEo6lUcYRY0WXc3x7x0vUxE=
github.com/nats-io/nats.go v1.37.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3 h1:6JrEfig+HzTH85yxzhSVbjHRJv9cn0p6n3IngIcM5/k=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=

File diff suppressed because one or more lines are too long


@@ -1,13 +0,0 @@
package logger
import (
"cloud.o-forge.io/core/oc-lib/logs"
"github.com/rs/zerolog"
)
var Logger zerolog.Logger
func init() {
logs.SetAppName("oc-scheduler")
Logger = logs.CreateLogger("","")
}

main.go (53 changed lines)

@@ -3,54 +3,29 @@ package main
import (
"fmt"
conf "oc-scheduler/conf"
"oc-scheduler/models"
"oc-scheduler/daemons"
"oc-schedulerd/conf"
"oc-schedulerd/daemons"
oclib "cloud.o-forge.io/core/oc-lib"
)
// var log zerolog.Logger
func main() {
var bookings models.ScheduledBooking
oclib.SetConfig(conf.GetConfig().MongoUrl,"DC_myDC")
oclib.Init("oc-scheduler")
oclib.SetConfig(
conf.GetConfig().MongoUrl,
conf.GetConfig().DBName,
conf.GetConfig().NatsUrl,
conf.GetConfig().LokiUrl,
conf.GetConfig().Logs,
)
oclib.Init("oc-schedulerd")
app_conf := conf.GetConfig()
apiurl := app_conf.OcCatalogUrl
sch_mngr := daemons.ScheduleManager{Api_url: apiurl}
sch_mngr.SetBookings(&bookings)
sch_mngr := daemons.ScheduleManager{Logger: oclib.GetLogger()}
exe_mngr := daemons.ExecutionManager{}
exe_mngr.SetBookings(&bookings)
go sch_mngr.ListenForWorkflowSubmissions()
go sch_mngr.ListenNATS()
go sch_mngr.SchedulePolling()
exe_mngr.RetrieveNextExecutions()
// method in Schedule manager that checks the first Schedule object for its start date and exe
// var g Graph
// list, err := g.GetGraphList(apiurl)
// if err != nil {
// log.Fatal().Msg("Failed to get the workspaces list, check api url and that api server is up : " + apiurl)
// }
// println("Available workspaces :")
// for workspace, _ := range list {
// println(workspace)
// }
// g.LoadFrom(list["test-alpr"])
// g.ExportToArgo("test-alpr")
fmt.Print("stop")
fmt.Print("stop")
}


@@ -4,12 +4,12 @@ metadata:
  name: test-monitor
spec:
  containers:
    - name: "oc-monitor-quity-anetran"
    - name: "oc-workflow-prous-skintris"
      image: docker.io/library/oc-monitor # Currently uses the local contenaird
      imagePullPolicy: IfNotPresent # This should be removed once a registry has been set up
      env:
        - name: "OCMONITOR_ARGOFILE"
          value: "quity-anetran_29_07_2024_144136.yml"
          value: "prous-skintris_29_07_2024_164008.yml"
        - name: "OCMONITOR_LOKIURL"
          value: "info" # !!!! In dev this must be replaced with the address of one of your interface (wifi, ethernet..)
  restartPolicy: OnFailure


@@ -1,53 +0,0 @@
package models
import (
"io"
"net/http"
"net/http/cookiejar"
"net/url"
)
type HttpQuery struct {
baseurl string
jar http.CookieJar
Cookies map[string]string
}
func (h *HttpQuery) Init(url string) {
h.baseurl = url
h.jar, _ = cookiejar.New(nil)
h.Cookies = make(map[string]string)
}
func (h *HttpQuery) Get(url string) ([]byte, error) {
client := &http.Client{Jar: h.jar}
resp, err := client.Get(h.baseurl + url)
if err != nil {
return nil, err
}
// store received cookies
for _, cookie := range h.jar.Cookies(resp.Request.URL) {
h.Cookies[cookie.Name] = cookie.Value
}
if err != nil {
return nil, err
}
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return body, nil
}
func (h *HttpQuery) Post(url string, data url.Values) (*http.Response, error) {
client := &http.Client{Jar: h.jar}
resp, err := client.PostForm(h.baseurl+url, data)
if err != nil {
return nil, err
}
// store received cookies
for _, cookie := range h.jar.Cookies(resp.Request.URL) {
h.Cookies[cookie.Name] = cookie.Value
}
return resp, err
}


@@ -1,71 +0,0 @@
package models
import (
"fmt"
"oc-scheduler/logger"
"sync"
"time"
)
// Is duration really important ?
type Booking struct {
Start time.Time
Stop time.Time
Duration uint
Workflow string
}
type ScheduledBooking struct {
Bookings []Booking
Mu sync.Mutex
}
func (s Booking) Equals(other Booking) bool {
return s.Workflow == other.Workflow && s.Start == other.Start && s.Stop == other.Stop
}
func (sb *ScheduledBooking) AddSchedule(new_booking Booking){
if(!sb.scheduleAlreadyExists(new_booking)){
sb.Bookings = append(sb.Bookings,new_booking)
logger.Logger.Info().Msg("Updated list schedules : \n " + sb.String())
} else {
// Debug condition : delete once this feature is ready to be implemented
logger.Logger.Debug().Msg("Workflow received not added")
logger.Logger.Debug().Msg("current schedule contains")
for _, booking := range(sb.Bookings){
logger.Logger.Debug().Msg(booking.String())
}
}
}
func (sb *ScheduledBooking) GetListNames()(list_names []string ){
for _, schedule := range(sb.Bookings){
list_names = append(list_names, schedule.Workflow)
}
return
}
func (sb *ScheduledBooking) scheduleAlreadyExists(new_booking Booking) bool {
for _, booking := range(sb.Bookings){
if booking.Equals(new_booking){
return true
}
}
return false
}
func (b *Booking) String() string {
return fmt.Sprintf("{workflow : %s , start_date : %s , stop_date : %s }", b.Workflow, b.Start.Format(time.RFC3339), b.Stop.Format(time.RFC3339))
}
func (sb *ScheduledBooking) String() string {
var str string
for _, booking := range(sb.Bookings){
str += fmt.Sprintf("%s\n", booking.String())
}
return str
}


@@ -1,40 +0,0 @@
package models
type Parameter struct {
Name string `yaml:"name,omitempty"`
Value string `yaml:"value,omitempty"`
}
type Container struct {
Image string `yaml:"image"`
Command []string `yaml:"command,omitempty,flow"`
Args []string `yaml:"args,omitempty,flow"`
VolumeMounts []VolumeMount `yaml:"volumeMounts,omitempty"`
}
type VolumeMount struct {
Name string `yaml:"name"`
MountPath string `yaml:"mountPath"`
}
type Task struct {
Name string `yaml:"name"`
Template string `yaml:"template"`
Dependencies []string `yaml:"dependencies,omitempty"`
Arguments struct {
Parameters []Parameter `yaml:"parameters,omitempty"`
} `yaml:"arguments,omitempty"`
}
type Dag struct {
Tasks []Task `yaml:"tasks,omitempty"`
}
type Template struct {
Name string `yaml:"name"`
Inputs struct {
Parameters []Parameter `yaml:"parameters"`
} `yaml:"inputs,omitempty"`
Container Container `yaml:"container,omitempty"`
Dag Dag `yaml:"dag,omitempty"`
}


@@ -1,19 +0,0 @@
package models
type VolumeClaimTemplate struct {
Metadata struct {
Name string `yaml:"name"`
} `yaml:"metadata"`
Spec VolumeSpec `yaml:"spec"`
}
type VolumeSpec struct {
AccessModes []string `yaml:"accessModes,flow"`
Resources struct {
Requests struct {
Storage string `yaml:"storage"`
} `yaml:"requests"`
} `yaml:"resources"`
}

Binary file not shown.