Compare commits
12 Commits
execute-mo ... chart

SHA1:
47570d9423
2bc6e4327e
a69ecc4ab5
7206de35a8
20b5955ba9
826650487b
c5d15d32da
825c18b6d6
e5cfd6f4fb
c710469881
41f93a292c
5b626dcb21
.vscode/launch.json (vendored, new file, 18 lines)
@@ -0,0 +1,18 @@
+{
+    // Use IntelliSense to learn about possible attributes.
+    // Hover to view descriptions of existing attributes.
+    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "name": "Launch Package",
+            "type": "go",
+            "request": "launch",
+            "mode": "auto",
+            "program": "${fileDirname}",
+            "env": {
+                "MONITOR_METHOD" : "local"
+            }
+        }
+    ]
+}

Dockerfile (17 lines changed)
@@ -4,15 +4,22 @@ ENV DOCKER_ENVIRONMENT=true
 WORKDIR /app
 
 COPY . .
-COPY conf/docker_scheduler.json /etc/oc/scheduler.json
 
 RUN go build .
 
-FROM golang:alpine
+FROM oc-monitord:latest AS monitord
+
+FROM argoproj/argocd:latest
+
+ENV MONITORD_PATH = "./oc-monitord"
 
 WORKDIR /app
 
-COPY --from=builder /app/oc-scheduler .
-COPY conf/docker_scheduler.json /etc/oc/scheduler.json
+COPY conf/docker_schedulerd.json /etc/oc/schedulerd.json
 
-ENTRYPOINT ["/app/oc-scheduler"]
+COPY --from=monitord /app/oc-monitord .
+COPY --from=builder /app/oc-schedulerd .
+COPY conf/docker_schedulerd.json /etc/oc/schedulerd.json
+
+ENTRYPOINT ["/app/oc-schedulerd"]

conf/conf.go (47 lines changed)
@@ -8,54 +8,41 @@ import (
 )
 
 type Config struct {
-    OcCatalogUrl string
+    MonitorPath string
     MongoUrl    string
     DBName      string
     Logs        string
     LokiUrl     string
     NatsUrl     string
 }
 
 var instance *Config
 var once sync.Once
 
-const defaultConfigFile = "/etc/oc/scheduler.json"
-const localConfigFile = "./conf/local_scheduler.json"
+const defaultConfigFile = "/etc/oc/schedulerd.json"
 
-func init(){
+func init() {
     configFile := ""
     var o *onion.Onion
 
-    l3 := onion.NewEnvLayerPrefix("_", "OCSCHEDULER_")
+    l3 := onion.NewEnvLayerPrefix("_", "OCSCHEDULERD_")
     l2, err := onion.NewFileLayer(defaultConfigFile, nil)
     if err == nil {
         logs.Info("Config file found : " + defaultConfigFile)
         configFile = defaultConfigFile
     }
-    l1, err := onion.NewFileLayer(localConfigFile, nil)
-    if err == nil {
-        logs.Info("Local config file found " + localConfigFile + ", overriding default file")
-        configFile = localConfigFile
-    }
-    if configFile == "" {
+    if configFile == "" || l2 == nil {
         logs.Info("No config file found, using env")
         o = onion.New(l3)
-    } else if l1 == nil && l2 == nil {
-        o = onion.New(l1, l2, l3)
-    } else if l1 == nil {
+    } else {
         o = onion.New(l2, l3)
-    } else if l2 == nil {
-        o = onion.New(l1, l3)
     }
-    GetConfig().OcCatalogUrl = o.GetStringDefault("oc-catalog", "https://localhost:49618")
-    GetConfig().Logs = o.GetStringDefault("loglevel", "info")
-    GetConfig().LokiUrl = o.GetStringDefault("loki_url","http://127.0.0.1:3100")
-    GetConfig().NatsUrl = o.GetStringDefault("nats_url","http://127.0.0.1:4222")
-    GetConfig().MongoUrl = o.GetStringDefault("mongo_url","mongodb://127.0.0.1:27017")
-    GetConfig().DBName = o.GetStringDefault("database_name","DC_myDC")
+    GetConfig().MonitorPath = o.GetStringDefault("MONITORD_PATH", "../oc-monitord/oc-monitord")
+    GetConfig().Logs = o.GetStringDefault("LOG_LEVEL", "info")
+    GetConfig().LokiUrl = o.GetStringDefault("LOKI_URL", "http://127.0.0.1:3100")
+    GetConfig().NatsUrl = o.GetStringDefault("NATS_URL", "http://127.0.0.1:4222")
+    GetConfig().MongoUrl = o.GetStringDefault("MONGO_URL", "mongodb://127.0.0.1:27017")
+    GetConfig().DBName = o.GetStringDefault("MONGO_DATABASE", "DC_myDC")
 }

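The hunk above relies on GetConfig() together with the instance and once package variables, but the function body itself is outside this diff. A minimal sketch of the sync.Once singleton those variables imply, written as a runnable example; this is an assumption and the real oc-schedulerd implementation may differ:

// Hypothetical sketch: GetConfig() is not shown in this diff.
package main

import (
    "fmt"
    "sync"
)

type Config struct {
    MonitorPath string
    MongoUrl    string
    DBName      string
    Logs        string
    LokiUrl     string
    NatsUrl     string
}

var instance *Config
var once sync.Once

// GetConfig lazily allocates the shared Config exactly once, so the init()
// in conf.go and every later caller read and mutate the same struct.
func GetConfig() *Config {
    once.Do(func() { instance = &Config{} })
    return instance
}

func main() {
    GetConfig().MongoUrl = "mongodb://127.0.0.1:27017"
    fmt.Println(GetConfig().MongoUrl) // same instance on every call
}
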
(deleted file)
@@ -1,4 +0,0 @@
-{
-    "oc-catalog" : "http://oc-catalog:49618/",
-    "loki_url" : "http://192.168.1.18:3100"
-}

conf/docker_schedulerd.json (new file, 6 lines)
@@ -0,0 +1,6 @@
+{
+    "LOKI_URL" : "http://loki:3100",
+    "MONGO_URL":"mongodb://mongo:27017/",
+    "MONGO_DATABASE":"DC_myDC",
+    "NATS_URL": "nats://nats:4222"
+}

conf/grafana_data_source.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
+datasources:
+  - name: Loki
+    type: loki
+    access: proxy
+    url: http://loki:3100
+    isDefault: true
+    jsonData:
+      httpMethod: POST

(deleted file)
@@ -1,5 +0,0 @@
-{
-    "oc-catalog" : "http://localhost:49618/",
-    "logs" : "",
-    "mongo_url": "mongodb://127.0.0.1:27017"
-}

(modified file, package daemons: LocalMonitor)
@@ -1,47 +1,48 @@
 package daemons
 
 import (
-    "oc-scheduler/conf"
-    "oc-scheduler/logger"
+    "fmt"
+    "oc-schedulerd/conf"
     "os/exec"
+
+    "github.com/rs/zerolog"
 )
 
-type LocalMonitor struct{
-    LokiURL      string
-    KubeURL      string
-    WorkflowName string
+type LocalMonitor struct {
+    LokiURL     string
+    KubeURL     string
+    ExecutionID string
+    Duration    int
+    Logger      zerolog.Logger
 }
 
-func (lm *LocalMonitor) LaunchLocalMonitor (){
-    if (lm.LokiURL == "" || lm.KubeURL == "" || lm.WorkflowName == ""){
-        logger.Logger.Error().Msg("Missing parameter in LocalMonitor")
+func (lm *LocalMonitor) LaunchLocalMonitor() {
+    if lm.LokiURL == "" || lm.KubeURL == "" || lm.ExecutionID == "" {
+        lm.Logger.Error().Msg("Missing parameter in LocalMonitor")
     }
 
     // For dev purposes, in prod KubeURL must be a kube API's URL
-    if(lm.KubeURL == "localhost"){
-        lm.execLocalKube()
-    } else{
+    if lm.KubeURL != "localhost" {
         lm.execRemoteKube()
+    } else {
+        lm.execLocalKube()
     }
 }
 
-func (lm *LocalMonitor) execLocalKube (){
-    // kube_url := ""
-    cmd := exec.Command("../oc-monitor/oc-monitor", "-w",lm.WorkflowName, "-u", lm.LokiURL, "-m", conf.GetConfig().MongoUrl,"-d", conf.GetConfig().DBName)
-    // cmd_ls := exec.Command("ls", "../oc-monitor")
+func (lm *LocalMonitor) execLocalKube() {
+    args := []string{"-e", lm.ExecutionID, "-u", lm.LokiURL, "-m", conf.GetConfig().MongoUrl, "-d", conf.GetConfig().DBName}
+    if lm.Duration > 0 {
+        args = append(args, "-t", fmt.Sprintf("%d", lm.Duration))
+    }
+    cmd := exec.Command(conf.GetConfig().MonitorPath, args...)
+    fmt.Println("CMD", cmd)
     err := cmd.Start()
-    // output, err := cmd_ls.CombinedOutput()
-    if err !=nil {
-        logger.Logger.Error().Msg("Could not start oc-monitor for " + lm.WorkflowName + " : " + err.Error())
+    if err != nil {
+        lm.Logger.Error().Msg("Could not start oc-monitor for " + lm.ExecutionID + " : " + err.Error())
     }
 }
 
-func (lm *LocalMonitor) execRemoteKube (){
+// TODO : implement this
+func (lm *LocalMonitor) execRemoteKube() {
 }
-
-func (lm *LocalMonitor) todo (){
-
-}

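For orientation, a standalone sketch of the subprocess launch that the rewritten execLocalKube performs. The flag names (-e, -u, -m, -d, optional -t) and the default monitor path come from this diff and from conf.go; the concrete values below are placeholders rather than the daemon's real data:

package main

import (
    "fmt"
    "os/exec"
)

func main() {
    // Placeholder values standing in for conf.GetConfig() and the booking.
    monitorPath := "../oc-monitord/oc-monitord" // default MONITORD_PATH from conf.go
    args := []string{
        "-e", "some-execution-uuid",       // execution ID
        "-u", "http://127.0.0.1:3100",     // Loki URL
        "-m", "mongodb://127.0.0.1:27017", // Mongo URL
        "-d", "DC_myDC",                   // database name
        "-t", fmt.Sprintf("%d", 120),      // optional duration in seconds
    }
    cmd := exec.Command(monitorPath, args...)
    // Start() launches oc-monitord without waiting for it, as the daemon does.
    if err := cmd.Start(); err != nil {
        fmt.Println("could not start oc-monitord:", err)
    }
}
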
(modified file, package daemons: ExecutionManager)
@@ -1,72 +1,60 @@
 package daemons
 
 import (
-    "oc-scheduler/conf"
-    "oc-scheduler/logger"
-    "oc-scheduler/models"
+    "oc-schedulerd/conf"
     "os"
    "time"
+
+    oclib "cloud.o-forge.io/core/oc-lib"
+    workflow_execution "cloud.o-forge.io/core/oc-lib/models/workflow_execution"
 )
 
-type ExecutionManager struct {
-    bookings   *models.ScheduledBooking
-    executions []models.Booking
-}
+var Bookings = ScheduledBooking{Bookings: []*workflow_execution.WorkflowExecution{}}
 
-func (em *ExecutionManager) SetBookings(b *models.ScheduledBooking){
-    em.bookings = b
-}
+type ExecutionManager struct{}
 
 // Loop every second on the booking's list and move the booking that must start to a new list
 // that will be looped over to start them
-func (em *ExecutionManager) RetrieveNextExecutions(){
-    if(em.bookings == nil){
-        logger.Logger.Error().Msg("booking has not been set in the exection manager")
-        return
-    }
-    for(true){
-        logger.Logger.Debug().Msg("New loop")
-        em.bookings.Mu.Lock()
-        bookings := em.bookings.Bookings
-        if (len(bookings) > 0){
-            for i := len( bookings) - 1 ; i >= 0 ; i--{
-                logger.Logger.Debug().Msg("It should start at " + bookings[i].Start.String() + " and it is now " + time.Now().UTC() .String())
-                if (bookings[i].Start.Before(time.Now().UTC())){
-                    logger.Logger.Info().Msg("Will execute " + bookings[i].Workflow + " soon")
+func (em *ExecutionManager) RetrieveNextExecutions() {
+    logger := oclib.GetLogger()
+    for {
+        logger.Debug().Msg("New loop")
+        Bookings.Mu.Lock()
+        if len(Bookings.Bookings) > 0 {
+            bookings := Bookings.Bookings
+            for i := len(bookings) - 1; i >= 0; i-- {
+                if bookings[i].ExecDate.Before(time.Now().UTC()) {
+                    logger.Info().Msg("Will execute " + bookings[i].UUID + " soon")
                     go em.executeBooking(bookings[i])
-                    bookings = append(bookings[:i], bookings[i+1:]...)
-                    em.bookings.Bookings = bookings
+                    Bookings.Bookings = append(bookings[:i], bookings[i+1:]...)
                 }
             }
         }
-        em.bookings.Mu.Unlock()
+        Bookings.Mu.Unlock()
        time.Sleep(time.Second)
     }
 }
 
-func (em *ExecutionManager) executeBooking(booking models.Booking){
+func (em *ExecutionManager) executeBooking(booking *workflow_execution.WorkflowExecution) {
     // start execution
     // create the yaml that describes the pod : filename, path/url to Loki
 
     exec_method := os.Getenv("MONITOR_METHOD")
-    if exec_method == "local"{
-        logger.Logger.Debug().Msg("Executing oc-monitor localy")
-        monitor := LocalMonitor{LokiURL: conf.GetConfig().LokiUrl,KubeURL: "localhost",WorkflowName: booking.Workflow,}
-        monitor.LaunchLocalMonitor()
-    }else{
-        logger.Logger.Error().Msg("TODO : executing oc-monitor in a k8s")
+    logger := oclib.GetLogger()
+    if exec_method == "k8s" {
+        logger.Error().Msg("TODO : executing oc-monitor in a k8s")
+    } else {
+        logger.Debug().Msg("Executing oc-monitor localy")
+        duration := 0
+        if booking.EndDate != nil && booking.ExecDate != nil {
+            duration = int(booking.EndDate.Sub(*booking.ExecDate).Seconds())
+        }
+        monitor := LocalMonitor{
+            Logger:      logger,
+            Duration:    duration,
+            LokiURL:     conf.GetConfig().LokiUrl,
+            KubeURL:     "localhost",
+            ExecutionID: booking.UUID,
+        }
+        monitor.LaunchLocalMonitor()
     }
 }

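RetrieveNextExecutions drains the shared booking list while holding the mutex and walks it from the last index down to zero, so removing element i with append(bookings[:i], bookings[i+1:]...) never shifts the indices that are still to be visited. A self-contained toy illustration of that pattern, using plain ints instead of workflow executions:

package main

import (
    "fmt"
    "sync"
)

func main() {
    var mu sync.Mutex
    due := func(v int) bool { return v%2 == 0 } // stand-in for "ExecDate has passed"
    bookings := []int{1, 2, 3, 4, 5}

    mu.Lock()
    // Iterating in reverse keeps the remaining indices valid after each removal.
    for i := len(bookings) - 1; i >= 0; i-- {
        if due(bookings[i]) {
            // dispatch would happen here (go executeBooking(...))
            bookings = append(bookings[:i], bookings[i+1:]...)
        }
    }
    mu.Unlock()

    fmt.Println(bookings) // [1 3 5]
}
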
(modified file, package daemons: ScheduleManager)
@@ -3,152 +3,132 @@ package daemons
 import (
     "encoding/json"
     "fmt"
+    "oc-schedulerd/conf"
+    "sync"
     "time"
 
-    "oc-scheduler/logger"
-    "oc-scheduler/models"
-
     oclib "cloud.o-forge.io/core/oc-lib"
     "cloud.o-forge.io/core/oc-lib/dbs"
     "cloud.o-forge.io/core/oc-lib/models/workflow_execution"
+    "cloud.o-forge.io/core/oc-lib/tools"
     "github.com/nats-io/nats.go"
+    "github.com/rs/zerolog"
     "go.mongodb.org/mongo-driver/bson/primitive"
 )
 
+type ScheduledBooking struct {
+    Bookings []*workflow_execution.WorkflowExecution
+    Mu       sync.Mutex
+}
+
+func (sb *ScheduledBooking) DeleteSchedules(workflow_id string) {
+    toNotDelete := []*workflow_execution.WorkflowExecution{}
+    for _, b := range sb.Bookings {
+        if b.WorkflowID != workflow_id {
+            toNotDelete = append(toNotDelete, b)
+        }
+    }
+    Bookings.Mu.Lock()
+    defer Bookings.Mu.Unlock()
+    sb.Bookings = toNotDelete
+}
+
+func (sb *ScheduledBooking) AddSchedules(new_bookings []*workflow_execution.WorkflowExecution, logger zerolog.Logger) {
+    Bookings.Mu.Lock()
+    defer Bookings.Mu.Unlock()
+    for _, exec := range new_bookings {
+        sb.Bookings = append(sb.Bookings , exec)
+    }
+}
+
 // NATS daemon listens to subject " workflowsUpdate "
 // workflowsUpdate messages must be formatted following this pattern '{"workflow" : "", "start_date" : "", "stop_date" : "" }'
 
 type ScheduleManager struct {
-    Api_url  string
-    bookings *models.ScheduledBooking
-    ws       models.HttpQuery
+    Logger zerolog.Logger
 }
 
-func (s *ScheduleManager) SetBookings(b *models.ScheduledBooking){
-    s.bookings = b
+// Goroutine listening to a NATS server for updates
+// on workflows' scheduling. Messages must contain
+// workflow execution ID, to allow retrieval of execution infos
+func (s *ScheduleManager) ListenNATS() {
+    nc, err := nats.Connect(conf.GetConfig().NatsUrl)
+    if err != nil {
+        s.Logger.Error().Msg("Could not connect to NATS")
+        return
+    }
+    defer nc.Close()
+    var wg sync.WaitGroup
+    wg.Add(2)
+    go s.listenForChange(nc, tools.REMOVE.GenerateKey(oclib.WORKFLOW.String()), true, wg)
+    go s.listenForChange(nc, tools.CREATE.GenerateKey(oclib.WORKFLOW.String()), false, wg)
+    wg.Wait()
 }
 
 // Goroutine listening to a NATS server for updates
 // on workflows' scheduling. Messages must contain
 // workflow execution ID, to allow retrieval of execution infos
-func (s *ScheduleManager) ListenForWorkflowSubmissions(){
-    if(s.bookings == nil){
-        logger.Logger.Error().Msg("booking has not been set in the schedule manager")
-    }
-
-    nc, err := nats.Connect(nats.DefaultURL)
-    if err != nil {
-        logger.Logger.Error().Msg("Could not connect to NATS")
-        return
-    }
-
-    defer nc.Close()
-
+func (s *ScheduleManager) listenForChange(nc *nats.Conn, chanName string, delete bool, wg sync.WaitGroup) {
+    defer wg.Done()
     ch := make(chan *nats.Msg, 64)
-    subs , err := nc.ChanSubscribe("workflowsUpdate", ch)
+    fmt.Println("Listening to " + chanName)
+    subs, err := nc.ChanSubscribe(chanName, ch)
     if err != nil {
-        logger.Logger.Error().Msg("Error listening to NATS")
+        s.Logger.Error().Msg("Error listening to NATS : " + err.Error())
     }
     defer subs.Unsubscribe()
 
-    for msg := range(ch){
-        fmt.Println("Waiting...")
-        map_mess := retrieveMapFromSub(msg.Data)
-        s.bookings.Mu.Lock()
-        wf_exec := getWorkflowExecution(map_mess["workflow"])
-        s.bookings.AddSchedule(models.Booking{Workflow: map_mess["workflow"], Start: *wf_exec.ExecDate, Stop: *wf_exec.EndDate })
-        s.bookings.Mu.Unlock()
+    for msg := range ch {
+        map_mess := map[string]string{}
+        json.Unmarshal(msg.Data, &map_mess)
+        str := "new"
+        if delete {
+            str = "deleted"
+        }
+        fmt.Println("Catching " + str + " workflow... " + map_mess["id"])
+        if delete {
+            Bookings.DeleteSchedules(map_mess["id"])
+        } else {
+            s.getNextScheduledWorkflows(1)
+        }
     }
 }
 
-func getWorkflowExecution(exec_id string) *workflow_execution.WorkflowExecution {
-    res := oclib.LoadOne(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION),exec_id)
-    if res.Code != 200 {
-        logger.Logger.Error().Msg("Could not retrieve workflow ID from execution ID " + exec_id)
-        return nil
-    }
-    wf_exec := res.ToWorkflowExecution()
-    return wf_exec
-}
-
-// At the moment very simplistic, but could be useful if we send bigger messages
-func retrieveMapFromSub(message []byte) (result_map map[string]string) {
-    json.Unmarshal(message, &result_map)
-    return
-}
-
 // Used at launch of the component to retrieve the next scheduled workflows
 // and then every X minutes in case some workflows were scheduled before launch
-func (s *ScheduleManager) SchedulePolling (){
+func (s *ScheduleManager) SchedulePolling() {
     var sleep_time float64 = 1
-    for(true){
-        s.getNextScheduledWorkflows(3)
-        logger.Logger.Info().Msg("Current list of schedules")
-        fmt.Println(s.bookings.Bookings)
-
+    for {
+        s.getNextScheduledWorkflows(1)
+        s.Logger.Info().Msg("Current list of schedules -------> " + fmt.Sprintf("%v", len(Bookings.Bookings)))
         time.Sleep(time.Minute * time.Duration(sleep_time))
     }
 }
-func (s *ScheduleManager) getWorfklowExecution(from time.Time, to time.Time) (exec_list []workflow_execution.WorkflowExecution, err error) {
+
+func (s *ScheduleManager) getExecution(from time.Time, to time.Time) (exec_list []*workflow_execution.WorkflowExecution, err error) {
+    fmt.Printf("Getting workflows execution from %s to %s \n", from.String(), to.String())
     f := dbs.Filters{
         And: map[string][]dbs.Filter{
-            "execution_date" : {{Operator : dbs.GTE.String(), Value: primitive.NewDateTimeFromTime(from)}, {Operator: dbs.LTE.String(), Value: primitive.NewDateTimeFromTime(to)}},
+            "execution_date": {{Operator: dbs.GTE.String(), Value: primitive.NewDateTimeFromTime(from)}, {Operator: dbs.LTE.String(), Value: primitive.NewDateTimeFromTime(to)}},
             "state": {{Operator: dbs.EQUAL.String(), Value: 1}},
         },
     }
-    res := oclib.Search(&f,"",oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION))
+    res := oclib.Search(&f, "", oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION))
     if res.Code != 200 {
-        logger.Logger.Error().Msg("Error loading")
-        return nil, nil
-    }
-
-    for _, exec := range(res.Data){
-        lib_data := oclib.LoadOne(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION),exec.GetID())
-        exec_obj := lib_data.ToWorkflowExecution()
-        exec_list = append(exec_list, *exec_obj)
-    }
-    return exec_list, nil
-}
-
-// TODO : refactor to implement oclib.Search
-func (s *ScheduleManager) getNextScheduledWorkflows(minutes float64) {
-    start := time.Now().UTC()
-    end := start.Add(time.Minute * time.Duration(minutes)).UTC()
-
-    fmt.Printf("Getting workflows execution from %s to %s \n", start.String(), end.String())
-
-    next_wf_exec, err := s.getWorfklowExecution(start,end)
-    if err != nil {
-        logger.Logger.Error().Msg("Could not retrieve next schedules")
+        s.Logger.Error().Msg("Error loading")
         return
     }
-
-    s.bookings.Mu.Lock()
-    defer s.bookings.Mu.Unlock()
-
-    for _, exec := range(next_wf_exec){
-        exec_start := exec.ExecDate
-        exec_stop := exec.EndDate
-
-        s.bookings.AddSchedule(models.Booking{Workflow: exec.UUID, Start: *exec_start, Stop: *exec_stop})
+    for _, exec := range res.Data {
+        exec_list = append(exec_list, exec.(*workflow_execution.WorkflowExecution))
     }
+    return
+}
+
+func (s *ScheduleManager) getNextScheduledWorkflows(minutes float64) {
+    start := time.Now().UTC()
+    if next_wf_exec, err := s.getExecution(
+        start.Add(time.Second * time.Duration(-1)).UTC(),
+        start.Add(time.Minute * time.Duration(minutes)).UTC(),
+    ); err != nil {
+        s.Logger.Error().Msg("Could not retrieve next schedules")
+    } else {
+        Bookings.AddSchedules(next_wf_exec, s.Logger)
+    }
 }

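For context, a hedged sketch of what a producer could publish for listenForChange to pick up: the subject is whatever tools.CREATE.GenerateKey(oclib.WORKFLOW.String()) evaluates to (the same call the daemon uses above), and the handler only reads the "id" field of the JSON payload. The NATS URL and the UUID below are placeholders, and the exact oc-lib types are assumed from this diff:

package main

import (
    "encoding/json"

    oclib "cloud.o-forge.io/core/oc-lib"
    "cloud.o-forge.io/core/oc-lib/tools"
    "github.com/nats-io/nats.go"
)

func main() {
    nc, err := nats.Connect("nats://127.0.0.1:4222") // NATS_URL; adjust to your deployment
    if err != nil {
        panic(err)
    }
    defer nc.Close()

    // Same subject the daemon subscribes to for newly scheduled workflows.
    subject := tools.CREATE.GenerateKey(oclib.WORKFLOW.String())

    // The handler only reads map_mess["id"]; this UUID is a placeholder.
    payload, _ := json.Marshal(map[string]string{"id": "00000000-0000-0000-0000-000000000000"})
    if err := nc.Publish(subject, payload); err != nil {
        panic(err)
    }
}
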
docker-compose.tools.yml (new file, 39 lines)
@@ -0,0 +1,39 @@
+version: '3.4'
+
+services:
+  nats:
+    image: 'nats:latest'
+    container_name: nats
+    ports:
+      - 4222:4222
+    command:
+      - "--debug"
+    networks:
+      - scheduler
+      - catalog
+  loki:
+    image: 'grafana/loki'
+    container_name: loki
+    ports :
+      - "3100:3100"
+    networks:
+      - scheduler
+  grafana:
+    image: 'grafana/grafana'
+    container_name: grafana
+    ports:
+      - '3000:3000'
+    networks:
+      - scheduler
+    volumes:
+      - ./conf/grafana_data_source.yml:/etc/grafana/provisioning/datasources/datasource.yml
+    environment:
+      - GF_SECURITY_ADMIN_PASSWORD=pfnirt # Change this to anything but admin to not have a password change page at startup
+      - GF_SECURITY_ADMIN_USER=admin
+      - GF_SECURITY_DISABLE_INITIAL_ADMIN_PASSWORD_CHANGE=true
+
+networks:
+  scheduler:
+    external: true
+  catalog:
+    external: true

(modified file, docker-compose)
@@ -1,23 +1,16 @@
 version: '3.4'
 
 services:
-  nats:
-    image: 'nats:latest'
-    container_name: nats
+  oc-schedulerd:
+    environment:
+      - MONGO_DATABASE=DC_myDC
+    image: 'oc-schedulerd:latest'
     ports:
-      - 4222:4222
-    command:
-      - "--debug"
+      - 9001:8080
+    container_name: oc-schedulerd
     networks:
-      - scheduler
-  loki:
-    image: 'grafana/loki'
-    container_name: loki
-    ports :
-      - "3100:3100"
-    networks:
-      - scheduler
+      - catalog
 
 networks:
-  scheduler:
+  catalog:
     external: true

(deleted file)
@@ -1,14 +0,0 @@
-package main
-
-import (
-    "oc-scheduler/daemons"
-    "oc-scheduler/models"
-    "testing"
-)
-
-func TestCreateManifest(t *testing.T){
-    em := daemons.ExecutionManager{}
-    em.CreateManifest(models.Booking{},"fessity-chlics_23_07_2024_154326")
-
-
-}

go.mod (8 lines changed)
@@ -1,4 +1,4 @@
-module oc-scheduler
+module oc-schedulerd
 
 go 1.22.0
 
@@ -8,7 +8,7 @@ require (
     github.com/beego/beego v1.12.12
     github.com/beego/beego/v2 v2.2.2
     github.com/goraz/onion v0.1.3
-    github.com/nats-io/nats.go v1.9.1
+    github.com/nats-io/nats.go v1.37.0
     github.com/nwtgck/go-fakelish v0.1.3
     github.com/rs/zerolog v1.33.0
     github.com/tidwall/gjson v1.17.1
@@ -17,7 +17,7 @@ require (
 )
 
 require (
-    cloud.o-forge.io/core/oc-lib v0.0.0-20240812075555-6e3069068ce4 // indirect
+    cloud.o-forge.io/core/oc-lib v0.0.0-20240826103423-aff9304b1a71 // indirect
     github.com/Klathmon/StructToMap v0.0.0-20140724123129-3d0229e2dce7 // indirect
     github.com/antihax/optional v1.0.0 // indirect
     github.com/aws/aws-sdk-go v1.36.29 // indirect
@@ -56,7 +56,7 @@ require (
     github.com/montanaflynn/stats v0.7.1 // indirect
     github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
     github.com/nats-io/jwt v0.3.2 // indirect
-    github.com/nats-io/nkeys v0.1.3 // indirect
+    github.com/nats-io/nkeys v0.4.7 // indirect
     github.com/nats-io/nuid v1.0.1 // indirect
     github.com/pkg/errors v0.9.1 // indirect
     github.com/prometheus/client_golang v1.19.0 // indirect

go.sum (12 lines changed)
@@ -4,6 +4,14 @@ cloud.o-forge.io/core/oc-lib v0.0.0-20240808075405-f45ad91687c4 h1:3xqz2s6r/PONq
 cloud.o-forge.io/core/oc-lib v0.0.0-20240808075405-f45ad91687c4/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo=
 cloud.o-forge.io/core/oc-lib v0.0.0-20240812075555-6e3069068ce4 h1:fdxRsT4eR4v1DM3FpTPi9AKxB5oIw3XgLu9ByNipj4I=
 cloud.o-forge.io/core/oc-lib v0.0.0-20240812075555-6e3069068ce4/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo=
+cloud.o-forge.io/core/oc-lib v0.0.0-20240821093044-f64563c9ff06 h1:sYveE1C/0mpSr+ZmOYxuZ3fTWID7mr5hPiq0jQenv3Q=
+cloud.o-forge.io/core/oc-lib v0.0.0-20240821093044-f64563c9ff06/go.mod h1:1hhYh5QWAbYw9cKplQ0ZD9PMgU8t6gPqiYF8sldv1HU=
+cloud.o-forge.io/core/oc-lib v0.0.0-20240826085916-d0e1474f8f34 h1:40XQgwR9HxXSnouY+ZqE/xYCM4qa+U+RLA5GA5JSNyQ=
+cloud.o-forge.io/core/oc-lib v0.0.0-20240826085916-d0e1474f8f34/go.mod h1:1hhYh5QWAbYw9cKplQ0ZD9PMgU8t6gPqiYF8sldv1HU=
+cloud.o-forge.io/core/oc-lib v0.0.0-20240826094730-73dc43b329d7 h1:WrlURBiciau4p6iU3v0nKcQYjBqW8e9Uc2soDDXll28=
+cloud.o-forge.io/core/oc-lib v0.0.0-20240826094730-73dc43b329d7/go.mod h1:1hhYh5QWAbYw9cKplQ0ZD9PMgU8t6gPqiYF8sldv1HU=
+cloud.o-forge.io/core/oc-lib v0.0.0-20240826103423-aff9304b1a71 h1:GodGMXVFgSdd5R1FoUjFAloOS+zOd3j66Wa+jcEPa4c=
+cloud.o-forge.io/core/oc-lib v0.0.0-20240826103423-aff9304b1a71/go.mod h1:1hhYh5QWAbYw9cKplQ0ZD9PMgU8t6gPqiYF8sldv1HU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/Klathmon/StructToMap v0.0.0-20140724123129-3d0229e2dce7 h1:n0MD6UkwbgGHtXsmfgVzC2+ZbHzIsScpbq9ZGI18074=
 github.com/Klathmon/StructToMap v0.0.0-20140724123129-3d0229e2dce7/go.mod h1:xdrQDwHlKUmv8yiElMx6W0W10cLkqpeSEUUib8KGtv4=
@@ -370,9 +378,13 @@ github.com/nats-io/nats-server/v2 v2.1.2 h1:i2Ly0B+1+rzNZHHWtD4ZwKi+OU5l+uQo1iDH
 github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
 github.com/nats-io/nats.go v1.9.1 h1:ik3HbLhZ0YABLto7iX80pZLPw/6dx3T+++MZJwLnMrQ=
 github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
+github.com/nats-io/nats.go v1.37.0 h1:07rauXbVnnJvv1gfIyghFEo6lUcYRY0WXc3x7x0vUxE=
+github.com/nats-io/nats.go v1.37.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
 github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
 github.com/nats-io/nkeys v0.1.3 h1:6JrEfig+HzTH85yxzhSVbjHRJv9cn0p6n3IngIcM5/k=
 github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
+github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
 github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
 github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=

(deleted file)
@@ -1,13 +0,0 @@
-package logger
-
-import (
-    "cloud.o-forge.io/core/oc-lib/logs"
-    "github.com/rs/zerolog"
-)
-
-var Logger zerolog.Logger
-
-func init() {
-    logs.SetAppName("oc-scheduler")
-    Logger = logs.CreateLogger("","")
-}

main.go (53 lines changed)
@@ -3,54 +3,29 @@ package main
 import (
     "fmt"
 
-    conf "oc-scheduler/conf"
-    "oc-scheduler/models"
-
-    "oc-scheduler/daemons"
+    "oc-schedulerd/conf"
+    "oc-schedulerd/daemons"
 
     oclib "cloud.o-forge.io/core/oc-lib"
 )
 
-// var log zerolog.Logger
-
 func main() {
-    var bookings models.ScheduledBooking
-
-    oclib.SetConfig(conf.GetConfig().MongoUrl,"DC_myDC")
-    oclib.Init("oc-scheduler")
+    oclib.SetConfig(
+        conf.GetConfig().MongoUrl,
+        conf.GetConfig().DBName,
+        conf.GetConfig().NatsUrl,
+        conf.GetConfig().LokiUrl,
+        conf.GetConfig().Logs,
+    )
+    oclib.Init("oc-schedulerd")
 
-    app_conf := conf.GetConfig()
-    apiurl := app_conf.OcCatalogUrl
-
-    sch_mngr := daemons.ScheduleManager{Api_url: apiurl}
-    sch_mngr.SetBookings(&bookings)
+    sch_mngr := daemons.ScheduleManager{Logger: oclib.GetLogger()}
     exe_mngr := daemons.ExecutionManager{}
-    exe_mngr.SetBookings(&bookings)
 
-    go sch_mngr.ListenForWorkflowSubmissions()
+    go sch_mngr.ListenNATS()
     go sch_mngr.SchedulePolling()
 
     exe_mngr.RetrieveNextExecutions()
 
-    // method in Schedule manager that checks the first Schedule object for its start date and exe
-
-    // var g Graph
-
-    // list, err := g.GetGraphList(apiurl)
-    // if err != nil {
-    //     log.Fatal().Msg("Failed to get the workspaces list, check api url and that api server is up : " + apiurl)
-    // }
-
-    // println("Available workspaces :")
-    // for workspace, _ := range list {
-    //     println(workspace)
-    // }
-
-    // g.LoadFrom(list["test-alpr"])
-    // g.ExportToArgo("test-alpr")
-
     fmt.Print("stop")
 }

(modified file, test pod manifest)
@@ -4,12 +4,12 @@ metadata:
   name: test-monitor
 spec:
   containers:
-    - name: "oc-monitor-quity-anetran"
+    - name: "oc-workflow-prous-skintris"
       image: docker.io/library/oc-monitor # Currently uses the local contenaird
       imagePullPolicy: IfNotPresent # This should be removed once a registry has been set up
       env:
        - name: "OCMONITOR_ARGOFILE"
-         value: "quity-anetran_29_07_2024_144136.yml"
+         value: "prous-skintris_29_07_2024_164008.yml"
        - name: "OCMONITOR_LOKIURL"
          value: "info" # !!!! In dev this must be replaced with the address of one of your interface (wifi, ethernet..)
   restartPolicy: OnFailure

(deleted file)
@@ -1,53 +0,0 @@
-package models
-
-import (
-    "io"
-    "net/http"
-    "net/http/cookiejar"
-    "net/url"
-)
-
-type HttpQuery struct {
-    baseurl string
-    jar     http.CookieJar
-    Cookies map[string]string
-}
-
-func (h *HttpQuery) Init(url string) {
-    h.baseurl = url
-    h.jar, _ = cookiejar.New(nil)
-    h.Cookies = make(map[string]string)
-}
-
-func (h *HttpQuery) Get(url string) ([]byte, error) {
-    client := &http.Client{Jar: h.jar}
-    resp, err := client.Get(h.baseurl + url)
-    if err != nil {
-        return nil, err
-    }
-    // store received cookies
-    for _, cookie := range h.jar.Cookies(resp.Request.URL) {
-        h.Cookies[cookie.Name] = cookie.Value
-    }
-    if err != nil {
-        return nil, err
-    }
-    body, err := io.ReadAll(resp.Body)
-    if err != nil {
-        return nil, err
-    }
-    return body, nil
-}
-
-func (h *HttpQuery) Post(url string, data url.Values) (*http.Response, error) {
-    client := &http.Client{Jar: h.jar}
-    resp, err := client.PostForm(h.baseurl+url, data)
-    if err != nil {
-        return nil, err
-    }
-    // store received cookies
-    for _, cookie := range h.jar.Cookies(resp.Request.URL) {
-        h.Cookies[cookie.Name] = cookie.Value
-    }
-    return resp, err
-}

(deleted file)
@@ -1,71 +0,0 @@
-package models
-
-import (
-    "fmt"
-    "oc-scheduler/logger"
-    "sync"
-    "time"
-)
-
-// Is duration really important ?
-
-type Booking struct {
-    Start    time.Time
-    Stop     time.Time
-    Duration uint
-    Workflow string
-}
-
-type ScheduledBooking struct {
-    Bookings []Booking
-    Mu       sync.Mutex
-}
-
-func (s Booking) Equals(other Booking) bool {
-    return s.Workflow == other.Workflow && s.Start == other.Start && s.Stop == other.Stop
-}
-
-func (sb *ScheduledBooking) AddSchedule(new_booking Booking){
-    if(!sb.scheduleAlreadyExists(new_booking)){
-        sb.Bookings = append(sb.Bookings,new_booking)
-        logger.Logger.Info().Msg("Updated list schedules : \n " + sb.String())
-    } else {
-        // Debug condition : delete once this feature is ready to be implemented
-        logger.Logger.Debug().Msg("Workflow received not added")
-        logger.Logger.Debug().Msg("current schedule contains")
-        for _, booking := range(sb.Bookings){
-            logger.Logger.Debug().Msg(booking.String())
-        }
-    }
-}
-
-func (sb *ScheduledBooking) GetListNames()(list_names []string ){
-    for _, schedule := range(sb.Bookings){
-        list_names = append(list_names, schedule.Workflow)
-    }
-
-    return
-}
-
-func (sb *ScheduledBooking) scheduleAlreadyExists(new_booking Booking) bool {
-    for _, booking := range(sb.Bookings){
-        if booking.Equals(new_booking){
-            return true
-        }
-    }
-    return false
-}
-
-func (b *Booking) String() string {
-    return fmt.Sprintf("{workflow : %s , start_date : %s , stop_date : %s }", b.Workflow, b.Start.Format(time.RFC3339), b.Stop.Format(time.RFC3339))
-}
-
-func (sb *ScheduledBooking) String() string {
-    var str string
-    for _, booking := range(sb.Bookings){
-        str += fmt.Sprintf("%s\n", booking.String())
-    }
-
-    return str
-}

(deleted file)
@@ -1,40 +0,0 @@
-package models
-
-type Parameter struct {
-    Name  string `yaml:"name,omitempty"`
-    Value string `yaml:"value,omitempty"`
-}
-
-type Container struct {
-    Image        string        `yaml:"image"`
-    Command      []string      `yaml:"command,omitempty,flow"`
-    Args         []string      `yaml:"args,omitempty,flow"`
-    VolumeMounts []VolumeMount `yaml:"volumeMounts,omitempty"`
-}
-
-type VolumeMount struct {
-    Name      string `yaml:"name"`
-    MountPath string `yaml:"mountPath"`
-}
-
-type Task struct {
-    Name         string   `yaml:"name"`
-    Template     string   `yaml:"template"`
-    Dependencies []string `yaml:"dependencies,omitempty"`
-    Arguments    struct {
-        Parameters []Parameter `yaml:"parameters,omitempty"`
-    } `yaml:"arguments,omitempty"`
-}
-
-type Dag struct {
-    Tasks []Task `yaml:"tasks,omitempty"`
-}
-
-type Template struct {
-    Name   string `yaml:"name"`
-    Inputs struct {
-        Parameters []Parameter `yaml:"parameters"`
-    } `yaml:"inputs,omitempty"`
-    Container Container `yaml:"container,omitempty"`
-    Dag       Dag       `yaml:"dag,omitempty"`
-}

(deleted file)
@@ -1,19 +0,0 @@
-package models
-
-
-
-type VolumeClaimTemplate struct {
-    Metadata struct {
-        Name string `yaml:"name"`
-    } `yaml:"metadata"`
-    Spec VolumeSpec `yaml:"spec"`
-}
-
-type VolumeSpec struct {
-    AccessModes []string `yaml:"accessModes,flow"`
-    Resources   struct {
-        Requests struct {
-            Storage string `yaml:"storage"`
-        } `yaml:"requests"`
-    } `yaml:"resources"`
-}

Binary file not shown.