Compare commits
15 Commits
5302ed48b3
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| 98fe2600b3 | |||
| 29623244c4 | |||
| c8b8955c4b | |||
| 2ccbfe93ed | |||
| c5741b9650 | |||
| 9e08c13144 | |||
| d1c380fde2 | |||
| b565c1930c | |||
| 22ab916590 | |||
| 69620efaf2 | |||
| 1dcf0336d7 | |||
| d6427492fa | |||
| 369a53a672 | |||
| 580499e8db | |||
| 399d746b49 |
10
Makefile
10
Makefile
@@ -22,19 +22,21 @@ clean:
|
||||
|
||||
docker:
|
||||
DOCKER_BUILDKIT=1 docker build -t oc-scheduler -f Dockerfile . --build-arg=HOST=$(HOST)
|
||||
docker tag oc-scheduler:latest oc/oc-scheduler:0.0.1
|
||||
docker tag oc-scheduler opencloudregistry/oc-scheduler:latest
|
||||
|
||||
publish-kind:
|
||||
kind load docker-image oc/oc-scheduler:0.0.1 --name opencloud | true
|
||||
kind load docker-image opencloudregistry/oc-scheduler:latest --name $(CLUSTER_NAME) | true
|
||||
|
||||
publish-registry:
|
||||
@echo "TODO"
|
||||
docker push opencloudregistry/oc-scheduler:latest
|
||||
|
||||
docker-deploy:
|
||||
docker compose up -d
|
||||
|
||||
run-docker: docker publish-kind publish-registry docker-deploy
|
||||
|
||||
all: docker publish-kind publish-registry
|
||||
all: docker publish-kind
|
||||
|
||||
ci: docker publish-registry
|
||||
|
||||
.PHONY: build run clean docker publish-kind publish-registry
|
||||
|
||||
21
conf/config.go
Normal file
21
conf/config.go
Normal file
@@ -0,0 +1,21 @@
|
||||
package conf
|
||||
|
||||
import "sync"
|
||||
|
||||
type Config struct {
|
||||
KubeHost string
|
||||
KubePort string
|
||||
KubeCA string
|
||||
KubeCert string
|
||||
KubeData string
|
||||
}
|
||||
|
||||
var instance *Config
|
||||
var once sync.Once
|
||||
|
||||
func GetConfig() *Config {
|
||||
once.Do(func() {
|
||||
instance = &Config{}
|
||||
})
|
||||
return instance
|
||||
}
|
||||
110
controllers/booking.go
Normal file
110
controllers/booking.go
Normal file
@@ -0,0 +1,110 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
oclib "cloud.o-forge.io/core/oc-lib"
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/models/booking"
|
||||
beego "github.com/beego/beego/v2/server/web"
|
||||
"github.com/gorilla/websocket"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
)
|
||||
|
||||
// Operations about workspace
|
||||
type BookingController struct {
|
||||
beego.Controller
|
||||
}
|
||||
|
||||
var BookingExample booking.Booking
|
||||
|
||||
// @Title Search
|
||||
// @Description search bookings by execution
|
||||
// @Param id path string true "id execution"
|
||||
// @Param is_draft query string false "draft wished"
|
||||
// @Success 200 {workspace} models.workspace
|
||||
// @router /search/execution/:id [get]
|
||||
func (o *BookingController) ExecutionSearch() {
|
||||
/*
|
||||
* This is a sample of how to use the search function
|
||||
* The search function is used to search for data in the database
|
||||
* The search function takes in a filter and a data type
|
||||
* The filter is a struct that contains the search parameters
|
||||
* The data type is an enum that specifies the type of data to search for
|
||||
* The search function returns a list of data that matches the filter
|
||||
* The data is then returned as a json object
|
||||
*/
|
||||
// store and return Id or post with UUID
|
||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
id := o.Ctx.Input.Param(":id")
|
||||
isDraft := o.Ctx.Input.Query("is_draft")
|
||||
f := dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
|
||||
"execution_id": {{Operator: dbs.EQUAL.String(), Value: id}},
|
||||
},
|
||||
}
|
||||
o.Data["json"] = oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), user, peerID, groups, nil).Search(&f, "", isDraft == "true")
|
||||
o.ServeJSON()
|
||||
}
|
||||
|
||||
// @Title Search
|
||||
// @Description search bookings
|
||||
// @Param start_date path string true "the word search you want to get"
|
||||
// @Param end_date path string true "the word search you want to get"
|
||||
// @Param is_draft query string false "draft wished"
|
||||
// @Success 200 {workspace} models.workspace
|
||||
// @router /search/:start_date/:end_date [get]
|
||||
func (o *BookingController) Search() {
|
||||
/*
|
||||
* This is a sample of how to use the search function
|
||||
* The search function is used to search for data in the database
|
||||
* The search function takes in a filter and a data type
|
||||
* The filter is a struct that contains the search parameters
|
||||
* The data type is an enum that specifies the type of data to search for
|
||||
* The search function returns a list of data that matches the filter
|
||||
* The data is then returned as a json object
|
||||
*/
|
||||
// store and return Id or post with UUID
|
||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
start_date, _ := time.Parse("2006-01-02", o.Ctx.Input.Param(":start_date"))
|
||||
end_date, _ := time.Parse("2006-01-02", o.Ctx.Input.Param(":end_date"))
|
||||
isDraft := o.Ctx.Input.Query("is_draft")
|
||||
sd := primitive.NewDateTimeFromTime(start_date)
|
||||
ed := primitive.NewDateTimeFromTime(end_date)
|
||||
f := dbs.Filters{
|
||||
And: map[string][]dbs.Filter{
|
||||
"execution_date": {{Operator: "gte", Value: sd}, {Operator: "lte", Value: ed}},
|
||||
},
|
||||
}
|
||||
o.Data["json"] = oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), user, peerID, groups, nil).Search(&f, "", isDraft == "true")
|
||||
o.ServeJSON()
|
||||
}
|
||||
|
||||
// @Title GetAll
|
||||
// @Description find booking by id
|
||||
// @Param is_draft query string false "draft wished"
|
||||
// @Success 200 {booking} models.booking
|
||||
// @router / [get]
|
||||
func (o *BookingController) GetAll() {
|
||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
isDraft := o.Ctx.Input.Query("is_draft")
|
||||
o.Data["json"] = oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), user, peerID, groups, nil).LoadAll(isDraft == "true")
|
||||
o.ServeJSON()
|
||||
}
|
||||
|
||||
// @Title Get
|
||||
// @Description find booking by id
|
||||
// @Param id path string true "the id you want to get"
|
||||
// @Success 200 {booking} models.booking
|
||||
// @router /:id [get]
|
||||
func (o *BookingController) Get() {
|
||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
id := o.Ctx.Input.Param(":id")
|
||||
o.Data["json"] = oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), user, peerID, groups, nil).LoadOne(id)
|
||||
o.ServeJSON()
|
||||
}
|
||||
|
||||
var upgrader = websocket.Upgrader{
|
||||
CheckOrigin: func(r *http.Request) bool { return true }, // allow all origins
|
||||
}
|
||||
@@ -33,7 +33,7 @@ func (o *WorkflowExecutionController) SearchPerDate() {
|
||||
* The search function returns a list of data that matches the filter
|
||||
* The data is then returned as a json object
|
||||
*/
|
||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
// user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
// store and return Id or post with UUID
|
||||
start_date, _ := time.Parse("2006-01-02", o.Ctx.Input.Param(":start_date"))
|
||||
end_date, _ := time.Parse("2006-01-02", o.Ctx.Input.Param(":end_date"))
|
||||
@@ -45,7 +45,9 @@ func (o *WorkflowExecutionController) SearchPerDate() {
|
||||
},
|
||||
}
|
||||
isDraft := o.Ctx.Input.Query("is_draft")
|
||||
o.Data["json"] = oclib.NewRequest(collection, user, peerID, groups, nil).Search(&f, "", isDraft == "true")
|
||||
// o.Data["json"] = oclib.NewRequest(collection, user, peerID, groups, nil).Search(&f, "", isDraft == "true")
|
||||
o.Data["json"] = oclib.NewRequestAdmin(collection, nil).Search(&f, "", isDraft == "true")
|
||||
|
||||
o.ServeJSON()
|
||||
}
|
||||
|
||||
@@ -55,9 +57,10 @@ func (o *WorkflowExecutionController) SearchPerDate() {
|
||||
// @Success 200 {workflow} models.workflow
|
||||
// @router / [get]
|
||||
func (o *WorkflowExecutionController) GetAll() {
|
||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
// user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
isDraft := o.Ctx.Input.Query("is_draft")
|
||||
o.Data["json"] = oclib.NewRequest(collection, user, peerID, groups, nil).LoadAll(isDraft == "true")
|
||||
// o.Data["json"] = oclib.NewRequest(collection, user, peerID, groups, nil).LoadAll(isDraft == "true")
|
||||
o.Data["json"] = oclib.NewRequestAdmin(collection, nil).LoadAll(isDraft == "true")
|
||||
o.ServeJSON()
|
||||
}
|
||||
|
||||
@@ -67,9 +70,10 @@ func (o *WorkflowExecutionController) GetAll() {
|
||||
// @Success 200 {workflow} models.workflow
|
||||
// @router /:id [get]
|
||||
func (o *WorkflowExecutionController) Get() {
|
||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
//user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
id := o.Ctx.Input.Param(":id")
|
||||
o.Data["json"] = oclib.NewRequest(collection, user, peerID, groups, nil).LoadOne(id)
|
||||
// o.Data["json"] = oclib.NewRequest(collection, user, peerID, groups, nil).LoadOne(id)
|
||||
o.Data["json"] = oclib.NewRequestAdmin(collection, nil).LoadOne(id)
|
||||
o.ServeJSON()
|
||||
}
|
||||
|
||||
@@ -80,9 +84,11 @@ func (o *WorkflowExecutionController) Get() {
|
||||
// @Success 200 {compute} models.compute
|
||||
// @router /search/:search [get]
|
||||
func (o *WorkflowExecutionController) Search() {
|
||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
// user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
isDraft := o.Ctx.Input.Query("is_draft")
|
||||
search := o.Ctx.Input.Param(":search")
|
||||
o.Data["json"] = oclib.NewRequest(collection, user, peerID, groups, nil).Search(nil, search, isDraft == "true")
|
||||
// o.Data["json"] = oclib.NewRequest(collection, user, peerID, groups, nil).Search(nil, search, isDraft == "true")
|
||||
o.Data["json"] = oclib.NewRequestAdmin(collection, nil).Search(nil, search, isDraft == "true")
|
||||
|
||||
o.ServeJSON()
|
||||
}
|
||||
|
||||
@@ -3,18 +3,16 @@ package controllers
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"oc-scheduler/infrastructure"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
oclib "cloud.o-forge.io/core/oc-lib"
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources"
|
||||
"cloud.o-forge.io/core/oc-lib/models/workflow"
|
||||
"cloud.o-forge.io/core/oc-lib/models/workflow/graph"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
beego "github.com/beego/beego/v2/server/web"
|
||||
"github.com/google/uuid"
|
||||
gorillaws "github.com/gorilla/websocket"
|
||||
)
|
||||
|
||||
var orderCollection = oclib.LibDataEnum(oclib.ORDER)
|
||||
@@ -52,14 +50,16 @@ func (o *WorkflowSchedulerController) Schedule() {
|
||||
})
|
||||
|
||||
logger.Info().Msg("Booking for " + wfId)
|
||||
req := oclib.NewRequest(collection, user, peerID, groups, caller)
|
||||
req := oclib.NewRequestAdmin(collection, caller)
|
||||
// req := oclib.NewRequest(collection, user, peerID, groups, caller)
|
||||
resp.UUID = uuid.New().String()
|
||||
|
||||
sch, _, execs, err := resp.Schedules(wfId, &tools.APIRequest{
|
||||
fmt.Println(user, peerID, groups)
|
||||
sch, _, _, err := resp.Schedules(wfId, &tools.APIRequest{
|
||||
Username: user,
|
||||
PeerID: peerID,
|
||||
Groups: groups,
|
||||
Caller: caller,
|
||||
Admin: true,
|
||||
})
|
||||
if err != nil {
|
||||
if sch != nil {
|
||||
@@ -75,29 +75,6 @@ func (o *WorkflowSchedulerController) Schedule() {
|
||||
o.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
logger.Info().Msg("Creating S3 service account if necessary")
|
||||
for _, exec := range execs {
|
||||
execId := exec.ExecutionsID
|
||||
logger.Info().Msg("S3 ServiceAccount for " + execId)
|
||||
// execId = "6cdaf6e4-5727-480e-ab97-f78853c4e553"
|
||||
err = createStorageServiceAccount(execId, peerID, wfId, sch, caller, user, groups)
|
||||
if err != nil {
|
||||
// if sch != nil {
|
||||
// for _, w := range sch.WorkflowExecution {
|
||||
// req.DeleteOne(w.GetID())
|
||||
// }
|
||||
// }
|
||||
o.Data["json"] = map[string]interface{}{
|
||||
"data": nil,
|
||||
"code": 409,
|
||||
"error": err.Error(),
|
||||
}
|
||||
o.ServeJSON()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
o.Data["json"] = map[string]interface{}{
|
||||
"data": sch.WorkflowExecution,
|
||||
"code": code,
|
||||
@@ -106,6 +83,124 @@ func (o *WorkflowSchedulerController) Schedule() {
|
||||
o.ServeJSON()
|
||||
}
|
||||
|
||||
var wsUpgrader = gorillaws.Upgrader{
|
||||
CheckOrigin: func(r *http.Request) bool { return true },
|
||||
}
|
||||
|
||||
// CheckStreamHandler is a plain http.HandlerFunc (registered via beego.Handler
|
||||
// to avoid Beego's WriteHeader interference with the WebSocket upgrade).
|
||||
// Path: /oc/:id/check → parts = ["", "oc", "<id>", "check"]
|
||||
// Query params: as_possible=true, preemption=true
|
||||
func CheckStreamHandler(w http.ResponseWriter, r *http.Request) {
|
||||
parts := strings.Split(strings.TrimSuffix(r.URL.Path, "/"), "/")
|
||||
wfID := parts[len(parts)-2] // second-to-last segment
|
||||
|
||||
q := r.URL.Query()
|
||||
asap := q.Get("as_possible") == "true"
|
||||
preemption := q.Get("preemption") == "true"
|
||||
|
||||
user, peerID, groups := oclib.ExtractTokenInfo(*r)
|
||||
req := &tools.APIRequest{
|
||||
Username: user,
|
||||
PeerID: peerID,
|
||||
Groups: groups,
|
||||
Caller: nil,
|
||||
Admin: true,
|
||||
}
|
||||
|
||||
// Resolve the peer IDs concerned by this workflow before upgrading so we
|
||||
// can abort cleanly with a plain HTTP error if the workflow is not found.
|
||||
watchedPeers, err := infrastructure.GetWorkflowPeerIDs(wfID, req)
|
||||
fmt.Println("Here my watched peers involved in workflow", watchedPeers)
|
||||
if err != nil {
|
||||
http.Error(w, `{"code":404,"error":"`+err.Error()+`"}`, http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
// Upgrade to WebSocket.
|
||||
conn, err := wsUpgrader.Upgrade(w, r, nil)
|
||||
if err != nil {
|
||||
// gorilla already wrote the error response
|
||||
return
|
||||
}
|
||||
|
||||
// Read the schedule parameters sent by the client as the first message.
|
||||
var ws infrastructure.WorkflowSchedule
|
||||
if err := conn.ReadJSON(&ws); err != nil {
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
|
||||
// Subscribe to planner updates for the initially resolved peers and to
|
||||
// workflow change notifications (peer list may change on workflow edit).
|
||||
plannerCh, plannerUnsub := infrastructure.SubscribePlannerUpdates(watchedPeers)
|
||||
wfCh, wfUnsub := infrastructure.SubscribeWorkflowUpdates(wfID)
|
||||
|
||||
// Cleanup on exit: cancel subscriptions, evict planner cache entries,
|
||||
// signal PB_CLOSE_PLANNER on NATS for each peer that was being watched.
|
||||
defer func() {
|
||||
conn.Close()
|
||||
plannerUnsub()
|
||||
wfUnsub()
|
||||
for _, peer := range watchedPeers {
|
||||
if b, err := json.Marshal(map[string]interface{}{"peer_id": peer}); err == nil {
|
||||
infrastructure.EmitNATS(peer, tools.PropalgationMessage{
|
||||
Action: tools.PB_CLOSE_PLANNER,
|
||||
Payload: b,
|
||||
})
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
push := func() error {
|
||||
result, checkErr := ws.Check(wfID, asap, preemption, req)
|
||||
fmt.Println(result, checkErr)
|
||||
if checkErr != nil {
|
||||
return checkErr
|
||||
}
|
||||
return conn.WriteJSON(result)
|
||||
}
|
||||
|
||||
// Initial check.
|
||||
if err := push(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Detect client-side close in a separate goroutine.
|
||||
closeCh := make(chan struct{})
|
||||
go func() {
|
||||
defer close(closeCh)
|
||||
for {
|
||||
if _, _, err := conn.ReadMessage(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Stream loop.
|
||||
for {
|
||||
select {
|
||||
case <-wfCh:
|
||||
// The workflow was modified: refresh the peer list and re-subscribe
|
||||
// so the stream watches the correct set of planners going forward.
|
||||
if newPeers, err := infrastructure.GetWorkflowPeerIDs(wfID, req); err == nil {
|
||||
plannerUnsub()
|
||||
watchedPeers = newPeers
|
||||
plannerCh, plannerUnsub = infrastructure.SubscribePlannerUpdates(newPeers)
|
||||
}
|
||||
if err := push(); err != nil {
|
||||
return
|
||||
}
|
||||
case <-plannerCh:
|
||||
if err := push(); err != nil {
|
||||
return
|
||||
}
|
||||
case <-closeCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// @Title UnSchedule
|
||||
// @Description schedule workflow
|
||||
// @Param id path string true "id execution"
|
||||
@@ -113,7 +208,7 @@ func (o *WorkflowSchedulerController) Schedule() {
|
||||
// @Success 200 {workspace} models.workspace
|
||||
// @router /:id [delete]
|
||||
func (o *WorkflowSchedulerController) UnSchedule() {
|
||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
// user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
id := o.Ctx.Input.Param(":id")
|
||||
// TODO UNSCHEDULER
|
||||
filter := &dbs.Filters{
|
||||
@@ -121,7 +216,9 @@ func (o *WorkflowSchedulerController) UnSchedule() {
|
||||
"workflow_id": {{Operator: dbs.EQUAL.String(), Value: id}},
|
||||
},
|
||||
}
|
||||
o.Data["json"] = oclib.NewRequest(collection, user, peerID, groups, nil).Search(filter, "", true)
|
||||
o.Data["json"] = oclib.NewRequestAdmin(collection, nil).Search(filter, "", true)
|
||||
|
||||
// o.Data["json"] = oclib.NewRequest(collection, user, peerID, groups, nil).Search(filter, "", true)
|
||||
o.ServeJSON()
|
||||
}
|
||||
|
||||
@@ -133,250 +230,15 @@ func (o *WorkflowSchedulerController) UnSchedule() {
|
||||
func (o *WorkflowSchedulerController) SearchScheduledDraftOrder() {
|
||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
id := o.Ctx.Input.Param(":id")
|
||||
fmt.Println(user, peerID, groups)
|
||||
filter := &dbs.Filters{
|
||||
And: map[string][]dbs.Filter{
|
||||
"workflow_id": {{Operator: dbs.EQUAL.String(), Value: id}},
|
||||
"order_by": {{Operator: dbs.EQUAL.String(), Value: peerID}},
|
||||
},
|
||||
}
|
||||
o.Data["json"] = oclib.NewRequest(orderCollection, user, peerID, groups, nil).Search(filter, "", true)
|
||||
o.Data["json"] = oclib.NewRequestAdmin(orderCollection, nil).Search(filter, "", true)
|
||||
|
||||
//o.Data["json"] = oclib.NewRequest(orderCollection, user, peerID, groups, nil).Search(filter, "", true)
|
||||
o.ServeJSON()
|
||||
}
|
||||
|
||||
func createStorageServiceAccount(execId string, peerID string, wfId string, wfs *infrastructure.WorkflowSchedule, caller *tools.HTTPCaller, user string, groups []string) error {
|
||||
// Retrieve the Workflow in the WorkflowSchedule
|
||||
wf := loadWorkflow(wfId, peerID)
|
||||
// storageItems := wf.GetGraphItems(wf.Graph.IsStorage)
|
||||
itemMap := wf.GetItemsByResources()
|
||||
// mapStorageRessources, err := getItemByRessourceId(wf, storageItems)
|
||||
for id, items := range itemMap[tools.STORAGE_RESOURCE] {
|
||||
_ = items
|
||||
// Load the storage
|
||||
s, err := oclib.LoadOneStorage(id, user, peerID, groups)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if s.StorageType == enum.S3 {
|
||||
// DEV MULTI PEER MINIO CREDENTIAL CREATION
|
||||
|
||||
// retrieve all the processing linked to a compute using the storage : processing -- compute -- storage
|
||||
// In this case we need to retrieve the Item ID(s) for each storage to be able to evaluate links with other items
|
||||
associatedComputingResources := getAssociatedComputeRessources(*wf, itemMap[tools.STORAGE_RESOURCE][id])
|
||||
for _, computeId := range associatedComputingResources {
|
||||
|
||||
c, err := oclib.LoadOneComputing(computeId, user, peerID, groups)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if c.CreatorID == s.CreatorID {
|
||||
// post on datacenter /minio/createServiceAccount
|
||||
err := postCreateServiceAccount(peerID, s, caller, execId, wfId)
|
||||
if err != nil {
|
||||
// Add a logger.Info() here
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// get on storage datacenter /minio/createServiceAccount
|
||||
access, secret, err := getServiceAccountCredentials(peerID, *s, caller, execId, wfId, *c)
|
||||
if err != nil {
|
||||
// Add a logger.Info() here
|
||||
return err
|
||||
}
|
||||
// post on computing datacenter /minio/createSAsecret
|
||||
err = postS3Secret(peerID, *s, caller, execId, wfId, *c, access, secret) // create the secret holding the retrieved access on c's peer
|
||||
if err != nil {
|
||||
// Add a logger.Info() here
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func postCreateServiceAccount(peerID string, s *resources.StorageResource, caller *tools.HTTPCaller, execId string, wfId string) error {
|
||||
l := oclib.GetLogger()
|
||||
fmt.Println("Creating a service account on " + peerID + " for " + s.Name)
|
||||
res := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER), "", peerID, []string{}, nil).LoadOne(s.CreatorID)
|
||||
if res.Code != 200 {
|
||||
l.Error().Msg("Error while loading a peer for creation of the serviceAccount")
|
||||
return fmt.Errorf(res.Err)
|
||||
}
|
||||
p := res.ToPeer()
|
||||
|
||||
caller.URLS[tools.MINIO_SVCACC] = map[tools.METHOD]string{
|
||||
tools.POST: "/serviceaccount/" + s.UUID + "/" + execId,
|
||||
}
|
||||
|
||||
l.Debug().Msg("Lauching execution on" + p.UUID)
|
||||
_, err := p.LaunchPeerExecution(p.UUID, wfId, tools.MINIO_SVCACC, tools.POST, nil, caller)
|
||||
if err != nil {
|
||||
l.Error().Msg("Error when executing on peer at " + p.Url + " when creating a S3 service account")
|
||||
l.Error().Msg(err.Error())
|
||||
return err
|
||||
}
|
||||
if caller.LastResults["code"].(int) != 200 {
|
||||
l.Error().Msg(fmt.Sprint("Error when trying to create a serviceAccount on storage " + s.Name + " on peer at " + p.Url))
|
||||
if _, ok := caller.LastResults["body"]; ok {
|
||||
l.Error().Msg(string(caller.LastResults["body"].([]byte)))
|
||||
return fmt.Errorf(string(caller.LastResults["body"].(map[string]interface{})["error"].([]byte)))
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadWorkflow(workflowId string, peerId string) *workflow.Workflow {
|
||||
res := oclib.NewRequest(oclib.LibDataEnum(oclib.WORKFLOW), "", peerId, []string{}, nil).LoadOne(workflowId)
|
||||
if res.Code != 200 {
|
||||
l := oclib.GetLogger()
|
||||
l.Error().Msg("Error while loading a workflow for creation of the serviceAccount")
|
||||
return nil
|
||||
}
|
||||
|
||||
return res.ToWorkflow()
|
||||
|
||||
}
|
||||
|
||||
// func getItemByRessourceId(storages string) (map[string][]string, error) {
|
||||
// var storagesMap map[string][]string
|
||||
// }
|
||||
|
||||
func getAssociatedComputeRessources(wf workflow.Workflow, storageNodes []string) []string {
|
||||
storageProcessingLinks := make([]string, 0)
|
||||
for _, id := range storageNodes {
|
||||
processings := getStorageRelatedProcessing(wf, id) // Retrieve all the Processing item linked to one storage node
|
||||
for _, procId := range processings {
|
||||
computings := getComputeProcessing(wf, procId)
|
||||
if !slices.Contains(storageProcessingLinks, computings) {
|
||||
storageProcessingLinks = append(storageProcessingLinks, computings)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return storageProcessingLinks
|
||||
}
|
||||
|
||||
// returns a list of processing item's Id that use the Storage
|
||||
// theses item Id can be used to instantiate the resource
|
||||
func getStorageRelatedProcessing(wf workflow.Workflow, storageId string) (relatedProcessing []string) {
|
||||
var storageLinks []graph.GraphLink
|
||||
// Only keep the links that are associated to the storage
|
||||
for _, link := range wf.Graph.Links {
|
||||
if link.Destination.ID == storageId || link.Source.ID == storageId {
|
||||
storageLinks = append(storageLinks, link)
|
||||
}
|
||||
}
|
||||
|
||||
for _, link := range storageLinks {
|
||||
var resourceId string
|
||||
if link.Source.ID != storageId {
|
||||
resourceId = link.Source.ID
|
||||
} else {
|
||||
resourceId = link.Destination.ID
|
||||
}
|
||||
if wf.Graph.IsProcessing(wf.Graph.Items[resourceId]) {
|
||||
relatedProcessing = append(relatedProcessing, resourceId)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func getComputeProcessing(wf workflow.Workflow, processingId string) (res string) {
|
||||
computeRel := wf.GetByRelatedProcessing(processingId, wf.Graph.IsCompute)
|
||||
for _, rel := range computeRel {
|
||||
return rel.Node.GetID()
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func getServiceAccountCredentials(peerID string, storageRes resources.StorageResource, caller *tools.HTTPCaller, execId string, wfId string, computeRes resources.ComputeResource) (string, string, error) {
|
||||
l := oclib.GetLogger()
|
||||
fmt.Println("Getting a service account for" + computeRes.CreatorID + " on S3 " + storageRes.Name + " on peer " + storageRes.CreatorID)
|
||||
res := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER), "", peerID, []string{}, nil).LoadOne(storageRes.CreatorID)
|
||||
if res.Code != 200 {
|
||||
l.Error().Msg("Error while loading a peer for creation of the serviceAccount")
|
||||
return "", "", fmt.Errorf(res.Err)
|
||||
}
|
||||
p := res.ToPeer()
|
||||
|
||||
caller.URLS[tools.MINIO_SVCACC] = map[tools.METHOD]string{
|
||||
tools.POST: "/serviceaccount/" + storageRes.UUID + "/" + execId,
|
||||
}
|
||||
body := map[string]bool{"retrieve": true}
|
||||
|
||||
l.Debug().Msg("Lauching execution on" + p.UUID)
|
||||
resp, err := p.LaunchPeerExecution(p.UUID, wfId, tools.MINIO_SVCACC, tools.POST, body, caller)
|
||||
if err != nil {
|
||||
l.Error().Msg("Error when executing on peer at " + p.Url + " when retrieving S3 credentials")
|
||||
l.Error().Msg(err.Error())
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
result_code := caller.LastResults["code"].(int)
|
||||
if !slices.Contains([]int{200, 201}, result_code) {
|
||||
l.Error().Msg(fmt.Sprint("Error when trying to create a serviceAccount on storage " + storageRes.Name + " on peer at " + p.Url))
|
||||
if _, ok := caller.LastResults["body"]; ok {
|
||||
l.Error().Msg(string(caller.LastResults["body"].([]byte)))
|
||||
return "", "", fmt.Errorf(string(caller.LastResults["body"].(map[string]interface{})["error"].([]byte)))
|
||||
}
|
||||
}
|
||||
|
||||
var access, secret string
|
||||
if a, ok := resp["access"]; !ok {
|
||||
return "", "", fmt.Errorf("Error in the response returned when creating a S3 serviceAccount on " + storageRes.Name + " on peer " + p.UUID)
|
||||
} else {
|
||||
access = a.(string)
|
||||
}
|
||||
|
||||
if s, ok := resp["secret"]; !ok {
|
||||
return "", "", fmt.Errorf("Error in the response returned when creating a S3 serviceAccount on " + storageRes.Name + " on peer " + p.UUID)
|
||||
} else {
|
||||
secret = s.(string)
|
||||
}
|
||||
|
||||
return access, secret, nil
|
||||
}
|
||||
|
||||
func postS3Secret(peerID string, s resources.StorageResource, caller *tools.HTTPCaller, execId string, wfId string, c resources.ComputeResource, access string, secret string) error {
|
||||
l := oclib.GetLogger()
|
||||
|
||||
res := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER), "", peerID, []string{}, nil).LoadOne(c.CreatorID)
|
||||
if res.Code != 200 {
|
||||
l.Error().Msg("Error while loading a peer for creation of the serviceAccount")
|
||||
return fmt.Errorf(res.Err)
|
||||
}
|
||||
p := res.ToPeer()
|
||||
|
||||
caller.URLS[tools.MINIO_SVCACC_SECRET] = map[tools.METHOD]string{
|
||||
tools.POST: "/secret/" + s.UUID + "/" + execId,
|
||||
}
|
||||
body := map[string]string{"access": access, "secret": secret}
|
||||
|
||||
_, err := p.LaunchPeerExecution(p.UUID, wfId, tools.MINIO_SVCACC_SECRET, tools.POST, body, caller)
|
||||
if err != nil {
|
||||
l.Error().Msg("Error when executing on peer at " + p.Url + " when creating a secret holding s3 credentials in namespace " + execId)
|
||||
l.Error().Msg(err.Error())
|
||||
return fmt.Errorf("Error when executing on peer at " + p.Url + " when creating a secret holding s3 credentials" + " : " + err.Error())
|
||||
}
|
||||
|
||||
result_code := caller.LastResults["code"].(int)
|
||||
if !slices.Contains([]int{200, 201}, result_code) {
|
||||
l.Error().Msg(fmt.Sprint("Error when trying to post the credential to " + s.Name + "to a secret on peer at " + p.Url))
|
||||
if _, ok := caller.LastResults["body"]; ok {
|
||||
l.Error().Msg(string(caller.LastResults["body"].([]byte)))
|
||||
return fmt.Errorf(string(caller.LastResults["body"].(map[string]interface{})["error"].([]byte)))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -11,9 +11,12 @@ services:
|
||||
- "traefik.http.routers.scheduler.rule=PathPrefix(`/scheduler`)"
|
||||
- "traefik.http.middlewares.scheduler-rewrite.replacepathregex.regex=^/scheduler(.*)"
|
||||
- "traefik.http.middlewares.scheduler-rewrite.replacepathregex.replacement=/oc$$1"
|
||||
- "traefik.http.routers.scheduler.middlewares=scheduler-rewrite"
|
||||
- "traefik.http.routers.scheduler.middlewares=scheduler-rewrite,auth-scheduler"
|
||||
- "traefik.http.services.scheduler.loadbalancer.server.port=8080"
|
||||
- "traefik.http.middlewares.scheduler.forwardauth.address=http://oc-auth:8080/oc/forward"
|
||||
|
||||
- "traefik.http.middlewares.auth-scheduler.forwardauth.address=http://oc-auth:8080/oc/forward"
|
||||
- "traefik.http.middlewares.auth-scheduler.forwardauth.trustForwardHeader=true"
|
||||
- "traefik.http.middlewares.auth-scheduler.forwardauth.authResponseHeaders=X-Auth-Request-User,X-Auth-Request-Email"
|
||||
ports:
|
||||
- 8090:8080
|
||||
container_name: oc-scheduler
|
||||
|
||||
140
docs/nats.md
Normal file
140
docs/nats.md
Normal file
@@ -0,0 +1,140 @@
|
||||
# NATS dans oc-scheduler
|
||||
|
||||
## Vue d'ensemble
|
||||
|
||||
`oc-scheduler` utilise NATS comme bus d'événements pour deux objectifs :
|
||||
|
||||
1. **Recevoir les planners** (disponibilité des ressources) publiés par `oc-discovery`.
|
||||
2. **Réagir aux modifications de workflows** pour diffuser un planner actualisé et signaler les streams WebSocket actifs.
|
||||
|
||||
Tout le code NATS se trouve dans `infrastructure/nats.go`.
|
||||
|
||||
---
|
||||
|
||||
## Canaux écoutés
|
||||
|
||||
### `PROPALGATION_EVENT` — réception des planners
|
||||
|
||||
**Condition d'acceptation :** `resp.FromApp == "oc-discovery"` et `prop.Action == PB_PLANNER`.
|
||||
|
||||
**Ce qui se passe :**
|
||||
- Le payload est désérialisé en `planner.Planner`.
|
||||
- Le champ `peer_id` est extrait pour identifier le pair.
|
||||
- Le planner est stocké dans `PlannerCache[peerID]` via `storePlanner()`.
|
||||
- Si c'est la **première apparition** de ce `peerID` dans le cache, une goroutine de TTL est lancée (voir §TTL ci-dessous).
|
||||
- Tous les abonnés en attente d'un changement sur ce `peerID` sont notifiés.
|
||||
|
||||
### `CREATE_RESOURCE` — modification d'un workflow
|
||||
|
||||
**Condition d'acceptation :** `resp.Datatype == WORKFLOW`.
|
||||
|
||||
**Ce qui se passe :**
|
||||
1. Le payload est désérialisé en `workflow.Workflow`.
|
||||
2. `broadcastPlanner(wf)` est appelé : pour chaque pair (storage + compute) du workflow dont le planner **n'est pas encore en cache**, un événement `PB_PLANNER` est émis sur NATS afin de demander un planner frais à `oc-discovery`.
|
||||
3. `notifyWorkflowWatchers(wf.GetID())` est appelé : tous les streams WebSocket qui observent ce workflow sont signalés pour **rafraîchir leur liste de pairs surveillés**.
|
||||
|
||||
---
|
||||
|
||||
## Canaux émis
|
||||
|
||||
### `PROPALGATION_EVENT` — deux actions possibles
|
||||
|
||||
| Action | Déclencheur | Effet attendu |
|
||||
|---|---|---|
|
||||
| `PB_PLANNER` | Workflow modifié, pair inconnu du cache | `oc-discovery` renvoie le planner du pair |
|
||||
| `PB_CLOSE_PLANNER` | TTL expiré **ou** déconnexion WebSocket | Les consommateurs (oc-discovery, autres schedulers) libèrent leur état pour ce pair |
|
||||
|
||||
---
|
||||
|
||||
## Cache des planners (`PlannerCache`)
|
||||
|
||||
```
|
||||
PlannerCache : map[string]*planner.Planner // clé = peerID
|
||||
plannerAddedAt : map[string]time.Time // horodatage de première insertion
|
||||
```
|
||||
|
||||
- Protégé par `plannerMu` (RWMutex).
|
||||
- Alimenté uniquement via `storePlanner()` (appelé par le listener NATS).
|
||||
- Supprimé via `EmitNATS(peerID, PB_CLOSE_PLANNER)`, qui efface l'entrée **et** notifie les abonnés.
|
||||
|
||||
### TTL de 24 heures
|
||||
|
||||
À la **première** insertion d'un `peerID`, une goroutine est lancée :
|
||||
|
||||
```
|
||||
sleep(24h)
|
||||
→ si l'entrée existe encore : EmitNATS(peerID, PB_CLOSE_PLANNER)
|
||||
```
|
||||
|
||||
Cela évite que des planners obsolètes stagnent indéfiniment. L'entrée est supprimée et les streams actifs reçoivent une notification « plus de planner » pour ce pair.
|
||||
|
||||
---
|
||||
|
||||
## Pub/sub interne
|
||||
|
||||
Un registre d'abonnements en mémoire permet à d'autres composants (notamment le controller WebSocket) de réagir aux événements sans coupler directement le code NATS et les goroutines HTTP.
|
||||
|
||||
Deux registres distincts :
|
||||
|
||||
| Registre | Clé | Signification |
|
||||
|---|---|---|
|
||||
| `plannerSubs` | `peerID` | « le planner de ce pair a changé » |
|
||||
| `workflowSubs` | `workflowID` | « ce workflow a été modifié » |
|
||||
|
||||
### API
|
||||
|
||||
```go
|
||||
// S'abonner aux changements de planners pour plusieurs pairs
|
||||
ch, cancel := SubscribePlannerUpdates(peerIDs []string)
|
||||
|
||||
// S'abonner aux modifications d'un workflow
|
||||
ch, cancel := SubscribeWorkflowUpdates(wfID string)
|
||||
```
|
||||
|
||||
Chaque canal est bufferisé (capacité 1) : si un signal est déjà en attente, les suivants sont ignorés sans bloquer l'émetteur.
|
||||
|
||||
---
|
||||
|
||||
## Intégration avec le stream WebSocket (`GET /oc/:id/check`)
|
||||
|
||||
Le handler `CheckStream` dans `controllers/workflow_sheduler.go` exploite ces mécanismes :
|
||||
|
||||
1. **Ouverture** : résolution des `peerIDs` du workflow, abonnement à `SubscribePlannerUpdates` et `SubscribeWorkflowUpdates`.
|
||||
2. **Boucle de streaming** :
|
||||
- `plannerCh` reçoit un signal → re-calcul du `CheckResult` et envoi au client.
|
||||
- `wfCh` reçoit un signal (workflow modifié) → recalcul des `peerIDs`, désabonnement puis réabonnement aux nouveaux pairs, recalcul du résultat et envoi au client.
|
||||
3. **Fermeture** (déconnexion client) :
|
||||
- Désabonnement des deux registres.
|
||||
- `EmitNATS(peerID, PB_CLOSE_PLANNER)` pour **chaque pair surveillé** : le cache est purgé et `oc-discovery` est informé que le scheduler n'a plus besoin du planner.
|
||||
|
||||
---
|
||||
|
||||
## Flux de données résumé
|
||||
|
||||
```
|
||||
oc-discovery ──PROPALGATION_EVENT(PB_PLANNER)──► ListenNATS
|
||||
│
|
||||
storePlanner()
|
||||
PlannerCache[peerID] = planner
|
||||
notifyPlannerWatchers(peerID)
|
||||
│
|
||||
SubscribePlannerUpdates
|
||||
│
|
||||
CheckStream (WS) ──► client
|
||||
|
||||
Workflow modifié ──CREATE_RESOURCE(WORKFLOW)──► ListenNATS
|
||||
│
|
||||
broadcastPlanner(wf)
|
||||
PROPALGATION_EVENT(PB_PLANNER) → oc-discovery
|
||||
notifyWorkflowWatchers(wfID)
|
||||
│
|
||||
SubscribeWorkflowUpdates
|
||||
│
|
||||
CheckStream refresh peerIDs ──► client
|
||||
|
||||
TTL 24h / déconnexion WS ──► EmitNATS(PB_CLOSE_PLANNER)
|
||||
│
|
||||
delete PlannerCache[peerID]
|
||||
notifyPlannerWatchers(peerID)
|
||||
PROPALGATION_EVENT(PB_CLOSE_PLANNER) → NATS bus
|
||||
```
|
||||
71
docs/seq_check.puml
Normal file
71
docs/seq_check.puml
Normal file
@@ -0,0 +1,71 @@
|
||||
@startuml seq_check
|
||||
title Flux CHECK — Peer A ↔ Peer B via oc-discovery
|
||||
|
||||
skinparam sequenceMessageAlign center
|
||||
skinparam sequence {
|
||||
ArrowColor #333333
|
||||
LifeLineBorderColor #888888
|
||||
GroupBorderColor #777777
|
||||
GroupBackgroundColor #FAFAFA
|
||||
NoteBackgroundColor #FFFDE7
|
||||
NoteBorderColor #CCAA00
|
||||
BoxBorderColor #555555
|
||||
}
|
||||
skinparam ParticipantBackgroundColor #FFFFFF
|
||||
|
||||
box "Peer A" #EAF3FB
|
||||
participant "oc-scheduler A" as SA
|
||||
participant "oc-discovery A" as DA
|
||||
end box
|
||||
|
||||
box "Peer B" #EAF9EE
|
||||
participant "oc-discovery B" as DB
|
||||
participant "oc-scheduler B" as SB
|
||||
end box
|
||||
|
||||
participant "Client" as Client
|
||||
|
||||
' ══════════════════════════════════════════════════════
|
||||
== Alimentation continue du PlannerCache (fond permanent) ==
|
||||
' ══════════════════════════════════════════════════════
|
||||
|
||||
note over SA, SB
|
||||
Déclenché par : démarrage de SB, booking local créé,
|
||||
TTL planner expiré → refreshSelfPlanner()
|
||||
end note
|
||||
|
||||
SB -> DB : **NATS PUB** · PROPALGATION_EVENT\nPB_PLANNER { peer_id, schedule, capacities }
|
||||
DB --> DA : **STREAM** · PropalgationMessage\n{ action: PB_PLANNER }
|
||||
DA -> SA : **NATS SUB** · PROPALGATION_EVENT\n[ FromApp = "oc-discovery" ]
|
||||
SA -> SA : storePlanner(PeerB.PeerID, planner)\n→ PlannerCache[PeerB.PeerID] = p
|
||||
|
||||
' ══════════════════════════════════════════════════════
|
||||
== Flux CHECK (POST /oc/:wfID/check) ==
|
||||
' ══════════════════════════════════════════════════════
|
||||
|
||||
Client -> SA : POST /oc/:wfID/check\n?as_possible=true&preemption=false
|
||||
|
||||
group ① Résolution du workflow
|
||||
SA -> SA : workflow.LoadOne(wfID)
|
||||
SA -> SA : collectBookingResources(wf)\n→ [ { peerID=B, resourceID, instanceID } ]
|
||||
end
|
||||
|
||||
group ② Vérification locale contre le cache
|
||||
SA -> SA : checkResourceAvailability()\nPlannerCache[PeerB.PeerID].Check(res, inst, start, end)
|
||||
|
||||
alt slot disponible
|
||||
SA -> SA : available = true
|
||||
else slot occupé
|
||||
SA -> SA : findNextSlot(window=5h, pas=15min)\n→ next_slot
|
||||
end
|
||||
end
|
||||
|
||||
SA -> Client : **CheckResult**\n{ available, start, end, next_slot, warnings }
|
||||
|
||||
note over Client, SB
|
||||
Aucun appel réseau pendant le check :
|
||||
tout est résolu depuis le PlannerCache local de A.
|
||||
oc-discovery n'intervient qu'en amont (fond continu).
|
||||
end note
|
||||
|
||||
@enduml
|
||||
95
docs/seq_schedule.puml
Normal file
95
docs/seq_schedule.puml
Normal file
@@ -0,0 +1,95 @@
|
||||
@startuml seq_schedule
|
||||
title Flux SCHEDULE — Peer A ↔ Peer B via oc-discovery
|
||||
|
||||
skinparam sequenceMessageAlign center
|
||||
skinparam sequence {
|
||||
ArrowColor #333333
|
||||
LifeLineBorderColor #888888
|
||||
GroupBorderColor #777777
|
||||
GroupBackgroundColor #FAFAFA
|
||||
NoteBackgroundColor #FFFDE7
|
||||
NoteBorderColor #CCAA00
|
||||
BoxBorderColor #555555
|
||||
}
|
||||
skinparam ParticipantBackgroundColor #FFFFFF
|
||||
|
||||
participant "Client" as Client
|
||||
|
||||
box "Peer A" #EAF3FB
|
||||
participant "oc-scheduler A" as SA
|
||||
participant "oc-discovery A" as DA
|
||||
end box
|
||||
|
||||
box "Peer B" #EAF9EE
|
||||
participant "oc-discovery B" as DB
|
||||
participant "oc-scheduler B" as SB
|
||||
end box
|
||||
|
||||
' ══════════════════════════════════════════════════════════════════
|
||||
Client -> SA : POST /oc/:wfID
|
||||
|
||||
' ──────────────────────────────────────────────────────────────────
|
||||
group ① Planification — synchrone (GetBuyAndBook)
|
||||
SA -> SA : workflow.LoadOne(wfID)\nwf.Planify(start, end, instances, …)\nexec.Buy() → purchases [ DestPeerID = B ]\nexec.Book() → bookings [ DestPeerID = B ]\n⇒ WorkflowExecution {\n BookingsState: { booking_id: false }\n PurchasesState: { purchase_id: false }\n }
|
||||
end
|
||||
|
||||
' ──────────────────────────────────────────────────────────────────
|
||||
group ② Propagation vers Peer B — goroutines (errCh attend l'envoi NATS, pas la réception par B)
|
||||
|
||||
SA -> DA : **NATS PUB** · CREATE_RESOURCE\nPURCHASE_RESOURCE { DestPeerID=B, IsDraft=true }
|
||||
note right of DA : oc-discovery A est le\nrécepteur systématique\ndes émissions NATS de SA
|
||||
DA --> DB : **STREAM** · PropalgationMessage\n{ datatype: PURCHASE_RESOURCE }
|
||||
DB -> SB : **NATS SUB** · CREATE_RESOURCE PURCHASE_RESOURCE
|
||||
|
||||
SA -> DA : **NATS PUB** · CREATE_RESOURCE\nBOOKING { DestPeerID=B, IsDraft=true }
|
||||
DA --> DB : **STREAM** · PropalgationMessage\n{ datatype: BOOKING }
|
||||
DB -> SB : **NATS SUB** · CREATE_RESOURCE BOOKING
|
||||
|
||||
end
|
||||
|
||||
' ──────────────────────────────────────────────────────────────────
|
||||
group ③ Peer B traite — async (ListenNATS goroutine de SB)
|
||||
|
||||
SB -> SB : StoreOne(purchase, IsDraft=true)\nAfterFunc(10 min → draftTimeout)
|
||||
|
||||
SB -> DB : **NATS PUB** · PROPALGATION_EVENT\nConsiders { DataType:PURCHASE_RESOURCE,\n id=purchase_id, execution_id }
|
||||
note right of DB : SB émet sur son NATS local\nDB (oc-discovery B) reçoit
|
||||
DB --> DA : **STREAM** · PropalgationMessage\n{ action: Considers, DataType: PURCHASE_RESOURCE }
|
||||
DA -> SA : **NATS SUB** · PROPALGATION_EVENT\n[ FromApp = "oc-discovery" ]
|
||||
SA -> SA : updateExecutionState()\nPurchasesState[ purchase_id ] = true
|
||||
|
||||
SB -> SB : PlannerCache[self].Check(slot) ✓\nStoreOne(booking, IsDraft=true)\nAfterFunc(10 min → draftTimeout)\nrefreshSelfPlanner()
|
||||
|
||||
SB -> DB : **NATS PUB** · PROPALGATION_EVENT\nPB_PLANNER { peer_id, schedule, capacities }
|
||||
DB --> DA : **STREAM** · PropalgationMessage\n{ action: PB_PLANNER }
|
||||
DA -> SA : **NATS SUB** · PROPALGATION_EVENT\n[ FromApp = "oc-discovery" ]
|
||||
SA -> SA : storePlanner(PeerB.PeerID, p)
|
||||
|
||||
SB -> DB : **NATS PUB** · PROPALGATION_EVENT\nConsiders { DataType:BOOKING,\n id=booking_id, execution_id }
|
||||
DB --> DA : **STREAM** · PropalgationMessage\n{ action: Considers, DataType: BOOKING }
|
||||
DA -> SA : **NATS SUB** · PROPALGATION_EVENT\n[ FromApp = "oc-discovery" ]
|
||||
SA -> SA : updateExecutionState()\nBookingsState[ booking_id ] = true\n→ tous true → State = SCHEDULED (DB)
|
||||
|
||||
end
|
||||
|
||||
' ──────────────────────────────────────────────────────────────────
|
||||
group ④ Schedules() finalise — synchrone (concurrent avec ③)
|
||||
SA -> SA : GenerateOrder(purchases, bookings)\nexec.PurgeDraft()\nexec.StoreDraftDefault() → State=SCHEDULED, IsDraft=false\nGenericStoreOne(exec)
|
||||
|
||||
SA -> DA : **NATS PUB** · PROPALGATION_EVENT [goroutine]\nConsiders { DataType:WORKFLOW_EXECUTION,\n execution, peer_ids:[ PeerB ] }
|
||||
note right of DA : oc-discovery A reçoit\net STREAM vers tous les\npairs listés dans peer_ids
|
||||
DA --> DB : **STREAM** · PropalgationMessage\n{ action: Considers, DataType: WORKFLOW_EXECUTION }
|
||||
DB -> SB : **NATS SUB** · PROPALGATION_EVENT\n[ FromApp = "oc-discovery" ]
|
||||
SB -> SB : confirmExecutionDrafts()\nconfirmResource(booking_id)\n → Booking.IsDraft=false, State=SCHEDULED\nconfirmResource(purchase_id)\n → Purchase.IsDraft=false
|
||||
|
||||
SA -> Client : **{WorkflowSchedule, Workflow, Executions}**
|
||||
end
|
||||
|
||||
note over SA, SB
|
||||
③ et ④ sont concurrents.
|
||||
En pratique : GenerateOrder + écritures DB côté A
|
||||
laissent le temps à B de recevoir et stocker ses drafts
|
||||
avant que A émette le Considers/WORKFLOW_EXECUTION.
|
||||
end note
|
||||
|
||||
@enduml
|
||||
58
go.mod
58
go.mod
@@ -1,22 +1,55 @@
|
||||
module oc-scheduler
|
||||
|
||||
go 1.23.0
|
||||
|
||||
toolchain go1.24.0
|
||||
go 1.25.0
|
||||
|
||||
require (
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260114125749-fa5b7543332d
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260304145747-e03a0d3dd0aa
|
||||
github.com/beego/beego/v2 v2.3.8
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/robfig/cron v1.2.0
|
||||
github.com/smartystreets/goconvey v1.7.2
|
||||
go.mongodb.org/mongo-driver v1.17.4
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/google/gnostic-models v0.7.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
k8s.io/api v0.35.1 // indirect
|
||||
k8s.io/apimachinery v0.35.1 // indirect
|
||||
k8s.io/client-go v0.35.1 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
|
||||
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
|
||||
sigs.k8s.io/yaml v1.6.0 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/biter777/countries v1.7.5 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.9 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
@@ -24,11 +57,13 @@ require (
|
||||
github.com/golang/snappy v1.0.0 // indirect
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 // indirect
|
||||
github.com/goraz/onion v0.1.3 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674
|
||||
github.com/hashicorp/golang-lru v1.0.2 // indirect
|
||||
github.com/jtolds/gls v4.20.0+incompatible // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/leodido/go-urn v1.4.0 // indirect
|
||||
github.com/libp2p/go-libp2p/core v0.43.0-rc2 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
@@ -42,8 +77,7 @@ require (
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.65.0 // indirect
|
||||
github.com/prometheus/procfs v0.17.0 // indirect
|
||||
github.com/robfig/cron v1.2.0 // indirect
|
||||
github.com/rogpeppe/go-internal v1.11.0 // indirect
|
||||
github.com/rogpeppe/go-internal v1.14.1 // indirect
|
||||
github.com/rs/zerolog v1.34.0 // indirect
|
||||
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 // indirect
|
||||
github.com/smartystreets/assertions v1.2.0 // indirect
|
||||
@@ -51,11 +85,11 @@ require (
|
||||
github.com/xdg-go/scram v1.1.2 // indirect
|
||||
github.com/xdg-go/stringprep v1.0.4 // indirect
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
|
||||
golang.org/x/crypto v0.40.0 // indirect
|
||||
golang.org/x/net v0.42.0 // indirect
|
||||
golang.org/x/sync v0.16.0 // indirect
|
||||
golang.org/x/sys v0.34.0 // indirect
|
||||
golang.org/x/text v0.27.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
golang.org/x/crypto v0.44.0 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
google.golang.org/protobuf v1.36.8 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
200
go.sum
200
go.sum
@@ -1,37 +1,21 @@
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250630120603-3971d5ca5d7b h1:ld3dxfjFcquqMiq9Exm8kiNg9WNWPOaCyzUly4pi4sc=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250630120603-3971d5ca5d7b/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250704084459-443546027b27 h1:iogk6pV3gybzQDBXMI6Qd/jvSA1h+3oRE+vLl1MRjew=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250704084459-443546027b27/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250707143058-365b924e4b9d h1:9utgm0JRYtbzSQDmEmRbyzOfshKaQyK/EpDqMJOdKpA=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250707143058-365b924e4b9d/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250708115955-346275e12cb4 h1:OxDo6/lucAYbCtTw3ZoOK/z/M4HxNgd+wClT17Z8UJg=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250708115955-346275e12cb4/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250709145437-4e3ff9aa086b h1:PagTxoBr/LomQuTA7HL8q1vuNNDfdvFHAKi4pjGwf1M=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250709145437-4e3ff9aa086b/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250709154237-83e590d4e190 h1:/8uQ2nkJnv13K0+BL/QbxaVJI+oAOq5A/aBPgNrsjbQ=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250709154237-83e590d4e190/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250710094754-98a2359c9d9f h1:PZ8yVeZ4q85lMQ06KIRyHkSJnrlFf78fxgV2fjzZHqc=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250710094754-98a2359c9d9f/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250730160123-76d83878ebd3 h1:SCG9evvlT1yrYi9mxvIX2hZaQAuv33AdH6rKqAOH6yg=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250730160123-76d83878ebd3/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250730161555-a093369dc5a2 h1:M6bVZ08gSYnwOHWS/zqNe8+7xwc4zewjmxDor5kBXqo=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250730161555-a093369dc5a2/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250730162109-be2a1cc11474 h1:LpC+PkWmzKcsqKJbaqDiHnO5UxeGaJtscJ2aEqMXD0I=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250730162109-be2a1cc11474/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250731135305-cc3091d401ea h1:yJ4cdFycOw8+X97gh8e33piztu6J0V+iWWkVtvx9V/g=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250731135305-cc3091d401ea/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250805095627-76e9b2562e9b h1:ktjmh3VA0gb+TAfbnQNX0XAGUpA6HYm9p9myyvYL1IE=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250805095627-76e9b2562e9b/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250805112547-cc939451fd81 h1:539qIasa1Vz+FY8nEdLTQHXJqZBSLDuRY7mWo2r+vDg=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250805112547-cc939451fd81/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250805113921-40a61387b9f1 h1:53KzZ+1JqRY6J7EVzQpNBmLzNuxb8oHNW3UgqxkYABo=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20250805113921-40a61387b9f1/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260113150431-6d745fe92216 h1:9ab37/TK1JhdOOvYbqq9J9hDbipofBkq0l2GQ6umARY=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260113150431-6d745fe92216/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260114125532-0e378dc19c06 h1:kDTCqxzV8dvLeXPzPWIn4LgFqwgVprrXwNnP+ftA9C0=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260114125532-0e378dc19c06/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260114125749-fa5b7543332d h1:6oGSN4Fb+H7LNVbUEN7vaDtWBHZTdd2Y1BkBdZ7MLXE=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260114125749-fa5b7543332d/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260223141827-5d32b4646a86 h1:/7XYbCzzo062lYbyBM3MA7KLrJII9iCQzvw4T5g/4oY=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260223141827-5d32b4646a86/go.mod h1:jmyBwmsac/4V7XPL347qawF60JsBCDmNAMfn/ySXKYo=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260223142248-b08bbf51ddc5 h1:qxLz4rrFxB1dmJa0/Q6AWBwQgmVt7LVXB0RgwpGYeXE=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260223142248-b08bbf51ddc5/go.mod h1:jmyBwmsac/4V7XPL347qawF60JsBCDmNAMfn/ySXKYo=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260223144148-f28e2c362020 h1:F7Ifw3WgtCnDur1p5+EuFZrM9yy7KSWoIyDQ8opQE90=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260223144148-f28e2c362020/go.mod h1:jmyBwmsac/4V7XPL347qawF60JsBCDmNAMfn/ySXKYo=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260223145010-e10bb5545561 h1:q5m2UMsEgrfN0OJsoa4Sme0v4OO1pnIt8OsAwdL+5/A=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260223145010-e10bb5545561/go.mod h1:jmyBwmsac/4V7XPL347qawF60JsBCDmNAMfn/ySXKYo=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260223145640-e039fa56b64c h1:3PRvQdSSGjmw+Txkf0zWs3F+V9URq22zQCLR3o7bNBY=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260223145640-e039fa56b64c/go.mod h1:jmyBwmsac/4V7XPL347qawF60JsBCDmNAMfn/ySXKYo=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260223162637-ff830065ec27 h1:cw3R1/Ivlr3W1XZ2cCHRrLB6UG/3dhdvG0i+P5W1tYc=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260223162637-ff830065ec27/go.mod h1:jmyBwmsac/4V7XPL347qawF60JsBCDmNAMfn/ySXKYo=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260224092928-54aef164ba10 h1:9i8fDtGjg3JDniCO7VGtkd8zHXWze7OJ3tvO4mZnBmY=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260224092928-54aef164ba10/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260224093610-a9ebad78f3a8 h1:xoC5PAz1469QxrNm8rrsq5+BtwshEt+L2Nhf90MrqrM=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260224093610-a9ebad78f3a8/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260304145747-e03a0d3dd0aa h1:1wCpI4dwN1pj6MlpJ7/WifhHVHmCE4RU+9klwqgo/bk=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260304145747-e03a0d3dd0aa/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/beego/beego/v2 v2.3.8 h1:wplhB1pF4TxR+2SS4PUej8eDoH4xGfxuHfS7wAk9VBc=
|
||||
github.com/beego/beego/v2 v2.3.8/go.mod h1:8vl9+RrXqvodrl9C8yivX1e6le6deCK6RWeq8R7gTTg=
|
||||
@@ -49,25 +33,43 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
|
||||
github.com/elazarl/go-bindata-assetfs v1.0.1 h1:m0kkaHRKEu7tUIUFVwhGGGYClXvyl4RE03qmvRTNfbw=
|
||||
github.com/elazarl/go-bindata-assetfs v1.0.1/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/etcd-io/etcd v3.3.17+incompatible/go.mod h1:cdZ77EstHBwVtD6iTgzgvogwcjo9m4iOqoijouPJ4bs=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
|
||||
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
||||
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
||||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
||||
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
||||
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
||||
github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k=
|
||||
github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
|
||||
github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4=
|
||||
github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
|
||||
github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
|
||||
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
@@ -77,23 +79,44 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGa
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/goraz/onion v0.1.3 h1:KhyvbDA2b70gcz/d5izfwTiOH8SmrvV43AsVzpng3n0=
|
||||
github.com/goraz/onion v0.1.3/go.mod h1:XEmz1XoBz+wxTgWB8NwuvRm4RAu3vKxvrmYtzK+XCuQ=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
|
||||
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
|
||||
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
||||
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
||||
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
|
||||
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
|
||||
github.com/libp2p/go-libp2p/core v0.43.0-rc2 h1:1X1aDJNWhMfodJ/ynbaGLkgnC8f+hfBIqQDrzxFZOqI=
|
||||
github.com/libp2p/go-libp2p/core v0.43.0-rc2/go.mod h1:NYeJ9lvyBv9nbDk2IuGb8gFKEOkIv/W5YRIy1pAJB2Q=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||
@@ -101,19 +124,39 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
|
||||
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
|
||||
github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
|
||||
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
|
||||
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
|
||||
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
|
||||
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
|
||||
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
|
||||
github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc=
|
||||
github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
|
||||
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
|
||||
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
|
||||
github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
|
||||
github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
|
||||
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
|
||||
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
|
||||
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
|
||||
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/nats-io/nats.go v1.43.0 h1:uRFZ2FEoRvP64+UUhaTokyS18XBCR/xM2vQZKO4i8ug=
|
||||
github.com/nats-io/nats.go v1.43.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
|
||||
github.com/nats-io/nats.go v1.44.0 h1:ECKVrDLdh/kDPV1g0gAQ+2+m2KprqZK5O/eJAyAnH2M=
|
||||
github.com/nats-io/nats.go v1.44.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
|
||||
github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0=
|
||||
@@ -126,22 +169,19 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
||||
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
|
||||
github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
|
||||
github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
|
||||
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
|
||||
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
|
||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
|
||||
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
|
||||
github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
|
||||
github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
|
||||
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
|
||||
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
|
||||
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
|
||||
@@ -154,10 +194,19 @@ github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYl
|
||||
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
|
||||
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
|
||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
|
||||
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
|
||||
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
|
||||
@@ -169,29 +218,39 @@ github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfS
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw=
|
||||
go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
|
||||
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
|
||||
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
|
||||
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
|
||||
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
|
||||
golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
|
||||
golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
|
||||
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=
|
||||
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
|
||||
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
||||
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
|
||||
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
|
||||
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -202,20 +261,24 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
|
||||
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
|
||||
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
|
||||
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
|
||||
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
|
||||
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
|
||||
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
@@ -223,10 +286,39 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
|
||||
google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
k8s.io/api v0.35.1 h1:0PO/1FhlK/EQNVK5+txc4FuhQibV25VLSdLMmGpDE/Q=
|
||||
k8s.io/api v0.35.1/go.mod h1:28uR9xlXWml9eT0uaGo6y71xK86JBELShLy4wR1XtxM=
|
||||
k8s.io/apimachinery v0.35.1 h1:yxO6gV555P1YV0SANtnTjXYfiivaTPvCTKX6w6qdDsU=
|
||||
k8s.io/apimachinery v0.35.1/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
|
||||
k8s.io/client-go v0.35.1 h1:+eSfZHwuo/I19PaSxqumjqZ9l5XiTEKbIaJ+j1wLcLM=
|
||||
k8s.io/client-go v0.35.1/go.mod h1:1p1KxDt3a0ruRfc/pG4qT/3oHmUj1AhSHEcxNSGg+OA=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
|
||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
|
||||
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
|
||||
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
|
||||
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
|
||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
|
||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
||||
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
|
||||
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
|
||||
|
||||
698
infrastructure/nats.go
Normal file
698
infrastructure/nats.go
Normal file
@@ -0,0 +1,698 @@
|
||||
package infrastructure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"oc-scheduler/conf"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
oclib "cloud.o-forge.io/core/oc-lib"
|
||||
"cloud.o-forge.io/core/oc-lib/config"
|
||||
"cloud.o-forge.io/core/oc-lib/models/booking"
|
||||
"cloud.o-forge.io/core/oc-lib/models/booking/planner"
|
||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/models/workflow"
|
||||
"cloud.o-forge.io/core/oc-lib/models/workflow/graph"
|
||||
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
"github.com/nats-io/nats.go"
|
||||
)
|
||||
|
||||
const plannerTTL = 24 * time.Hour
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Planner cache — protected by plannerMu
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
var plannerMu sync.RWMutex
|
||||
var PlannerCache = map[string]*planner.Planner{}
|
||||
var plannerAddedAt = map[string]time.Time{} // peerID → first-seen timestamp
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Subscriber registries — one keyed by peerID, one by workflowID
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
var subsMu sync.RWMutex
|
||||
var plannerSubs = map[string][]chan struct{}{} // peerID → notification channels
|
||||
var workflowSubs = map[string][]chan struct{}{} // workflowID → notification channels
|
||||
|
||||
// SubscribePlannerUpdates registers interest in planner changes for the given
|
||||
// peer IDs. The returned channel receives one struct{} (non-blocking) each time
|
||||
// any of those planners is updated. Call cancel to unregister.
|
||||
func SubscribePlannerUpdates(peerIDs []string) (<-chan struct{}, func()) {
|
||||
return subscribe(&subsMu, plannerSubs, peerIDs)
|
||||
}
|
||||
|
||||
// SubscribeWorkflowUpdates registers interest in workflow modifications for the
|
||||
// given workflow ID. The returned channel is signalled when the workflow changes
|
||||
// (peer list may have grown or shrunk). Call cancel to unregister.
|
||||
func SubscribeWorkflowUpdates(wfID string) (<-chan struct{}, func()) {
|
||||
ch, cancel := subscribe(&subsMu, workflowSubs, []string{wfID})
|
||||
return ch, cancel
|
||||
}
|
||||
|
||||
// subscribe is the generic helper used by both registries.
|
||||
func subscribe(mu *sync.RWMutex, registry map[string][]chan struct{}, keys []string) (<-chan struct{}, func()) {
|
||||
ch := make(chan struct{}, 1)
|
||||
mu.Lock()
|
||||
for _, k := range keys {
|
||||
registry[k] = append(registry[k], ch)
|
||||
}
|
||||
mu.Unlock()
|
||||
cancel := func() {
|
||||
mu.Lock()
|
||||
for _, k := range keys {
|
||||
subs := registry[k]
|
||||
for i, s := range subs {
|
||||
if s == ch {
|
||||
registry[k] = append(subs[:i], subs[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
mu.Unlock()
|
||||
}
|
||||
return ch, cancel
|
||||
}
|
||||
|
||||
func notifyPlannerWatchers(peerID string) {
|
||||
notify(&subsMu, plannerSubs, peerID)
|
||||
}
|
||||
|
||||
func notifyWorkflowWatchers(wfID string) {
|
||||
notify(&subsMu, workflowSubs, wfID)
|
||||
}
|
||||
|
||||
func notify(mu *sync.RWMutex, registry map[string][]chan struct{}, key string) {
|
||||
mu.RLock()
|
||||
subs := registry[key]
|
||||
mu.RUnlock()
|
||||
for _, ch := range subs {
|
||||
select {
|
||||
case ch <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Cache helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// storePlanner inserts or updates a planner for peerID.
|
||||
// On first insertion it schedules an automatic eviction after plannerTTL.
|
||||
// All subscribers interested in this peer are notified.
|
||||
func storePlanner(peerID string, p *planner.Planner) {
|
||||
plannerMu.Lock()
|
||||
isNew := PlannerCache[peerID] == nil
|
||||
PlannerCache[peerID] = p
|
||||
if isNew {
|
||||
plannerAddedAt[peerID] = time.Now()
|
||||
go evictAfter(peerID, plannerTTL)
|
||||
}
|
||||
plannerMu.Unlock()
|
||||
notifyPlannerWatchers(peerID)
|
||||
}
|
||||
|
||||
// evictAfter waits ttl from the first-seen time for peerID then emits a
|
||||
// PB_CLOSE_PLANNER event, which removes the entry from the cache and notifies
|
||||
// NATS.
|
||||
func evictAfter(peerID string, ttl time.Duration) {
|
||||
time.Sleep(ttl)
|
||||
plannerMu.RLock()
|
||||
_, exists := PlannerCache[peerID]
|
||||
plannerMu.RUnlock()
|
||||
if exists {
|
||||
EmitNATS(peerID, tools.PropalgationMessage{Action: tools.PB_CLOSE_PLANNER})
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// NATS emission
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func EmitNATS(peerID string, message tools.PropalgationMessage) {
|
||||
if message.Action == tools.PB_CLOSE_PLANNER {
|
||||
plannerMu.Lock()
|
||||
delete(PlannerCache, peerID)
|
||||
delete(plannerAddedAt, peerID)
|
||||
plannerMu.Unlock()
|
||||
notifyPlannerWatchers(peerID) // let streams re-evaluate (will warn "no planner")
|
||||
}
|
||||
b, _ := json.Marshal(message)
|
||||
tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
|
||||
FromApp: "oc-scheduler",
|
||||
Datatype: -1,
|
||||
Method: int(tools.PROPALGATION_EVENT),
|
||||
Payload: b,
|
||||
})
|
||||
}
|
||||
|
||||
type executionConsidersPayload struct {
|
||||
ID string `json:"id"`
|
||||
ExecutionsID string `json:"executions_id"`
|
||||
ExecutionID string `json:"execution_id"`
|
||||
PeerIDs []string `json:"peer_ids"`
|
||||
}
|
||||
|
||||
// emitConsiders broadcasts a PROPALGATION_EVENT with the Considers action,
|
||||
// carrying the stored resource ID and its datatype (BOOKING or PURCHASE_RESOURCE).
|
||||
func emitConsiders(id string, executionID string, dt tools.DataType) {
|
||||
access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), nil)
|
||||
data := access.LoadOne(executionID)
|
||||
if data.ToWorkflowExecution() != nil {
|
||||
exec := data.ToWorkflowExecution()
|
||||
if peers, err := GetWorkflowPeerIDs(exec.WorkflowID, &tools.APIRequest{Admin: true}); err == nil {
|
||||
payload, _ := json.Marshal(&executionConsidersPayload{
|
||||
ID: id,
|
||||
ExecutionsID: exec.ExecutionsID,
|
||||
ExecutionID: executionID,
|
||||
PeerIDs: peers,
|
||||
})
|
||||
b, _ := json.Marshal(tools.PropalgationMessage{
|
||||
DataType: int(dt),
|
||||
Action: tools.PB_CONSIDERS,
|
||||
Payload: payload,
|
||||
})
|
||||
tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
|
||||
FromApp: "oc-scheduler",
|
||||
Datatype: dt,
|
||||
Method: int(tools.PROPALGATION_EVENT),
|
||||
Payload: b,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// EmitConsidersExecution broadcasts a Considers / WORKFLOW_EXECUTION message to all
|
||||
// storage and compute peers of wf once the execution has transitioned to SCHEDULED.
|
||||
// Each receiving peer will use it to confirm (IsDraft=false) their local drafts.
|
||||
func EmitConsidersExecution(exec *workflow_execution.WorkflowExecution, wf *workflow.Workflow) {
|
||||
if wf == nil || wf.Graph == nil {
|
||||
return
|
||||
}
|
||||
peerIDs, err := GetWorkflowPeerIDs(wf.GetID(), &tools.APIRequest{Admin: true})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if len(peerIDs) == 0 {
|
||||
return
|
||||
}
|
||||
payload, err := json.Marshal(executionConsidersPayload{
|
||||
ID: exec.GetID(),
|
||||
ExecutionID: exec.GetID(),
|
||||
ExecutionsID: exec.ExecutionsID,
|
||||
PeerIDs: peerIDs})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
b, err := json.Marshal(tools.PropalgationMessage{
|
||||
DataType: int(tools.WORKFLOW_EXECUTION),
|
||||
Action: tools.PB_CONSIDERS,
|
||||
Payload: payload,
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
|
||||
FromApp: "oc-scheduler",
|
||||
Datatype: tools.WORKFLOW_EXECUTION,
|
||||
Method: int(tools.PROPALGATION_EVENT),
|
||||
Payload: b,
|
||||
})
|
||||
}
|
||||
|
||||
// updateExecutionState sets BookingsState[id]=true (dt==BOOKING) or
|
||||
// PurchasesState[id]=true (dt==PURCHASE_RESOURCE) on the target execution.
|
||||
// payload must be JSON-encoded {"id":"...", "execution_id":"..."}.
|
||||
func updateExecutionState(payload []byte, dt tools.DataType) {
|
||||
var data executionConsidersPayload
|
||||
if err := json.Unmarshal(payload, &data); err != nil || data.ID == "" || data.ExecutionID == "" {
|
||||
return
|
||||
}
|
||||
adminReq := &tools.APIRequest{Admin: true}
|
||||
res, _, err := workflow_execution.NewAccessor(adminReq).LoadOne(data.ExecutionID)
|
||||
if err != nil || res == nil {
|
||||
fmt.Printf("updateExecutionState: could not load execution %s: %v\n", data.ExecutionID, err)
|
||||
return
|
||||
}
|
||||
exec := res.(*workflow_execution.WorkflowExecution)
|
||||
switch dt {
|
||||
case tools.BOOKING:
|
||||
if exec.BookingsState == nil {
|
||||
exec.BookingsState = map[string]bool{}
|
||||
}
|
||||
exec.BookingsState[data.ID] = true
|
||||
case tools.PURCHASE_RESOURCE:
|
||||
if exec.PurchasesState == nil {
|
||||
exec.PurchasesState = map[string]bool{}
|
||||
}
|
||||
exec.PurchasesState[data.ID] = true
|
||||
}
|
||||
found := true
|
||||
for _, st := range exec.BookingsState {
|
||||
if !st {
|
||||
found = false
|
||||
break
|
||||
}
|
||||
}
|
||||
for _, st := range exec.PurchasesState {
|
||||
if !st {
|
||||
found = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if found {
|
||||
exec.State = enum.SCHEDULED
|
||||
}
|
||||
if _, _, err := utils.GenericRawUpdateOne(exec, data.ExecutionID, workflow_execution.NewAccessor(adminReq)); err != nil {
|
||||
fmt.Printf("updateExecutionState: could not update execution %s: %v\n", data.ExecutionID, err)
|
||||
}
|
||||
}
|
||||
|
||||
// confirmExecutionDrafts is called when a Considers/WORKFLOW_EXECUTION message
|
||||
// is received from oc-discovery, meaning the originating peer has confirmed the
|
||||
// execution as SCHEDULED. For every booking and purchase ID listed in the
|
||||
// execution's states, we confirm the local draft (IsDraft=false).
|
||||
func confirmExecutionDrafts(payload []byte) {
|
||||
var data executionConsidersPayload
|
||||
if err := json.Unmarshal(payload, &data); err != nil {
|
||||
fmt.Printf("confirmExecutionDrafts: could not parse payload: %v\n", err)
|
||||
return
|
||||
}
|
||||
access := oclib.NewRequestAdmin(oclib.LibDataEnum(tools.WORKFLOW_EXECUTION), nil)
|
||||
d := access.LoadOne(data.ExecutionID)
|
||||
if exec := d.ToWorkflowExecution(); exec != nil {
|
||||
for id := range exec.BookingsState {
|
||||
go confirmResource(id, tools.BOOKING)
|
||||
}
|
||||
for id := range exec.PurchasesState {
|
||||
go confirmResource(id, tools.PURCHASE_RESOURCE)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// NATS listeners
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func ListenNATS() {
|
||||
tools.NewNATSCaller().ListenNats(map[tools.NATSMethod]func(tools.NATSResponse){
|
||||
// Receive planner snapshots pushed by oc-discovery and cache them.
|
||||
// Considers messages:
|
||||
// BOOKING / PURCHASE_RESOURCE → mark the individual resource as
|
||||
// considered in the target WorkflowExecution (BookingsState / PurchasesState).
|
||||
// WORKFLOW_EXECUTION → the execution reached SCHEDULED; confirm all
|
||||
// local draft bookings and purchases listed in its states.
|
||||
tools.PROPALGATION_EVENT: func(resp tools.NATSResponse) {
|
||||
if resp.FromApp != "oc-discovery" {
|
||||
return
|
||||
}
|
||||
var prop tools.PropalgationMessage
|
||||
if err := json.Unmarshal(resp.Payload, &prop); err != nil {
|
||||
return
|
||||
}
|
||||
switch prop.Action {
|
||||
case tools.PB_PLANNER:
|
||||
m := map[string]interface{}{}
|
||||
p := planner.Planner{}
|
||||
if err := json.Unmarshal(prop.Payload, &m); err != nil {
|
||||
return
|
||||
}
|
||||
if err := json.Unmarshal(prop.Payload, &p); err != nil {
|
||||
return
|
||||
}
|
||||
storePlanner(fmt.Sprintf("%v", m["peer_id"]), &p)
|
||||
case tools.PB_CONSIDERS:
|
||||
switch tools.DataType(prop.DataType) {
|
||||
case tools.BOOKING, tools.PURCHASE_RESOURCE:
|
||||
updateExecutionState(prop.Payload, tools.DataType(prop.DataType))
|
||||
case tools.WORKFLOW_EXECUTION:
|
||||
confirmExecutionDrafts(prop.Payload)
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
// Incoming resource creation events:
|
||||
// - WORKFLOW → refresh peer planner entries and notify CheckStream watchers.
|
||||
// - BOOKING → if destined for us, validate, store as draft, start 10-min
|
||||
// expiry timer, and emit a "considers_booking" response.
|
||||
// - PURCHASE → if destined for us, store as draft, start 10-min expiry
|
||||
// timer, and emit a "considers_purchase" response.
|
||||
tools.REMOVE_RESOURCE: func(resp tools.NATSResponse) {
|
||||
switch resp.Datatype {
|
||||
case tools.WORKFLOW:
|
||||
wf := workflow.Workflow{}
|
||||
if err := json.Unmarshal(resp.Payload, &wf); err != nil {
|
||||
return
|
||||
}
|
||||
notifyWorkflowWatchers(wf.GetID())
|
||||
}
|
||||
},
|
||||
tools.CREATE_RESOURCE: func(resp tools.NATSResponse) {
|
||||
switch resp.Datatype {
|
||||
case tools.WORKFLOW:
|
||||
wf := workflow.Workflow{}
|
||||
if err := json.Unmarshal(resp.Payload, &wf); err != nil {
|
||||
return
|
||||
}
|
||||
broadcastPlanner(&wf)
|
||||
notifyWorkflowWatchers(wf.GetID())
|
||||
case tools.BOOKING:
|
||||
var bk booking.Booking
|
||||
if err := json.Unmarshal(resp.Payload, &bk); err != nil {
|
||||
return
|
||||
}
|
||||
self, err := oclib.GetMySelf()
|
||||
if err != nil || self == nil || bk.DestPeerID != self.GetID() {
|
||||
return
|
||||
}
|
||||
// Reject bookings whose start date is already in the past.
|
||||
if !bk.ExpectedStartDate.IsZero() && bk.ExpectedStartDate.Before(time.Now()) {
|
||||
fmt.Println("ListenNATS: booking start date is in the past, discarding")
|
||||
return
|
||||
}
|
||||
// Verify the slot is free in our planner (if we have one).
|
||||
plannerMu.RLock()
|
||||
p := PlannerCache[self.PeerID]
|
||||
plannerMu.RUnlock()
|
||||
if p != nil && !checkInstance(p, bk.ResourceID, bk.InstanceID, bk.ExpectedStartDate, bk.ExpectedEndDate) {
|
||||
fmt.Println("ListenNATS: booking conflicts with local planner, discarding")
|
||||
return
|
||||
}
|
||||
adminReq := &tools.APIRequest{Admin: true}
|
||||
bk.IsDraft = true
|
||||
stored, _, err := booking.NewAccessor(adminReq).StoreOne(&bk)
|
||||
if err != nil {
|
||||
fmt.Println("ListenNATS: could not store booking:", err)
|
||||
return
|
||||
}
|
||||
storedID := stored.GetID()
|
||||
go refreshSelfPlanner(self.PeerID, adminReq)
|
||||
time.AfterFunc(10*time.Minute, func() { draftTimeout(storedID, tools.BOOKING) })
|
||||
go emitConsiders(storedID, stored.(*booking.Booking).ExecutionID, tools.BOOKING)
|
||||
|
||||
case tools.PURCHASE_RESOURCE:
|
||||
var pr purchase_resource.PurchaseResource
|
||||
if err := json.Unmarshal(resp.Payload, &pr); err != nil {
|
||||
return
|
||||
}
|
||||
self, err := oclib.GetMySelf()
|
||||
if err != nil || self == nil || pr.DestPeerID != self.GetID() {
|
||||
return
|
||||
}
|
||||
adminReq := &tools.APIRequest{Admin: true}
|
||||
pr.IsDraft = true
|
||||
stored, _, err := purchase_resource.NewAccessor(adminReq).StoreOne(&pr)
|
||||
if err != nil {
|
||||
fmt.Println("ListenNATS: could not store purchase:", err)
|
||||
return
|
||||
}
|
||||
storedID := stored.GetID()
|
||||
time.AfterFunc(10*time.Minute, func() { draftTimeout(storedID, tools.PURCHASE_RESOURCE) })
|
||||
go emitConsiders(storedID, stored.(*purchase_resource.PurchaseResource).ExecutionID, tools.PURCHASE_RESOURCE)
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Draft timeout
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// draftTimeout deletes a booking or purchase resource if it is still a draft
|
||||
// after the 10-minute confirmation window has elapsed.
|
||||
func draftTimeout(id string, dt tools.DataType) {
|
||||
adminReq := &tools.APIRequest{Admin: true}
|
||||
var res utils.DBObject
|
||||
var loadErr error
|
||||
switch dt {
|
||||
case tools.BOOKING:
|
||||
res, _, loadErr = booking.NewAccessor(adminReq).LoadOne(id)
|
||||
case tools.PURCHASE_RESOURCE:
|
||||
res, _, loadErr = purchase_resource.NewAccessor(adminReq).LoadOne(id)
|
||||
default:
|
||||
return
|
||||
}
|
||||
if loadErr != nil || res == nil || !res.IsDrafted() {
|
||||
return
|
||||
}
|
||||
switch dt {
|
||||
case tools.BOOKING:
|
||||
booking.NewAccessor(adminReq).DeleteOne(id)
|
||||
case tools.PURCHASE_RESOURCE:
|
||||
purchase_resource.NewAccessor(adminReq).DeleteOne(id)
|
||||
}
|
||||
fmt.Printf("draftTimeout: %s %s deleted (still draft after 10 min)\n", dt.String(), id)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Confirm channels
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// confirmResource sets IsDraft=false for a booking or purchase resource.
|
||||
// For bookings it also advances State to SCHEDULED and refreshes the local planner.
|
||||
func confirmResource(id string, dt tools.DataType) {
|
||||
adminReq := &tools.APIRequest{Admin: true}
|
||||
switch dt {
|
||||
case tools.BOOKING:
|
||||
res, _, err := booking.NewAccessor(adminReq).LoadOne(id)
|
||||
if err != nil || res == nil {
|
||||
fmt.Printf("confirmResource: could not load booking %s: %v\n", id, err)
|
||||
return
|
||||
}
|
||||
bk := res.(*booking.Booking)
|
||||
bk.IsDraft = false
|
||||
bk.State = enum.SCHEDULED
|
||||
if _, _, err := utils.GenericRawUpdateOne(bk, id, booking.NewAccessor(adminReq)); err != nil {
|
||||
fmt.Printf("confirmResource: could not confirm booking %s: %v\n", id, err)
|
||||
return
|
||||
}
|
||||
createNamespace(bk.ExecutionsID) // create Namespace locally
|
||||
self, err := oclib.GetMySelf()
|
||||
if err == nil && self != nil {
|
||||
go refreshSelfPlanner(self.PeerID, adminReq)
|
||||
}
|
||||
case tools.PURCHASE_RESOURCE:
|
||||
res, _, err := purchase_resource.NewAccessor(adminReq).LoadOne(id)
|
||||
if err != nil || res == nil {
|
||||
fmt.Printf("confirmResource: could not load purchase %s: %v\n", id, err)
|
||||
return
|
||||
}
|
||||
pr := res.(*purchase_resource.PurchaseResource)
|
||||
pr.IsDraft = false
|
||||
if _, _, err := utils.GenericRawUpdateOne(pr, id, purchase_resource.NewAccessor(adminReq)); err != nil {
|
||||
fmt.Printf("confirmResource: could not confirm purchase %s: %v\n", id, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// listenConfirmChannel subscribes to a NATS subject and calls confirmResource
|
||||
// for each message received. The message body is expected to be the plain
|
||||
// resource ID (UTF-8 string).
|
||||
func listenConfirmChannel(nc *nats.Conn, subject string, dt tools.DataType, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
ch := make(chan *nats.Msg, 64)
|
||||
sub, err := nc.ChanSubscribe(subject, ch)
|
||||
if err != nil {
|
||||
fmt.Printf("listenConfirmChannel: could not subscribe to %s: %v\n", subject, err)
|
||||
return
|
||||
}
|
||||
defer sub.Unsubscribe()
|
||||
for msg := range ch {
|
||||
confirmResource(string(msg.Data), dt)
|
||||
}
|
||||
}
|
||||
|
||||
// ListenConfirm opens a direct NATS connection and subscribes to the hardcoded
|
||||
// "confirm_booking" and "confirm_purchase" subjects. It reconnects automatically
|
||||
// if the connection is lost.
|
||||
func ListenConfirm() {
|
||||
natsURL := config.GetConfig().NATSUrl
|
||||
if natsURL == "" {
|
||||
fmt.Println("ListenConfirm: NATS_SERVER not set, skipping confirm listeners")
|
||||
return
|
||||
}
|
||||
for {
|
||||
nc, err := nats.Connect(natsURL)
|
||||
if err != nil {
|
||||
fmt.Println("ListenConfirm: could not connect to NATS:", err)
|
||||
time.Sleep(time.Minute)
|
||||
continue
|
||||
}
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
go listenConfirmChannel(nc, "confirm_booking", tools.BOOKING, &wg)
|
||||
go listenConfirmChannel(nc, "confirm_purchase", tools.PURCHASE_RESOURCE, &wg)
|
||||
wg.Wait()
|
||||
nc.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Self-planner initialisation
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// InitSelfPlanner bootstraps our own planner entry at startup.
|
||||
// It waits (with 15-second retries) for our peer record to be present in the
|
||||
// database before generating the first planner snapshot and broadcasting it
|
||||
// on PB_PLANNER. This handles the race between oc-scheduler starting before
|
||||
// oc-peer has fully registered our node.
|
||||
func InitSelfPlanner() {
|
||||
for {
|
||||
self, err := oclib.GetMySelf()
|
||||
if err != nil || self == nil {
|
||||
fmt.Println("InitSelfPlanner: self peer not found yet, retrying in 15s...")
|
||||
time.Sleep(15 * time.Second)
|
||||
continue
|
||||
}
|
||||
refreshSelfPlanner(self.PeerID, &tools.APIRequest{Admin: true})
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Self-planner refresh
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// refreshSelfPlanner regenerates the local planner from the current state of
|
||||
// the booking DB, stores it in PlannerCache under our own node UUID, and
|
||||
// broadcasts it on PROPALGATION_EVENT / PB_PLANNER so all listeners (including
|
||||
// oc-discovery) are kept in sync.
|
||||
//
|
||||
// It should be called whenever a booking for our own peer is created, whether
|
||||
// by direct DB insertion (self-peer routing) or upon receiving a CREATE_RESOURCE
|
||||
// BOOKING message from oc-discovery.
|
||||
func refreshSelfPlanner(peerID string, request *tools.APIRequest) {
|
||||
p, err := planner.GenerateShallow(request)
|
||||
if err != nil {
|
||||
fmt.Println("refreshSelfPlanner: could not generate planner:", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Update the local cache and notify any waiting CheckStream goroutines.
|
||||
storePlanner(peerID, p)
|
||||
|
||||
// Broadcast the updated planner so remote peers (and oc-discovery) can
|
||||
// refresh their view of our availability.
|
||||
type plannerWithPeer struct {
|
||||
PeerID string `json:"peer_id"`
|
||||
*planner.Planner
|
||||
}
|
||||
plannerPayload, err := json.Marshal(plannerWithPeer{PeerID: peerID, Planner: p})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
EmitNATS(peerID, tools.PropalgationMessage{
|
||||
Action: tools.PB_PLANNER,
|
||||
Payload: plannerPayload,
|
||||
})
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Planner broadcast
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// broadcastPlanner iterates the storage and compute peers of the given workflow
|
||||
// and, for each peer not yet in the cache, emits a PB_PLANNER propagation so
|
||||
// downstream consumers (oc-discovery, other schedulers) refresh their state.
|
||||
func broadcastPlanner(wf *workflow.Workflow) {
|
||||
if wf.Graph == nil {
|
||||
return
|
||||
}
|
||||
items := []graph.GraphItem{}
|
||||
items = append(items, wf.GetGraphItems(wf.Graph.IsStorage)...)
|
||||
items = append(items, wf.GetGraphItems(wf.Graph.IsCompute)...)
|
||||
|
||||
seen := []string{}
|
||||
for _, item := range items {
|
||||
i := item
|
||||
_, res := i.GetResource()
|
||||
if res == nil {
|
||||
continue
|
||||
}
|
||||
creatorID := res.GetCreatorID()
|
||||
if slices.Contains(seen, creatorID) {
|
||||
continue
|
||||
}
|
||||
|
||||
data := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil).LoadOne(creatorID)
|
||||
p := data.ToPeer()
|
||||
if p == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
plannerMu.RLock()
|
||||
cached := PlannerCache[p.PeerID]
|
||||
plannerMu.RUnlock()
|
||||
|
||||
if cached == nil {
|
||||
payload, err := json.Marshal(map[string]interface{}{"peer_id": p.PeerID})
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
seen = append(seen, creatorID)
|
||||
EmitNATS(p.PeerID, tools.PropalgationMessage{
|
||||
Action: tools.PB_PLANNER,
|
||||
Payload: payload,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func createNamespace(ns string) error {
|
||||
/*
|
||||
* This function is used to create a namespace.
|
||||
* It takes the following parameters:
|
||||
* - ns: the namespace you want to create
|
||||
*/
|
||||
logger := oclib.GetLogger()
|
||||
serv, err := tools.NewKubernetesService(
|
||||
conf.GetConfig().KubeHost+":"+conf.GetConfig().KubePort, conf.GetConfig().KubeCA,
|
||||
conf.GetConfig().KubeCert, conf.GetConfig().KubeData)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
c := context.Background()
|
||||
|
||||
ok, err := serv.GetNamespace(c, ns)
|
||||
if ok != nil && err == nil {
|
||||
logger.Debug().Msg("A namespace with name " + ns + " already exists")
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = serv.CreateNamespace(c, ns)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = serv.CreateServiceAccount(c, ns)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
role := "argo-role"
|
||||
err = serv.CreateRole(c, ns, role,
|
||||
[][]string{
|
||||
{"coordination.k8s.io"},
|
||||
{""},
|
||||
{""}},
|
||||
[][]string{
|
||||
{"leases"},
|
||||
{"secrets"},
|
||||
{"pods"}},
|
||||
[][]string{
|
||||
{"get", "create", "update"},
|
||||
{"get"},
|
||||
{"patch"}})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return serv.CreateRoleBinding(c, ns, "argo-role-binding", role)
|
||||
}
|
||||
@@ -1,18 +1,21 @@
|
||||
package infrastructure
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
oclib "cloud.o-forge.io/core/oc-lib"
|
||||
"cloud.o-forge.io/core/oc-lib/models/bill"
|
||||
"cloud.o-forge.io/core/oc-lib/models/booking"
|
||||
"cloud.o-forge.io/core/oc-lib/models/booking/planner"
|
||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
||||
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
|
||||
"cloud.o-forge.io/core/oc-lib/models/order"
|
||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/models/workflow"
|
||||
@@ -112,20 +115,6 @@ func (ws *WorkflowSchedule) GetBuyAndBook(wfID string, request *tools.APIRequest
|
||||
purchased = append(purchased, exec.Buy(ws.SelectedBillingStrategy, ws.UUID, wfID, priceds)...)
|
||||
bookings = append(bookings, exec.Book(ws.UUID, wfID, priceds)...)
|
||||
}
|
||||
|
||||
errCh := make(chan error, len(bookings))
|
||||
var m sync.Mutex
|
||||
|
||||
for _, b := range bookings {
|
||||
go getBooking(b, request, errCh, &m)
|
||||
}
|
||||
|
||||
for i := 0; i < len(bookings); i++ {
|
||||
if err := <-errCh; err != nil {
|
||||
return false, wf, execs, purchased, bookings, err
|
||||
}
|
||||
}
|
||||
|
||||
return true, wf, execs, purchased, bookings, nil
|
||||
}
|
||||
|
||||
@@ -150,41 +139,6 @@ func (ws *WorkflowSchedule) GenerateOrder(purchases []*purchase_resource.Purchas
|
||||
}
|
||||
}
|
||||
|
||||
func getBooking(b *booking.Booking, request *tools.APIRequest, errCh chan error, m *sync.Mutex) {
|
||||
m.Lock()
|
||||
c, err := getCallerCopy(request, errCh)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
m.Unlock()
|
||||
|
||||
meth := c.URLS[tools.BOOKING][tools.GET]
|
||||
meth = strings.ReplaceAll(meth, ":id", b.ResourceID)
|
||||
meth = strings.ReplaceAll(meth, ":start_date", b.ExpectedStartDate.Format("2006-01-02T15:04:05"))
|
||||
meth = strings.ReplaceAll(meth, ":end_date", b.ExpectedEndDate.Format("2006-01-02T15:04:05"))
|
||||
c.URLS[tools.BOOKING][tools.GET] = meth
|
||||
_, err = (&peer.Peer{}).LaunchPeerExecution(b.DestPeerID, b.ResourceID, tools.BOOKING, tools.GET, nil, &c)
|
||||
|
||||
if err != nil {
|
||||
errCh <- fmt.Errorf("error on " + b.DestPeerID + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
errCh <- nil
|
||||
}
|
||||
|
||||
func getCallerCopy(request *tools.APIRequest, errCh chan error) (tools.HTTPCaller, error) {
|
||||
var c tools.HTTPCaller
|
||||
err := request.Caller.DeepCopy(c)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return tools.HTTPCaller{}, nil
|
||||
}
|
||||
c.URLS = request.Caller.URLS
|
||||
return c, err
|
||||
}
|
||||
|
||||
func (ws *WorkflowSchedule) Schedules(wfID string, request *tools.APIRequest) (*WorkflowSchedule, *workflow.Workflow, []*workflow_execution.WorkflowExecution, error) {
|
||||
if request == nil {
|
||||
return ws, nil, []*workflow_execution.WorkflowExecution{}, errors.New("no request found")
|
||||
@@ -204,27 +158,28 @@ func (ws *WorkflowSchedule) Schedules(wfID string, request *tools.APIRequest) (*
|
||||
}
|
||||
ws.Workflow = wf
|
||||
|
||||
var errCh = make(chan error, len(bookings))
|
||||
var m sync.Mutex
|
||||
// Resolve our own peer MongoDB-ID once; used to decide local vs NATS routing.
|
||||
selfID, _ := oclib.GetMySelf()
|
||||
|
||||
errCh := make(chan error, len(purchases))
|
||||
for _, purchase := range purchases {
|
||||
go ws.CallDatacenter(purchase, purchase.DestPeerID, tools.PURCHASE_RESOURCE, request, errCh, &m)
|
||||
purchase.IsDraft = true
|
||||
go propagateResource(purchase, purchase.DestPeerID, tools.PURCHASE_RESOURCE, selfID, request, errCh)
|
||||
}
|
||||
for i := 0; i < len(purchases); i++ {
|
||||
if err := <-errCh; err != nil {
|
||||
return ws, wf, executions, errors.New("could not launch the peer execution : " + fmt.Sprintf("%v", err))
|
||||
return ws, wf, executions, errors.New("could not propagate purchase: " + fmt.Sprintf("%v", err))
|
||||
}
|
||||
}
|
||||
|
||||
errCh = make(chan error, len(bookings))
|
||||
|
||||
for _, booking := range bookings {
|
||||
go ws.CallDatacenter(booking, booking.DestPeerID, tools.BOOKING, request, errCh, &m)
|
||||
for _, bk := range bookings {
|
||||
bk.IsDraft = true
|
||||
go propagateResource(bk, bk.DestPeerID, tools.BOOKING, selfID, request, errCh)
|
||||
}
|
||||
|
||||
for i := 0; i < len(bookings); i++ {
|
||||
if err := <-errCh; err != nil {
|
||||
return ws, wf, executions, errors.New("could not launch the peer execution : " + fmt.Sprintf("%v", err))
|
||||
return ws, wf, executions, errors.New("could not propagate booking: " + fmt.Sprintf("%v", err))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -240,29 +195,49 @@ func (ws *WorkflowSchedule) Schedules(wfID string, request *tools.APIRequest) (*
|
||||
}
|
||||
exec.StoreDraftDefault()
|
||||
utils.GenericStoreOne(exec, workflow_execution.NewAccessor(request))
|
||||
go EmitConsidersExecution(exec, wf)
|
||||
}
|
||||
fmt.Println("Schedules")
|
||||
|
||||
wf.GetAccessor(&tools.APIRequest{Admin: true}).UpdateOne(wf, wf.GetID())
|
||||
wf.GetAccessor(&tools.APIRequest{Admin: true}).UpdateOne(wf.Serialize(wf), wf.GetID())
|
||||
|
||||
return ws, wf, executions, nil
|
||||
}
|
||||
|
||||
func (ws *WorkflowSchedule) CallDatacenter(purchase utils.DBObject, destPeerID string, dt tools.DataType, request *tools.APIRequest, errCh chan error, m *sync.Mutex) {
|
||||
m.Lock()
|
||||
c, err := getCallerCopy(request, errCh)
|
||||
// propagateResource routes a purchase or booking to its destination:
|
||||
// - If destPeerID matches our own peer (selfMongoID), the object is stored
|
||||
// directly in the local DB as draft and the local planner is refreshed.
|
||||
// - Otherwise a NATS CREATE_RESOURCE message is emitted so the destination
|
||||
// peer can process it asynchronously.
|
||||
//
|
||||
// The caller is responsible for setting obj.IsDraft = true before calling.
|
||||
func propagateResource(obj utils.DBObject, destPeerID string, dt tools.DataType, selfMongoID *peer.Peer, request *tools.APIRequest, errCh chan error) {
|
||||
if selfMongoID == nil {
|
||||
return
|
||||
} // booking or purchase
|
||||
if destPeerID == selfMongoID.GetID() {
|
||||
if _, _, err := obj.GetAccessor(request).StoreOne(obj); err != nil {
|
||||
errCh <- fmt.Errorf("could not store %s locally: %w", dt.String(), err)
|
||||
return
|
||||
}
|
||||
// The planner tracks booking time-slots only; purchases do not affect it.
|
||||
if dt == tools.BOOKING {
|
||||
go refreshSelfPlanner(selfMongoID.PeerID, request)
|
||||
}
|
||||
errCh <- nil
|
||||
return
|
||||
}
|
||||
payload, err := json.Marshal(obj)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
errCh <- fmt.Errorf("could not serialize %s: %w", dt.String(), err)
|
||||
return
|
||||
}
|
||||
m.Unlock()
|
||||
if res, err := (&peer.Peer{}).LaunchPeerExecution(destPeerID, "", dt, tools.POST, purchase.Serialize(purchase), &c); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
} else {
|
||||
data := res["data"].(map[string]interface{})
|
||||
purchase.SetID(fmt.Sprintf("%v", data["id"]))
|
||||
}
|
||||
tools.NewNATSCaller().SetNATSPub(tools.CREATE_RESOURCE, tools.NATSResponse{
|
||||
FromApp: "oc-scheduler",
|
||||
Datatype: dt,
|
||||
Method: int(tools.CREATE_RESOURCE),
|
||||
Payload: payload,
|
||||
})
|
||||
errCh <- nil
|
||||
}
|
||||
|
||||
@@ -360,3 +335,303 @@ type Schedule struct {
|
||||
* TODO : LARGEST GRAIN PLANIFYING THE WORKFLOW WHEN OPTION IS SET
|
||||
* SET PROTECTION BORDER TIME
|
||||
*/
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Slot availability check
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
const (
|
||||
checkWindowHours = 5 // how far ahead to scan for a free slot (hours)
|
||||
checkStepMin = 15 // time increment per scan step (minutes)
|
||||
)
|
||||
|
||||
// CheckResult holds the outcome of a slot availability check.
|
||||
type CheckResult struct {
|
||||
Available bool `json:"available"`
|
||||
Start time.Time `json:"start"`
|
||||
End *time.Time `json:"end,omitempty"`
|
||||
// NextSlot is the nearest free slot found within checkWindowHours when
|
||||
// the requested slot is unavailable, or the preferred (conflict-free) slot
|
||||
// when running in preemption mode.
|
||||
NextSlot *time.Time `json:"next_slot,omitempty"`
|
||||
Warnings []string `json:"warnings,omitempty"`
|
||||
// Preemptible is true when the check was run in preemption mode.
|
||||
Preemptible bool `json:"preemptible,omitempty"`
|
||||
}
|
||||
|
||||
// bookingResource is the minimum info needed to verify a resource against the
|
||||
// planner cache.
|
||||
type bookingResource struct {
|
||||
id string
|
||||
peerID string
|
||||
instanceID string // resolved from WorkflowSchedule.SelectedInstances
|
||||
}
|
||||
|
||||
// Check verifies that all booking-relevant resources (storage and compute) of
|
||||
// the given workflow have capacity for the requested time slot.
|
||||
//
|
||||
// - asap=true → ignore ws.Start, begin searching from time.Now()
|
||||
// - preemption → always return Available=true but populate Warnings with
|
||||
// conflicts and NextSlot with the nearest conflict-free alternative
|
||||
func (ws *WorkflowSchedule) Check(wfID string, asap bool, preemption bool, request *tools.APIRequest) (*CheckResult, error) {
|
||||
// 1. Load workflow
|
||||
obj, code, err := workflow.NewAccessor(request).LoadOne(wfID)
|
||||
if code != 200 || err != nil {
|
||||
msg := "could not load workflow " + wfID
|
||||
if err != nil {
|
||||
msg += ": " + err.Error()
|
||||
}
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
wf := obj.(*workflow.Workflow)
|
||||
|
||||
// 2. Resolve start
|
||||
start := ws.Start
|
||||
if asap || start.IsZero() {
|
||||
start = time.Now()
|
||||
}
|
||||
|
||||
// 3. Resolve end – use explicit end/duration or estimate via Planify
|
||||
end := ws.End
|
||||
if end == nil {
|
||||
if ws.DurationS > 0 {
|
||||
e := start.Add(time.Duration(ws.DurationS * float64(time.Second)))
|
||||
end = &e
|
||||
} else {
|
||||
_, longest, _, _, planErr := wf.Planify(
|
||||
start, nil,
|
||||
ws.SelectedInstances, ws.SelectedPartnerships,
|
||||
ws.SelectedBuyings, ws.SelectedStrategies,
|
||||
int(ws.BookingMode), request,
|
||||
)
|
||||
if planErr == nil && longest > 0 {
|
||||
e := start.Add(time.Duration(longest) * time.Second)
|
||||
end = &e
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 4. Extract booking-relevant (storage + compute) resources from the graph,
|
||||
// resolving the selected instance for each resource.
|
||||
checkables := collectBookingResources(wf, ws.SelectedInstances)
|
||||
fmt.Println(checkables)
|
||||
// 5. Check every resource against its peer's planner
|
||||
unavailable, warnings := checkResourceAvailability(checkables, start, end)
|
||||
fmt.Println(unavailable, warnings)
|
||||
result := &CheckResult{
|
||||
Start: start,
|
||||
End: end,
|
||||
Warnings: warnings,
|
||||
}
|
||||
|
||||
// 6. Preemption mode: mark as schedulable regardless of conflicts, but
|
||||
// surface warnings and the nearest conflict-free alternative.
|
||||
if preemption {
|
||||
result.Available = true
|
||||
result.Preemptible = true
|
||||
if len(unavailable) > 0 {
|
||||
result.NextSlot = findNextSlot(checkables, start, end, checkWindowHours)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// 7. All resources are free
|
||||
if len(unavailable) == 0 {
|
||||
result.Available = true
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// 8. Slot unavailable – locate the nearest free slot within the window
|
||||
result.Available = false
|
||||
result.NextSlot = findNextSlot(checkables, start, end, checkWindowHours)
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// collectBookingResources returns unique storage and compute resources from the
|
||||
// workflow graph. For each resource the selected instance ID is resolved from
|
||||
// selectedInstances (the scheduler's SelectedInstances ConfigItem) so the planner
|
||||
// check targets the exact instance chosen by the user.
|
||||
func collectBookingResources(wf *workflow.Workflow, selectedInstances workflow.ConfigItem) []bookingResource {
|
||||
if wf.Graph == nil {
|
||||
return nil
|
||||
}
|
||||
seen := map[string]bool{}
|
||||
var result []bookingResource
|
||||
|
||||
resolveInstanceID := func(res interface {
|
||||
GetID() string
|
||||
GetCreatorID() string
|
||||
}) string {
|
||||
idx := selectedInstances.Get(res.GetID())
|
||||
switch r := res.(type) {
|
||||
case *resources.StorageResource:
|
||||
if inst := r.GetSelectedInstance(idx); inst != nil {
|
||||
return inst.GetID()
|
||||
}
|
||||
case *resources.ComputeResource:
|
||||
if inst := r.GetSelectedInstance(idx); inst != nil {
|
||||
return inst.GetID()
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
for _, item := range wf.GetGraphItems(wf.Graph.IsStorage) {
|
||||
i := item
|
||||
_, res := i.GetResource()
|
||||
if res == nil {
|
||||
continue
|
||||
}
|
||||
id, peerID := res.GetID(), res.GetCreatorID()
|
||||
if peerID == "" || seen[id] {
|
||||
continue
|
||||
}
|
||||
seen[id] = true
|
||||
result = append(result, bookingResource{
|
||||
id: id,
|
||||
peerID: peerID,
|
||||
instanceID: resolveInstanceID(res),
|
||||
})
|
||||
}
|
||||
|
||||
for _, item := range wf.GetGraphItems(wf.Graph.IsCompute) {
|
||||
i := item
|
||||
_, res := i.GetResource()
|
||||
if res == nil {
|
||||
continue
|
||||
}
|
||||
id, peerID := res.GetID(), res.GetCreatorID()
|
||||
if peerID == "" || seen[id] {
|
||||
continue
|
||||
}
|
||||
seen[id] = true
|
||||
result = append(result, bookingResource{
|
||||
id: id,
|
||||
peerID: peerID,
|
||||
instanceID: resolveInstanceID(res),
|
||||
})
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// checkResourceAvailability returns the IDs of unavailable resources and
|
||||
// human-readable warning messages.
|
||||
func checkResourceAvailability(res []bookingResource, start time.Time, end *time.Time) (unavailable []string, warnings []string) {
|
||||
for _, r := range res {
|
||||
plannerMu.RLock()
|
||||
p := PlannerCache[r.peerID]
|
||||
plannerMu.RUnlock()
|
||||
if p == nil {
|
||||
warnings = append(warnings, fmt.Sprintf(
|
||||
"peer %s planner not in cache for resource %s – assuming available", r.peerID, r.id))
|
||||
continue
|
||||
}
|
||||
if !checkInstance(p, r.id, r.instanceID, start, end) {
|
||||
unavailable = append(unavailable, r.id)
|
||||
warnings = append(warnings, fmt.Sprintf(
|
||||
"resource %s is not available in [%s – %s]",
|
||||
r.id, start.Format(time.RFC3339), formatOptTime(end)))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// checkInstance checks availability for the specific instance resolved by the
|
||||
// scheduler. When instanceID is empty (no instance selected / none resolvable),
|
||||
// it falls back to checking all instances known in the planner and returns true
|
||||
// if any one has remaining capacity. Returns true when no capacity is recorded.
|
||||
func checkInstance(p *planner.Planner, resourceID string, instanceID string, start time.Time, end *time.Time) bool {
|
||||
if instanceID != "" {
|
||||
return p.Check(resourceID, instanceID, nil, start, end)
|
||||
}
|
||||
// Fallback: accept if any known instance has free capacity
|
||||
caps, ok := p.Capacities[resourceID]
|
||||
if !ok || len(caps) == 0 {
|
||||
return true // no recorded usage → assume free
|
||||
}
|
||||
for id := range caps {
|
||||
if p.Check(resourceID, id, nil, start, end) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// findNextSlot scans forward from 'from' in checkStepMin increments for up to
|
||||
// windowH hours and returns the first candidate start time at which all
|
||||
// resources are simultaneously free.
|
||||
func findNextSlot(resources []bookingResource, from time.Time, originalEnd *time.Time, windowH int) *time.Time {
|
||||
duration := time.Hour
|
||||
if originalEnd != nil {
|
||||
if d := originalEnd.Sub(from); d > 0 {
|
||||
duration = d
|
||||
}
|
||||
}
|
||||
step := time.Duration(checkStepMin) * time.Minute
|
||||
limit := from.Add(time.Duration(windowH) * time.Hour)
|
||||
for t := from.Add(step); t.Before(limit); t = t.Add(step) {
|
||||
e := t.Add(duration)
|
||||
if unavail, _ := checkResourceAvailability(resources, t, &e); len(unavail) == 0 {
|
||||
return &t
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func formatOptTime(t *time.Time) string {
|
||||
if t == nil {
|
||||
return "open"
|
||||
}
|
||||
return t.Format(time.RFC3339)
|
||||
}
|
||||
|
||||
// GetWorkflowPeerIDs loads the workflow and returns the deduplicated list of
|
||||
// creator peer IDs for all its storage and compute resources.
|
||||
// These are the peers whose planners must be watched by a check stream.
|
||||
func GetWorkflowPeerIDs(wfID string, request *tools.APIRequest) ([]string, error) {
|
||||
obj, code, err := workflow.NewAccessor(request).LoadOne(wfID)
|
||||
if code != 200 || err != nil {
|
||||
msg := "could not load workflow " + wfID
|
||||
if err != nil {
|
||||
msg += ": " + err.Error()
|
||||
}
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
wf := obj.(*workflow.Workflow)
|
||||
if wf.Graph == nil {
|
||||
return nil, nil
|
||||
}
|
||||
seen := map[string]bool{}
|
||||
var peerIDs []string
|
||||
for _, item := range wf.GetGraphItems(wf.Graph.IsStorage) {
|
||||
i := item
|
||||
_, res := i.GetResource()
|
||||
if res == nil {
|
||||
continue
|
||||
}
|
||||
if id := res.GetCreatorID(); id != "" && !seen[id] {
|
||||
seen[id] = true
|
||||
peerIDs = append(peerIDs, id)
|
||||
}
|
||||
}
|
||||
for _, item := range wf.GetGraphItems(wf.Graph.IsCompute) {
|
||||
i := item
|
||||
_, res := i.GetResource()
|
||||
if res == nil {
|
||||
continue
|
||||
}
|
||||
if id := res.GetCreatorID(); id != "" && !seen[id] {
|
||||
seen[id] = true
|
||||
peerIDs = append(peerIDs, id)
|
||||
}
|
||||
}
|
||||
realPeersID := []string{}
|
||||
access := oclib.NewRequestAdmin(oclib.LibDataEnum(tools.PEER), nil)
|
||||
for _, id := range peerIDs {
|
||||
if data := access.LoadOne(id); data.Data != nil {
|
||||
realPeersID = append(realPeersID, data.ToPeer().PeerID)
|
||||
}
|
||||
}
|
||||
return realPeersID, nil
|
||||
}
|
||||
|
||||
52
main.go
52
main.go
@@ -1,46 +1,40 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"oc-scheduler/conf"
|
||||
"oc-scheduler/infrastructure"
|
||||
_ "oc-scheduler/routers"
|
||||
"os"
|
||||
|
||||
oclib "cloud.o-forge.io/core/oc-lib"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
beego "github.com/beego/beego/v2/server/web"
|
||||
"github.com/beego/beego/v2/server/web/filter/cors"
|
||||
)
|
||||
|
||||
const appname = "oc-scheduler"
|
||||
|
||||
func main() {
|
||||
o := oclib.GetConfLoader(appname)
|
||||
conf.GetConfig().KubeHost = o.GetStringDefault("KUBERNETES_SERVICE_HOST", os.Getenv("KUBERNETES_SERVICE_HOST"))
|
||||
conf.GetConfig().KubePort = o.GetStringDefault("KUBERNETES_SERVICE_PORT", "6443")
|
||||
|
||||
// Init the oc-lib
|
||||
oclib.Init(appname)
|
||||
sDec, err := base64.StdEncoding.DecodeString(o.GetStringDefault("KUBE_CA", ""))
|
||||
if err == nil {
|
||||
conf.GetConfig().KubeCA = string(sDec)
|
||||
}
|
||||
sDec, err = base64.StdEncoding.DecodeString(o.GetStringDefault("KUBE_CERT", ""))
|
||||
if err == nil {
|
||||
conf.GetConfig().KubeCert = string(sDec)
|
||||
}
|
||||
sDec, err = base64.StdEncoding.DecodeString(o.GetStringDefault("KUBE_DATA", ""))
|
||||
if err == nil {
|
||||
conf.GetConfig().KubeData = string(sDec)
|
||||
}
|
||||
|
||||
// Load the right config file
|
||||
o := oclib.GetConfLoader()
|
||||
oclib.InitAPI(appname)
|
||||
|
||||
// feed the library with the loaded config
|
||||
oclib.SetConfig(
|
||||
o.GetStringDefault("MONGO_URL", "mongodb://127.0.0.1:27017"),
|
||||
o.GetStringDefault("MONGO_DATABASE", "DC_myDC"),
|
||||
o.GetStringDefault("NATS_URL", "nats://localhost:4222"),
|
||||
o.GetStringDefault("LOKI_URL", "loki://localhost:3100"),
|
||||
o.GetStringDefault("LOG_LEVEL", "info"),
|
||||
)
|
||||
|
||||
// Beego init
|
||||
beego.BConfig.AppName = appname
|
||||
beego.BConfig.Listen.HTTPPort = o.GetIntDefault("port", 8080)
|
||||
beego.BConfig.WebConfig.DirectoryIndex = true
|
||||
beego.BConfig.WebConfig.StaticDir["/swagger"] = "swagger"
|
||||
api := &tools.API{}
|
||||
api.Discovered(beego.BeeApp.Handlers.GetAllControllerInfo())
|
||||
beego.InsertFilter("*", beego.BeforeRouter, cors.Allow(&cors.Options{
|
||||
AllowAllOrigins: true,
|
||||
AllowMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
|
||||
AllowHeaders: []string{"Origin", "Authorization", "Content-Type"},
|
||||
ExposeHeaders: []string{"Content-Length", "Content-Type"},
|
||||
AllowCredentials: true,
|
||||
}))
|
||||
go infrastructure.ListenNATS()
|
||||
go infrastructure.InitSelfPlanner()
|
||||
go infrastructure.ListenConfirm()
|
||||
beego.Run()
|
||||
}
|
||||
|
||||
BIN
oc-scheduler
BIN
oc-scheduler
Binary file not shown.
@@ -7,6 +7,42 @@ import (
|
||||
|
||||
func init() {
|
||||
|
||||
beego.GlobalControllerRouter["oc-scheduler/controllers:BookingController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:BookingController"],
|
||||
beego.ControllerComments{
|
||||
Method: "GetAll",
|
||||
Router: `/`,
|
||||
AllowHTTPMethods: []string{"get"},
|
||||
MethodParams: param.Make(),
|
||||
Filters: nil,
|
||||
Params: nil})
|
||||
|
||||
beego.GlobalControllerRouter["oc-scheduler/controllers:BookingController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:BookingController"],
|
||||
beego.ControllerComments{
|
||||
Method: "Get",
|
||||
Router: `/:id`,
|
||||
AllowHTTPMethods: []string{"get"},
|
||||
MethodParams: param.Make(),
|
||||
Filters: nil,
|
||||
Params: nil})
|
||||
|
||||
beego.GlobalControllerRouter["oc-scheduler/controllers:BookingController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:BookingController"],
|
||||
beego.ControllerComments{
|
||||
Method: "Search",
|
||||
Router: `/search/:start_date/:end_date`,
|
||||
AllowHTTPMethods: []string{"get"},
|
||||
MethodParams: param.Make(),
|
||||
Filters: nil,
|
||||
Params: nil})
|
||||
|
||||
beego.GlobalControllerRouter["oc-scheduler/controllers:BookingController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:BookingController"],
|
||||
beego.ControllerComments{
|
||||
Method: "ExecutionSearch",
|
||||
Router: `/search/execution/:id`,
|
||||
AllowHTTPMethods: []string{"get"},
|
||||
MethodParams: param.Make(),
|
||||
Filters: nil,
|
||||
Params: nil})
|
||||
|
||||
beego.GlobalControllerRouter["oc-scheduler/controllers:LokiController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:LokiController"],
|
||||
beego.ControllerComments{
|
||||
Method: "GetLogs",
|
||||
@@ -88,6 +124,15 @@ func init() {
|
||||
Filters: nil,
|
||||
Params: nil})
|
||||
|
||||
beego.GlobalControllerRouter["oc-scheduler/controllers:WorkflowSchedulerController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:WorkflowSchedulerController"],
|
||||
beego.ControllerComments{
|
||||
Method: "CheckStream",
|
||||
Router: `/:id/check`,
|
||||
AllowHTTPMethods: []string{"get"},
|
||||
MethodParams: param.Make(),
|
||||
Filters: nil,
|
||||
Params: nil})
|
||||
|
||||
beego.GlobalControllerRouter["oc-scheduler/controllers:WorkflowSchedulerController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:WorkflowSchedulerController"],
|
||||
beego.ControllerComments{
|
||||
Method: "SearchScheduledDraftOrder",
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
package routers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"oc-scheduler/controllers"
|
||||
|
||||
beego "github.com/beego/beego/v2/server/web"
|
||||
@@ -23,6 +24,11 @@ func init() {
|
||||
&controllers.LokiController{},
|
||||
),
|
||||
),
|
||||
beego.NSNamespace("/booking",
|
||||
beego.NSInclude(
|
||||
&controllers.BookingController{},
|
||||
),
|
||||
),
|
||||
beego.NSNamespace("/execution",
|
||||
beego.NSInclude(
|
||||
&controllers.WorkflowExecutionController{},
|
||||
@@ -36,4 +42,7 @@ func init() {
|
||||
)
|
||||
|
||||
beego.AddNamespace(ns)
|
||||
|
||||
// Register the WebSocket route outside the Beego pipeline to avoid the spurious WriteHeader call
|
||||
beego.Handler("/oc/:id/check", http.HandlerFunc(controllers.CheckStreamHandler))
|
||||
}
|
||||
|
||||
@@ -15,6 +15,116 @@
|
||||
},
|
||||
"basePath": "/oc/",
|
||||
"paths": {
|
||||
"/booking/": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"booking"
|
||||
],
|
||||
"description": "list all bookings\n\u003cbr\u003e",
|
||||
"operationId": "BookingController.GetAll",
|
||||
"parameters": [
|
||||
{
|
||||
"in": "query",
|
||||
"name": "is_draft",
|
||||
"description": "draft wished",
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "{booking} models.booking"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/booking/search/execution/{id}": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"booking"
|
||||
],
|
||||
"description": "search bookings by execution\n\u003cbr\u003e",
|
||||
"operationId": "BookingController.ExecutionSearch",
|
||||
"parameters": [
|
||||
{
|
||||
"in": "path",
|
||||
"name": "id",
|
||||
"description": "id execution",
|
||||
"required": true,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"in": "query",
|
||||
"name": "is_draft",
|
||||
"description": "draft wished",
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "{workspace} models.workspace"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/booking/search/{start_date}/{end_date}": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"booking"
|
||||
],
|
||||
"description": "search bookings\n\u003cbr\u003e",
|
||||
"operationId": "BookingController.Search",
|
||||
"parameters": [
|
||||
{
|
||||
"in": "path",
|
||||
"name": "start_date",
|
||||
"description": "the word search you want to get",
|
||||
"required": true,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"in": "path",
|
||||
"name": "end_date",
|
||||
"description": "the word search you want to get",
|
||||
"required": true,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"in": "query",
|
||||
"name": "is_draft",
|
||||
"description": "draft wished",
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "{workspace} models.workspace"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/booking/{id}": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"booking"
|
||||
],
|
||||
"description": "find booking by id\n\u003cbr\u003e",
|
||||
"operationId": "BookingController.Get",
|
||||
"parameters": [
|
||||
{
|
||||
"in": "path",
|
||||
"name": "id",
|
||||
"description": "the id you want to get",
|
||||
"required": true,
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "{booking} models.booking"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/execution/": {
|
||||
"get": {
|
||||
"tags": [
|
||||
@@ -240,6 +350,41 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/{id}/check": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"oc-scheduler/controllersWorkflowSchedulerController"
|
||||
],
|
||||
"description": "WebSocket stream of slot availability for a workflow.\n\u003cbr\u003e",
|
||||
"operationId": "WorkflowSchedulerController.CheckStream",
|
||||
"parameters": [
|
||||
{
|
||||
"in": "path",
|
||||
"name": "id",
|
||||
"description": "workflow id",
|
||||
"required": true,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"in": "query",
|
||||
"name": "as_possible",
|
||||
"description": "find nearest free slot from now",
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"in": "query",
|
||||
"name": "preemption",
|
||||
"description": "validate anyway, raise warnings",
|
||||
"type": "boolean"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"101": {
|
||||
"description": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/{id}/order": {
|
||||
"get": {
|
||||
"tags": [
|
||||
@@ -279,6 +424,10 @@
|
||||
"name": "loki",
|
||||
"description": "Operations about workflow\n"
|
||||
},
|
||||
{
|
||||
"name": "booking",
|
||||
"description": "Operations about workspace\n"
|
||||
},
|
||||
{
|
||||
"name": "execution",
|
||||
"description": "Operations about workflow\n"
|
||||
|
||||
@@ -57,6 +57,31 @@ paths:
|
||||
responses:
|
||||
"200":
|
||||
description: '{workspace} models.workspace'
|
||||
/{id}/check:
|
||||
get:
|
||||
tags:
|
||||
- oc-scheduler/controllersWorkflowSchedulerController
|
||||
description: |-
|
||||
WebSocket stream of slot availability for a workflow.
|
||||
<br>
|
||||
operationId: WorkflowSchedulerController.CheckStream
|
||||
parameters:
|
||||
- in: path
|
||||
name: id
|
||||
description: workflow id
|
||||
required: true
|
||||
type: string
|
||||
- in: query
|
||||
name: as_possible
|
||||
description: find nearest free slot from now
|
||||
type: boolean
|
||||
- in: query
|
||||
name: preemption
|
||||
description: validate anyway, raise warnings
|
||||
type: boolean
|
||||
responses:
|
||||
"101":
|
||||
description: ""
|
||||
/{id}/order:
|
||||
get:
|
||||
tags:
|
||||
@@ -74,6 +99,86 @@ paths:
|
||||
responses:
|
||||
"200":
|
||||
description: '{workspace} models.workspace'
|
||||
/booking/:
|
||||
get:
|
||||
tags:
|
||||
- booking
|
||||
description: |-
|
||||
list all bookings
|
||||
<br>
|
||||
operationId: BookingController.GetAll
|
||||
parameters:
|
||||
- in: query
|
||||
name: is_draft
|
||||
description: draft wished
|
||||
type: string
|
||||
responses:
|
||||
"200":
|
||||
description: '{booking} models.booking'
|
||||
/booking/{id}:
|
||||
get:
|
||||
tags:
|
||||
- booking
|
||||
description: |-
|
||||
find booking by id
|
||||
<br>
|
||||
operationId: BookingController.Get
|
||||
parameters:
|
||||
- in: path
|
||||
name: id
|
||||
description: the id you want to get
|
||||
required: true
|
||||
type: string
|
||||
responses:
|
||||
"200":
|
||||
description: '{booking} models.booking'
|
||||
/booking/search/{start_date}/{end_date}:
|
||||
get:
|
||||
tags:
|
||||
- booking
|
||||
description: |-
|
||||
search bookings
|
||||
<br>
|
||||
operationId: BookingController.Search
|
||||
parameters:
|
||||
- in: path
|
||||
name: start_date
|
||||
description: the word search you want to get
|
||||
required: true
|
||||
type: string
|
||||
- in: path
|
||||
name: end_date
|
||||
description: the word search you want to get
|
||||
required: true
|
||||
type: string
|
||||
- in: query
|
||||
name: is_draft
|
||||
description: draft wished
|
||||
type: string
|
||||
responses:
|
||||
"200":
|
||||
description: '{workspace} models.workspace'
|
||||
/booking/search/execution/{id}:
|
||||
get:
|
||||
tags:
|
||||
- booking
|
||||
description: |-
|
||||
search bookings by execution
|
||||
<br>
|
||||
operationId: BookingController.ExecutionSearch
|
||||
parameters:
|
||||
- in: path
|
||||
name: id
|
||||
description: id execution
|
||||
required: true
|
||||
type: string
|
||||
- in: query
|
||||
name: is_draft
|
||||
description: draft wished
|
||||
type: string
|
||||
responses:
|
||||
"200":
|
||||
description: '{workspace} models.workspace'
|
||||
/execution/:
|
||||
get:
|
||||
tags:
|
||||
@@ -205,6 +310,9 @@ tags:
|
||||
- name: loki
|
||||
description: |
|
||||
Operations about workflow
|
||||
- name: booking
|
||||
description: |
|
||||
Operations about workspace
|
||||
- name: execution
|
||||
description: |
|
||||
Operations about workflow
|
||||
|
||||
115
ws.go
Normal file
115
ws.go
Normal file
@@ -0,0 +1,115 @@
|
||||
//go:build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/websocket"
|
||||
)
|
||||
|
||||
// main is a throwaway WebSocket test client (file is build-tagged ignore).
// It connects to the scheduler's /oc/<workflow-id>/check stream, sends a
// WorkflowSchedule JSON body as the first message, then pretty-prints every
// message received until interrupted, the server closes, or no message
// arrives for -timeout seconds.
//
// Usage: ws [url] [bearer-token] [json-body]  (all three positional args
// optional; defaults target localhost:8090 with a one-hour slot from now).
func main() {
	timeout := flag.Int("timeout", 30, "secondes sans message avant de quitter")
	flag.Parse()

	args := flag.Args()
	// Available WS route examples:
	// ws://localhost:8090/oc/<workflow-id>/check
	// ws://localhost:8090/oc/<workflow-id>/check?as_possible=true
	// ws://localhost:8090/oc/<workflow-id>/check?as_possible=true&preemption=true
	url := "ws://localhost:8090/oc/WORKFLOW_ID/check?as_possible=true"
	token := ""
	// JSON body sent as the first WebSocket message (a WorkflowSchedule).
	// Only start + duration_s are required when as_possible=true.
	body := `{"start":"` + time.Now().UTC().Format(time.RFC3339) + `","duration_s":3600}`

	// Positional overrides: url, then bearer token, then body.
	if len(args) >= 1 {
		url = args[0]
	}
	if len(args) >= 2 {
		token = args[1]
	}
	if len(args) >= 3 {
		body = args[2]
	}

	origin := "http://localhost/"
	config, err := websocket.NewConfig(url, origin)
	if err != nil {
		log.Fatalf("Config invalide : %v", err)
	}
	if token != "" {
		config.Header.Set("Authorization", "Bearer "+token)
		// Only the first 20 chars are echoed, to avoid leaking the token.
		fmt.Printf("Token : %s...\n", token[:min(20, len(token))])
	}

	fmt.Printf("Connexion à : %s\n", url)
	ws, err := websocket.DialConfig(config)
	if err != nil {
		log.Fatalf("Impossible de se connecter : %v", err)
	}
	defer ws.Close()
	fmt.Println("Connecté — envoi du body initial...")

	// Send the WorkflowSchedule as the first message.
	if err := websocket.Message.Send(ws, body); err != nil {
		log.Fatalf("Impossible d'envoyer le body initial : %v", err)
	}
	fmt.Printf("Body envoyé : %s\n\nEn attente de messages...\n\n", body)

	// Ctrl-C stops the client cleanly.
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt)

	// Reader goroutine: forwards incoming frames (or the terminal error) to
	// the select loop below. errs is buffered so the goroutine never blocks
	// on exit.
	msgs := make(chan string)
	errs := make(chan error, 1)

	go func() {
		for {
			var raw string
			if err := websocket.Message.Receive(ws, &raw); err != nil {
				errs <- err
				return
			}
			msgs <- raw
		}
	}()

	// Idle timer: quit after -timeout seconds without any message; reset on
	// every received frame.
	idleTimer := time.NewTimer(time.Duration(*timeout) * time.Second)
	defer idleTimer.Stop()

	for {
		select {
		case <-stop:
			fmt.Println("\nInterruption — fermeture.")
			return
		case err := <-errs:
			fmt.Printf("Connexion fermée : %v\n", err)
			return
		case <-idleTimer.C:
			fmt.Printf("Timeout (%ds) — aucun message reçu, fermeture.\n", *timeout)
			return
		case raw := <-msgs:
			idleTimer.Reset(time.Duration(*timeout) * time.Second)
			// Pretty-print JSON payloads; fall back to the raw frame text.
			var data any
			if err := json.Unmarshal([]byte(raw), &data); err == nil {
				b, _ := json.MarshalIndent(data, "", "  ")
				fmt.Println(string(b))
			} else {
				fmt.Printf("Message brut : %s\n", raw)
			}
		}
	}
}
||||
|
||||
// min returns the smaller of two ints. (Kept for compatibility with the
// pre-1.21 toolchain this helper was written against; the builtin min would
// otherwise suffice.)
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
||||
Reference in New Issue
Block a user