Compare commits
1 Commits
main
...
feature/or
| Author | SHA1 | Date | |
|---|---|---|---|
| d1e5b59eb9 |
3
.gitattributes
vendored
3
.gitattributes
vendored
@@ -1,3 +0,0 @@
|
|||||||
# Force Go as the main language
|
|
||||||
*.go linguist-detectable=true
|
|
||||||
* linguist-language=Go
|
|
||||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -8,7 +8,7 @@
|
|||||||
*.dll
|
*.dll
|
||||||
*.so
|
*.so
|
||||||
*.dylib
|
*.dylib
|
||||||
env.env
|
|
||||||
# Test binary, built with `go test -c`
|
# Test binary, built with `go test -c`
|
||||||
*.test
|
*.test
|
||||||
|
|
||||||
|
|||||||
47
Dockerfile
47
Dockerfile
@@ -1,44 +1,29 @@
|
|||||||
FROM golang:alpine AS deps
|
|
||||||
|
|
||||||
WORKDIR /app
|
|
||||||
COPY go.mod go.sum ./
|
|
||||||
RUN sed -i '/replace/d' go.mod
|
|
||||||
RUN go mod download
|
|
||||||
|
|
||||||
#----------------------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
FROM golang:alpine AS builder
|
FROM golang:alpine AS builder
|
||||||
|
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
RUN apk add git
|
|
||||||
|
|
||||||
RUN go install github.com/beego/bee/v2@latest
|
|
||||||
|
|
||||||
WORKDIR /oc-scheduler
|
|
||||||
|
|
||||||
COPY --from=deps /go/pkg /go/pkg
|
|
||||||
COPY --from=deps /app/go.mod /app/go.sum ./
|
|
||||||
|
|
||||||
RUN export CGO_ENABLED=0 && \
|
|
||||||
export GOOS=linux && \
|
|
||||||
export GOARCH=amd64 && \
|
|
||||||
export BUILD_FLAGS="-ldflags='-w -s'"
|
|
||||||
|
|
||||||
COPY . .
|
COPY . .
|
||||||
|
|
||||||
RUN sed -i '/replace/d' go.mod
|
RUN apk add git
|
||||||
RUN bee pack
|
|
||||||
RUN mkdir -p /app/extracted && tar -zxvf oc-scheduler.tar.gz -C /app/extracted
|
|
||||||
|
|
||||||
#----------------------------------------------------------------------------------------------
|
RUN go get github.com/beego/bee/v2 && go install github.com/beego/bee/v2@master
|
||||||
|
|
||||||
FROM golang:alpine
|
RUN timeout 15 bee run -gendoc=true -downdoc=true -runmode=dev || :
|
||||||
|
|
||||||
|
RUN sed -i 's/http:\/\/127.0.0.1:8080\/swagger\/swagger.json/swagger.json/g' swagger/index.html
|
||||||
|
|
||||||
|
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" .
|
||||||
|
|
||||||
|
RUN ls /app
|
||||||
|
|
||||||
|
FROM scratch
|
||||||
|
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
COPY --from=builder /app/extracted/oc-scheduler /usr/bin/
|
|
||||||
COPY --from=builder /app/extracted/swagger /app/swagger
|
COPY --from=builder /app/oc-scheduler /usr/bin/
|
||||||
COPY --from=builder /app/extracted/docker_scheduler.json /etc/oc/scheduler.json
|
COPY --from=builder /app/swagger /app/swagger
|
||||||
|
|
||||||
|
COPY docker_scheduler.json /etc/oc/scheduler.json
|
||||||
|
|
||||||
EXPOSE 8080
|
EXPOSE 8080
|
||||||
|
|
||||||
|
|||||||
42
Makefile
42
Makefile
@@ -1,42 +0,0 @@
|
|||||||
.DEFAULT_GOAL := all
|
|
||||||
|
|
||||||
build: clean
|
|
||||||
bee pack
|
|
||||||
|
|
||||||
run:
|
|
||||||
bee run -gendoc=true -downdoc=true
|
|
||||||
|
|
||||||
purge:
|
|
||||||
lsof -t -i:8090 | xargs kill | true
|
|
||||||
|
|
||||||
run-dev:
|
|
||||||
bee generate routers && bee run -gendoc=true -downdoc=true -runmode=prod
|
|
||||||
|
|
||||||
dev: purge run-dev
|
|
||||||
|
|
||||||
debug:
|
|
||||||
bee run -downdebug -gendebug
|
|
||||||
|
|
||||||
clean:
|
|
||||||
rm -rf oc-peer.tar.gz
|
|
||||||
|
|
||||||
docker:
|
|
||||||
DOCKER_BUILDKIT=1 docker build -t oc-scheduler -f Dockerfile . --build-arg=HOST=$(HOST)
|
|
||||||
docker tag oc-scheduler opencloudregistry/oc-scheduler:latest
|
|
||||||
|
|
||||||
publish-kind:
|
|
||||||
kind load docker-image opencloudregistry/oc-scheduler:latest --name $(CLUSTER_NAME) | true
|
|
||||||
|
|
||||||
publish-registry:
|
|
||||||
docker push opencloudregistry/oc-scheduler:latest
|
|
||||||
|
|
||||||
docker-deploy:
|
|
||||||
docker compose up -d
|
|
||||||
|
|
||||||
run-docker: docker publish-kind publish-registry docker-deploy
|
|
||||||
|
|
||||||
all: docker publish-kind
|
|
||||||
|
|
||||||
ci: docker publish-registry
|
|
||||||
|
|
||||||
.PHONY: build run clean docker publish-kind publish-registry
|
|
||||||
@@ -6,8 +6,6 @@ To build :
|
|||||||
|
|
||||||
bee generate routers
|
bee generate routers
|
||||||
bee run -gendoc=true -downdoc=true
|
bee run -gendoc=true -downdoc=true
|
||||||
OR
|
|
||||||
make dev
|
|
||||||
|
|
||||||
If default Swagger page is displayed instead of tyour api, change url in swagger/index.html file to :
|
If default Swagger page is displayed instead of tyour api, change url in swagger/index.html file to :
|
||||||
<>
|
<>
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
appname = oc-scheduler
|
appname = oc-scheduler
|
||||||
httpport = 8090
|
httpport = 8080
|
||||||
runmode = dev
|
runmode = dev
|
||||||
autorender = false
|
autorender = false
|
||||||
copyrequestbody = true
|
copyrequestbody = true
|
||||||
|
|||||||
@@ -1,35 +0,0 @@
|
|||||||
package conf
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Config struct {
|
|
||||||
KubeHost string
|
|
||||||
KubePort string
|
|
||||||
KubeCA string
|
|
||||||
KubeCert string
|
|
||||||
KubeData string
|
|
||||||
// PrepLeadSeconds must match oc-schedulerd's PREP_LEAD_SECONDS.
|
|
||||||
// Used both as the ASAP buffer and as the minimum allowed lead time
|
|
||||||
// when validating explicit booking start dates.
|
|
||||||
PrepLeadSeconds int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Config) PrepLead() time.Duration {
|
|
||||||
if c.PrepLeadSeconds <= 0 {
|
|
||||||
return 2 * time.Minute
|
|
||||||
}
|
|
||||||
return time.Duration(c.PrepLeadSeconds) * time.Second
|
|
||||||
}
|
|
||||||
|
|
||||||
var instance *Config
|
|
||||||
var once sync.Once
|
|
||||||
|
|
||||||
func GetConfig() *Config {
|
|
||||||
once.Do(func() {
|
|
||||||
instance = &Config{}
|
|
||||||
})
|
|
||||||
return instance
|
|
||||||
}
|
|
||||||
@@ -1,93 +0,0 @@
|
|||||||
package controllers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
oclib "cloud.o-forge.io/core/oc-lib"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/booking"
|
|
||||||
beego "github.com/beego/beego/v2/server/web"
|
|
||||||
"github.com/gorilla/websocket"
|
|
||||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Operations about workspace
|
|
||||||
type BookingController struct {
|
|
||||||
beego.Controller
|
|
||||||
}
|
|
||||||
|
|
||||||
var BookingExample booking.Booking
|
|
||||||
|
|
||||||
// @Title Search
|
|
||||||
// @Description search bookings
|
|
||||||
// @Param start_date path string true "the word search you want to get"
|
|
||||||
// @Param end_date path string true "the word search you want to get"
|
|
||||||
// @Param is_draft query string false "draft wished"
|
|
||||||
// @Param offset query string false
|
|
||||||
// @Param limit query string false
|
|
||||||
// @Success 200 {workspace} models.workspace
|
|
||||||
// @router /search/:start_date/:end_date [get]
|
|
||||||
func (o *BookingController) Search() {
|
|
||||||
|
|
||||||
/*
|
|
||||||
* This is a sample of how to use the search function
|
|
||||||
* The search function is used to search for data in the database
|
|
||||||
* The search function takes in a filter and a data type
|
|
||||||
* The filter is a struct that contains the search parameters
|
|
||||||
* The data type is an enum that specifies the type of data to search for
|
|
||||||
* The search function returns a list of data that matches the filter
|
|
||||||
* The data is then returned as a json object
|
|
||||||
*/
|
|
||||||
// store and return Id or post with UUID
|
|
||||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
|
||||||
offset, _ := strconv.Atoi(o.Ctx.Input.Query("offset"))
|
|
||||||
limit, _ := strconv.Atoi(o.Ctx.Input.Query("limit"))
|
|
||||||
start_date, _ := time.ParseInLocation("2006-01-02", o.Ctx.Input.Param(":start_date"), time.UTC)
|
|
||||||
end_date, _ := time.ParseInLocation("2006-01-02", o.Ctx.Input.Param(":end_date"), time.UTC)
|
|
||||||
isDraft := o.Ctx.Input.Query("is_draft")
|
|
||||||
sd := primitive.NewDateTimeFromTime(start_date)
|
|
||||||
ed := primitive.NewDateTimeFromTime(end_date)
|
|
||||||
fmt.Println("SEARCH START END", start_date, end_date)
|
|
||||||
f := dbs.Filters{
|
|
||||||
And: map[string][]dbs.Filter{
|
|
||||||
"expected_start_date": {{Operator: "gte", Value: sd}, {Operator: "lte", Value: ed}},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
o.Data["json"] = oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), user, peerID, groups, nil).Search(&f, "", isDraft == "true", int64(offset), int64(limit))
|
|
||||||
o.ServeJSON()
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Title GetAll
|
|
||||||
// @Description find booking by id
|
|
||||||
// @Param is_draft query string false "draft wished"
|
|
||||||
// @Param offset query string false
|
|
||||||
// @Param limit query string false
|
|
||||||
// @Success 200 {booking} models.booking
|
|
||||||
// @router / [get]
|
|
||||||
func (o *BookingController) GetAll() {
|
|
||||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
|
||||||
offset, _ := strconv.Atoi(o.Ctx.Input.Query("offset"))
|
|
||||||
limit, _ := strconv.Atoi(o.Ctx.Input.Query("limit"))
|
|
||||||
isDraft := o.Ctx.Input.Query("is_draft")
|
|
||||||
o.Data["json"] = oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), user, peerID, groups, nil).LoadAll(isDraft == "true", int64(offset), int64(limit))
|
|
||||||
o.ServeJSON()
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Title Get
|
|
||||||
// @Description find booking by id
|
|
||||||
// @Param id path string true "the id you want to get"
|
|
||||||
// @Success 200 {booking} models.booking
|
|
||||||
// @router /:id [get]
|
|
||||||
func (o *BookingController) Get() {
|
|
||||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
|
||||||
id := o.Ctx.Input.Param(":id")
|
|
||||||
o.Data["json"] = oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), user, peerID, groups, nil).LoadOne(id)
|
|
||||||
o.ServeJSON()
|
|
||||||
}
|
|
||||||
|
|
||||||
var upgrader = websocket.Upgrader{
|
|
||||||
CheckOrigin: func(r *http.Request) bool { return true }, // allow all origins
|
|
||||||
}
|
|
||||||
@@ -1,73 +0,0 @@
|
|||||||
package controllers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"oc-scheduler/infrastructure"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
oclib "cloud.o-forge.io/core/oc-lib"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/execution_verification"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/resources/native_tools"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
beego "github.com/beego/beego/v2/server/web"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Operations about workspace
|
|
||||||
type ExecutionVerificationController struct {
|
|
||||||
beego.Controller
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Title GetAll
|
|
||||||
// @Description find verification by id
|
|
||||||
// @Param is_draft query string false "draft wished"
|
|
||||||
// @Param offset query string false
|
|
||||||
// @Param limit query string false
|
|
||||||
// @Success 200 {booking} models.booking
|
|
||||||
// @router / [get]
|
|
||||||
func (o *ExecutionVerificationController) GetAll() {
|
|
||||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
|
||||||
isDraft := o.Ctx.Input.Query("is_draft")
|
|
||||||
offset, _ := strconv.Atoi(o.Ctx.Input.Query("offset"))
|
|
||||||
limit, _ := strconv.Atoi(o.Ctx.Input.Query("limit"))
|
|
||||||
o.Data["json"] = oclib.NewRequest(oclib.LibDataEnum(oclib.EXECUTION_VERIFICATION), user, peerID, groups, nil).LoadAll(isDraft == "true", int64(offset), int64(limit))
|
|
||||||
o.ServeJSON()
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Title Get
|
|
||||||
// @Description find verification by id
|
|
||||||
// @Param id path string true "the id you want to get"
|
|
||||||
// @Success 200 {booking} models.booking
|
|
||||||
// @router /:id [get]
|
|
||||||
func (o *ExecutionVerificationController) Get() {
|
|
||||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
|
||||||
id := o.Ctx.Input.Param(":id")
|
|
||||||
o.Data["json"] = oclib.NewRequest(oclib.LibDataEnum(oclib.EXECUTION_VERIFICATION), user, peerID, groups, nil).LoadOne(id)
|
|
||||||
o.ServeJSON()
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Title Update
|
|
||||||
// @Description create computes
|
|
||||||
// @Param id path string true "the compute id you want to get"
|
|
||||||
// @Param body body models.compute true "The compute content"
|
|
||||||
// @Success 200 {compute} models.compute
|
|
||||||
// @router /:id [put]
|
|
||||||
func (o *ExecutionVerificationController) Put() {
|
|
||||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
|
||||||
// store and return Id or post with UUID
|
|
||||||
var res map[string]interface{}
|
|
||||||
id := o.Ctx.Input.Param(":id")
|
|
||||||
json.Unmarshal(o.Ctx.Input.CopyBody(10000), &res)
|
|
||||||
data := oclib.NewRequest(oclib.LibDataEnum(oclib.EXECUTION_VERIFICATION), user, peerID, groups, nil).UpdateOne(res, id)
|
|
||||||
if data.Err == "" && data.Data != nil && data.Data.(*execution_verification.ExecutionVerification).Validate {
|
|
||||||
data, _ := json.Marshal(&native_tools.WorkflowEventParams{
|
|
||||||
WorkflowResourceID: data.Data.(*execution_verification.ExecutionVerification).WorkflowID,
|
|
||||||
})
|
|
||||||
infrastructure.EmitNATS(peerID, tools.PropalgationMessage{
|
|
||||||
Action: tools.PubSubAction(tools.WORKFLOW_EVENT),
|
|
||||||
DataType: tools.EXECUTION_VERIFICATION.EnumIndex(),
|
|
||||||
Payload: data,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
o.Data["json"] = data
|
|
||||||
o.ServeJSON()
|
|
||||||
}
|
|
||||||
@@ -1,181 +0,0 @@
|
|||||||
package controllers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/config"
|
|
||||||
beego "github.com/beego/beego/v2/server/web"
|
|
||||||
gorillaws "github.com/gorilla/websocket"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Operations about workflow
|
|
||||||
type LokiController struct {
|
|
||||||
beego.Controller
|
|
||||||
}
|
|
||||||
|
|
||||||
type LokiInfo struct {
|
|
||||||
Start string `json:"start"`
|
|
||||||
End string `json:"end"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Title GetLogs
|
|
||||||
// @Description get logs
|
|
||||||
// @Param body body models.compute true "The compute content"
|
|
||||||
// @Success 200 {workspace} models.workspace
|
|
||||||
// @router /:id [post]
|
|
||||||
func (o *LokiController) GetLogs() {
|
|
||||||
id := o.Ctx.Input.Param(":id")
|
|
||||||
var resp map[string]interface{}
|
|
||||||
json.Unmarshal(o.Ctx.Input.CopyBody(100000), &resp)
|
|
||||||
|
|
||||||
path := "/loki/api/v1/query_range"
|
|
||||||
if len(resp) > 0 {
|
|
||||||
start := fmt.Sprintf("%v", resp["start"])
|
|
||||||
if len(start) > 10 {
|
|
||||||
start = start[0:10]
|
|
||||||
}
|
|
||||||
end := fmt.Sprintf("%v", resp["end"])
|
|
||||||
if len(end) > 10 {
|
|
||||||
end = end[0:10]
|
|
||||||
}
|
|
||||||
query := []string{
|
|
||||||
"workflow_execution_id=\"" + id + "\"",
|
|
||||||
}
|
|
||||||
for k, v := range resp {
|
|
||||||
if k == "start" || k == "end" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
query = append(query, fmt.Sprintf("%v=\"%v\"", k, v))
|
|
||||||
}
|
|
||||||
if len(query) == 0 || len(start) < 10 || len(end) < 10 {
|
|
||||||
o.Ctx.ResponseWriter.WriteHeader(403)
|
|
||||||
o.Data["json"] = map[string]string{"error": "Query error, missing data : start, end or query"}
|
|
||||||
o.ServeJSON()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
path += "?query={" + strings.Join(query, ", ") + "}&start=" + start + "&end=" + end
|
|
||||||
|
|
||||||
resp, err := http.Get(config.GetConfig().LokiUrl + path) // CALL
|
|
||||||
if err != nil {
|
|
||||||
o.Ctx.ResponseWriter.WriteHeader(422)
|
|
||||||
o.Data["json"] = map[string]string{"error": err.Error()}
|
|
||||||
o.ServeJSON()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
body, _ := io.ReadAll(resp.Body)
|
|
||||||
var result map[string]interface{}
|
|
||||||
// Unmarshal: string → []byte → object
|
|
||||||
err = json.Unmarshal(body, &result)
|
|
||||||
if err != nil {
|
|
||||||
o.Ctx.ResponseWriter.WriteHeader(403)
|
|
||||||
o.Data["json"] = map[string]string{"error": err.Error()}
|
|
||||||
o.ServeJSON()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
o.Data["json"] = result
|
|
||||||
o.ServeJSON()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
o.Ctx.ResponseWriter.WriteHeader(403)
|
|
||||||
o.Data["json"] = map[string]string{"error": "Query error"}
|
|
||||||
o.ServeJSON()
|
|
||||||
}
|
|
||||||
|
|
||||||
// LogsStreamHandler streams Loki logs over WebSocket.
|
|
||||||
//
|
|
||||||
// The client sends one JSON message with the same format as GetLogs:
|
|
||||||
//
|
|
||||||
// {"start": "<unix-seconds>", "label1": "val1", ...}
|
|
||||||
//
|
|
||||||
// The server connects to Loki's /loki/api/v1/tail WebSocket endpoint and
|
|
||||||
// forwards every message it receives until the client disconnects.
|
|
||||||
func LogsStreamHandler(w http.ResponseWriter, r *http.Request) {
|
|
||||||
fmt.Println("LogsStreamHandler")
|
|
||||||
execID := strings.TrimSuffix(
|
|
||||||
strings.TrimPrefix(r.URL.Path, "/oc/logs/"),
|
|
||||||
"",
|
|
||||||
)
|
|
||||||
conn, err := wsUpgrader.Upgrade(w, r, nil)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println("LogsStreamHandler", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer conn.Close()
|
|
||||||
/*
|
|
||||||
var query map[string]interface{}
|
|
||||||
if err := conn.ReadJSON(&query); err != nil {
|
|
||||||
fmt.Println("LogsStreamHandler ReadJSON", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
|
|
||||||
start := time.Now().UTC().UnixNano()
|
|
||||||
labels := []string{
|
|
||||||
"workflow_execution_id=\"" + execID + "\"",
|
|
||||||
}
|
|
||||||
fmt.Println("LOKI START", start, labels)
|
|
||||||
// Build Loki tail WS URL (http→ws, https→wss).
|
|
||||||
lokiBase := config.GetConfig().LokiUrl
|
|
||||||
lokiBase = strings.Replace(lokiBase, "https://", "wss://", 1)
|
|
||||||
lokiBase = strings.Replace(lokiBase, "http://", "ws://", 1)
|
|
||||||
lokiURL := lokiBase + "/loki/api/v1/tail?" + url.Values{
|
|
||||||
"query": {"{" + strings.Join(labels, ", ") + "}"},
|
|
||||||
"start": {fmt.Sprintf("%v", start)},
|
|
||||||
}.Encode()
|
|
||||||
|
|
||||||
headers := http.Header{}
|
|
||||||
headers.Set("X-Scope-OrgID", "1")
|
|
||||||
|
|
||||||
lokiConn, resp, err := gorillaws.DefaultDialer.Dial(lokiURL, headers)
|
|
||||||
fmt.Println("LOKI LISTEN", lokiBase, err)
|
|
||||||
if err != nil {
|
|
||||||
if resp != nil {
|
|
||||||
body, _ := io.ReadAll(resp.Body)
|
|
||||||
fmt.Printf("Handshake failed: status=%d body=%s", resp.StatusCode, string(body))
|
|
||||||
}
|
|
||||||
_ = conn.WriteJSON(map[string]string{"error": "loki: " + err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer lokiConn.Close()
|
|
||||||
|
|
||||||
errCh := make(chan error, 2)
|
|
||||||
|
|
||||||
// Forward Loki → client.
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
_, msg, err := lokiConn.ReadMessage()
|
|
||||||
if err != nil {
|
|
||||||
errCh <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
var result map[string]interface{}
|
|
||||||
if json.Unmarshal(msg, &result) == nil {
|
|
||||||
fmt.Println(result)
|
|
||||||
if err := conn.WriteJSON(result); err != nil {
|
|
||||||
errCh <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Detect client disconnect (read pump).
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
if _, _, err := conn.ReadMessage(); err != nil {
|
|
||||||
errCh <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
<-errCh
|
|
||||||
}
|
|
||||||
@@ -1,358 +0,0 @@
|
|||||||
package controllers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"oc-scheduler/infrastructure"
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
oclib "cloud.o-forge.io/core/oc-lib"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
beego "github.com/beego/beego/v2/server/web"
|
|
||||||
"github.com/google/uuid"
|
|
||||||
gorillaws "github.com/gorilla/websocket"
|
|
||||||
)
|
|
||||||
|
|
||||||
var orderCollection = oclib.LibDataEnum(oclib.ORDER)
|
|
||||||
var logger = oclib.GetLogger()
|
|
||||||
|
|
||||||
// Operations about workflow
|
|
||||||
type WorkflowSchedulerController struct {
|
|
||||||
beego.Controller
|
|
||||||
}
|
|
||||||
|
|
||||||
var wsUpgrader = gorillaws.Upgrader{
|
|
||||||
CheckOrigin: func(r *http.Request) bool { return true },
|
|
||||||
}
|
|
||||||
|
|
||||||
var schedulerMu sync.RWMutex
|
|
||||||
var scheduler = map[string]*infrastructure.WorkflowSchedule{}
|
|
||||||
|
|
||||||
func realPushCheckfunc(ctx context.Context, conn *gorillaws.Conn, req *tools.APIRequest, user string, ws infrastructure.WorkflowSchedule,
|
|
||||||
executionsID string, wfID string, scheduled bool, asap bool, preemption bool, reschedule bool) (bool, error) {
|
|
||||||
// If we already have draft bookings for this session and we're about to
|
|
||||||
// re-check (timer refresh or planner update), remove the old drafts first
|
|
||||||
// so the planner doesn't treat our own previous reservations as conflicts.
|
|
||||||
if reschedule && scheduled {
|
|
||||||
infrastructure.CleanupSession(executionsID, req)
|
|
||||||
scheduled = false
|
|
||||||
}
|
|
||||||
workflowScheduler := ws
|
|
||||||
|
|
||||||
schedulerMu.Lock()
|
|
||||||
if scheduler[user] != nil {
|
|
||||||
workflowScheduler = *scheduler[user]
|
|
||||||
}
|
|
||||||
schedulerMu.Unlock()
|
|
||||||
result, checkErr := workflowScheduler.Check(wfID, asap, preemption, req)
|
|
||||||
fmt.Println("CHECK", checkErr)
|
|
||||||
if checkErr != nil {
|
|
||||||
return scheduled, checkErr
|
|
||||||
}
|
|
||||||
if result.Available && reschedule {
|
|
||||||
workflowScheduler.Start = result.Start
|
|
||||||
if result.End != nil {
|
|
||||||
workflowScheduler.End = result.End
|
|
||||||
}
|
|
||||||
_, _, execs, purchases, bookings, err := workflowScheduler.GetBuyAndBook(wfID, req)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println("GetBuyAndBook", err)
|
|
||||||
return scheduled, err
|
|
||||||
}
|
|
||||||
infrastructure.UpsertSessionDrafts(executionsID, execs, purchases, bookings, req)
|
|
||||||
scheduled = true
|
|
||||||
delay := workflowScheduler.Start.UTC().Add(-(1 * time.Minute)).Sub(time.Now().UTC())
|
|
||||||
go func() {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
infrastructure.CleanupSession(executionsID, req)
|
|
||||||
// Session closed before timer fired — nothing to do, CleanupSession
|
|
||||||
// has already run (or will run) in the defer of CheckStreamHandler.
|
|
||||||
return
|
|
||||||
case <-time.After(delay):
|
|
||||||
realPushCheckfunc(ctx, conn, req, user, ws, executionsID, wfID, scheduled, asap, preemption, true)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
result.SchedulingID = executionsID
|
|
||||||
fmt.Println(result)
|
|
||||||
return scheduled, conn.WriteJSON(result)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckStreamHandler is the WebSocket handler for slot availability checking.
|
|
||||||
// Query params: as_possible=true, preemption=true
|
|
||||||
func CheckStreamHandler(w http.ResponseWriter, r *http.Request) {
|
|
||||||
fmt.Println("qksbdkqbsdkh")
|
|
||||||
var err error
|
|
||||||
wfID := strings.TrimSuffix(
|
|
||||||
strings.TrimPrefix(r.URL.Path, "/oc/check/"),
|
|
||||||
"",
|
|
||||||
)
|
|
||||||
|
|
||||||
q := r.URL.Query()
|
|
||||||
asap := q.Get("as_possible") == "true"
|
|
||||||
preemption := q.Get("preemption") == "true"
|
|
||||||
|
|
||||||
user, peerID, groups := oclib.ExtractTokenInfoWs(*r)
|
|
||||||
req := &tools.APIRequest{
|
|
||||||
Username: user,
|
|
||||||
PeerID: peerID,
|
|
||||||
Groups: groups,
|
|
||||||
Caller: nil,
|
|
||||||
Admin: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
watchedPeers, err := infrastructure.GetWorkflowPeerIDs(wfID, req)
|
|
||||||
fmt.Println("Watched peers for workflow", wfID, ":", watchedPeers, err)
|
|
||||||
if err != nil {
|
|
||||||
http.Error(w, `{"code":404,"error":"`+err.Error()+`"}`, http.StatusNotFound)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
conn, err := wsUpgrader.Upgrade(w, r, nil)
|
|
||||||
fmt.Println("Upgrade :", err)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var ws infrastructure.WorkflowSchedule
|
|
||||||
if err := conn.ReadJSON(&ws); err != nil {
|
|
||||||
fmt.Println("ReadJSON :", err)
|
|
||||||
conn.Close()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Allow the initial JSON to override the query-param mode.
|
|
||||||
if ws.Asap != nil {
|
|
||||||
asap = *ws.Asap
|
|
||||||
}
|
|
||||||
if ws.Preemption != nil {
|
|
||||||
preemption = *ws.Preemption
|
|
||||||
}
|
|
||||||
|
|
||||||
plannerCh, plannerUnsub := infrastructure.SubscribePlannerUpdates(watchedPeers)
|
|
||||||
wfCh, wfUnsub := infrastructure.SubscribeWorkflowUpdates(wfID)
|
|
||||||
|
|
||||||
executionsID := uuid.New().String()
|
|
||||||
ownedPeers := infrastructure.RequestPlannerRefresh(watchedPeers, executionsID)
|
|
||||||
|
|
||||||
self, err := oclib.GetMySelf()
|
|
||||||
if err != nil || self == nil {
|
|
||||||
logger.Err(err).Msg("could not resolve self peer")
|
|
||||||
conn.Close()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
selfPeerID := self.PeerID
|
|
||||||
|
|
||||||
scheduled := false
|
|
||||||
confirmed := false
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer func() {
|
|
||||||
cancel()
|
|
||||||
conn.Close()
|
|
||||||
plannerUnsub()
|
|
||||||
wfUnsub()
|
|
||||||
infrastructure.ReleaseRefreshOwnership(ownedPeers, executionsID)
|
|
||||||
if !confirmed {
|
|
||||||
infrastructure.CleanupSession(executionsID, req)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
pushCheck := realPushCheckfunc
|
|
||||||
if scheduled, err = pushCheck(ctx, conn, req, user, ws, executionsID, wfID, scheduled, asap, preemption, true); err != nil {
|
|
||||||
fmt.Println("UPDATE CONFIRM FIRST scheduled", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
updateCh := make(chan infrastructure.WorkflowSchedule, 1)
|
|
||||||
closeCh := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
defer close(closeCh)
|
|
||||||
for {
|
|
||||||
var updated infrastructure.WorkflowSchedule
|
|
||||||
if err := conn.ReadJSON(&updated); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case updateCh <- updated:
|
|
||||||
default:
|
|
||||||
<-updateCh
|
|
||||||
updateCh <- updated
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case updated := <-updateCh:
|
|
||||||
fmt.Println("updated FOUND ", updated)
|
|
||||||
workflowScheduler := ws
|
|
||||||
schedulerMu.Lock()
|
|
||||||
if scheduler[user] != nil {
|
|
||||||
workflowScheduler = *scheduler[user]
|
|
||||||
}
|
|
||||||
schedulerMu.Unlock()
|
|
||||||
|
|
||||||
if updated.Confirm {
|
|
||||||
// Subscribe BEFORE calling Schedule to avoid missing the notification.
|
|
||||||
confirmCh, confirmUnsub := infrastructure.SubscribeSessionConfirmation(executionsID)
|
|
||||||
defer confirmUnsub()
|
|
||||||
|
|
||||||
workflowScheduler.UUID = executionsID
|
|
||||||
_, _, _, schedErr := infrastructure.Schedule(&workflowScheduler, wfID, req)
|
|
||||||
if schedErr != nil {
|
|
||||||
infrastructure.CleanupSession(executionsID, req)
|
|
||||||
_ = conn.WriteJSON(map[string]interface{}{
|
|
||||||
"error": schedErr.Error(),
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
fmt.Println("UPDATE CONFIRM — waiting for execution confirmation")
|
|
||||||
select {
|
|
||||||
case <-confirmCh:
|
|
||||||
fmt.Println("UPDATE CONFIRM done")
|
|
||||||
confirmed = true
|
|
||||||
_ = conn.WriteJSON(map[string]interface{}{
|
|
||||||
"confirmed": true,
|
|
||||||
"scheduling_id": executionsID,
|
|
||||||
})
|
|
||||||
case <-time.After(60 * time.Second):
|
|
||||||
_ = conn.WriteJSON(map[string]interface{}{
|
|
||||||
"confirmed": false,
|
|
||||||
"error": "confirmation timeout: scheduling accepted but peers did not confirm in time",
|
|
||||||
})
|
|
||||||
case <-ctx.Done():
|
|
||||||
// client disconnected before confirmation
|
|
||||||
}
|
|
||||||
if !confirmed {
|
|
||||||
infrastructure.CleanupSession(executionsID, req)
|
|
||||||
fmt.Println("UPDATE CONFIRM not done")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Detect mode change before updating local vars.
|
|
||||||
modeChanged := (updated.Asap != nil && *updated.Asap != asap) ||
|
|
||||||
(updated.Preemption != nil && *updated.Preemption != preemption)
|
|
||||||
if updated.Asap != nil {
|
|
||||||
asap = *updated.Asap
|
|
||||||
}
|
|
||||||
if updated.Preemption != nil {
|
|
||||||
preemption = *updated.Preemption
|
|
||||||
}
|
|
||||||
changed := modeChanged ||
|
|
||||||
updated.Cron != workflowScheduler.Cron ||
|
|
||||||
!updated.Start.Equal(workflowScheduler.Start) ||
|
|
||||||
updated.DurationS != workflowScheduler.DurationS ||
|
|
||||||
(updated.End == nil) != (workflowScheduler.End == nil) ||
|
|
||||||
(updated.End != nil && workflowScheduler.End != nil && !updated.End.Equal(*workflowScheduler.End)) ||
|
|
||||||
updated.BookingMode != workflowScheduler.BookingMode ||
|
|
||||||
!reflect.DeepEqual(updated.SelectedBillingStrategy, workflowScheduler.SelectedBillingStrategy) ||
|
|
||||||
!reflect.DeepEqual(updated.SelectedInstances, workflowScheduler.SelectedInstances) ||
|
|
||||||
!reflect.DeepEqual(updated.SelectedPartnerships, workflowScheduler.SelectedPartnerships) ||
|
|
||||||
!reflect.DeepEqual(updated.SelectedBuyings, workflowScheduler.SelectedBuyings) ||
|
|
||||||
!reflect.DeepEqual(updated.SelectedStrategies, workflowScheduler.SelectedStrategies) ||
|
|
||||||
!reflect.DeepEqual(updated.SelectedEmbeddedStorages, workflowScheduler.SelectedEmbeddedStorages)
|
|
||||||
|
|
||||||
infrastructure.CleanupSession(executionsID, req)
|
|
||||||
|
|
||||||
schedulerMu.Lock()
|
|
||||||
scheduler[user] = &updated
|
|
||||||
schedulerMu.Unlock()
|
|
||||||
|
|
||||||
if scheduled, err = pushCheck(ctx, conn, req, user, ws, executionsID, wfID, scheduled, asap, preemption, changed || !scheduled); err != nil {
|
|
||||||
fmt.Println("UPDATE SCHEDULERD", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
case remotePeerID := <-plannerCh:
|
|
||||||
workflowScheduler := ws
|
|
||||||
schedulerMu.Lock()
|
|
||||||
if scheduler[user] != nil {
|
|
||||||
workflowScheduler = *scheduler[user]
|
|
||||||
}
|
|
||||||
schedulerMu.Unlock()
|
|
||||||
if remotePeerID == selfPeerID {
|
|
||||||
if scheduled {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
result, checkErr := workflowScheduler.Check(wfID, asap, preemption, req)
|
|
||||||
if checkErr == nil {
|
|
||||||
result.SchedulingID = executionsID
|
|
||||||
_ = conn.WriteJSON(result)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if scheduled, err = pushCheck(ctx, conn, req, user, ws, executionsID, wfID, scheduled, asap, preemption, scheduled); err != nil {
|
|
||||||
fmt.Println("UPDATE SCHEDULERD PLAN", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
case <-wfCh:
|
|
||||||
if newPeers, err := infrastructure.GetWorkflowPeerIDs(wfID, req); err == nil {
|
|
||||||
plannerUnsub()
|
|
||||||
watchedPeers = newPeers
|
|
||||||
plannerCh, plannerUnsub = infrastructure.SubscribePlannerUpdates(newPeers)
|
|
||||||
newOwned := infrastructure.RequestPlannerRefresh(newPeers, executionsID)
|
|
||||||
ownedPeers = append(ownedPeers, newOwned...)
|
|
||||||
}
|
|
||||||
if scheduled, err = pushCheck(ctx, conn, req, user, ws, executionsID, wfID, scheduled, asap, preemption, false); err != nil {
|
|
||||||
fmt.Println("UPDATE WORKFLOW", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
case <-closeCh:
|
|
||||||
fmt.Println("UPDATE Close ? ")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Title UnSchedule
|
|
||||||
// @Description unschedule a workflow execution: deletes its bookings on all peers then deletes the execution.
|
|
||||||
// @Param id path string true "execution id"
|
|
||||||
// @Success 200 {object} map[string]interface{}
|
|
||||||
// @router /:id [delete]
|
|
||||||
func (o *WorkflowSchedulerController) UnSchedule() {
|
|
||||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
|
||||||
executionID := o.Ctx.Input.Param(":id")
|
|
||||||
req := &tools.APIRequest{
|
|
||||||
Username: user,
|
|
||||||
PeerID: peerID,
|
|
||||||
Groups: groups,
|
|
||||||
Admin: true,
|
|
||||||
}
|
|
||||||
if err := infrastructure.UnscheduleExecution(executionID, req); err != nil {
|
|
||||||
o.Data["json"] = map[string]interface{}{"code": 404, "error": err.Error()}
|
|
||||||
} else {
|
|
||||||
o.Data["json"] = map[string]interface{}{"code": 200, "error": ""}
|
|
||||||
}
|
|
||||||
o.ServeJSON()
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Title SearchScheduledDraftOrder
|
|
||||||
// @Description search draft order for a workflow
|
|
||||||
// @Param id path string true "id execution"
|
|
||||||
// @Param offset query string false "pagination offset (number of results to skip)"
// @Param limit query string false "maximum number of results to return"
|
|
||||||
// @Success 200 {workspace} models.workspace
|
|
||||||
// @router /order/:id [get]
|
|
||||||
func (o *WorkflowSchedulerController) SearchScheduledDraftOrder() {
|
|
||||||
offset, _ := strconv.Atoi(o.Ctx.Input.Query("offset"))
|
|
||||||
limit, _ := strconv.Atoi(o.Ctx.Input.Query("limit"))
|
|
||||||
_, peerID, _ := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
|
||||||
id := o.Ctx.Input.Param(":id")
|
|
||||||
filter := &dbs.Filters{
|
|
||||||
And: map[string][]dbs.Filter{
|
|
||||||
"workflow_id": {{Operator: dbs.EQUAL.String(), Value: id}},
|
|
||||||
"order_by": {{Operator: dbs.EQUAL.String(), Value: peerID}},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
o.Data["json"] = oclib.NewRequestAdmin(orderCollection, nil).Search(filter, "", true, int64(offset), int64(limit))
|
|
||||||
o.ServeJSON()
|
|
||||||
}
|
|
||||||
@@ -1,254 +0,0 @@
|
|||||||
package controllers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
oclib "cloud.o-forge.io/core/oc-lib"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/booking"
|
|
||||||
libutils "cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
|
||||||
)
|
|
||||||
|
|
||||||
// streamMsg is the envelope pushed over every stream WebSocket.
|
|
||||||
// Type discriminates the message kind; Data carries the payload (a list for
// "snapshot", a single object for "update"/"delete"); Deleted is set only on
// "delete" messages.
type streamMsg struct {
	Type    string      `json:"type"` // "snapshot" | "update" | "delete"
	Data    interface{} `json:"data,omitempty"`
	Deleted bool        `json:"deleted,omitempty"`
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Booking stream
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// BookingStreamHandler opens a WebSocket that:
|
|
||||||
// 1. sends an immediate snapshot of matching bookings ("snapshot")
|
|
||||||
// 2. pushes each subsequent create/update/delete as an individual "update" or
|
|
||||||
// "delete" message.
|
|
||||||
//
|
|
||||||
// Query params (all optional):
|
|
||||||
//
|
|
||||||
// executions_id — filter to a specific scheduling session
|
|
||||||
// is_draft — "true" | "false" (omit = non-draft)
|
|
||||||
// start_date — YYYY-MM-DD (expected_start_date >=)
|
|
||||||
// end_date — YYYY-MM-DD (expected_start_date <=)
|
|
||||||
func BookingStreamHandler(w http.ResponseWriter, r *http.Request) {
	user, peerID, groups := oclib.ExtractTokenInfoWs(*r)

	// Parse the optional query-string filters. ParseInLocation errors are
	// ignored on purpose: an absent/malformed date yields the zero time.Time,
	// which the filters below treat as "no bound".
	q := r.URL.Query()
	executionID := q.Get("execution_id")   // NOTE(review): not listed in the doc comment above — presumably filters a single booking by id; confirm with callers
	executionsID := q.Get("executions_id") // scheduling-session filter
	isDraftStr := q.Get("is_draft")
	onlyDraft := isDraftStr == "true"
	filterDraft := isDraftStr != "" // whether the caller wants draft filtering at all
	startDate, _ := time.ParseInLocation("2006-01-02", q.Get("start_date"), time.UTC)
	endDate, _ := time.ParseInLocation("2006-01-02", q.Get("end_date"), time.UTC)

	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		// Upgrade already wrote an HTTP error to the client.
		return
	}
	defer conn.Close()

	// matchesFilter applies the same query-string filters to live change
	// events that the DB filters below apply to the snapshot.
	matchesFilter := func(b *booking.Booking) bool {
		if executionID != "" && b.GetID() != executionID {
			return false
		}
		if executionsID != "" && b.ExecutionsID != executionsID {
			return false
		}
		if filterDraft && b.IsDraft != onlyDraft {
			return false
		}
		if !startDate.IsZero() && b.ExpectedStartDate.Before(startDate) {
			return false
		}
		if !endDate.IsZero() && b.ExpectedStartDate.After(endDate) {
			return false
		}
		return true
	}

	// Build snapshot filters (DB-side equivalent of matchesFilter).
	andF := map[string][]dbs.Filter{}
	if executionID != "" {
		andF["id"] = []dbs.Filter{{Operator: dbs.EQUAL.String(), Value: executionID}}
	}
	if executionsID != "" {
		andF["executions_id"] = []dbs.Filter{{Operator: dbs.EQUAL.String(), Value: executionsID}}
	}
	if !startDate.IsZero() {
		andF["expected_start_date"] = append(andF["expected_start_date"],
			dbs.Filter{Operator: "gte", Value: primitive.NewDateTimeFromTime(startDate)})
	}
	if !endDate.IsZero() {
		andF["expected_start_date"] = append(andF["expected_start_date"],
			dbs.Filter{Operator: "lte", Value: primitive.NewDateTimeFromTime(endDate)})
	}
	var snapshotFilter *dbs.Filters
	if len(andF) > 0 {
		snapshotFilter = &dbs.Filters{And: andF}
	}

	// Initial snapshot, capped at 10000 rows. NOTE(review): when is_draft is
	// omitted, the snapshot is queried with onlyDraft=false while live events
	// are not draft-filtered at all — confirm this asymmetry is intended.
	snapshot := oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), user, peerID, groups, nil).
		Search(snapshotFilter, "", onlyDraft, 0, 10000)
	if err := conn.WriteJSON(streamMsg{Type: "snapshot", Data: snapshot.Data}); err != nil {
		return
	}

	// Subscribe to booking change events for the lifetime of the socket.
	changeCh, unsub := libutils.SubscribeChanges(tools.BOOKING)
	defer unsub()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// detect client disconnect
	// ReadMessage blocks until the peer sends something or the connection
	// drops; any error (including normal close) ends the stream loop below.
	closeCh := make(chan struct{})
	go func() {
		defer close(closeCh)
		for {
			if _, _, err := conn.ReadMessage(); err != nil {
				return
			}
		}
	}()

	for {
		select {
		case evt := <-changeCh:
			// Ignore events that are not bookings or don't match the filters.
			b, ok := evt.Object.(*booking.Booking)
			if !ok || !matchesFilter(b) {
				continue
			}
			// Write errors are ignored: a dead socket is detected by the
			// reader goroutine, which closes closeCh.
			if evt.Deleted {
				_ = conn.WriteJSON(streamMsg{Type: "delete", Data: b, Deleted: true})
			} else {
				_ = conn.WriteJSON(streamMsg{Type: "update", Data: b})
			}
		case <-closeCh:
			return
		case <-ctx.Done():
			return
		}
	}
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// WorkflowExecution stream
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// ExecutionStreamHandler opens a WebSocket that:
|
|
||||||
// 1. sends an immediate snapshot of matching executions ("snapshot")
|
|
||||||
// 2. pushes each subsequent create/update/delete as "update" or "delete".
|
|
||||||
//
|
|
||||||
// Query params (all optional):
|
|
||||||
//
|
|
||||||
// executions_id — filter to a specific scheduling session
|
|
||||||
// is_draft — "true" | "false" (omit = non-draft)
|
|
||||||
// start_date — YYYY-MM-DD (execution_date >=)
|
|
||||||
// end_date — YYYY-MM-DD (execution_date <=)
|
|
||||||
func ExecutionStreamHandler(w http.ResponseWriter, r *http.Request) {
|
|
||||||
user, peerID, groups := oclib.ExtractTokenInfoWs(*r)
|
|
||||||
|
|
||||||
q := r.URL.Query()
|
|
||||||
executionID := q.Get("execution_id")
|
|
||||||
executionsID := q.Get("executions_id")
|
|
||||||
isDraftStr := q.Get("is_draft")
|
|
||||||
onlyDraft := isDraftStr == "true"
|
|
||||||
filterDraft := isDraftStr != ""
|
|
||||||
startDate, _ := time.ParseInLocation("2006-01-02", q.Get("start_date"), time.UTC)
|
|
||||||
endDate, _ := time.ParseInLocation("2006-01-02", q.Get("end_date"), time.UTC)
|
|
||||||
|
|
||||||
conn, err := upgrader.Upgrade(w, r, nil)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer conn.Close()
|
|
||||||
|
|
||||||
matchesFilter := func(e *workflow_execution.WorkflowExecution) bool {
|
|
||||||
if executionID != "" && e.GetID() != executionID {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if executionsID != "" && e.ExecutionsID != executionsID {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if filterDraft && e.IsDraft != onlyDraft {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if !startDate.IsZero() && e.ExecDate.Before(startDate) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if !endDate.IsZero() && e.ExecDate.After(endDate) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build snapshot filters
|
|
||||||
andF := map[string][]dbs.Filter{}
|
|
||||||
if executionID != "" {
|
|
||||||
andF["id"] = []dbs.Filter{{Operator: dbs.EQUAL.String(), Value: executionID}}
|
|
||||||
}
|
|
||||||
if executionsID != "" {
|
|
||||||
andF["executions_id"] = []dbs.Filter{{Operator: dbs.EQUAL.String(), Value: executionsID}}
|
|
||||||
}
|
|
||||||
if !startDate.IsZero() {
|
|
||||||
andF["execution_date"] = append(andF["execution_date"],
|
|
||||||
dbs.Filter{Operator: "gte", Value: primitive.NewDateTimeFromTime(startDate)})
|
|
||||||
}
|
|
||||||
if !endDate.IsZero() {
|
|
||||||
andF["execution_date"] = append(andF["execution_date"],
|
|
||||||
dbs.Filter{Operator: "lte", Value: primitive.NewDateTimeFromTime(endDate)})
|
|
||||||
}
|
|
||||||
var snapshotFilter *dbs.Filters
|
|
||||||
if len(andF) > 0 {
|
|
||||||
snapshotFilter = &dbs.Filters{And: andF}
|
|
||||||
}
|
|
||||||
|
|
||||||
snapshot := oclib.NewRequest(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), user, peerID, groups, nil).
|
|
||||||
Search(snapshotFilter, "", onlyDraft, 0, 10000)
|
|
||||||
if err := conn.WriteJSON(streamMsg{Type: "snapshot", Data: snapshot.Data}); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
changeCh, unsub := libutils.SubscribeChanges(tools.WORKFLOW_EXECUTION)
|
|
||||||
defer unsub()
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
closeCh := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
defer close(closeCh)
|
|
||||||
for {
|
|
||||||
if _, _, err := conn.ReadMessage(); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case evt := <-changeCh:
|
|
||||||
e, ok := evt.Object.(*workflow_execution.WorkflowExecution)
|
|
||||||
fmt.Println("CHANGE!", e, ok, matchesFilter(e))
|
|
||||||
if !ok || !matchesFilter(e) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if evt.Deleted {
|
|
||||||
_ = conn.WriteJSON(streamMsg{Type: "delete", Data: e, Deleted: true})
|
|
||||||
} else {
|
|
||||||
_ = conn.WriteJSON(streamMsg{Type: "update", Data: e})
|
|
||||||
}
|
|
||||||
case <-closeCh:
|
|
||||||
return
|
|
||||||
case <-ctx.Done():
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -15,10 +15,7 @@ type VersionController struct {
|
|||||||
// @Success 200
|
// @Success 200
|
||||||
// @router / [get]
|
// @router / [get]
|
||||||
func (c *VersionController) GetAll() {
|
func (c *VersionController) GetAll() {
|
||||||
c.Data["json"] = map[string]string{
|
c.Data["json"] = map[string]string{"version": "1"}
|
||||||
"service": "oc-scheduler",
|
|
||||||
"version": "1",
|
|
||||||
}
|
|
||||||
c.ServeJSON()
|
c.ServeJSON()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
package controllers
|
package controllers
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"strconv"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
oclib "cloud.o-forge.io/core/oc-lib"
|
oclib "cloud.o-forge.io/core/oc-lib"
|
||||||
@@ -22,8 +21,6 @@ type WorkflowExecutionController struct {
|
|||||||
// @Param start_date path string true "the word search you want to get"
|
// @Param start_date path string true "the word search you want to get"
|
||||||
// @Param end_date path string true "the word search you want to get"
|
// @Param end_date path string true "the word search you want to get"
|
||||||
// @Param is_draft query string false "draft wished"
|
// @Param is_draft query string false "draft wished"
|
||||||
// @Param offset query string false
|
|
||||||
// @Param limit query string false
|
|
||||||
// @Success 200 {workspace} models.workspace
|
// @Success 200 {workspace} models.workspace
|
||||||
// @router /search/:start_date/:end_date [get]
|
// @router /search/:start_date/:end_date [get]
|
||||||
func (o *WorkflowExecutionController) SearchPerDate() {
|
func (o *WorkflowExecutionController) SearchPerDate() {
|
||||||
@@ -36,12 +33,10 @@ func (o *WorkflowExecutionController) SearchPerDate() {
|
|||||||
* The search function returns a list of data that matches the filter
|
* The search function returns a list of data that matches the filter
|
||||||
* The data is then returned as a json object
|
* The data is then returned as a json object
|
||||||
*/
|
*/
|
||||||
// user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||||
// store and return Id or post with UUID
|
// store and return Id or post with UUID
|
||||||
offset, _ := strconv.Atoi(o.Ctx.Input.Query("offset"))
|
start_date, _ := time.Parse("2006-01-02", o.Ctx.Input.Param(":start_date"))
|
||||||
limit, _ := strconv.Atoi(o.Ctx.Input.Query("limit"))
|
end_date, _ := time.Parse("2006-01-02", o.Ctx.Input.Param(":end_date"))
|
||||||
start_date, _ := time.ParseInLocation("2006-01-02", o.Ctx.Input.Param(":start_date"), time.UTC)
|
|
||||||
end_date, _ := time.ParseInLocation("2006-01-02", o.Ctx.Input.Param(":end_date"), time.UTC)
|
|
||||||
sd := primitive.NewDateTimeFromTime(start_date)
|
sd := primitive.NewDateTimeFromTime(start_date)
|
||||||
ed := primitive.NewDateTimeFromTime(end_date)
|
ed := primitive.NewDateTimeFromTime(end_date)
|
||||||
f := dbs.Filters{
|
f := dbs.Filters{
|
||||||
@@ -50,25 +45,19 @@ func (o *WorkflowExecutionController) SearchPerDate() {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
isDraft := o.Ctx.Input.Query("is_draft")
|
isDraft := o.Ctx.Input.Query("is_draft")
|
||||||
// o.Data["json"] = oclib.NewRequest(collection, user, peerID, groups, nil).Search(&f, "", isDraft == "true")
|
o.Data["json"] = oclib.NewRequest(collection, user, peerID, groups, nil).Search(&f, "", isDraft == "true")
|
||||||
o.Data["json"] = oclib.NewRequestAdmin(collection, nil).Search(&f, "", isDraft == "true", int64(offset), int64(limit))
|
|
||||||
|
|
||||||
o.ServeJSON()
|
o.ServeJSON()
|
||||||
}
|
}
|
||||||
|
|
||||||
// @Title GetAll
|
// @Title GetAll
|
||||||
// @Description find workflow by workflowid
|
// @Description find workflow by workflowid
|
||||||
// @Param is_draft query string false "draft wished"
|
// @Param is_draft query string false "draft wished"
|
||||||
// @Param offset query string false
|
|
||||||
// @Param limit query string false
|
|
||||||
// @Success 200 {workflow} models.workflow
|
// @Success 200 {workflow} models.workflow
|
||||||
// @router / [get]
|
// @router / [get]
|
||||||
func (o *WorkflowExecutionController) GetAll() {
|
func (o *WorkflowExecutionController) GetAll() {
|
||||||
offset, _ := strconv.Atoi(o.Ctx.Input.Query("offset"))
|
|
||||||
limit, _ := strconv.Atoi(o.Ctx.Input.Query("limit"))
|
|
||||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||||
isDraft := o.Ctx.Input.Query("is_draft")
|
isDraft := o.Ctx.Input.Query("is_draft")
|
||||||
o.Data["json"] = oclib.NewRequest(collection, user, peerID, groups, nil).LoadAll(isDraft == "true", int64(offset), int64(limit))
|
o.Data["json"] = oclib.NewRequest(collection, user, peerID, groups, nil).LoadAll(isDraft == "true")
|
||||||
o.ServeJSON()
|
o.ServeJSON()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -78,34 +67,9 @@ func (o *WorkflowExecutionController) GetAll() {
|
|||||||
// @Success 200 {workflow} models.workflow
|
// @Success 200 {workflow} models.workflow
|
||||||
// @router /:id [get]
|
// @router /:id [get]
|
||||||
func (o *WorkflowExecutionController) Get() {
|
func (o *WorkflowExecutionController) Get() {
|
||||||
//user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
|
||||||
id := o.Ctx.Input.Param(":id")
|
|
||||||
// o.Data["json"] = oclib.NewRequest(collection, user, peerID, groups, nil).LoadOne(id)
|
|
||||||
o.Data["json"] = oclib.NewRequestAdmin(collection, nil).LoadOne(id)
|
|
||||||
o.ServeJSON()
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Title Delete
|
|
||||||
// @Description find workflow by workflowid
|
|
||||||
// @Param id path string true "the workflowid you want to get"
|
|
||||||
// @Success 200 {workflow} models.workflow
|
|
||||||
// @router /:id [delete]
|
|
||||||
func (o *WorkflowExecutionController) Delete() {
|
|
||||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||||
id := o.Ctx.Input.Param(":id")
|
id := o.Ctx.Input.Param(":id")
|
||||||
data := oclib.NewRequest(collection, user, peerID, groups, nil).LoadOne(id)
|
o.Data["json"] = oclib.NewRequest(collection, user, peerID, groups, nil).LoadOne(id)
|
||||||
if b := data.ToBookings(); b != nil {
|
|
||||||
bAccess := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.BOOKING), nil)
|
|
||||||
s := bAccess.Search(&dbs.Filters{
|
|
||||||
And: map[string][]dbs.Filter{
|
|
||||||
"execution_id": {{Operator: dbs.EQUAL.String(), Value: b.ExecutionID}},
|
|
||||||
},
|
|
||||||
}, "", false, 0, 10000)
|
|
||||||
for _, ss := range s.Data {
|
|
||||||
bAccess.DeleteOne(ss.GetID())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
o.Data["json"] = oclib.NewRequest(collection, user, peerID, groups, nil).DeleteOne(id)
|
|
||||||
o.ServeJSON()
|
o.ServeJSON()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -113,16 +77,12 @@ func (o *WorkflowExecutionController) Delete() {
|
|||||||
// @Description find compute by key word
|
// @Description find compute by key word
|
||||||
// @Param search path string true "the search you want to get"
|
// @Param search path string true "the search you want to get"
|
||||||
// @Param is_draft query string false "draft wished"
|
// @Param is_draft query string false "draft wished"
|
||||||
// @Param offset query string false
|
|
||||||
// @Param limit query string false
|
|
||||||
// @Success 200 {compute} models.compute
|
// @Success 200 {compute} models.compute
|
||||||
// @router /search/:search [get]
|
// @router /search/:search [get]
|
||||||
func (o *WorkflowExecutionController) Search() {
|
func (o *WorkflowExecutionController) Search() {
|
||||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||||
offset, _ := strconv.Atoi(o.Ctx.Input.Query("offset"))
|
|
||||||
limit, _ := strconv.Atoi(o.Ctx.Input.Query("limit"))
|
|
||||||
isDraft := o.Ctx.Input.Query("is_draft")
|
isDraft := o.Ctx.Input.Query("is_draft")
|
||||||
search := o.Ctx.Input.Param(":search")
|
search := o.Ctx.Input.Param(":search")
|
||||||
o.Data["json"] = oclib.NewRequest(collection, user, peerID, groups, nil).Search(nil, search, isDraft == "true", int64(offset), int64(limit))
|
o.Data["json"] = oclib.NewRequest(collection, user, peerID, groups, nil).Search(nil, search, isDraft == "true")
|
||||||
o.ServeJSON()
|
o.ServeJSON()
|
||||||
}
|
}
|
||||||
|
|||||||
122
controllers/workflow_sheduler.go
Normal file
122
controllers/workflow_sheduler.go
Normal file
@@ -0,0 +1,122 @@
|
|||||||
|
package controllers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
oclib "cloud.o-forge.io/core/oc-lib"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
|
beego "github.com/beego/beego/v2/server/web"
|
||||||
|
)
|
||||||
|
|
||||||
|
var orderCollection = oclib.LibDataEnum(oclib.ORDER)
|
||||||
|
|
||||||
|
// Operations about workflow
|
||||||
|
type WorkflowSchedulerController struct {
|
||||||
|
beego.Controller
|
||||||
|
}
|
||||||
|
|
||||||
|
// @Title Schedule
|
||||||
|
// @Description schedule workflow
|
||||||
|
// @Param id path string true "id execution"
|
||||||
|
// @Param body body models.compute true "The compute content"
|
||||||
|
// @Success 200 {workspace} models.workspace
|
||||||
|
// @router /:id [post]
|
||||||
|
func (o *WorkflowSchedulerController) Schedule() {
|
||||||
|
code := 200
|
||||||
|
e := ""
|
||||||
|
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||||
|
id := o.Ctx.Input.Param(":id")
|
||||||
|
var resp *workflow_execution.WorkflowSchedule
|
||||||
|
json.Unmarshal(o.Ctx.Input.CopyBody(100000), &resp)
|
||||||
|
caller := tools.NewHTTPCaller(map[tools.DataType]map[tools.METHOD]string{ // paths to call other OC services
|
||||||
|
tools.PEER: {
|
||||||
|
tools.POST: "/status/",
|
||||||
|
},
|
||||||
|
tools.BOOKING: {
|
||||||
|
tools.GET: "/booking/check/:id/:start_date/:end_date",
|
||||||
|
},
|
||||||
|
})
|
||||||
|
req := oclib.NewRequest(collection, user, peerID, groups, caller)
|
||||||
|
sch, err := req.Schedule(id, resp)
|
||||||
|
if err != nil {
|
||||||
|
filter := &dbs.Filters{
|
||||||
|
And: map[string][]dbs.Filter{
|
||||||
|
"workflow_id": {{Operator: dbs.EQUAL.String(), Value: id}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
d := req.Search(filter, "", true)
|
||||||
|
if d.Data != nil {
|
||||||
|
for _, w := range d.Data {
|
||||||
|
req.DeleteOne(w.GetID())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
o.Data["json"] = map[string]interface{}{
|
||||||
|
"data": nil,
|
||||||
|
"code": 409,
|
||||||
|
"error": err.Error(),
|
||||||
|
}
|
||||||
|
o.ServeJSON()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
order, err := req.DraftOrder(sch)
|
||||||
|
fmt.Println("SCHEDULED", order, err)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
for _, w := range sch.WorkflowExecutions {
|
||||||
|
oclib.NewRequest(collection, user, peerID, groups, nil).DeleteOne(w.GetID())
|
||||||
|
}
|
||||||
|
o.Data["json"] = map[string]interface{}{
|
||||||
|
"data": nil,
|
||||||
|
"code": 409,
|
||||||
|
"error": err.Error(),
|
||||||
|
}
|
||||||
|
o.ServeJSON()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
o.Data["json"] = map[string]interface{}{
|
||||||
|
"data": order,
|
||||||
|
"code": code,
|
||||||
|
"error": e,
|
||||||
|
}
|
||||||
|
o.ServeJSON()
|
||||||
|
}
|
||||||
|
|
||||||
|
// @Title UnSchedule
|
||||||
|
// @Description schedule workflow
|
||||||
|
// @Param id path string true "id execution"
|
||||||
|
// @Param body body models.compute true "The compute content"
|
||||||
|
// @Success 200 {workspace} models.workspace
|
||||||
|
// @router /:id [delete]
|
||||||
|
func (o *WorkflowSchedulerController) UnSchedule() {
|
||||||
|
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||||
|
id := o.Ctx.Input.Param(":id")
|
||||||
|
// TODO UNSCHEDULER
|
||||||
|
filter := &dbs.Filters{
|
||||||
|
And: map[string][]dbs.Filter{
|
||||||
|
"workflow_id": {{Operator: dbs.EQUAL.String(), Value: id}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
o.Data["json"] = oclib.NewRequest(collection, user, peerID, groups, nil).Search(filter, "", true)
|
||||||
|
o.ServeJSON()
|
||||||
|
}
|
||||||
|
|
||||||
|
// @Title SearchScheduledDraftOrder
|
||||||
|
// @Description schedule workflow
|
||||||
|
// @Param id path string true "id execution"
|
||||||
|
// @Success 200 {workspace} models.workspace
|
||||||
|
// @router /:id/order [get]
|
||||||
|
func (o *WorkflowSchedulerController) SearchScheduledDraftOrder() {
|
||||||
|
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||||
|
id := o.Ctx.Input.Param(":id")
|
||||||
|
filter := &dbs.Filters{
|
||||||
|
And: map[string][]dbs.Filter{
|
||||||
|
"workflow_id": {{Operator: dbs.EQUAL.String(), Value: id}},
|
||||||
|
"order_by": {{Operator: dbs.EQUAL.String(), Value: peerID}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
o.Data["json"] = oclib.NewRequest(orderCollection, user, peerID, groups, nil).Search(filter, "", true)
|
||||||
|
o.ServeJSON()
|
||||||
|
}
|
||||||
33
docker-compose.base.yml
Normal file
33
docker-compose.base.yml
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
version: '3.4'
|
||||||
|
|
||||||
|
services:
|
||||||
|
mongo:
|
||||||
|
image: 'mongo:latest'
|
||||||
|
networks:
|
||||||
|
- catalog
|
||||||
|
ports:
|
||||||
|
- 27017:27017
|
||||||
|
container_name: mongo
|
||||||
|
volumes:
|
||||||
|
- oc-catalog-data:/data/db
|
||||||
|
- oc-catalog-data:/data/configdb
|
||||||
|
|
||||||
|
mongo-express:
|
||||||
|
image: "mongo-express:latest"
|
||||||
|
restart: always
|
||||||
|
depends_on:
|
||||||
|
- mongo
|
||||||
|
networks:
|
||||||
|
- catalog
|
||||||
|
ports:
|
||||||
|
- 8081:8081
|
||||||
|
environment:
|
||||||
|
- ME_CONFIG_BASICAUTH_USERNAME=test
|
||||||
|
- ME_CONFIG_BASICAUTH_PASSWORD=test
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
oc-catalog-data:
|
||||||
|
|
||||||
|
networks:
|
||||||
|
catalog:
|
||||||
|
# name: catalog
|
||||||
@@ -8,21 +8,16 @@ services:
|
|||||||
labels:
|
labels:
|
||||||
- "traefik.enable=true"
|
- "traefik.enable=true"
|
||||||
- "traefik.http.routers.scheduler.entrypoints=web"
|
- "traefik.http.routers.scheduler.entrypoints=web"
|
||||||
- "traefik.http.routers.scheduler.rule=PathPrefix(`/scheduler`)"
|
- "traefik.http.middlewares.auth.forwardauth.address=http://oc-auth:8080/oc/forward"
|
||||||
- "traefik.http.middlewares.scheduler-rewrite.replacepathregex.regex=^/scheduler(.*)"
|
- "traefik.http.routers.workflow.rule=PathPrefix(/scheduler)"
|
||||||
- "traefik.http.middlewares.scheduler-rewrite.replacepathregex.replacement=/oc$$1"
|
- "traefik.http.routers.scheduler.tls=false"
|
||||||
- "traefik.http.routers.scheduler.middlewares=scheduler-rewrite,auth-scheduler"
|
- "traefik.http.routers.scheduler.middlewares=auth"
|
||||||
- "traefik.http.services.scheduler.loadbalancer.server.port=8080"
|
|
||||||
|
|
||||||
- "traefik.http.middlewares.auth-scheduler.forwardauth.address=http://oc-auth:8080/oc/forward"
|
|
||||||
- "traefik.http.middlewares.auth-scheduler.forwardauth.trustForwardHeader=true"
|
|
||||||
- "traefik.http.middlewares.auth-scheduler.forwardauth.authResponseHeaders=X-Auth-Request-User,X-Auth-Request-Email"
|
|
||||||
ports:
|
ports:
|
||||||
- 8090:8080
|
- 8090:8080
|
||||||
container_name: oc-scheduler
|
container_name: oc-scheduler
|
||||||
networks:
|
networks:
|
||||||
- oc
|
- catalog
|
||||||
|
|
||||||
networks:
|
networks:
|
||||||
oc:
|
catalog:
|
||||||
external: true
|
external: true
|
||||||
@@ -1,13 +1,5 @@
|
|||||||
{
|
{
|
||||||
"MONGO_URL":"mongodb://mongo:27017/",
|
"MONGO_URL":"mongodb://mongo:27017/",
|
||||||
"NATS_URL":"nats://nats:4222",
|
"NATS_URL":"nats://nats:4222",
|
||||||
"MONGO_DATABASE":"DC_myDC",
|
"MONGO_DATABASE":"DC_myDC"
|
||||||
"LOKI_URL": "http://loki:3100",
|
|
||||||
"KUBERNETES_SERVICE_HOST": "kubernetes.default.svc.cluster.local",
|
|
||||||
"KUBERNETES_SERVICE_PORT": "6443",
|
|
||||||
"KUBERNETES_NAMESPACE": "default",
|
|
||||||
"KUBERNETES_IMAGE": "opencloudregistry/oc-monitord",
|
|
||||||
"KUBE_CA": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTnpReU56STVNVEF3SGhjTk1qWXdNekl6TVRNek5URXdXaGNOTXpZd016SXdNVE16TlRFdwpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTnpReU56STVNVEF3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFSSGpYRDVpbnRIYWZWSk5VaDFlRnIxcXBKdFlkUmc5NStKVENEa0tadTIKYjUxRXlKaG1zanRIY3BDUndGL1VGMzlvdzY4TFBUcjBxaUorUHlhQTBLZUtvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVTdWQkNzZVN3ajJ2cmczMFE5UG8vCnV6ZzAvMjR3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUloQUlEOVY2aFlUSS83ZW1hRzU0dDdDWVU3TXFSdDdESUkKNlgvSUwrQ0RLbzlNQWlCdlFEMGJmT0tVWDc4UmRGdUplcEhEdWFUMUExaGkxcWdIUGduM1dZdDBxUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K",
|
|
||||||
"KUBE_CERT": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrVENDQVRlZ0F3SUJBZ0lJUU5KbFNJQUJPMDR3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOemMwTWpjeU9URXdNQjRYRFRJMk1ETXlNekV6TXpVeE1Gb1hEVEkzTURNeQpNekV6TXpVeE1Gb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJMY3Uwb2pUbVg4RFhTQkYKSHZwZDZNVEoyTHdXc1lRTmdZVURXRDhTVERIUWlCczlMZ0x5ZTdOMEFvZk85RkNZVW1HamhiaVd3WFVHR3dGTgpUdlRMU2lXalNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCUlJhRW9wQzc5NGJyTHlnR0g5SVhvbDZTSmlFREFLQmdncWhrak9QUVFEQWdOSUFEQkYKQWlFQWhaRUlrSWV3Y1loL1NmTFVCVjE5MW1CYTNRK0J5S2J5eTVlQmpwL3kzeWtDSUIxWTJicTVOZTNLUUU4RAprNnNzeFJrbjJmN0VoWWVRQU1pUlJ2MjIweDNLCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUzTnpReU56STVNVEF3SGhjTk1qWXdNekl6TVRNek5URXdXaGNOTXpZd016SXdNVE16TlRFdwpXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUzTnpReU56STVNVEF3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFTcTdVTC85MEc1ZmVTaE95NjI3eGFZWlM5dHhFdWFoWFQ3Vk5wZkpQSnMKaEdXd2UxOXdtbXZzdlp6dlNPUWFRSzJaMmttN0hSb1IrNlA1YjIyamczbHVvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVVVXaEtLUXUvZUc2eThvQmgvU0Y2Ckpla2lZaEF3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUloQUk3cGxHczFtV20ySDErbjRobDBNTk13RmZzd0o5ZXIKTzRGVkM0QzhwRG44QWlCN3NZMVFwd2M5VkRUeGNZaGxuZzZNUzRXai85K0lHWjJxcy94UStrMjdTQT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K",
|
|
||||||
"KUBE_DATA": "LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUROZDRnWXd6aVRhK1hwNnFtNVc3SHFzc1JJNkREaUJTbUV2ZHoxZzk3VGxvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFdHk3U2lOT1pmd05kSUVVZStsM294TW5ZdkJheGhBMkJoUU5ZUHhKTU1kQ0lHejB1QXZKNwpzM1FDaDg3MFVKaFNZYU9GdUpiQmRRWWJBVTFPOU10S0pRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo="
|
|
||||||
}
|
}
|
||||||
140
docs/nats.md
140
docs/nats.md
@@ -1,140 +0,0 @@
|
|||||||
# NATS dans oc-scheduler
|
|
||||||
|
|
||||||
## Vue d'ensemble
|
|
||||||
|
|
||||||
`oc-scheduler` utilise NATS comme bus d'événements pour deux objectifs :
|
|
||||||
|
|
||||||
1. **Recevoir les planners** (disponibilité des ressources) publiés par `oc-discovery`.
|
|
||||||
2. **Réagir aux modifications de workflows** pour diffuser un planner actualisé et signaler les streams WebSocket actifs.
|
|
||||||
|
|
||||||
Tout le code NATS se trouve dans `infrastructure/nats.go`.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Canaux écoutés
|
|
||||||
|
|
||||||
### `PROPALGATION_EVENT` — réception des planners
|
|
||||||
|
|
||||||
**Condition d'acceptation :** `resp.FromApp == "oc-discovery"` et `prop.Action == PB_PLANNER`.
|
|
||||||
|
|
||||||
**Ce qui se passe :**
|
|
||||||
- Le payload est désérialisé en `planner.Planner`.
|
|
||||||
- Le champ `peer_id` est extrait pour identifier le pair.
|
|
||||||
- Le planner est stocké dans `PlannerCache[peerID]` via `storePlanner()`.
|
|
||||||
- Si c'est la **première apparition** de ce `peerID` dans le cache, une goroutine de TTL est lancée (voir §TTL ci-dessous).
|
|
||||||
- Tous les abonnés en attente d'un changement sur ce `peerID` sont notifiés.
|
|
||||||
|
|
||||||
### `CREATE_RESOURCE` — modification d'un workflow
|
|
||||||
|
|
||||||
**Condition d'acceptation :** `resp.Datatype == WORKFLOW`.
|
|
||||||
|
|
||||||
**Ce qui se passe :**
|
|
||||||
1. Le payload est désérialisé en `workflow.Workflow`.
|
|
||||||
2. `broadcastPlanner(wf)` est appelé : pour chaque pair (storage + compute) du workflow dont le planner **n'est pas encore en cache**, un événement `PB_PLANNER` est émis sur NATS afin de demander un planner frais à `oc-discovery`.
|
|
||||||
3. `notifyWorkflowWatchers(wf.GetID())` est appelé : tous les streams WebSocket qui observent ce workflow sont signalés pour **rafraîchir leur liste de pairs surveillés**.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Canaux émis
|
|
||||||
|
|
||||||
### `PROPALGATION_EVENT` — deux actions possibles
|
|
||||||
|
|
||||||
| Action | Déclencheur | Effet attendu |
|
|
||||||
|---|---|---|
|
|
||||||
| `PB_PLANNER` | Workflow modifié, pair inconnu du cache | `oc-discovery` renvoie le planner du pair |
|
|
||||||
| `PB_CLOSE_PLANNER` | TTL expiré **ou** déconnexion WebSocket | Les consommateurs (oc-discovery, autres schedulers) libèrent leur état pour ce pair |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Cache des planners (`PlannerCache`)
|
|
||||||
|
|
||||||
```
|
|
||||||
PlannerCache : map[string]*planner.Planner // clé = peerID
|
|
||||||
plannerAddedAt : map[string]time.Time // horodatage de première insertion
|
|
||||||
```
|
|
||||||
|
|
||||||
- Protégé par `plannerMu` (RWMutex).
|
|
||||||
- Alimenté uniquement via `storePlanner()` (appelé par le listener NATS).
|
|
||||||
- Supprimé via `EmitNATS(peerID, PB_CLOSE_PLANNER)`, qui efface l'entrée **et** notifie les abonnés.
|
|
||||||
|
|
||||||
### TTL de 24 heures
|
|
||||||
|
|
||||||
À la **première** insertion d'un `peerID`, une goroutine est lancée :
|
|
||||||
|
|
||||||
```
|
|
||||||
sleep(24h)
|
|
||||||
→ si l'entrée existe encore : EmitNATS(peerID, PB_CLOSE_PLANNER)
|
|
||||||
```
|
|
||||||
|
|
||||||
Cela évite que des planners obsolètes stagnent indéfiniment. L'entrée est supprimée et les streams actifs reçoivent une notification « plus de planner » pour ce pair.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Pub/sub interne
|
|
||||||
|
|
||||||
Un registre d'abonnements en mémoire permet à d'autres composants (notamment le controller WebSocket) de réagir aux événements sans coupler directement le code NATS et les goroutines HTTP.
|
|
||||||
|
|
||||||
Deux registres distincts :
|
|
||||||
|
|
||||||
| Registre | Clé | Signification |
|
|
||||||
|---|---|---|
|
|
||||||
| `plannerSubs` | `peerID` | « le planner de ce pair a changé » |
|
|
||||||
| `workflowSubs` | `workflowID` | « ce workflow a été modifié » |
|
|
||||||
|
|
||||||
### API
|
|
||||||
|
|
||||||
```go
|
|
||||||
// S'abonner aux changements de planners pour plusieurs pairs
|
|
||||||
ch, cancel := SubscribePlannerUpdates(peerIDs []string)
|
|
||||||
|
|
||||||
// S'abonner aux modifications d'un workflow
|
|
||||||
ch, cancel := SubscribeWorkflowUpdates(wfID string)
|
|
||||||
```
|
|
||||||
|
|
||||||
Chaque canal est bufférisé (`capacity 1`) : si un signal est déjà en attente, les suivants sont ignorés sans bloquer.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Intégration avec le stream WebSocket (`GET /oc/:id/check`)
|
|
||||||
|
|
||||||
Le handler `CheckStream` dans `controllers/workflow_sheduler.go` exploite ces mécanismes :
|
|
||||||
|
|
||||||
1. **Ouverture** : résolution des `peerIDs` du workflow, abonnement à `SubscribePlannerUpdates` et `SubscribeWorkflowUpdates`.
|
|
||||||
2. **Boucle de streaming** :
|
|
||||||
- `plannerCh` reçoit un signal → re-calcul du `CheckResult` et envoi au client.
|
|
||||||
- `wfCh` reçoit un signal (workflow modifié) → recalcul des `peerIDs`, désabonnement + ré-abonnement aux nouveaux pairs, re-calcul et envoi.
|
|
||||||
3. **Fermeture** (déconnexion client) :
|
|
||||||
- Désabonnement des deux registres.
|
|
||||||
- `EmitNATS(peerID, PB_CLOSE_PLANNER)` pour **chaque pair surveillé** : le cache est purgé et `oc-discovery` est informé que le scheduler n'a plus besoin du planner.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Flux de données résumé
|
|
||||||
|
|
||||||
```
|
|
||||||
oc-discovery ──PROPALGATION_EVENT(PB_PLANNER)──► ListenNATS
|
|
||||||
│
|
|
||||||
storePlanner()
|
|
||||||
PlannerCache[peerID] = planner
|
|
||||||
notifyPlannerWatchers(peerID)
|
|
||||||
│
|
|
||||||
SubscribePlannerUpdates
|
|
||||||
│
|
|
||||||
CheckStream (WS) ──► client
|
|
||||||
|
|
||||||
Workflow modifié ──CREATE_RESOURCE(WORKFLOW)──► ListenNATS
|
|
||||||
│
|
|
||||||
broadcastPlanner(wf)
|
|
||||||
PROPALGATION_EVENT(PB_PLANNER) → oc-discovery
|
|
||||||
notifyWorkflowWatchers(wfID)
|
|
||||||
│
|
|
||||||
SubscribeWorkflowUpdates
|
|
||||||
│
|
|
||||||
CheckStream refresh peerIDs ──► client
|
|
||||||
|
|
||||||
TTL 24h / déconnexion WS ──► EmitNATS(PB_CLOSE_PLANNER)
|
|
||||||
│
|
|
||||||
delete PlannerCache[peerID]
|
|
||||||
notifyPlannerWatchers(peerID)
|
|
||||||
PROPALGATION_EVENT(PB_CLOSE_PLANNER) → NATS bus
|
|
||||||
```
|
|
||||||
@@ -1,71 +0,0 @@
|
|||||||
@startuml seq_check
|
|
||||||
title Flux CHECK — Peer A ↔ Peer B via oc-discovery
|
|
||||||
|
|
||||||
skinparam sequenceMessageAlign center
|
|
||||||
skinparam sequence {
|
|
||||||
ArrowColor #333333
|
|
||||||
LifeLineBorderColor #888888
|
|
||||||
GroupBorderColor #777777
|
|
||||||
GroupBackgroundColor #FAFAFA
|
|
||||||
NoteBackgroundColor #FFFDE7
|
|
||||||
NoteBorderColor #CCAA00
|
|
||||||
BoxBorderColor #555555
|
|
||||||
}
|
|
||||||
skinparam ParticipantBackgroundColor #FFFFFF
|
|
||||||
|
|
||||||
box "Peer A" #EAF3FB
|
|
||||||
participant "oc-scheduler A" as SA
|
|
||||||
participant "oc-discovery A" as DA
|
|
||||||
end box
|
|
||||||
|
|
||||||
box "Peer B" #EAF9EE
|
|
||||||
participant "oc-discovery B" as DB
|
|
||||||
participant "oc-scheduler B" as SB
|
|
||||||
end box
|
|
||||||
|
|
||||||
participant "Client" as Client
|
|
||||||
|
|
||||||
' ══════════════════════════════════════════════════════
|
|
||||||
== Alimentation continue du PlannerCache (fond permanent) ==
|
|
||||||
' ══════════════════════════════════════════════════════
|
|
||||||
|
|
||||||
note over SA, SB
|
|
||||||
Déclenché par : démarrage de SB, booking local créé,
|
|
||||||
TTL planner expiré → refreshSelfPlanner()
|
|
||||||
end note
|
|
||||||
|
|
||||||
SB -> DB : **NATS PUB** · PROPALGATION_EVENT\nPB_PLANNER { peer_id, schedule, capacities }
|
|
||||||
DB --> DA : **STREAM** · PropalgationMessage\n{ action: PB_PLANNER }
|
|
||||||
DA -> SA : **NATS SUB** · PROPALGATION_EVENT\n[ FromApp = "oc-discovery" ]
|
|
||||||
SA -> SA : storePlanner(PeerB.PeerID, planner)\n→ PlannerCache[PeerB.PeerID] = p
|
|
||||||
|
|
||||||
' ══════════════════════════════════════════════════════
|
|
||||||
== Flux CHECK (POST /oc/:wfID/check) ==
|
|
||||||
' ══════════════════════════════════════════════════════
|
|
||||||
|
|
||||||
Client -> SA : POST /oc/:wfID/check\n?as_possible=true&preemption=false
|
|
||||||
|
|
||||||
group ① Résolution du workflow
|
|
||||||
SA -> SA : workflow.LoadOne(wfID)
|
|
||||||
SA -> SA : collectBookingResources(wf)\n→ [ { peerID=B, resourceID, instanceID } ]
|
|
||||||
end
|
|
||||||
|
|
||||||
group ② Vérification locale contre le cache
|
|
||||||
SA -> SA : checkResourceAvailability()\nPlannerCache[PeerB.PeerID].Check(res, inst, start, end)
|
|
||||||
|
|
||||||
alt slot disponible
|
|
||||||
SA -> SA : available = true
|
|
||||||
else slot occupé
|
|
||||||
SA -> SA : findNextSlot(window=5h, pas=15min)\n→ next_slot
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
SA -> Client : **CheckResult**\n{ available, start, end, next_slot, warnings }
|
|
||||||
|
|
||||||
note over Client, SB
|
|
||||||
Aucun appel réseau pendant le check :
|
|
||||||
tout est résolu depuis le PlannerCache local de A.
|
|
||||||
oc-discovery n'intervient qu'en amont (fond continu).
|
|
||||||
end note
|
|
||||||
|
|
||||||
@enduml
|
|
||||||
@@ -1,95 +0,0 @@
|
|||||||
@startuml seq_schedule
|
|
||||||
title Flux SCHEDULE — Peer A ↔ Peer B via oc-discovery
|
|
||||||
|
|
||||||
skinparam sequenceMessageAlign center
|
|
||||||
skinparam sequence {
|
|
||||||
ArrowColor #333333
|
|
||||||
LifeLineBorderColor #888888
|
|
||||||
GroupBorderColor #777777
|
|
||||||
GroupBackgroundColor #FAFAFA
|
|
||||||
NoteBackgroundColor #FFFDE7
|
|
||||||
NoteBorderColor #CCAA00
|
|
||||||
BoxBorderColor #555555
|
|
||||||
}
|
|
||||||
skinparam ParticipantBackgroundColor #FFFFFF
|
|
||||||
|
|
||||||
participant "Client" as Client
|
|
||||||
|
|
||||||
box "Peer A" #EAF3FB
|
|
||||||
participant "oc-scheduler A" as SA
|
|
||||||
participant "oc-discovery A" as DA
|
|
||||||
end box
|
|
||||||
|
|
||||||
box "Peer B" #EAF9EE
|
|
||||||
participant "oc-discovery B" as DB
|
|
||||||
participant "oc-scheduler B" as SB
|
|
||||||
end box
|
|
||||||
|
|
||||||
' ══════════════════════════════════════════════════════════════════
|
|
||||||
Client -> SA : POST /oc/:wfID
|
|
||||||
|
|
||||||
' ──────────────────────────────────────────────────────────────────
|
|
||||||
group ① Planification — synchrone (GetBuyAndBook)
|
|
||||||
SA -> SA : workflow.LoadOne(wfID)\nwf.Planify(start, end, instances, …)\nexec.Buy() → purchases [ DestPeerID = B ]\nexec.Book() → bookings [ DestPeerID = B ]\n⇒ WorkflowExecution {\n BookingsState: { booking_id: false }\n PurchasesState: { purchase_id: false }\n }
|
|
||||||
end
|
|
||||||
|
|
||||||
' ──────────────────────────────────────────────────────────────────
|
|
||||||
group ② Propagation vers Peer B — goroutines (errCh attend l'envoi NATS, pas la réception par B)
|
|
||||||
|
|
||||||
SA -> DA : **NATS PUB** · CREATE_RESOURCE\nPURCHASE_RESOURCE { DestPeerID=B, IsDraft=true }
|
|
||||||
note right of DA : oc-discovery A est le\nrécepteur systématique\ndes émissions NATS de SA
|
|
||||||
DA --> DB : **STREAM** · PropalgationMessage\n{ datatype: PURCHASE_RESOURCE }
|
|
||||||
DB -> SB : **NATS SUB** · CREATE_RESOURCE PURCHASE_RESOURCE
|
|
||||||
|
|
||||||
SA -> DA : **NATS PUB** · CREATE_RESOURCE\nBOOKING { DestPeerID=B, IsDraft=true }
|
|
||||||
DA --> DB : **STREAM** · PropalgationMessage\n{ datatype: BOOKING }
|
|
||||||
DB -> SB : **NATS SUB** · CREATE_RESOURCE BOOKING
|
|
||||||
|
|
||||||
end
|
|
||||||
|
|
||||||
' ──────────────────────────────────────────────────────────────────
|
|
||||||
group ③ Peer B traite — async (ListenNATS goroutine de SB)
|
|
||||||
|
|
||||||
SB -> SB : StoreOne(purchase, IsDraft=true)\nAfterFunc(10 min → draftTimeout)
|
|
||||||
|
|
||||||
SB -> DB : **NATS PUB** · PROPALGATION_EVENT\nConsiders { DataType:PURCHASE_RESOURCE,\n id=purchase_id, execution_id }
|
|
||||||
note right of DB : SB émet sur son NATS local\nDB (oc-discovery B) reçoit
|
|
||||||
DB --> DA : **STREAM** · PropalgationMessage\n{ action: Considers, DataType: PURCHASE_RESOURCE }
|
|
||||||
DA -> SA : **NATS SUB** · PROPALGATION_EVENT\n[ FromApp = "oc-discovery" ]
|
|
||||||
SA -> SA : updateExecutionState()\nPurchasesState[ purchase_id ] = true
|
|
||||||
|
|
||||||
SB -> SB : PlannerCache[self].Check(slot) ✓\nStoreOne(booking, IsDraft=true)\nAfterFunc(10 min → draftTimeout)\nrefreshSelfPlanner()
|
|
||||||
|
|
||||||
SB -> DB : **NATS PUB** · PROPALGATION_EVENT\nPB_PLANNER { peer_id, schedule, capacities }
|
|
||||||
DB --> DA : **STREAM** · PropalgationMessage\n{ action: PB_PLANNER }
|
|
||||||
DA -> SA : **NATS SUB** · PROPALGATION_EVENT\n[ FromApp = "oc-discovery" ]
|
|
||||||
SA -> SA : storePlanner(PeerB.PeerID, p)
|
|
||||||
|
|
||||||
SB -> DB : **NATS PUB** · PROPALGATION_EVENT\nConsiders { DataType:BOOKING,\n id=booking_id, execution_id }
|
|
||||||
DB --> DA : **STREAM** · PropalgationMessage\n{ action: Considers, DataType: BOOKING }
|
|
||||||
DA -> SA : **NATS SUB** · PROPALGATION_EVENT\n[ FromApp = "oc-discovery" ]
|
|
||||||
SA -> SA : updateExecutionState()\nBookingsState[ booking_id ] = true\n→ tous true → State = SCHEDULED (DB)
|
|
||||||
|
|
||||||
end
|
|
||||||
|
|
||||||
' ──────────────────────────────────────────────────────────────────
|
|
||||||
group ④ Schedules() finalise — synchrone (concurrent avec ③)
|
|
||||||
SA -> SA : GenerateOrder(purchases, bookings)\nexec.PurgeDraft()\nexec.StoreDraftDefault() → State=SCHEDULED, IsDraft=false\nGenericStoreOne(exec)
|
|
||||||
|
|
||||||
SA -> DA : **NATS PUB** · PROPALGATION_EVENT [goroutine]\nConsiders { DataType:WORKFLOW_EXECUTION,\n execution, peer_ids:[ PeerB ] }
|
|
||||||
note right of DA : oc-discovery A reçoit\net STREAM vers tous les\npairs listés dans peer_ids
|
|
||||||
DA --> DB : **STREAM** · PropalgationMessage\n{ action: Considers, DataType: WORKFLOW_EXECUTION }
|
|
||||||
DB -> SB : **NATS SUB** · PROPALGATION_EVENT\n[ FromApp = "oc-discovery" ]
|
|
||||||
SB -> SB : confirmExecutionDrafts()\nconfirmResource(booking_id)\n → Booking.IsDraft=false, State=SCHEDULED\nconfirmResource(purchase_id)\n → Purchase.IsDraft=false
|
|
||||||
|
|
||||||
SA -> Client : **{WorkflowSchedule, Workflow, Executions}**
|
|
||||||
end
|
|
||||||
|
|
||||||
note over SA, SB
|
|
||||||
③ et ④ sont concurrents.
|
|
||||||
En pratique : GenerateOrder + écritures DB côté A
|
|
||||||
laissent le temps à B de recevoir et stocker ses drafts
|
|
||||||
avant que A émette le Considers/WORKFLOW_EXECUTION.
|
|
||||||
end note
|
|
||||||
|
|
||||||
@enduml
|
|
||||||
4
env.env
4
env.env
@@ -1,4 +0,0 @@
|
|||||||
KUBERNETES_SERVICE_HOST=192.168.1.169
|
|
||||||
KUBE_CA="LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTnpReU56STVNVEF3SGhjTk1qWXdNekl6TVRNek5URXdXaGNOTXpZd016SXdNVE16TlRFdwpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTnpReU56STVNVEF3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFSSGpYRDVpbnRIYWZWSk5VaDFlRnIxcXBKdFlkUmc5NStKVENEa0tadTIKYjUxRXlKaG1zanRIY3BDUndGL1VGMzlvdzY4TFBUcjBxaUorUHlhQTBLZUtvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVTdWQkNzZVN3ajJ2cmczMFE5UG8vCnV6ZzAvMjR3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUloQUlEOVY2aFlUSS83ZW1hRzU0dDdDWVU3TXFSdDdESUkKNlgvSUwrQ0RLbzlNQWlCdlFEMGJmT0tVWDc4UmRGdUplcEhEdWFUMUExaGkxcWdIUGduM1dZdDBxUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"
|
|
||||||
KUBE_CERT="LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrVENDQVRlZ0F3SUJBZ0lJUU5KbFNJQUJPMDR3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOemMwTWpjeU9URXdNQjRYRFRJMk1ETXlNekV6TXpVeE1Gb1hEVEkzTURNeQpNekV6TXpVeE1Gb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJMY3Uwb2pUbVg4RFhTQkYKSHZwZDZNVEoyTHdXc1lRTmdZVURXRDhTVERIUWlCczlMZ0x5ZTdOMEFvZk85RkNZVW1HamhiaVd3WFVHR3dGTgpUdlRMU2lXalNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCUlJhRW9wQzc5NGJyTHlnR0g5SVhvbDZTSmlFREFLQmdncWhrak9QUVFEQWdOSUFEQkYKQWlFQWhaRUlrSWV3Y1loL1NmTFVCVjE5MW1CYTNRK0J5S2J5eTVlQmpwL3kzeWtDSUIxWTJicTVOZTNLUUU4RAprNnNzeFJrbjJmN0VoWWVRQU1pUlJ2MjIweDNLCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUzTnpReU56STVNVEF3SGhjTk1qWXdNekl6TVRNek5URXdXaGNOTXpZd016SXdNVE16TlRFdwpXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUzTnpReU56STVNVEF3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFTcTdVTC85MEc1ZmVTaE95NjI3eGFZWlM5dHhFdWFoWFQ3Vk5wZkpQSnMKaEdXd2UxOXdtbXZzdlp6dlNPUWFRSzJaMmttN0hSb1IrNlA1YjIyamczbHVvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVVVXaEtLUXUvZUc2eThvQmgvU0Y2Ckpla2lZaEF3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUloQUk3cGxHczFtV20ySDErbjRobDBNTk13RmZzd0o5ZXIKTzRGVkM0QzhwRG44QWlCN3NZMVFwd2M5VkRUeGNZaGxuZzZNUzRXai85K0lHWjJxcy94UStrMjdTQT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"
|
|
||||||
KUBE_DATA="LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUROZDRnWXd6aVRhK1hwNnFtNVc3SHFzc1JJNkREaUJTbUV2ZHoxZzk3VGxvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFdHk3U2lOT1pmd05kSUVVZStsM294TW5ZdkJheGhBMkJoUU5ZUHhKTU1kQ0lHejB1QXZKNwpzM1FDaDg3MFVKaFNZYU9GdUpiQmRRWWJBVTFPOU10S0pRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo="
|
|
||||||
88
go.mod
88
go.mod
@@ -1,47 +1,14 @@
|
|||||||
module oc-scheduler
|
module oc-scheduler
|
||||||
|
|
||||||
go 1.25.0
|
go 1.22.0
|
||||||
|
|
||||||
|
toolchain go1.22.4
|
||||||
|
|
||||||
require (
|
require (
|
||||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260429050913-47d487ea8011
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250211081618-d82ae166a1e5
|
||||||
github.com/beego/beego/v2 v2.3.8
|
github.com/beego/beego/v2 v2.3.1
|
||||||
github.com/google/uuid v1.6.0
|
|
||||||
github.com/robfig/cron v1.2.0
|
|
||||||
github.com/smartystreets/goconvey v1.7.2
|
github.com/smartystreets/goconvey v1.7.2
|
||||||
go.mongodb.org/mongo-driver v1.17.4
|
go.mongodb.org/mongo-driver v1.17.1
|
||||||
)
|
|
||||||
|
|
||||||
require (
|
|
||||||
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
|
|
||||||
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
|
|
||||||
github.com/go-logr/logr v1.4.3 // indirect
|
|
||||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
|
||||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
|
||||||
github.com/go-openapi/swag v0.23.0 // indirect
|
|
||||||
github.com/google/gnostic-models v0.7.0 // indirect
|
|
||||||
github.com/josharian/intern v1.0.0 // indirect
|
|
||||||
github.com/json-iterator/go v1.1.12 // indirect
|
|
||||||
github.com/mailru/easyjson v0.7.7 // indirect
|
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
|
||||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
|
|
||||||
github.com/x448/float16 v0.8.4 // indirect
|
|
||||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
|
||||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
|
||||||
golang.org/x/oauth2 v0.30.0 // indirect
|
|
||||||
golang.org/x/term v0.37.0 // indirect
|
|
||||||
golang.org/x/time v0.9.0 // indirect
|
|
||||||
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
|
|
||||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
|
||||||
k8s.io/api v0.35.1 // indirect
|
|
||||||
k8s.io/apimachinery v0.35.1 // indirect
|
|
||||||
k8s.io/client-go v0.35.1 // indirect
|
|
||||||
k8s.io/klog/v2 v2.130.1 // indirect
|
|
||||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
|
|
||||||
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
|
|
||||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
|
|
||||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
|
||||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
|
|
||||||
sigs.k8s.io/yaml v1.6.0 // indirect
|
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
@@ -49,45 +16,46 @@ require (
|
|||||||
github.com/biter777/countries v1.7.5 // indirect
|
github.com/biter777/countries v1.7.5 // indirect
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
|
github.com/gabriel-vasile/mimetype v1.4.6 // indirect
|
||||||
github.com/gabriel-vasile/mimetype v1.4.9 // indirect
|
|
||||||
github.com/go-playground/locales v0.14.1 // indirect
|
github.com/go-playground/locales v0.14.1 // indirect
|
||||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||||
github.com/go-playground/validator/v10 v10.27.0 // indirect
|
github.com/go-playground/validator/v10 v10.22.1 // indirect
|
||||||
github.com/golang/snappy v1.0.0 // indirect
|
github.com/golang/snappy v0.0.4 // indirect
|
||||||
|
github.com/google/uuid v1.6.0 // indirect
|
||||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 // indirect
|
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 // indirect
|
||||||
github.com/goraz/onion v0.1.3 // indirect
|
github.com/goraz/onion v0.1.3 // indirect
|
||||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674
|
|
||||||
github.com/hashicorp/golang-lru v1.0.2 // indirect
|
github.com/hashicorp/golang-lru v1.0.2 // indirect
|
||||||
github.com/jtolds/gls v4.20.0+incompatible // indirect
|
github.com/jtolds/gls v4.20.0+incompatible // indirect
|
||||||
github.com/klauspost/compress v1.18.0 // indirect
|
github.com/klauspost/compress v1.17.11 // indirect
|
||||||
|
github.com/kr/text v0.2.0 // indirect
|
||||||
github.com/leodido/go-urn v1.4.0 // indirect
|
github.com/leodido/go-urn v1.4.0 // indirect
|
||||||
github.com/libp2p/go-libp2p/core v0.43.0-rc2 // indirect
|
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
|
||||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||||
github.com/montanaflynn/stats v0.7.1 // indirect
|
github.com/montanaflynn/stats v0.7.1 // indirect
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||||
github.com/nats-io/nats.go v1.44.0 // indirect
|
github.com/nats-io/nats.go v1.37.0 // indirect
|
||||||
github.com/nats-io/nkeys v0.4.11 // indirect
|
github.com/nats-io/nkeys v0.4.7 // indirect
|
||||||
github.com/nats-io/nuid v1.0.1 // indirect
|
github.com/nats-io/nuid v1.0.1 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||||
github.com/prometheus/client_golang v1.23.0 // indirect
|
github.com/prometheus/client_golang v1.20.5 // indirect
|
||||||
github.com/prometheus/client_model v0.6.2 // indirect
|
github.com/prometheus/client_model v0.6.1 // indirect
|
||||||
github.com/prometheus/common v0.65.0 // indirect
|
github.com/prometheus/common v0.60.1 // indirect
|
||||||
github.com/prometheus/procfs v0.17.0 // indirect
|
github.com/prometheus/procfs v0.15.1 // indirect
|
||||||
github.com/rs/zerolog v1.34.0 // indirect
|
github.com/robfig/cron v1.2.0 // indirect
|
||||||
|
github.com/rogpeppe/go-internal v1.11.0 // indirect
|
||||||
|
github.com/rs/zerolog v1.33.0 // indirect
|
||||||
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 // indirect
|
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 // indirect
|
||||||
github.com/smartystreets/assertions v1.2.0 // indirect
|
github.com/smartystreets/assertions v1.2.0 // indirect
|
||||||
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
|
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
|
||||||
github.com/xdg-go/scram v1.1.2 // indirect
|
github.com/xdg-go/scram v1.1.2 // indirect
|
||||||
github.com/xdg-go/stringprep v1.0.4 // indirect
|
github.com/xdg-go/stringprep v1.0.4 // indirect
|
||||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
|
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
|
||||||
golang.org/x/crypto v0.44.0 // indirect
|
golang.org/x/crypto v0.28.0 // indirect
|
||||||
golang.org/x/net v0.47.0 // indirect
|
golang.org/x/net v0.30.0 // indirect
|
||||||
golang.org/x/sync v0.18.0 // indirect
|
golang.org/x/sync v0.8.0 // indirect
|
||||||
golang.org/x/sys v0.38.0 // indirect
|
golang.org/x/sys v0.26.0 // indirect
|
||||||
golang.org/x/text v0.31.0 // indirect
|
golang.org/x/text v0.19.0 // indirect
|
||||||
google.golang.org/protobuf v1.36.8 // indirect
|
google.golang.org/protobuf v1.35.1 // indirect
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
525
go.sum
525
go.sum
@@ -1,16 +1,267 @@
|
|||||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260126113404-85a8857938f5 h1:pl6/u6UXyFcfCU+xyQcSY8Lkby68EVWswxG2Oaq476A=
|
cloud.o-forge.io/core/oc-lib v0.0.0-20240904135449-4f0ab6a3760f h1:v9mw3uNg/DJswOvHooMu8/BMedA+vIXbma+8iUwsjUI=
|
||||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260126113404-85a8857938f5/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
|
cloud.o-forge.io/core/oc-lib v0.0.0-20240904135449-4f0ab6a3760f/go.mod h1:FIJD0taWLJ5pjQLJ6sfE2KlTkvbmk5SMcyrxdjsaVz0=
|
||||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260427091650-f048b420d74d h1:jzgwgbZDASalQJSYbPF/L2L2RSP2OAbqhMB4YUXK27M=
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241002120813-a09a04e1a71e h1:77QHk5JSf0q13B/Ai3xjcsGSS7nX+9AfxcsYz5oDo/A=
|
||||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260427091650-f048b420d74d/go.mod h1:JynnOb3eMr9VZW1mHq+Vsl3tzx6gPhPsGKpQD/dtEBc=
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241002120813-a09a04e1a71e/go.mod h1:t+zpCTVKVdHH/BImwtMYY2QIWLMXKgY4n/JhFm3Vpu8=
|
||||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260428065508-e3fbe7688ad5 h1:CVwlE1JgIcTAvVLCl+xeiJ54hndiTgP1XoFYS0vSvYA=
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241015083538-9f5e6d60185a h1:2mBMc36WKh1/Dpomktx9dVXGxK0agFr7RdgvHTtyn2w=
|
||||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260428065508-e3fbe7688ad5/go.mod h1:JynnOb3eMr9VZW1mHq+Vsl3tzx6gPhPsGKpQD/dtEBc=
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241015083538-9f5e6d60185a/go.mod h1:t+zpCTVKVdHH/BImwtMYY2QIWLMXKgY4n/JhFm3Vpu8=
|
||||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260429050913-47d487ea8011 h1:owV5pQ+mS5xDCKEcGTO+BgsyYrKjkISL8LDsmjEb/3s=
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241018065112-59a1b52242b3 h1:IH0kY/aDvaxQAYDHuxpG82vf40P4QygIxf7mAxm7epU=
|
||||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260429050913-47d487ea8011/go.mod h1:JynnOb3eMr9VZW1mHq+Vsl3tzx6gPhPsGKpQD/dtEBc=
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241018065112-59a1b52242b3/go.mod h1:t+zpCTVKVdHH/BImwtMYY2QIWLMXKgY4n/JhFm3Vpu8=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241030091613-1a5521237800 h1:uZ4Qrxk/KEpOfDq8QHjZankW7aZGLlDYLoM3CZowlR8=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241030091613-1a5521237800/go.mod h1:t+zpCTVKVdHH/BImwtMYY2QIWLMXKgY4n/JhFm3Vpu8=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241030105814-5f05b73366ab h1:hYUf9xXpqhp9w0eBfOWVi7c17iWpN+FL2FbhsAkmQ2E=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241030105814-5f05b73366ab/go.mod h1:t+zpCTVKVdHH/BImwtMYY2QIWLMXKgY4n/JhFm3Vpu8=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241107114600-4c0c75be9161 h1:so5V7C6kiJ9tpuxtgK/KcgjXQC2ythInAH8X2gohuaM=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241107114600-4c0c75be9161/go.mod h1:ya7Q+zHhaKM+XF6sAJ+avqHEVzaMnFJQih2X3TlTlGo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241107122526-f3df1e42b9ba h1:MGd8N7bY1LWXMhAp7gibDNwMS2hsatLQ3rfayvy5rGs=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241107122526-f3df1e42b9ba/go.mod h1:ya7Q+zHhaKM+XF6sAJ+avqHEVzaMnFJQih2X3TlTlGo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241108104423-7fd44a55cb28 h1:jekSPkD/b59kJ9Bp/trBWnahkdd1FkX4csQOcSaZa8I=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241108104423-7fd44a55cb28/go.mod h1:ya7Q+zHhaKM+XF6sAJ+avqHEVzaMnFJQih2X3TlTlGo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241112100147-bc79d54284be h1:m9doLiroKbG5rp2gHsw1FbPrRD+zed6V1jkkqW5Xe2g=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241112100147-bc79d54284be/go.mod h1:ya7Q+zHhaKM+XF6sAJ+avqHEVzaMnFJQih2X3TlTlGo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241112104157-d6dba8e1f153 h1:VHOu4vvuxot5yH/0sUhjsfZtTe4+VSxT4Tww4BxYFZs=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241112104157-d6dba8e1f153/go.mod h1:ya7Q+zHhaKM+XF6sAJ+avqHEVzaMnFJQih2X3TlTlGo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241112111730-b2a6ac19cbb5 h1:u96Yw+N6/ebKYI2p8mB6FWGpC4PMexwCLHgIyJIzdt0=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241112111730-b2a6ac19cbb5/go.mod h1:ya7Q+zHhaKM+XF6sAJ+avqHEVzaMnFJQih2X3TlTlGo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241112114503-b5dba2458ad4 h1:JQZbu8E9Yv653/QikRBUqzakNDEVX4xfnoxn/EPvlws=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241112114503-b5dba2458ad4/go.mod h1:ya7Q+zHhaKM+XF6sAJ+avqHEVzaMnFJQih2X3TlTlGo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241112123618-6497e7dbdd51 h1:AlOfOFWKVC/lUpLfCbw/QeQ5Epax/bK+ltZSr5vzUe0=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241112123618-6497e7dbdd51/go.mod h1:ya7Q+zHhaKM+XF6sAJ+avqHEVzaMnFJQih2X3TlTlGo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241112134441-8a21a7c803de h1:faiwXaNsohYkPILHr21la3j2lnSsr1crPBeTteo2zqE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241112134441-8a21a7c803de/go.mod h1:ya7Q+zHhaKM+XF6sAJ+avqHEVzaMnFJQih2X3TlTlGo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241112141026-06c3af5d4d61 h1:wAuOQAbv2uAie1QulHvbBzlZWMUE6hQ0kjhnP4OXFqQ=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241112141026-06c3af5d4d61/go.mod h1:ya7Q+zHhaKM+XF6sAJ+avqHEVzaMnFJQih2X3TlTlGo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241120085309-08e9ee67fe96 h1:1f2m8148/bOY19urpgtgShmGPDMnnjRqcEczrkVDJBA=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241120085309-08e9ee67fe96/go.mod h1:ya7Q+zHhaKM+XF6sAJ+avqHEVzaMnFJQih2X3TlTlGo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241120093920-b49685aa8223 h1:LX04VfuXWxi+Q0lKhBBd7tfyLO3R4y8um3srRVlMbSY=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241120093920-b49685aa8223/go.mod h1:ya7Q+zHhaKM+XF6sAJ+avqHEVzaMnFJQih2X3TlTlGo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241120150854-57f18b224443 h1:cqlL4/EsqYlQ6luPBC4+6+gWNwQqWVV8DPD8O7F6yM8=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241120150854-57f18b224443/go.mod h1:ya7Q+zHhaKM+XF6sAJ+avqHEVzaMnFJQih2X3TlTlGo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241120153807-3b77c0da8352 h1:xNYjEiB/nrvXLbLcjSDfNZEPSR38/LKcsQKP/oWg5HI=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241120153807-3b77c0da8352/go.mod h1:ya7Q+zHhaKM+XF6sAJ+avqHEVzaMnFJQih2X3TlTlGo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241120160521-ac49d3324d7b h1:5prB7K0iM284VmYdoRaBMZIOEXq5S0YgTrSp4+SnZyo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241120160521-ac49d3324d7b/go.mod h1:ya7Q+zHhaKM+XF6sAJ+avqHEVzaMnFJQih2X3TlTlGo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241121065159-d8fac883d260 h1:DSumHyw9XJQ/r+LjWa5GDkjS0ri/lFkU7oPr5vv8mws=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241121065159-d8fac883d260/go.mod h1:ya7Q+zHhaKM+XF6sAJ+avqHEVzaMnFJQih2X3TlTlGo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241121071546-e9b3a65a0ec6 h1:AdUkzaX63VF3fdloWyyWT1jLM4M1pkDLErAdHyVbsKU=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241121071546-e9b3a65a0ec6/go.mod h1:ya7Q+zHhaKM+XF6sAJ+avqHEVzaMnFJQih2X3TlTlGo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241121074503-15ca06aba883 h1:JdHJT8vuup4pJCC7rjiOe0/qD7at6400ml5zZHjEeUo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241121074503-15ca06aba883/go.mod h1:ya7Q+zHhaKM+XF6sAJ+avqHEVzaMnFJQih2X3TlTlGo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241202081145-cb21db672bb5 h1:qxXC6fkEa8bLTo0qn3VrB55tfxyjHQQa/0n97piJhNI=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241202081145-cb21db672bb5/go.mod h1:2IevepXviessA6m67fB6ZJhZSeEeoOYWbVqPS4dzkbg=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241202121923-2ec6899a1865 h1:BhGzhy6gsEA7vthuq6KWyABsRuF4KV5NqOvfkygytGg=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241202121923-2ec6899a1865/go.mod h1:2IevepXviessA6m67fB6ZJhZSeEeoOYWbVqPS4dzkbg=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241202134851-9a2ed2351d7e h1:3U5JBdQRti2OpALLPhev6lkUi1TlYHgo2ADidOAfEAs=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241202134851-9a2ed2351d7e/go.mod h1:2IevepXviessA6m67fB6ZJhZSeEeoOYWbVqPS4dzkbg=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241202152644-e2ddd7e4e6f9 h1:qUA6T5Pjq/pv6dZYH4PWktXmFiRnloDX84m1U5NhvLM=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241202152644-e2ddd7e4e6f9/go.mod h1:2IevepXviessA6m67fB6ZJhZSeEeoOYWbVqPS4dzkbg=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241202155908-599a6144803e h1:3xGLiTDTgWHIIPDZyTo/clMIj+gQxnIDSE78s9/0wNE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241202155908-599a6144803e/go.mod h1:2IevepXviessA6m67fB6ZJhZSeEeoOYWbVqPS4dzkbg=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241203073336-6042d47700fd h1:iDryCORnODgAvBe1Yi+RnIGjYgUSkAv7ZCnm+CUV18w=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241203073336-6042d47700fd/go.mod h1:2IevepXviessA6m67fB6ZJhZSeEeoOYWbVqPS4dzkbg=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241203082527-2924ccd23b5c h1:3ghuxLEI3JXicDYoFx4YnkLauLl0Nq9UErjpL/2SqEU=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241203082527-2924ccd23b5c/go.mod h1:2IevepXviessA6m67fB6ZJhZSeEeoOYWbVqPS4dzkbg=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241203090110-471e0c9d9b48 h1:kVTpROPipS4YtROH9vAGZw21OMLNR48qbYedCngGThw=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241203090110-471e0c9d9b48/go.mod h1:2IevepXviessA6m67fB6ZJhZSeEeoOYWbVqPS4dzkbg=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241203095728-ea55c94c7328 h1:7iK2HzMm0EEEF60ajUVT/6jwqIirduww5Xa3191XS4I=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241203095728-ea55c94c7328/go.mod h1:2IevepXviessA6m67fB6ZJhZSeEeoOYWbVqPS4dzkbg=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241203105751-4b88da8ff66d h1:iIo+AMQ09MshkKKN8K8pd1ooLaigAYlnUUnQAaCidLo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241203105751-4b88da8ff66d/go.mod h1:2IevepXviessA6m67fB6ZJhZSeEeoOYWbVqPS4dzkbg=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241203115141-6681c455d8e0 h1:RnHCONn0oYbEaTN1wDIeOAEM12cCZQRtvjBCVCb0b1Y=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241203115141-6681c455d8e0/go.mod h1:2IevepXviessA6m67fB6ZJhZSeEeoOYWbVqPS4dzkbg=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241204103308-fd01f535a131 h1:FdUY8b8xTdVzQ9wlphlo8TlbQif76V9oxGDYq26TsAs=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241204103308-fd01f535a131/go.mod h1:2IevepXviessA6m67fB6ZJhZSeEeoOYWbVqPS4dzkbg=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241204111455-1fcbc7c08ab0 h1:cBr4m2tcLf+dZufrjYvhvcsSqXcRDeyhnq5c5HY15po=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241204111455-1fcbc7c08ab0/go.mod h1:2IevepXviessA6m67fB6ZJhZSeEeoOYWbVqPS4dzkbg=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241205082103-fbbce7817b73 h1:g96KMOxdhvM7x6YFqJfd08wybRzCLEvol7HfhKJfxO4=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20241205082103-fbbce7817b73/go.mod h1:2IevepXviessA6m67fB6ZJhZSeEeoOYWbVqPS4dzkbg=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250110164331-5255ffc2f728 h1:3p1G82xZmEAu2OEyY5HM42Cfbb1J887P9lSoRKNhgg8=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250110164331-5255ffc2f728/go.mod h1:2IevepXviessA6m67fB6ZJhZSeEeoOYWbVqPS4dzkbg=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250113102407-21a7ff90104a h1:rrLSuAHI/TGOTm5d7Bffu+qf4EnmPguOll5x5nG/3Tc=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250113102407-21a7ff90104a/go.mod h1:VgWEn23ddKySWXrwPMhqtiBjTJnbm5t7yWjzfvNxbbI=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250113114256-11905339bb24 h1:Kc51xKbnyfeafHpOJP7mWh9InNGqZUwcJR46008D+Eg=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250113114256-11905339bb24/go.mod h1:VgWEn23ddKySWXrwPMhqtiBjTJnbm5t7yWjzfvNxbbI=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250113124812-6e5c87379649 h1:dmtrmNDdTR/2R3HjaIbPdu5LZViPzigwSjU207NXCxI=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250113124812-6e5c87379649/go.mod h1:VgWEn23ddKySWXrwPMhqtiBjTJnbm5t7yWjzfvNxbbI=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250113135241-a0f436b3e162 h1:oGP40P/uUngU7stnsRdx0jwxZGc+pzLzrMlUjEBSy0M=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250113135241-a0f436b3e162/go.mod h1:VgWEn23ddKySWXrwPMhqtiBjTJnbm5t7yWjzfvNxbbI=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250114071722-1c32cd2d12df h1:T52jgXQddoxwe+embR26Fwmz4G2jkl4QpYVHGtiLUNI=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250114071722-1c32cd2d12df/go.mod h1:VgWEn23ddKySWXrwPMhqtiBjTJnbm5t7yWjzfvNxbbI=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250114081637-918006302bb4 h1:AwCbDHjvUz9iQaF7hgYWyabVF/EzSSSk5bCNgntNJ6c=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250114081637-918006302bb4/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250114105339-b782248da741 h1:akAQLlcAXDtUhbNHbona9xJrHCzK9jxlvsDsEpVP1fg=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250114105339-b782248da741/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250114135055-1a4694c8913a h1:AxnecA1YKOZ81OKb1akK2Qc/0UNDUxdjSww7ALyehas=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250114135055-1a4694c8913a/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250115082026-ad69c0495144 h1:MZ90rw4SKL0dqL/Lb+7E54vkk9fb8W6X0UJo9UW/XBk=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250115082026-ad69c0495144/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250115095644-be3803039583 h1:6My1sqjvqgHnC4TlE7RsZQHC8AVhad0gZl8uOvLTM9o=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250115095644-be3803039583/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250115102820-0e0540af43d0 h1:AcHC2WIeHOSjz5xe7OsjMi39EevxdY2O/9q0VMkDRz0=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250115102820-0e0540af43d0/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250116091455-68f418928395 h1:u4myLPGqBbzprWHg6713k5a++4yiq1ujlVy7yrMkZ9g=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250116091455-68f418928395/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250116142544-a4a249bab828 h1:yMDBDTs7LECyueUfh0iug502GN8GodVpQSl/gZchUjU=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250116142544-a4a249bab828/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250117081640-450fab437cb7 h1:SV9U48sR09cNRl48489lQHrrKJFtTMQoQcRhmtsLTYQ=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250117081640-450fab437cb7/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250117090737-b990fe42d375 h1:UsPWfbVgvUcOC3BtD8B9dUQfv/FnRF4IZGrYxUJr1iM=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250117090737-b990fe42d375/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250117100508-d44fb976e4ff h1:GaLrVn6ame6BV7pfUB2xeHCCJLBECRiCCpPj6zteL+s=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250117100508-d44fb976e4ff/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250117121920-ed787683f47b h1:3wap+dPPplJkDglE5toKfdFUmjobAeIJWdiRtCQ3xkQ=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250117121920-ed787683f47b/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250117124801-e5c7dbe4cb96 h1:opQ/Uku27DOKAqDcKC9k6J9H5Tj9bNyKdHnJnD3U850=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250117124801-e5c7dbe4cb96/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250117135417-c63a1fef6c48 h1:dEebv8ZV5rt6BYPkcK6HOts+OPqkSxkKp5zn1lCq1vs=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250117135417-c63a1fef6c48/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250117152246-b85ca8674b27 h1:QEIj90eIoYsjs1uekbI3Nu48KDWmzGV7ugcr9agJbYI=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250117152246-b85ca8674b27/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250120123706-58b36f282344 h1:MPt8BhrbMJiMa4KDWqBUvdrlone7UxgIgZ5PW4du0Ek=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250120123706-58b36f282344/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250120124939-67b8215adf79 h1:9Y+KJlzy5jHhrd4b44pNEBjSJKnIyvlSQ5Mbj1zcXbA=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250120124939-67b8215adf79/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250120143509-305f2605030d h1:f1tpLADIAbwTKxN62csH+v2Fe0q1eQ7dYIDhPl1GZ8I=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250120143509-305f2605030d/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250121080257-de585a723426 h1:49cuCsDsBE6ZrvqMh6d48ZynpPyEpkw1LtC0nMQnvEU=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250121080257-de585a723426/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250121083541-0d83885b9b5e h1:yh2tiTxuQbrdgCePREyMewPr8Btdacpw6vo7ymmqf7Y=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250121083541-0d83885b9b5e/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250121101118-bc12fb53be23 h1:oOSJA8w33aJ2TlMRuR7bU/rme/IYSBcVjrb6gE/jwSw=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250121101118-bc12fb53be23/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250121105544-bf5a16f41bea h1:X9YiXv2GSLT6jotS3C/JvvdYBLtxgKI8OV60ndJzjXk=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250121105544-bf5a16f41bea/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250121131007-745bb58c593e h1:rHbooeLrsMvIYj5nHc3MK8NVEh9v5edFBCkOxeRoYjs=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250121131007-745bb58c593e/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250121160438-67ebeca1f489 h1:XwPLFaKjP0o6ZuKnj5aDJ9hIBlX8giNS9BB78uIH0g0=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250121160438-67ebeca1f489/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250122080653-67940296d255 h1:VFlxjrbks8pDzoZ40lnyHD5qVyEMAIfEAmY2w4wBAE8=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250122080653-67940296d255/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250122090736-8ab313e6cbd8 h1:u7Rt0tQMCzylFPyMcO5uNQ8041K80cM0BQNbBDbjAj0=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250122090736-8ab313e6cbd8/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250122101635-2a93b17d71d8 h1:AvthXY1/mrB4aeQpoj84ewVCdIYYemwn9WydYJ+9hyw=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250122101635-2a93b17d71d8/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250122110438-062c1afe8568 h1:pk7Gqa1yEwl5ASc9wJNjxJ+1XfTXYSwDvsxB3KOHWoo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250122110438-062c1afe8568/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250122121814-ed1e76105250 h1:TwCz7oXB7diECiM/kadwDZ78iM8E8ka2ShKs/PzdszA=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250122121814-ed1e76105250/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250122135342-4be954a6f359 h1:x5dGOGYgdDhSeYtAkWeNlWQLU24yv8BUpwx1Idc9+ME=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250122135342-4be954a6f359/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250122140340-9c71730d9cb7 h1:oAkv9IOuiP71VO/plOkPHaPk9X3ELfnGdSz2cctLnGw=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250122140340-9c71730d9cb7/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250122153005-0e798dac5081 h1:P/WDRzkAJHhPuZZbU2VmVqSJ6AcMN/ia/pPZ60MpRfo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250122153005-0e798dac5081/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250123074822-df04133551e4 h1:ayV2U6VUUJXdBE2AGuRuwTKr7WqIycmVgEMv8v/KlGU=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250123074822-df04133551e4/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250123082727-8cba10c4fe29 h1:zt0AA0GddWtbgupsvFvNAozrGMP0FISHnjSmsp3Ihgc=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250123082727-8cba10c4fe29/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250123094950-d15fdac27bde h1:Yjr0WPiR3dMg+H8EIO4GzqohRZBvGh/h4ysx5n8wCZw=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250123094950-d15fdac27bde/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250123103535-2205ac9b5819 h1:y/opEsKeo7G5Os2RWd7zF5i5DU4neDLt6fUq2hSW66U=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250123103535-2205ac9b5819/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250123114959-49e495f062eb h1:9FDB2xUhO+PFkb1mhNq+vItyfW/Jb0KjBRDEDPqPcno=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250123114959-49e495f062eb/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250123134717-db6049bab345 h1:OW5TLnNhNxJCkhMXUy5d9VSOgEGNFc9+uA3thyPuRA4=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250123134717-db6049bab345/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250123140834-c1888f89218f h1:iNqXYlnTh4nnfuVN/NObIJO5g9Mu3Mi9yFGmNFwO1Jk=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250123140834-c1888f89218f/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250124095557-97d466818af0 h1:v8Fj897AF5l8icSm2FE0E2tkl96eJI43Zr4UHIUkL6Y=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250124095557-97d466818af0/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250127080547-fbb55e64dcf4 h1:s6+5sTIeR86N+9oK3uXItlP0L1SgKCwMNQFU6LERDU4=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250127080547-fbb55e64dcf4/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250127083756-68bacf5da410 h1:b+dzulgEl+a7BudsqCkgBg/1aEqo8/1WpGs+WGZHznE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250127083756-68bacf5da410/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250127110938-1ad9ce09cb35 h1:PWlFiCaAHTUDuwOf84hA4BDivEA3FU+DDH7dBg9IPho=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250127110938-1ad9ce09cb35/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250127131512-7ca360be6aa4 h1:8y8I+hmSuUPV2dt/qw6d2TY/YRLXvZp0zE9iSwR3qv4=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250127131512-7ca360be6aa4/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250127134257-8b03df7923bd h1:eylhA0MziFMzY+kfXy2tnZEHDWIXCh/kPDLyBG2OC5E=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250127134257-8b03df7923bd/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250127150345-db85d1a48b73 h1:SNwsmEyaHrnoN7/IBathlA/HI/y4D2IBJjZEdtUC7Ew=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250127150345-db85d1a48b73/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250128131916-598774b0b197 h1:tAi5pznkPDjCFO81EhvS8Djx1e7iz4D2e72lxegRVmQ=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250128131916-598774b0b197/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250129073743-74a1f66d26c2 h1:ScjLqkn82u+on8CXnfgi52UZqddR879WlUtiq9qQOdo=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250129073743-74a1f66d26c2/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250129100135-330768490a61 h1:afATt4OzRndXApO1Xqn9PeKohW5G2nhqvptZkE2pML8=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250129100135-330768490a61/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250129133324-ede2d5fd5322 h1:d0/n7kJZNG6QKdI5ySqYGe3nYYOKmko76ysjlZA30Dk=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250129133324-ede2d5fd5322/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250129143004-df2c38199cf0 h1:8cIJxCeVHbefpa7oBZPeFUAa7Mmtiw93Z1xMa9Qf/wk=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250129143004-df2c38199cf0/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250129154925-84d20c52fa1c h1:6+KdDssQyPZSCmtiBrlygHIAt2yhewx3rz/SPEfsYnI=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250129154925-84d20c52fa1c/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250130072403-826d7586b127 h1:wYLo29accEk0anP8eLjBKbDyYGLFKg4Qp41NvCb2JsQ=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250130072403-826d7586b127/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250130084513-787c01b4be1c h1:3TEloYSf4k1o9tkEo5T3sES+qZcJBsdR82o+T81SC3A=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250130084513-787c01b4be1c/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250130101134-107ce2580128 h1:AElHp4SeiVmMiyCta9r8JOpSYMAS0To/fLK6eaBz1PU=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250130101134-107ce2580128/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250130130847-976a5cedcb5f h1:0buFXek+V4E4rIGBEygLXpw34I50yAGqTIAOyTgZwsA=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250130130847-976a5cedcb5f/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250131073800-3ec0d554edad h1:Ey6yORB8TOa+PkMpNhH0tayZuZ6FwyJ59vZM4BRGHnY=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250131073800-3ec0d554edad/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250131082340-892bd93471aa h1:53a/yqBAVkNpeAaCqxHx3FWC0wV5XK/dhooR3f0Kp8g=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250131082340-892bd93471aa/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250131100142-b2113bff62fa h1:S7nsqFotIeXSPJqipNW6wB3VsfYhFrWcZIR8mX6aJg0=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250131100142-b2113bff62fa/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250131110730-a2f2d0ebef72 h1:0EUj84bzUWvaH8egQkjH1xQ+HoyX9EZqtokNosYywgU=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250131110730-a2f2d0ebef72/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250131153610-6807614ac86b h1:/SjZVsLeH8sXopUeR3xB7wygJvIyA2V2uS+GsfPFysE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250131153610-6807614ac86b/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250203105249-64bea2a66e35 h1:5Zkm2tPQ60l2oMdrf3/uC1mWOCU+ti77d0k9y/AW1z8=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250203105249-64bea2a66e35/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250203113830-275bd56fe64c h1:4EW1OEHuRjH9B3LhQEvOLp3qPxnU4kDBwgKzy7KNlS4=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250203113830-275bd56fe64c/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250203124514-14977c7b2c39 h1:XW7Hny4W/2ClAZR2Wi9KRvLTH/pjmwpgXiwM+fDsy50=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250203124514-14977c7b2c39/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250203143322-22d15fe395e8 h1:OWBLh52Ee4Txs0PY4bMlfRbaTbfNNR/ndj2J+RGrR6k=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250203143322-22d15fe395e8/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250204080055-bf114b39b7d5 h1:rsOMNER+ZIIt/as3bOU2lJe+MbCCR5x1iR/XyZYmuKU=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250204080055-bf114b39b7d5/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250204091410-2ccb57ffb050 h1:NdKJD+hbAyDaUfRkdtMUZLasR1d/BGyEfCvuozTso+Y=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250204091410-2ccb57ffb050/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250204110709-3061df4f13da h1:Mx3vR5r21H0zX+B0yaQOeOn3hvWJUrdy0DFLI+RAH1I=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250204110709-3061df4f13da/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250204134321-69bf9518661e h1:etAdc6jOnpm49RFs2Z8R7zzwfP/uGN6eQAmMGVqTEnc=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250204134321-69bf9518661e/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250204155113-a8e2445c103c h1:wNM/SweaGy+Wz4KV3+1wpLYgtDOSDK+WO6564TCGDjE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250204155113-a8e2445c103c/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250205154116-7201cabb438a h1:DAEI00i+r2MAlUqqRJfW5FiXsWppQW8y51kKRl39WFA=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250205154116-7201cabb438a/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250205160221-88b7cfe2fd0f h1:6V+Z81ywYoDYSVMnM4PVaJYXFgCN3xSG3ddiUPn4jL8=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250205160221-88b7cfe2fd0f/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250206080835-e646cfef0b46 h1:YnM9WwcijS+/OrpgML7y1O5c8hJ3Wt5iIPSSZYai+zw=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250206080835-e646cfef0b46/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250206085600-3ffff7d32cf1 h1:PZ6Z3PdgjmiXQlNA64rhZgPyuZugs/jJROEVDHZs9yg=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250206085600-3ffff7d32cf1/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250206101306-ad3293da9dbc h1:3X2bDl/ErUp+ahzROiscJTF6XyF81Swv4JXY2xqI6/o=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250206101306-ad3293da9dbc/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250206115651-940ef17f7b0c h1:T4NE8PQY0opcYREioh4V2eVvJkagn52jytg4S1ZtpGE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250206115651-940ef17f7b0c/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250207072957-31ec352b57b9 h1:lmzktnKiGDo6f1+a8kRAeXvbu/+CEPe/PLsqIOt8hsc=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250207072957-31ec352b57b9/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250207104112-3d1383357252 h1:zLU294Mc2bcxdeihG2K+wK2Zr2B/lTm+dJCMIEMUOKU=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250207104112-3d1383357252/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250210085846-4a178d01e3ee h1:SwWTxlaRAX5p24XwOTBVbAeTLiLFNlSqDZpU0yICrWc=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250210085846-4a178d01e3ee/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250210094237-e55727d9e273 h1:flQk8D7BAQNolfMRXehxZ5QcWuR3ytUvwJWt5GyFSbw=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250210094237-e55727d9e273/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250210103255-f663ec80f5dd h1:myQN5EugL+AvIy4Ugw+jlHEfzcVaQ1bZ+RbwTioaZqs=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250210103255-f663ec80f5dd/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250210121042-52d5a1fbf9b8 h1:LQpmqcx6b+RjfvYzyrgquLSIWdRqcJi2UXybB9wk9Vk=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250210121042-52d5a1fbf9b8/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250211065515-a573a4ce715e h1:00SdIMSwwSJpKVfdwplehHpFULrVvAoc0HxKQD06KEs=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250211065515-a573a4ce715e/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250211073038-ffaa67fb5dca h1:mZBcicJezYO7gY5SHMzyUusyLxYKwFptliiysqaGwD0=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250211073038-ffaa67fb5dca/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250211081618-d82ae166a1e5 h1:S+vFupQoyTwa2QrtxmSChxzAYCrh6mLf7GXRNKU475g=
|
||||||
|
cloud.o-forge.io/core/oc-lib v0.0.0-20250211081618-d82ae166a1e5/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
|
github.com/beego/beego/v2 v2.3.0 h1:iECVwzm6egw6iw6tkWrEDqXG4NQtKLQ6QBSYqlM6T/I=
|
||||||
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
|
github.com/beego/beego/v2 v2.3.0/go.mod h1:Ob/5BJ9fIKZLd4s9ZV3o9J6odkkIyL83et+p98gyYXo=
|
||||||
github.com/beego/beego/v2 v2.3.8 h1:wplhB1pF4TxR+2SS4PUej8eDoH4xGfxuHfS7wAk9VBc=
|
github.com/beego/beego/v2 v2.3.1 h1:7MUKMpJYzOXtCUsTEoXOxsDV/UcHw6CPbaWMlthVNsc=
|
||||||
github.com/beego/beego/v2 v2.3.8/go.mod h1:8vl9+RrXqvodrl9C8yivX1e6le6deCK6RWeq8R7gTTg=
|
github.com/beego/beego/v2 v2.3.1/go.mod h1:5cqHsOHJIxkq44tBpRvtDe59GuVRVv/9/tyVDxd5ce4=
|
||||||
|
github.com/beego/beego/v2 v2.3.2/go.mod h1:5cqHsOHJIxkq44tBpRvtDe59GuVRVv/9/tyVDxd5ce4=
|
||||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||||
github.com/biter777/countries v1.7.5 h1:MJ+n3+rSxWQdqVJU8eBy9RqcdH6ePPn4PJHocVWUa+Q=
|
github.com/biter777/countries v1.7.5 h1:MJ+n3+rSxWQdqVJU8eBy9RqcdH6ePPn4PJHocVWUa+Q=
|
||||||
@@ -25,163 +276,111 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
|||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
|
|
||||||
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
|
|
||||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
|
|
||||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
|
|
||||||
github.com/elazarl/go-bindata-assetfs v1.0.1 h1:m0kkaHRKEu7tUIUFVwhGGGYClXvyl4RE03qmvRTNfbw=
|
github.com/elazarl/go-bindata-assetfs v1.0.1 h1:m0kkaHRKEu7tUIUFVwhGGGYClXvyl4RE03qmvRTNfbw=
|
||||||
github.com/elazarl/go-bindata-assetfs v1.0.1/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
|
github.com/elazarl/go-bindata-assetfs v1.0.1/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
|
||||||
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
|
|
||||||
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
|
||||||
github.com/etcd-io/etcd v3.3.17+incompatible/go.mod h1:cdZ77EstHBwVtD6iTgzgvogwcjo9m4iOqoijouPJ4bs=
|
github.com/etcd-io/etcd v3.3.17+incompatible/go.mod h1:cdZ77EstHBwVtD6iTgzgvogwcjo9m4iOqoijouPJ4bs=
|
||||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
github.com/gabriel-vasile/mimetype v1.4.5 h1:J7wGKdGu33ocBOhGy0z653k/lFKLFDPJMG8Gql0kxn4=
|
||||||
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
github.com/gabriel-vasile/mimetype v1.4.5/go.mod h1:ibHel+/kbxn9x2407k1izTA1S81ku1z/DlgOW2QE0M4=
|
||||||
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
|
github.com/gabriel-vasile/mimetype v1.4.6 h1:3+PzJTKLkvgjeTbts6msPJt4DixhT4YtFNf1gtGe3zc=
|
||||||
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
|
github.com/gabriel-vasile/mimetype v1.4.6/go.mod h1:JX1qVKqZd40hUPpAfiNTe0Sne7hdfKSbOqqmkq8GCXc=
|
||||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
|
||||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
|
||||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
|
||||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
|
||||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
|
||||||
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
|
||||||
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
|
||||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
|
||||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
|
||||||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
|
||||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||||
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
||||||
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
||||||
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
||||||
github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4=
|
github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4Bx7ia+JlgcnOao=
|
||||||
github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
|
github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
|
||||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
github.com/go-playground/validator/v10 v10.22.1 h1:40JcKH+bBNGFczGuoBYgX4I6m/i27HYW8P9FDk5PbgA=
|
||||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
github.com/go-playground/validator/v10 v10.22.1/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
|
||||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||||
github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
|
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
||||||
github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||||
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
|
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||||
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
|
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
|
||||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
|
|
||||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
|
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
||||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||||
github.com/goraz/onion v0.1.3 h1:KhyvbDA2b70gcz/d5izfwTiOH8SmrvV43AsVzpng3n0=
|
github.com/goraz/onion v0.1.3 h1:KhyvbDA2b70gcz/d5izfwTiOH8SmrvV43AsVzpng3n0=
|
||||||
github.com/goraz/onion v0.1.3/go.mod h1:XEmz1XoBz+wxTgWB8NwuvRm4RAu3vKxvrmYtzK+XCuQ=
|
github.com/goraz/onion v0.1.3/go.mod h1:XEmz1XoBz+wxTgWB8NwuvRm4RAu3vKxvrmYtzK+XCuQ=
|
||||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
|
||||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
|
||||||
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
|
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
|
||||||
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||||
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||||
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
|
|
||||||
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
|
|
||||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
|
||||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
|
||||||
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
|
||||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
|
||||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
||||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||||
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
|
github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
|
||||||
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||||
|
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||||
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
||||||
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
||||||
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
|
|
||||||
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
|
|
||||||
github.com/libp2p/go-libp2p/core v0.43.0-rc2 h1:1X1aDJNWhMfodJ/ynbaGLkgnC8f+hfBIqQDrzxFZOqI=
|
|
||||||
github.com/libp2p/go-libp2p/core v0.43.0-rc2/go.mod h1:NYeJ9lvyBv9nbDk2IuGb8gFKEOkIv/W5YRIy1pAJB2Q=
|
|
||||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
github.com/marcinwyszynski/geopoint v0.0.0-20140302213024-cf2a6f750c5b h1:XBF8THPBy28s2ryI7+/Jf/847unLWxYMpJveX5Kox+0=
|
||||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
github.com/marcinwyszynski/geopoint v0.0.0-20140302213024-cf2a6f750c5b/go.mod h1:z1oqhOuuYpPHmUmAK2aNygKFlPdb4o3PppQnVTRFdrI=
|
||||||
|
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
|
||||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
|
||||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
|
|
||||||
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
|
|
||||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
|
||||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
|
|
||||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
|
||||||
github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
|
github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
|
||||||
github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
|
github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
|
||||||
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
|
|
||||||
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
|
||||||
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
|
|
||||||
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
|
|
||||||
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
|
|
||||||
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
|
|
||||||
github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc=
|
|
||||||
github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
|
|
||||||
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
|
|
||||||
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
|
|
||||||
github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
|
|
||||||
github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
|
|
||||||
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
|
|
||||||
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
|
|
||||||
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
|
|
||||||
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
|
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||||
github.com/nats-io/nats.go v1.44.0 h1:ECKVrDLdh/kDPV1g0gAQ+2+m2KprqZK5O/eJAyAnH2M=
|
github.com/nats-io/nats.go v1.37.0 h1:07rauXbVnnJvv1gfIyghFEo6lUcYRY0WXc3x7x0vUxE=
|
||||||
github.com/nats-io/nats.go v1.44.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
|
github.com/nats-io/nats.go v1.37.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
|
||||||
github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0=
|
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
|
||||||
github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE=
|
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
|
||||||
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
||||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||||
github.com/ogier/pflag v0.0.1/go.mod h1:zkFki7tvTa0tafRvTBIZTvzYyAu6kQhPZFnshFFPE+g=
|
github.com/ogier/pflag v0.0.1/go.mod h1:zkFki7tvTa0tafRvTBIZTvzYyAu6kQhPZFnshFFPE+g=
|
||||||
github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
|
|
||||||
github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
|
|
||||||
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
|
|
||||||
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
|
|
||||||
github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
|
github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
|
||||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
|
github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
|
||||||
github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
|
github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
|
||||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||||
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
|
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||||
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
|
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||||
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
|
github.com/prometheus/common v0.58.0 h1:N+N8vY4/23r6iYfD3UQZUoJPnUYAo7v6LG5XZxjZTXo=
|
||||||
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
|
github.com/prometheus/common v0.58.0/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0=
|
||||||
|
github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA=
|
||||||
|
github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
|
||||||
|
github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
|
||||||
|
github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
|
||||||
|
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||||
|
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||||
github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
|
github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
|
||||||
github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
|
github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
|
||||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||||
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
|
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||||
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
|
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||||
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
|
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||||
|
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
|
||||||
|
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
|
||||||
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 h1:v9ezJDHA1XGxViAUSIoO/Id7Fl63u6d0YmsAm+/p2hs=
|
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 h1:v9ezJDHA1XGxViAUSIoO/Id7Fl63u6d0YmsAm+/p2hs=
|
||||||
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02/go.mod h1:RF16/A3L0xSa0oSERcnhd8Pu3IXSDZSK2gmGIMsttFE=
|
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02/go.mod h1:RF16/A3L0xSa0oSERcnhd8Pu3IXSDZSK2gmGIMsttFE=
|
||||||
github.com/skarademir/naturalsort v0.0.0-20150715044055-69a5d87bef62/go.mod h1:oIdVclZaltY1Nf7OQUkg1/2jImBJ+ZfKZuDIRSwk3p0=
|
github.com/skarademir/naturalsort v0.0.0-20150715044055-69a5d87bef62/go.mod h1:oIdVclZaltY1Nf7OQUkg1/2jImBJ+ZfKZuDIRSwk3p0=
|
||||||
@@ -191,23 +390,10 @@ github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYl
|
|||||||
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||||
github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
|
github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
|
||||||
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
|
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
|
||||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
|
||||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
|
||||||
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
|
|
||||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
|
||||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
|
||||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
|
||||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
|
||||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
|
||||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
|
||||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
|
||||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
|
||||||
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
|
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
|
||||||
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
|
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
|
||||||
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
|
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
|
||||||
@@ -217,37 +403,35 @@ github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gi
|
|||||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM=
|
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM=
|
||||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI=
|
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI=
|
||||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||||
go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw=
|
go.mongodb.org/mongo-driver v1.16.1 h1:rIVLL3q0IHM39dvE+z2ulZLp9ENZKThVfuvN/IiN4l8=
|
||||||
go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
|
go.mongodb.org/mongo-driver v1.16.1/go.mod h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw=
|
||||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
go.mongodb.org/mongo-driver v1.17.1 h1:Wic5cJIwJgSpBhe3lx3+/RybR5PiYRMpVFgO7cOHyIM=
|
||||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
go.mongodb.org/mongo-driver v1.17.1/go.mod h1:wwWm/+BuOddhcq3n68LKRmgk2wXzmF6s0SFOa0GINL4=
|
||||||
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
|
|
||||||
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
|
|
||||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
|
||||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
|
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
|
||||||
golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
|
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
|
||||||
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=
|
golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
|
||||||
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
|
golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
|
||||||
|
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
|
||||||
|
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||||
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
|
|
||||||
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
|
|
||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
|
||||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
|
||||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
|
||||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
|
||||||
|
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
|
||||||
|
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
|
||||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
@@ -258,60 +442,33 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
|
||||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
|
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
|
||||||
|
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
|
||||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
|
||||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||||
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
|
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
|
||||||
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
|
||||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
|
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
|
||||||
google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
|
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
|
||||||
|
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
|
||||||
|
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
|
|
||||||
gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
|
||||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
|
||||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
|
||||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
k8s.io/api v0.35.1 h1:0PO/1FhlK/EQNVK5+txc4FuhQibV25VLSdLMmGpDE/Q=
|
|
||||||
k8s.io/api v0.35.1/go.mod h1:28uR9xlXWml9eT0uaGo6y71xK86JBELShLy4wR1XtxM=
|
|
||||||
k8s.io/apimachinery v0.35.1 h1:yxO6gV555P1YV0SANtnTjXYfiivaTPvCTKX6w6qdDsU=
|
|
||||||
k8s.io/apimachinery v0.35.1/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
|
|
||||||
k8s.io/client-go v0.35.1 h1:+eSfZHwuo/I19PaSxqumjqZ9l5XiTEKbIaJ+j1wLcLM=
|
|
||||||
k8s.io/client-go v0.35.1/go.mod h1:1p1KxDt3a0ruRfc/pG4qT/3oHmUj1AhSHEcxNSGg+OA=
|
|
||||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
|
||||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
|
||||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
|
|
||||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
|
|
||||||
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
|
|
||||||
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
|
||||||
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
|
|
||||||
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
|
|
||||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
|
|
||||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
|
|
||||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
|
||||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
|
||||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
|
|
||||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
|
||||||
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
|
|
||||||
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
|
|
||||||
|
|||||||
@@ -1,141 +0,0 @@
|
|||||||
// Package infrastructure is the public façade for all scheduling sub-services.
|
|
||||||
// Controllers and main.go import only this package; the sub-packages are
|
|
||||||
// internal implementation details.
|
|
||||||
package infrastructure
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"oc-scheduler/infrastructure/execution"
|
|
||||||
"oc-scheduler/infrastructure/nats"
|
|
||||||
"oc-scheduler/infrastructure/planner"
|
|
||||||
"oc-scheduler/infrastructure/scheduler"
|
|
||||||
"oc-scheduler/infrastructure/scheduling_resources"
|
|
||||||
"oc-scheduler/infrastructure/session"
|
|
||||||
"oc-scheduler/infrastructure/utils"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workflow"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Type re-exports
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
type WorkflowSchedule = scheduler.WorkflowSchedule
|
|
||||||
type CheckResult = scheduler.CheckResult
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Bootstrap — called from main.go
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
func ListenNATS() { nats.ListenNATS() }
|
|
||||||
func InitSelfPlanner() { planner.InitPlanner() }
|
|
||||||
func RecoverDraftExecutions() { execution.RecoverDraft() }
|
|
||||||
func WatchExecutions() { execution.WatchExecutions() }
|
|
||||||
|
|
||||||
// EmitNATS broadcasts a propagation message via NATS.
|
|
||||||
func EmitNATS(peerID string, message tools.PropalgationMessage) {
|
|
||||||
utils.Propalgate(peerID, message)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Utilities
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
func GetWorkflowPeerIDs(wfID string, req *tools.APIRequest) ([]string, error) {
|
|
||||||
return utils.GetWorkflowPeerIDs(wfID, req)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Planner subscriptions
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
func SubscribeSessionConfirmation(executionsID string) (<-chan struct{}, func()) {
|
|
||||||
return execution.SubscribeSessionConfirmation(executionsID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func SubscribePlannerUpdates(peerIDs []string) (<-chan string, func()) {
|
|
||||||
return planner.GetPlannerService().SubscribePlannerUpdates(peerIDs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func SubscribeWorkflowUpdates(wfID string) (<-chan struct{}, func()) {
|
|
||||||
return planner.GetPlannerService().SubscribeWorkflowUpdates(wfID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func RequestPlannerRefresh(peerIDs []string, executionsID string) []string {
|
|
||||||
return planner.GetPlannerService().Refresh(peerIDs, executionsID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func ReleaseRefreshOwnership(peerIDs []string, executionsID string) {
|
|
||||||
planner.GetPlannerService().ReleaseRefreshOwnership(peerIDs, executionsID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Session management
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
func UpsertSessionDrafts(
|
|
||||||
executionsID string,
|
|
||||||
execs []*workflow_execution.WorkflowExecution,
|
|
||||||
purchases, bookings []scheduling_resources.SchedulerObject,
|
|
||||||
req *tools.APIRequest,
|
|
||||||
) {
|
|
||||||
svc := session.NewSessionExecutionsService(executionsID)
|
|
||||||
svc.UpsertSessionDrafts(purchases, bookings, execs, req)
|
|
||||||
}
|
|
||||||
|
|
||||||
func CleanupSession(executionsID string, req *tools.APIRequest) {
|
|
||||||
svc := session.NewSessionExecutionsService(executionsID)
|
|
||||||
svc.CleanupSession(req)
|
|
||||||
}
|
|
||||||
|
|
||||||
func UnscheduleExecution(executionID string, req *tools.APIRequest) error {
|
|
||||||
return execution.Unschedule(executionID, req)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Schedule confirmation
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
func Schedule(
|
|
||||||
ws *WorkflowSchedule,
|
|
||||||
wfID string,
|
|
||||||
req *tools.APIRequest,
|
|
||||||
) (*WorkflowSchedule, *workflow.Workflow, []*workflow_execution.WorkflowExecution, error) {
|
|
||||||
if req == nil {
|
|
||||||
return ws, nil, nil, fmt.Errorf("no request provided")
|
|
||||||
}
|
|
||||||
if ws.UUID == "" {
|
|
||||||
return ws, nil, nil, fmt.Errorf("no scheduling session: use the Check stream first")
|
|
||||||
}
|
|
||||||
|
|
||||||
svc := session.NewSessionExecutionsService(ws.UUID)
|
|
||||||
|
|
||||||
executions := svc.LoadSessionExecs()
|
|
||||||
for _, exec := range executions {
|
|
||||||
if !exec.ExecDate.IsZero() && exec.ExecDate.Before(time.Now().UTC()) {
|
|
||||||
return ws, nil, nil, fmt.Errorf("execution %s is obsolete (start date in the past)", exec.GetID())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := svc.ConfirmSession(req); err != nil {
|
|
||||||
return ws, nil, nil, fmt.Errorf("confirm session failed: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, exec := range executions {
|
|
||||||
go execution.WatchDeadline(exec.GetID(), exec.ExecutionsID, exec.ExecDate, req)
|
|
||||||
}
|
|
||||||
|
|
||||||
adminReq := &tools.APIRequest{Admin: true}
|
|
||||||
obj, _, _ := workflow.NewAccessor(req).LoadOne(wfID)
|
|
||||||
if obj == nil {
|
|
||||||
return ws, nil, executions, nil
|
|
||||||
}
|
|
||||||
wf := obj.(*workflow.Workflow)
|
|
||||||
ws.Workflow = wf
|
|
||||||
ws.WorkflowExecution = executions
|
|
||||||
wf.GetAccessor(adminReq).UpdateOne(wf.Serialize(wf), wf.GetID())
|
|
||||||
return ws, wf, executions, nil
|
|
||||||
}
|
|
||||||
@@ -1,705 +0,0 @@
|
|||||||
package execution
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"oc-scheduler/conf"
|
|
||||||
"oc-scheduler/infrastructure/planner"
|
|
||||||
"oc-scheduler/infrastructure/scheduling_resources"
|
|
||||||
infUtils "oc-scheduler/infrastructure/utils"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
oclib "cloud.o-forge.io/core/oc-lib"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/config"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/booking"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/order"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workflow"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Session confirmation pub/sub
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
var sessionConfirmMu sync.Mutex
|
|
||||||
var sessionConfirmSubs = map[string][]chan struct{}{}
|
|
||||||
|
|
||||||
// SubscribeSessionConfirmation returns a channel that receives one signal when
|
|
||||||
// the first execution of the given session is fully confirmed (IsDraft=false).
|
|
||||||
// The returned cancel func must be called to clean up.
|
|
||||||
func SubscribeSessionConfirmation(executionsID string) (<-chan struct{}, func()) {
|
|
||||||
ch := make(chan struct{}, 1)
|
|
||||||
sessionConfirmMu.Lock()
|
|
||||||
sessionConfirmSubs[executionsID] = append(sessionConfirmSubs[executionsID], ch)
|
|
||||||
sessionConfirmMu.Unlock()
|
|
||||||
return ch, func() {
|
|
||||||
sessionConfirmMu.Lock()
|
|
||||||
subs := sessionConfirmSubs[executionsID]
|
|
||||||
for i, c := range subs {
|
|
||||||
if c == ch {
|
|
||||||
sessionConfirmSubs[executionsID] = append(subs[:i], subs[i+1:]...)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sessionConfirmMu.Unlock()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func notifySessionConfirmed(executionsID string) {
|
|
||||||
sessionConfirmMu.Lock()
|
|
||||||
subs := sessionConfirmSubs[executionsID]
|
|
||||||
sessionConfirmMu.Unlock()
|
|
||||||
for _, ch := range subs {
|
|
||||||
select {
|
|
||||||
case ch <- struct{}{}:
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Global execution lock registry
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
var execLocksMu sync.RWMutex
|
|
||||||
var execLocks = map[string]*sync.Mutex{}
|
|
||||||
|
|
||||||
func RegisterExecLock(executionID string) {
|
|
||||||
execLocksMu.Lock()
|
|
||||||
execLocks[executionID] = &sync.Mutex{}
|
|
||||||
execLocksMu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func UnregisterExecLock(executionID string) {
|
|
||||||
execLocksMu.Lock()
|
|
||||||
delete(execLocks, executionID)
|
|
||||||
execLocksMu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetExecLock(executionID string) *sync.Mutex {
|
|
||||||
execLocksMu.RLock()
|
|
||||||
mu := execLocks[executionID]
|
|
||||||
execLocksMu.RUnlock()
|
|
||||||
return mu
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Considers payload
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
type ConsidersPayload struct {
|
|
||||||
ID string `json:"id"`
|
|
||||||
ExecutionsID string `json:"executions_id"`
|
|
||||||
ExecutionID string `json:"execution_id"`
|
|
||||||
PeerIDs []string `json:"peer_ids"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Execution state machine — considers
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
func UpdateExecutionState(payload []byte, dt tools.DataType) {
|
|
||||||
var data ConsidersPayload
|
|
||||||
if err := json.Unmarshal(payload, &data); err != nil || data.ID == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
schdata := oclib.NewRequestAdmin(oclib.LibDataEnum(dt), nil).LoadOne(data.ID)
|
|
||||||
if schdata.Data == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
sch := scheduling_resources.ToSchedulerObject(dt, schdata.Data)
|
|
||||||
if sch == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
execID := sch.GetExecutionId()
|
|
||||||
|
|
||||||
mu := GetExecLock(execID)
|
|
||||||
if mu == nil {
|
|
||||||
fmt.Printf("UpdateExecutionState: no lock for execution %s, skipping\n", execID)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
mu.Lock()
|
|
||||||
defer mu.Unlock()
|
|
||||||
|
|
||||||
adminReq := &tools.APIRequest{Admin: true}
|
|
||||||
res, _, err := workflow_execution.NewAccessor(adminReq).LoadOne(execID)
|
|
||||||
if err != nil || res == nil {
|
|
||||||
fmt.Printf("UpdateExecutionState: could not load execution %s: %v\n", execID, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
exec := res.(*workflow_execution.WorkflowExecution)
|
|
||||||
|
|
||||||
switch dt {
|
|
||||||
case tools.BOOKING:
|
|
||||||
if exec.BookingsState == nil {
|
|
||||||
exec.BookingsState = map[string]workflow_execution.BookingState{}
|
|
||||||
}
|
|
||||||
st := exec.BookingsState[data.ID]
|
|
||||||
st.IsBooked = true
|
|
||||||
exec.BookingsState[data.ID] = st
|
|
||||||
if config.GetConfig().IsNano {
|
|
||||||
scheduling_resources.SendBookingToMaster(schdata.Data.(*booking.Booking)) // TODO : ASK FOR RESPONSE...
|
|
||||||
}
|
|
||||||
case tools.PURCHASE_RESOURCE:
|
|
||||||
if exec.PurchasesState == nil {
|
|
||||||
exec.PurchasesState = map[string]bool{}
|
|
||||||
}
|
|
||||||
exec.PurchasesState[data.ID] = true
|
|
||||||
if config.GetConfig().IsNano {
|
|
||||||
scheduling_resources.SendPurchaseToMaster(schdata.Data.(*purchase_resource.PurchaseResource)) // TODO : ASK FOR RESPONSE...
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// TODO REMOVE
|
|
||||||
allConfirmed := true
|
|
||||||
for _, st := range exec.BookingsState {
|
|
||||||
if !st.IsBooked {
|
|
||||||
allConfirmed = false
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if allConfirmed {
|
|
||||||
for _, st := range exec.PurchasesState {
|
|
||||||
if !st {
|
|
||||||
allConfirmed = false
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if allConfirmed {
|
|
||||||
exec.State = enum.SCHEDULED
|
|
||||||
exec.IsDraft = false
|
|
||||||
}
|
|
||||||
if _, _, err := utils.GenericRawUpdateOne(exec, exec.GetID(), workflow_execution.NewAccessor(adminReq)); err != nil {
|
|
||||||
fmt.Printf("UpdateExecutionState: could not update execution %s: %v\n", execID, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if allConfirmed {
|
|
||||||
go notifySessionConfirmed(exec.ExecutionsID)
|
|
||||||
go confirmSessionOrder(exec.ExecutionsID, adminReq)
|
|
||||||
obj, _, err := workflow.NewAccessor(adminReq).LoadOne(exec.WorkflowID)
|
|
||||||
if err == nil && obj != nil {
|
|
||||||
go EmitConsidersExecution(exec, obj.(*workflow.Workflow))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func confirmSessionOrder(executionsID string, adminReq *tools.APIRequest) {
|
|
||||||
results, _, _ := order.NewAccessor(adminReq).Search(
|
|
||||||
&dbs.Filters{And: map[string][]dbs.Filter{
|
|
||||||
"executions_id": {{Operator: dbs.EQUAL.String(), Value: executionsID}},
|
|
||||||
}}, "", true, 0, 10000)
|
|
||||||
for _, obj := range results {
|
|
||||||
if o, ok := obj.(*order.Order); ok {
|
|
||||||
o.IsDraft = false
|
|
||||||
utils.GenericRawUpdateOne(o, o.GetID(), order.NewAccessor(adminReq))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func ConfirmExecutionDrafts(payload []byte) {
|
|
||||||
var data ConsidersPayload
|
|
||||||
if err := json.Unmarshal(payload, &data); err != nil {
|
|
||||||
fmt.Printf("ConfirmExecutionDrafts: could not parse payload: %v\n", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
d := oclib.NewRequestAdmin(oclib.LibDataEnum(tools.WORKFLOW_EXECUTION), nil).LoadOne(data.ExecutionID)
|
|
||||||
if exec := d.ToWorkflowExecution(); exec != nil {
|
|
||||||
for id := range exec.BookingsState {
|
|
||||||
go scheduling_resources.Confirm(id, tools.BOOKING)
|
|
||||||
}
|
|
||||||
for id := range exec.PurchasesState {
|
|
||||||
go scheduling_resources.Confirm(id, tools.PURCHASE_RESOURCE)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmitConsidersExecution broadcasts a "considers" propagation message for a
// confirmed execution to all peers participating in its workflow. It is a
// best-effort notification: every failure path returns silently.
func EmitConsidersExecution(exec *workflow_execution.WorkflowExecution, wf *workflow.Workflow) {
	// A workflow without a graph has no peers to notify.
	if wf == nil || wf.Graph == nil {
		return
	}
	peerIDs, err := infUtils.GetWorkflowPeerIDs(wf.GetID(), &tools.APIRequest{Admin: true})
	if err != nil || len(peerIDs) == 0 {
		return
	}
	// Inner payload: identifies the execution (ID and ExecutionID are the
	// same value here) and the target peers.
	payload, err := json.Marshal(ConsidersPayload{
		ID:           exec.GetID(),
		ExecutionID:  exec.GetID(),
		ExecutionsID: exec.ExecutionsID,
		PeerIDs:      peerIDs,
	})
	if err != nil {
		return
	}
	// Outer envelope: wraps the payload in a propagation message so remote
	// peers know the datatype and the action to apply.
	b, err := json.Marshal(tools.PropalgationMessage{
		DataType: int(tools.WORKFLOW_EXECUTION),
		Action:   tools.PB_CONSIDERS,
		Payload:  payload,
	})
	if err != nil {
		return
	}
	tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
		FromApp:  "oc-scheduler",
		Datatype: tools.WORKFLOW_EXECUTION,
		Method:   int(tools.PROPALGATION_EVENT),
		Payload:  b,
	})
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Deadline watchers
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
func WatchDeadline(executionID string, ns string, execDate time.Time, request *tools.APIRequest) {
|
|
||||||
delay := time.Until(execDate.UTC().Add(-1 * time.Minute))
|
|
||||||
if delay <= 0 {
|
|
||||||
go handleDeadline(executionID, ns, request)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
time.AfterFunc(delay, func() { handleDeadline(executionID, ns, request) })
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleDeadline runs shortly before an execution's start date. Draft
// executions are purged (never confirmed in time); confirmed executions get
// their Kubernetes namespace provisioned and an end-of-execution watcher armed.
func handleDeadline(executionID string, ns string, request *tools.APIRequest) {
	res, _, err := workflow_execution.NewAccessor(&tools.APIRequest{Admin: true}).LoadOne(executionID)
	if err != nil || res == nil {
		fmt.Printf("handleDeadline: execution %s not found\n", executionID)
		return
	}
	adminReq := &tools.APIRequest{Admin: true}
	exec := res.(*workflow_execution.WorkflowExecution)
	// Still a draft at deadline time: the session was never confirmed, so
	// release its bookings and delete the execution record.
	if exec.IsDraft {
		Unschedule(executionID, request)
		workflow_execution.NewAccessor(adminReq).DeleteOne(executionID)
		fmt.Printf("handleDeadline: purged draft execution %s\n", executionID)
		return
	}
	// Provision the execution namespace. "already exists" is expected on
	// retries/restarts and is not treated as a failure; provisioning errors
	// are logged but do not prevent the end watcher from being armed.
	if serv, err := tools.NewKubernetesService(
		conf.GetConfig().KubeHost+":"+conf.GetConfig().KubePort,
		conf.GetConfig().KubeCA, conf.GetConfig().KubeCert, conf.GetConfig().KubeData); err != nil {
		fmt.Printf("handleDeadline: k8s init failed for %s: %v\n", executionID, err)
	} else if err := serv.ProvisionExecutionNamespace(context.Background(), ns); err != nil &&
		!strings.Contains(err.Error(), "already exists") {
		fmt.Printf("handleDeadline: failed to provision namespace %s: %v\n", ns, err)
	}
	// Arm the teardown watcher for the execution's end date.
	go watchEnd(executionID, ns, exec.EndDate, exec.ExecDate)
}
|
|
||||||
|
|
||||||
func watchEnd(executionID string, ns string, endDate *time.Time, execDate time.Time) {
|
|
||||||
var end time.Time
|
|
||||||
if endDate != nil {
|
|
||||||
end = *endDate
|
|
||||||
} else {
|
|
||||||
end = execDate.UTC().Add(5 * time.Minute)
|
|
||||||
}
|
|
||||||
fire := func() {
|
|
||||||
serv, err := tools.NewKubernetesService(
|
|
||||||
conf.GetConfig().KubeHost+":"+conf.GetConfig().KubePort,
|
|
||||||
conf.GetConfig().KubeCA, conf.GetConfig().KubeCert, conf.GetConfig().KubeData)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Printf("watchEnd: k8s init failed for %s: %v\n", executionID, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err := serv.TeardownExecutionNamespace(context.Background(), ns); err != nil {
|
|
||||||
fmt.Printf("watchEnd: failed to teardown namespace %s: %v\n", ns, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if delay := time.Until(end.UTC()); delay <= 0 {
|
|
||||||
go fire()
|
|
||||||
} else {
|
|
||||||
time.AfterFunc(delay, fire)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Unschedule / Recovery
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
func Unschedule(executionID string, request *tools.APIRequest) error {
|
|
||||||
adminReq := &tools.APIRequest{Admin: true}
|
|
||||||
res, _, err := workflow_execution.NewAccessor(adminReq).LoadOne(executionID)
|
|
||||||
if err != nil || res == nil {
|
|
||||||
return fmt.Errorf("execution %s not found: %w", executionID, err)
|
|
||||||
}
|
|
||||||
exec := res.(*workflow_execution.WorkflowExecution)
|
|
||||||
for _, byResource := range exec.PeerBookByGraph {
|
|
||||||
for _, bookingIDs := range byResource {
|
|
||||||
for _, bkID := range bookingIDs {
|
|
||||||
bkRes, _, loadErr := booking.NewAccessor(adminReq).LoadOne(bkID)
|
|
||||||
if loadErr != nil || bkRes == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
scheduling_resources.GetService().Delete(
|
|
||||||
tools.BOOKING,
|
|
||||||
scheduling_resources.ToSchedulerObject(tools.BOOKING, bkRes),
|
|
||||||
request,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
workflow_execution.NewAccessor(adminReq).DeleteOne(executionID)
|
|
||||||
UnregisterExecLock(executionID)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func RecoverDraft() {
|
|
||||||
adminReq := &tools.APIRequest{Admin: true}
|
|
||||||
results, _, _ := workflow_execution.NewAccessor(adminReq).Search(nil, "*", true, 0, 10000)
|
|
||||||
for _, obj := range results {
|
|
||||||
exec, ok := obj.(*workflow_execution.WorkflowExecution)
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
RegisterExecLock(exec.GetID())
|
|
||||||
go WatchDeadline(exec.GetID(), exec.ExecutionsID, exec.ExecDate, adminReq)
|
|
||||||
}
|
|
||||||
fmt.Printf("RecoverDraft: recovered %d executions\n", len(results))
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// NATS workflow lifecycle handlers
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// HandleWorkflowStarted processes a WORKFLOW_STARTED_EVENT: it marks the
// execution as STARTED, records the real start time, builds the execution
// graph on first start, and marks the entry steps as running. The update is
// done under the per-execution lock (when one is registered) to avoid races
// with concurrent step/done events.
func HandleWorkflowStarted(resp tools.NATSResponse) {
	var evt tools.WorkflowLifecycleEvent
	if err := json.Unmarshal(resp.Payload, &evt); err != nil {
		return
	}
	adminReq := &tools.APIRequest{Admin: true}

	// GetExecLock may return nil for unknown executions; locking is
	// best-effort in that case.
	mu := GetExecLock(evt.ExecutionID)
	if mu != nil {
		mu.Lock()
		defer mu.Unlock()
	}

	res, _, err := workflow_execution.NewAccessor(adminReq).LoadOne(evt.ExecutionID)
	if err != nil || res == nil {
		return
	}
	exec := res.(*workflow_execution.WorkflowExecution)
	exec.State = enum.STARTED
	if evt.RealStart != nil {
		exec.ExecDate = *evt.RealStart
	}

	// Build the execution graph summary from the workflow graph on first start.
	if len(exec.Graph) == 0 {
		wfRes, _, wfErr := workflow.NewAccessor(adminReq).LoadOne(exec.WorkflowID)
		if wfErr == nil && wfRes != nil {
			exec.Graph = workflow_execution.BuildExecutionGraph(wfRes.(*workflow.Workflow).Graph)
		}
	}
	// Advance steps whose deps are already satisfied (typically the entry nodes).
	if len(exec.Graph) > 0 {
		now := time.Now().UTC()
		for _, id := range exec.Graph.ReadyToRun() {
			exec.Graph.MarkRunning(id, now)
		}
	}

	// Best-effort persist; a failed update will be corrected by later events.
	utils.GenericRawUpdateOne(exec, exec.GetID(), workflow_execution.NewAccessor(adminReq))
}
|
|
||||||
|
|
||||||
// HandleWorkflowDone processes a WORKFLOW_DONE_EVENT: it records the terminal
// state and real end date on the execution, releases all booking
// reservations, finalizes still-running graph items, applies per-step
// metrics to their bookings, and propagates the terminal state to any booking
// no step event covered. Runs under the per-execution lock when available.
func HandleWorkflowDone(resp tools.NATSResponse) {
	var evt tools.WorkflowLifecycleEvent
	if err := json.Unmarshal(resp.Payload, &evt); err != nil {
		return
	}
	adminReq := &tools.APIRequest{Admin: true}

	mu := GetExecLock(evt.ExecutionID)
	if mu != nil {
		mu.Lock()
		defer mu.Unlock()
	}

	res, _, err := workflow_execution.NewAccessor(adminReq).LoadOne(evt.ExecutionID)
	if err != nil || res == nil {
		return
	}
	exec := res.(*workflow_execution.WorkflowExecution)
	exec.State = enum.BookingStatus(evt.State)
	if evt.RealEnd != nil {
		exec.EndDate = evt.RealEnd
	}
	// Release all booking reservations (workflow is over) without overwriting
	// IsDone: individual step events already set the authoritative done state
	// for each booking. Resetting everything here would lose that granularity.
	if exec.BookingsState == nil {
		exec.BookingsState = map[string]workflow_execution.BookingState{}
	}
	for id, st := range exec.BookingsState {
		st.IsBooked = false
		exec.BookingsState[id] = st
	}
	// Graph items that already reached success/failure keep their state.
	// Items still in running when the execution terminates receive the terminal
	// state (the step was active but no step_done event arrived before the
	// workflow finished — treat it as the execution outcome).
	terminalSuccess := enum.BookingStatus(evt.State) == enum.SUCCESS
	nowGraph := time.Now().UTC()
	for itemID, item := range exec.Graph {
		if item.State == workflow_execution.StepRunning {
			exec.Graph.MarkDone(itemID, terminalSuccess, nowGraph)
		}
	}
	utils.GenericRawUpdateOne(exec, exec.GetID(), workflow_execution.NewAccessor(adminReq))

	// Build a set of booking IDs already covered by per-step events so we only
	// fall back for bookings the orchestrator never emitted a step for (e.g. storage).
	coveredByStep := map[string]bool{}
	for _, step := range evt.Steps {
		applyStepToBooking(step, adminReq)
		coveredByStep[step.BookingID] = true
	}

	// Propagate the execution's terminal state to any booking that was not
	// updated by a step event and is not already in a terminal state.
	terminalState := enum.BookingStatus(evt.State)
	now := time.Now().UTC()
	for id := range exec.BookingsState {
		if coveredByStep[id] {
			continue
		}
		res, _, err := booking.NewAccessor(adminReq).LoadOne(id)
		if err != nil || res == nil {
			continue
		}
		bk := res.(*booking.Booking)
		if terminalExecStates[bk.State] {
			continue
		}
		bk.State = terminalState
		bk.RealEndDate = &now
		utils.GenericRawUpdateOne(bk, bk.GetID(), booking.NewAccessor(adminReq))
	}

	// Resources were freed: refresh our own planner snapshot asynchronously.
	self, err := oclib.GetMySelf()
	if err == nil && self != nil {
		go planner.GetPlannerService().RefreshSelf(self.PeerID, adminReq)
	}
}
|
|
||||||
|
|
||||||
// HandleWorkflowStepDone processes a WORKFLOW_STEP_DONE_EVENT for a single
// booking: it records the step's terminal state and real dates on the
// booking, then — under the per-execution lock — releases the booking in the
// parent execution, advances the execution graph, and (on terminal states)
// triggers a planner refresh.
func HandleWorkflowStepDone(resp tools.NATSResponse) {
	var evt tools.WorkflowLifecycleEvent
	if err := json.Unmarshal(resp.Payload, &evt); err != nil || evt.BookingID == "" {
		return
	}
	adminReq := &tools.APIRequest{Admin: true}

	// Update the booking itself first (no exec lock needed for the booking doc).
	res, _, err := booking.NewAccessor(adminReq).LoadOne(evt.BookingID)
	if err != nil || res == nil {
		return
	}
	bk := res.(*booking.Booking)
	bk.State = enum.BookingStatus(evt.State)
	if evt.RealStart != nil {
		bk.RealStartDate = evt.RealStart
	}
	if evt.RealEnd != nil {
		bk.RealEndDate = evt.RealEnd
	}
	utils.GenericRawUpdateOne(bk, bk.GetID(), booking.NewAccessor(adminReq))

	// Update the parent WorkflowExecution under its exec lock to avoid races
	// between concurrent WORKFLOW_STEP_DONE_EVENT deliveries.
	execID := bk.ExecutionID
	mu := GetExecLock(execID)
	if mu != nil {
		mu.Lock()
		defer mu.Unlock()
	}

	execRes, _, execErr := workflow_execution.NewAccessor(adminReq).LoadOne(execID)
	if execErr != nil || execRes == nil {
		return
	}
	exec := execRes.(*workflow_execution.WorkflowExecution)

	// BookingsState: resource released, step done.
	if exec.BookingsState == nil {
		exec.BookingsState = map[string]workflow_execution.BookingState{}
	}
	st := exec.BookingsState[evt.BookingID]
	st.IsBooked = false
	st.IsDone = true
	exec.BookingsState[evt.BookingID] = st

	// Advance the execution graph.
	if len(exec.Graph) > 0 {
		itemID := findItemIDByBookingID(exec, evt.BookingID)
		if itemID != "" {
			success := enum.BookingStatus(evt.State) == enum.SUCCESS
			// Prefer the reported real end; fall back to "now".
			end := time.Now().UTC()
			if evt.RealEnd != nil {
				end = *evt.RealEnd
			}
			exec.Graph.MarkDone(itemID, success, end)

			// Only advance when the step succeeded; a failure leaves dependents waiting.
			if success {
				start := end
				if evt.RealStart != nil {
					start = *evt.RealStart
				}
				for _, nextID := range exec.Graph.ReadyToRun() {
					exec.Graph.MarkRunning(nextID, start)
				}
			}
		}
	}

	utils.GenericRawUpdateOne(exec, exec.GetID(), workflow_execution.NewAccessor(adminReq))

	// On any terminal booking state, refresh our planner snapshot: the
	// resource is free again.
	switch bk.State {
	case enum.SUCCESS, enum.FAILURE, enum.FORGOTTEN, enum.CANCELLED:
		self, err := oclib.GetMySelf()
		if err == nil && self != nil {
			go planner.GetPlannerService().RefreshSelf(self.PeerID, adminReq)
		}
	}
}
|
|
||||||
|
|
||||||
// findItemIDByBookingID reverse-looks up a booking ID in PeerBookByGraph.
|
|
||||||
// PeerBookByGraph layout: map[peerID]map[itemID][]bookingID
|
|
||||||
func findItemIDByBookingID(exec *workflow_execution.WorkflowExecution, bookingID string) string {
|
|
||||||
for _, byItem := range exec.PeerBookByGraph {
|
|
||||||
for itemID, bookingIDs := range byItem {
|
|
||||||
for _, bkID := range bookingIDs {
|
|
||||||
if bkID == bookingID {
|
|
||||||
return itemID
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func applyStepToBooking(step tools.StepMetric, adminReq *tools.APIRequest) {
|
|
||||||
res, _, err := booking.NewAccessor(adminReq).LoadOne(step.BookingID)
|
|
||||||
if err != nil || res == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
bk := res.(*booking.Booking)
|
|
||||||
switch bk.State {
|
|
||||||
case enum.SUCCESS, enum.FAILURE, enum.FORGOTTEN, enum.CANCELLED:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
bk.State = enum.BookingStatus(step.State)
|
|
||||||
if step.RealStart != nil {
|
|
||||||
bk.RealStartDate = step.RealStart
|
|
||||||
}
|
|
||||||
if step.RealEnd != nil {
|
|
||||||
bk.RealEndDate = step.RealEnd
|
|
||||||
}
|
|
||||||
utils.GenericRawUpdateOne(bk, bk.GetID(), booking.NewAccessor(adminReq))
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Watchdog — stale execution safety net
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// processedExecutions records execution IDs the watchdog has already handled
// (failure emitted, or observed in a terminal state) so each stale execution
// is processed at most once per process lifetime.
var processedExecutions sync.Map
|
|
||||||
|
|
||||||
// terminalExecStates is the set of states considered final for executions and
// bookings; once reached, neither the watchdog nor later events should
// overwrite them.
var terminalExecStates = map[enum.BookingStatus]bool{
	enum.SUCCESS: true, enum.FAILURE: true, enum.FORGOTTEN: true, enum.CANCELLED: true,
}
|
|
||||||
|
|
||||||
func WatchExecutions() {
|
|
||||||
logger := oclib.GetLogger()
|
|
||||||
logger.Info().Msg("ExecutionWatchdog: started")
|
|
||||||
ticker := time.NewTicker(time.Minute)
|
|
||||||
defer ticker.Stop()
|
|
||||||
for range ticker.C {
|
|
||||||
if err := scanStaleExecutions(); err != nil {
|
|
||||||
logger.Error().Msg("ExecutionWatchdog: " + err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func scanStaleExecutions() error {
|
|
||||||
myself, err := oclib.GetMySelf()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("could not resolve local peer: %w", err)
|
|
||||||
}
|
|
||||||
deadline := time.Now().UTC().Add(-time.Minute)
|
|
||||||
res := oclib.NewRequest(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), "", myself.GetID(), []string{}, nil).
|
|
||||||
Search(&dbs.Filters{And: map[string][]dbs.Filter{
|
|
||||||
"execution_date": {{Operator: dbs.LTE.String(), Value: primitive.NewDateTimeFromTime(deadline)}},
|
|
||||||
}}, "", false, 0, 10000)
|
|
||||||
if res.Err != "" {
|
|
||||||
return fmt.Errorf("stale execution search failed: %s", res.Err)
|
|
||||||
}
|
|
||||||
for _, dbo := range res.Data {
|
|
||||||
if exec, ok := dbo.(*workflow_execution.WorkflowExecution); ok {
|
|
||||||
go emitExecutionFailure(exec)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// emitExecutionFailure synthesizes the lifecycle events a crashed/stale
// execution never produced: one WORKFLOW_STEP_DONE_EVENT per booking, then a
// single WORKFLOW_DONE_EVENT carrying all the step metrics, all with state
// FAILURE. Executions already processed, or already terminal, are skipped.
func emitExecutionFailure(exec *workflow_execution.WorkflowExecution) {
	logger := oclib.GetLogger()
	// At-most-once guard: skip if this execution was handled before.
	if _, done := processedExecutions.Load(exec.GetID()); done {
		return
	}
	// Already terminal: just remember it so future scans skip the load.
	if terminalExecStates[exec.State] {
		processedExecutions.Store(exec.GetID(), struct{}{})
		return
	}
	now := time.Now().UTC()
	steps := make([]tools.StepMetric, 0)
	// Walk every booking referenced by the execution
	// (PeerBookByGraph: map[peerID]map[itemID][]bookingID).
	for _, byGraph := range exec.PeerBookByGraph {
		for _, bookingIDs := range byGraph {
			for _, bookingID := range bookingIDs {
				payload, err := json.Marshal(tools.WorkflowLifecycleEvent{
					ExecutionID:  exec.GetID(),
					ExecutionsID: exec.ExecutionsID,
					BookingID:    bookingID,
					State:        enum.FAILURE.EnumIndex(),
					RealEnd:      &now,
				})
				if err != nil {
					continue
				}
				// Per-booking failure event; consumed by HandleWorkflowStepDone.
				tools.NewNATSCaller().SetNATSPub(tools.WORKFLOW_STEP_DONE_EVENT, tools.NATSResponse{
					FromApp: "oc-scheduler-watchdog",
					Method:  int(tools.WORKFLOW_STEP_DONE_EVENT),
					Payload: payload,
				})
				steps = append(steps, tools.StepMetric{
					BookingID: bookingID,
					State:     enum.FAILURE.EnumIndex(),
					RealEnd:   &now,
				})
			}
		}
	}
	// Final workflow-level failure event carrying every synthesized step.
	donePayload, err := json.Marshal(tools.WorkflowLifecycleEvent{
		ExecutionID:  exec.GetID(),
		ExecutionsID: exec.ExecutionsID,
		State:        enum.FAILURE.EnumIndex(),
		RealEnd:      &now,
		Steps:        steps,
	})
	if err == nil {
		tools.NewNATSCaller().SetNATSPub(tools.WORKFLOW_DONE_EVENT, tools.NATSResponse{
			FromApp: "oc-scheduler-watchdog",
			Method:  int(tools.WORKFLOW_DONE_EVENT),
			Payload: donePayload,
		})
	}
	logger.Info().Msgf("ExecutionWatchdog: execution %s stale → emitting FAILURE (%d bookings)",
		exec.GetID(), len(steps))
	processedExecutions.Store(exec.GetID(), struct{}{})
}
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
package nats
|
|
||||||
|
|
||||||
import (
|
|
||||||
"oc-scheduler/infrastructure/execution"
|
|
||||||
"oc-scheduler/infrastructure/planner"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ListenNATS registers all NATS event handlers and starts listening.
|
|
||||||
// Each handler is a thin router that delegates to the appropriate service.
|
|
||||||
func ListenNATS() {
|
|
||||||
tools.NewNATSCaller().ListenNats(map[tools.NATSMethod]func(tools.NATSResponse){
|
|
||||||
tools.PLANNER_EXECUTION: planner.GetPlannerService().HandleStore,
|
|
||||||
tools.CONSIDERS_EVENT: handleConsidersEvent,
|
|
||||||
tools.REMOVE_RESOURCE: handleRemoveResource,
|
|
||||||
tools.CREATE_RESOURCE: handleCreateResource,
|
|
||||||
tools.CONFIRM_EVENT: handleConfirm,
|
|
||||||
tools.WORKFLOW_STARTED_EVENT: execution.HandleWorkflowStarted,
|
|
||||||
tools.WORKFLOW_STEP_DONE_EVENT: execution.HandleWorkflowStepDone,
|
|
||||||
tools.WORKFLOW_DONE_EVENT: execution.HandleWorkflowDone,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
@@ -1,146 +0,0 @@
|
|||||||
package nats
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"oc-scheduler/infrastructure/execution"
|
|
||||||
"oc-scheduler/infrastructure/planner"
|
|
||||||
"oc-scheduler/infrastructure/scheduling_resources"
|
|
||||||
|
|
||||||
oclib "cloud.o-forge.io/core/oc-lib"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/booking"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource"
|
|
||||||
libutils "cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workflow"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
// handleConfirm processes a CONFIRM_EVENT: sets IsDraft=false on the resource.
|
|
||||||
func handleConfirm(resp tools.NATSResponse) {
|
|
||||||
scheduling_resources.Confirm(string(resp.Payload), resp.Datatype)
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleConsidersEvent routes CONSIDERS_EVENT to the execution service.
|
|
||||||
func handleConsidersEvent(resp tools.NATSResponse) {
|
|
||||||
switch resp.Datatype {
|
|
||||||
case tools.BOOKING, tools.PURCHASE_RESOURCE:
|
|
||||||
execution.UpdateExecutionState(resp.Payload, resp.Datatype)
|
|
||||||
case tools.WORKFLOW_EXECUTION:
|
|
||||||
execution.ConfirmExecutionDrafts(resp.Payload)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleRemoveResource routes REMOVE_RESOURCE to the appropriate service.
|
|
||||||
func handleRemoveResource(resp tools.NATSResponse) {
|
|
||||||
adminReq := &tools.APIRequest{Admin: true}
|
|
||||||
switch resp.Datatype {
|
|
||||||
case tools.WORKFLOW:
|
|
||||||
var wf workflow.Workflow
|
|
||||||
if err := json.Unmarshal(resp.Payload, &wf); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
planner.GetPlannerService().NotifyWorkflow(wf.GetID())
|
|
||||||
case tools.BOOKING:
|
|
||||||
var p scheduling_resources.RemoveResourcePayload
|
|
||||||
if err := json.Unmarshal(resp.Payload, &p); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
scheduling_resources.GetService().HandleRemoveBooking(p, adminReq)
|
|
||||||
case tools.PURCHASE_RESOURCE:
|
|
||||||
var p scheduling_resources.RemoveResourcePayload
|
|
||||||
if err := json.Unmarshal(resp.Payload, &p); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
scheduling_resources.GetService().HandleRemovePurchase(p, adminReq)
|
|
||||||
case tools.WORKFLOW_EXECUTION:
|
|
||||||
var p scheduling_resources.RemoveResourcePayload
|
|
||||||
if err := json.Unmarshal(resp.Payload, &p); err != nil || p.ID == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// DeleteOne calls GenericDeleteOne internally which fires NotifyChange.
|
|
||||||
workflow_execution.NewAccessor(adminReq).DeleteOne(p.ID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleCreateResource routes CREATE_RESOURCE to the appropriate service.
//
// Workflows are broadcast to the planner. Bookings and purchases follow two
// paths: resources tagged FromNano are replicated from a nano peer (stored or
// updated verbatim, with no scheduling side effects), while locally-relevant
// ones go through the scheduling service and may trigger a considers update
// on their execution. Executions only ever have their state propagated —
// never created here (see the inline rationale below).
func handleCreateResource(resp tools.NATSResponse) {
	adminReq := &tools.APIRequest{Admin: true}
	switch resp.Datatype {
	case tools.WORKFLOW:
		var wf workflow.Workflow
		if err := json.Unmarshal(resp.Payload, &wf); err != nil {
			return
		}
		planner.GetPlannerService().Broadcast(&wf)
		planner.GetPlannerService().NotifyWorkflow(wf.GetID())
	case tools.BOOKING:
		var bk booking.Booking
		if err := json.Unmarshal(resp.Payload, &bk); err != nil {
			return
		}
		if bk.FromNano != "" {
			// Replication path: accept only if the source peer exists and is
			// NOT itself a nano peer (nano→nano replication is rejected).
			access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
			pp := access.LoadOne(bk.FromNano)
			if p := pp.ToPeer(); p == nil || p.Relation == peer.NANO {
				return
			}
			// Upsert: store when unseen, update otherwise.
			access = oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.BOOKING), nil)
			d := access.LoadOne(bk.GetID())
			if d.Data == nil {
				access.StoreOne(bk.Serialize(&bk))
			} else {
				access.UpdateOne(bk.Serialize(&bk), bk.GetID())
			}
			return
		}
		// Local path: register with the scheduler; when the booking completes
		// an execution's resource set, emit a considers update for it.
		needsConsiders := scheduling_resources.GetService().HandleCreateBooking(&bk, adminReq)
		if needsConsiders {
			payload, _ := json.Marshal(execution.ConsidersPayload{ID: bk.GetID()})
			execution.UpdateExecutionState(payload, tools.BOOKING)
		}
	case tools.PURCHASE_RESOURCE:
		var pr purchase_resource.PurchaseResource
		if err := json.Unmarshal(resp.Payload, &pr); err != nil {
			return
		}
		if pr.FromNano != "" {
			// Same replication rules as bookings above.
			access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
			pp := access.LoadOne(pr.FromNano)
			if p := pp.ToPeer(); p == nil || p.Relation == peer.NANO {
				return
			}
			access = oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PURCHASE_RESOURCE), nil)
			d := access.LoadOne(pr.GetID())
			if d.Data == nil {
				access.StoreOne(pr.Serialize(&pr))
			} else {
				access.UpdateOne(pr.Serialize(&pr), pr.GetID())
			}
			return
		}
		needsConsiders := scheduling_resources.GetService().HandleCreatePurchase(&pr, adminReq)
		if needsConsiders {
			payload, _ := json.Marshal(execution.ConsidersPayload{ID: pr.GetID()})
			execution.UpdateExecutionState(payload, tools.PURCHASE_RESOURCE)
		}
	case tools.WORKFLOW_EXECUTION:
		// Only propagate the state change onto an execution that oc-scheduler
		// already owns. Never create executions from an external NATS event:
		// creation is strictly oc-scheduler's responsibility (via the session
		// flow), and blindly calling StoreOne here would trigger
		// StoreDraftDefault (IsDraft=true, State=DRAFT), polluting the name-
		// uniqueness index and breaking the check stream's first draft creation.
		var update workflow_execution.WorkflowExecution
		if err := json.Unmarshal(resp.Payload, &update); err != nil || update.GetID() == "" {
			return
		}
		res, _, loadErr := workflow_execution.NewAccessor(adminReq).LoadOne(update.GetID())
		if loadErr != nil || res == nil {
			return
		}
		exec := res.(*workflow_execution.WorkflowExecution)
		exec.State = update.State
		libutils.GenericRawUpdateOne(exec, exec.GetID(), workflow_execution.NewAccessor(adminReq))
	}
}
|
|
||||||
@@ -1,651 +0,0 @@
|
|||||||
package planner
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"oc-scheduler/infrastructure/utils"
|
|
||||||
"slices"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
oclib "cloud.o-forge.io/core/oc-lib"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/booking/planner"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/resources"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workflow"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workflow/graph"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	checkWindowHours = 5              // horizon (hours) scanned when searching for the next free slot
	checkStepMin     = 15             // time increment per scan step (minutes)
	plannerTTL       = 24 * time.Hour // maximum age of a cached planner snapshot
)
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Planner cache — protected by plannerMu
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// plannerEntry wraps a planner snapshot with refresh-ownership tracking.
// At most one check session may be the "refresh owner" of a given peer's
// planner at a time: it emits PB_PLANNER to request a fresh snapshot from
// oc-discovery and, on close (clean or forced), emits PB_CLOSE_PLANNER to
// release the stream. Any subsequent session that needs the same peer's
// planner will see Refreshing=true and skip the duplicate request.
type plannerEntry struct {
	Planner    *planner.Planner // latest snapshot received for this peer (nil until first delivery)
	Refreshing bool             // true while a PB_PLANNER request is in flight
	RefreshOwner string         // session UUID that initiated the current refresh
}
|
|
||||||
|
|
||||||
// PlannerService caches planner snapshots per peer and fans notifications out
// to subscribed check sessions.
type PlannerService struct {
	Mu    sync.RWMutex            // guards Cache
	Cache map[string]*plannerEntry // peer ID -> cached planner entry

	SubMu   sync.RWMutex             // guards Subs and AddedAt
	Subs    map[string][]chan string // peer ID -> channels notified on snapshot arrival
	AddedAt map[string]time.Time     // peer ID -> when its entry was added (TTL bookkeeping)

	WorkflowSubMu sync.RWMutex               // guards WorkflowSubs
	WorkflowSubs  map[string][]chan struct{} // workflow ID -> channels notified on workflow change
}
|
|
||||||
|
|
||||||
// singleton is the process-wide PlannerService instance; nil until InitPlanner runs.
var singleton *PlannerService
|
|
||||||
|
|
||||||
// InitPlanner bootstraps the planner service and our own planner entry at
// startup. It waits (with 15-second retries) for our peer record to be
// present in the database before generating the first planner snapshot and
// broadcasting it on PB_PLANNER. This handles the race between oc-scheduler
// starting before oc-peer has fully registered our node.
func InitPlanner() {
	singleton = &PlannerService{
		AddedAt:      map[string]time.Time{},
		Subs:         map[string][]chan string{},
		Cache:        map[string]*plannerEntry{},
		WorkflowSubs: map[string][]chan struct{}{},
	}
	// Block until our own peer record is resolvable, then publish once.
	for {
		self, err := oclib.GetMySelf()
		if err != nil || self == nil {
			fmt.Println("InitPlanner: self peer not found yet, retrying in 15s...")
			time.Sleep(15 * time.Second)
			continue
		}
		singleton.RefreshSelf(self.PeerID, &tools.APIRequest{Admin: true})
		return
	}
}
|
|
||||||
|
|
||||||
// GetPlannerService returns the process-wide PlannerService singleton.
// It is nil until InitPlanner has been called.
func GetPlannerService() *PlannerService {
	return singleton
}
|
|
||||||
|
|
||||||
// HandleStore ingests a PLANNER_EXECUTION NATS message: it decodes the
// planner snapshot and caches it under the emitting peer's ID.
// The payload is unmarshalled twice on purpose: once into a generic map to
// extract the raw "peer_id" field (not part of planner.Planner), and once
// into the typed snapshot.
func (s *PlannerService) HandleStore(resp tools.NATSResponse) {
	m := map[string]interface{}{}
	p := planner.Planner{}
	if err := json.Unmarshal(resp.Payload, &m); err != nil {
		return
	}
	if err := json.Unmarshal(resp.Payload, &p); err != nil {
		fmt.Println("RETRIEVE PLANNER ERR", err)
		return
	}
	s.Store(fmt.Sprintf("%v", m["peer_id"]), &p)
}
|
|
||||||
|
|
||||||
// missingPlannerPeers returns the peer IDs from res whose planner is absent
|
|
||||||
// or not yet populated in PlannerCache.
|
|
||||||
// func missingPlannerPeers(res map[string]bookingResource) []string {
|
|
||||||
func (s *PlannerService) MissingPeers(res map[string]utils.BookingResource) []string {
|
|
||||||
var out []string
|
|
||||||
for _, r := range res {
|
|
||||||
s.Mu.RLock()
|
|
||||||
entry := s.Cache[r.PeerPID]
|
|
||||||
s.Mu.RUnlock()
|
|
||||||
if entry == nil || entry.Planner == nil {
|
|
||||||
out = append(out, r.PeerPID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// FindDate resolves the effective [start, end] slot for workflow wfID against
// the checkable resources. It returns (start, end, available, preemptible,
// warnings). In preemption mode the slot is accepted as-is; otherwise fresh
// planner snapshots are fetched (Fill) and availability is checked. In asap
// mode a conflicting slot triggers a forward scan for the next free one.
func (s *PlannerService) FindDate(wfID string, checkables map[string]utils.BookingResource, start time.Time, end *time.Time, preemption bool, asap bool) (time.Time, *time.Time, bool, bool, []string) {
	var unavailable, warnings []string
	// 4. Preemption: Planify ran (end is resolved), skip availability check.
	if preemption {
		return start, end, true, true, warnings
	}
	// 5b. For any peer whose planner is not yet cached, request it and wait
	// briefly so the decision is based on real data rather than a blind
	// "assume available". The wait is capped to avoid blocking the caller
	// when oc-discovery is unreachable.
	s.Fill(checkables, wfID)

	unavailable, warnings = s.checkResourceAvailability(checkables, start, end)

	// Everything free at the requested slot: commit to it.
	if len(unavailable) == 0 {
		//result.Available = true
		return start, end, true, false, warnings
	}

	// 6. as_possible: find and commit to the next free slot.
	if asap {
		next := s.findNextSlot(checkables, start, end, checkWindowHours)
		if next != nil {
			if end != nil {
				// Shift the end by the same offset so the slot length is preserved.
				duration := end.Sub(start) // capture before overwriting start
				e := next.Add(duration)
				end = &e
			}
			start = *next
			return start, end, true, false, warnings
		} else {
			return start, end, false, false, warnings
		}
	}
	// Fixed-date request that conflicts: report unavailability.
	return start, end, false, false, warnings
}
|
|
||||||
|
|
||||||
// Fill fetches a fresh planner snapshot for every peer referenced by
// checkables and blocks (up to 5s) until they have all arrived or the
// deadline expires. It is the pre-step of FindDate, so availability decisions
// are made against real data rather than a blind "assume available".
func (s *PlannerService) Fill(checkables map[string]utils.BookingResource, wfID string) {
	// Collect all peers involved in this check (not just missing ones).
	// We always re-request every peer because PB_CLOSE_PLANNER is emitted
	// after each check session, which stops the remote stream. The cached
	// snapshot may therefore be stale: re-fetching ensures the check is made
	// against up-to-date availability data.
	all := s.allPeers(checkables)
	if len(all) == 0 {
		return
	}
	const plannerFetchTimeout = 5 * time.Second
	// Synthetic session ID used to claim refresh ownership for this one-shot.
	tmpSession := "check-oneshot-" + wfID

	// Mark pending entries and clear any stale planner so the wait loop below
	// will not return early with an old snapshot.
	s.Mu.Lock()
	myself, _ := oclib.GetMySelf()
	for _, peerID := range all {
		entry := s.Cache[peerID]
		if entry == nil {
			entry = &plannerEntry{}
			s.Cache[peerID] = entry
			s.AddedAt[peerID] = time.Now().UTC()
			go s.EvictAfter(peerID, plannerTTL)
		}
		// Reset so MissingPeers sees it as absent until the fresh snapshot arrives.
		// NOTE(review): this also clears a snapshot another in-flight session may
		// still be reading — confirm concurrent check sessions tolerate that.
		entry.Planner = nil
		if !entry.Refreshing {
			entry.Refreshing = true
			entry.RefreshOwner = tmpSession
		}
	}
	s.Mu.Unlock()
	// Release ownership (and emit PB_CLOSE_PLANNER) when this check is done.
	defer s.ReleaseRefreshOwnership(all, tmpSession)

	// Request one snapshot per peer: generated locally for ourselves,
	// requested via PB_PLANNER propagation for remote peers.
	for _, peerID := range all {
		if myself != nil && myself.PeerID == peerID {
			go s.RefreshSelf(peerID, &tools.APIRequest{Admin: true})
		} else {
			payload, _ := json.Marshal(map[string]any{"peer_id": peerID})
			utils.Propalgate(peerID, tools.PropalgationMessage{
				Action:  tools.PB_PLANNER,
				Payload: payload,
			})
		}
	}

	// Wait until every peer has a populated snapshot, waking on planner-update
	// notifications, but never past the fetch deadline.
	deadline := time.Now().Add(plannerFetchTimeout)
	for {
		remaining := s.MissingPeers(checkables)
		if len(remaining) == 0 {
			return
		}
		wait := time.Until(deadline)
		if wait <= 0 {
			return
		}
		ch, cancelSub := SubscribeUpdates(s.Subs, &s.SubMu, remaining...)
		select {
		case <-ch:
		case <-time.After(wait):
		}
		cancelSub()
	}
}
|
|
||||||
|
|
||||||
// allPeers returns the deduplicated list of peer IDs for all checkable resources.
|
|
||||||
func (s *PlannerService) allPeers(res map[string]utils.BookingResource) []string {
|
|
||||||
seen := map[string]struct{}{}
|
|
||||||
var out []string
|
|
||||||
for _, r := range res {
|
|
||||||
if _, ok := seen[r.PeerPID]; !ok {
|
|
||||||
seen[r.PeerPID] = struct{}{}
|
|
||||||
out = append(out, r.PeerPID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// EvictAfter waits ttl from first insertion then deletes the cache entry and
// emits PB_CLOSE_PLANNER so oc-discovery stops streaming for this peer.
// This is the only path that actually removes an entry from PlannerCache;
// session close (ReleaseRefreshOwnership) only resets ownership state.
// NOTE(review): the goroutine has no cancellation and a later Store of the
// same peer does not arm a new timer here — confirm the TTL is meant to run
// from first insertion only.
func (s *PlannerService) EvictAfter(peerID string, ttl time.Duration) {
	time.Sleep(ttl)
	s.Mu.Lock()
	_, exists := s.Cache[peerID]
	if exists {
		delete(s.Cache, peerID)
		delete(s.AddedAt, peerID)
	}
	s.Mu.Unlock()
	// Notify outside the lock: wake subscribers (they will observe the entry
	// as missing) and tell the remote side to stop its planner stream.
	if exists {
		utils.Notify(&s.SubMu, s.Subs, peerID, peerID)
		utils.Propalgate(peerID, tools.PropalgationMessage{Action: tools.PB_CLOSE_PLANNER})
	}
}
|
|
||||||
|
|
||||||
// SubscribePlannerUpdates registers interest in planner changes for the given
|
|
||||||
// peer IDs. The returned channel receives the peerID string (non-blocking) each
|
|
||||||
// time any of those planners is updated. Call cancel to unregister.
|
|
||||||
func SubscribeUpdates[T interface{}](subs map[string][]chan T, mu *sync.RWMutex, updates ...string) (<-chan T, func()) {
|
|
||||||
ch := make(chan T, 1)
|
|
||||||
mu.Lock()
|
|
||||||
for _, k := range updates {
|
|
||||||
subs[k] = append(subs[k], ch)
|
|
||||||
}
|
|
||||||
mu.Unlock()
|
|
||||||
cancel := func() {
|
|
||||||
mu.Lock()
|
|
||||||
for _, k := range updates {
|
|
||||||
subsk := subs[k]
|
|
||||||
for i, s := range subsk {
|
|
||||||
if s == ch {
|
|
||||||
subs[k] = append(subsk[:i], subsk[i+1:]...)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
mu.Unlock()
|
|
||||||
}
|
|
||||||
return ch, cancel
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Cache helpers
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
func (s *PlannerService) Store(peerID string, p *planner.Planner) {
|
|
||||||
if s == nil {
|
|
||||||
fmt.Println("PLANNER IS NULL")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
s.Mu.Lock()
|
|
||||||
entry := s.Cache[peerID]
|
|
||||||
isNew := entry == nil
|
|
||||||
if isNew {
|
|
||||||
entry = &plannerEntry{}
|
|
||||||
s.Cache[peerID] = entry
|
|
||||||
s.AddedAt[peerID] = time.Now().UTC()
|
|
||||||
go s.EvictAfter(peerID, plannerTTL)
|
|
||||||
}
|
|
||||||
entry.Planner = p
|
|
||||||
s.Cache[peerID] = entry
|
|
||||||
s.Mu.Unlock()
|
|
||||||
utils.Notify(&s.SubMu, s.Subs, peerID, peerID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Planner refresh / broadcast
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// Refresh (formerly RequestPlannerRefresh) asks oc-discovery for a fresh
// planner snapshot for each peer in peerIDs. Only the first session to request
// a given peer becomes its "refresh owner": subsequent sessions see
// Refreshing=true and skip the duplicate PB_PLANNER emission. Returns the
// subset of peerIDs for which this session claimed ownership (needed to
// release on close).
func (s *PlannerService) Refresh(peerIDs []string, executionsID string) []string {
	var owned []string
	for _, peerID := range peerIDs {
		s.Mu.Lock()
		entry := s.Cache[peerID]
		if entry == nil {
			// First sighting of this peer: create the entry and arm TTL eviction.
			entry = &plannerEntry{}
			s.Cache[peerID] = entry
			s.AddedAt[peerID] = time.Now().UTC()
			go s.EvictAfter(peerID, plannerTTL)
		}
		// Claim ownership only when no other session is already refreshing.
		shouldRequest := !entry.Refreshing
		if shouldRequest {
			entry.Refreshing = true
			entry.RefreshOwner = executionsID
		}
		s.Mu.Unlock()
		if shouldRequest {
			owned = append(owned, peerID)
			if p, err := oclib.GetMySelf(); err == nil && p != nil && p.PeerID == peerID {
				// Our own planner is generated locally (and then broadcast).
				go s.RefreshSelf(peerID, &tools.APIRequest{Admin: true})
			} else {
				payload, _ := json.Marshal(map[string]any{"peer_id": peerID})
				utils.Propalgate(peerID, tools.PropalgationMessage{
					Action:  tools.PB_PLANNER,
					Payload: payload,
				})
			}
		}
	}
	return owned
}
|
|
||||||
|
|
||||||
// ReleaseRefreshOwnership is called when a check session closes (clean or
// forced). For each peer this session owns, it resets the refresh state and
// emits PB_CLOSE_PLANNER so oc-discovery stops the planner stream.
// The planner data itself stays in the cache until TTL eviction.
// Note that the notification and PB_CLOSE_PLANNER are emitted for every
// peerID in the list, including peers this session did not own.
func (s *PlannerService) ReleaseRefreshOwnership(peerIDs []string, executionsID string) {
	for _, peerID := range peerIDs {
		s.Mu.Lock()
		// Reset state only if this session is the recorded owner.
		if entry := s.Cache[peerID]; entry != nil && entry.RefreshOwner == executionsID {
			entry.Refreshing = false
			entry.RefreshOwner = ""
		}
		s.Mu.Unlock()
		utils.Notify(&s.SubMu, s.Subs, peerID, peerID)
		payload, _ := json.Marshal(map[string]any{"peer_id": peerID})
		utils.Propalgate(peerID, tools.PropalgationMessage{
			Action:  tools.PB_CLOSE_PLANNER,
			Payload: payload,
		})
	}
}
|
|
||||||
|
|
||||||
// broadcastPlanner iterates the storage and compute peers of the given workflow
|
|
||||||
// and, for each peer not yet in the cache, emits a PB_PLANNER propagation so
|
|
||||||
// downstream consumers (oc-discovery, other schedulers) refresh their state.
|
|
||||||
func (s *PlannerService) Broadcast(wf *workflow.Workflow) {
|
|
||||||
if wf.Graph == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
items := []graph.GraphItem{}
|
|
||||||
items = append(items, wf.GetGraphItems(wf.Graph.IsStorage)...)
|
|
||||||
items = append(items, wf.GetGraphItems(wf.Graph.IsCompute)...)
|
|
||||||
|
|
||||||
seen := []string{}
|
|
||||||
for _, item := range items {
|
|
||||||
_, res := item.GetResource()
|
|
||||||
if res == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
creatorID := res.GetCreatorID()
|
|
||||||
if slices.Contains(seen, creatorID) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
data := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil).LoadOne(creatorID)
|
|
||||||
p := data.ToPeer()
|
|
||||||
if p == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
s.Mu.RLock()
|
|
||||||
cached := s.Cache[p.PeerID]
|
|
||||||
s.Mu.RUnlock()
|
|
||||||
|
|
||||||
// Only request if no snapshot and no refresh already in flight.
|
|
||||||
if cached == nil || (cached.Planner == nil && !cached.Refreshing) {
|
|
||||||
payload, err := json.Marshal(map[string]interface{}{"peer_id": p.PeerID})
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
seen = append(seen, creatorID)
|
|
||||||
utils.Propalgate(p.PeerID, tools.PropalgationMessage{
|
|
||||||
Action: tools.PB_PLANNER,
|
|
||||||
Payload: payload,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Self-planner refresh
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// RefreshSelf regenerates the local peer's planner snapshot, stores it in the
// cache (waking any goroutines waiting on it), and broadcasts it via
// PB_PLANNER so remote peers and oc-discovery can refresh their view of our
// availability.
func (s *PlannerService) RefreshSelf(peerID string, request *tools.APIRequest) {
	p, err := planner.GenerateShallow(request)
	if err != nil {
		fmt.Println("refreshSelfPlanner: could not generate planner:", err)
		return
	}
	// Update the local cache and notify any waiting CheckStream goroutines.
	s.Store(peerID, p)
	// Broadcast the updated planner so remote peers (and oc-discovery) can
	// refresh their view of our availability.
	// plannerWithPeer adds the "peer_id" envelope field that the receiving
	// HandleStore extracts.
	type plannerWithPeer struct {
		PeerID string `json:"peer_id"`
		*planner.Planner
	}
	plannerPayload, err := json.Marshal(plannerWithPeer{PeerID: peerID, Planner: p})
	if err != nil {
		return
	}
	utils.Propalgate(peerID, tools.PropalgationMessage{
		Action:  tools.PB_PLANNER,
		Payload: plannerPayload,
	})
}
|
|
||||||
|
|
||||||
// findNextSlot scans forward from 'from' in checkStepMin increments for up to
|
|
||||||
// windowH hours and returns the first candidate start time at which all
|
|
||||||
// resources are simultaneously free.
|
|
||||||
func (s *PlannerService) findNextSlot(resources map[string]utils.BookingResource, from time.Time, originalEnd *time.Time, windowH int) *time.Time {
|
|
||||||
duration := 5 * time.Minute
|
|
||||||
if originalEnd != nil {
|
|
||||||
if d := originalEnd.Sub(from); d > 0 {
|
|
||||||
duration = d
|
|
||||||
}
|
|
||||||
}
|
|
||||||
step := time.Duration(checkStepMin) * time.Minute
|
|
||||||
limit := from.Add(time.Duration(windowH) * time.Hour)
|
|
||||||
for t := from.Add(step); t.Before(limit); t = t.Add(step) {
|
|
||||||
e := t.Add(duration)
|
|
||||||
if unavail, _ := s.checkResourceAvailability(resources, t, &e); len(unavail) == 0 {
|
|
||||||
return &t
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// checkResourceAvailability returns the IDs of unavailable resources and
|
|
||||||
// human-readable warning messages.
|
|
||||||
func (s *PlannerService) checkResourceAvailability(res map[string]utils.BookingResource, start time.Time, end *time.Time) (unavailable []string, warnings []string) {
|
|
||||||
for _, r := range res {
|
|
||||||
s.Mu.RLock()
|
|
||||||
entry := s.Cache[r.PeerPID]
|
|
||||||
s.Mu.RUnlock()
|
|
||||||
fmt.Println("Retrieve", r.PeerPID, s.Cache, entry.Planner)
|
|
||||||
if entry == nil {
|
|
||||||
unavailable = append(unavailable, r.ID)
|
|
||||||
warnings = append(warnings, fmt.Sprintf(
|
|
||||||
"resource %s is not available in [%s – %s] : Missing Planner",
|
|
||||||
r.ID, start.Format(time.RFC3339), utils.FormatOptTime(end)))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if entry.Planner == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if !s.checkInstance(entry.Planner, r.ID, r.InstanceID, start, end) {
|
|
||||||
unavailable = append(unavailable, r.ID)
|
|
||||||
warnings = append(warnings, fmt.Sprintf(
|
|
||||||
"resource %s is not available in [%s – %s]",
|
|
||||||
r.ID, start.Format(time.RFC3339), utils.FormatOptTime(end)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckResourceInstance checks whether a resource/instance is available on the
|
|
||||||
// local planner cache for the given peer. Called by scheduling_resources when
|
|
||||||
// validating an incoming booking creation.
|
|
||||||
func (s *PlannerService) CheckResourceInstance(peerID, resourceID, instanceID string, start time.Time, end *time.Time) bool {
|
|
||||||
s.Mu.RLock()
|
|
||||||
entry := s.Cache[peerID]
|
|
||||||
s.Mu.RUnlock()
|
|
||||||
if entry == nil || entry.Planner == nil {
|
|
||||||
return true // no planner cached → assume available
|
|
||||||
}
|
|
||||||
return s.checkInstance(entry.Planner, resourceID, instanceID, start, end)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SubscribePlannerUpdates returns a channel that receives a peerID each time
// one of the given peers' planners is updated. The second return value
// unregisters the subscription.
func (s *PlannerService) SubscribePlannerUpdates(peerIDs ...string) (<-chan string, func()) {
	return SubscribeUpdates(s.Subs, &s.SubMu, peerIDs...)
}
|
|
||||||
|
|
||||||
// SubscribeWorkflowUpdates returns a channel signalled when the workflow
// identified by wfID changes. The second return value unregisters the
// subscription.
func (s *PlannerService) SubscribeWorkflowUpdates(wfID string) (<-chan struct{}, func()) {
	return SubscribeUpdates(s.WorkflowSubs, &s.WorkflowSubMu, wfID)
}
|
|
||||||
|
|
||||||
// NotifyWorkflow signals all subscribers watching wfID.
func (s *PlannerService) NotifyWorkflow(wfID string) {
	utils.Notify(&s.WorkflowSubMu, s.WorkflowSubs, wfID, struct{}{})
}
|
|
||||||
|
|
||||||
// FillForPeers fetches and waits for planners for an explicit list of peer PIDs.
// Same mechanic as Fill but decoupled from the BookingResource map — used for
// dynamic resource resolution where the peer set is not part of checkables.
func (s *PlannerService) FillForPeers(peerPIDs []string, wfID string) {
	if len(peerPIDs) == 0 {
		return
	}
	const plannerFetchTimeout = 5 * time.Second
	// Synthetic session ID used to claim refresh ownership for this round.
	tmpSession := "check-dynamic-" + wfID

	// Mark pending entries and clear stale snapshots (same reasoning as Fill).
	s.Mu.Lock()
	myself, _ := oclib.GetMySelf()
	for _, peerID := range peerPIDs {
		entry := s.Cache[peerID]
		if entry == nil {
			entry = &plannerEntry{}
			s.Cache[peerID] = entry
			s.AddedAt[peerID] = time.Now().UTC()
			go s.EvictAfter(peerID, plannerTTL)
		}
		// Reset so the wait loop below does not accept an old snapshot.
		entry.Planner = nil
		if !entry.Refreshing {
			entry.Refreshing = true
			entry.RefreshOwner = tmpSession
		}
	}
	s.Mu.Unlock()
	defer s.ReleaseRefreshOwnership(peerPIDs, tmpSession)

	// Request snapshots: generated locally for ourselves, propagated for others.
	for _, peerID := range peerPIDs {
		if myself != nil && myself.PeerID == peerID {
			go s.RefreshSelf(peerID, &tools.APIRequest{Admin: true})
		} else {
			payload, _ := json.Marshal(map[string]any{"peer_id": peerID})
			utils.Propalgate(peerID, tools.PropalgationMessage{
				Action:  tools.PB_PLANNER,
				Payload: payload,
			})
		}
	}

	// Wait until every requested peer has a populated snapshot or the
	// deadline passes, waking on planner-update notifications.
	deadline := time.Now().Add(plannerFetchTimeout)
	remaining := slices.Clone(peerPIDs)
	for len(remaining) > 0 {
		wait := time.Until(deadline)
		if wait <= 0 {
			return
		}
		ch, cancelSub := SubscribeUpdates(s.Subs, &s.SubMu, remaining...)
		select {
		case <-ch:
		case <-time.After(wait):
			cancelSub()
			return
		}
		cancelSub()
		// Recompute the not-yet-populated subset (reuses the cloned backing array).
		remaining = remaining[:0]
		s.Mu.RLock()
		for _, pid := range peerPIDs {
			if entry := s.Cache[pid]; entry == nil || entry.Planner == nil {
				remaining = append(remaining, pid)
			}
		}
		s.Mu.RUnlock()
	}
}
|
|
||||||
|
|
||||||
// FillDynamic resolves all peer DIDs across the given dynamic resources to PIDs,
|
|
||||||
// fetches their planners via FillForPeers, and returns the DID→PID mapping for use
|
|
||||||
// in ResolveDynamic. All dynamics are batched into a single planner fetch round.
|
|
||||||
func (s *PlannerService) FillDynamic(dynamics []*resources.DynamicResource, wfID string) map[string]string {
|
|
||||||
didToPID := map[string]string{}
|
|
||||||
peerPIDs := []string{}
|
|
||||||
access := oclib.NewRequestAdmin(oclib.LibDataEnum(tools.PEER), nil)
|
|
||||||
for _, d := range dynamics {
|
|
||||||
for _, did := range d.PeerIds {
|
|
||||||
if did == "" || didToPID[did] != "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if data := access.LoadOne(did); data.Data != nil {
|
|
||||||
if p := data.ToPeer(); p != nil {
|
|
||||||
didToPID[did] = p.PeerID
|
|
||||||
peerPIDs = append(peerPIDs, p.PeerID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
s.FillForPeers(peerPIDs, wfID)
|
|
||||||
return didToPID
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResolveDynamic walks the sorted instance list of a DynamicResource via
// GetSelectedInstance and returns true as soon as it finds an instance whose
// peer's planner confirms availability for [start, end].
// d.SelectedIndex is updated to the elected instance on success.
// Peers that did not respond (no planner in cache) are skipped.
// NOTE(review): the continue branches assume GetSelectedInstance advances to
// the next candidate on each call; if it does not, a skipped peer would make
// this loop spin forever — confirm against resources.DynamicResource.
func (s *PlannerService) ResolveDynamic(d *resources.DynamicResource, didToPID map[string]string, start time.Time, end *time.Time) bool {
	for {
		inst := d.GetSelectedInstance(nil)
		if inst == nil {
			return false // exhausted all candidates
		}
		did := d.PeerIds[d.SelectedIndex]
		resourceID := d.ResourceIds[d.SelectedIndex]
		pid, ok := didToPID[did]
		if !ok {
			continue // peer DID could not be resolved
		}
		s.Mu.RLock()
		entry := s.Cache[pid]
		s.Mu.RUnlock()
		if entry == nil || entry.Planner == nil {
			continue // peer did not respond in time
		}
		if s.checkInstance(entry.Planner, resourceID, inst.GetID(), start, end) {
			return true // d.SelectedIndex points to the elected instance
		}
	}
}
|
|
||||||
|
|
||||||
// checkInstance checks availability for the specific instance resolved by the
|
|
||||||
// scheduler. When instanceID is empty (no instance selected / none resolvable),
|
|
||||||
// it falls back to checking all instances known in the planner and returns true
|
|
||||||
// if any one has remaining capacity. Returns true when no capacity is recorded.
|
|
||||||
func (s *PlannerService) checkInstance(p *planner.Planner, resourceID string, instanceID string, start time.Time, end *time.Time) bool {
|
|
||||||
if instanceID != "" {
|
|
||||||
return p.Check(resourceID, instanceID, nil, start, end)
|
|
||||||
}
|
|
||||||
caps, ok := p.Capacities[resourceID]
|
|
||||||
if !ok || len(caps) == 0 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
for id := range caps {
|
|
||||||
if p.Check(resourceID, id, nil, start, end) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
@@ -1,276 +0,0 @@
|
|||||||
package scheduler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"oc-scheduler/conf"
|
|
||||||
"oc-scheduler/infrastructure/planner"
|
|
||||||
"oc-scheduler/infrastructure/scheduling_resources"
|
|
||||||
infUtils "oc-scheduler/infrastructure/utils"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/booking"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/resources"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workflow"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
"github.com/google/uuid"
|
|
||||||
"github.com/robfig/cron"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Schedule holds a resolved start/end pair for a single execution slot.
type Schedule struct {
	Start time.Time  // slot start
	End   *time.Time // slot end; nil when open-ended
}
|
|
||||||
|
|
||||||
// WorkflowSchedule is the flying session object for a scheduling interaction.
// It is never persisted; it lives only for the duration of a WebSocket check session.
type WorkflowSchedule struct {
	UUID              string                                  `json:"id" validate:"required"` // session identifier, echoed back as SchedulingID
	Workflow          *workflow.Workflow                      `json:"workflow,omitempty"`
	WorkflowExecution []*workflow_execution.WorkflowExecution `json:"workflow_executions,omitempty"`
	Message           string                                  `json:"message,omitempty"` // human-readable status for the client
	Warning           string                                  `json:"warning,omitempty"`
	Start             time.Time                               `json:"start" validate:"required,ltfield=End"`
	End               *time.Time                              `json:"end,omitempty"`
	DurationS         float64                                 `json:"duration_s" default:"-1"` // estimated duration in seconds
	Cron              string                                  `json:"cron,omitempty"`          // optional recurrence expression

	BookingMode              booking.BookingMode                             `json:"booking_mode,omitempty"`
	SelectedInstances        workflow.ConfigItem                             `json:"selected_instances"`
	SelectedPartnerships     workflow.ConfigItem                             `json:"selected_partnerships"`
	SelectedBuyings          workflow.ConfigItem                             `json:"selected_buyings"`
	SelectedStrategies       workflow.ConfigItem                             `json:"selected_strategies"`
	SelectedPaymentType      workflow.ConfigItem                             `json:"selected_payment_type"`
	SelectedBillingStrategy  pricing.BillingStrategy                         `json:"selected_billing_strategy"`
	SelectedEmbeddedStorages map[string]*resources.EmbeddedStorageSelection  `json:"selected_embedded_storages,omitempty"`

	// Confirm, when true, triggers Schedule() to confirm the drafts held by this session.
	Confirm bool `json:"confirm,omitempty"`

	// Asap and Preemption override the query-param mode on a per-message basis.
	// nil means "not set" (keep previous value).
	Asap       *bool `json:"asap,omitempty"`
	Preemption *bool `json:"preemption,omitempty"`
}
|
|
||||||
|
|
||||||
// CheckResult is the response payload for an availability check.
type CheckResult struct {
	Available   bool       `json:"available"`             // true when every resource is free for [Start, End]
	Start       time.Time  `json:"start"`                 // resolved (possibly shifted) start
	End         *time.Time `json:"end,omitempty"`         // resolved end; nil when unbounded
	Warnings    []string   `json:"warnings,omitempty"`    // human-readable availability issues
	Preemptible bool       `json:"preemptible,omitempty"`
	// SchedulingID is the session UUID the client must supply when confirming.
	SchedulingID string `json:"scheduling_id,omitempty"`
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Check — availability
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// Check verifies whether the requested slot is available across all resource peers.
|
|
||||||
func (ws *WorkflowSchedule) Check(wfID string, asap bool, preemption bool, request *tools.APIRequest) (*CheckResult, error) {
|
|
||||||
obj, code, err := workflow.NewAccessor(request).LoadOne(wfID)
|
|
||||||
if code != 200 || err != nil {
|
|
||||||
msg := "could not load workflow " + wfID
|
|
||||||
if err != nil {
|
|
||||||
msg += ": " + err.Error()
|
|
||||||
}
|
|
||||||
return nil, errors.New(msg)
|
|
||||||
}
|
|
||||||
wf := obj.(*workflow.Workflow)
|
|
||||||
|
|
||||||
prepLead := conf.GetConfig().PrepLead()
|
|
||||||
start := ws.Start
|
|
||||||
if asap || preemption || start.IsZero() {
|
|
||||||
start = time.Now().UTC().Add(prepLead)
|
|
||||||
} else if start.Before(time.Now().UTC().Add(prepLead)) {
|
|
||||||
// Explicit date is within the prep window — impossible to guarantee on time.
|
|
||||||
return nil, fmt.Errorf(
|
|
||||||
"start date %s is too soon: minimum lead time is %s (earliest: %s)",
|
|
||||||
start.Format(time.RFC3339),
|
|
||||||
prepLead,
|
|
||||||
time.Now().UTC().Add(prepLead).Format(time.RFC3339),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
end := ws.End
|
|
||||||
if end == nil {
|
|
||||||
if ws.DurationS > 0 {
|
|
||||||
e := start.Add(time.Duration(ws.DurationS * float64(time.Second)))
|
|
||||||
end = &e
|
|
||||||
} else {
|
|
||||||
_, longest, _, _, planErr := wf.Planify(
|
|
||||||
start, nil,
|
|
||||||
ws.SelectedInstances, ws.SelectedPartnerships,
|
|
||||||
ws.SelectedBuyings, ws.SelectedStrategies,
|
|
||||||
int(ws.BookingMode), nil, request,
|
|
||||||
)
|
|
||||||
if planErr == nil && longest > 0 {
|
|
||||||
e := start.Add(time.Duration(longest) * time.Second)
|
|
||||||
end = &e
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
checkables := infUtils.CollectBookingResources(wf, ws.SelectedInstances)
|
|
||||||
start, end, available, preemptible, warnings := planner.GetPlannerService().FindDate(wfID, checkables, start, end, preemption, asap)
|
|
||||||
|
|
||||||
// Dynamic resources are resolved separately: their peer planners are fetched
|
|
||||||
// and the sorted instance list is walked until an available one is found.
|
|
||||||
var dynamics []*resources.DynamicResource
|
|
||||||
for _, item := range wf.GetGraphItems(wf.Graph.IsDynamic) {
|
|
||||||
_, res := item.GetResource()
|
|
||||||
if res == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
d := res.(*resources.DynamicResource)
|
|
||||||
d.SetAllowedInstances(request)
|
|
||||||
dynamics = append(dynamics, d)
|
|
||||||
}
|
|
||||||
if len(dynamics) > 0 {
|
|
||||||
didToPID := planner.GetPlannerService().FillDynamic(dynamics, wfID)
|
|
||||||
for _, d := range dynamics {
|
|
||||||
if !planner.GetPlannerService().ResolveDynamic(d, didToPID, start, end) {
|
|
||||||
available = false
|
|
||||||
warnings = append(warnings, "no available instance for dynamic resource "+d.GetName())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return &CheckResult{
|
|
||||||
Start: start,
|
|
||||||
End: end,
|
|
||||||
Available: available,
|
|
||||||
Preemptible: preemptible,
|
|
||||||
Warnings: warnings,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// GetBuyAndBook — generate scheduling resources
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// GetBuyAndBook runs Planify to generate the purchases and bookings for this session.
|
|
||||||
func (ws *WorkflowSchedule) GetBuyAndBook(wfID string, request *tools.APIRequest) (
|
|
||||||
bool,
|
|
||||||
*workflow.Workflow,
|
|
||||||
[]*workflow_execution.WorkflowExecution,
|
|
||||||
[]scheduling_resources.SchedulerObject,
|
|
||||||
[]scheduling_resources.SchedulerObject,
|
|
||||||
error,
|
|
||||||
) {
|
|
||||||
res, code, err := workflow.NewAccessor(request).LoadOne(wfID)
|
|
||||||
if code != 200 {
|
|
||||||
return false, nil, nil, nil, nil,
|
|
||||||
errors.New("could not load the workflow: " + err.Error())
|
|
||||||
}
|
|
||||||
wf := res.(*workflow.Workflow)
|
|
||||||
isPreemptible, longest, priceds, wf, err := wf.Planify(
|
|
||||||
ws.Start, ws.End,
|
|
||||||
ws.SelectedInstances, ws.SelectedPartnerships,
|
|
||||||
ws.SelectedBuyings, ws.SelectedStrategies,
|
|
||||||
int(ws.BookingMode), nil, request,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return false, wf, nil, nil, nil, err
|
|
||||||
}
|
|
||||||
ws.DurationS = longest
|
|
||||||
ws.Message = "We estimate that the workflow will start at " + ws.Start.String() +
|
|
||||||
" and last " + fmt.Sprintf("%v", ws.DurationS) + " seconds."
|
|
||||||
if ws.End != nil && ws.Start.Add(time.Duration(longest)*time.Second).After(*ws.End) {
|
|
||||||
ws.Warning = "The workflow may be too long to be executed in the given time frame, we will try to book it anyway\n"
|
|
||||||
}
|
|
||||||
|
|
||||||
execs, err := ws.GenerateExecutions(wf, isPreemptible)
|
|
||||||
if err != nil {
|
|
||||||
return false, wf, nil, nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var purchased, bookings []scheduling_resources.SchedulerObject
|
|
||||||
for _, exec := range execs {
|
|
||||||
for _, obj := range exec.Buy(ws.SelectedBillingStrategy, ws.UUID, wfID, priceds) {
|
|
||||||
purchased = append(purchased, scheduling_resources.ToSchedulerObject(tools.PURCHASE_RESOURCE, obj))
|
|
||||||
}
|
|
||||||
for _, obj := range exec.Book(ws.UUID, wfID, priceds) {
|
|
||||||
bookings = append(bookings, scheduling_resources.ToSchedulerObject(tools.BOOKING, obj))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true, wf, execs, purchased, bookings, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// GenerateExecutions / GetDates
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// GenerateExecutions expands the cron schedule into WorkflowExecution instances.
|
|
||||||
func (ws *WorkflowSchedule) GenerateExecutions(wf *workflow.Workflow, isPreemptible bool) ([]*workflow_execution.WorkflowExecution, error) {
|
|
||||||
dates, err := ws.GetDates()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var executions []*workflow_execution.WorkflowExecution
|
|
||||||
for _, date := range dates {
|
|
||||||
obj := &workflow_execution.WorkflowExecution{
|
|
||||||
AbstractObject: utils.AbstractObject{
|
|
||||||
UUID: uuid.New().String(),
|
|
||||||
Name: wf.Name + " execution " + date.Start.Format("2006-01-02 15:04"),
|
|
||||||
},
|
|
||||||
SelectedInstances: ws.SelectedInstances,
|
|
||||||
SelectedPartnerships: ws.SelectedPartnerships,
|
|
||||||
SelectedBuyings: ws.SelectedBuyings,
|
|
||||||
SelectedStrategies: ws.SelectedStrategies,
|
|
||||||
SelectedEmbeddedStorages: ws.SelectedEmbeddedStorages,
|
|
||||||
Priority: 1,
|
|
||||||
ExecutionsID: ws.UUID,
|
|
||||||
ExecDate: date.Start,
|
|
||||||
EndDate: date.End,
|
|
||||||
State: enum.DRAFT,
|
|
||||||
WorkflowID: wf.GetID(),
|
|
||||||
}
|
|
||||||
if ws.BookingMode != booking.PLANNED {
|
|
||||||
obj.Priority = 0
|
|
||||||
}
|
|
||||||
if ws.BookingMode == booking.PREEMPTED && isPreemptible {
|
|
||||||
obj.Priority = 7
|
|
||||||
}
|
|
||||||
executions = append(executions, obj)
|
|
||||||
}
|
|
||||||
return executions, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetDates parses the cron expression and returns execution date slots.
|
|
||||||
func (ws *WorkflowSchedule) GetDates() ([]Schedule, error) {
|
|
||||||
var schedule []Schedule
|
|
||||||
if len(ws.Cron) > 0 {
|
|
||||||
if ws.End == nil {
|
|
||||||
return schedule, errors.New("a cron task should have an end date")
|
|
||||||
}
|
|
||||||
if ws.DurationS <= 0 {
|
|
||||||
ws.DurationS = ws.End.Sub(ws.Start).Seconds()
|
|
||||||
}
|
|
||||||
cronStr := strings.Split(ws.Cron, " ")
|
|
||||||
if len(cronStr) < 6 {
|
|
||||||
return schedule, errors.New("Bad cron message: (" + ws.Cron + "). Should be at least ss mm hh dd MM dw")
|
|
||||||
}
|
|
||||||
subCron := strings.Join(cronStr[:6], " ")
|
|
||||||
specParser := cron.NewParser(cron.Second | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow)
|
|
||||||
sched, err := specParser.Parse(subCron)
|
|
||||||
if err != nil {
|
|
||||||
return schedule, errors.New("Bad cron message: " + err.Error())
|
|
||||||
}
|
|
||||||
for s := sched.Next(ws.Start); !s.IsZero() && s.Before(*ws.End); s = sched.Next(s) {
|
|
||||||
e := s.Add(time.Duration(ws.DurationS) * time.Second)
|
|
||||||
schedule = append(schedule, Schedule{Start: s, End: &e})
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
schedule = append(schedule, Schedule{Start: ws.Start, End: ws.End})
|
|
||||||
}
|
|
||||||
return schedule, nil
|
|
||||||
}
|
|
||||||
@@ -1,101 +0,0 @@
|
|||||||
package scheduling_resources
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/booking"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SchedulerObject is the common contract implemented by scheduling resources
// (bookings and purchases) so the scheduler can persist, route, and track them
// uniformly. It extends utils.DBObject with draft/routing accessors.
type SchedulerObject interface {
	utils.DBObject
	// SetIsDraft toggles the resource's draft flag.
	SetIsDraft(bool)
	// GetKey returns a resource/instance/type key identifying this object.
	GetKey() string
	// SetSchedulerPeerID records the peer session that scheduled this object.
	SetSchedulerPeerID(peerID string)
	// SetExecutionsID records the executions session this object belongs to.
	SetExecutionsID(ei string)
	// GetDestPeer returns the ID of the peer that owns/hosts the resource.
	GetDestPeer() string
	// GetPeerSession returns the scheduler peer ID used for auth checks.
	GetPeerSession() string
	// GetExecutionsId returns the executions-session ID.
	GetExecutionsId() string
	// GetExecutionId returns the single execution ID.
	GetExecutionId() string
}
|
|
||||||
|
|
||||||
// ScheduledPurchase wraps purchase_resource.PurchaseResource so it satisfies
// the SchedulerObject interface.
type ScheduledPurchase struct {
	purchase_resource.PurchaseResource
}
|
|
||||||
|
|
||||||
// ScheduledBooking wraps booking.Booking so it satisfies the SchedulerObject
// interface.
type ScheduledBooking struct {
	booking.Booking
}
|
|
||||||
|
|
||||||
func FromSchedulerDBObject(dt tools.DataType, obj SchedulerObject) utils.DBObject {
|
|
||||||
switch dt {
|
|
||||||
case tools.BOOKING:
|
|
||||||
o := &booking.Booking{}
|
|
||||||
b, _ := json.Marshal(obj)
|
|
||||||
json.Unmarshal(b, &o)
|
|
||||||
return o
|
|
||||||
case tools.PURCHASE_RESOURCE:
|
|
||||||
o := &purchase_resource.PurchaseResource{}
|
|
||||||
b, _ := json.Marshal(obj)
|
|
||||||
json.Unmarshal(b, &o)
|
|
||||||
return o
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func FromSchedulerObject(dt tools.DataType, obj SchedulerObject) utils.ShallowDBObject {
|
|
||||||
switch dt {
|
|
||||||
case tools.BOOKING:
|
|
||||||
o := &booking.Booking{}
|
|
||||||
b, _ := json.Marshal(obj)
|
|
||||||
json.Unmarshal(b, &o)
|
|
||||||
return o
|
|
||||||
case tools.PURCHASE_RESOURCE:
|
|
||||||
o := &purchase_resource.PurchaseResource{}
|
|
||||||
b, _ := json.Marshal(obj)
|
|
||||||
json.Unmarshal(b, &o)
|
|
||||||
return o
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func ToSchedulerObject(dt tools.DataType, obj utils.ShallowDBObject) SchedulerObject {
|
|
||||||
switch dt {
|
|
||||||
case tools.BOOKING:
|
|
||||||
o := &ScheduledBooking{}
|
|
||||||
b, _ := json.Marshal(obj)
|
|
||||||
json.Unmarshal(b, &o)
|
|
||||||
return o
|
|
||||||
case tools.PURCHASE_RESOURCE:
|
|
||||||
o := &ScheduledPurchase{}
|
|
||||||
b, _ := json.Marshal(obj)
|
|
||||||
json.Unmarshal(b, &o)
|
|
||||||
return o
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Accessors shared by both wrappers, promoting the embedded resource's fields
// to satisfy the SchedulerObject interface.
func (b *ScheduledBooking) GetExecutionId() string  { return b.ExecutionID }
func (b *ScheduledPurchase) GetExecutionId() string { return b.ExecutionID }
func (b *ScheduledBooking) GetExecutionsId() string { return b.ExecutionsID }
func (b *ScheduledPurchase) GetExecutionsId() string { return b.ExecutionsID }
// GetPeerSession exposes SchedulerPeerID, the peer session used for auth.
func (b *ScheduledBooking) GetPeerSession() string  { return b.SchedulerPeerID }
func (b *ScheduledPurchase) GetPeerSession() string { return b.SchedulerPeerID }
func (b *ScheduledBooking) GetDestPeer() string  { return b.DestPeerID }
func (b *ScheduledPurchase) GetDestPeer() string { return b.DestPeerID }

// GetKey builds a resource/instance/type composite key for this booking.
func (b *ScheduledBooking) GetKey() string {
	return b.ResourceID + "/" + b.InstanceID + "/" + tools.BOOKING.String()
}

// GetKey builds a resource/instance/type composite key for this purchase.
func (b *ScheduledPurchase) GetKey() string {
	return b.ResourceID + "/" + b.InstanceID + "/" + tools.PURCHASE_RESOURCE.String()
}

// Mutators promoting the embedded resource's fields.
func (b *ScheduledBooking) SetIsDraft(ok bool)  { b.IsDraft = ok }
func (b *ScheduledPurchase) SetIsDraft(ok bool) { b.IsDraft = ok }
func (b *ScheduledBooking) SetSchedulerPeerID(p string)  { b.SchedulerPeerID = p }
func (b *ScheduledPurchase) SetSchedulerPeerID(p string) { b.SchedulerPeerID = p }
func (b *ScheduledBooking) SetExecutionsID(ei string)  { b.ExecutionsID = ei }
func (b *ScheduledPurchase) SetExecutionsID(ei string) { b.ExecutionsID = ei }
|
|
||||||
@@ -1,617 +0,0 @@
|
|||||||
package scheduling_resources
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"oc-scheduler/infrastructure/planner"
|
|
||||||
|
|
||||||
oclib "cloud.o-forge.io/core/oc-lib"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/config"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/booking"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Service
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// SchedulingResourcesService manages the lifecycle of Booking and PurchaseResource
// as SchedulerObjects. It caches the local peer identity so every operation can
// route correctly without calling oclib.GetMySelf() on each request.
type SchedulingResourcesService struct {
	// mu guards selfPeer; the service is used from concurrent NATS handlers.
	mu sync.RWMutex
	// selfPeer caches the local peer identity; nil until first resolved.
	selfPeer *peer.Peer
}
|
|
||||||
|
|
||||||
// singleton holds the process-wide service instance; set by InitSchedulingResource.
var singleton *SchedulingResourcesService

// InitSchedulingResource creates the singleton service. Call once at startup,
// before any call to GetService.
func InitSchedulingResource() {
	singleton = &SchedulingResourcesService{}
}

// GetService returns the singleton SchedulingResourcesService.
func GetService() *SchedulingResourcesService {
	return singleton
}
|
|
||||||
|
|
||||||
// Self returns the cached local peer, lazily resolving it on first call.
|
|
||||||
func (s *SchedulingResourcesService) Self() *peer.Peer {
|
|
||||||
s.mu.RLock()
|
|
||||||
p := s.selfPeer
|
|
||||||
s.mu.RUnlock()
|
|
||||||
if p != nil {
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
p, _ = oclib.GetMySelf()
|
|
||||||
if p != nil {
|
|
||||||
s.mu.Lock()
|
|
||||||
s.selfPeer = p
|
|
||||||
s.mu.Unlock()
|
|
||||||
}
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
// InvalidateSelf clears the cached self peer (e.g. after a peer re-registration).
|
|
||||||
func (s *SchedulingResourcesService) InvalidateSelf() {
|
|
||||||
s.mu.Lock()
|
|
||||||
s.selfPeer = nil
|
|
||||||
s.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// RemoveResourcePayload
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// RemoveResourcePayload is sent via NATS REMOVE_RESOURCE so the receiver can
// verify the delete order comes from the original scheduler session.
type RemoveResourcePayload struct {
	// ID of the booking/purchase to remove.
	ID string `json:"id"`
	// SchedulerPeerID must match the stored resource's scheduler session.
	SchedulerPeerID string `json:"scheduler_peer_id"`
	// ExecutionsID must match the stored resource's executions session.
	ExecutionsID string `json:"executions_id"`
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Propagation — creation
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// PropagateCreate routes a new booking/purchase draft to its destination:
|
|
||||||
// - local peer → store in DB + refresh planner
|
|
||||||
// - remote peer → emit NATS PROPALGATION_EVENT/PB_CREATE
|
|
||||||
func (s *SchedulingResourcesService) PropagateCreate(
|
|
||||||
obj utils.DBObject,
|
|
||||||
destPeerID string,
|
|
||||||
dt tools.DataType,
|
|
||||||
request *tools.APIRequest,
|
|
||||||
errCh chan error,
|
|
||||||
) {
|
|
||||||
selfID := s.Self()
|
|
||||||
if selfID == nil {
|
|
||||||
errCh <- fmt.Errorf("PropagateCreate: local peer not available")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if destPeerID == selfID.GetID() {
|
|
||||||
stored := oclib.NewRequestAdmin(oclib.LibDataEnum(dt), nil).StoreOne(obj.Serialize(obj))
|
|
||||||
if stored.Err != "" || stored.Data == nil {
|
|
||||||
errCh <- fmt.Errorf("could not store %s locally: %s", dt.String(), stored.Err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if dt == tools.BOOKING {
|
|
||||||
planner.GetPlannerService().RefreshSelf(selfID.PeerID, request)
|
|
||||||
}
|
|
||||||
errCh <- nil
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
m := obj.Serialize(obj)
|
|
||||||
if m["dest_peer_id"] != nil {
|
|
||||||
if data := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil).LoadOne(fmt.Sprintf("%v", m["dest_peer_id"])); data.Data != nil {
|
|
||||||
m["peer_id"] = data.Data.(*peer.Peer).PeerID
|
|
||||||
}
|
|
||||||
} else if m["peerless"] == true {
|
|
||||||
originRef := fmt.Sprintf("%v", m["origin_ref"])
|
|
||||||
if !isValidPeerlessRef(originRef) {
|
|
||||||
emitPeerBehaviorReport(request.PeerID, tools.BehaviorFraud,
|
|
||||||
"peerless booking with invalid or unrecognised Origin.Ref", originRef)
|
|
||||||
errCh <- fmt.Errorf("peerless booking rejected: invalid Origin.Ref %q", originRef)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
stored := oclib.NewRequestAdmin(oclib.LibDataEnum(dt), nil).StoreOne(m)
|
|
||||||
if stored.Err != "" || stored.Data == nil {
|
|
||||||
errCh <- fmt.Errorf("could not store peerless %s locally: %s", dt.String(), stored.Err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if dt == tools.BOOKING {
|
|
||||||
planner.GetPlannerService().RefreshSelf(selfID.PeerID, request)
|
|
||||||
}
|
|
||||||
errCh <- nil
|
|
||||||
return
|
|
||||||
} else {
|
|
||||||
fmt.Println("PropagateCreate: no dest_peer_id and not peerless, skipping")
|
|
||||||
errCh <- nil
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
payload, err := json.Marshal(m)
|
|
||||||
if err != nil {
|
|
||||||
errCh <- fmt.Errorf("could not serialize %s: %w", dt.String(), err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
b, err := json.Marshal(&tools.PropalgationMessage{
|
|
||||||
DataType: dt.EnumIndex(),
|
|
||||||
Action: tools.PB_CREATE,
|
|
||||||
Payload: payload,
|
|
||||||
})
|
|
||||||
if err == nil {
|
|
||||||
tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
|
|
||||||
FromApp: "oc-scheduler",
|
|
||||||
Datatype: dt,
|
|
||||||
Method: int(tools.PROPALGATION_EVENT),
|
|
||||||
Payload: b,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
errCh <- nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Propagation — update / confirmation
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// PropagateWrite routes a booking/purchase update to its destination.
// Returns true when the resource was confirmed locally (IsDraft=false on self peer)
// and the caller must trigger considers via execution.UpdateExecutionState.
// Remote updates are fire-and-forget over NATS and always return false.
func (s *SchedulingResourcesService) PropagateWrite(
	obj utils.DBObject,
	destPeerID string,
	dt tools.DataType,
	request *tools.APIRequest,
) bool {
	selfID := s.Self()
	if selfID == nil {
		fmt.Println("PropagateWrite: local peer not available")
		return false
	}

	// Local destination: update in place; bookings change local capacity, so
	// refresh the planner view.
	if destPeerID == selfID.GetID() {
		if _, _, err := utils.GenericRawUpdateOne(obj, obj.GetID(), obj.GetAccessor(request)); err != nil {
			fmt.Printf("PropagateWrite: local update failed for %s %s: %v\n", dt, obj.GetID(), err)
			return false
		}
		if dt == tools.BOOKING {
			planner.GetPlannerService().RefreshSelf(selfID.PeerID, request)
		}
		// Confirmed means the object left draft state.
		return !obj.IsDrafted()
	}

	// Remote destination: serialize the whole object and publish it.
	payload, err := json.Marshal(obj)
	if err != nil {
		return false
	}
	// NOTE(review): updates are published on the CREATE_RESOURCE subject —
	// presumably the receiver upserts; confirm this is intentional.
	tools.NewNATSCaller().SetNATSPub(tools.CREATE_RESOURCE, tools.NATSResponse{
		FromApp:  "oc-scheduler",
		Datatype: dt,
		Method:   int(tools.CREATE_RESOURCE),
		Payload:  payload,
	})
	return false
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Deletion
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// Delete removes a booking/purchase from its destination peer (local or NATS).
|
|
||||||
func (s *SchedulingResourcesService) Delete(dt tools.DataType, bk SchedulerObject, request *tools.APIRequest) {
|
|
||||||
selfID := s.Self()
|
|
||||||
if selfID == nil {
|
|
||||||
fmt.Println("Delete: local peer not available")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if bk.GetDestPeer() == selfID.GetID() {
|
|
||||||
data := oclib.NewRequestAdmin(oclib.LibDataEnum(dt), nil).DeleteOne(bk.GetID())
|
|
||||||
fmt.Println("Delete scheduling resource", bk.GetID(), data.Err)
|
|
||||||
if dt == tools.BOOKING {
|
|
||||||
planner.GetPlannerService().RefreshSelf(selfID.PeerID, request)
|
|
||||||
}
|
|
||||||
if (dt == tools.BOOKING || dt == tools.PURCHASE_RESOURCE) && config.GetConfig().IsNano {
|
|
||||||
SendRemoveToMaster(bk, dt)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
EmitNATSRemove(bk.GetID(), bk.GetPeerSession(), bk.GetExecutionsId(), dt)
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmitNATSRemove sends a REMOVE_RESOURCE NATS event with auth fields.
|
|
||||||
func EmitNATSRemove(id, schedulerPeerID, executionsID string, dt tools.DataType) {
|
|
||||||
payload, _ := json.Marshal(RemoveResourcePayload{
|
|
||||||
ID: id,
|
|
||||||
SchedulerPeerID: schedulerPeerID,
|
|
||||||
ExecutionsID: executionsID,
|
|
||||||
})
|
|
||||||
tools.NewNATSCaller().SetNATSPub(tools.REMOVE_RESOURCE, tools.NATSResponse{
|
|
||||||
FromApp: "oc-scheduler",
|
|
||||||
Datatype: dt,
|
|
||||||
Method: int(tools.REMOVE_RESOURCE),
|
|
||||||
Payload: payload,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Confirmation
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// Confirm sets IsDraft=false on a booking or purchase.
// For bookings, also advances State to SCHEDULED and refreshes the self planner.
// Data types other than BOOKING/PURCHASE_RESOURCE are ignored.
func Confirm(id string, dt tools.DataType) {
	// Internal path: runs with an admin request, no user context.
	adminReq := &tools.APIRequest{Admin: true}
	switch dt {
	case tools.BOOKING:
		res, _, err := booking.NewAccessor(adminReq).LoadOne(id)
		if err != nil || res == nil {
			fmt.Printf("Confirm: could not load booking %s: %v\n", id, err)
			return
		}
		bk := res.(*booking.Booking)
		// Confirmation = leave draft state and mark the booking as scheduled.
		bk.IsDraft = false
		bk.State = enum.SCHEDULED
		if _, _, err := utils.GenericRawUpdateOne(bk, id, booking.NewAccessor(adminReq)); err != nil {
			fmt.Printf("Confirm: could not confirm booking %s: %v\n", id, err)
			return
		}
		// A confirmed booking changes local capacity: refresh the planner view.
		if self := GetService().Self(); self != nil {
			planner.GetPlannerService().RefreshSelf(self.PeerID, adminReq)
		}
	case tools.PURCHASE_RESOURCE:
		res, _, err := purchase_resource.NewAccessor(adminReq).LoadOne(id)
		if err != nil || res == nil {
			fmt.Printf("Confirm: could not load purchase %s: %v\n", id, err)
			return
		}
		pr := res.(*purchase_resource.PurchaseResource)
		pr.IsDraft = false
		if _, _, err := utils.GenericRawUpdateOne(pr, id, purchase_resource.NewAccessor(adminReq)); err != nil {
			fmt.Printf("Confirm: could not confirm purchase %s: %v\n", id, err)
		}
	}
}
|
|
||||||
|
|
||||||
// DraftTimeout deletes a booking/purchase if it is still a draft after 10 minutes.
|
|
||||||
func DraftTimeout(id string, dt tools.DataType) {
|
|
||||||
adminReq := &tools.APIRequest{Admin: true}
|
|
||||||
var res utils.DBObject
|
|
||||||
var loadErr error
|
|
||||||
switch dt {
|
|
||||||
case tools.BOOKING:
|
|
||||||
res, _, loadErr = booking.NewAccessor(adminReq).LoadOne(id)
|
|
||||||
case tools.PURCHASE_RESOURCE:
|
|
||||||
res, _, loadErr = purchase_resource.NewAccessor(adminReq).LoadOne(id)
|
|
||||||
default:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if loadErr != nil || res == nil || !res.IsDrafted() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch dt {
|
|
||||||
case tools.BOOKING:
|
|
||||||
booking.NewAccessor(adminReq).DeleteOne(id)
|
|
||||||
if config.GetConfig().IsNano {
|
|
||||||
SendRemoveToMaster(res, dt)
|
|
||||||
}
|
|
||||||
case tools.PURCHASE_RESOURCE:
|
|
||||||
purchase_resource.NewAccessor(adminReq).DeleteOne(id)
|
|
||||||
if config.GetConfig().IsNano {
|
|
||||||
SendRemoveToMaster(res, dt)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fmt.Printf("DraftTimeout: %s %s deleted (still draft after 10 min)\n", dt.String(), id)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// NATS handlers — incoming booking/purchase
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// HandleCreateBooking processes an incoming booking from NATS.
// Returns true if the booking was confirmed (IsDraft→false) and considers must be triggered.
//
// For a known booking ID, the sender must present the same SchedulerPeerID and
// ExecutionsID as the stored copy; mismatches are ignored. New bookings are
// stored as drafts and auto-expired after 10 minutes via DraftTimeout.
//
// NOTE(review): unlike HandleCreatePurchase, this handler does not verify that
// bk.DestPeerID targets this peer — confirm whether that check is intended.
func (s *SchedulingResourcesService) HandleCreateBooking(bk *booking.Booking, adminReq *tools.APIRequest) bool {
	self := s.Self()
	if self == nil {
		return false
	}
	// Update path: the booking already exists locally.
	if existing, _, loadErr := booking.NewAccessor(adminReq).LoadOne(bk.GetID()); loadErr == nil && existing != nil {
		prev := existing.(*booking.Booking)
		// Auth: only the original scheduler session may modify its booking.
		if prev.SchedulerPeerID != bk.SchedulerPeerID || prev.ExecutionsID != bk.ExecutionsID {
			fmt.Println("HandleCreateBooking: auth mismatch, ignoring", bk.GetID())
			return false
		}
		// Never demote a confirmed booking back to draft.
		if !prev.IsDrafted() && bk.IsDraft {
			return false
		}
		// Confirmation arriving after the start date: the slot is gone, delete.
		if !bk.IsDraft && !prev.ExpectedStartDate.IsZero() && prev.ExpectedStartDate.Before(time.Now().UTC()) {
			fmt.Println("HandleCreateBooking: expired, deleting", bk.GetID())
			booking.NewAccessor(adminReq).DeleteOne(bk.GetID())
			if config.GetConfig().IsNano {
				SendRemoveToMaster(bk, tools.BOOKING)
			}
			return false
		}
		if _, _, err := utils.GenericRawUpdateOne(bk, bk.GetID(), booking.NewAccessor(adminReq)); err != nil {
			fmt.Println("HandleCreateBooking: update failed:", err)
			return false
		}
		// Nano nodes mirror their bookings up to the master peer.
		if config.GetConfig().IsNano {
			SendBookingToMaster(bk)
		}
		planner.GetPlannerService().RefreshSelf(self.PeerID, adminReq)

		// Confirmed (non-draft) updates require the caller to trigger considers.
		return !bk.IsDraft
	}

	// New booking
	if !bk.ExpectedStartDate.IsZero() && bk.ExpectedStartDate.Before(time.Now().UTC()) {
		fmt.Println("HandleCreateBooking: start date in the past, discarding")
		return false
	}
	// Reject bookings that overlap an existing reservation on this instance.
	if !planner.GetPlannerService().CheckResourceInstance(self.PeerID, bk.ResourceID, bk.InstanceID, bk.ExpectedStartDate, bk.ExpectedEndDate) {
		fmt.Println("HandleCreateBooking: conflicts with local planner, discarding")
		return false
	}

	// Store as a draft; confirmation arrives in a later message.
	bk.IsDraft = true
	stored, _, err := booking.NewAccessor(adminReq).StoreOne(bk)
	if err != nil {
		fmt.Println("HandleCreateBooking: could not store:", err)
		return false
	}
	storedID := stored.GetID()

	planner.GetPlannerService().RefreshSelf(self.PeerID, adminReq)

	// Auto-delete the draft if it is never confirmed.
	time.AfterFunc(10*time.Minute, func() { DraftTimeout(storedID, tools.BOOKING) })

	if config.GetConfig().IsNano {
		SendBookingToMaster(bk) // TODO : ASK FOR RESPONSE...
	}
	return false
}
|
|
||||||
|
|
||||||
func SendBookingToMaster(booking *booking.Booking) {
|
|
||||||
self, _ := oclib.GetMySelf()
|
|
||||||
if booking.GetCreatorID() != self.GetID() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
d := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil).Search(&dbs.Filters{
|
|
||||||
And: map[string][]dbs.Filter{
|
|
||||||
"relation": {{Operator: dbs.EQUAL.String(), Value: peer.MASTER}},
|
|
||||||
},
|
|
||||||
}, "", false, 0, 1)
|
|
||||||
for _, dd := range d.Data {
|
|
||||||
booking.IsDraft = false
|
|
||||||
booking.FromNano = self.GetID()
|
|
||||||
m := map[string]interface{}{}
|
|
||||||
i, err := json.Marshal(m)
|
|
||||||
if err == nil {
|
|
||||||
json.Unmarshal(i, &m)
|
|
||||||
m["peer_id"] = dd.(*peer.Peer).PeerID
|
|
||||||
if payloadd, err := json.Marshal(m); err == nil {
|
|
||||||
b, err := json.Marshal(&tools.PropalgationMessage{
|
|
||||||
DataType: tools.BOOKING.EnumIndex(),
|
|
||||||
Action: tools.PB_CREATE,
|
|
||||||
Payload: payloadd,
|
|
||||||
})
|
|
||||||
if err == nil {
|
|
||||||
tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
|
|
||||||
FromApp: "oc-scheduler",
|
|
||||||
Datatype: tools.BOOKING,
|
|
||||||
Method: int(tools.PROPALGATION_EVENT),
|
|
||||||
Payload: b,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func SendRemoveToMaster(obj utils.DBObject, dt tools.DataType) {
|
|
||||||
self, _ := oclib.GetMySelf()
|
|
||||||
if obj.GetCreatorID() != self.GetID() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
d := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil).Search(&dbs.Filters{
|
|
||||||
And: map[string][]dbs.Filter{
|
|
||||||
"relation": {{Operator: dbs.EQUAL.String(), Value: peer.MASTER}},
|
|
||||||
},
|
|
||||||
}, "", false, 0, 1)
|
|
||||||
for _, dd := range d.Data {
|
|
||||||
m := map[string]interface{}{}
|
|
||||||
i, err := json.Marshal(m)
|
|
||||||
if err == nil {
|
|
||||||
json.Unmarshal(i, &m)
|
|
||||||
m["peer_id"] = dd.(*peer.Peer).PeerID
|
|
||||||
if payloadd, err := json.Marshal(m); err == nil {
|
|
||||||
b, err := json.Marshal(&tools.PropalgationMessage{
|
|
||||||
DataType: dt.EnumIndex(),
|
|
||||||
Action: tools.PB_DELETE,
|
|
||||||
Payload: payloadd,
|
|
||||||
})
|
|
||||||
if err == nil {
|
|
||||||
tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
|
|
||||||
FromApp: "oc-scheduler",
|
|
||||||
Datatype: dt,
|
|
||||||
Method: int(tools.PROPALGATION_EVENT),
|
|
||||||
Payload: b,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func SendPurchaseToMaster(purchase *purchase_resource.PurchaseResource) {
|
|
||||||
self, _ := oclib.GetMySelf()
|
|
||||||
if purchase.GetCreatorID() != self.GetID() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
d := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil).Search(&dbs.Filters{
|
|
||||||
And: map[string][]dbs.Filter{
|
|
||||||
"relation": {{Operator: dbs.EQUAL.String(), Value: peer.MASTER}},
|
|
||||||
},
|
|
||||||
}, "", false, 0, 1)
|
|
||||||
for _, dd := range d.Data {
|
|
||||||
purchase.IsDraft = false
|
|
||||||
purchase.FromNano = self.GetID()
|
|
||||||
m := map[string]interface{}{}
|
|
||||||
i, err := json.Marshal(m)
|
|
||||||
if err == nil {
|
|
||||||
json.Unmarshal(i, &m)
|
|
||||||
m["peer_id"] = dd.(*peer.Peer).PeerID
|
|
||||||
if payloadd, err := json.Marshal(m); err == nil {
|
|
||||||
b, err := json.Marshal(&tools.PropalgationMessage{
|
|
||||||
DataType: tools.PURCHASE_RESOURCE.EnumIndex(),
|
|
||||||
Action: tools.PB_CREATE,
|
|
||||||
Payload: payloadd,
|
|
||||||
})
|
|
||||||
if err == nil {
|
|
||||||
tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
|
|
||||||
FromApp: "oc-scheduler",
|
|
||||||
Datatype: tools.PURCHASE_RESOURCE,
|
|
||||||
Method: int(tools.PROPALGATION_EVENT),
|
|
||||||
Payload: b,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandleCreatePurchase processes an incoming purchase from NATS.
// Returns true if considers must be triggered.
//
// Purchases addressed to another peer are ignored. For a known purchase ID,
// the sender must present the same SchedulerPeerID and ExecutionsID as the
// stored copy; mismatches are ignored. New purchases are stored as drafts and
// auto-expired after 10 minutes via DraftTimeout.
func (s *SchedulingResourcesService) HandleCreatePurchase(pr *purchase_resource.PurchaseResource, adminReq *tools.APIRequest) bool {
	self := s.Self()
	if self == nil {
		return false
	}
	// Only handle purchases destined for this peer.
	if pr.DestPeerID != self.GetID() {
		return false
	}

	// Update path: the purchase already exists locally.
	if existing, _, loadErr := purchase_resource.NewAccessor(adminReq).LoadOne(pr.GetID()); loadErr == nil && existing != nil {
		prev := existing.(*purchase_resource.PurchaseResource)
		// Auth: only the original scheduler session may modify its purchase.
		if prev.SchedulerPeerID != pr.SchedulerPeerID || prev.ExecutionsID != pr.ExecutionsID {
			fmt.Println("HandleCreatePurchase: auth mismatch, ignoring", pr.GetID())
			return false
		}
		// Never demote a confirmed purchase back to draft.
		if !prev.IsDrafted() && pr.IsDraft {
			return false
		}
		if _, _, err := utils.GenericRawUpdateOne(pr, pr.GetID(), purchase_resource.NewAccessor(adminReq)); err != nil {
			fmt.Println("HandleCreatePurchase: update failed:", err)
			return false
		}
		// Confirmed (non-draft) updates require the caller to trigger considers.
		return !pr.IsDraft
	}

	// New purchase: store as a draft; confirmation arrives in a later message.
	pr.IsDraft = true
	stored, _, err := purchase_resource.NewAccessor(adminReq).StoreOne(pr)
	if err != nil {
		fmt.Println("HandleCreatePurchase: could not store:", err)
		return false
	}
	// Nano nodes mirror their purchases up to the master peer.
	if config.GetConfig().IsNano {
		SendPurchaseToMaster(pr) // TODO : ASK FOR RESPONSE...
	}
	storedID := stored.GetID()
	// Auto-delete the draft if it is never confirmed.
	time.AfterFunc(10*time.Minute, func() { DraftTimeout(storedID, tools.PURCHASE_RESOURCE) })
	return false
}
|
|
||||||
|
|
||||||
// HandleRemoveBooking verifies auth and deletes the booking.
|
|
||||||
func (s *SchedulingResourcesService) HandleRemoveBooking(p RemoveResourcePayload, adminReq *tools.APIRequest) {
|
|
||||||
res, _, loadErr := booking.NewAccessor(adminReq).LoadOne(p.ID)
|
|
||||||
if loadErr != nil || res == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
existing := res.(*booking.Booking)
|
|
||||||
if existing.SchedulerPeerID != p.SchedulerPeerID || existing.ExecutionsID != p.ExecutionsID || existing.IsDraft {
|
|
||||||
fmt.Println("HandleRemoveBooking: auth mismatch, ignoring", p.ID)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
d, _, _ := booking.NewAccessor(adminReq).DeleteOne(p.ID)
|
|
||||||
if self := s.Self(); self != nil {
|
|
||||||
planner.GetPlannerService().RefreshSelf(self.PeerID, adminReq)
|
|
||||||
}
|
|
||||||
if config.GetConfig().IsNano && d != nil {
|
|
||||||
SendRemoveToMaster(d, tools.BOOKING) // TODO : ASK FOR RESPONSE...
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandleRemovePurchase verifies auth and deletes the purchase.
|
|
||||||
func (s *SchedulingResourcesService) HandleRemovePurchase(p RemoveResourcePayload, adminReq *tools.APIRequest) {
|
|
||||||
res, _, loadErr := purchase_resource.NewAccessor(adminReq).LoadOne(p.ID)
|
|
||||||
if loadErr != nil || res == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
existing := res.(*purchase_resource.PurchaseResource)
|
|
||||||
if existing.SchedulerPeerID != p.SchedulerPeerID || existing.ExecutionsID != p.ExecutionsID || existing.IsDraft {
|
|
||||||
fmt.Println("HandleRemovePurchase: auth mismatch, ignoring", p.ID)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
d, _, _ := purchase_resource.NewAccessor(adminReq).DeleteOne(p.ID)
|
|
||||||
if config.GetConfig().IsNano && d != nil {
|
|
||||||
SendRemoveToMaster(d, tools.PURCHASE_RESOURCE) // TODO : ASK FOR RESPONSE...
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Internal helpers
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// knownRegistryPrefixes lists the container-registry hostname prefixes that an
// image reference may start with to be accepted by isValidPeerlessRef.
var knownRegistryPrefixes = []string{
	"docker.io/", "index.docker.io/", "ghcr.io/", "quay.io/",
	"registry.hub.docker.com/", "gcr.io/", "public.ecr.aws/",
}

// isValidPeerlessRef reports whether ref is a non-empty image reference that
// starts with one of the known registry prefixes and carries something after
// the prefix. The literal "<nil>" (a stringified nil) is rejected as well.
func isValidPeerlessRef(ref string) bool {
	switch ref {
	case "", "<nil>":
		return false
	}
	for _, registry := range knownRegistryPrefixes {
		rest := strings.TrimPrefix(ref, registry)
		if rest != ref && rest != "" {
			return true
		}
	}
	return false
}
|
|
||||||
|
|
||||||
func emitPeerBehaviorReport(targetPeerDID string, severity tools.BehaviorSeverity, reason, evidence string) {
|
|
||||||
if targetPeerDID == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
report := tools.PeerBehaviorReport{
|
|
||||||
ReporterApp: "oc-scheduler",
|
|
||||||
TargetPeerID: targetPeerDID,
|
|
||||||
Severity: severity,
|
|
||||||
Reason: reason,
|
|
||||||
Evidence: evidence,
|
|
||||||
At: time.Now().UTC(),
|
|
||||||
}
|
|
||||||
payload, err := json.Marshal(report)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
tools.NewNATSCaller().SetNATSPub(tools.PEER_BEHAVIOR_EVENT, tools.NATSResponse{
|
|
||||||
FromApp: "oc-scheduler",
|
|
||||||
Datatype: tools.PEER,
|
|
||||||
Method: int(tools.PEER_BEHAVIOR_EVENT),
|
|
||||||
Payload: payload,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
@@ -1,303 +0,0 @@
|
|||||||
package session
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"oc-scheduler/infrastructure/execution"
|
|
||||||
"oc-scheduler/infrastructure/scheduling_resources"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/bill"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/booking"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/order"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
|
|
||||||
oclib "cloud.o-forge.io/core/oc-lib"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SessionExecutionsService groups the draft bookings, purchases, executions
// and order records that belong to one scheduling session, identified by
// ExecutionsSessionID.
type SessionExecutionsService struct {
	Mu                  sync.RWMutex // guards concurrent access to session state
	ExecutionsSessionID string       // session key shared by all session-scoped records
}

// NewSessionExecutionsService returns a service bound to the given session ID.
func NewSessionExecutionsService(sessionID string) *SessionExecutionsService {
	return &SessionExecutionsService{ExecutionsSessionID: sessionID}
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Remote resource registry
|
|
||||||
//
|
|
||||||
// Bookings and purchases for remote peers are sent via NATS and stored only on
|
|
||||||
// the remote peer — they never appear in local MongoDB. CleanupSession would
|
|
||||||
// therefore miss them entirely. We keep a package-level in-memory registry
|
|
||||||
// (executionsID → list) that is populated when PropagateCreate routes to a
|
|
||||||
// remote peer, and consumed (cleared) by CleanupSession so it can emit the
|
|
||||||
// corresponding REMOVE_RESOURCE NATS messages.
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// remoteResourceEntry records one booking/purchase that was created on a
// remote peer via NATS (and therefore never stored in local MongoDB) so that
// CleanupSession can emit the matching REMOVE_RESOURCE message later.
type remoteResourceEntry struct {
	ID              string         // remote record ID
	SchedulerPeerID string         // peer that owns the scheduler session
	ExecutionsID    string         // session the resource belongs to
	DT              tools.DataType // tools.BOOKING or tools.PURCHASE_RESOURCE
}

// remoteRegistryMu guards remoteRegistry.
var remoteRegistryMu sync.Mutex

// remoteRegistry maps executionsID -> resources created on remote peers.
var remoteRegistry = map[string][]remoteResourceEntry{}
|
|
||||||
|
|
||||||
func trackRemoteResource(executionsID, id, schedulerPeerID string, dt tools.DataType) {
|
|
||||||
if id == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
remoteRegistryMu.Lock()
|
|
||||||
remoteRegistry[executionsID] = append(remoteRegistry[executionsID], remoteResourceEntry{
|
|
||||||
ID: id, SchedulerPeerID: schedulerPeerID, ExecutionsID: executionsID, DT: dt,
|
|
||||||
})
|
|
||||||
remoteRegistryMu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// consumeTrackedRemotes atomically returns and removes all tracked remote
|
|
||||||
// resources for the given session.
|
|
||||||
func consumeTrackedRemotes(executionsID string) []remoteResourceEntry {
|
|
||||||
remoteRegistryMu.Lock()
|
|
||||||
defer remoteRegistryMu.Unlock()
|
|
||||||
entries := remoteRegistry[executionsID]
|
|
||||||
delete(remoteRegistry, executionsID)
|
|
||||||
return entries
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// DB helpers
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
func (s *SessionExecutionsService) sessionIDFilter(field, id string) *dbs.Filters {
|
|
||||||
return &dbs.Filters{
|
|
||||||
And: map[string][]dbs.Filter{
|
|
||||||
field: {{Operator: dbs.EQUAL.String(), Value: id}},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *SessionExecutionsService) loadSession(dt tools.DataType) []scheduling_resources.SchedulerObject {
|
|
||||||
results := oclib.NewRequestAdmin(oclib.LibDataEnum(dt), nil).Search(
|
|
||||||
s.sessionIDFilter("executions_id", s.ExecutionsSessionID), "", true, 0, 10000)
|
|
||||||
out := make([]scheduling_resources.SchedulerObject, 0, len(results.Data))
|
|
||||||
for _, obj := range results.Data {
|
|
||||||
out = append(out, scheduling_resources.ToSchedulerObject(dt, obj))
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *SessionExecutionsService) LoadSessionExecs() []*workflow_execution.WorkflowExecution {
|
|
||||||
adminReq := &tools.APIRequest{Admin: true}
|
|
||||||
results, _, _ := workflow_execution.NewAccessor(adminReq).Search(
|
|
||||||
s.sessionIDFilter("executions_id", s.ExecutionsSessionID), "", true, 0, 10000)
|
|
||||||
out := make([]*workflow_execution.WorkflowExecution, 0)
|
|
||||||
for _, obj := range results {
|
|
||||||
if exec, ok := obj.(*workflow_execution.WorkflowExecution); ok {
|
|
||||||
out = append(out, exec)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *SessionExecutionsService) loadSessionOrder() *order.Order {
|
|
||||||
adminReq := &tools.APIRequest{Admin: true}
|
|
||||||
results, _, _ := order.NewAccessor(adminReq).Search(
|
|
||||||
s.sessionIDFilter("executions_id", s.ExecutionsSessionID), "", true, 0, 10000)
|
|
||||||
for _, obj := range results {
|
|
||||||
if o, ok := obj.(*order.Order); ok {
|
|
||||||
return o
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Session upsert
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// UpsertSessionDrafts replaces the session's draft state with the given
// purchases, bookings and executions:
//   - bookings/purchases are upserted (existing records rewritten, records no
//     longer present deleted),
//   - all previous executions are deleted and the new ones stored as drafts,
//     each with its own deadline watcher,
//   - the session order is created on first call, otherwise extended with the
//     new purchases/bookings.
//
// NOTE(review): on re-upsert the purchases/bookings are appended to the
// existing order again, which looks like it can create duplicates — confirm
// this is intended.
func (s *SessionExecutionsService) UpsertSessionDrafts(
	purchases, bookings []scheduling_resources.SchedulerObject,
	execs []*workflow_execution.WorkflowExecution,
	request *tools.APIRequest,
) {
	adminReq := &tools.APIRequest{Admin: true}

	// Upsert each resource type, then delete any previously stored record of
	// that type that is not part of the new draft set (tracked via seen).
	for dt, datas := range map[tools.DataType][]scheduling_resources.SchedulerObject{
		tools.BOOKING:           bookings,
		tools.PURCHASE_RESOURCE: purchases,
	} {
		existing := map[string]scheduling_resources.SchedulerObject{}
		seen := map[string]bool{}
		for _, bk := range s.loadSession(dt) {
			existing[bk.GetKey()] = bk
		}
		s.upsertDrafts(dt, datas, existing, seen, request)
		for key, prev := range existing {
			if !seen[key] {
				scheduling_resources.GetService().Delete(dt, prev, request)
			}
		}
	}

	// Replace all executions: drop the old ones (and their locks), then store
	// the new ones as drafts and arm a deadline watcher for each.
	for _, old := range s.LoadSessionExecs() {
		execution.UnregisterExecLock(old.GetID())
		workflow_execution.NewAccessor(adminReq).DeleteOne(old.GetID())
	}
	for _, exec := range execs {
		exec.ExecutionsID = s.ExecutionsSessionID
		exec.IsDraft = true
		ex, _, err := utils.GenericStoreOne(exec, workflow_execution.NewAccessor(adminReq))
		if err == nil {
			execution.RegisterExecLock(ex.GetID())
			go execution.WatchDeadline(ex.GetID(), s.ExecutionsSessionID, exec.ExecDate, request)
		}
	}

	// Create the session order on first upsert; afterwards append the new
	// purchases/bookings to the already-stored order.
	if existing := s.loadSessionOrder(); existing == nil {
		GenerateOrder(purchases, bookings, s.ExecutionsSessionID, request)
	} else {
		for _, purch := range purchases {
			existing.Purchases = append(existing.Purchases,
				scheduling_resources.FromSchedulerObject(tools.PURCHASE_RESOURCE, purch).(*purchase_resource.PurchaseResource))
		}
		for _, b := range bookings {
			existing.Bookings = append(existing.Bookings,
				scheduling_resources.FromSchedulerObject(tools.BOOKING, b).(*booking.Booking))
		}
		utils.GenericRawUpdateOne(existing, existing.GetID(), order.NewAccessor(adminReq))
	}
}
|
|
||||||
|
|
||||||
// upsertDrafts writes each scheduler object of type dt for this session:
// objects whose key already exists are rewritten in place (PropagateWrite),
// new ones are created (PropagateCreate). Every processed key is recorded in
// seen so the caller can delete stale records afterwards.
func (s *SessionExecutionsService) upsertDrafts(
	dt tools.DataType,
	datas []scheduling_resources.SchedulerObject,
	existing map[string]scheduling_resources.SchedulerObject,
	seen map[string]bool,
	request *tools.APIRequest,
) {
	self := scheduling_resources.GetService().Self()
	fmt.Println("upsertDrafts", len(datas), len(existing))
	for _, bk := range datas {
		// Stamp the object with our own peer and the session before writing.
		if self != nil {
			bk.SetSchedulerPeerID(self.PeerID)
		}
		bk.SetExecutionsID(s.ExecutionsSessionID)
		seen[bk.GetKey()] = true
		if prev, ok := existing[bk.GetKey()]; ok {
			// Rewrite: keep the stored record's ID and mark it non-draft.
			bk.SetID(prev.GetID())
			bk.SetIsDraft(false)
			needsConsiders := scheduling_resources.GetService().PropagateWrite(
				scheduling_resources.FromSchedulerDBObject(dt, bk), bk.GetDestPeer(), dt, request)
			// When the write requires a state re-evaluation, trigger it
			// asynchronously with the record's ID as payload.
			if needsConsiders {
				if payload, err := json.Marshal(execution.ConsidersPayload{ID: bk.GetID()}); err == nil {
					go execution.UpdateExecutionState(payload, dt)
				}
			}
		} else {
			// Create: wait for the propagation result before continuing.
			errCh := make(chan error, 1)
			scheduling_resources.GetService().PropagateCreate(
				scheduling_resources.FromSchedulerDBObject(dt, bk), bk.GetDestPeer(), dt, request, errCh)
			<-errCh
			// If this booking/purchase was routed to a remote peer (not stored in
			// local DB), register it so CleanupSession can emit REMOVE_RESOURCE later.
			if self != nil && bk.GetDestPeer() != self.GetID() {
				trackRemoteResource(s.ExecutionsSessionID, bk.GetID(), bk.GetPeerSession(), dt)
			}
		}
	}
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Session lifecycle
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// CleanupSession discards every record attached to this session: local
// bookings/purchases, remotely tracked ones (via NATS REMOVE_RESOURCE),
// draft executions and their locks, and the session order.
func (s *SessionExecutionsService) CleanupSession(request *tools.APIRequest) {
	adminReq := &tools.APIRequest{Admin: true}

	// Delete bookings and purchases directly by executions_id.
	// We cannot rely on execution.Unschedule here because it uses
	// exec.PeerBookByGraph which is empty during the draft/check phase.
	for _, dt := range []tools.DataType{tools.BOOKING, tools.PURCHASE_RESOURCE} {
		for _, obj := range s.loadSession(dt) {
			scheduling_resources.GetService().Delete(dt, obj, request)
		}
	}

	// Emit NATS REMOVE_RESOURCE for bookings/purchases that were routed to
	// remote peers and therefore never stored in local DB. loadSession above
	// cannot find them, so we rely on the in-memory registry populated by
	// upsertDrafts when PropagateCreate routes to a non-self peer.
	for _, entry := range consumeTrackedRemotes(s.ExecutionsSessionID) {
		scheduling_resources.EmitNATSRemove(entry.ID, entry.SchedulerPeerID, entry.ExecutionsID, entry.DT)
	}

	// Drop the draft executions together with their locks.
	for _, exec := range s.LoadSessionExecs() {
		execution.UnregisterExecLock(exec.GetID())
		workflow_execution.NewAccessor(adminReq).DeleteOne(exec.GetID())
	}
	// Finally remove the session order, if one was created.
	if o := s.loadSessionOrder(); o != nil {
		order.NewAccessor(adminReq).DeleteOne(o.GetID())
	}
}
|
|
||||||
|
|
||||||
// GenerateOrder creates a new pending draft order for the given session,
// attaches the supplied purchases and bookings, stores it, and drafts the
// first bill for it. It returns the new order's ID; on bill-draft failure the
// order ID is returned together with the error (the order is already stored).
func GenerateOrder(
	purchases, bookings []scheduling_resources.SchedulerObject,
	executionsID string,
	request *tools.APIRequest,
) (string, error) {
	// Name encodes the requesting peer and the creation instant (UTC).
	newOrder := &order.Order{
		AbstractObject: utils.AbstractObject{
			Name:    "order_" + request.PeerID + "_" + time.Now().UTC().Format("2006-01-02T15:04:05"),
			IsDraft: true,
		},
		ExecutionsID: executionsID,
		Purchases:    []*purchase_resource.PurchaseResource{},
		Bookings:     []*booking.Booking{},
		Status:       enum.PENDING,
	}
	for _, purch := range purchases {
		newOrder.Purchases = append(newOrder.Purchases,
			scheduling_resources.FromSchedulerObject(tools.PURCHASE_RESOURCE, purch).(*purchase_resource.PurchaseResource))
	}
	for _, b := range bookings {
		newOrder.Bookings = append(newOrder.Bookings,
			scheduling_resources.FromSchedulerObject(tools.BOOKING, b).(*booking.Booking))
	}
	res, _, err := order.NewAccessor(request).StoreOne(newOrder)
	if err != nil {
		return "", err
	}
	// Draft the first bill right away; the order itself is already persisted.
	if _, err := bill.DraftFirstBill(res.(*order.Order), request); err != nil {
		return res.GetID(), err
	}
	return res.GetID(), nil
}
|
|
||||||
|
|
||||||
// ConfirmSession finalizes the session: every booking/purchase is marked
// non-draft and rewritten via PropagateWrite (triggering an async state
// re-evaluation when required), and every execution is moved to SCHEDULED.
//
// NOTE(review): the function always returns nil — propagation and update
// errors are silently ignored; confirm whether they should be surfaced.
func (s *SessionExecutionsService) ConfirmSession(request *tools.APIRequest) error {
	adminReq := &tools.APIRequest{Admin: true}
	for _, dt := range []tools.DataType{tools.BOOKING, tools.PURCHASE_RESOURCE} {
		for _, bk := range s.loadSession(dt) {
			bk.SetIsDraft(false)
			needsConsiders := scheduling_resources.GetService().PropagateWrite(
				scheduling_resources.FromSchedulerDBObject(dt, bk), bk.GetDestPeer(), dt, request)
			if needsConsiders {
				if payload, err := json.Marshal(execution.ConsidersPayload{ID: bk.GetID()}); err == nil {
					go execution.UpdateExecutionState(payload, dt)
				}
			}
		}
	}
	// Promote every draft execution of the session to SCHEDULED.
	for _, exec := range s.LoadSessionExecs() {
		exec.State = enum.SCHEDULED
		utils.GenericRawUpdateOne(exec, exec.GetID(), workflow_execution.NewAccessor(adminReq))
	}
	return nil
}
|
|
||||||
@@ -1,254 +0,0 @@
|
|||||||
package utils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
oclib "cloud.o-forge.io/core/oc-lib"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/resources"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workflow"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
// BookingResource identifies one bookable resource together with the peer
// that hosts it and the concrete instance selected by the user.
type BookingResource struct {
	ID         string // resource MongoDB _id
	PeerPID    string // peer public PeerID (PID) — PlannerCache key
	InstanceID string // resolved from WorkflowSchedule.SelectedInstances
}
|
|
||||||
|
|
||||||
// CollectBookingResources returns unique storage and compute resources from the
// workflow graph. For each resource the selected instance ID is resolved from
// selectedInstances (the scheduler's SelectedInstances ConfigItem) so the planner
// check targets the exact instance chosen by the user.
//
// NOTE(review): the result map is keyed by peer PID, so when several resources
// share the same creator peer only the last one collected survives — this
// conflicts with the "unique resources" wording above; confirm intended.
func CollectBookingResources(wf *workflow.Workflow, selectedInstances workflow.ConfigItem) map[string]BookingResource {
	if wf.Graph == nil {
		return nil
	}
	seen := map[string]bool{}
	result := map[string]BookingResource{}

	// Resolve MongoDB peer _id (DID) → public PeerID (PID) used as PlannerCache key.
	// Lookups are memoized in didToPID to avoid repeated DB loads.
	peerAccess := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
	didToPID := map[string]string{}
	resolvePID := func(did string) string {
		if pid, ok := didToPID[did]; ok {
			return pid
		}
		if data := peerAccess.LoadOne(did); data.Data != nil {
			if p := data.ToPeer(); p != nil {
				didToPID[did] = p.PeerID
				return p.PeerID
			}
		}
		return ""
	}

	// resolveInstanceID maps a resource to the ID of the instance selected for
	// it, or "" when the resource type is unknown or no instance is selected.
	resolveInstanceID := func(res interface {
		GetID() string
		GetCreatorID() string
	}) string {
		idx := selectedInstances.Get(res.GetID())
		switch r := res.(type) {
		case *resources.StorageResource:
			if inst := r.GetSelectedInstance(idx); inst != nil {
				return inst.GetID()
			}
		case *resources.ComputeResource:
			if inst := r.GetSelectedInstance(idx); inst != nil {
				return inst.GetID()
			}
		case *resources.ServiceResource:
			if inst := r.GetSelectedInstance(idx); inst != nil {
				return inst.GetID()
			}
		}
		return ""
	}

	// Storage resources.
	for _, item := range wf.GetGraphItems(wf.Graph.IsStorage) {
		_, res := item.GetResource()
		if res == nil {
			continue
		}
		id := res.GetID()
		if seen[id] {
			continue
		}
		pid := resolvePID(res.GetCreatorID())
		if pid == "" {
			continue
		}
		seen[id] = true
		result[pid] = BookingResource{
			ID:         id,
			PeerPID:    pid,
			InstanceID: resolveInstanceID(res),
		}
	}

	// Compute resources.
	for _, item := range wf.GetGraphItems(wf.Graph.IsCompute) {
		_, res := item.GetResource()
		if res == nil {
			continue
		}
		id := res.GetID()
		if seen[id] {
			continue
		}
		pid := resolvePID(res.GetCreatorID())
		if pid == "" {
			continue
		}
		seen[id] = true
		result[pid] = BookingResource{
			ID:         id,
			PeerPID:    pid,
			InstanceID: resolveInstanceID(res),
		}
	}

	// HOSTED services: capacity is capped by MaxConcurrent on the LiveService.
	// The peer to watch is the creator (who operates the service).
	// DEPLOYMENT services are covered through their linked compute unit.
	for _, item := range wf.GetGraphItems(wf.Graph.IsService) {
		_, res := item.GetResource()
		if res == nil {
			continue
		}
		svc := res.(*resources.ServiceResource)
		idx := selectedInstances.Get(svc.GetID())
		inst := svc.GetSelectedInstance(idx)
		if inst == nil {
			continue
		}
		if inst.(*resources.ServiceInstance).Mode != resources.HOSTED {
			continue
		}
		id := svc.GetID()
		if seen[id] {
			continue
		}
		pid := resolvePID(svc.GetCreatorID())
		if pid == "" {
			continue
		}
		seen[id] = true
		result[pid] = BookingResource{
			ID:         id,
			PeerPID:    pid,
			InstanceID: resolveInstanceID(res),
		}
	}

	return result
}
|
|
||||||
|
|
||||||
// GetWorkflowPeerIDs loads the workflow and returns the deduplicated list of
// creator peer IDs for all its storage and compute resources.
// These are the peers whose planners must be watched by a check stream.
// HOSTED services and dynamic resources contribute their creator/allowed peers
// as well; the collected MongoDB _ids are finally resolved to public PeerIDs.
//
// NOTE(review): the service branch here checks Instances[0].Mode while
// CollectBookingResources checks the user-selected instance — confirm the
// mismatch is intentional.
func GetWorkflowPeerIDs(wfID string, request *tools.APIRequest) ([]string, error) {
	obj, code, err := workflow.NewAccessor(request).LoadOne(wfID)
	if code != 200 || err != nil {
		msg := "could not load workflow " + wfID
		if err != nil {
			msg += ": " + err.Error()
		}
		return nil, errors.New(msg)
	}
	wf := obj.(*workflow.Workflow)
	if wf.Graph == nil {
		return nil, nil
	}
	seen := map[string]bool{}
	var peerIDs []string
	// Storage resources: collect each distinct creator.
	for _, item := range wf.GetGraphItems(wf.Graph.IsStorage) {
		_, res := item.GetResource()
		if res == nil {
			continue
		}
		if id := res.GetCreatorID(); id != "" && !seen[id] {
			seen[id] = true
			peerIDs = append(peerIDs, id)
		}
	}
	// Compute resources: collect each distinct creator.
	for _, item := range wf.GetGraphItems(wf.Graph.IsCompute) {
		_, res := item.GetResource()
		if res == nil {
			continue
		}
		if id := res.GetCreatorID(); id != "" && !seen[id] {
			seen[id] = true
			peerIDs = append(peerIDs, id)
		}
	}
	// Services: only HOSTED ones add their operator (creator) peer.
	for _, item := range wf.GetGraphItems(wf.Graph.IsService) {
		_, res := item.GetResource()
		if res == nil {
			continue
		}
		svc := res.(*resources.ServiceResource)
		if len(svc.Instances) == 0 || svc.Instances[0].Mode != resources.HOSTED {
			continue
		}
		if id := svc.GetCreatorID(); id != "" && !seen[id] {
			seen[id] = true
			peerIDs = append(peerIDs, id)
		}
	}
	// Dynamic resources: every allowed peer for the requester counts.
	for _, item := range wf.GetGraphItems(wf.Graph.IsDynamic) {
		_, res := item.GetResource()
		if res == nil {
			continue
		}
		d := res.(*resources.DynamicResource)
		d.SetAllowedInstances(request)
		for _, creatorID := range d.PeerIds {
			if creatorID != "" && !seen[creatorID] {
				seen[creatorID] = true
				peerIDs = append(peerIDs, creatorID)
			}
		}
	}

	// Resolve collected MongoDB _ids to public PeerIDs; unresolvable peers
	// are dropped silently.
	realPeersID := []string{}
	access := oclib.NewRequestAdmin(oclib.LibDataEnum(tools.PEER), nil)
	for _, id := range peerIDs {
		if data := access.LoadOne(id); data.Data != nil {
			realPeersID = append(realPeersID, data.ToPeer().PeerID)
		}
	}
	return realPeersID, nil
}
|
|
||||||
|
|
||||||
func FormatOptTime(t *time.Time) string {
|
|
||||||
if t == nil {
|
|
||||||
return "open"
|
|
||||||
}
|
|
||||||
return t.Format(time.RFC3339)
|
|
||||||
}
|
|
||||||
|
|
||||||
func Notify[T interface{}](mu *sync.RWMutex, registry map[string][]chan T, key string, toAdd T) {
|
|
||||||
mu.RLock()
|
|
||||||
subs := registry[key]
|
|
||||||
mu.RUnlock()
|
|
||||||
for _, ch := range subs {
|
|
||||||
select {
|
|
||||||
case ch <- toAdd:
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func Propalgate(peerID string, message tools.PropalgationMessage) {
|
|
||||||
b, _ := json.Marshal(message)
|
|
||||||
fmt.Println("Propalgate")
|
|
||||||
tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
|
|
||||||
FromApp: "oc-scheduler",
|
|
||||||
Datatype: -1,
|
|
||||||
Method: int(tools.PROPALGATION_EVENT),
|
|
||||||
Payload: b,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
45
main.go
45
main.go
@@ -1,42 +1,39 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"oc-scheduler/conf"
|
|
||||||
"oc-scheduler/infrastructure"
|
|
||||||
"oc-scheduler/infrastructure/planner"
|
|
||||||
"oc-scheduler/infrastructure/scheduling_resources"
|
|
||||||
_ "oc-scheduler/routers"
|
_ "oc-scheduler/routers"
|
||||||
|
|
||||||
oclib "cloud.o-forge.io/core/oc-lib"
|
oclib "cloud.o-forge.io/core/oc-lib"
|
||||||
"cloud.o-forge.io/core/oc-lib/config"
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
beego "github.com/beego/beego/v2/server/web"
|
beego "github.com/beego/beego/v2/server/web"
|
||||||
)
|
)
|
||||||
|
|
||||||
const appname = "oc-scheduler"
|
const appname = "oc-scheduler"
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
o := oclib.GetConfLoader(appname)
|
|
||||||
conf.GetConfig().PrepLeadSeconds = o.GetIntDefault("PREP_LEAD_SECONDS", 120)
|
|
||||||
conf.GetConfig().KubeHost = o.GetStringDefault("KUBERNETES_SERVICE_HOST", "kubernetes.default.svc.cluster.local")
|
|
||||||
conf.GetConfig().KubePort = o.GetStringDefault("KUBERNETES_SERVICE_PORT", "6443")
|
|
||||||
|
|
||||||
conf.GetConfig().KubeCA = o.GetStringDefault("KUBE_CA", "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkakNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTnpNeE1qY3dPVFl3SGhjTk1qWXdNekV3TURjeE9ERTJXaGNOTXpZd016QTNNRGN4T0RFMgpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTnpNeE1qY3dPVFl3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFReG81cXQ0MGxEekczRHJKTE1wRVBrd0ZBY1FmbC8vVE1iWjZzemMreHAKbmVzVzRTSTdXK1lWdFpRYklmV2xBMTRaazQvRFlDMHc1YlgxZU94RVVuL0pvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVXBLM2pGK25IRlZSbDcwb3ZRVGZnCmZabGNQZE13Q2dZSUtvWkl6ajBFQXdJRFJ3QXdSQUlnVnkyaUx0Y0xaYm1vTnVoVHdKbU5sWlo3RVlBYjJKNW0KSjJYbG1UbVF5a2tDSUhLbzczaDBkdEtUZTlSa0NXYTJNdStkS1FzOXRFU0tBV0x1emlnYXBHYysKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=")
|
// Init the oc-lib
|
||||||
conf.GetConfig().KubeCert = o.GetStringDefault("KUBE_CERT", "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrakNDQVRlZ0F3SUJBZ0lJQUkvSUg2R2Rodm93Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOemN6TVRJM01EazJNQjRYRFRJMk1ETXhNREEzTVRneE5sb1hEVEkzTURNeApNREEzTVRneE5sb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJQTTdBVEZQSmFMMjUrdzAKUU1vZUIxV2hBRW4vWnViM0tSRERrYnowOFhwQWJ2akVpdmdnTkdpdG4wVmVsaEZHamRmNHpBT29Nd1J3M21kbgpYSGtHVDB5alNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCUVZLOThaMEMxcFFyVFJSMGVLZHhIa2o0ejFJREFLQmdncWhrak9QUVFEQWdOSkFEQkcKQWlFQXZYWll6Zk9iSUtlWTRtclNsRmt4ZS80a0E4K01ieDc1UDFKRmNlRS8xdGNDSVFDNnM0ZXlZclhQYmNWSgpxZm5EamkrZ1RacGttN0tWSTZTYTlZN2FSRGFabUE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlCZURDQ0FSMmdBd0lCQWdJQkFEQUtCZ2dxaGtqT1BRUURBakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwClpXNTBMV05oUURFM056TXhNamN3T1RZd0hoY05Nall3TXpFd01EY3hPREUyV2hjTk16WXdNekEzTURjeE9ERTIKV2pBak1TRXdId1lEVlFRRERCaHJNM010WTJ4cFpXNTBMV05oUURFM056TXhNamN3T1RZd1dUQVRCZ2NxaGtqTwpQUUlCQmdncWhrak9QUU1CQndOQ0FBUzV1NGVJbStvVnV1SFI0aTZIOU1kVzlyUHdJbFVPNFhIMEJWaDRUTGNlCkNkMnRBbFVXUW5FakxMdlpDWlVaYTlzTlhKOUVtWWt5S0dtQWR2TE9FbUVrbzBJd1FEQU9CZ05WSFE4QkFmOEUKQkFNQ0FxUXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WSFE0RUZnUVVGU3ZmR2RBdGFVSzAwVWRIaW5jUgo1SStNOVNBd0NnWUlLb1pJemowRUF3SURTUUF3UmdJaEFMY2xtQnR4TnpSVlBvV2hoVEVKSkM1Z3VNSGsvcFZpCjFvYXJ2UVJxTWRKcUFpRUEyR1dNTzlhZFFYTEQwbFZKdHZMVkc1M3I0M0lxMHpEUUQwbTExMVZyL1MwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==")
|
oclib.Init(appname)
|
||||||
conf.GetConfig().KubeData = o.GetStringDefault("KUBE_DATA", "LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUVkSTRZN3lRU1ZwRGNrblhsQmJEaXBWZHRMWEVsYVBkN3VBZHdBWFFya2xvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFOHpzQk1VOGxvdmJuN0RSQXloNEhWYUVBU2Y5bTV2Y3BFTU9SdlBUeGVrQnUrTVNLK0NBMAphSzJmUlY2V0VVYU4xL2pNQTZnekJIRGVaMmRjZVFaUFRBPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=")
|
|
||||||
|
|
||||||
oclib.InitAPI(appname, map[string][]string{
|
// Load the right config file
|
||||||
"/oc/check/:id": {"GET"},
|
o := oclib.GetConfLoader()
|
||||||
"/oc/logs/:id": {"GET"},
|
|
||||||
})
|
|
||||||
go planner.InitPlanner()
|
|
||||||
go scheduling_resources.InitSchedulingResource()
|
|
||||||
|
|
||||||
go infrastructure.ListenNATS()
|
// feed the library with the loaded config
|
||||||
go infrastructure.InitSelfPlanner()
|
oclib.SetConfig(
|
||||||
go infrastructure.RecoverDraftExecutions()
|
o.GetStringDefault("MONGO_URL", "mongodb://127.0.0.1:27017"),
|
||||||
go infrastructure.WatchExecutions()
|
o.GetStringDefault("MONGO_DATABASE", "DC_myDC"),
|
||||||
|
o.GetStringDefault("NATS_URL", "nats://localhost:4222"),
|
||||||
|
o.GetStringDefault("LOKI_URL", ""),
|
||||||
|
o.GetStringDefault("LOG_LEVEL", "info"),
|
||||||
|
)
|
||||||
|
|
||||||
|
// Beego init
|
||||||
|
beego.BConfig.AppName = appname
|
||||||
|
beego.BConfig.Listen.HTTPPort = o.GetIntDefault("port", 8080)
|
||||||
|
beego.BConfig.WebConfig.DirectoryIndex = true
|
||||||
|
beego.BConfig.WebConfig.StaticDir["/swagger"] = "swagger"
|
||||||
|
api := &tools.API{}
|
||||||
|
api.Discovered(beego.BeeApp.Handlers.GetAllControllerInfo())
|
||||||
|
|
||||||
if config.GetConfig().IsApi {
|
|
||||||
beego.Run()
|
beego.Run()
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|||||||
BIN
oc-scheduler
BIN
oc-scheduler
Binary file not shown.
@@ -1,9 +0,0 @@
|
|||||||
créer une API inspirée de celle courante (oc-scheduler), nommée oc-billing, dans le dossier parent (..)
|
|
||||||
cette api dispose en controlleur seulement version.go, adapté.
|
|
||||||
|
|
||||||
son infrastructure n'est pas du tout similaire à celle d'oc-scheduler : considère-la vide lors de la copie.
|
|
||||||
Ensuite, voici ce qui est entendu : oc-billing communique par NATS avec oc-scheduler.
|
|
||||||
|
|
||||||
Lors d'un confirm oc-scheduler doit émettre sur nats sur un channel dédié, dans le message doit être inclus :
|
|
||||||
le workflow_scheduler validé.
|
|
||||||
À la réception, oc-billing, selon le mode de paiement attendu par ressource (dans le scheduler), drafte la bill à régler « MAINTENANT ».
|
|
||||||
@@ -7,69 +7,6 @@ import (
|
|||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
|
||||||
beego.GlobalControllerRouter["oc-scheduler/controllers:BookingController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:BookingController"],
|
|
||||||
beego.ControllerComments{
|
|
||||||
Method: "GetAll",
|
|
||||||
Router: `/`,
|
|
||||||
AllowHTTPMethods: []string{"get"},
|
|
||||||
MethodParams: param.Make(),
|
|
||||||
Filters: nil,
|
|
||||||
Params: nil})
|
|
||||||
|
|
||||||
beego.GlobalControllerRouter["oc-scheduler/controllers:BookingController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:BookingController"],
|
|
||||||
beego.ControllerComments{
|
|
||||||
Method: "Get",
|
|
||||||
Router: `/:id`,
|
|
||||||
AllowHTTPMethods: []string{"get"},
|
|
||||||
MethodParams: param.Make(),
|
|
||||||
Filters: nil,
|
|
||||||
Params: nil})
|
|
||||||
|
|
||||||
beego.GlobalControllerRouter["oc-scheduler/controllers:BookingController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:BookingController"],
|
|
||||||
beego.ControllerComments{
|
|
||||||
Method: "Search",
|
|
||||||
Router: `/search/:start_date/:end_date`,
|
|
||||||
AllowHTTPMethods: []string{"get"},
|
|
||||||
MethodParams: param.Make(),
|
|
||||||
Filters: nil,
|
|
||||||
Params: nil})
|
|
||||||
|
|
||||||
beego.GlobalControllerRouter["oc-scheduler/controllers:ExecutionVerificationController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:ExecutionVerificationController"],
|
|
||||||
beego.ControllerComments{
|
|
||||||
Method: "GetAll",
|
|
||||||
Router: `/`,
|
|
||||||
AllowHTTPMethods: []string{"get"},
|
|
||||||
MethodParams: param.Make(),
|
|
||||||
Filters: nil,
|
|
||||||
Params: nil})
|
|
||||||
|
|
||||||
beego.GlobalControllerRouter["oc-scheduler/controllers:ExecutionVerificationController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:ExecutionVerificationController"],
|
|
||||||
beego.ControllerComments{
|
|
||||||
Method: "Get",
|
|
||||||
Router: `/:id`,
|
|
||||||
AllowHTTPMethods: []string{"get"},
|
|
||||||
MethodParams: param.Make(),
|
|
||||||
Filters: nil,
|
|
||||||
Params: nil})
|
|
||||||
|
|
||||||
beego.GlobalControllerRouter["oc-scheduler/controllers:ExecutionVerificationController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:ExecutionVerificationController"],
|
|
||||||
beego.ControllerComments{
|
|
||||||
Method: "Put",
|
|
||||||
Router: `/:id`,
|
|
||||||
AllowHTTPMethods: []string{"put"},
|
|
||||||
MethodParams: param.Make(),
|
|
||||||
Filters: nil,
|
|
||||||
Params: nil})
|
|
||||||
|
|
||||||
beego.GlobalControllerRouter["oc-scheduler/controllers:LokiController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:LokiController"],
|
|
||||||
beego.ControllerComments{
|
|
||||||
Method: "GetLogs",
|
|
||||||
Router: `/:id`,
|
|
||||||
AllowHTTPMethods: []string{"post"},
|
|
||||||
MethodParams: param.Make(),
|
|
||||||
Filters: nil,
|
|
||||||
Params: nil})
|
|
||||||
|
|
||||||
beego.GlobalControllerRouter["oc-scheduler/controllers:VersionController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:VersionController"],
|
beego.GlobalControllerRouter["oc-scheduler/controllers:VersionController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:VersionController"],
|
||||||
beego.ControllerComments{
|
beego.ControllerComments{
|
||||||
Method: "GetAll",
|
Method: "GetAll",
|
||||||
@@ -106,15 +43,6 @@ func init() {
|
|||||||
Filters: nil,
|
Filters: nil,
|
||||||
Params: nil})
|
Params: nil})
|
||||||
|
|
||||||
beego.GlobalControllerRouter["oc-scheduler/controllers:WorkflowExecutionController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:WorkflowExecutionController"],
|
|
||||||
beego.ControllerComments{
|
|
||||||
Method: "Delete",
|
|
||||||
Router: `/:id`,
|
|
||||||
AllowHTTPMethods: []string{"delete"},
|
|
||||||
MethodParams: param.Make(),
|
|
||||||
Filters: nil,
|
|
||||||
Params: nil})
|
|
||||||
|
|
||||||
beego.GlobalControllerRouter["oc-scheduler/controllers:WorkflowExecutionController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:WorkflowExecutionController"],
|
beego.GlobalControllerRouter["oc-scheduler/controllers:WorkflowExecutionController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:WorkflowExecutionController"],
|
||||||
beego.ControllerComments{
|
beego.ControllerComments{
|
||||||
Method: "Search",
|
Method: "Search",
|
||||||
@@ -133,6 +61,15 @@ func init() {
|
|||||||
Filters: nil,
|
Filters: nil,
|
||||||
Params: nil})
|
Params: nil})
|
||||||
|
|
||||||
|
beego.GlobalControllerRouter["oc-scheduler/controllers:WorkflowSchedulerController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:WorkflowSchedulerController"],
|
||||||
|
beego.ControllerComments{
|
||||||
|
Method: "Schedule",
|
||||||
|
Router: `/:id`,
|
||||||
|
AllowHTTPMethods: []string{"post"},
|
||||||
|
MethodParams: param.Make(),
|
||||||
|
Filters: nil,
|
||||||
|
Params: nil})
|
||||||
|
|
||||||
beego.GlobalControllerRouter["oc-scheduler/controllers:WorkflowSchedulerController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:WorkflowSchedulerController"],
|
beego.GlobalControllerRouter["oc-scheduler/controllers:WorkflowSchedulerController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:WorkflowSchedulerController"],
|
||||||
beego.ControllerComments{
|
beego.ControllerComments{
|
||||||
Method: "UnSchedule",
|
Method: "UnSchedule",
|
||||||
@@ -142,13 +79,4 @@ func init() {
|
|||||||
Filters: nil,
|
Filters: nil,
|
||||||
Params: nil})
|
Params: nil})
|
||||||
|
|
||||||
beego.GlobalControllerRouter["oc-scheduler/controllers:WorkflowSchedulerController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:WorkflowSchedulerController"],
|
|
||||||
beego.ControllerComments{
|
|
||||||
Method: "SearchScheduledDraftOrder",
|
|
||||||
Router: `/order/:id`,
|
|
||||||
AllowHTTPMethods: []string{"get"},
|
|
||||||
MethodParams: param.Make(),
|
|
||||||
Filters: nil,
|
|
||||||
Params: nil})
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,36 +8,20 @@
|
|||||||
package routers
|
package routers
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"net/http"
|
|
||||||
"oc-scheduler/controllers"
|
"oc-scheduler/controllers"
|
||||||
|
|
||||||
beego "github.com/beego/beego/v2/server/web"
|
beego "github.com/beego/beego/v2/server/web"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
ns := beego.NewNamespace("/oc",
|
ns := beego.NewNamespace("/oc/",
|
||||||
beego.NSInclude(
|
|
||||||
&controllers.WorkflowSchedulerController{},
|
|
||||||
),
|
|
||||||
beego.NSNamespace("/loki",
|
|
||||||
beego.NSInclude(
|
|
||||||
&controllers.LokiController{},
|
|
||||||
),
|
|
||||||
),
|
|
||||||
beego.NSNamespace("/booking",
|
|
||||||
beego.NSInclude(
|
|
||||||
&controllers.BookingController{},
|
|
||||||
),
|
|
||||||
),
|
|
||||||
beego.NSNamespace("/verification",
|
|
||||||
beego.NSInclude(
|
|
||||||
&controllers.ExecutionVerificationController{},
|
|
||||||
),
|
|
||||||
),
|
|
||||||
beego.NSNamespace("/execution",
|
|
||||||
beego.NSInclude(
|
beego.NSInclude(
|
||||||
&controllers.WorkflowExecutionController{},
|
&controllers.WorkflowExecutionController{},
|
||||||
),
|
),
|
||||||
|
beego.NSNamespace("/workflow",
|
||||||
|
beego.NSInclude(
|
||||||
|
&controllers.WorkflowSchedulerController{},
|
||||||
|
),
|
||||||
),
|
),
|
||||||
beego.NSNamespace("/version",
|
beego.NSNamespace("/version",
|
||||||
beego.NSInclude(
|
beego.NSInclude(
|
||||||
@@ -47,11 +31,4 @@ func init() {
|
|||||||
)
|
)
|
||||||
|
|
||||||
beego.AddNamespace(ns)
|
beego.AddNamespace(ns)
|
||||||
|
|
||||||
// WebSocket routes registered outside the Beego pipeline to avoid the
|
|
||||||
// spurious WriteHeader that prevents the 101 Switching Protocols upgrade.
|
|
||||||
beego.Handler("/oc/check/:id", http.HandlerFunc(controllers.CheckStreamHandler))
|
|
||||||
beego.Handler("/oc/logs/:id", http.HandlerFunc(controllers.LogsStreamHandler))
|
|
||||||
beego.Handler("/oc/booking/stream", http.HandlerFunc(controllers.BookingStreamHandler))
|
|
||||||
beego.Handler("/oc/execution/stream", http.HandlerFunc(controllers.ExecutionStreamHandler))
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
{
|
{
|
||||||
"port":8090,
|
"port":8080,
|
||||||
"MONGO_URL":"mongodb://localhost:27017/",
|
"MONGO_URL":"mongodb://localhost:27017/",
|
||||||
"MONGO_DATABASE":"DC_myDC",
|
"MONGO_DATABASE":"DC_myDC"
|
||||||
"LOKI_URL": "http://localhost:3100"
|
|
||||||
}
|
}
|
||||||
@@ -13,117 +13,12 @@
|
|||||||
"url": "https://www.gnu.org/licenses/agpl-3.0.html"
|
"url": "https://www.gnu.org/licenses/agpl-3.0.html"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"basePath": "/oc",
|
"basePath": "/oc/",
|
||||||
"paths": {
|
"paths": {
|
||||||
"/booking/": {
|
"/": {
|
||||||
"get": {
|
"get": {
|
||||||
"tags": [
|
"tags": [
|
||||||
"booking"
|
"oc-scheduler/controllersWorkflowExecutionController"
|
||||||
],
|
|
||||||
"description": "find booking by id\n\u003cbr\u003e",
|
|
||||||
"operationId": "BookingController.GetAll",
|
|
||||||
"parameters": [
|
|
||||||
{
|
|
||||||
"in": "query",
|
|
||||||
"name": "is_draft",
|
|
||||||
"description": "draft wished",
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"in": "query",
|
|
||||||
"name": "offset",
|
|
||||||
"description": "false",
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"in": "query",
|
|
||||||
"name": "limit",
|
|
||||||
"description": "false",
|
|
||||||
"type": "string"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"responses": {
|
|
||||||
"200": {
|
|
||||||
"description": "{booking} models.booking"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"/booking/search/{start_date}/{end_date}": {
|
|
||||||
"get": {
|
|
||||||
"tags": [
|
|
||||||
"booking"
|
|
||||||
],
|
|
||||||
"description": "search bookings\n\u003cbr\u003e",
|
|
||||||
"operationId": "BookingController.Search",
|
|
||||||
"parameters": [
|
|
||||||
{
|
|
||||||
"in": "path",
|
|
||||||
"name": "start_date",
|
|
||||||
"description": "the word search you want to get",
|
|
||||||
"required": true,
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"in": "path",
|
|
||||||
"name": "end_date",
|
|
||||||
"description": "the word search you want to get",
|
|
||||||
"required": true,
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"in": "query",
|
|
||||||
"name": "is_draft",
|
|
||||||
"description": "draft wished",
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"in": "query",
|
|
||||||
"name": "offset",
|
|
||||||
"description": "false",
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"in": "query",
|
|
||||||
"name": "limit",
|
|
||||||
"description": "false",
|
|
||||||
"type": "string"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"responses": {
|
|
||||||
"200": {
|
|
||||||
"description": "{workspace} models.workspace"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"/booking/{id}": {
|
|
||||||
"get": {
|
|
||||||
"tags": [
|
|
||||||
"booking"
|
|
||||||
],
|
|
||||||
"description": "find booking by id\n\u003cbr\u003e",
|
|
||||||
"operationId": "BookingController.Get",
|
|
||||||
"parameters": [
|
|
||||||
{
|
|
||||||
"in": "path",
|
|
||||||
"name": "id",
|
|
||||||
"description": "the id you want to get",
|
|
||||||
"required": true,
|
|
||||||
"type": "string"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"responses": {
|
|
||||||
"200": {
|
|
||||||
"description": "{booking} models.booking"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"/execution/": {
|
|
||||||
"get": {
|
|
||||||
"tags": [
|
|
||||||
"execution"
|
|
||||||
],
|
],
|
||||||
"description": "find workflow by workflowid\n\u003cbr\u003e",
|
"description": "find workflow by workflowid\n\u003cbr\u003e",
|
||||||
"operationId": "WorkflowExecutionController.GetAll",
|
"operationId": "WorkflowExecutionController.GetAll",
|
||||||
@@ -133,18 +28,6 @@
|
|||||||
"name": "is_draft",
|
"name": "is_draft",
|
||||||
"description": "draft wished",
|
"description": "draft wished",
|
||||||
"type": "string"
|
"type": "string"
|
||||||
},
|
|
||||||
{
|
|
||||||
"in": "query",
|
|
||||||
"name": "offset",
|
|
||||||
"description": "false",
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"in": "query",
|
|
||||||
"name": "limit",
|
|
||||||
"description": "false",
|
|
||||||
"type": "string"
|
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"responses": {
|
"responses": {
|
||||||
@@ -154,10 +37,10 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"/execution/search/{search}": {
|
"/search/{search}": {
|
||||||
"get": {
|
"get": {
|
||||||
"tags": [
|
"tags": [
|
||||||
"execution"
|
"oc-scheduler/controllersWorkflowExecutionController"
|
||||||
],
|
],
|
||||||
"description": "find compute by key word\n\u003cbr\u003e",
|
"description": "find compute by key word\n\u003cbr\u003e",
|
||||||
"operationId": "WorkflowExecutionController.Search",
|
"operationId": "WorkflowExecutionController.Search",
|
||||||
@@ -174,18 +57,6 @@
|
|||||||
"name": "is_draft",
|
"name": "is_draft",
|
||||||
"description": "draft wished",
|
"description": "draft wished",
|
||||||
"type": "string"
|
"type": "string"
|
||||||
},
|
|
||||||
{
|
|
||||||
"in": "query",
|
|
||||||
"name": "offset",
|
|
||||||
"description": "false",
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"in": "query",
|
|
||||||
"name": "limit",
|
|
||||||
"description": "false",
|
|
||||||
"type": "string"
|
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"responses": {
|
"responses": {
|
||||||
@@ -195,10 +66,10 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"/execution/search/{start_date}/{end_date}": {
|
"/search/{start_date}/{end_date}": {
|
||||||
"get": {
|
"get": {
|
||||||
"tags": [
|
"tags": [
|
||||||
"execution"
|
"oc-scheduler/controllersWorkflowExecutionController"
|
||||||
],
|
],
|
||||||
"description": "search workspace\n\u003cbr\u003e",
|
"description": "search workspace\n\u003cbr\u003e",
|
||||||
"operationId": "WorkflowExecutionController.SearchPerDate",
|
"operationId": "WorkflowExecutionController.SearchPerDate",
|
||||||
@@ -222,18 +93,6 @@
|
|||||||
"name": "is_draft",
|
"name": "is_draft",
|
||||||
"description": "draft wished",
|
"description": "draft wished",
|
||||||
"type": "string"
|
"type": "string"
|
||||||
},
|
|
||||||
{
|
|
||||||
"in": "query",
|
|
||||||
"name": "offset",
|
|
||||||
"description": "false",
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"in": "query",
|
|
||||||
"name": "limit",
|
|
||||||
"description": "false",
|
|
||||||
"type": "string"
|
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"responses": {
|
"responses": {
|
||||||
@@ -243,197 +102,6 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"/execution/{id}": {
|
|
||||||
"get": {
|
|
||||||
"tags": [
|
|
||||||
"execution"
|
|
||||||
],
|
|
||||||
"description": "find workflow by workflowid\n\u003cbr\u003e",
|
|
||||||
"operationId": "WorkflowExecutionController.Get",
|
|
||||||
"parameters": [
|
|
||||||
{
|
|
||||||
"in": "path",
|
|
||||||
"name": "id",
|
|
||||||
"description": "the workflowid you want to get",
|
|
||||||
"required": true,
|
|
||||||
"type": "string"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"responses": {
|
|
||||||
"200": {
|
|
||||||
"description": "{workflow} models.workflow"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"delete": {
|
|
||||||
"tags": [
|
|
||||||
"execution"
|
|
||||||
],
|
|
||||||
"description": "find workflow by workflowid\n\u003cbr\u003e",
|
|
||||||
"operationId": "WorkflowExecutionController.Delete",
|
|
||||||
"parameters": [
|
|
||||||
{
|
|
||||||
"in": "path",
|
|
||||||
"name": "id",
|
|
||||||
"description": "the workflowid you want to get",
|
|
||||||
"required": true,
|
|
||||||
"type": "string"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"responses": {
|
|
||||||
"200": {
|
|
||||||
"description": "{workflow} models.workflow"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"/loki/{id}": {
|
|
||||||
"post": {
|
|
||||||
"tags": [
|
|
||||||
"loki"
|
|
||||||
],
|
|
||||||
"description": "get logs\n\u003cbr\u003e",
|
|
||||||
"operationId": "LokiController.GetLogs",
|
|
||||||
"parameters": [
|
|
||||||
{
|
|
||||||
"in": "body",
|
|
||||||
"name": "body",
|
|
||||||
"description": "The compute content",
|
|
||||||
"required": true,
|
|
||||||
"schema": {
|
|
||||||
"$ref": "#/definitions/models.compute"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"responses": {
|
|
||||||
"200": {
|
|
||||||
"description": "{workspace} models.workspace"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"/order/{id}": {
|
|
||||||
"get": {
|
|
||||||
"tags": [
|
|
||||||
"oc-scheduler/controllersWorkflowSchedulerController"
|
|
||||||
],
|
|
||||||
"description": "search draft order for a workflow\n\u003cbr\u003e",
|
|
||||||
"operationId": "WorkflowSchedulerController.SearchScheduledDraftOrder",
|
|
||||||
"parameters": [
|
|
||||||
{
|
|
||||||
"in": "path",
|
|
||||||
"name": "id",
|
|
||||||
"description": "id execution",
|
|
||||||
"required": true,
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"in": "query",
|
|
||||||
"name": "offset",
|
|
||||||
"description": "false",
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"in": "query",
|
|
||||||
"name": "limit",
|
|
||||||
"description": "false",
|
|
||||||
"type": "string"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"responses": {
|
|
||||||
"200": {
|
|
||||||
"description": "{workspace} models.workspace"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"/verification/": {
|
|
||||||
"get": {
|
|
||||||
"tags": [
|
|
||||||
"verification"
|
|
||||||
],
|
|
||||||
"description": "find verification by id\n\u003cbr\u003e",
|
|
||||||
"operationId": "ExecutionVerificationController.GetAll",
|
|
||||||
"parameters": [
|
|
||||||
{
|
|
||||||
"in": "query",
|
|
||||||
"name": "is_draft",
|
|
||||||
"description": "draft wished",
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"in": "query",
|
|
||||||
"name": "offset",
|
|
||||||
"description": "false",
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"in": "query",
|
|
||||||
"name": "limit",
|
|
||||||
"description": "false",
|
|
||||||
"type": "string"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"responses": {
|
|
||||||
"200": {
|
|
||||||
"description": "{booking} models.booking"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"/verification/{id}": {
|
|
||||||
"get": {
|
|
||||||
"tags": [
|
|
||||||
"verification"
|
|
||||||
],
|
|
||||||
"description": "find verification by id\n\u003cbr\u003e",
|
|
||||||
"operationId": "ExecutionVerificationController.Get",
|
|
||||||
"parameters": [
|
|
||||||
{
|
|
||||||
"in": "path",
|
|
||||||
"name": "id",
|
|
||||||
"description": "the id you want to get",
|
|
||||||
"required": true,
|
|
||||||
"type": "string"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"responses": {
|
|
||||||
"200": {
|
|
||||||
"description": "{booking} models.booking"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"put": {
|
|
||||||
"tags": [
|
|
||||||
"verification"
|
|
||||||
],
|
|
||||||
"description": "create computes\n\u003cbr\u003e",
|
|
||||||
"operationId": "ExecutionVerificationController.Update",
|
|
||||||
"parameters": [
|
|
||||||
{
|
|
||||||
"in": "path",
|
|
||||||
"name": "id",
|
|
||||||
"description": "the compute id you want to get",
|
|
||||||
"required": true,
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"in": "body",
|
|
||||||
"name": "body",
|
|
||||||
"description": "The compute content",
|
|
||||||
"required": true,
|
|
||||||
"schema": {
|
|
||||||
"$ref": "#/definitions/models.compute"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"responses": {
|
|
||||||
"200": {
|
|
||||||
"description": "{compute} models.compute"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"/version/": {
|
"/version/": {
|
||||||
"get": {
|
"get": {
|
||||||
"tags": [
|
"tags": [
|
||||||
@@ -462,38 +130,93 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"/{id}": {
|
"/workflow/{id}": {
|
||||||
|
"post": {
|
||||||
|
"tags": [
|
||||||
|
"workflow"
|
||||||
|
],
|
||||||
|
"description": "schedule workflow\n\u003cbr\u003e",
|
||||||
|
"operationId": "WorkflowSchedulerController.Schedule",
|
||||||
|
"parameters": [
|
||||||
|
{
|
||||||
|
"in": "path",
|
||||||
|
"name": "id",
|
||||||
|
"description": "id execution",
|
||||||
|
"required": true,
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"in": "body",
|
||||||
|
"name": "body",
|
||||||
|
"description": "The compute content",
|
||||||
|
"required": true,
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/models.compute"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"responses": {
|
||||||
|
"200": {
|
||||||
|
"description": "{workspace} models.workspace"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"delete": {
|
"delete": {
|
||||||
"tags": [
|
"tags": [
|
||||||
"oc-scheduler/controllersWorkflowSchedulerController"
|
"workflow"
|
||||||
],
|
],
|
||||||
"description": "unschedule a workflow execution: deletes its bookings on all peers then deletes the execution.\n\u003cbr\u003e",
|
"description": "schedule workflow\n\u003cbr\u003e",
|
||||||
"operationId": "WorkflowSchedulerController.UnSchedule",
|
"operationId": "WorkflowSchedulerController.UnSchedule",
|
||||||
"parameters": [
|
"parameters": [
|
||||||
{
|
{
|
||||||
"in": "path",
|
"in": "path",
|
||||||
"name": "id",
|
"name": "id",
|
||||||
"description": "execution id",
|
"description": "id execution",
|
||||||
|
"required": true,
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"in": "body",
|
||||||
|
"name": "body",
|
||||||
|
"description": "The compute content",
|
||||||
|
"required": true,
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/models.compute"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"responses": {
|
||||||
|
"200": {
|
||||||
|
"description": "{workspace} models.workspace"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"/{id}": {
|
||||||
|
"get": {
|
||||||
|
"tags": [
|
||||||
|
"oc-scheduler/controllersWorkflowExecutionController"
|
||||||
|
],
|
||||||
|
"description": "find workflow by workflowid\n\u003cbr\u003e",
|
||||||
|
"operationId": "WorkflowExecutionController.Get",
|
||||||
|
"parameters": [
|
||||||
|
{
|
||||||
|
"in": "path",
|
||||||
|
"name": "id",
|
||||||
|
"description": "the workflowid you want to get",
|
||||||
"required": true,
|
"required": true,
|
||||||
"type": "string"
|
"type": "string"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"responses": {
|
"responses": {
|
||||||
"200": {
|
"200": {
|
||||||
"description": "",
|
"description": "{workflow} models.workflow"
|
||||||
"schema": {
|
|
||||||
"$ref": "#/definitions/map[string]interface{}"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"definitions": {
|
"definitions": {
|
||||||
"map[string]interface{}": {
|
|
||||||
"title": "map[string]interface{}",
|
|
||||||
"type": "object"
|
|
||||||
},
|
|
||||||
"models.compute": {
|
"models.compute": {
|
||||||
"title": "compute",
|
"title": "compute",
|
||||||
"type": "object"
|
"type": "object"
|
||||||
@@ -501,23 +224,11 @@
|
|||||||
},
|
},
|
||||||
"tags": [
|
"tags": [
|
||||||
{
|
{
|
||||||
"name": "oc-scheduler/controllersWorkflowSchedulerController",
|
"name": "oc-scheduler/controllersWorkflowExecutionController",
|
||||||
"description": "Operations about workflow\n"
|
"description": "Operations about workflow\n"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "loki",
|
"name": "workflow",
|
||||||
"description": "Operations about workflow\n"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "booking",
|
|
||||||
"description": "Operations about workspace\n"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "verification",
|
|
||||||
"description": "Operations about workspace\n"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "execution",
|
|
||||||
"description": "Operations about workflow\n"
|
"description": "Operations about workflow\n"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -10,106 +10,12 @@ info:
|
|||||||
license:
|
license:
|
||||||
name: AGPL
|
name: AGPL
|
||||||
url: https://www.gnu.org/licenses/agpl-3.0.html
|
url: https://www.gnu.org/licenses/agpl-3.0.html
|
||||||
basePath: /oc
|
basePath: /oc/
|
||||||
paths:
|
paths:
|
||||||
/{id}:
|
/:
|
||||||
delete:
|
|
||||||
tags:
|
|
||||||
- oc-scheduler/controllersWorkflowSchedulerController
|
|
||||||
description: |-
|
|
||||||
unschedule a workflow execution: deletes its bookings on all peers then deletes the execution.
|
|
||||||
<br>
|
|
||||||
operationId: WorkflowSchedulerController.UnSchedule
|
|
||||||
parameters:
|
|
||||||
- in: path
|
|
||||||
name: id
|
|
||||||
description: execution id
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
responses:
|
|
||||||
"200":
|
|
||||||
description: ""
|
|
||||||
schema:
|
|
||||||
$ref: '#/definitions/map[string]interface{}'
|
|
||||||
/booking/:
|
|
||||||
get:
|
get:
|
||||||
tags:
|
tags:
|
||||||
- booking
|
- oc-scheduler/controllersWorkflowExecutionController
|
||||||
description: |-
|
|
||||||
find booking by id
|
|
||||||
<br>
|
|
||||||
operationId: BookingController.GetAll
|
|
||||||
parameters:
|
|
||||||
- in: query
|
|
||||||
name: is_draft
|
|
||||||
description: draft wished
|
|
||||||
type: string
|
|
||||||
- in: query
|
|
||||||
name: offset
|
|
||||||
description: "false"
|
|
||||||
type: string
|
|
||||||
- in: query
|
|
||||||
name: limit
|
|
||||||
description: "false"
|
|
||||||
type: string
|
|
||||||
responses:
|
|
||||||
"200":
|
|
||||||
description: '{booking} models.booking'
|
|
||||||
/booking/{id}:
|
|
||||||
get:
|
|
||||||
tags:
|
|
||||||
- booking
|
|
||||||
description: |-
|
|
||||||
find booking by id
|
|
||||||
<br>
|
|
||||||
operationId: BookingController.Get
|
|
||||||
parameters:
|
|
||||||
- in: path
|
|
||||||
name: id
|
|
||||||
description: the id you want to get
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
responses:
|
|
||||||
"200":
|
|
||||||
description: '{booking} models.booking'
|
|
||||||
/booking/search/{start_date}/{end_date}:
|
|
||||||
get:
|
|
||||||
tags:
|
|
||||||
- booking
|
|
||||||
description: |-
|
|
||||||
search bookings
|
|
||||||
<br>
|
|
||||||
operationId: BookingController.Search
|
|
||||||
parameters:
|
|
||||||
- in: path
|
|
||||||
name: start_date
|
|
||||||
description: the word search you want to get
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
- in: path
|
|
||||||
name: end_date
|
|
||||||
description: the word search you want to get
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
- in: query
|
|
||||||
name: is_draft
|
|
||||||
description: draft wished
|
|
||||||
type: string
|
|
||||||
- in: query
|
|
||||||
name: offset
|
|
||||||
description: "false"
|
|
||||||
type: string
|
|
||||||
- in: query
|
|
||||||
name: limit
|
|
||||||
description: "false"
|
|
||||||
type: string
|
|
||||||
responses:
|
|
||||||
"200":
|
|
||||||
description: '{workspace} models.workspace'
|
|
||||||
/execution/:
|
|
||||||
get:
|
|
||||||
tags:
|
|
||||||
- execution
|
|
||||||
description: |-
|
description: |-
|
||||||
find workflow by workflowid
|
find workflow by workflowid
|
||||||
<br>
|
<br>
|
||||||
@@ -119,21 +25,13 @@ paths:
|
|||||||
name: is_draft
|
name: is_draft
|
||||||
description: draft wished
|
description: draft wished
|
||||||
type: string
|
type: string
|
||||||
- in: query
|
|
||||||
name: offset
|
|
||||||
description: "false"
|
|
||||||
type: string
|
|
||||||
- in: query
|
|
||||||
name: limit
|
|
||||||
description: "false"
|
|
||||||
type: string
|
|
||||||
responses:
|
responses:
|
||||||
"200":
|
"200":
|
||||||
description: '{workflow} models.workflow'
|
description: '{workflow} models.workflow'
|
||||||
/execution/{id}:
|
/{id}:
|
||||||
get:
|
get:
|
||||||
tags:
|
tags:
|
||||||
- execution
|
- oc-scheduler/controllersWorkflowExecutionController
|
||||||
description: |-
|
description: |-
|
||||||
find workflow by workflowid
|
find workflow by workflowid
|
||||||
<br>
|
<br>
|
||||||
@@ -147,26 +45,10 @@ paths:
|
|||||||
responses:
|
responses:
|
||||||
"200":
|
"200":
|
||||||
description: '{workflow} models.workflow'
|
description: '{workflow} models.workflow'
|
||||||
delete:
|
/search/{search}:
|
||||||
tags:
|
|
||||||
- execution
|
|
||||||
description: |-
|
|
||||||
find workflow by workflowid
|
|
||||||
<br>
|
|
||||||
operationId: WorkflowExecutionController.Delete
|
|
||||||
parameters:
|
|
||||||
- in: path
|
|
||||||
name: id
|
|
||||||
description: the workflowid you want to get
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
responses:
|
|
||||||
"200":
|
|
||||||
description: '{workflow} models.workflow'
|
|
||||||
/execution/search/{search}:
|
|
||||||
get:
|
get:
|
||||||
tags:
|
tags:
|
||||||
- execution
|
- oc-scheduler/controllersWorkflowExecutionController
|
||||||
description: |-
|
description: |-
|
||||||
find compute by key word
|
find compute by key word
|
||||||
<br>
|
<br>
|
||||||
@@ -181,21 +63,13 @@ paths:
|
|||||||
name: is_draft
|
name: is_draft
|
||||||
description: draft wished
|
description: draft wished
|
||||||
type: string
|
type: string
|
||||||
- in: query
|
|
||||||
name: offset
|
|
||||||
description: "false"
|
|
||||||
type: string
|
|
||||||
- in: query
|
|
||||||
name: limit
|
|
||||||
description: "false"
|
|
||||||
type: string
|
|
||||||
responses:
|
responses:
|
||||||
"200":
|
"200":
|
||||||
description: '{compute} models.compute'
|
description: '{compute} models.compute'
|
||||||
/execution/search/{start_date}/{end_date}:
|
/search/{start_date}/{end_date}:
|
||||||
get:
|
get:
|
||||||
tags:
|
tags:
|
||||||
- execution
|
- oc-scheduler/controllersWorkflowExecutionController
|
||||||
description: |-
|
description: |-
|
||||||
search workspace
|
search workspace
|
||||||
<br>
|
<br>
|
||||||
@@ -215,123 +89,9 @@ paths:
|
|||||||
name: is_draft
|
name: is_draft
|
||||||
description: draft wished
|
description: draft wished
|
||||||
type: string
|
type: string
|
||||||
- in: query
|
|
||||||
name: offset
|
|
||||||
description: "false"
|
|
||||||
type: string
|
|
||||||
- in: query
|
|
||||||
name: limit
|
|
||||||
description: "false"
|
|
||||||
type: string
|
|
||||||
responses:
|
responses:
|
||||||
"200":
|
"200":
|
||||||
description: '{workspace} models.workspace'
|
description: '{workspace} models.workspace'
|
||||||
/loki/{id}:
|
|
||||||
post:
|
|
||||||
tags:
|
|
||||||
- loki
|
|
||||||
description: |-
|
|
||||||
get logs
|
|
||||||
<br>
|
|
||||||
operationId: LokiController.GetLogs
|
|
||||||
parameters:
|
|
||||||
- in: body
|
|
||||||
name: body
|
|
||||||
description: The compute content
|
|
||||||
required: true
|
|
||||||
schema:
|
|
||||||
$ref: '#/definitions/models.compute'
|
|
||||||
responses:
|
|
||||||
"200":
|
|
||||||
description: '{workspace} models.workspace'
|
|
||||||
/order/{id}:
|
|
||||||
get:
|
|
||||||
tags:
|
|
||||||
- oc-scheduler/controllersWorkflowSchedulerController
|
|
||||||
description: |-
|
|
||||||
search draft order for a workflow
|
|
||||||
<br>
|
|
||||||
operationId: WorkflowSchedulerController.SearchScheduledDraftOrder
|
|
||||||
parameters:
|
|
||||||
- in: path
|
|
||||||
name: id
|
|
||||||
description: id execution
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
- in: query
|
|
||||||
name: offset
|
|
||||||
description: "false"
|
|
||||||
type: string
|
|
||||||
- in: query
|
|
||||||
name: limit
|
|
||||||
description: "false"
|
|
||||||
type: string
|
|
||||||
responses:
|
|
||||||
"200":
|
|
||||||
description: '{workspace} models.workspace'
|
|
||||||
/verification/:
|
|
||||||
get:
|
|
||||||
tags:
|
|
||||||
- verification
|
|
||||||
description: |-
|
|
||||||
find verification by id
|
|
||||||
<br>
|
|
||||||
operationId: ExecutionVerificationController.GetAll
|
|
||||||
parameters:
|
|
||||||
- in: query
|
|
||||||
name: is_draft
|
|
||||||
description: draft wished
|
|
||||||
type: string
|
|
||||||
- in: query
|
|
||||||
name: offset
|
|
||||||
description: "false"
|
|
||||||
type: string
|
|
||||||
- in: query
|
|
||||||
name: limit
|
|
||||||
description: "false"
|
|
||||||
type: string
|
|
||||||
responses:
|
|
||||||
"200":
|
|
||||||
description: '{booking} models.booking'
|
|
||||||
/verification/{id}:
|
|
||||||
get:
|
|
||||||
tags:
|
|
||||||
- verification
|
|
||||||
description: |-
|
|
||||||
find verification by id
|
|
||||||
<br>
|
|
||||||
operationId: ExecutionVerificationController.Get
|
|
||||||
parameters:
|
|
||||||
- in: path
|
|
||||||
name: id
|
|
||||||
description: the id you want to get
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
responses:
|
|
||||||
"200":
|
|
||||||
description: '{booking} models.booking'
|
|
||||||
put:
|
|
||||||
tags:
|
|
||||||
- verification
|
|
||||||
description: |-
|
|
||||||
create computes
|
|
||||||
<br>
|
|
||||||
operationId: ExecutionVerificationController.Update
|
|
||||||
parameters:
|
|
||||||
- in: path
|
|
||||||
name: id
|
|
||||||
description: the compute id you want to get
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
- in: body
|
|
||||||
name: body
|
|
||||||
description: The compute content
|
|
||||||
required: true
|
|
||||||
schema:
|
|
||||||
$ref: '#/definitions/models.compute'
|
|
||||||
responses:
|
|
||||||
"200":
|
|
||||||
description: '{compute} models.compute'
|
|
||||||
/version/:
|
/version/:
|
||||||
get:
|
get:
|
||||||
tags:
|
tags:
|
||||||
@@ -354,27 +114,60 @@ paths:
|
|||||||
responses:
|
responses:
|
||||||
"200":
|
"200":
|
||||||
description: ""
|
description: ""
|
||||||
|
/workflow/{id}:
|
||||||
|
post:
|
||||||
|
tags:
|
||||||
|
- workflow
|
||||||
|
description: |-
|
||||||
|
schedule workflow
|
||||||
|
<br>
|
||||||
|
operationId: WorkflowSchedulerController.Schedule
|
||||||
|
parameters:
|
||||||
|
- in: path
|
||||||
|
name: id
|
||||||
|
description: id execution
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
- in: body
|
||||||
|
name: body
|
||||||
|
description: The compute content
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
$ref: '#/definitions/models.compute'
|
||||||
|
responses:
|
||||||
|
"200":
|
||||||
|
description: '{workspace} models.workspace'
|
||||||
|
delete:
|
||||||
|
tags:
|
||||||
|
- workflow
|
||||||
|
description: |-
|
||||||
|
schedule workflow
|
||||||
|
<br>
|
||||||
|
operationId: WorkflowSchedulerController.UnSchedule
|
||||||
|
parameters:
|
||||||
|
- in: path
|
||||||
|
name: id
|
||||||
|
description: id execution
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
- in: body
|
||||||
|
name: body
|
||||||
|
description: The compute content
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
$ref: '#/definitions/models.compute'
|
||||||
|
responses:
|
||||||
|
"200":
|
||||||
|
description: '{workspace} models.workspace'
|
||||||
definitions:
|
definitions:
|
||||||
map[string]interface{}:
|
|
||||||
title: map[string]interface{}
|
|
||||||
type: object
|
|
||||||
models.compute:
|
models.compute:
|
||||||
title: compute
|
title: compute
|
||||||
type: object
|
type: object
|
||||||
tags:
|
tags:
|
||||||
- name: oc-scheduler/controllersWorkflowSchedulerController
|
- name: oc-scheduler/controllersWorkflowExecutionController
|
||||||
description: |
|
description: |
|
||||||
Operations about workflow
|
Operations about workflow
|
||||||
- name: loki
|
- name: workflow
|
||||||
description: |
|
|
||||||
Operations about workflow
|
|
||||||
- name: booking
|
|
||||||
description: |
|
|
||||||
Operations about workspace
|
|
||||||
- name: verification
|
|
||||||
description: |
|
|
||||||
Operations about workspace
|
|
||||||
- name: execution
|
|
||||||
description: |
|
description: |
|
||||||
Operations about workflow
|
Operations about workflow
|
||||||
- name: version
|
- name: version
|
||||||
|
|||||||
137
ws.go
137
ws.go
@@ -1,137 +0,0 @@
|
|||||||
//go:build ignore
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"os/signal"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/net/websocket"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
timeout := flag.Int("timeout", 30, "secondes sans message avant de quitter")
|
|
||||||
flag.Parse()
|
|
||||||
|
|
||||||
args := flag.Args()
|
|
||||||
// Exemples de routes WS disponibles :
|
|
||||||
// ws://localhost:8090/oc/<workflow-id>/check
|
|
||||||
// ws://localhost:8090/oc/<workflow-id>/check?as_possible=true
|
|
||||||
// ws://localhost:8090/oc/<workflow-id>/check?as_possible=true&preemption=true
|
|
||||||
url := "ws://localhost:8000/scheduler/check/58314c99-c595-4ca2-8b5e-822a6774efed?as_possible=true"
|
|
||||||
token := ""
|
|
||||||
// Body JSON envoyé comme premier message WebSocket (WorkflowSchedule).
|
|
||||||
// Seuls start + duration_s sont requis si as_possible=true.
|
|
||||||
body := `{"start":"` + time.Now().UTC().Format(time.RFC3339) + `"}`
|
|
||||||
|
|
||||||
if len(args) >= 1 {
|
|
||||||
url = args[0]
|
|
||||||
}
|
|
||||||
if len(args) >= 2 {
|
|
||||||
token = args[1]
|
|
||||||
}
|
|
||||||
if len(args) >= 3 {
|
|
||||||
body = args[2]
|
|
||||||
}
|
|
||||||
|
|
||||||
origin := "http://localhost/"
|
|
||||||
config, err := websocket.NewConfig(url, origin)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Config invalide : %v", err)
|
|
||||||
}
|
|
||||||
if token != "" {
|
|
||||||
config.Header.Set("Authorization", "Bearer "+token)
|
|
||||||
fmt.Printf("Token : %s...\n", token[:min(20, len(token))])
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Printf("Connexion à : %s\n", url)
|
|
||||||
ws, err := websocket.DialConfig(config)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Impossible de se connecter : %v", err)
|
|
||||||
}
|
|
||||||
defer ws.Close()
|
|
||||||
fmt.Println("Connecté — envoi du body initial...")
|
|
||||||
|
|
||||||
// Envoi du WorkflowSchedule comme premier message.
|
|
||||||
if err := websocket.Message.Send(ws, body); err != nil {
|
|
||||||
log.Fatalf("Impossible d'envoyer le body initial : %v", err)
|
|
||||||
}
|
|
||||||
fmt.Printf("Body envoyé : %s\n\nEn attente de messages...\n\n", body)
|
|
||||||
|
|
||||||
stop := make(chan os.Signal, 1)
|
|
||||||
signal.Notify(stop, os.Interrupt)
|
|
||||||
|
|
||||||
msgs := make(chan string)
|
|
||||||
errs := make(chan error, 1)
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
var raw string
|
|
||||||
if err := websocket.Message.Receive(ws, &raw); err != nil {
|
|
||||||
errs <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
msgs <- raw
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Après 5 secondes, simule un changement de date côté front (now + 3 min).
|
|
||||||
dateChangeTick := time.NewTimer(10 * time.Second)
|
|
||||||
defer dateChangeTick.Stop()
|
|
||||||
|
|
||||||
// Après 15 secondes, simule la confirmation du scheduling par le client.
|
|
||||||
confirmTick := time.NewTimer(15 * time.Second)
|
|
||||||
defer confirmTick.Stop()
|
|
||||||
|
|
||||||
idleTimer := time.NewTimer(time.Duration(*timeout) * time.Second)
|
|
||||||
defer idleTimer.Stop()
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-stop:
|
|
||||||
fmt.Println("\nInterruption — fermeture.")
|
|
||||||
return
|
|
||||||
case err := <-errs:
|
|
||||||
fmt.Printf("Connexion fermée : %v\n", err)
|
|
||||||
return
|
|
||||||
case <-idleTimer.C:
|
|
||||||
fmt.Printf("Timeout (%ds) — aucun message reçu, fermeture.\n", *timeout)
|
|
||||||
return
|
|
||||||
case <-dateChangeTick.C:
|
|
||||||
newStart := time.Now().UTC().Add(3 * time.Minute)
|
|
||||||
update := `{"start":"` + newStart.Format(time.RFC3339) + `"}`
|
|
||||||
fmt.Printf("\n[sim] Envoi mise à jour de date → %s\n\n", update)
|
|
||||||
if err := websocket.Message.Send(ws, update); err != nil {
|
|
||||||
fmt.Printf("Erreur envoi mise à jour : %v\n", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case <-confirmTick.C:
|
|
||||||
fmt.Println("\n[sim] Envoi confirmation du scheduling → {\"confirm\":true}\n")
|
|
||||||
if err := websocket.Message.Send(ws, `{"confirm":true}`); err != nil {
|
|
||||||
fmt.Printf("Erreur envoi confirmation : %v\n", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case raw := <-msgs:
|
|
||||||
idleTimer.Reset(time.Duration(*timeout) * time.Second)
|
|
||||||
var data any
|
|
||||||
if err := json.Unmarshal([]byte(raw), &data); err == nil {
|
|
||||||
b, _ := json.MarshalIndent(data, "", " ")
|
|
||||||
fmt.Println(string(b))
|
|
||||||
} else {
|
|
||||||
fmt.Printf("Message brut : %s\n", raw)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// min returns the smaller of the two integers a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
|
||||||
Reference in New Issue
Block a user