Add verification

This commit is contained in:
mr
2026-03-12 12:05:52 +01:00
parent 98fe2600b3
commit b9df0b2731
8 changed files with 275 additions and 64 deletions

View File

@@ -0,0 +1,68 @@
package controllers
import (
"encoding/json"
"oc-scheduler/infrastructure"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/models/execution_verification"
"cloud.o-forge.io/core/oc-lib/models/resources/native_tools"
"cloud.o-forge.io/core/oc-lib/tools"
beego "github.com/beego/beego/v2/server/web"
)
// ExecutionVerificationController exposes CRUD-style HTTP operations for
// execution verifications (list, fetch by id, update/validate).
type ExecutionVerificationController struct {
	beego.Controller
}
// @Title GetAll
// @Description list all execution verifications visible to the caller
// @Param is_draft query string false "when \"true\", restrict to draft verifications"
// @Success 200 {object} execution_verification.ExecutionVerification
// @router / [get]
func (o *ExecutionVerificationController) GetAll() {
	// Identify the caller from the auth token carried on the request.
	user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
	// Any value other than the literal "true" means "not drafts".
	isDraft := o.Ctx.Input.Query("is_draft")
	o.Data["json"] = oclib.NewRequest(oclib.LibDataEnum(oclib.EXECUTION_VERIFICATION), user, peerID, groups, nil).LoadAll(isDraft == "true")
	o.ServeJSON()
}
// @Title Get
// @Description find an execution verification by id
// @Param id path string true "the id you want to get"
// @Success 200 {object} execution_verification.ExecutionVerification
// @router /:id [get]
func (o *ExecutionVerificationController) Get() {
	// Identify the caller from the auth token carried on the request.
	user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
	id := o.Ctx.Input.Param(":id")
	o.Data["json"] = oclib.NewRequest(oclib.LibDataEnum(oclib.EXECUTION_VERIFICATION), user, peerID, groups, nil).LoadOne(id)
	o.ServeJSON()
}
// @Title Update
// @Description update an execution verification; if the update marks it as
// validated, a WORKFLOW_EVENT is emitted over NATS for the concerned peer so
// downstream services can react to the verified workflow.
// @Param id path string true "the verification id you want to update"
// @Param body body execution_verification.ExecutionVerification true "the verification content"
// @Success 200 {object} execution_verification.ExecutionVerification
// @router /:id [put]
func (o *ExecutionVerificationController) Put() {
	// Identify the caller from the auth token carried on the request.
	user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
	var res map[string]interface{}
	id := o.Ctx.Input.Param(":id")
	// Body is capped at 10000 bytes. The Unmarshal error is deliberately
	// ignored: a malformed body leaves res nil and the failure is reported
	// by UpdateOne through data.Err — NOTE(review): consider rejecting
	// malformed bodies explicitly with a 400.
	_ = json.Unmarshal(o.Ctx.Input.CopyBody(10000), &res)
	data := oclib.NewRequest(oclib.LibDataEnum(oclib.EXECUTION_VERIFICATION), user, peerID, groups, nil).UpdateOne(res, id)
	if data.Err == "" && data.Data != nil && data.Data.(*execution_verification.ExecutionVerification).Validate {
		// Distinct name for the marshalled event payload: the original
		// code shadowed `data` here, which compiled (the RHS is evaluated
		// before the new name enters scope) but was easy to misread.
		payload, _ := json.Marshal(&native_tools.WorkflowEventParams{
			WorkflowResourceID: data.Data.(*execution_verification.ExecutionVerification).WorkflowID,
		})
		infrastructure.EmitNATS(peerID, tools.PropalgationMessage{
			Action:   tools.PubSubAction(tools.WORKFLOW_EVENT),
			DataType: tools.EXECUTION_VERIFICATION.EnumIndex(),
			Payload:  payload,
		})
	}
	o.Data["json"] = data
	o.ServeJSON()
}

View File

@@ -39,18 +39,8 @@ func (o *WorkflowSchedulerController) Schedule() {
var resp *infrastructure.WorkflowSchedule var resp *infrastructure.WorkflowSchedule
json.Unmarshal(o.Ctx.Input.CopyBody(100000), &resp) json.Unmarshal(o.Ctx.Input.CopyBody(100000), &resp)
caller := tools.NewHTTPCaller(map[tools.DataType]map[tools.METHOD]string{ // paths to call other OC services
tools.PEER: {
tools.POST: "/status/",
},
tools.BOOKING: {
tools.GET: "/booking/check/:id/:start_date/:end_date",
tools.POST: "/booking/",
},
})
logger.Info().Msg("Booking for " + wfId) logger.Info().Msg("Booking for " + wfId)
req := oclib.NewRequestAdmin(collection, caller) req := oclib.NewRequestAdmin(collection, nil)
// req := oclib.NewRequest(collection, user, peerID, groups, caller) // req := oclib.NewRequest(collection, user, peerID, groups, caller)
resp.UUID = uuid.New().String() resp.UUID = uuid.New().String()
fmt.Println(user, peerID, groups) fmt.Println(user, peerID, groups)
@@ -58,7 +48,7 @@ func (o *WorkflowSchedulerController) Schedule() {
Username: user, Username: user,
PeerID: peerID, PeerID: peerID,
Groups: groups, Groups: groups,
Caller: caller, Caller: nil,
Admin: true, Admin: true,
}) })
if err != nil { if err != nil {
@@ -87,13 +77,24 @@ var wsUpgrader = gorillaws.Upgrader{
CheckOrigin: func(r *http.Request) bool { return true }, CheckOrigin: func(r *http.Request) bool { return true },
} }
// CheckStreamHandler is a plain http.HandlerFunc (registered via beego.Handler // @Title CheckStream
// to avoid Beego's WriteHeader interference with the WebSocket upgrade). // @Description WebSocket stream for slot availability checking.
// Path: /oc/:id/check → parts = ["", "oc", "<id>", "check"] // @Param id path string true "workflow id"
// @Param as_possible query bool false "search from now"
// @Param preemption query bool false "force-valid, surface warnings"
// @router /:id/check [get]
func (o *WorkflowSchedulerController) CheckStream() {
CheckStreamHandler(o.Ctx.ResponseWriter, o.Ctx.Request)
}
// CheckStreamHandler is the WebSocket handler for slot availability checking.
// It is invoked via the CheckStream controller method.
// Query params: as_possible=true, preemption=true // Query params: as_possible=true, preemption=true
func CheckStreamHandler(w http.ResponseWriter, r *http.Request) { func CheckStreamHandler(w http.ResponseWriter, r *http.Request) {
parts := strings.Split(strings.TrimSuffix(r.URL.Path, "/"), "/") wfID := strings.TrimSuffix(
wfID := parts[len(parts)-2] // second-to-last segment strings.TrimPrefix(r.URL.Path, "/oc/"),
"/check",
)
q := r.URL.Query() q := r.URL.Query()
asap := q.Get("as_possible") == "true" asap := q.Get("as_possible") == "true"
@@ -136,20 +137,23 @@ func CheckStreamHandler(w http.ResponseWriter, r *http.Request) {
plannerCh, plannerUnsub := infrastructure.SubscribePlannerUpdates(watchedPeers) plannerCh, plannerUnsub := infrastructure.SubscribePlannerUpdates(watchedPeers)
wfCh, wfUnsub := infrastructure.SubscribeWorkflowUpdates(wfID) wfCh, wfUnsub := infrastructure.SubscribeWorkflowUpdates(wfID)
// Cleanup on exit: cancel subscriptions, evict planner cache entries, // Unique ID for this check session — used to track refresh ownership.
// signal PB_CLOSE_PLANNER on NATS for each peer that was being watched. sessionID := uuid.New().String()
// Request a fresh planner snapshot for every concerned peer.
// The first session to claim a peer becomes its refresh owner; others skip
// the duplicate PB_PLANNER emission. ownedPeers grows if the workflow's
// peer list changes (wfCh).
ownedPeers := infrastructure.RequestPlannerRefresh(watchedPeers, sessionID)
// Cleanup on exit (clean or forced): release refresh ownership for the
// peers this session claimed, which resets Refreshing state and emits
// PB_CLOSE_PLANNER so oc-discovery stops the planner stream.
defer func() { defer func() {
conn.Close() conn.Close()
plannerUnsub() plannerUnsub()
wfUnsub() wfUnsub()
for _, peer := range watchedPeers { infrastructure.ReleaseRefreshOwnership(ownedPeers, sessionID)
if b, err := json.Marshal(map[string]interface{}{"peer_id": peer}); err == nil {
infrastructure.EmitNATS(peer, tools.PropalgationMessage{
Action: tools.PB_CLOSE_PLANNER,
Payload: b,
})
}
}
}() }()
push := func() error { push := func() error {
@@ -166,20 +170,38 @@ func CheckStreamHandler(w http.ResponseWriter, r *http.Request) {
return return
} }
// Detect client-side close in a separate goroutine. // Read loop: detect client-side close and parse schedule parameter
// updates (date changes, booking mode changes, …) sent by the client.
updateCh := make(chan infrastructure.WorkflowSchedule, 1)
closeCh := make(chan struct{}) closeCh := make(chan struct{})
go func() { go func() {
defer close(closeCh) defer close(closeCh)
for { for {
if _, _, err := conn.ReadMessage(); err != nil { var updated infrastructure.WorkflowSchedule
if err := conn.ReadJSON(&updated); err != nil {
// Connection closed or unrecoverable read error.
return return
} }
// Drop the oldest pending update if the consumer hasn't caught up.
select {
case updateCh <- updated:
default:
<-updateCh
updateCh <- updated
}
} }
}() }()
// Stream loop. // Stream loop.
for { for {
select { select {
case updated := <-updateCh:
// The client changed the requested date/params: adopt the new
// schedule and re-run the check immediately.
ws = updated
if err := push(); err != nil {
return
}
case <-wfCh: case <-wfCh:
// The workflow was modified: refresh the peer list and re-subscribe // The workflow was modified: refresh the peer list and re-subscribe
// so the stream watches the correct set of planners going forward. // so the stream watches the correct set of planners going forward.
@@ -187,11 +209,15 @@ func CheckStreamHandler(w http.ResponseWriter, r *http.Request) {
plannerUnsub() plannerUnsub()
watchedPeers = newPeers watchedPeers = newPeers
plannerCh, plannerUnsub = infrastructure.SubscribePlannerUpdates(newPeers) plannerCh, plannerUnsub = infrastructure.SubscribePlannerUpdates(newPeers)
// Claim refresh ownership for any newly added peers.
newOwned := infrastructure.RequestPlannerRefresh(newPeers, sessionID)
ownedPeers = append(ownedPeers, newOwned...)
} }
if err := push(); err != nil { if err := push(); err != nil {
return return
} }
case <-plannerCh: case <-plannerCh:
// A planner snapshot arrived (or was evicted): re-evaluate.
if err := push(); err != nil { if err := push(); err != nil {
return return
} }

2
go.mod
View File

@@ -3,7 +3,7 @@ module oc-scheduler
go 1.25.0 go 1.25.0
require ( require (
cloud.o-forge.io/core/oc-lib v0.0.0-20260304145747-e03a0d3dd0aa cloud.o-forge.io/core/oc-lib v0.0.0-20260312105633-a30173921f67
github.com/beego/beego/v2 v2.3.8 github.com/beego/beego/v2 v2.3.8
github.com/google/uuid v1.6.0 github.com/google/uuid v1.6.0
github.com/robfig/cron v1.2.0 github.com/robfig/cron v1.2.0

12
go.sum
View File

@@ -16,6 +16,18 @@ cloud.o-forge.io/core/oc-lib v0.0.0-20260224093610-a9ebad78f3a8 h1:xoC5PAz1469Qx
cloud.o-forge.io/core/oc-lib v0.0.0-20260224093610-a9ebad78f3a8/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA= cloud.o-forge.io/core/oc-lib v0.0.0-20260224093610-a9ebad78f3a8/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260304145747-e03a0d3dd0aa h1:1wCpI4dwN1pj6MlpJ7/WifhHVHmCE4RU+9klwqgo/bk= cloud.o-forge.io/core/oc-lib v0.0.0-20260304145747-e03a0d3dd0aa h1:1wCpI4dwN1pj6MlpJ7/WifhHVHmCE4RU+9klwqgo/bk=
cloud.o-forge.io/core/oc-lib v0.0.0-20260304145747-e03a0d3dd0aa/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA= cloud.o-forge.io/core/oc-lib v0.0.0-20260304145747-e03a0d3dd0aa/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260311072518-933b7147e908 h1:1jz3xI/u2FzCG8phY7ShqADrmCj0mlrdjbdNUosSwgs=
cloud.o-forge.io/core/oc-lib v0.0.0-20260311072518-933b7147e908/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260311084029-97bfb0582a99 h1:60BGJeR9uvpDwvNeWqVBnB2JjWLOZv16sUGZjzXSQlg=
cloud.o-forge.io/core/oc-lib v0.0.0-20260311084029-97bfb0582a99/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260312073634-2c9c42dd516a h1:oCkb9l/Cvn0x6iicxIydrjfCNU+UHhKuklFgfzDa174=
cloud.o-forge.io/core/oc-lib v0.0.0-20260312073634-2c9c42dd516a/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260312083310-f5e199132416 h1:QHR5pzCI/HUawu8pst5Ggio6WPCUUf8XYjNMVk8kSqo=
cloud.o-forge.io/core/oc-lib v0.0.0-20260312083310-f5e199132416/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260312104524-e28b79ac0d62 h1:sHzacZxPIKHyjL4EkgG/c7MI8gM1xmLdhaoUx2ZsH+M=
cloud.o-forge.io/core/oc-lib v0.0.0-20260312104524-e28b79ac0d62/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260312105633-a30173921f67 h1:x6klvxiRpU1KcvmygIcHGDHFW3CbWC05El6Fryvr3uo=
cloud.o-forge.io/core/oc-lib v0.0.0-20260312105633-a30173921f67/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/beego/beego/v2 v2.3.8 h1:wplhB1pF4TxR+2SS4PUej8eDoH4xGfxuHfS7wAk9VBc= github.com/beego/beego/v2 v2.3.8 h1:wplhB1pF4TxR+2SS4PUej8eDoH4xGfxuHfS7wAk9VBc=
github.com/beego/beego/v2 v2.3.8/go.mod h1:8vl9+RrXqvodrl9C8yivX1e6le6deCK6RWeq8R7gTTg= github.com/beego/beego/v2 v2.3.8/go.mod h1:8vl9+RrXqvodrl9C8yivX1e6le6deCK6RWeq8R7gTTg=

View File

@@ -29,8 +29,20 @@ const plannerTTL = 24 * time.Hour
// Planner cache — protected by plannerMu // Planner cache — protected by plannerMu
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// plannerEntry wraps a planner snapshot with refresh-ownership tracking.
// At most one check session may be the "refresh owner" of a given peer's
// planner at a time: it emits PB_PLANNER to request a fresh snapshot from
// oc-discovery and, on close (clean or forced), emits PB_CLOSE_PLANNER to
// release the stream. Any subsequent session that needs the same peer's
// planner will see Refreshing=true and skip the duplicate request.
type plannerEntry struct {
Planner *planner.Planner
Refreshing bool // true while a PB_PLANNER request is in flight
RefreshOwner string // session UUID that initiated the current refresh
}
var plannerMu sync.RWMutex var plannerMu sync.RWMutex
var PlannerCache = map[string]*planner.Planner{} var PlannerCache = map[string]*plannerEntry{}
var plannerAddedAt = map[string]time.Time{} // peerID → first-seen timestamp var plannerAddedAt = map[string]time.Time{} // peerID → first-seen timestamp
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
@@ -104,29 +116,39 @@ func notify(mu *sync.RWMutex, registry map[string][]chan struct{}, key string) {
// Cache helpers // Cache helpers
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// storePlanner inserts or updates a planner for peerID. // storePlanner inserts or updates the planner snapshot for peerID.
// On first insertion it schedules an automatic eviction after plannerTTL. // On first insertion it schedules an automatic eviction after plannerTTL.
// Existing refresh-ownership state (Refreshing / RefreshOwner) is preserved
// so that an in-flight request is not inadvertently reset.
// All subscribers interested in this peer are notified. // All subscribers interested in this peer are notified.
func storePlanner(peerID string, p *planner.Planner) { func storePlanner(peerID string, p *planner.Planner) {
plannerMu.Lock() plannerMu.Lock()
isNew := PlannerCache[peerID] == nil entry := PlannerCache[peerID]
PlannerCache[peerID] = p isNew := entry == nil
if isNew { if isNew {
entry = &plannerEntry{}
PlannerCache[peerID] = entry
plannerAddedAt[peerID] = time.Now() plannerAddedAt[peerID] = time.Now()
go evictAfter(peerID, plannerTTL) go evictAfter(peerID, plannerTTL)
} }
entry.Planner = p
plannerMu.Unlock() plannerMu.Unlock()
notifyPlannerWatchers(peerID) notifyPlannerWatchers(peerID)
} }
// evictAfter waits ttl from the first-seen time for peerID then emits a // evictAfter waits ttl from first insertion then deletes the cache entry and
// PB_CLOSE_PLANNER event, which removes the entry from the cache and notifies // emits PB_CLOSE_PLANNER so oc-discovery stops streaming for this peer.
// NATS. // This is the only path that actually removes an entry from PlannerCache;
// session close (ReleaseRefreshOwnership) only resets ownership state.
func evictAfter(peerID string, ttl time.Duration) { func evictAfter(peerID string, ttl time.Duration) {
time.Sleep(ttl) time.Sleep(ttl)
plannerMu.RLock() plannerMu.Lock()
_, exists := PlannerCache[peerID] _, exists := PlannerCache[peerID]
plannerMu.RUnlock() if exists {
delete(PlannerCache, peerID)
delete(plannerAddedAt, peerID)
}
plannerMu.Unlock()
if exists { if exists {
EmitNATS(peerID, tools.PropalgationMessage{Action: tools.PB_CLOSE_PLANNER}) EmitNATS(peerID, tools.PropalgationMessage{Action: tools.PB_CLOSE_PLANNER})
} }
@@ -137,12 +159,11 @@ func evictAfter(peerID string, ttl time.Duration) {
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
func EmitNATS(peerID string, message tools.PropalgationMessage) { func EmitNATS(peerID string, message tools.PropalgationMessage) {
// PB_CLOSE_PLANNER: notify local watchers so streams re-evaluate.
// Cache mutations (eviction or ownership reset) are the caller's
// responsibility — see evictAfter and ReleaseRefreshOwnership.
if message.Action == tools.PB_CLOSE_PLANNER { if message.Action == tools.PB_CLOSE_PLANNER {
plannerMu.Lock() notifyPlannerWatchers(peerID)
delete(PlannerCache, peerID)
delete(plannerAddedAt, peerID)
plannerMu.Unlock()
notifyPlannerWatchers(peerID) // let streams re-evaluate (will warn "no planner")
} }
b, _ := json.Marshal(message) b, _ := json.Marshal(message)
tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{ tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
@@ -309,6 +330,17 @@ func ListenNATS() {
// considered in the target WorkflowExecution (BookingsState / PurchasesState). // considered in the target WorkflowExecution (BookingsState / PurchasesState).
// WORKFLOW_EXECUTION → the execution reached SCHEDULED; confirm all // WORKFLOW_EXECUTION → the execution reached SCHEDULED; confirm all
// local draft bookings and purchases listed in its states. // local draft bookings and purchases listed in its states.
tools.PLANNER_EXECUTION: func(resp tools.NATSResponse) {
m := map[string]interface{}{}
p := planner.Planner{}
if err := json.Unmarshal(resp.Payload, &m); err != nil {
return
}
if err := json.Unmarshal(resp.Payload, &p); err != nil {
return
}
storePlanner(fmt.Sprintf("%v", m["peer_id"]), &p)
},
tools.PROPALGATION_EVENT: func(resp tools.NATSResponse) { tools.PROPALGATION_EVENT: func(resp tools.NATSResponse) {
if resp.FromApp != "oc-discovery" { if resp.FromApp != "oc-discovery" {
return return
@@ -318,16 +350,6 @@ func ListenNATS() {
return return
} }
switch prop.Action { switch prop.Action {
case tools.PB_PLANNER:
m := map[string]interface{}{}
p := planner.Planner{}
if err := json.Unmarshal(prop.Payload, &m); err != nil {
return
}
if err := json.Unmarshal(prop.Payload, &p); err != nil {
return
}
storePlanner(fmt.Sprintf("%v", m["peer_id"]), &p)
case tools.PB_CONSIDERS: case tools.PB_CONSIDERS:
switch tools.DataType(prop.DataType) { switch tools.DataType(prop.DataType) {
case tools.BOOKING, tools.PURCHASE_RESOURCE: case tools.BOOKING, tools.PURCHASE_RESOURCE:
@@ -379,9 +401,9 @@ func ListenNATS() {
} }
// Verify the slot is free in our planner (if we have one). // Verify the slot is free in our planner (if we have one).
plannerMu.RLock() plannerMu.RLock()
p := PlannerCache[self.PeerID] selfEntry := PlannerCache[self.PeerID]
plannerMu.RUnlock() plannerMu.RUnlock()
if p != nil && !checkInstance(p, bk.ResourceID, bk.InstanceID, bk.ExpectedStartDate, bk.ExpectedEndDate) { if selfEntry != nil && selfEntry.Planner != nil && !checkInstance(selfEntry.Planner, bk.ResourceID, bk.InstanceID, bk.ExpectedStartDate, bk.ExpectedEndDate) {
fmt.Println("ListenNATS: booking conflicts with local planner, discarding") fmt.Println("ListenNATS: booking conflicts with local planner, discarding")
return return
} }
@@ -598,6 +620,60 @@ func refreshSelfPlanner(peerID string, request *tools.APIRequest) {
// Planner broadcast // Planner broadcast
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// RequestPlannerRefresh asks oc-discovery for a fresh planner snapshot for
// each peer in peerIDs. Only the first session to request a given peer becomes
// its "refresh owner": subsequent sessions see Refreshing=true and skip the
// duplicate PB_PLANNER emission. Returns the subset of peerIDs for which this
// session claimed ownership (needed to release on close).
func RequestPlannerRefresh(peerIDs []string, sessionID string) []string {
var owned []string
for _, peerID := range peerIDs {
plannerMu.Lock()
entry := PlannerCache[peerID]
if entry == nil {
entry = &plannerEntry{}
PlannerCache[peerID] = entry
plannerAddedAt[peerID] = time.Now()
go evictAfter(peerID, plannerTTL)
}
shouldRequest := !entry.Refreshing
if shouldRequest {
entry.Refreshing = true
entry.RefreshOwner = sessionID
}
plannerMu.Unlock()
if shouldRequest {
owned = append(owned, peerID)
payload, _ := json.Marshal(map[string]any{"peer_id": peerID})
EmitNATS(peerID, tools.PropalgationMessage{
Action: tools.PB_PLANNER,
Payload: payload,
})
}
}
return owned
}
// ReleaseRefreshOwnership is called when a check session closes (clean or
// forced). For each peer this session owns, it resets the refresh state and
// emits PB_CLOSE_PLANNER so oc-discovery stops the planner stream.
// The planner data itself stays in the cache until TTL eviction.
func ReleaseRefreshOwnership(peerIDs []string, sessionID string) {
for _, peerID := range peerIDs {
plannerMu.Lock()
if entry := PlannerCache[peerID]; entry != nil && entry.RefreshOwner == sessionID {
entry.Refreshing = false
entry.RefreshOwner = ""
}
plannerMu.Unlock()
payload, _ := json.Marshal(map[string]any{"peer_id": peerID})
EmitNATS(peerID, tools.PropalgationMessage{
Action: tools.PB_CLOSE_PLANNER,
Payload: payload,
})
}
}
// broadcastPlanner iterates the storage and compute peers of the given workflow // broadcastPlanner iterates the storage and compute peers of the given workflow
// and, for each peer not yet in the cache, emits a PB_PLANNER propagation so // and, for each peer not yet in the cache, emits a PB_PLANNER propagation so
// downstream consumers (oc-discovery, other schedulers) refresh their state. // downstream consumers (oc-discovery, other schedulers) refresh their state.
@@ -631,7 +707,8 @@ func broadcastPlanner(wf *workflow.Workflow) {
cached := PlannerCache[p.PeerID] cached := PlannerCache[p.PeerID]
plannerMu.RUnlock() plannerMu.RUnlock()
if cached == nil { // Only request if no snapshot and no refresh already in flight.
if cached == nil || (cached.Planner == nil && !cached.Refreshing) {
payload, err := json.Marshal(map[string]interface{}{"peer_id": p.PeerID}) payload, err := json.Marshal(map[string]interface{}{"peer_id": p.PeerID})
if err != nil { if err != nil {
continue continue

View File

@@ -520,14 +520,14 @@ func collectBookingResources(wf *workflow.Workflow, selectedInstances workflow.C
func checkResourceAvailability(res []bookingResource, start time.Time, end *time.Time) (unavailable []string, warnings []string) { func checkResourceAvailability(res []bookingResource, start time.Time, end *time.Time) (unavailable []string, warnings []string) {
for _, r := range res { for _, r := range res {
plannerMu.RLock() plannerMu.RLock()
p := PlannerCache[r.peerID] entry := PlannerCache[r.peerID]
plannerMu.RUnlock() plannerMu.RUnlock()
if p == nil { if entry == nil || entry.Planner == nil {
warnings = append(warnings, fmt.Sprintf( warnings = append(warnings, fmt.Sprintf(
"peer %s planner not in cache for resource %s assuming available", r.peerID, r.id)) "peer %s planner not in cache for resource %s assuming available", r.peerID, r.id))
continue continue
} }
if !checkInstance(p, r.id, r.instanceID, start, end) { if !checkInstance(entry.Planner, r.id, r.instanceID, start, end) {
unavailable = append(unavailable, r.id) unavailable = append(unavailable, r.id)
warnings = append(warnings, fmt.Sprintf( warnings = append(warnings, fmt.Sprintf(
"resource %s is not available in [%s %s]", "resource %s is not available in [%s %s]",

View File

@@ -43,6 +43,33 @@ func init() {
Filters: nil, Filters: nil,
Params: nil}) Params: nil})
beego.GlobalControllerRouter["oc-scheduler/controllers:ExecutionVerificationController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:ExecutionVerificationController"],
beego.ControllerComments{
Method: "GetAll",
Router: `/`,
AllowHTTPMethods: []string{"get"},
MethodParams: param.Make(),
Filters: nil,
Params: nil})
beego.GlobalControllerRouter["oc-scheduler/controllers:ExecutionVerificationController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:ExecutionVerificationController"],
beego.ControllerComments{
Method: "Get",
Router: `/:id`,
AllowHTTPMethods: []string{"get"},
MethodParams: param.Make(),
Filters: nil,
Params: nil})
beego.GlobalControllerRouter["oc-scheduler/controllers:ExecutionVerificationController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:ExecutionVerificationController"],
beego.ControllerComments{
Method: "Put",
Router: `/:id`,
AllowHTTPMethods: []string{"put"},
MethodParams: param.Make(),
Filters: nil,
Params: nil})
beego.GlobalControllerRouter["oc-scheduler/controllers:LokiController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:LokiController"], beego.GlobalControllerRouter["oc-scheduler/controllers:LokiController"] = append(beego.GlobalControllerRouter["oc-scheduler/controllers:LokiController"],
beego.ControllerComments{ beego.ControllerComments{
Method: "GetLogs", Method: "GetLogs",

View File

@@ -8,7 +8,6 @@
package routers package routers
import ( import (
"net/http"
"oc-scheduler/controllers" "oc-scheduler/controllers"
beego "github.com/beego/beego/v2/server/web" beego "github.com/beego/beego/v2/server/web"
@@ -29,6 +28,11 @@ func init() {
&controllers.BookingController{}, &controllers.BookingController{},
), ),
), ),
beego.NSNamespace("/verification",
beego.NSInclude(
&controllers.ExecutionVerificationController{},
),
),
beego.NSNamespace("/execution", beego.NSNamespace("/execution",
beego.NSInclude( beego.NSInclude(
&controllers.WorkflowExecutionController{}, &controllers.WorkflowExecutionController{},
@@ -42,7 +46,4 @@ func init() {
) )
beego.AddNamespace(ns) beego.AddNamespace(ns)
// Route WebSocket hors du pipeline Beego pour éviter le WriteHeader parasite
beego.Handler("/oc/:id/check", http.HandlerFunc(controllers.CheckStreamHandler))
} }