From 12eba65a011a9622e384b643cb4db6dd082ef651 Mon Sep 17 00:00:00 2001 From: mr Date: Wed, 25 Mar 2026 11:11:37 +0100 Subject: [PATCH] Refactor Oc-Sheduler --- controllers/sheduler.go | 68 +- docker_scheduler.json | 6 +- go.mod | 2 +- go.sum | 8 +- infrastructure/api.go | 137 ++ infrastructure/check.go | 343 ----- infrastructure/considers.go | 197 --- infrastructure/execution/execution.go | 508 ++++++++ infrastructure/nats.go | 75 -- infrastructure/nats/nats.go | 23 + infrastructure/nats/nats_handlers.go | 87 ++ infrastructure/nats_handlers.go | 248 ---- infrastructure/planner.go | 353 ------ infrastructure/planner/planner.go | 453 +++++++ infrastructure/scheduler.go | 320 ----- infrastructure/scheduler/scheduler.go | 235 ++++ .../objects.go | 71 +- .../scheduling_resources/service.go | 474 +++++++ infrastructure/session.go | 395 ------ infrastructure/session/session.go | 233 ++++ infrastructure/utils/utils.go | 186 +++ logs.log | 1118 +++++++++++++++++ main.go | 1 + ws.go | 4 +- 24 files changed, 3498 insertions(+), 2047 deletions(-) create mode 100644 infrastructure/api.go delete mode 100644 infrastructure/check.go delete mode 100644 infrastructure/considers.go create mode 100644 infrastructure/execution/execution.go delete mode 100644 infrastructure/nats.go create mode 100644 infrastructure/nats/nats.go create mode 100644 infrastructure/nats/nats_handlers.go delete mode 100644 infrastructure/nats_handlers.go delete mode 100644 infrastructure/planner.go create mode 100644 infrastructure/planner/planner.go delete mode 100644 infrastructure/scheduler.go create mode 100644 infrastructure/scheduler/scheduler.go rename infrastructure/{scheduling => scheduling_resources}/objects.go (62%) create mode 100644 infrastructure/scheduling_resources/service.go delete mode 100644 infrastructure/session.go create mode 100644 infrastructure/session/session.go create mode 100644 infrastructure/utils/utils.go create mode 100644 logs.log diff --git a/controllers/sheduler.go 
b/controllers/sheduler.go index 28185ac..1fa8d66 100644 --- a/controllers/sheduler.go +++ b/controllers/sheduler.go @@ -28,7 +28,6 @@ var wsUpgrader = gorillaws.Upgrader{ } // CheckStreamHandler is the WebSocket handler for slot availability checking. -// It is invoked via the CheckStream controller method. // Query params: as_possible=true, preemption=true func CheckStreamHandler(w http.ResponseWriter, r *http.Request) { wfID := strings.TrimSuffix( @@ -50,7 +49,7 @@ func CheckStreamHandler(w http.ResponseWriter, r *http.Request) { } watchedPeers, err := infrastructure.GetWorkflowPeerIDs(wfID, req) - fmt.Println("Here my watched peers involved in workflow", watchedPeers) + fmt.Println("Watched peers for workflow", wfID, ":", watchedPeers) if err != nil { http.Error(w, `{"code":404,"error":"`+err.Error()+`"}`, http.StatusNotFound) return @@ -73,17 +72,14 @@ func CheckStreamHandler(w http.ResponseWriter, r *http.Request) { executionsID := uuid.New().String() ownedPeers := infrastructure.RequestPlannerRefresh(watchedPeers, executionsID) - selfID, err := oclib.GetMySelf() - if err != nil || selfID == nil { - logger.Err(err).Msg(err.Error()) + self, err := oclib.GetMySelf() + if err != nil || self == nil { + logger.Err(err).Msg("could not resolve self peer") + conn.Close() return } - selfPeerID := "" - if selfID != nil { - selfPeerID = selfID.PeerID - } + selfPeerID := self.PeerID - // scheduled=true once bookings/purchases/exec have been created for this session. scheduled := false confirmed := false @@ -91,37 +87,33 @@ func CheckStreamHandler(w http.ResponseWriter, r *http.Request) { conn.Close() plannerUnsub() wfUnsub() - infrastructure.ReleaseRefreshOwnership(ownedPeers, executionsID) if !confirmed { - infrastructure.CleanupSession(selfID, executionsID, selfID, req) + infrastructure.CleanupSession(executionsID, req) } }() - // pushCheck runs an availability check and sends the result to the client. 
- // If reschedule=true and the slot is available, it also creates/updates - // bookings, purchases and the execution draft for this session. pushCheck := func(reschedule bool) error { result, checkErr := ws.Check(wfID, asap, preemption, req) if checkErr != nil { return checkErr } if result.Available && reschedule { - // Sync the resolved start/end back to ws so that UpsertSessionDrafts - // creates bookings/purchases with the actual scheduled dates (not the - // raw client value which may be zero or pre-asapBuffer). ws.Start = result.Start if result.End != nil { ws.End = result.End } - ws.UpsertSessionDrafts(wfID, executionsID, selfID, req) + _, _, execs, purchases, bookings, err := ws.GetBuyAndBook(wfID, req) + if err != nil { + return err + } + infrastructure.UpsertSessionDrafts(executionsID, execs, purchases, bookings, req) scheduled = true } result.SchedulingID = executionsID return conn.WriteJSON(result) } - // Initial check + schedule. if err := pushCheck(true); err != nil { return } @@ -148,10 +140,8 @@ func CheckStreamHandler(w http.ResponseWriter, r *http.Request) { select { case updated := <-updateCh: if updated.Confirm { - // Confirm: flip bookings/purchases to IsDraft=false, then let - // the considers mechanism transition exec to IsDraft=false. 
ws.UUID = executionsID - _, _, _, schedErr := ws.Schedules(wfID, req) + _, _, _, schedErr := infrastructure.Schedule(&ws, wfID, req) if schedErr != nil { _ = conn.WriteJSON(map[string]interface{}{ "error": schedErr.Error(), @@ -172,7 +162,7 @@ func CheckStreamHandler(w http.ResponseWriter, r *http.Request) { !reflect.DeepEqual(updated.SelectedPartnerships, ws.SelectedPartnerships) || !reflect.DeepEqual(updated.SelectedBuyings, ws.SelectedBuyings) || !reflect.DeepEqual(updated.SelectedStrategies, ws.SelectedStrategies) - infrastructure.CleanupSession(selfID, executionsID, selfID, req) + infrastructure.CleanupSession(executionsID, req) ws = updated if err := pushCheck(changed || !scheduled); err != nil { return @@ -180,9 +170,9 @@ func CheckStreamHandler(w http.ResponseWriter, r *http.Request) { case remotePeerID := <-plannerCh: if remotePeerID == selfPeerID { - // Our own planner updated (caused by our local booking store). - // Just resend the current availability result without rescheduling - // to avoid an infinite loop. + if scheduled { + continue + } result, checkErr := ws.Check(wfID, asap, preemption, req) if checkErr == nil { result.SchedulingID = executionsID @@ -190,24 +180,9 @@ func CheckStreamHandler(w http.ResponseWriter, r *http.Request) { } continue } - // A remote peer's planner changed. Re-check; if our slot is now - // taken and we were already scheduled, reschedule at the new slot. - result, checkErr := ws.Check(wfID, asap, preemption, req) - if checkErr != nil { + if err := pushCheck(scheduled); err != nil { return } - if !result.Available && scheduled { - // Move to the next free slot and reschedule. 
- if result.NextSlot != nil { - ws.Start = *result.NextSlot - } - if err := pushCheck(true); err != nil { - return - } - } else { - result.SchedulingID = executionsID - _ = conn.WriteJSON(result) - } case <-wfCh: if newPeers, err := infrastructure.GetWorkflowPeerIDs(wfID, req); err == nil { @@ -241,8 +216,7 @@ func (o *WorkflowSchedulerController) UnSchedule() { Groups: groups, Admin: true, } - selfID, _ := oclib.GetMySelf() - if err := infrastructure.UnscheduleExecution(executionID, selfID, req); err != nil { + if err := infrastructure.UnscheduleExecution(executionID, req); err != nil { o.Data["json"] = map[string]interface{}{"code": 404, "error": err.Error()} } else { o.Data["json"] = map[string]interface{}{"code": 200, "error": ""} @@ -251,7 +225,7 @@ func (o *WorkflowSchedulerController) UnSchedule() { } // @Title SearchScheduledDraftOrder -// @Description schedule workflow +// @Description search draft order for a workflow // @Param id path string true "id execution" // @Success 200 {workspace} models.workspace // @router /:id/order [get] @@ -265,7 +239,5 @@ func (o *WorkflowSchedulerController) SearchScheduledDraftOrder() { }, } o.Data["json"] = oclib.NewRequestAdmin(orderCollection, nil).Search(filter, "", true) - - //o.Data["json"] = oclib.NewRequest(orderCollection, user, peerID, groups, nil).Search(filter, "", true) o.ServeJSON() } diff --git a/docker_scheduler.json b/docker_scheduler.json index 8d407cb..c46a663 100644 --- a/docker_scheduler.json +++ b/docker_scheduler.json @@ -7,7 +7,7 @@ "KUBERNETES_SERVICE_PORT": "6443", "KUBERNETES_NAMESPACE": "default", "KUBERNETES_IMAGE": "opencloudregistry/oc-monitord", - "KUBE_CA": 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkakNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTnpNeE1qY3dPVFl3SGhjTk1qWXdNekV3TURjeE9ERTJXaGNOTXpZd016QTNNRGN4T0RFMgpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTnpNeE1qY3dPVFl3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFReG81cXQ0MGxEekczRHJKTE1wRVBrd0ZBY1FmbC8vVE1iWjZzemMreHAKbmVzVzRTSTdXK1lWdFpRYklmV2xBMTRaazQvRFlDMHc1YlgxZU94RVVuL0pvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVXBLM2pGK25IRlZSbDcwb3ZRVGZnCmZabGNQZE13Q2dZSUtvWkl6ajBFQXdJRFJ3QXdSQUlnVnkyaUx0Y0xaYm1vTnVoVHdKbU5sWlo3RVlBYjJKNW0KSjJYbG1UbVF5a2tDSUhLbzczaDBkdEtUZTlSa0NXYTJNdStkS1FzOXRFU0tBV0x1emlnYXBHYysKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=", - "KUBE_CERT": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrakNDQVRlZ0F3SUJBZ0lJQUkvSUg2R2Rodm93Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOemN6TVRJM01EazJNQjRYRFRJMk1ETXhNREEzTVRneE5sb1hEVEkzTURNeApNREEzTVRneE5sb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJQTTdBVEZQSmFMMjUrdzAKUU1vZUIxV2hBRW4vWnViM0tSRERrYnowOFhwQWJ2akVpdmdnTkdpdG4wVmVsaEZHamRmNHpBT29Nd1J3M21kbgpYSGtHVDB5alNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCUVZLOThaMEMxcFFyVFJSMGVLZHhIa2o0ejFJREFLQmdncWhrak9QUVFEQWdOSkFEQkcKQWlFQXZYWll6Zk9iSUtlWTRtclNsRmt4ZS80a0E4K01ieDc1UDFKRmNlRS8xdGNDSVFDNnM0ZXlZclhQYmNWSgpxZm5EamkrZ1RacGttN0tWSTZTYTlZN2FSRGFabUE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlCZURDQ0FSMmdBd0lCQWdJQkFEQUtCZ2dxaGtqT1BRUURBakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwClpXNTBMV05oUURFM056TXhNamN3T1RZd0hoY05Nall3TXpFd01EY3hPREUyV2hjTk16WXdNekEzTURjeE9ERTIKV2pBak1TRXdId1lEVlFRRERCaHJNM010WTJ4cFpXNTBMV05oUURFM056TXhNamN3T1RZd1dUQVRCZ2NxaGtqTwpQUUlCQmdncWhrak9QUU1CQndOQ0FBUzV1NGVJbStvVnV1SFI0aTZIOU1kVzlyUHdJbFVPNFhIMEJWaDRUTGNlCkNkMnRBbFVXUW5FakxMdlpDWlVaYTlz
TlhKOUVtWWt5S0dtQWR2TE9FbUVrbzBJd1FEQU9CZ05WSFE4QkFmOEUKQkFNQ0FxUXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WSFE0RUZnUVVGU3ZmR2RBdGFVSzAwVWRIaW5jUgo1SStNOVNBd0NnWUlLb1pJemowRUF3SURTUUF3UmdJaEFMY2xtQnR4TnpSVlBvV2hoVEVKSkM1Z3VNSGsvcFZpCjFvYXJ2UVJxTWRKcUFpRUEyR1dNTzlhZFFYTEQwbFZKdHZMVkc1M3I0M0lxMHpEUUQwbTExMVZyL1MwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==", - "KUBE_DATA": "LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUVkSTRZN3lRU1ZwRGNrblhsQmJEaXBWZHRMWEVsYVBkN3VBZHdBWFFya2xvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFOHpzQk1VOGxvdmJuN0RSQXloNEhWYUVBU2Y5bTV2Y3BFTU9SdlBUeGVrQnUrTVNLK0NBMAphSzJmUlY2V0VVYU4xL2pNQTZnekJIRGVaMmRjZVFaUFRBPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=" + "KUBE_CA": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTnpReU56STVNVEF3SGhjTk1qWXdNekl6TVRNek5URXdXaGNOTXpZd016SXdNVE16TlRFdwpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTnpReU56STVNVEF3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFSSGpYRDVpbnRIYWZWSk5VaDFlRnIxcXBKdFlkUmc5NStKVENEa0tadTIKYjUxRXlKaG1zanRIY3BDUndGL1VGMzlvdzY4TFBUcjBxaUorUHlhQTBLZUtvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVTdWQkNzZVN3ajJ2cmczMFE5UG8vCnV6ZzAvMjR3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUloQUlEOVY2aFlUSS83ZW1hRzU0dDdDWVU3TXFSdDdESUkKNlgvSUwrQ0RLbzlNQWlCdlFEMGJmT0tVWDc4UmRGdUplcEhEdWFUMUExaGkxcWdIUGduM1dZdDBxUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K", + "KUBE_CERT": 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrVENDQVRlZ0F3SUJBZ0lJUU5KbFNJQUJPMDR3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOemMwTWpjeU9URXdNQjRYRFRJMk1ETXlNekV6TXpVeE1Gb1hEVEkzTURNeQpNekV6TXpVeE1Gb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJMY3Uwb2pUbVg4RFhTQkYKSHZwZDZNVEoyTHdXc1lRTmdZVURXRDhTVERIUWlCczlMZ0x5ZTdOMEFvZk85RkNZVW1HamhiaVd3WFVHR3dGTgpUdlRMU2lXalNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCUlJhRW9wQzc5NGJyTHlnR0g5SVhvbDZTSmlFREFLQmdncWhrak9QUVFEQWdOSUFEQkYKQWlFQWhaRUlrSWV3Y1loL1NmTFVCVjE5MW1CYTNRK0J5S2J5eTVlQmpwL3kzeWtDSUIxWTJicTVOZTNLUUU4RAprNnNzeFJrbjJmN0VoWWVRQU1pUlJ2MjIweDNLCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUzTnpReU56STVNVEF3SGhjTk1qWXdNekl6TVRNek5URXdXaGNOTXpZd016SXdNVE16TlRFdwpXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUzTnpReU56STVNVEF3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFTcTdVTC85MEc1ZmVTaE95NjI3eGFZWlM5dHhFdWFoWFQ3Vk5wZkpQSnMKaEdXd2UxOXdtbXZzdlp6dlNPUWFRSzJaMmttN0hSb1IrNlA1YjIyamczbHVvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVVVXaEtLUXUvZUc2eThvQmgvU0Y2Ckpla2lZaEF3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUloQUk3cGxHczFtV20ySDErbjRobDBNTk13RmZzd0o5ZXIKTzRGVkM0QzhwRG44QWlCN3NZMVFwd2M5VkRUeGNZaGxuZzZNUzRXai85K0lHWjJxcy94UStrMjdTQT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K", + "KUBE_DATA": "LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUROZDRnWXd6aVRhK1hwNnFtNVc3SHFzc1JJNkREaUJTbUV2ZHoxZzk3VGxvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFdHk3U2lOT1pmd05kSUVVZStsM294TW5ZdkJheGhBMkJoUU5ZUHhKTU1kQ0lHejB1QXZKNwpzM1FDaDg3MFVKaFNZYU9GdUpiQmRRWWJBVTFPOU10S0pRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=" } \ No newline at end of file diff --git a/go.mod b/go.mod index 7b1c6cb..26aec17 100644 --- a/go.mod +++ 
b/go.mod @@ -3,7 +3,7 @@ module oc-scheduler go 1.25.0 require ( - cloud.o-forge.io/core/oc-lib v0.0.0-20260319080542-c7884f5cde5d + cloud.o-forge.io/core/oc-lib v0.0.0-20260324114937-6d0c78946e8b github.com/beego/beego/v2 v2.3.8 github.com/google/uuid v1.6.0 github.com/robfig/cron v1.2.0 diff --git a/go.sum b/go.sum index 0c21d2b..b02a7c1 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,5 @@ -cloud.o-forge.io/core/oc-lib v0.0.0-20260319071818-28b5b7d39ffe h1:CHiWQAX7j/bMfbytCWGL2mUgSWYoDY4+bFQbCHEfypk= -cloud.o-forge.io/core/oc-lib v0.0.0-20260319071818-28b5b7d39ffe/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA= -cloud.o-forge.io/core/oc-lib v0.0.0-20260319074425-5fca0480af06 h1:5nPNvh1ynFaTB6NBwjhR148iUTLZEyANbqAYQRW7dw0= -cloud.o-forge.io/core/oc-lib v0.0.0-20260319074425-5fca0480af06/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA= -cloud.o-forge.io/core/oc-lib v0.0.0-20260319080542-c7884f5cde5d h1:5hM3GibJw5Uc2Z4aPSMt/3wh7RRY9zxJoeE1lGq0WY0= -cloud.o-forge.io/core/oc-lib v0.0.0-20260319080542-c7884f5cde5d/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA= +cloud.o-forge.io/core/oc-lib v0.0.0-20260324114937-6d0c78946e8b h1:y0rppyzGIQTIyvapWwHZ8t20wMaSaMU6NoZLkMCui8w= +cloud.o-forge.io/core/oc-lib v0.0.0-20260324114937-6d0c78946e8b/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= diff --git a/infrastructure/api.go b/infrastructure/api.go new file mode 100644 index 0000000..5ad2632 --- /dev/null +++ b/infrastructure/api.go @@ -0,0 +1,137 @@ +// Package infrastructure is the public façade for all scheduling sub-services. +// Controllers and main.go import only this package; the sub-packages are +// internal implementation details. 
+package infrastructure + +import ( + "fmt" + "oc-scheduler/infrastructure/execution" + "oc-scheduler/infrastructure/nats" + "oc-scheduler/infrastructure/planner" + "oc-scheduler/infrastructure/scheduler" + "oc-scheduler/infrastructure/scheduling_resources" + "oc-scheduler/infrastructure/session" + "oc-scheduler/infrastructure/utils" + "time" + + "cloud.o-forge.io/core/oc-lib/models/workflow" + "cloud.o-forge.io/core/oc-lib/models/workflow_execution" + "cloud.o-forge.io/core/oc-lib/tools" +) + +// --------------------------------------------------------------------------- +// Type re-exports +// --------------------------------------------------------------------------- + +type WorkflowSchedule = scheduler.WorkflowSchedule +type CheckResult = scheduler.CheckResult + +// --------------------------------------------------------------------------- +// Bootstrap — called from main.go +// --------------------------------------------------------------------------- + +func ListenNATS() { nats.ListenNATS() } +func InitSelfPlanner() { planner.InitPlanner() } +func RecoverDraftExecutions() { execution.RecoverDraft() } +func WatchExecutions() { execution.WatchExecutions() } + +// EmitNATS broadcasts a propagation message via NATS. +func EmitNATS(peerID string, message tools.PropalgationMessage) { + utils.Propalgate(peerID, message) +} + +// --------------------------------------------------------------------------- +// Utilities +// --------------------------------------------------------------------------- + +func GetWorkflowPeerIDs(wfID string, req *tools.APIRequest) ([]string, error) { + return utils.GetWorkflowPeerIDs(wfID, req) +} + +// --------------------------------------------------------------------------- +// Planner subscriptions +// --------------------------------------------------------------------------- + +func SubscribePlannerUpdates(peerIDs []string) (<-chan string, func()) { + return planner.GetPlannerService().SubscribePlannerUpdates(peerIDs...) 
+} + +func SubscribeWorkflowUpdates(wfID string) (<-chan struct{}, func()) { + return planner.GetPlannerService().SubscribeWorkflowUpdates(wfID) +} + +func RequestPlannerRefresh(peerIDs []string, executionsID string) []string { + return planner.GetPlannerService().Refresh(peerIDs, executionsID) +} + +func ReleaseRefreshOwnership(peerIDs []string, executionsID string) { + planner.GetPlannerService().ReleaseRefreshOwnership(peerIDs, executionsID) +} + +// --------------------------------------------------------------------------- +// Session management +// --------------------------------------------------------------------------- + +func UpsertSessionDrafts( + executionsID string, + execs []*workflow_execution.WorkflowExecution, + purchases, bookings []scheduling_resources.SchedulerObject, + req *tools.APIRequest, +) { + svc := session.NewSessionExecutionsService(executionsID) + svc.UpsertSessionDrafts(purchases, bookings, execs, req) +} + +func CleanupSession(executionsID string, req *tools.APIRequest) { + svc := session.NewSessionExecutionsService(executionsID) + svc.CleanupSession(req) +} + +func UnscheduleExecution(executionID string, req *tools.APIRequest) error { + return execution.Unschedule(executionID, req) +} + +// --------------------------------------------------------------------------- +// Schedule confirmation +// --------------------------------------------------------------------------- + +func Schedule( + ws *WorkflowSchedule, + wfID string, + req *tools.APIRequest, +) (*WorkflowSchedule, *workflow.Workflow, []*workflow_execution.WorkflowExecution, error) { + if req == nil { + return ws, nil, nil, fmt.Errorf("no request provided") + } + if ws.UUID == "" { + return ws, nil, nil, fmt.Errorf("no scheduling session: use the Check stream first") + } + + svc := session.NewSessionExecutionsService(ws.UUID) + + executions := svc.LoadSessionExecs() + for _, exec := range executions { + if !exec.ExecDate.IsZero() && exec.ExecDate.Before(time.Now().UTC()) { + 
return ws, nil, nil, fmt.Errorf("execution %s is obsolete (start date in the past)", exec.GetID()) + } + } + + if err := svc.ConfirmSession(req); err != nil { + return ws, nil, nil, fmt.Errorf("confirm session failed: %w", err) + } + + for _, exec := range executions { + go execution.WatchDeadline(exec.GetID(), exec.ExecutionsID, exec.ExecDate, req) + } + + adminReq := &tools.APIRequest{Admin: true} + obj, _, _ := workflow.NewAccessor(req).LoadOne(wfID) + if obj == nil { + return ws, nil, executions, nil + } + wf := obj.(*workflow.Workflow) + ws.Workflow = wf + ws.WorkflowExecution = executions + wf.GetAccessor(adminReq).UpdateOne(wf.Serialize(wf), wf.GetID()) + return ws, wf, executions, nil +} diff --git a/infrastructure/check.go b/infrastructure/check.go deleted file mode 100644 index a83b691..0000000 --- a/infrastructure/check.go +++ /dev/null @@ -1,343 +0,0 @@ -package infrastructure - -import ( - "errors" - "fmt" - "time" - - oclib "cloud.o-forge.io/core/oc-lib" - "cloud.o-forge.io/core/oc-lib/models/booking/planner" - "cloud.o-forge.io/core/oc-lib/models/resources" - "cloud.o-forge.io/core/oc-lib/models/workflow" - "cloud.o-forge.io/core/oc-lib/tools" -) - -// --------------------------------------------------------------------------- -// Slot availability check -// --------------------------------------------------------------------------- - -const ( - checkWindowHours = 5 // how far ahead to scan for a free slot (hours) - checkStepMin = 15 // time increment per scan step (minutes) - // asapBuffer is the minimum lead time added to time.Now() for as_possible - // and WHEN_POSSIBLE bookings. It absorbs NATS propagation + p2p stream - // latency so the ExpectedStartDate never arrives already in the past at - // the destination peer. - asapBuffer = 2 * time.Minute -) - -// CheckResult holds the outcome of a slot availability check. 
-type CheckResult struct { - Available bool `json:"available"` - Start time.Time `json:"start"` - End *time.Time `json:"end,omitempty"` - // NextSlot is the nearest free slot found within checkWindowHours when - // the requested slot is unavailable, or the preferred (conflict-free) slot - // when running in preemption mode. - NextSlot *time.Time `json:"next_slot,omitempty"` - Warnings []string `json:"warnings,omitempty"` - // Preemptible is true when the check was run in preemption mode. - Preemptible bool `json:"preemptible,omitempty"` - // SchedulingID is the session identifier the client must supply to Schedule - // in order to confirm the draft bookings created during this Check session. - SchedulingID string `json:"scheduling_id,omitempty"` -} - -// bookingResource is the minimum info needed to verify a resource against the -// planner cache. -type bookingResource struct { - id string // resource MongoDB _id - peerPID string // peer public PeerID (PID) — PlannerCache key - instanceID string // resolved from WorkflowSchedule.SelectedInstances -} - -// Check verifies that all booking-relevant resources (storage and compute) of -// the given workflow have capacity for the requested time slot. -// -// - asap=true → ignore ws.Start, begin searching from time.Now() -// - preemption → always return Available=true but populate Warnings with -// conflicts and NextSlot with the nearest conflict-free alternative -func (ws *WorkflowSchedule) Check(wfID string, asap bool, preemption bool, request *tools.APIRequest) (*CheckResult, error) { - // 1. Load workflow - obj, code, err := workflow.NewAccessor(request).LoadOne(wfID) - if code != 200 || err != nil { - msg := "could not load workflow " + wfID - if err != nil { - msg += ": " + err.Error() - } - return nil, errors.New(msg) - } - wf := obj.(*workflow.Workflow) - - // 2. Resolve start - start := ws.Start - if asap || start.IsZero() { - start = time.Now().UTC().Add(asapBuffer) - } - - // 3. 
Resolve end – use explicit end/duration or estimate via Planify - end := ws.End - if end == nil { - if ws.DurationS > 0 { - e := start.Add(time.Duration(ws.DurationS * float64(time.Second))) - end = &e - } else { - _, longest, _, _, planErr := wf.Planify( - start, nil, - ws.SelectedInstances, ws.SelectedPartnerships, - ws.SelectedBuyings, ws.SelectedStrategies, - int(ws.BookingMode), request, - ) - if planErr == nil && longest > 0 { - e := start.Add(time.Duration(longest) * time.Second) - end = &e - } - } - } - - // 4. Extract booking-relevant (storage + compute) resources from the graph, - // resolving the selected instance for each resource. - checkables := collectBookingResources(wf, ws.SelectedInstances) - // 5. Check every resource against its peer's planner - unavailable, warnings := checkResourceAvailability(checkables, start, end) - result := &CheckResult{ - Start: start, - End: end, - Warnings: warnings, - } - - // 6. Preemption mode: mark as schedulable regardless of conflicts, but - // surface warnings and the nearest conflict-free alternative. - if preemption { - result.Available = true - result.Preemptible = true - if len(unavailable) > 0 { - result.NextSlot = findNextSlot(checkables, start, end, checkWindowHours) - } - return result, nil - } - - // 7. All resources are free - if len(unavailable) == 0 { - result.Available = true - return result, nil - } - - // 8. Slot unavailable – locate the nearest free slot within the window - result.Available = false - result.NextSlot = findNextSlot(checkables, start, end, checkWindowHours) - return result, nil -} - -// collectBookingResources returns unique storage and compute resources from the -// workflow graph. For each resource the selected instance ID is resolved from -// selectedInstances (the scheduler's SelectedInstances ConfigItem) so the planner -// check targets the exact instance chosen by the user. 
-func collectBookingResources(wf *workflow.Workflow, selectedInstances workflow.ConfigItem) map[string]bookingResource { - if wf.Graph == nil { - return nil - } - seen := map[string]bool{} - result := map[string]bookingResource{} - - // Resolve MongoDB peer _id (DID) → public PeerID (PID) used as PlannerCache key. - peerAccess := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil) - didToPID := map[string]string{} - resolvePID := func(did string) string { - if pid, ok := didToPID[did]; ok { - return pid - } - if data := peerAccess.LoadOne(did); data.Data != nil { - if p := data.ToPeer(); p != nil { - didToPID[did] = p.PeerID - return p.PeerID - } - } - return "" - } - - resolveInstanceID := func(res interface { - GetID() string - GetCreatorID() string - }) string { - idx := selectedInstances.Get(res.GetID()) - switch r := res.(type) { - case *resources.StorageResource: - if inst := r.GetSelectedInstance(idx); inst != nil { - return inst.GetID() - } - case *resources.ComputeResource: - if inst := r.GetSelectedInstance(idx); inst != nil { - return inst.GetID() - } - } - return "" - } - - for _, item := range wf.GetGraphItems(wf.Graph.IsStorage) { - i := item - _, res := i.GetResource() - if res == nil { - continue - } - id := res.GetID() - if seen[id] { - continue - } - pid := resolvePID(res.GetCreatorID()) - if pid == "" { - continue - } - seen[id] = true - result[pid] = bookingResource{ - id: id, - peerPID: pid, - instanceID: resolveInstanceID(res), - } - } - - for _, item := range wf.GetGraphItems(wf.Graph.IsCompute) { - i := item - _, res := i.GetResource() - if res == nil { - continue - } - id := res.GetID() - if seen[id] { - continue - } - pid := resolvePID(res.GetCreatorID()) - if pid == "" { - continue - } - seen[id] = true - result[pid] = bookingResource{ - id: id, - peerPID: pid, - instanceID: resolveInstanceID(res), - } - } - - return result -} - -// checkResourceAvailability returns the IDs of unavailable resources and -// human-readable warning 
messages. -func checkResourceAvailability(res map[string]bookingResource, start time.Time, end *time.Time) (unavailable []string, warnings []string) { - for _, r := range res { - plannerMu.RLock() - entry := PlannerCache[r.peerPID] - plannerMu.RUnlock() - if entry == nil || entry.Planner == nil { - warnings = append(warnings, fmt.Sprintf( - "peer %s planner not in cache for resource %s – assuming available", r.peerPID, r.id)) - continue - } - if !checkInstance(entry.Planner, r.id, r.instanceID, start, end) { - unavailable = append(unavailable, r.id) - warnings = append(warnings, fmt.Sprintf( - "resource %s is not available in [%s – %s]", - r.id, start.Format(time.RFC3339), formatOptTime(end))) - } - } - return -} - -// checkInstance checks availability for the specific instance resolved by the -// scheduler. When instanceID is empty (no instance selected / none resolvable), -// it falls back to checking all instances known in the planner and returns true -// if any one has remaining capacity. Returns true when no capacity is recorded. -func checkInstance(p *planner.Planner, resourceID string, instanceID string, start time.Time, end *time.Time) bool { - if instanceID != "" { - return p.Check(resourceID, instanceID, nil, start, end) - } - // Fallback: accept if any known instance has free capacity - caps, ok := p.Capacities[resourceID] - if !ok || len(caps) == 0 { - return true // no recorded usage → assume free - } - for id := range caps { - if p.Check(resourceID, id, nil, start, end) { - return true - } - } - return false -} - -// findNextSlot scans forward from 'from' in checkStepMin increments for up to -// windowH hours and returns the first candidate start time at which all -// resources are simultaneously free. 
-func findNextSlot(resources map[string]bookingResource, from time.Time, originalEnd *time.Time, windowH int) *time.Time { - duration := time.Hour - if originalEnd != nil { - if d := originalEnd.Sub(from); d > 0 { - duration = d - } - } - step := time.Duration(checkStepMin) * time.Minute - limit := from.Add(time.Duration(windowH) * time.Hour) - for t := from.Add(step); t.Before(limit); t = t.Add(step) { - e := t.Add(duration) - if unavail, _ := checkResourceAvailability(resources, t, &e); len(unavail) == 0 { - return &t - } - } - return nil -} - -func formatOptTime(t *time.Time) string { - if t == nil { - return "open" - } - return t.Format(time.RFC3339) -} - -// GetWorkflowPeerIDs loads the workflow and returns the deduplicated list of -// creator peer IDs for all its storage and compute resources. -// These are the peers whose planners must be watched by a check stream. -func GetWorkflowPeerIDs(wfID string, request *tools.APIRequest) ([]string, error) { - obj, code, err := workflow.NewAccessor(request).LoadOne(wfID) - if code != 200 || err != nil { - msg := "could not load workflow " + wfID - if err != nil { - msg += ": " + err.Error() - } - return nil, errors.New(msg) - } - wf := obj.(*workflow.Workflow) - if wf.Graph == nil { - return nil, nil - } - seen := map[string]bool{} - var peerIDs []string - for _, item := range wf.GetGraphItems(wf.Graph.IsStorage) { - i := item - _, res := i.GetResource() - if res == nil { - continue - } - if id := res.GetCreatorID(); id != "" && !seen[id] { - seen[id] = true - peerIDs = append(peerIDs, id) - } - } - for _, item := range wf.GetGraphItems(wf.Graph.IsCompute) { - i := item - _, res := i.GetResource() - if res == nil { - continue - } - if id := res.GetCreatorID(); id != "" && !seen[id] { - seen[id] = true - peerIDs = append(peerIDs, id) - } - } - realPeersID := []string{} - access := oclib.NewRequestAdmin(oclib.LibDataEnum(tools.PEER), nil) - for _, id := range peerIDs { - if data := access.LoadOne(id); data.Data != nil { 
- realPeersID = append(realPeersID, data.ToPeer().PeerID) - } - } - return realPeersID, nil -} diff --git a/infrastructure/considers.go b/infrastructure/considers.go deleted file mode 100644 index 3f90973..0000000 --- a/infrastructure/considers.go +++ /dev/null @@ -1,197 +0,0 @@ -package infrastructure - -import ( - "encoding/json" - "fmt" - "sync" - - oclib "cloud.o-forge.io/core/oc-lib" - "cloud.o-forge.io/core/oc-lib/models/common/enum" - "cloud.o-forge.io/core/oc-lib/models/utils" - "cloud.o-forge.io/core/oc-lib/models/workflow" - "cloud.o-forge.io/core/oc-lib/models/workflow_execution" - "cloud.o-forge.io/core/oc-lib/tools" - "oc-scheduler/infrastructure/scheduling" -) - -type executionConsidersPayload struct { - ID string `json:"id"` - ExecutionsID string `json:"executions_id"` - ExecutionID string `json:"execution_id"` - PeerIDs []string `json:"peer_ids"` -} - -// --------------------------------------------------------------------------- -// Per-execution mutex map (replaces the global stateMu) -// --------------------------------------------------------------------------- - -var execLocksMu sync.RWMutex -var execLocks = map[string]*sync.Mutex{} // executionID → per-execution mutex - -// RegisterExecLock creates a mutex entry for the execution. Called when a new execution draft is persisted. -func RegisterExecLock(executionID string) { - execLocksMu.Lock() - execLocks[executionID] = &sync.Mutex{} - execLocksMu.Unlock() -} - -// UnregisterExecLock removes the mutex entry. Called on unschedule and execution deletion. -func UnregisterExecLock(executionID string) { - execLocksMu.Lock() - delete(execLocks, executionID) - execLocksMu.Unlock() -} - -// applyConsidersLocal applies the considers update directly for a confirmed -// booking or purchase (bypasses NATS since updateExecutionState resolves the -// execution from the resource itself). 
-func applyConsidersLocal(id string, dt tools.DataType) { - payload, err := json.Marshal(&executionConsidersPayload{ID: id}) - if err != nil { - return - } - updateExecutionState(payload, dt) -} - -// EmitConsidersExecution broadcasts a Considers / WORKFLOW_EXECUTION message to all -// storage and compute peers of wf once the execution has transitioned to SCHEDULED. -// Each receiving peer will use it to confirm (IsDraft=false) their local drafts. -func EmitConsidersExecution(exec *workflow_execution.WorkflowExecution, wf *workflow.Workflow) { - if wf == nil || wf.Graph == nil { - return - } - peerIDs, err := GetWorkflowPeerIDs(wf.GetID(), &tools.APIRequest{Admin: true}) - if err != nil { - return - } - if len(peerIDs) == 0 { - return - } - payload, err := json.Marshal(executionConsidersPayload{ - ID: exec.GetID(), - ExecutionID: exec.GetID(), - ExecutionsID: exec.ExecutionsID, - PeerIDs: peerIDs}) - if err != nil { - return - } - b, err := json.Marshal(tools.PropalgationMessage{ - DataType: int(tools.WORKFLOW_EXECUTION), - Action: tools.PB_CONSIDERS, - Payload: payload, - }) - if err != nil { - return - } - tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{ - FromApp: "oc-scheduler", - Datatype: tools.WORKFLOW_EXECUTION, - Method: int(tools.PROPALGATION_EVENT), - Payload: b, - }) -} - -// updateExecutionState sets BookingsState[id]=true (dt==BOOKING) or -// PurchasesState[id]=true (dt==PURCHASE_RESOURCE) on the target execution. -// payload must be JSON-encoded {"id":"...", "execution_id":"..."}. 
-func updateExecutionState(payload []byte, dt tools.DataType) { - var data executionConsidersPayload - if err := json.Unmarshal(payload, &data); err != nil || data.ID == "" { - return - } - schdata := oclib.NewRequestAdmin(oclib.LibDataEnum(dt), nil).LoadOne(data.ID) - if schdata.Data == nil { - return - } - sch := scheduling.ToSchedulerObject(dt, schdata.Data) - if sch == nil { - return - } - execID := sch.GetExecutionId() - - execLocksMu.RLock() - mu := execLocks[execID] - execLocksMu.RUnlock() - if mu == nil { - fmt.Printf("updateExecutionState: no lock for execution %s, skipping\n", execID) - return - } - mu.Lock() - defer mu.Unlock() - - adminReq := &tools.APIRequest{Admin: true} - res, _, err := workflow_execution.NewAccessor(adminReq).LoadOne(execID) - if err != nil || res == nil { - fmt.Printf("updateExecutionState: could not load execution %s: %v\n", data.ExecutionID, err) - return - } - - exec := res.(*workflow_execution.WorkflowExecution) - fmt.Println("sch.GetExecutionId()", data.ID, exec.BookingsState) - - switch dt { - case tools.BOOKING: - if exec.BookingsState == nil { - exec.BookingsState = map[string]bool{} - } - exec.BookingsState[data.ID] = true - fmt.Println("sch.GetExecutionId()", data.ID) - - case tools.PURCHASE_RESOURCE: - if exec.PurchasesState == nil { - exec.PurchasesState = map[string]bool{} - } - exec.PurchasesState[data.ID] = true - } - allConfirmed := true - for _, st := range exec.BookingsState { - if !st { - allConfirmed = false - break - } - } - for _, st := range exec.PurchasesState { - if !st { - allConfirmed = false - break - } - } - if allConfirmed { - exec.State = enum.SCHEDULED - exec.IsDraft = false - } - if _, _, err := utils.GenericRawUpdateOne(exec, exec.GetID(), workflow_execution.NewAccessor(adminReq)); err != nil { - fmt.Printf("updateExecutionState: could not update execution %s: %v\n", sch.GetExecutionId(), err) - return - } - if allConfirmed { - // Confirm the order and notify all peers that execution is scheduled. 
- go confirmSessionOrder(exec.ExecutionsID, adminReq) - obj, _, err := workflow.NewAccessor(adminReq).LoadOne(exec.WorkflowID) - if err == nil && obj != nil { - go EmitConsidersExecution(exec, obj.(*workflow.Workflow)) - } - } -} - -// confirmExecutionDrafts is called when a Considers/WORKFLOW_EXECUTION message -// is received from oc-discovery, meaning the originating peer has confirmed the -// execution as SCHEDULED. For every booking and purchase ID listed in the -// execution's states, we confirm the local draft (IsDraft=false). -func confirmExecutionDrafts(payload []byte) { - var data executionConsidersPayload - if err := json.Unmarshal(payload, &data); err != nil { - fmt.Printf("confirmExecutionDrafts: could not parse payload: %v\n", err) - return - } - access := oclib.NewRequestAdmin(oclib.LibDataEnum(tools.WORKFLOW_EXECUTION), nil) - d := access.LoadOne(data.ExecutionID) - if exec := d.ToWorkflowExecution(); exec != nil { - for id := range exec.BookingsState { - go confirmResource(id, tools.BOOKING) - } - for id := range exec.PurchasesState { - go confirmResource(id, tools.PURCHASE_RESOURCE) - } - } -} diff --git a/infrastructure/execution/execution.go b/infrastructure/execution/execution.go new file mode 100644 index 0000000..b1a3130 --- /dev/null +++ b/infrastructure/execution/execution.go @@ -0,0 +1,508 @@ +package execution + +import ( + "context" + "encoding/json" + "fmt" + "oc-scheduler/conf" + "oc-scheduler/infrastructure/planner" + "oc-scheduler/infrastructure/scheduling_resources" + infUtils "oc-scheduler/infrastructure/utils" + "strings" + "sync" + "time" + + oclib "cloud.o-forge.io/core/oc-lib" + "cloud.o-forge.io/core/oc-lib/dbs" + "cloud.o-forge.io/core/oc-lib/models/booking" + "cloud.o-forge.io/core/oc-lib/models/common/enum" + "cloud.o-forge.io/core/oc-lib/models/order" + "cloud.o-forge.io/core/oc-lib/models/utils" + "cloud.o-forge.io/core/oc-lib/models/workflow" + "cloud.o-forge.io/core/oc-lib/models/workflow_execution" + 
"cloud.o-forge.io/core/oc-lib/tools" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +// --------------------------------------------------------------------------- +// Global execution lock registry +// --------------------------------------------------------------------------- + +var execLocksMu sync.RWMutex +var execLocks = map[string]*sync.Mutex{} + +func RegisterExecLock(executionID string) { + execLocksMu.Lock() + execLocks[executionID] = &sync.Mutex{} + execLocksMu.Unlock() +} + +func UnregisterExecLock(executionID string) { + execLocksMu.Lock() + delete(execLocks, executionID) + execLocksMu.Unlock() +} + +func GetExecLock(executionID string) *sync.Mutex { + execLocksMu.RLock() + mu := execLocks[executionID] + execLocksMu.RUnlock() + return mu +} + +// --------------------------------------------------------------------------- +// Considers payload +// --------------------------------------------------------------------------- + +type ConsidersPayload struct { + ID string `json:"id"` + ExecutionsID string `json:"executions_id"` + ExecutionID string `json:"execution_id"` + PeerIDs []string `json:"peer_ids"` +} + +// --------------------------------------------------------------------------- +// Execution state machine — considers +// --------------------------------------------------------------------------- + +func UpdateExecutionState(payload []byte, dt tools.DataType) { + var data ConsidersPayload + if err := json.Unmarshal(payload, &data); err != nil || data.ID == "" { + return + } + schdata := oclib.NewRequestAdmin(oclib.LibDataEnum(dt), nil).LoadOne(data.ID) + if schdata.Data == nil { + return + } + sch := scheduling_resources.ToSchedulerObject(dt, schdata.Data) + if sch == nil { + return + } + execID := sch.GetExecutionId() + + mu := GetExecLock(execID) + if mu == nil { + fmt.Printf("UpdateExecutionState: no lock for execution %s, skipping\n", execID) + return + } + mu.Lock() + defer mu.Unlock() + + adminReq := &tools.APIRequest{Admin: true} + res, 
_, err := workflow_execution.NewAccessor(adminReq).LoadOne(execID) + if err != nil || res == nil { + fmt.Printf("UpdateExecutionState: could not load execution %s: %v\n", execID, err) + return + } + exec := res.(*workflow_execution.WorkflowExecution) + + switch dt { + case tools.BOOKING: + if exec.BookingsState == nil { + exec.BookingsState = map[string]bool{} + } + exec.BookingsState[data.ID] = true + case tools.PURCHASE_RESOURCE: + if exec.PurchasesState == nil { + exec.PurchasesState = map[string]bool{} + } + exec.PurchasesState[data.ID] = true + } + + allConfirmed := true + for _, st := range exec.BookingsState { + if !st { + allConfirmed = false + break + } + } + if allConfirmed { + for _, st := range exec.PurchasesState { + if !st { + allConfirmed = false + break + } + } + } + if allConfirmed { + exec.State = enum.SCHEDULED + exec.IsDraft = false + } + if _, _, err := utils.GenericRawUpdateOne(exec, exec.GetID(), workflow_execution.NewAccessor(adminReq)); err != nil { + fmt.Printf("UpdateExecutionState: could not update execution %s: %v\n", execID, err) + return + } + if allConfirmed { + go confirmSessionOrder(exec.ExecutionsID, adminReq) + obj, _, err := workflow.NewAccessor(adminReq).LoadOne(exec.WorkflowID) + if err == nil && obj != nil { + go EmitConsidersExecution(exec, obj.(*workflow.Workflow)) + } + } +} + +func confirmSessionOrder(executionsID string, adminReq *tools.APIRequest) { + results, _, _ := order.NewAccessor(adminReq).Search( + &dbs.Filters{And: map[string][]dbs.Filter{ + "executions_id": {{Operator: dbs.EQUAL.String(), Value: executionsID}}, + }}, "", true) + for _, obj := range results { + if o, ok := obj.(*order.Order); ok { + o.IsDraft = false + utils.GenericRawUpdateOne(o, o.GetID(), order.NewAccessor(adminReq)) + } + } +} + +func ConfirmExecutionDrafts(payload []byte) { + var data ConsidersPayload + if err := json.Unmarshal(payload, &data); err != nil { + fmt.Printf("ConfirmExecutionDrafts: could not parse payload: %v\n", err) + return 
+ } + d := oclib.NewRequestAdmin(oclib.LibDataEnum(tools.WORKFLOW_EXECUTION), nil).LoadOne(data.ExecutionID) + if exec := d.ToWorkflowExecution(); exec != nil { + for id := range exec.BookingsState { + go scheduling_resources.Confirm(id, tools.BOOKING) + } + for id := range exec.PurchasesState { + go scheduling_resources.Confirm(id, tools.PURCHASE_RESOURCE) + } + } +} + +func EmitConsidersExecution(exec *workflow_execution.WorkflowExecution, wf *workflow.Workflow) { + if wf == nil || wf.Graph == nil { + return + } + peerIDs, err := infUtils.GetWorkflowPeerIDs(wf.GetID(), &tools.APIRequest{Admin: true}) + if err != nil || len(peerIDs) == 0 { + return + } + payload, err := json.Marshal(ConsidersPayload{ + ID: exec.GetID(), + ExecutionID: exec.GetID(), + ExecutionsID: exec.ExecutionsID, + PeerIDs: peerIDs, + }) + if err != nil { + return + } + b, err := json.Marshal(tools.PropalgationMessage{ + DataType: int(tools.WORKFLOW_EXECUTION), + Action: tools.PB_CONSIDERS, + Payload: payload, + }) + if err != nil { + return + } + tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{ + FromApp: "oc-scheduler", + Datatype: tools.WORKFLOW_EXECUTION, + Method: int(tools.PROPALGATION_EVENT), + Payload: b, + }) +} + +// --------------------------------------------------------------------------- +// Deadline watchers +// --------------------------------------------------------------------------- + +func WatchDeadline(executionID string, ns string, execDate time.Time, request *tools.APIRequest) { + delay := time.Until(execDate.UTC().Add(-1 * time.Minute)) + if delay <= 0 { + go handleDeadline(executionID, ns, request) + return + } + time.AfterFunc(delay, func() { handleDeadline(executionID, ns, request) }) +} + +func handleDeadline(executionID string, ns string, request *tools.APIRequest) { + res, _, err := workflow_execution.NewAccessor(&tools.APIRequest{Admin: true}).LoadOne(executionID) + if err != nil || res == nil { + fmt.Printf("handleDeadline: execution 
%s not found\n", executionID) + return + } + adminReq := &tools.APIRequest{Admin: true} + exec := res.(*workflow_execution.WorkflowExecution) + if exec.IsDraft { + Unschedule(executionID, request) + workflow_execution.NewAccessor(adminReq).DeleteOne(executionID) + fmt.Printf("handleDeadline: purged draft execution %s\n", executionID) + return + } + if serv, err := tools.NewKubernetesService( + conf.GetConfig().KubeHost+":"+conf.GetConfig().KubePort, + conf.GetConfig().KubeCA, conf.GetConfig().KubeCert, conf.GetConfig().KubeData); err != nil { + fmt.Printf("handleDeadline: k8s init failed for %s: %v\n", executionID, err) + } else if err := serv.ProvisionExecutionNamespace(context.Background(), ns); err != nil && + !strings.Contains(err.Error(), "already exists") { + fmt.Printf("handleDeadline: failed to provision namespace %s: %v\n", ns, err) + } + go watchEnd(executionID, ns, exec.EndDate, exec.ExecDate) +} + +func watchEnd(executionID string, ns string, endDate *time.Time, execDate time.Time) { + var end time.Time + if endDate != nil { + end = *endDate + } else { + end = execDate.UTC().Add(5 * time.Minute) + } + fire := func() { + serv, err := tools.NewKubernetesService( + conf.GetConfig().KubeHost+":"+conf.GetConfig().KubePort, + conf.GetConfig().KubeCA, conf.GetConfig().KubeCert, conf.GetConfig().KubeData) + if err != nil { + fmt.Printf("watchEnd: k8s init failed for %s: %v\n", executionID, err) + return + } + if err := serv.TeardownExecutionNamespace(context.Background(), ns); err != nil { + fmt.Printf("watchEnd: failed to teardown namespace %s: %v\n", ns, err) + } + } + if delay := time.Until(end.UTC()); delay <= 0 { + go fire() + } else { + time.AfterFunc(delay, fire) + } +} + +// --------------------------------------------------------------------------- +// Unschedule / Recovery +// --------------------------------------------------------------------------- + +func Unschedule(executionID string, request *tools.APIRequest) error { + adminReq := 
&tools.APIRequest{Admin: true} + res, _, err := workflow_execution.NewAccessor(adminReq).LoadOne(executionID) + if err != nil || res == nil { + return fmt.Errorf("execution %s not found: %w", executionID, err) + } + exec := res.(*workflow_execution.WorkflowExecution) + for _, byResource := range exec.PeerBookByGraph { + for _, bookingIDs := range byResource { + for _, bkID := range bookingIDs { + bkRes, _, loadErr := booking.NewAccessor(adminReq).LoadOne(bkID) + if loadErr != nil || bkRes == nil { + continue + } + scheduling_resources.GetService().Delete( + tools.BOOKING, + scheduling_resources.ToSchedulerObject(tools.BOOKING, bkRes), + request, + ) + } + } + } + workflow_execution.NewAccessor(adminReq).DeleteOne(executionID) + UnregisterExecLock(executionID) + return nil +} + +func RecoverDraft() { + adminReq := &tools.APIRequest{Admin: true} + results, _, _ := workflow_execution.NewAccessor(adminReq).Search(nil, "*", true) + for _, obj := range results { + exec, ok := obj.(*workflow_execution.WorkflowExecution) + if !ok { + continue + } + RegisterExecLock(exec.GetID()) + go WatchDeadline(exec.GetID(), exec.ExecutionsID, exec.ExecDate, adminReq) + } + fmt.Printf("RecoverDraft: recovered %d executions\n", len(results)) +} + +// --------------------------------------------------------------------------- +// NATS workflow lifecycle handlers +// --------------------------------------------------------------------------- + +func HandleWorkflowStarted(resp tools.NATSResponse) { + var evt tools.WorkflowLifecycleEvent + if err := json.Unmarshal(resp.Payload, &evt); err != nil { + return + } + adminReq := &tools.APIRequest{Admin: true} + res, _, err := workflow_execution.NewAccessor(adminReq).LoadOne(evt.ExecutionID) + if err != nil || res == nil { + return + } + exec := res.(*workflow_execution.WorkflowExecution) + exec.State = enum.STARTED + if evt.RealStart != nil { + exec.ExecDate = *evt.RealStart + } + utils.GenericRawUpdateOne(exec, exec.GetID(), 
workflow_execution.NewAccessor(adminReq)) +} + +func HandleWorkflowDone(resp tools.NATSResponse) { + var evt tools.WorkflowLifecycleEvent + if err := json.Unmarshal(resp.Payload, &evt); err != nil { + return + } + adminReq := &tools.APIRequest{Admin: true} + res, _, err := workflow_execution.NewAccessor(adminReq).LoadOne(evt.ExecutionID) + if err != nil || res == nil { + return + } + exec := res.(*workflow_execution.WorkflowExecution) + exec.State = enum.BookingStatus(evt.State) + if evt.RealEnd != nil { + exec.EndDate = evt.RealEnd + } + utils.GenericRawUpdateOne(exec, exec.GetID(), workflow_execution.NewAccessor(adminReq)) + for _, step := range evt.Steps { + applyStepToBooking(step, adminReq) + } + self, err := oclib.GetMySelf() + if err == nil && self != nil { + go planner.GetPlannerService().RefreshSelf(self.PeerID, adminReq) + } +} + +func HandleWorkflowStepDone(resp tools.NATSResponse) { + var evt tools.WorkflowLifecycleEvent + if err := json.Unmarshal(resp.Payload, &evt); err != nil || evt.BookingID == "" { + return + } + adminReq := &tools.APIRequest{Admin: true} + res, _, err := booking.NewAccessor(adminReq).LoadOne(evt.BookingID) + if err != nil || res == nil { + return + } + bk := res.(*booking.Booking) + bk.State = enum.BookingStatus(evt.State) + if evt.RealStart != nil { + bk.RealStartDate = evt.RealStart + } + if evt.RealEnd != nil { + bk.RealEndDate = evt.RealEnd + } + utils.GenericRawUpdateOne(bk, bk.GetID(), booking.NewAccessor(adminReq)) + switch bk.State { + case enum.SUCCESS, enum.FAILURE, enum.FORGOTTEN, enum.CANCELLED: + self, err := oclib.GetMySelf() + if err == nil && self != nil { + go planner.GetPlannerService().RefreshSelf(self.PeerID, adminReq) + } + } +} + +func applyStepToBooking(step tools.StepMetric, adminReq *tools.APIRequest) { + res, _, err := booking.NewAccessor(adminReq).LoadOne(step.BookingID) + if err != nil || res == nil { + return + } + bk := res.(*booking.Booking) + switch bk.State { + case enum.SUCCESS, enum.FAILURE, 
enum.FORGOTTEN, enum.CANCELLED: + return + } + bk.State = enum.BookingStatus(step.State) + if step.RealStart != nil { + bk.RealStartDate = step.RealStart + } + if step.RealEnd != nil { + bk.RealEndDate = step.RealEnd + } + utils.GenericRawUpdateOne(bk, bk.GetID(), booking.NewAccessor(adminReq)) +} + +// --------------------------------------------------------------------------- +// Watchdog — stale execution safety net +// --------------------------------------------------------------------------- + +var processedExecutions sync.Map + +var terminalExecStates = map[enum.BookingStatus]bool{ + enum.SUCCESS: true, enum.FAILURE: true, enum.FORGOTTEN: true, enum.CANCELLED: true, +} + +func WatchExecutions() { + logger := oclib.GetLogger() + logger.Info().Msg("ExecutionWatchdog: started") + ticker := time.NewTicker(time.Minute) + defer ticker.Stop() + for range ticker.C { + if err := scanStaleExecutions(); err != nil { + logger.Error().Msg("ExecutionWatchdog: " + err.Error()) + } + } +} + +func scanStaleExecutions() error { + myself, err := oclib.GetMySelf() + if err != nil { + return fmt.Errorf("could not resolve local peer: %w", err) + } + deadline := time.Now().UTC().Add(-time.Minute) + res := oclib.NewRequest(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), "", myself.GetID(), []string{}, nil). 
+ Search(&dbs.Filters{And: map[string][]dbs.Filter{ + "execution_date": {{Operator: dbs.LTE.String(), Value: primitive.NewDateTimeFromTime(deadline)}}, + }}, "", false) + if res.Err != "" { + return fmt.Errorf("stale execution search failed: %s", res.Err) + } + for _, dbo := range res.Data { + if exec, ok := dbo.(*workflow_execution.WorkflowExecution); ok { + go emitExecutionFailure(exec) + } + } + return nil +} + +func emitExecutionFailure(exec *workflow_execution.WorkflowExecution) { + logger := oclib.GetLogger() + if _, done := processedExecutions.Load(exec.GetID()); done { + return + } + if terminalExecStates[exec.State] { + processedExecutions.Store(exec.GetID(), struct{}{}) + return + } + now := time.Now().UTC() + steps := make([]tools.StepMetric, 0) + for _, byGraph := range exec.PeerBookByGraph { + for _, bookingIDs := range byGraph { + for _, bookingID := range bookingIDs { + payload, err := json.Marshal(tools.WorkflowLifecycleEvent{ + ExecutionID: exec.GetID(), + ExecutionsID: exec.ExecutionsID, + BookingID: bookingID, + State: enum.FAILURE.EnumIndex(), + RealEnd: &now, + }) + if err != nil { + continue + } + tools.NewNATSCaller().SetNATSPub(tools.WORKFLOW_STEP_DONE_EVENT, tools.NATSResponse{ + FromApp: "oc-scheduler-watchdog", + Method: int(tools.WORKFLOW_STEP_DONE_EVENT), + Payload: payload, + }) + steps = append(steps, tools.StepMetric{ + BookingID: bookingID, + State: enum.FAILURE.EnumIndex(), + RealEnd: &now, + }) + } + } + } + donePayload, err := json.Marshal(tools.WorkflowLifecycleEvent{ + ExecutionID: exec.GetID(), + ExecutionsID: exec.ExecutionsID, + State: enum.FAILURE.EnumIndex(), + RealEnd: &now, + Steps: steps, + }) + if err == nil { + tools.NewNATSCaller().SetNATSPub(tools.WORKFLOW_DONE_EVENT, tools.NATSResponse{ + FromApp: "oc-scheduler-watchdog", + Method: int(tools.WORKFLOW_DONE_EVENT), + Payload: donePayload, + }) + } + logger.Info().Msgf("ExecutionWatchdog: execution %s stale → emitting FAILURE (%d bookings)", + exec.GetID(), 
len(steps)) + processedExecutions.Store(exec.GetID(), struct{}{}) +} diff --git a/infrastructure/nats.go b/infrastructure/nats.go deleted file mode 100644 index 19cf563..0000000 --- a/infrastructure/nats.go +++ /dev/null @@ -1,75 +0,0 @@ -package infrastructure - -import ( - "encoding/json" - "fmt" - - "cloud.o-forge.io/core/oc-lib/models/booking" - "cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource" - "cloud.o-forge.io/core/oc-lib/models/utils" - "cloud.o-forge.io/core/oc-lib/tools" -) - -// --------------------------------------------------------------------------- -// NATS emission -// --------------------------------------------------------------------------- - -func EmitNATS(peerID string, message tools.PropalgationMessage) { - // PB_CLOSE_PLANNER: notify local watchers so streams re-evaluate. - // Cache mutations (eviction or ownership reset) are the caller's - // responsibility — see evictAfter and ReleaseRefreshOwnership. - if message.Action == tools.PB_CLOSE_PLANNER { - notifyPlannerWatchers(peerID) - } - b, _ := json.Marshal(message) - tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{ - FromApp: "oc-scheduler", - Datatype: -1, - Method: int(tools.PROPALGATION_EVENT), - Payload: b, - }) -} - -// --------------------------------------------------------------------------- -// NATS listeners -// --------------------------------------------------------------------------- - -func ListenNATS() { - tools.NewNATSCaller().ListenNats(map[tools.NATSMethod]func(tools.NATSResponse){ - tools.PLANNER_EXECUTION: handlePlannerExecution, - tools.CONSIDERS_EVENT: handleConsidersEvent, - tools.REMOVE_RESOURCE: handleRemoveResource, - tools.CREATE_RESOURCE: handleCreateResource, - tools.CONFIRM_EVENT: handleConfirm, - }) -} - -// --------------------------------------------------------------------------- -// Draft timeout -// --------------------------------------------------------------------------- - -// draftTimeout deletes a 
booking or purchase resource if it is still a draft -// after the 10-minute confirmation window has elapsed. -func draftTimeout(id string, dt tools.DataType) { - adminReq := &tools.APIRequest{Admin: true} - var res utils.DBObject - var loadErr error - switch dt { - case tools.BOOKING: - res, _, loadErr = booking.NewAccessor(adminReq).LoadOne(id) - case tools.PURCHASE_RESOURCE: - res, _, loadErr = purchase_resource.NewAccessor(adminReq).LoadOne(id) - default: - return - } - if loadErr != nil || res == nil || !res.IsDrafted() { - return - } - switch dt { - case tools.BOOKING: - booking.NewAccessor(adminReq).DeleteOne(id) - case tools.PURCHASE_RESOURCE: - purchase_resource.NewAccessor(adminReq).DeleteOne(id) - } - fmt.Printf("draftTimeout: %s %s deleted (still draft after 10 min)\n", dt.String(), id) -} diff --git a/infrastructure/nats/nats.go b/infrastructure/nats/nats.go new file mode 100644 index 0000000..46edabd --- /dev/null +++ b/infrastructure/nats/nats.go @@ -0,0 +1,23 @@ +package nats + +import ( + "oc-scheduler/infrastructure/execution" + "oc-scheduler/infrastructure/planner" + + "cloud.o-forge.io/core/oc-lib/tools" +) + +// ListenNATS registers all NATS event handlers and starts listening. +// Each handler is a thin router that delegates to the appropriate service. 
+func ListenNATS() { + tools.NewNATSCaller().ListenNats(map[tools.NATSMethod]func(tools.NATSResponse){ + tools.PLANNER_EXECUTION: planner.GetPlannerService().HandleStore, + tools.CONSIDERS_EVENT: handleConsidersEvent, + tools.REMOVE_RESOURCE: handleRemoveResource, + tools.CREATE_RESOURCE: handleCreateResource, + tools.CONFIRM_EVENT: handleConfirm, + tools.WORKFLOW_STARTED_EVENT: execution.HandleWorkflowStarted, + tools.WORKFLOW_STEP_DONE_EVENT: execution.HandleWorkflowStepDone, + tools.WORKFLOW_DONE_EVENT: execution.HandleWorkflowDone, + }) +} diff --git a/infrastructure/nats/nats_handlers.go b/infrastructure/nats/nats_handlers.go new file mode 100644 index 0000000..765164a --- /dev/null +++ b/infrastructure/nats/nats_handlers.go @@ -0,0 +1,87 @@ +package nats + +import ( + "encoding/json" + "oc-scheduler/infrastructure/execution" + "oc-scheduler/infrastructure/planner" + "oc-scheduler/infrastructure/scheduling_resources" + + "cloud.o-forge.io/core/oc-lib/models/booking" + "cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource" + "cloud.o-forge.io/core/oc-lib/models/workflow" + "cloud.o-forge.io/core/oc-lib/tools" +) + +// handleConfirm processes a CONFIRM_EVENT: sets IsDraft=false on the resource. +func handleConfirm(resp tools.NATSResponse) { + scheduling_resources.Confirm(string(resp.Payload), resp.Datatype) +} + +// handleConsidersEvent routes CONSIDERS_EVENT to the execution service. +func handleConsidersEvent(resp tools.NATSResponse) { + switch resp.Datatype { + case tools.BOOKING, tools.PURCHASE_RESOURCE: + execution.UpdateExecutionState(resp.Payload, resp.Datatype) + case tools.WORKFLOW_EXECUTION: + execution.ConfirmExecutionDrafts(resp.Payload) + } +} + +// handleRemoveResource routes REMOVE_RESOURCE to the appropriate service. 
+func handleRemoveResource(resp tools.NATSResponse) { + adminReq := &tools.APIRequest{Admin: true} + switch resp.Datatype { + case tools.WORKFLOW: + var wf workflow.Workflow + if err := json.Unmarshal(resp.Payload, &wf); err != nil { + return + } + planner.GetPlannerService().NotifyWorkflow(wf.GetID()) + case tools.BOOKING: + var p scheduling_resources.RemoveResourcePayload + if err := json.Unmarshal(resp.Payload, &p); err != nil { + return + } + scheduling_resources.GetService().HandleRemoveBooking(p, adminReq) + case tools.PURCHASE_RESOURCE: + var p scheduling_resources.RemoveResourcePayload + if err := json.Unmarshal(resp.Payload, &p); err != nil { + return + } + scheduling_resources.GetService().HandleRemovePurchase(p, adminReq) + } +} + +// handleCreateResource routes CREATE_RESOURCE to the appropriate service. +func handleCreateResource(resp tools.NATSResponse) { + adminReq := &tools.APIRequest{Admin: true} + switch resp.Datatype { + case tools.WORKFLOW: + var wf workflow.Workflow + if err := json.Unmarshal(resp.Payload, &wf); err != nil { + return + } + planner.GetPlannerService().Broadcast(&wf) + planner.GetPlannerService().NotifyWorkflow(wf.GetID()) + case tools.BOOKING: + var bk booking.Booking + if err := json.Unmarshal(resp.Payload, &bk); err != nil { + return + } + needsConsiders := scheduling_resources.GetService().HandleCreateBooking(&bk, adminReq) + if needsConsiders { + payload, _ := json.Marshal(execution.ConsidersPayload{ID: bk.GetID()}) + execution.UpdateExecutionState(payload, tools.BOOKING) + } + case tools.PURCHASE_RESOURCE: + var pr purchase_resource.PurchaseResource + if err := json.Unmarshal(resp.Payload, &pr); err != nil { + return + } + needsConsiders := scheduling_resources.GetService().HandleCreatePurchase(&pr, adminReq) + if needsConsiders { + payload, _ := json.Marshal(execution.ConsidersPayload{ID: pr.GetID()}) + execution.UpdateExecutionState(payload, tools.PURCHASE_RESOURCE) + } + } +} diff --git a/infrastructure/nats_handlers.go 
b/infrastructure/nats_handlers.go deleted file mode 100644 index 2bc3256..0000000 --- a/infrastructure/nats_handlers.go +++ /dev/null @@ -1,248 +0,0 @@ -package infrastructure - -import ( - "encoding/json" - "fmt" - "time" - - oclib "cloud.o-forge.io/core/oc-lib" - "cloud.o-forge.io/core/oc-lib/models/booking" - "cloud.o-forge.io/core/oc-lib/models/booking/planner" - "cloud.o-forge.io/core/oc-lib/models/common/enum" - "cloud.o-forge.io/core/oc-lib/models/peer" - "cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource" - "cloud.o-forge.io/core/oc-lib/models/utils" - "cloud.o-forge.io/core/oc-lib/models/workflow" - "cloud.o-forge.io/core/oc-lib/tools" -) - -func handleConfirm(resp tools.NATSResponse) { - confirmResource(string(resp.Payload), resp.Datatype) -} - -func handlePlannerExecution(resp tools.NATSResponse) { - m := map[string]interface{}{} - p := planner.Planner{} - if err := json.Unmarshal(resp.Payload, &m); err != nil { - return - } - if err := json.Unmarshal(resp.Payload, &p); err != nil { - return - } - storePlanner(fmt.Sprintf("%v", m["peer_id"]), &p) -} - -func handleConsidersEvent(resp tools.NATSResponse) { - fmt.Println("CONSIDERS_EVENT", resp.Datatype) - switch resp.Datatype { - case tools.BOOKING, tools.PURCHASE_RESOURCE: - fmt.Println("updateExecutionState", resp.Datatype) - updateExecutionState(resp.Payload, resp.Datatype) - case tools.WORKFLOW_EXECUTION: - confirmExecutionDrafts(resp.Payload) - } -} - -func handleRemoveResource(resp tools.NATSResponse) { - switch resp.Datatype { - case tools.WORKFLOW: - wf := workflow.Workflow{} - if err := json.Unmarshal(resp.Payload, &wf); err != nil { - return - } - notifyWorkflowWatchers(wf.GetID()) - case tools.BOOKING: - var p removeResourcePayload - if err := json.Unmarshal(resp.Payload, &p); err != nil { - return - } - self, err := oclib.GetMySelf() - if err != nil || self == nil { - return - } - adminReq := &tools.APIRequest{Admin: true} - res, _, loadErr := 
booking.NewAccessor(adminReq).LoadOne(p.ID) - if loadErr != nil || res == nil { - return - } - existing := res.(*booking.Booking) - if existing.SchedulerPeerID != p.SchedulerPeerID || existing.ExecutionsID != p.ExecutionsID { - fmt.Println("ListenNATS REMOVE_RESOURCE booking: auth mismatch, ignoring", p.ID) - return - } - booking.NewAccessor(adminReq).DeleteOne(p.ID) - go refreshSelfPlanner(self.PeerID, adminReq) - case tools.PURCHASE_RESOURCE: - var p removeResourcePayload - if err := json.Unmarshal(resp.Payload, &p); err != nil { - return - } - adminReq := &tools.APIRequest{Admin: true} - res, _, loadErr := purchase_resource.NewAccessor(adminReq).LoadOne(p.ID) - if loadErr != nil || res == nil { - return - } - existing := res.(*purchase_resource.PurchaseResource) - if existing.SchedulerPeerID != p.SchedulerPeerID || existing.ExecutionsID != p.ExecutionsID { - fmt.Println("ListenNATS REMOVE_RESOURCE purchase: auth mismatch, ignoring", p.ID) - return - } - purchase_resource.NewAccessor(adminReq).DeleteOne(p.ID) - } -} - -func handleCreateBooking(bk *booking.Booking, self *peer.Peer, adminReq *tools.APIRequest) { - // Upsert: if a booking with this ID already exists, verify auth and update. - if existing, _, loadErr := booking.NewAccessor(adminReq).LoadOne(bk.GetID()); loadErr == nil && existing != nil { - prev := existing.(*booking.Booking) - if prev.SchedulerPeerID != bk.SchedulerPeerID || prev.ExecutionsID != bk.ExecutionsID { - fmt.Println("ListenNATS CREATE_RESOURCE booking upsert: auth mismatch, ignoring", bk.GetID()) - return - } - if !prev.IsDrafted() && bk.IsDraft { - // Already confirmed, refuse downgrade. - return - } - // Expired check only on confirmation (IsDraft→false). 
- if !bk.IsDraft && !prev.ExpectedStartDate.IsZero() && prev.ExpectedStartDate.Before(time.Now().UTC()) { - fmt.Println("ListenNATS CREATE_RESOURCE booking: expired, deleting", bk.GetID()) - booking.NewAccessor(adminReq).DeleteOne(bk.GetID()) - return - } - if _, _, err := utils.GenericRawUpdateOne(bk, bk.GetID(), booking.NewAccessor(adminReq)); err != nil { - fmt.Println("ListenNATS CREATE_RESOURCE booking update failed:", err) - return - } - go refreshSelfPlanner(self.PeerID, adminReq) - if !bk.IsDraft { - go applyConsidersLocal(bk.GetID(), tools.BOOKING) - } - return - } - // New booking: standard create flow. - if !bk.ExpectedStartDate.IsZero() && bk.ExpectedStartDate.Before(time.Now().UTC()) { - fmt.Println("ListenNATS: booking start date is in the past, discarding") - return - } - plannerMu.RLock() - selfEntry := PlannerCache[self.PeerID] - plannerMu.RUnlock() - if selfEntry != nil && selfEntry.Planner != nil && !checkInstance(selfEntry.Planner, bk.ResourceID, bk.InstanceID, bk.ExpectedStartDate, bk.ExpectedEndDate) { - fmt.Println("ListenNATS: booking conflicts with local planner, discarding") - return - } - bk.IsDraft = true - stored, _, err := booking.NewAccessor(adminReq).StoreOne(bk) - if err != nil { - fmt.Println("ListenNATS: could not store booking:", err) - return - } - storedID := stored.GetID() - go refreshSelfPlanner(self.PeerID, adminReq) - time.AfterFunc(10*time.Minute, func() { draftTimeout(storedID, tools.BOOKING) }) -} - -func handleCreatePurchase(pr *purchase_resource.PurchaseResource, self *peer.Peer, adminReq *tools.APIRequest) { - if pr.DestPeerID != self.GetID() { - return - } - // Upsert: if a purchase with this ID already exists, verify auth and update. 
- if existing, _, loadErr := purchase_resource.NewAccessor(adminReq).LoadOne(pr.GetID()); loadErr == nil && existing != nil { - prev := existing.(*purchase_resource.PurchaseResource) - if prev.SchedulerPeerID != pr.SchedulerPeerID || prev.ExecutionsID != pr.ExecutionsID { - fmt.Println("ListenNATS CREATE_RESOURCE purchase upsert: auth mismatch, ignoring", pr.GetID()) - return - } - if !prev.IsDrafted() && pr.IsDraft { - return - } - if _, _, err := utils.GenericRawUpdateOne(pr, pr.GetID(), purchase_resource.NewAccessor(adminReq)); err != nil { - fmt.Println("ListenNATS CREATE_RESOURCE purchase update failed:", err) - return - } - if !pr.IsDraft { - go applyConsidersLocal(pr.GetID(), tools.PURCHASE_RESOURCE) - } - return - } - // New purchase: standard create flow. - pr.IsDraft = true - stored, _, err := purchase_resource.NewAccessor(adminReq).StoreOne(pr) - if err != nil { - fmt.Println("ListenNATS: could not store purchase:", err) - return - } - storedID := stored.GetID() - time.AfterFunc(10*time.Minute, func() { draftTimeout(storedID, tools.PURCHASE_RESOURCE) }) -} - -func handleCreateResource(resp tools.NATSResponse) { - switch resp.Datatype { - case tools.WORKFLOW: - wf := workflow.Workflow{} - if err := json.Unmarshal(resp.Payload, &wf); err != nil { - return - } - broadcastPlanner(&wf) - notifyWorkflowWatchers(wf.GetID()) - case tools.BOOKING: - var bk booking.Booking - if err := json.Unmarshal(resp.Payload, &bk); err != nil { - return - } - self, err := oclib.GetMySelf() - /*if err != nil || self == nil || bk.DestPeerID != self.GetID() { - return - }*/ - adminReq := &tools.APIRequest{Admin: true} - _ = err - handleCreateBooking(&bk, self, adminReq) - case tools.PURCHASE_RESOURCE: - var pr purchase_resource.PurchaseResource - if err := json.Unmarshal(resp.Payload, &pr); err != nil { - return - } - self, err := oclib.GetMySelf() - if err != nil || self == nil { - return - } - adminReq := &tools.APIRequest{Admin: true} - handleCreatePurchase(&pr, self, 
adminReq) - } -} - -// confirmResource sets IsDraft=false for a booking or purchase resource. -// For bookings it also advances State to SCHEDULED and refreshes the local planner. -func confirmResource(id string, dt tools.DataType) { - adminReq := &tools.APIRequest{Admin: true} - switch dt { - case tools.BOOKING: - res, _, err := booking.NewAccessor(adminReq).LoadOne(id) - if err != nil || res == nil { - fmt.Printf("confirmResource: could not load booking %s: %v\n", id, err) - return - } - bk := res.(*booking.Booking) - bk.IsDraft = false - bk.State = enum.SCHEDULED - if _, _, err := utils.GenericRawUpdateOne(bk, id, booking.NewAccessor(adminReq)); err != nil { - fmt.Printf("confirmResource: could not confirm booking %s: %v\n", id, err) - return - } - self, err := oclib.GetMySelf() - if err == nil && self != nil { - go refreshSelfPlanner(self.PeerID, adminReq) - } - case tools.PURCHASE_RESOURCE: - res, _, err := purchase_resource.NewAccessor(adminReq).LoadOne(id) - if err != nil || res == nil { - fmt.Printf("confirmResource: could not load purchase %s: %v\n", id, err) - return - } - pr := res.(*purchase_resource.PurchaseResource) - pr.IsDraft = false - if _, _, err := utils.GenericRawUpdateOne(pr, id, purchase_resource.NewAccessor(adminReq)); err != nil { - fmt.Printf("confirmResource: could not confirm purchase %s: %v\n", id, err) - } - } -} diff --git a/infrastructure/planner.go b/infrastructure/planner.go deleted file mode 100644 index 2c99e7a..0000000 --- a/infrastructure/planner.go +++ /dev/null @@ -1,353 +0,0 @@ -package infrastructure - -import ( - "encoding/json" - "fmt" - "slices" - "sync" - "time" - - oclib "cloud.o-forge.io/core/oc-lib" - "cloud.o-forge.io/core/oc-lib/models/booking/planner" - "cloud.o-forge.io/core/oc-lib/models/workflow" - "cloud.o-forge.io/core/oc-lib/models/workflow/graph" - "cloud.o-forge.io/core/oc-lib/tools" -) - -const plannerTTL = 24 * time.Hour - -// --------------------------------------------------------------------------- 
-// Planner cache — protected by plannerMu -// --------------------------------------------------------------------------- - -// plannerEntry wraps a planner snapshot with refresh-ownership tracking. -// At most one check session may be the "refresh owner" of a given peer's -// planner at a time: it emits PB_PLANNER to request a fresh snapshot from -// oc-discovery and, on close (clean or forced), emits PB_CLOSE_PLANNER to -// release the stream. Any subsequent session that needs the same peer's -// planner will see Refreshing=true and skip the duplicate request. -type plannerEntry struct { - Planner *planner.Planner - Refreshing bool // true while a PB_PLANNER request is in flight - RefreshOwner string // session UUID that initiated the current refresh -} - -var plannerMu sync.RWMutex -var PlannerCache = map[string]*plannerEntry{} -var plannerAddedAt = map[string]time.Time{} // peerID → first-seen timestamp - -// --------------------------------------------------------------------------- -// Subscriber registries — one keyed by peerID, one by workflowID -// --------------------------------------------------------------------------- - -var subsMu sync.RWMutex -var plannerSubs = map[string][]chan string{} // peerID → channels (deliver peerID) -var workflowSubs = map[string][]chan struct{}{} // workflowID → notification channels - -// subscribePlanners registers interest in planner changes for the given peer IDs. -// The returned channel receives the peerID string (non-blocking) each time any -// of those planners is updated. Call cancel to unregister. -func subscribePlanners(peerIDs []string) (<-chan string, func()) { - ch := make(chan string, 1) - subsMu.Lock() - for _, k := range peerIDs { - plannerSubs[k] = append(plannerSubs[k], ch) - } - subsMu.Unlock() - cancel := func() { - subsMu.Lock() - for _, k := range peerIDs { - subs := plannerSubs[k] - for i, s := range subs { - if s == ch { - plannerSubs[k] = append(subs[:i], subs[i+1:]...) 
- break - } - } - } - subsMu.Unlock() - } - return ch, cancel -} - -// SubscribePlannerUpdates registers interest in planner changes for the given -// peer IDs. The returned channel receives the peerID string (non-blocking) each -// time any of those planners is updated. Call cancel to unregister. -func SubscribePlannerUpdates(peerIDs []string) (<-chan string, func()) { - return subscribePlanners(peerIDs) -} - -// SubscribeWorkflowUpdates registers interest in workflow modifications for the -// given workflow ID. The returned channel is signalled when the workflow changes -// (peer list may have grown or shrunk). Call cancel to unregister. -func SubscribeWorkflowUpdates(wfID string) (<-chan struct{}, func()) { - ch, cancel := subscribe(&subsMu, workflowSubs, []string{wfID}) - return ch, cancel -} - -// subscribe is the generic helper used by the workflow registry. -func subscribe(mu *sync.RWMutex, registry map[string][]chan struct{}, keys []string) (<-chan struct{}, func()) { - ch := make(chan struct{}, 1) - mu.Lock() - for _, k := range keys { - registry[k] = append(registry[k], ch) - } - mu.Unlock() - cancel := func() { - mu.Lock() - for _, k := range keys { - subs := registry[k] - for i, s := range subs { - if s == ch { - registry[k] = append(subs[:i], subs[i+1:]...) 
- break - } - } - } - mu.Unlock() - } - return ch, cancel -} - -func notifyPlannerWatchers(peerID string) { - subsMu.RLock() - subs := plannerSubs[peerID] - subsMu.RUnlock() - for _, ch := range subs { - select { - case ch <- peerID: - default: - } - } -} - -func notifyWorkflowWatchers(wfID string) { - notify(&subsMu, workflowSubs, wfID) -} - -func notify(mu *sync.RWMutex, registry map[string][]chan struct{}, key string) { - mu.RLock() - subs := registry[key] - mu.RUnlock() - for _, ch := range subs { - select { - case ch <- struct{}{}: - default: - } - } -} - -// --------------------------------------------------------------------------- -// Cache helpers -// --------------------------------------------------------------------------- - -// storePlanner inserts or updates the planner snapshot for peerID. -// On first insertion it schedules an automatic eviction after plannerTTL. -// Existing refresh-ownership state (Refreshing / RefreshOwner) is preserved -// so that an in-flight request is not inadvertently reset. -// All subscribers interested in this peer are notified. -func storePlanner(peerID string, p *planner.Planner) { - plannerMu.Lock() - entry := PlannerCache[peerID] - isNew := entry == nil - if isNew { - entry = &plannerEntry{} - PlannerCache[peerID] = entry - plannerAddedAt[peerID] = time.Now().UTC() - go evictAfter(peerID, plannerTTL) - } - entry.Planner = p - plannerMu.Unlock() - notifyPlannerWatchers(peerID) -} - -// evictAfter waits ttl from first insertion then deletes the cache entry and -// emits PB_CLOSE_PLANNER so oc-discovery stops streaming for this peer. -// This is the only path that actually removes an entry from PlannerCache; -// session close (ReleaseRefreshOwnership) only resets ownership state. 
-func evictAfter(peerID string, ttl time.Duration) { - time.Sleep(ttl) - plannerMu.Lock() - _, exists := PlannerCache[peerID] - if exists { - delete(PlannerCache, peerID) - delete(plannerAddedAt, peerID) - } - plannerMu.Unlock() - if exists { - EmitNATS(peerID, tools.PropalgationMessage{Action: tools.PB_CLOSE_PLANNER}) - } -} - -// --------------------------------------------------------------------------- -// Planner refresh / broadcast -// --------------------------------------------------------------------------- - -// RequestPlannerRefresh asks oc-discovery for a fresh planner snapshot for -// each peer in peerIDs. Only the first session to request a given peer becomes -// its "refresh owner": subsequent sessions see Refreshing=true and skip the -// duplicate PB_PLANNER emission. Returns the subset of peerIDs for which this -// session claimed ownership (needed to release on close). -func RequestPlannerRefresh(peerIDs []string, executionsID string) []string { - var owned []string - for _, peerID := range peerIDs { - plannerMu.Lock() - entry := PlannerCache[peerID] - if entry == nil { - entry = &plannerEntry{} - PlannerCache[peerID] = entry - plannerAddedAt[peerID] = time.Now().UTC() - go evictAfter(peerID, plannerTTL) - } - shouldRequest := !entry.Refreshing - if shouldRequest { - entry.Refreshing = true - entry.RefreshOwner = executionsID - } - plannerMu.Unlock() - if shouldRequest { - owned = append(owned, peerID) - if p, err := oclib.GetMySelf(); err == nil && p != nil && p.PeerID == peerID { - // Self peer: generate and cache the planner directly without - // going through NATS / oc-discovery. 
- go refreshSelfPlanner(peerID, &tools.APIRequest{Admin: true}) - } else { - payload, _ := json.Marshal(map[string]any{"peer_id": peerID}) - fmt.Println("PB_PLANNER", peerID) - EmitNATS(peerID, tools.PropalgationMessage{ - Action: tools.PB_PLANNER, - Payload: payload, - }) - } - } - } - return owned -} - -// ReleaseRefreshOwnership is called when a check session closes (clean or -// forced). For each peer this session owns, it resets the refresh state and -// emits PB_CLOSE_PLANNER so oc-discovery stops the planner stream. -// The planner data itself stays in the cache until TTL eviction. -func ReleaseRefreshOwnership(peerIDs []string, executionsID string) { - for _, peerID := range peerIDs { - plannerMu.Lock() - if entry := PlannerCache[peerID]; entry != nil && entry.RefreshOwner == executionsID { - entry.Refreshing = false - entry.RefreshOwner = "" - } - plannerMu.Unlock() - payload, _ := json.Marshal(map[string]any{"peer_id": peerID}) - EmitNATS(peerID, tools.PropalgationMessage{ - Action: tools.PB_CLOSE_PLANNER, - Payload: payload, - }) - } -} - -// broadcastPlanner iterates the storage and compute peers of the given workflow -// and, for each peer not yet in the cache, emits a PB_PLANNER propagation so -// downstream consumers (oc-discovery, other schedulers) refresh their state. -func broadcastPlanner(wf *workflow.Workflow) { - if wf.Graph == nil { - return - } - items := []graph.GraphItem{} - items = append(items, wf.GetGraphItems(wf.Graph.IsStorage)...) - items = append(items, wf.GetGraphItems(wf.Graph.IsCompute)...) 
- - seen := []string{} - for _, item := range items { - i := item - _, res := i.GetResource() - if res == nil { - continue - } - creatorID := res.GetCreatorID() - if slices.Contains(seen, creatorID) { - continue - } - - data := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil).LoadOne(creatorID) - p := data.ToPeer() - if p == nil { - continue - } - - plannerMu.RLock() - cached := PlannerCache[p.PeerID] - plannerMu.RUnlock() - - // Only request if no snapshot and no refresh already in flight. - if cached == nil || (cached.Planner == nil && !cached.Refreshing) { - payload, err := json.Marshal(map[string]interface{}{"peer_id": p.PeerID}) - if err != nil { - continue - } - seen = append(seen, creatorID) - EmitNATS(p.PeerID, tools.PropalgationMessage{ - Action: tools.PB_PLANNER, - Payload: payload, - }) - } - } -} - -// --------------------------------------------------------------------------- -// Self-planner initialisation -// --------------------------------------------------------------------------- - -// InitSelfPlanner bootstraps our own planner entry at startup. -// It waits (with 15-second retries) for our peer record to be present in the -// database before generating the first planner snapshot and broadcasting it -// on PB_PLANNER. This handles the race between oc-scheduler starting before -// oc-peer has fully registered our node. 
-func InitSelfPlanner() { - for { - self, err := oclib.GetMySelf() - if err != nil || self == nil { - fmt.Println("InitSelfPlanner: self peer not found yet, retrying in 15s...") - time.Sleep(15 * time.Second) - continue - } - refreshSelfPlanner(self.PeerID, &tools.APIRequest{Admin: true}) - return - } -} - -// --------------------------------------------------------------------------- -// Self-planner refresh -// --------------------------------------------------------------------------- - -// refreshSelfPlanner regenerates the local planner from the current state of -// the booking DB, stores it in PlannerCache under our own node UUID, and -// broadcasts it on PROPALGATION_EVENT / PB_PLANNER so all listeners (including -// oc-discovery) are kept in sync. -// -// It should be called whenever a booking for our own peer is created, whether -// by direct DB insertion (self-peer routing) or upon receiving a CREATE_RESOURCE -// BOOKING message from oc-discovery. -func refreshSelfPlanner(peerID string, request *tools.APIRequest) { - p, err := planner.GenerateShallow(request) - if err != nil { - fmt.Println("refreshSelfPlanner: could not generate planner:", err) - return - } - - // Update the local cache and notify any waiting CheckStream goroutines. - storePlanner(peerID, p) - - // Broadcast the updated planner so remote peers (and oc-discovery) can - // refresh their view of our availability. 
- type plannerWithPeer struct { - PeerID string `json:"peer_id"` - *planner.Planner - } - plannerPayload, err := json.Marshal(plannerWithPeer{PeerID: peerID, Planner: p}) - if err != nil { - return - } - EmitNATS(peerID, tools.PropalgationMessage{ - Action: tools.PB_PLANNER, - Payload: plannerPayload, - }) -} diff --git a/infrastructure/planner/planner.go b/infrastructure/planner/planner.go new file mode 100644 index 0000000..b6f0607 --- /dev/null +++ b/infrastructure/planner/planner.go @@ -0,0 +1,453 @@ +package planner + +import ( + "encoding/json" + "fmt" + "oc-scheduler/infrastructure/utils" + "slices" + "sync" + "time" + + oclib "cloud.o-forge.io/core/oc-lib" + "cloud.o-forge.io/core/oc-lib/models/booking/planner" + "cloud.o-forge.io/core/oc-lib/models/workflow" + "cloud.o-forge.io/core/oc-lib/models/workflow/graph" + "cloud.o-forge.io/core/oc-lib/tools" +) + +const ( + checkWindowHours = 5 + checkStepMin = 15 // time increment per scan step (minutes) + plannerTTL = 24 * time.Hour +) + +// --------------------------------------------------------------------------- +// Planner cache — protected by plannerMu +// --------------------------------------------------------------------------- + +// plannerEntry wraps a planner snapshot with refresh-ownership tracking. +// At most one check session may be the "refresh owner" of a given peer's +// planner at a time: it emits PB_PLANNER to request a fresh snapshot from +// oc-discovery and, on close (clean or forced), emits PB_CLOSE_PLANNER to +// release the stream. Any subsequent session that needs the same peer's +// planner will see Refreshing=true and skip the duplicate request. 
+type plannerEntry struct { + Planner *planner.Planner + Refreshing bool // true while a PB_PLANNER request is in flight + RefreshOwner string // session UUID that initiated the current refresh +} + +type PlannerService struct { + Mu sync.RWMutex + Cache map[string]*plannerEntry + SubMu sync.RWMutex + Subs map[string][]chan string + AddedAt map[string]time.Time + WorkflowSubMu sync.RWMutex + WorkflowSubs map[string][]chan struct{} +} + +var singleton *PlannerService + +// InitSelfPlanner bootstraps our own planner entry at startup. +// It waits (with 15-second retries) for our peer record to be present in the +// database before generating the first planner snapshot and broadcasting it +// on PB_PLANNER. This handles the race between oc-scheduler starting before +// oc-peer has fully registered our node. +func InitPlanner() { + singleton = &PlannerService{ + AddedAt: map[string]time.Time{}, + Subs: map[string][]chan string{}, + Cache: map[string]*plannerEntry{}, + WorkflowSubs: map[string][]chan struct{}{}, + } + for { + self, err := oclib.GetMySelf() + if err != nil || self == nil { + fmt.Println("InitPlanner: self peer not found yet, retrying in 15s...") + time.Sleep(15 * time.Second) + continue + } + singleton.RefreshSelf(self.PeerID, &tools.APIRequest{Admin: true}) + return + } +} + +func GetPlannerService() *PlannerService { + return singleton +} + +func (s *PlannerService) HandleStore(resp tools.NATSResponse) { + m := map[string]interface{}{} + p := planner.Planner{} + if err := json.Unmarshal(resp.Payload, &m); err != nil { + return + } + if err := json.Unmarshal(resp.Payload, &p); err != nil { + return + } + s.Store(fmt.Sprintf("%v", m["peer_id"]), &p) +} + +// missingPlannerPeers returns the peer IDs from res whose planner is absent +// or not yet populated in PlannerCache. 
+// func missingPlannerPeers(res map[string]bookingResource) []string { +func (s *PlannerService) MissingPeers(res map[string]utils.BookingResource) []string { + var out []string + for _, r := range res { + s.Mu.RLock() + entry := s.Cache[r.PeerPID] + s.Mu.RUnlock() + if entry == nil || entry.Planner == nil { + out = append(out, r.PeerPID) + } + } + return out +} + +func (s *PlannerService) FindDate(wfID string, checkables map[string]utils.BookingResource, start time.Time, end *time.Time, preemption bool, asap bool) (time.Time, *time.Time, bool, bool, []string) { + var unavailable, warnings []string + // 4. Preemption: Planify ran (end is resolved), skip availability check. + if preemption { + return start, end, true, true, warnings + } + // 5b. For any peer whose planner is not yet cached, request it and wait + // briefly so the decision is based on real data rather than a blind + // "assume available". The wait is capped to avoid blocking the caller + // when oc-discovery is unreachable. + s.Fill(checkables, wfID) + + unavailable, warnings = s.checkResourceAvailability(checkables, start, end) + + if len(unavailable) == 0 { + //result.Available = true + return start, end, true, false, warnings + } + + // 6. as_possible: find and commit to the next free slot. + if asap { + next := s.findNextSlot(checkables, start, end, checkWindowHours) + if next != nil { + start = *next + if end != nil { + shifted := next.Add(end.Sub(start)) + end = &shifted + } + return start, end, true, false, warnings + } else { + return start, end, false, false, warnings + } + } + return start, end, false, false, warnings +} + +func (s *PlannerService) Fill(checkables map[string]utils.BookingResource, wfID string) { + if missing := s.MissingPeers(checkables); len(missing) > 0 { + const plannerFetchTimeout = 2 * time.Second + tmpSession := "check-oneshot-" + wfID + ch, cancelSub := SubscribeUpdates(s.Subs, &s.SubMu, missing...) 
+ owned := s.Refresh(missing, tmpSession) + select { + case <-ch: + case <-time.After(plannerFetchTimeout): + } + cancelSub() + s.ReleaseRefreshOwnership(owned, tmpSession) + } +} + +// evictAfter waits ttl from first insertion then deletes the cache entry and +// emits PB_CLOSE_PLANNER so oc-discovery stops streaming for this peer. +// This is the only path that actually removes an entry from PlannerCache; +// session close (ReleaseRefreshOwnership) only resets ownership state. +func (s *PlannerService) EvictAfter(peerID string, ttl time.Duration) { + time.Sleep(ttl) + s.Mu.Lock() + _, exists := s.Cache[peerID] + if exists { + delete(s.Cache, peerID) + delete(s.AddedAt, peerID) + } + s.Mu.Unlock() + if exists { + utils.Notify(&s.SubMu, s.Subs, peerID, peerID) + utils.Propalgate(peerID, tools.PropalgationMessage{Action: tools.PB_CLOSE_PLANNER}) + } +} + +// SubscribePlannerUpdates registers interest in planner changes for the given +// peer IDs. The returned channel receives the peerID string (non-blocking) each +// time any of those planners is updated. Call cancel to unregister. +func SubscribeUpdates[T interface{}](subs map[string][]chan T, mu *sync.RWMutex, updates ...string) (<-chan T, func()) { + ch := make(chan T, 1) + mu.Lock() + for _, k := range updates { + subs[k] = append(subs[k], ch) + } + mu.Unlock() + cancel := func() { + mu.Lock() + for _, k := range updates { + subsk := subs[k] + for i, s := range subsk { + if s == ch { + subs[k] = append(subsk[:i], subsk[i+1:]...) 
+ break + } + } + } + mu.Unlock() + } + return ch, cancel +} + +// --------------------------------------------------------------------------- +// Cache helpers +// --------------------------------------------------------------------------- + +func (s *PlannerService) Store(peerID string, p *planner.Planner) { + s.Mu.Lock() + entry := s.Cache[peerID] + isNew := entry == nil + if isNew { + entry = &plannerEntry{} + s.Cache[peerID] = entry + s.AddedAt[peerID] = time.Now().UTC() + go s.EvictAfter(peerID, plannerTTL) + } + entry.Planner = p + s.Mu.Unlock() + utils.Notify[string](&s.SubMu, s.Subs, peerID, peerID) +} + +// --------------------------------------------------------------------------- +// Planner refresh / broadcast +// --------------------------------------------------------------------------- + +// RequestPlannerRefresh asks oc-discovery for a fresh planner snapshot for +// each peer in peerIDs. Only the first session to request a given peer becomes +// its "refresh owner": subsequent sessions see Refreshing=true and skip the +// duplicate PB_PLANNER emission. Returns the subset of peerIDs for which this +// session claimed ownership (needed to release on close). 
+ +// RequestPlannerRefresh +func (s *PlannerService) Refresh(peerIDs []string, executionsID string) []string { + var owned []string + for _, peerID := range peerIDs { + s.Mu.Lock() + entry := s.Cache[peerID] + if entry == nil { + entry = &plannerEntry{} + s.Cache[peerID] = entry + s.AddedAt[peerID] = time.Now().UTC() + go s.EvictAfter(peerID, plannerTTL) + } + shouldRequest := !entry.Refreshing + if shouldRequest { + entry.Refreshing = true + entry.RefreshOwner = executionsID + } + s.Mu.Unlock() + if shouldRequest { + owned = append(owned, peerID) + if p, err := oclib.GetMySelf(); err == nil && p != nil && p.PeerID == peerID { + go s.RefreshSelf(peerID, &tools.APIRequest{Admin: true}) + } else { + payload, _ := json.Marshal(map[string]any{"peer_id": peerID}) + utils.Propalgate(peerID, tools.PropalgationMessage{ + Action: tools.PB_PLANNER, + Payload: payload, + }) + } + } + } + return owned +} + +// ReleaseRefreshOwnership is called when a check session closes (clean or +// forced). For each peer this session owns, it resets the refresh state and +// emits PB_CLOSE_PLANNER so oc-discovery stops the planner stream. +// The planner data itself stays in the cache until TTL eviction. +func (s *PlannerService) ReleaseRefreshOwnership(peerIDs []string, executionsID string) { + for _, peerID := range peerIDs { + s.Mu.Lock() + if entry := s.Cache[peerID]; entry != nil && entry.RefreshOwner == executionsID { + entry.Refreshing = false + entry.RefreshOwner = "" + } + s.Mu.Unlock() + utils.Notify(&s.SubMu, s.Subs, peerID, peerID) + payload, _ := json.Marshal(map[string]any{"peer_id": peerID}) + utils.Propalgate(peerID, tools.PropalgationMessage{ + Action: tools.PB_CLOSE_PLANNER, + Payload: payload, + }) + } +} + +// broadcastPlanner iterates the storage and compute peers of the given workflow +// and, for each peer not yet in the cache, emits a PB_PLANNER propagation so +// downstream consumers (oc-discovery, other schedulers) refresh their state. 
+func (s *PlannerService) Broadcast(wf *workflow.Workflow) { + if wf.Graph == nil { + return + } + items := []graph.GraphItem{} + items = append(items, wf.GetGraphItems(wf.Graph.IsStorage)...) + items = append(items, wf.GetGraphItems(wf.Graph.IsCompute)...) + + seen := []string{} + for _, item := range items { + _, res := item.GetResource() + if res == nil { + continue + } + creatorID := res.GetCreatorID() + if slices.Contains(seen, creatorID) { + continue + } + + data := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil).LoadOne(creatorID) + p := data.ToPeer() + if p == nil { + continue + } + + s.Mu.RLock() + cached := s.Cache[p.PeerID] + s.Mu.RUnlock() + + // Only request if no snapshot and no refresh already in flight. + if cached == nil || (cached.Planner == nil && !cached.Refreshing) { + payload, err := json.Marshal(map[string]interface{}{"peer_id": p.PeerID}) + if err != nil { + continue + } + seen = append(seen, creatorID) + utils.Propalgate(p.PeerID, tools.PropalgationMessage{ + Action: tools.PB_PLANNER, + Payload: payload, + }) + } + } +} + +// --------------------------------------------------------------------------- +// Self-planner refresh +// --------------------------------------------------------------------------- + +func (s *PlannerService) RefreshSelf(peerID string, request *tools.APIRequest) { + p, err := planner.GenerateShallow(request) + if err != nil { + fmt.Println("refreshSelfPlanner: could not generate planner:", err) + return + } + // Update the local cache and notify any waiting CheckStream goroutines. + s.Store(peerID, p) + // Broadcast the updated planner so remote peers (and oc-discovery) can + // refresh their view of our availability. 
+ type plannerWithPeer struct { + PeerID string `json:"peer_id"` + *planner.Planner + } + plannerPayload, err := json.Marshal(plannerWithPeer{PeerID: peerID, Planner: p}) + if err != nil { + return + } + utils.Propalgate(peerID, tools.PropalgationMessage{ + Action: tools.PB_PLANNER, + Payload: plannerPayload, + }) +} + +// findNextSlot scans forward from 'from' in checkStepMin increments for up to +// windowH hours and returns the first candidate start time at which all +// resources are simultaneously free. +func (s *PlannerService) findNextSlot(resources map[string]utils.BookingResource, from time.Time, originalEnd *time.Time, windowH int) *time.Time { + duration := 5 * time.Minute + if originalEnd != nil { + if d := originalEnd.Sub(from); d > 0 { + duration = d + } + } + step := time.Duration(checkStepMin) * time.Minute + limit := from.Add(time.Duration(windowH) * time.Hour) + for t := from.Add(step); t.Before(limit); t = t.Add(step) { + e := t.Add(duration) + if unavail, _ := s.checkResourceAvailability(resources, t, &e); len(unavail) == 0 { + return &t + } + } + return nil +} + +// checkResourceAvailability returns the IDs of unavailable resources and +// human-readable warning messages. 
+func (s *PlannerService) checkResourceAvailability(res map[string]utils.BookingResource, start time.Time, end *time.Time) (unavailable []string, warnings []string) { + for _, r := range res { + s.Mu.RLock() + entry := s.Cache[r.PeerPID] + s.Mu.RUnlock() + if entry == nil || entry.Planner == nil { + warnings = append(warnings, fmt.Sprintf( + "peer %s planner not in cache for resource %s – assuming available", r.PeerPID, r.ID)) + continue + } + if !s.checkInstance(entry.Planner, r.ID, r.InstanceID, start, end) { + unavailable = append(unavailable, r.ID) + warnings = append(warnings, fmt.Sprintf( + "resource %s is not available in [%s – %s]", + r.ID, start.Format(time.RFC3339), utils.FormatOptTime(end))) + } + } + return +} + +// CheckResourceInstance checks whether a resource/instance is available on the +// local planner cache for the given peer. Called by scheduling_resources when +// validating an incoming booking creation. +func (s *PlannerService) CheckResourceInstance(peerID, resourceID, instanceID string, start time.Time, end *time.Time) bool { + s.Mu.RLock() + entry := s.Cache[peerID] + s.Mu.RUnlock() + if entry == nil || entry.Planner == nil { + return true // no planner cached → assume available + } + return s.checkInstance(entry.Planner, resourceID, instanceID, start, end) +} + +// SubscribePlannerUpdates returns a channel that receives a peerID each time +// one of the given peers' planners is updated. +func (s *PlannerService) SubscribePlannerUpdates(peerIDs ...string) (<-chan string, func()) { + return SubscribeUpdates[string](s.Subs, &s.SubMu, peerIDs...) +} + +// SubscribeWorkflowUpdates returns a channel signalled when the workflow changes. +func (s *PlannerService) SubscribeWorkflowUpdates(wfID string) (<-chan struct{}, func()) { + return SubscribeUpdates[struct{}](s.WorkflowSubs, &s.WorkflowSubMu, wfID) +} + +// NotifyWorkflow signals all subscribers watching wfID. 
+func (s *PlannerService) NotifyWorkflow(wfID string) { + utils.Notify[struct{}](&s.WorkflowSubMu, s.WorkflowSubs, wfID, struct{}{}) +} + +// checkInstance checks availability for the specific instance resolved by the +// scheduler. When instanceID is empty (no instance selected / none resolvable), +// it falls back to checking all instances known in the planner and returns true +// if any one has remaining capacity. Returns true when no capacity is recorded. +func (s *PlannerService) checkInstance(p *planner.Planner, resourceID string, instanceID string, start time.Time, end *time.Time) bool { + if instanceID != "" { + return p.Check(resourceID, instanceID, nil, start, end) + } + caps, ok := p.Capacities[resourceID] + if !ok || len(caps) == 0 { + return true + } + for id := range caps { + if p.Check(resourceID, id, nil, start, end) { + return true + } + } + return false +} diff --git a/infrastructure/scheduler.go b/infrastructure/scheduler.go deleted file mode 100644 index b16971f..0000000 --- a/infrastructure/scheduler.go +++ /dev/null @@ -1,320 +0,0 @@ -package infrastructure - -import ( - "encoding/json" - "errors" - "fmt" - "oc-scheduler/infrastructure/scheduling" - "strings" - "time" - - oclib "cloud.o-forge.io/core/oc-lib" - "cloud.o-forge.io/core/oc-lib/models/bill" - "cloud.o-forge.io/core/oc-lib/models/booking" - "cloud.o-forge.io/core/oc-lib/models/common/enum" - "cloud.o-forge.io/core/oc-lib/models/common/pricing" - "cloud.o-forge.io/core/oc-lib/models/order" - "cloud.o-forge.io/core/oc-lib/models/peer" - "cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource" - "cloud.o-forge.io/core/oc-lib/models/utils" - "cloud.o-forge.io/core/oc-lib/models/workflow" - "cloud.o-forge.io/core/oc-lib/models/workflow_execution" - "cloud.o-forge.io/core/oc-lib/tools" - "github.com/google/uuid" - "github.com/robfig/cron" -) - -/* -* WorkflowSchedule is a struct that contains the scheduling information of a workflow -* It contains the mode of the schedule (Task 
or Service), the name of the schedule, the start and end time of the schedule and the cron expression - */ -// it's a flying object only use in a session time. It's not stored in the database -type WorkflowSchedule struct { - UUID string `json:"id" validate:"required"` // ExecutionsID is the list of the executions id of the workflow - Workflow *workflow.Workflow `json:"workflow,omitempty"` // Workflow is the workflow dependancy of the schedule - WorkflowExecution []*workflow_execution.WorkflowExecution `json:"workflow_executions,omitempty"` // WorkflowExecution is the list of executions of the workflow - Message string `json:"message,omitempty"` // Message is the message of the schedule - Warning string `json:"warning,omitempty"` // Warning is the warning message of the schedule - Start time.Time `json:"start" validate:"required,ltfield=End"` // Start is the start time of the schedule, is required and must be less than the End time - End *time.Time `json:"end,omitempty"` // End is the end time of the schedule, is required and must be greater than the Start time - DurationS float64 `json:"duration_s" default:"-1"` // End is the end time of the schedule - Cron string `json:"cron,omitempty"` // here the cron format : ss mm hh dd MM dw task - - BookingMode booking.BookingMode `json:"booking_mode,omitempty"` // BookingMode qualify the preemption order of the scheduling. if no payment allowed with preemption set up When_Possible - SelectedInstances workflow.ConfigItem `json:"selected_instances"` - SelectedPartnerships workflow.ConfigItem `json:"selected_partnerships"` - SelectedBuyings workflow.ConfigItem `json:"selected_buyings"` - SelectedStrategies workflow.ConfigItem `json:"selected_strategies"` - - SelectedBillingStrategy pricing.BillingStrategy `json:"selected_billing_strategy"` - - // Confirm, when true, triggers Schedule() to confirm the drafts held by this session. 
- Confirm bool `json:"confirm,omitempty"` -} - -func NewScheduler(mode int, start string, end string, durationInS float64, cron string) *WorkflowSchedule { - ws := &WorkflowSchedule{ - UUID: uuid.New().String(), - Start: time.Now().UTC().Add(asapBuffer), - BookingMode: booking.BookingMode(mode), - DurationS: durationInS, - Cron: cron, - } - s, err := time.ParseInLocation("2006-01-02T15:04:05", start, time.UTC) - if err == nil && ws.BookingMode == booking.PLANNED { - ws.Start = s // can apply a defined start other than now, if planned - } - - e, err := time.ParseInLocation("2006-01-02T15:04:05", end, time.UTC) - if err == nil { - ws.End = &e - } - return ws -} - -func (ws *WorkflowSchedule) GetBuyAndBook(wfID string, request *tools.APIRequest) (bool, *workflow.Workflow, []*workflow_execution.WorkflowExecution, []scheduling.SchedulerObject, []scheduling.SchedulerObject, error) { - access := workflow.NewAccessor(request) - res, code, err := access.LoadOne(wfID) - if code != 200 { - return false, nil, []*workflow_execution.WorkflowExecution{}, []scheduling.SchedulerObject{}, []scheduling.SchedulerObject{}, errors.New("could not load the workflow with id: " + err.Error()) - } - wf := res.(*workflow.Workflow) - isPreemptible, longest, priceds, wf, err := wf.Planify(ws.Start, ws.End, - ws.SelectedInstances, ws.SelectedPartnerships, ws.SelectedBuyings, ws.SelectedStrategies, - int(ws.BookingMode), request) - if err != nil { - return false, wf, []*workflow_execution.WorkflowExecution{}, []scheduling.SchedulerObject{}, []scheduling.SchedulerObject{}, err - } - ws.DurationS = longest - ws.Message = "We estimate that the workflow will start at " + ws.Start.String() + " and last " + fmt.Sprintf("%v", ws.DurationS) + " seconds." 
- if ws.End != nil && ws.Start.Add(time.Duration(longest)*time.Second).After(*ws.End) { - ws.Warning = "The workflow may be too long to be executed in the given time frame, we will try to book it anyway\n" - } - execs, err := ws.GetExecutions(wf, isPreemptible) - if err != nil { - return false, wf, []*workflow_execution.WorkflowExecution{}, []scheduling.SchedulerObject{}, []scheduling.SchedulerObject{}, err - } - purchased := []scheduling.SchedulerObject{} - bookings := []scheduling.SchedulerObject{} - for _, exec := range execs { - for _, obj := range exec.Buy(ws.SelectedBillingStrategy, ws.UUID, wfID, priceds) { - purchased = append(purchased, scheduling.ToSchedulerObject(tools.PURCHASE_RESOURCE, obj)) - } - for _, obj := range exec.Book(ws.UUID, wfID, priceds) { - bookings = append(bookings, scheduling.ToSchedulerObject(tools.BOOKING, obj)) - } - } - return true, wf, execs, purchased, bookings, nil -} - -// GenerateOrder creates a draft order (+ draft bill) for the given purchases and bookings. -// Returns the created order ID and any error. 
-func (ws *WorkflowSchedule) GenerateOrder(purchases []scheduling.SchedulerObject, bookings []scheduling.SchedulerObject, executionsID string, request *tools.APIRequest) (string, error) { - newOrder := &order.Order{ - AbstractObject: utils.AbstractObject{ - Name: "order_" + request.PeerID + "_" + time.Now().UTC().Format("2006-01-02T15:04:05"), - IsDraft: true, - }, - ExecutionsID: executionsID, - Purchases: []*purchase_resource.PurchaseResource{}, - Bookings: []*booking.Booking{}, - Status: enum.PENDING, - } - for _, purch := range purchases { - newOrder.Purchases = append( - newOrder.Purchases, scheduling.FromSchedulerObject(tools.PURCHASE_RESOURCE, purch).(*purchase_resource.PurchaseResource)) - } - for _, b := range bookings { - newOrder.Bookings = append( - newOrder.Bookings, scheduling.FromSchedulerObject(tools.BOOKING, b).(*booking.Booking)) - } - res, _, err := order.NewAccessor(request).StoreOne(newOrder) - if err != nil { - return "", err - } - if _, err := bill.DraftFirstBill(res.(*order.Order), request); err != nil { - return res.GetID(), err - } - return res.GetID(), nil -} - -func (ws *WorkflowSchedule) Schedules(wfID string, request *tools.APIRequest) (*WorkflowSchedule, *workflow.Workflow, []*workflow_execution.WorkflowExecution, error) { - if request == nil { - return ws, nil, []*workflow_execution.WorkflowExecution{}, errors.New("no request found") - } - selfID, _ := oclib.GetMySelf() - - // If the client provides a scheduling_id from a Check session, confirm the - // pre-created drafts (bookings/purchases). Executions already exist as drafts - // and will be confirmed later by the considers mechanism. - if ws.UUID != "" { - adminReq := &tools.APIRequest{Admin: true} - - // Obsolescence check: abort if any session execution's start date has passed. 
- executions := loadSessionExecs(ws.UUID) - for _, exec := range executions { - if !exec.ExecDate.IsZero() && exec.ExecDate.Before(time.Now().UTC()) { - return ws, nil, nil, fmt.Errorf("execution %s is obsolete (start date in the past)", exec.GetID()) - } - } - - if err := ConfirmSession(ws.UUID, selfID, request); err != nil { - return ws, nil, []*workflow_execution.WorkflowExecution{}, fmt.Errorf("confirm session failed: %w", err) - } - - for _, exec := range executions { - go WatchExecDeadline(exec.GetID(), exec.ExecutionsID, exec.ExecDate, selfID, request) - } - - obj, _, _ := workflow.NewAccessor(request).LoadOne(wfID) - if obj == nil { - return ws, nil, executions, nil - } - wf := obj.(*workflow.Workflow) - ws.Workflow = wf - ws.WorkflowExecution = executions - wf.GetAccessor(adminReq).UpdateOne(wf.Serialize(wf), wf.GetID()) - return ws, wf, executions, nil - } - - // Schedule must be called from a Check session (ws.UUID set above). - // Direct scheduling without a prior Check session is not supported. - return ws, nil, []*workflow_execution.WorkflowExecution{}, errors.New("no scheduling session: use the Check stream first") -} - -// propagateResource routes a purchase or booking to its destination: -// - If destPeerID matches our own peer (selfMongoID), the object is stored -// directly in the local DB as draft and the local planner is refreshed. -// - Otherwise a NATS CREATE_RESOURCE message is emitted so the destination -// peer can process it asynchronously. -// -// The caller is responsible for setting obj.IsDraft before calling. 
-func propagateResource(obj utils.DBObject, destPeerID string, dt tools.DataType, selfMongoID *peer.Peer, request *tools.APIRequest, errCh chan error) { - if destPeerID == selfMongoID.GetID() { - stored := oclib.NewRequestAdmin(oclib.LibDataEnum(dt), nil).StoreOne(obj.Serialize(obj)) - if stored.Err != "" || stored.Data == nil { - errCh <- fmt.Errorf("could not store %s locally: %s", dt.String(), stored.Err) - return - } - // The planner tracks booking time-slots only; purchases do not affect it. - if dt == tools.BOOKING { - go refreshSelfPlanner(selfMongoID.PeerID, request) - } - errCh <- nil - return - } - m := obj.Serialize(obj) - if m["dest_peer_id"] != nil { - if data := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil).LoadOne(fmt.Sprintf("%v", m["dest_peer_id"])); data.Data != nil { - m["peer_id"] = data.Data.(*peer.Peer).PeerID - } - } else { - fmt.Println("NO DEST ID") - return - } - payload, err := json.Marshal(m) - if err != nil { - errCh <- fmt.Errorf("could not serialize %s: %w", dt.String(), err) - return - } - if b, err := json.Marshal(&tools.PropalgationMessage{ - DataType: dt.EnumIndex(), - Action: tools.PB_CREATE, - Payload: payload, - }); err == nil { - tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{ - FromApp: "oc-scheduler", - Datatype: dt, - Method: int(tools.PROPALGATION_EVENT), - Payload: b, - }) - } - errCh <- nil -} - -/* -* getExecutions is a function that returns the executions of a workflow -* it returns an array of workflow_execution.WorkflowExecution - */ -func (ws *WorkflowSchedule) GetExecutions(workflow *workflow.Workflow, isPreemptible bool) ([]*workflow_execution.WorkflowExecution, error) { - workflows_executions := []*workflow_execution.WorkflowExecution{} - dates, err := ws.GetDates() - if err != nil { - return workflows_executions, err - } - for _, date := range dates { - obj := &workflow_execution.WorkflowExecution{ - AbstractObject: utils.AbstractObject{ - UUID: uuid.New().String(), // 
set the uuid of the execution - Name: workflow.Name + "_execution_" + date.Start.String(), // set the name of the execution - }, - Priority: 1, - ExecutionsID: ws.UUID, - ExecDate: date.Start, // set the execution date - EndDate: date.End, // set the end date - State: enum.DRAFT, // set the state to 1 (scheduled) - WorkflowID: workflow.GetID(), // set the workflow id dependancy of the execution - } - if ws.BookingMode != booking.PLANNED { - obj.Priority = 0 - } - if ws.BookingMode == booking.PREEMPTED && isPreemptible { - obj.Priority = 7 - } - - ws.SelectedStrategies = obj.SelectedStrategies - ws.SelectedPartnerships = obj.SelectedPartnerships - ws.SelectedBuyings = obj.SelectedBuyings - ws.SelectedInstances = obj.SelectedInstances - - workflows_executions = append(workflows_executions, obj) - } - return workflows_executions, nil -} - -func (ws *WorkflowSchedule) GetDates() ([]Schedule, error) { - schedule := []Schedule{} - if len(ws.Cron) > 0 { // if cron is set then end date should be set - if ws.End == nil { - return schedule, errors.New("a cron task should have an end date") - } - if ws.DurationS <= 0 { - ws.DurationS = ws.End.Sub(ws.Start).Seconds() - } - cronStr := strings.Split(ws.Cron, " ") // split the cron string to treat it - if len(cronStr) < 6 { // if the cron string is less than 6 fields, return an error because format is : ss mm hh dd MM dw (6 fields) - return schedule, errors.New("Bad cron message: (" + ws.Cron + "). 
Should be at least ss mm hh dd MM dw") - } - subCron := strings.Join(cronStr[:6], " ") - // cron should be parsed as ss mm hh dd MM dw t (min 6 fields) - specParser := cron.NewParser(cron.Second | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow) // create a new cron parser - sched, err := specParser.Parse(subCron) // parse the cron string - if err != nil { - return schedule, errors.New("Bad cron message: " + err.Error()) - } - // loop through the cron schedule to set the executions - for s := sched.Next(ws.Start); !s.IsZero() && s.Before(*ws.End); s = sched.Next(s) { - e := s.Add(time.Duration(ws.DurationS) * time.Second) - schedule = append(schedule, Schedule{ - Start: s, - End: &e, - }) - } - } else { // if no cron, set the execution to the start date - schedule = append(schedule, Schedule{ - Start: ws.Start, - End: ws.End, - }) - } - return schedule, nil -} - -type Schedule struct { - Start time.Time - End *time.Time -} - -/* -* TODO : LARGEST GRAIN PLANIFYING THE WORKFLOW WHEN OPTION IS SET -* SET PROTECTION BORDER TIME - */ diff --git a/infrastructure/scheduler/scheduler.go b/infrastructure/scheduler/scheduler.go new file mode 100644 index 0000000..d7ff695 --- /dev/null +++ b/infrastructure/scheduler/scheduler.go @@ -0,0 +1,235 @@ +package scheduler + +import ( + "errors" + "fmt" + "oc-scheduler/infrastructure/planner" + "oc-scheduler/infrastructure/scheduling_resources" + infUtils "oc-scheduler/infrastructure/utils" + "strings" + "time" + + "cloud.o-forge.io/core/oc-lib/models/booking" + "cloud.o-forge.io/core/oc-lib/models/common/enum" + "cloud.o-forge.io/core/oc-lib/models/common/pricing" + "cloud.o-forge.io/core/oc-lib/models/utils" + "cloud.o-forge.io/core/oc-lib/models/workflow" + "cloud.o-forge.io/core/oc-lib/models/workflow_execution" + "cloud.o-forge.io/core/oc-lib/tools" + "github.com/google/uuid" + "github.com/robfig/cron" +) + +const asapBuffer = 2 * time.Minute + +// Schedule holds a resolved start/end pair for a single execution slot. 
+type Schedule struct { + Start time.Time + End *time.Time +} + +// WorkflowSchedule is the flying session object for a scheduling interaction. +// It is never persisted; it lives only for the duration of a WebSocket check session. +type WorkflowSchedule struct { + UUID string `json:"id" validate:"required"` + Workflow *workflow.Workflow `json:"workflow,omitempty"` + WorkflowExecution []*workflow_execution.WorkflowExecution `json:"workflow_executions,omitempty"` + Message string `json:"message,omitempty"` + Warning string `json:"warning,omitempty"` + Start time.Time `json:"start" validate:"required,ltfield=End"` + End *time.Time `json:"end,omitempty"` + DurationS float64 `json:"duration_s" default:"-1"` + Cron string `json:"cron,omitempty"` + + BookingMode booking.BookingMode `json:"booking_mode,omitempty"` + SelectedInstances workflow.ConfigItem `json:"selected_instances"` + SelectedPartnerships workflow.ConfigItem `json:"selected_partnerships"` + SelectedBuyings workflow.ConfigItem `json:"selected_buyings"` + SelectedStrategies workflow.ConfigItem `json:"selected_strategies"` + SelectedBillingStrategy pricing.BillingStrategy `json:"selected_billing_strategy"` + + // Confirm, when true, triggers Schedule() to confirm the drafts held by this session. + Confirm bool `json:"confirm,omitempty"` +} + +// CheckResult is the response payload for an availability check. +type CheckResult struct { + Available bool `json:"available"` + Start time.Time `json:"start"` + End *time.Time `json:"end,omitempty"` + Warnings []string `json:"warnings,omitempty"` + Preemptible bool `json:"preemptible,omitempty"` + // SchedulingID is the session UUID the client must supply when confirming. 
+ SchedulingID string `json:"scheduling_id,omitempty"` +} + +// --------------------------------------------------------------------------- +// Check — availability +// --------------------------------------------------------------------------- + +// Check verifies whether the requested slot is available across all resource peers. +func (ws *WorkflowSchedule) Check(wfID string, asap bool, preemption bool, request *tools.APIRequest) (*CheckResult, error) { + fmt.Println("CHECK", asap, "/", preemption) + obj, code, err := workflow.NewAccessor(request).LoadOne(wfID) + if code != 200 || err != nil { + msg := "could not load workflow " + wfID + if err != nil { + msg += ": " + err.Error() + } + return nil, errors.New(msg) + } + wf := obj.(*workflow.Workflow) + + start := ws.Start + if asap || start.IsZero() { + start = time.Now().UTC().Add(asapBuffer) + } + + end := ws.End + if end == nil { + if ws.DurationS > 0 { + e := start.Add(time.Duration(ws.DurationS * float64(time.Second))) + end = &e + } else { + _, longest, _, _, planErr := wf.Planify( + start, nil, + ws.SelectedInstances, ws.SelectedPartnerships, + ws.SelectedBuyings, ws.SelectedStrategies, + int(ws.BookingMode), nil, request, + ) + if planErr == nil && longest > 0 { + e := start.Add(time.Duration(longest) * time.Second) + end = &e + } + } + } + + checkables := infUtils.CollectBookingResources(wf, ws.SelectedInstances) + start, end, available, preemptible, warnings := planner.GetPlannerService().FindDate(wfID, checkables, start, end, preemption, asap) + + return &CheckResult{ + Start: start, + End: end, + Available: available, + Preemptible: preemptible, + Warnings: warnings, + }, nil +} + +// --------------------------------------------------------------------------- +// GetBuyAndBook — generate scheduling resources +// --------------------------------------------------------------------------- + +// GetBuyAndBook runs Planify to generate the purchases and bookings for this session. 
+func (ws *WorkflowSchedule) GetBuyAndBook(wfID string, request *tools.APIRequest) ( + bool, + *workflow.Workflow, + []*workflow_execution.WorkflowExecution, + []scheduling_resources.SchedulerObject, + []scheduling_resources.SchedulerObject, + error, +) { + res, code, err := workflow.NewAccessor(request).LoadOne(wfID) + if code != 200 { + return false, nil, nil, nil, nil, + errors.New("could not load the workflow: " + err.Error()) + } + wf := res.(*workflow.Workflow) + isPreemptible, longest, priceds, wf, err := wf.Planify( + ws.Start, ws.End, + ws.SelectedInstances, ws.SelectedPartnerships, + ws.SelectedBuyings, ws.SelectedStrategies, + int(ws.BookingMode), nil, request, + ) + if err != nil { + return false, wf, nil, nil, nil, err + } + ws.DurationS = longest + ws.Message = "We estimate that the workflow will start at " + ws.Start.String() + + " and last " + fmt.Sprintf("%v", ws.DurationS) + " seconds." + if ws.End != nil && ws.Start.Add(time.Duration(longest)*time.Second).After(*ws.End) { + ws.Warning = "The workflow may be too long to be executed in the given time frame, we will try to book it anyway\n" + } + + execs, err := ws.GenerateExecutions(wf, isPreemptible) + if err != nil { + return false, wf, nil, nil, nil, err + } + + var purchased, bookings []scheduling_resources.SchedulerObject + for _, exec := range execs { + for _, obj := range exec.Buy(ws.SelectedBillingStrategy, ws.UUID, wfID, priceds) { + purchased = append(purchased, scheduling_resources.ToSchedulerObject(tools.PURCHASE_RESOURCE, obj)) + } + for _, obj := range exec.Book(ws.UUID, wfID, priceds) { + bookings = append(bookings, scheduling_resources.ToSchedulerObject(tools.BOOKING, obj)) + } + } + return true, wf, execs, purchased, bookings, nil +} + +// --------------------------------------------------------------------------- +// GenerateExecutions / GetDates +// --------------------------------------------------------------------------- + +// GenerateExecutions expands the cron schedule into 
WorkflowExecution instances. +func (ws *WorkflowSchedule) GenerateExecutions(wf *workflow.Workflow, isPreemptible bool) ([]*workflow_execution.WorkflowExecution, error) { + dates, err := ws.GetDates() + if err != nil { + return nil, err + } + var executions []*workflow_execution.WorkflowExecution + for _, date := range dates { + obj := &workflow_execution.WorkflowExecution{ + AbstractObject: utils.AbstractObject{ + UUID: uuid.New().String(), + Name: wf.Name + "_execution_" + date.Start.String(), + }, + Priority: 1, + ExecutionsID: ws.UUID, + ExecDate: date.Start, + EndDate: date.End, + State: enum.DRAFT, + WorkflowID: wf.GetID(), + } + if ws.BookingMode != booking.PLANNED { + obj.Priority = 0 + } + if ws.BookingMode == booking.PREEMPTED && isPreemptible { + obj.Priority = 7 + } + executions = append(executions, obj) + } + return executions, nil +} + +// GetDates parses the cron expression and returns execution date slots. +func (ws *WorkflowSchedule) GetDates() ([]Schedule, error) { + var schedule []Schedule + if len(ws.Cron) > 0 { + if ws.End == nil { + return schedule, errors.New("a cron task should have an end date") + } + if ws.DurationS <= 0 { + ws.DurationS = ws.End.Sub(ws.Start).Seconds() + } + cronStr := strings.Split(ws.Cron, " ") + if len(cronStr) < 6 { + return schedule, errors.New("Bad cron message: (" + ws.Cron + "). 
Should be at least ss mm hh dd MM dw") + } + subCron := strings.Join(cronStr[:6], " ") + specParser := cron.NewParser(cron.Second | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow) + sched, err := specParser.Parse(subCron) + if err != nil { + return schedule, errors.New("Bad cron message: " + err.Error()) + } + for s := sched.Next(ws.Start); !s.IsZero() && s.Before(*ws.End); s = sched.Next(s) { + e := s.Add(time.Duration(ws.DurationS) * time.Second) + schedule = append(schedule, Schedule{Start: s, End: &e}) + } + } else { + schedule = append(schedule, Schedule{Start: ws.Start, End: ws.End}) + } + return schedule, nil +} + diff --git a/infrastructure/scheduling/objects.go b/infrastructure/scheduling_resources/objects.go similarity index 62% rename from infrastructure/scheduling/objects.go rename to infrastructure/scheduling_resources/objects.go index cb03d4d..4f54d17 100644 --- a/infrastructure/scheduling/objects.go +++ b/infrastructure/scheduling_resources/objects.go @@ -1,4 +1,4 @@ -package scheduling +package scheduling_resources import ( "encoding/json" @@ -77,66 +77,25 @@ func ToSchedulerObject(dt tools.DataType, obj utils.ShallowDBObject) SchedulerOb return nil } -func (b *ScheduledBooking) GetExecutionId() string { - return b.ExecutionID -} - -func (b *ScheduledPurchase) GetExecutionId() string { - return b.ExecutionID -} - -func (b *ScheduledBooking) GetExecutionsId() string { - return b.ExecutionsID -} - -func (b *ScheduledPurchase) GetExecutionsId() string { - return b.ExecutionsID -} - -func (b *ScheduledBooking) GetPeerSession() string { - return b.SchedulerPeerID -} - -func (b *ScheduledPurchase) GetPeerSession() string { - return b.SchedulerPeerID -} - -func (b *ScheduledBooking) GetDestPeer() string { - return b.DestPeerID -} - -func (b *ScheduledPurchase) GetDestPeer() string { - return b.DestPeerID -} +func (b *ScheduledBooking) GetExecutionId() string { return b.ExecutionID } +func (b *ScheduledPurchase) GetExecutionId() string { return 
b.ExecutionID } +func (b *ScheduledBooking) GetExecutionsId() string { return b.ExecutionsID } +func (b *ScheduledPurchase) GetExecutionsId() string { return b.ExecutionsID } +func (b *ScheduledBooking) GetPeerSession() string { return b.SchedulerPeerID } +func (b *ScheduledPurchase) GetPeerSession() string { return b.SchedulerPeerID } +func (b *ScheduledBooking) GetDestPeer() string { return b.DestPeerID } +func (b *ScheduledPurchase) GetDestPeer() string { return b.DestPeerID } func (b *ScheduledBooking) GetKey() string { return b.ResourceID + "/" + b.InstanceID + "/" + tools.BOOKING.String() } - func (b *ScheduledPurchase) GetKey() string { return b.ResourceID + "/" + b.InstanceID + "/" + tools.PURCHASE_RESOURCE.String() } -func (b *ScheduledBooking) SetIsDraft(ok bool) { - b.IsDraft = ok -} - -func (b *ScheduledPurchase) SetIsDraft(ok bool) { - b.IsDraft = ok -} - -func (b *ScheduledBooking) SetSchedulerPeerID(peerID string) { - b.SchedulerPeerID = peerID -} - -func (b *ScheduledPurchase) SetSchedulerPeerID(peerID string) { - b.SchedulerPeerID = peerID -} - -func (b *ScheduledBooking) SetExecutionsID(ei string) { - b.ExecutionsID = ei -} - -func (b *ScheduledPurchase) SetExecutionsID(ei string) { - b.ExecutionsID = ei -} +func (b *ScheduledBooking) SetIsDraft(ok bool) { b.IsDraft = ok } +func (b *ScheduledPurchase) SetIsDraft(ok bool) { b.IsDraft = ok } +func (b *ScheduledBooking) SetSchedulerPeerID(p string) { b.SchedulerPeerID = p } +func (b *ScheduledPurchase) SetSchedulerPeerID(p string) { b.SchedulerPeerID = p } +func (b *ScheduledBooking) SetExecutionsID(ei string) { b.ExecutionsID = ei } +func (b *ScheduledPurchase) SetExecutionsID(ei string) { b.ExecutionsID = ei } diff --git a/infrastructure/scheduling_resources/service.go b/infrastructure/scheduling_resources/service.go new file mode 100644 index 0000000..7177560 --- /dev/null +++ b/infrastructure/scheduling_resources/service.go @@ -0,0 +1,474 @@ +package scheduling_resources + +import ( + 
"encoding/json" + "fmt" + "strings" + "sync" + "time" + + oclib "cloud.o-forge.io/core/oc-lib" + "cloud.o-forge.io/core/oc-lib/models/booking" + "cloud.o-forge.io/core/oc-lib/models/common/enum" + "cloud.o-forge.io/core/oc-lib/models/peer" + "cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource" + "cloud.o-forge.io/core/oc-lib/models/utils" + "cloud.o-forge.io/core/oc-lib/tools" + "oc-scheduler/infrastructure/planner" +) + +// --------------------------------------------------------------------------- +// Service +// --------------------------------------------------------------------------- + +// SchedulingResourcesService manages the lifecycle of Booking and PurchaseResource +// as SchedulerObjects. It caches the local peer identity so every operation can +// route correctly without calling oclib.GetMySelf() on each request. +type SchedulingResourcesService struct { + mu sync.RWMutex + selfPeer *peer.Peer +} + +var singleton *SchedulingResourcesService + +func init() { + singleton = &SchedulingResourcesService{} +} + +// GetService returns the singleton SchedulingResourcesService. +func GetService() *SchedulingResourcesService { + return singleton +} + +// Self returns the cached local peer, lazily resolving it on first call. +func (s *SchedulingResourcesService) Self() *peer.Peer { + s.mu.RLock() + p := s.selfPeer + s.mu.RUnlock() + if p != nil { + return p + } + p, _ = oclib.GetMySelf() + if p != nil { + s.mu.Lock() + s.selfPeer = p + s.mu.Unlock() + } + return p +} + +// InvalidateSelf clears the cached self peer (e.g. after a peer re-registration). 
+func (s *SchedulingResourcesService) InvalidateSelf() { + s.mu.Lock() + s.selfPeer = nil + s.mu.Unlock() +} + +// --------------------------------------------------------------------------- +// RemoveResourcePayload +// --------------------------------------------------------------------------- + +// RemoveResourcePayload is sent via NATS REMOVE_RESOURCE so the receiver can +// verify the delete order comes from the original scheduler session. +type RemoveResourcePayload struct { + ID string `json:"id"` + SchedulerPeerID string `json:"scheduler_peer_id"` + ExecutionsID string `json:"executions_id"` +} + +// --------------------------------------------------------------------------- +// Propagation — creation +// --------------------------------------------------------------------------- + +// PropagateCreate routes a new booking/purchase draft to its destination: +// - local peer → store in DB + refresh planner +// - remote peer → emit NATS PROPALGATION_EVENT/PB_CREATE +func (s *SchedulingResourcesService) PropagateCreate( + obj utils.DBObject, + destPeerID string, + dt tools.DataType, + request *tools.APIRequest, + errCh chan error, +) { + selfID := s.Self() + if selfID == nil { + errCh <- fmt.Errorf("PropagateCreate: local peer not available") + return + } + + if destPeerID == selfID.GetID() { + stored := oclib.NewRequestAdmin(oclib.LibDataEnum(dt), nil).StoreOne(obj.Serialize(obj)) + if stored.Err != "" || stored.Data == nil { + errCh <- fmt.Errorf("could not store %s locally: %s", dt.String(), stored.Err) + return + } + if dt == tools.BOOKING { + planner.GetPlannerService().RefreshSelf(selfID.PeerID, request) + } + errCh <- nil + return + } + + m := obj.Serialize(obj) + if m["dest_peer_id"] != nil { + if data := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil).LoadOne(fmt.Sprintf("%v", m["dest_peer_id"])); data.Data != nil { + m["peer_id"] = data.Data.(*peer.Peer).PeerID + } + } else if m["peerless"] == true { + originRef := fmt.Sprintf("%v", 
m["origin_ref"]) + if !isValidPeerlessRef(originRef) { + emitPeerBehaviorReport(request.PeerID, tools.BehaviorFraud, + "peerless booking with invalid or unrecognised Origin.Ref", originRef) + errCh <- fmt.Errorf("peerless booking rejected: invalid Origin.Ref %q", originRef) + return + } + stored := oclib.NewRequestAdmin(oclib.LibDataEnum(dt), nil).StoreOne(m) + if stored.Err != "" || stored.Data == nil { + errCh <- fmt.Errorf("could not store peerless %s locally: %s", dt.String(), stored.Err) + return + } + if dt == tools.BOOKING { + planner.GetPlannerService().RefreshSelf(selfID.PeerID, request) + } + errCh <- nil + return + } else { + fmt.Println("PropagateCreate: no dest_peer_id and not peerless, skipping") + errCh <- nil + return + } + + payload, err := json.Marshal(m) + if err != nil { + errCh <- fmt.Errorf("could not serialize %s: %w", dt.String(), err) + return + } + b, err := json.Marshal(&tools.PropalgationMessage{ + DataType: dt.EnumIndex(), + Action: tools.PB_CREATE, + Payload: payload, + }) + if err == nil { + tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{ + FromApp: "oc-scheduler", + Datatype: dt, + Method: int(tools.PROPALGATION_EVENT), + Payload: b, + }) + } + errCh <- nil +} + +// --------------------------------------------------------------------------- +// Propagation — update / confirmation +// --------------------------------------------------------------------------- + +// PropagateWrite routes a booking/purchase update to its destination. +// Returns true when the resource was confirmed locally (IsDraft=false on self peer) +// and the caller must trigger considers via execution.UpdateExecutionState. 
+func (s *SchedulingResourcesService) PropagateWrite( + obj utils.DBObject, + destPeerID string, + dt tools.DataType, + request *tools.APIRequest, +) bool { + selfID := s.Self() + if selfID == nil { + fmt.Println("PropagateWrite: local peer not available") + return false + } + + if destPeerID == selfID.GetID() { + if _, _, err := utils.GenericRawUpdateOne(obj, obj.GetID(), obj.GetAccessor(request)); err != nil { + fmt.Printf("PropagateWrite: local update failed for %s %s: %v\n", dt, obj.GetID(), err) + return false + } + if dt == tools.BOOKING { + planner.GetPlannerService().RefreshSelf(selfID.PeerID, request) + } + return !obj.IsDrafted() + } + + payload, err := json.Marshal(obj) + if err != nil { + return false + } + tools.NewNATSCaller().SetNATSPub(tools.CREATE_RESOURCE, tools.NATSResponse{ + FromApp: "oc-scheduler", + Datatype: dt, + Method: int(tools.CREATE_RESOURCE), + Payload: payload, + }) + return false +} + +// --------------------------------------------------------------------------- +// Deletion +// --------------------------------------------------------------------------- + +// Delete removes a booking/purchase from its destination peer (local or NATS). +func (s *SchedulingResourcesService) Delete(dt tools.DataType, bk SchedulerObject, request *tools.APIRequest) { + selfID := s.Self() + if selfID == nil { + fmt.Println("Delete: local peer not available") + return + } + + if bk.GetDestPeer() == selfID.GetID() { + data := oclib.NewRequestAdmin(oclib.LibDataEnum(dt), nil).DeleteOne(bk.GetID()) + fmt.Println("Delete scheduling resource", bk.GetID(), data.Err) + if dt == tools.BOOKING { + planner.GetPlannerService().RefreshSelf(selfID.PeerID, request) + } + return + } + EmitNATSRemove(bk.GetID(), bk.GetPeerSession(), bk.GetExecutionsId(), dt) +} + +// EmitNATSRemove sends a REMOVE_RESOURCE NATS event with auth fields. 
+func EmitNATSRemove(id, schedulerPeerID, executionsID string, dt tools.DataType) { + payload, _ := json.Marshal(RemoveResourcePayload{ + ID: id, + SchedulerPeerID: schedulerPeerID, + ExecutionsID: executionsID, + }) + tools.NewNATSCaller().SetNATSPub(tools.REMOVE_RESOURCE, tools.NATSResponse{ + FromApp: "oc-scheduler", + Datatype: dt, + Method: int(tools.REMOVE_RESOURCE), + Payload: payload, + }) +} + +// --------------------------------------------------------------------------- +// Confirmation +// --------------------------------------------------------------------------- + +// Confirm sets IsDraft=false on a booking or purchase. +// For bookings, also advances State to SCHEDULED and refreshes the self planner. +func Confirm(id string, dt tools.DataType) { + adminReq := &tools.APIRequest{Admin: true} + switch dt { + case tools.BOOKING: + res, _, err := booking.NewAccessor(adminReq).LoadOne(id) + if err != nil || res == nil { + fmt.Printf("Confirm: could not load booking %s: %v\n", id, err) + return + } + bk := res.(*booking.Booking) + bk.IsDraft = false + bk.State = enum.SCHEDULED + if _, _, err := utils.GenericRawUpdateOne(bk, id, booking.NewAccessor(adminReq)); err != nil { + fmt.Printf("Confirm: could not confirm booking %s: %v\n", id, err) + return + } + if self := GetService().Self(); self != nil { + planner.GetPlannerService().RefreshSelf(self.PeerID, adminReq) + } + case tools.PURCHASE_RESOURCE: + res, _, err := purchase_resource.NewAccessor(adminReq).LoadOne(id) + if err != nil || res == nil { + fmt.Printf("Confirm: could not load purchase %s: %v\n", id, err) + return + } + pr := res.(*purchase_resource.PurchaseResource) + pr.IsDraft = false + if _, _, err := utils.GenericRawUpdateOne(pr, id, purchase_resource.NewAccessor(adminReq)); err != nil { + fmt.Printf("Confirm: could not confirm purchase %s: %v\n", id, err) + } + } +} + +// DraftTimeout deletes a booking/purchase if it is still a draft after 10 minutes. 
+func DraftTimeout(id string, dt tools.DataType) { + adminReq := &tools.APIRequest{Admin: true} + var res utils.DBObject + var loadErr error + switch dt { + case tools.BOOKING: + res, _, loadErr = booking.NewAccessor(adminReq).LoadOne(id) + case tools.PURCHASE_RESOURCE: + res, _, loadErr = purchase_resource.NewAccessor(adminReq).LoadOne(id) + default: + return + } + if loadErr != nil || res == nil || !res.IsDrafted() { + return + } + switch dt { + case tools.BOOKING: + booking.NewAccessor(adminReq).DeleteOne(id) + case tools.PURCHASE_RESOURCE: + purchase_resource.NewAccessor(adminReq).DeleteOne(id) + } + fmt.Printf("DraftTimeout: %s %s deleted (still draft after 10 min)\n", dt.String(), id) +} + +// --------------------------------------------------------------------------- +// NATS handlers — incoming booking/purchase +// --------------------------------------------------------------------------- + +// HandleCreateBooking processes an incoming booking from NATS. +// Returns true if the booking was confirmed (IsDraft→false) and considers must be triggered. 
+func (s *SchedulingResourcesService) HandleCreateBooking(bk *booking.Booking, adminReq *tools.APIRequest) bool { + self := s.Self() + if self == nil { + return false + } + + if existing, _, loadErr := booking.NewAccessor(adminReq).LoadOne(bk.GetID()); loadErr == nil && existing != nil { + prev := existing.(*booking.Booking) + if prev.SchedulerPeerID != bk.SchedulerPeerID || prev.ExecutionsID != bk.ExecutionsID { + fmt.Println("HandleCreateBooking: auth mismatch, ignoring", bk.GetID()) + return false + } + if !prev.IsDrafted() && bk.IsDraft { + return false + } + if !bk.IsDraft && !prev.ExpectedStartDate.IsZero() && prev.ExpectedStartDate.Before(time.Now().UTC()) { + fmt.Println("HandleCreateBooking: expired, deleting", bk.GetID()) + booking.NewAccessor(adminReq).DeleteOne(bk.GetID()) + return false + } + if _, _, err := utils.GenericRawUpdateOne(bk, bk.GetID(), booking.NewAccessor(adminReq)); err != nil { + fmt.Println("HandleCreateBooking: update failed:", err) + return false + } + planner.GetPlannerService().RefreshSelf(self.PeerID, adminReq) + return !bk.IsDraft + } + + // New booking + if !bk.ExpectedStartDate.IsZero() && bk.ExpectedStartDate.Before(time.Now().UTC()) { + fmt.Println("HandleCreateBooking: start date in the past, discarding") + return false + } + if !planner.GetPlannerService().CheckResourceInstance(self.PeerID, bk.ResourceID, bk.InstanceID, bk.ExpectedStartDate, bk.ExpectedEndDate) { + fmt.Println("HandleCreateBooking: conflicts with local planner, discarding") + return false + } + bk.IsDraft = true + stored, _, err := booking.NewAccessor(adminReq).StoreOne(bk) + if err != nil { + fmt.Println("HandleCreateBooking: could not store:", err) + return false + } + storedID := stored.GetID() + planner.GetPlannerService().RefreshSelf(self.PeerID, adminReq) + time.AfterFunc(10*time.Minute, func() { DraftTimeout(storedID, tools.BOOKING) }) + return false +} + +// HandleCreatePurchase processes an incoming purchase from NATS. 
+// Returns true if considers must be triggered. +func (s *SchedulingResourcesService) HandleCreatePurchase(pr *purchase_resource.PurchaseResource, adminReq *tools.APIRequest) bool { + self := s.Self() + if self == nil { + return false + } + if pr.DestPeerID != self.GetID() { + return false + } + + if existing, _, loadErr := purchase_resource.NewAccessor(adminReq).LoadOne(pr.GetID()); loadErr == nil && existing != nil { + prev := existing.(*purchase_resource.PurchaseResource) + if prev.SchedulerPeerID != pr.SchedulerPeerID || prev.ExecutionsID != pr.ExecutionsID { + fmt.Println("HandleCreatePurchase: auth mismatch, ignoring", pr.GetID()) + return false + } + if !prev.IsDrafted() && pr.IsDraft { + return false + } + if _, _, err := utils.GenericRawUpdateOne(pr, pr.GetID(), purchase_resource.NewAccessor(adminReq)); err != nil { + fmt.Println("HandleCreatePurchase: update failed:", err) + return false + } + return !pr.IsDraft + } + + pr.IsDraft = true + stored, _, err := purchase_resource.NewAccessor(adminReq).StoreOne(pr) + if err != nil { + fmt.Println("HandleCreatePurchase: could not store:", err) + return false + } + storedID := stored.GetID() + time.AfterFunc(10*time.Minute, func() { DraftTimeout(storedID, tools.PURCHASE_RESOURCE) }) + return false +} + +// HandleRemoveBooking verifies auth and deletes the booking. 
+func (s *SchedulingResourcesService) HandleRemoveBooking(p RemoveResourcePayload, adminReq *tools.APIRequest) { + res, _, loadErr := booking.NewAccessor(adminReq).LoadOne(p.ID) + if loadErr != nil || res == nil { + return + } + existing := res.(*booking.Booking) + if existing.SchedulerPeerID != p.SchedulerPeerID || existing.ExecutionsID != p.ExecutionsID { + fmt.Println("HandleRemoveBooking: auth mismatch, ignoring", p.ID) + return + } + booking.NewAccessor(adminReq).DeleteOne(p.ID) + if self := s.Self(); self != nil { + planner.GetPlannerService().RefreshSelf(self.PeerID, adminReq) + } +} + +// HandleRemovePurchase verifies auth and deletes the purchase. +func (s *SchedulingResourcesService) HandleRemovePurchase(p RemoveResourcePayload, adminReq *tools.APIRequest) { + res, _, loadErr := purchase_resource.NewAccessor(adminReq).LoadOne(p.ID) + if loadErr != nil || res == nil { + return + } + existing := res.(*purchase_resource.PurchaseResource) + if existing.SchedulerPeerID != p.SchedulerPeerID || existing.ExecutionsID != p.ExecutionsID { + fmt.Println("HandleRemovePurchase: auth mismatch, ignoring", p.ID) + return + } + purchase_resource.NewAccessor(adminReq).DeleteOne(p.ID) +} + +// --------------------------------------------------------------------------- +// Internal helpers +// --------------------------------------------------------------------------- + +var knownRegistryPrefixes = []string{ + "docker.io/", "index.docker.io/", "ghcr.io/", "quay.io/", + "registry.hub.docker.com/", "gcr.io/", "public.ecr.aws/", +} + +func isValidPeerlessRef(ref string) bool { + if ref == "" { + return false + } + for _, prefix := range knownRegistryPrefixes { + if strings.HasPrefix(ref, prefix) && len(ref) > len(prefix) { + return true + } + } + return false +} + +func emitPeerBehaviorReport(targetPeerDID string, severity tools.BehaviorSeverity, reason, evidence string) { + if targetPeerDID == "" { + return + } + report := tools.PeerBehaviorReport{ + ReporterApp: 
"oc-scheduler", + TargetPeerID: targetPeerDID, + Severity: severity, + Reason: reason, + Evidence: evidence, + At: time.Now().UTC(), + } + payload, err := json.Marshal(report) + if err != nil { + return + } + tools.NewNATSCaller().SetNATSPub(tools.PEER_BEHAVIOR_EVENT, tools.NATSResponse{ + FromApp: "oc-scheduler", + Datatype: tools.PEER, + Method: int(tools.PEER_BEHAVIOR_EVENT), + Payload: payload, + }) +} diff --git a/infrastructure/session.go b/infrastructure/session.go deleted file mode 100644 index 0334f68..0000000 --- a/infrastructure/session.go +++ /dev/null @@ -1,395 +0,0 @@ -package infrastructure - -import ( - "context" - "encoding/json" - "fmt" - "oc-scheduler/conf" - "oc-scheduler/infrastructure/scheduling" - "time" - - oclib "cloud.o-forge.io/core/oc-lib" - "cloud.o-forge.io/core/oc-lib/dbs" - "cloud.o-forge.io/core/oc-lib/models/booking" - "cloud.o-forge.io/core/oc-lib/models/order" - "cloud.o-forge.io/core/oc-lib/models/peer" - "cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource" - "cloud.o-forge.io/core/oc-lib/models/utils" - "cloud.o-forge.io/core/oc-lib/models/workflow_execution" - "cloud.o-forge.io/core/oc-lib/tools" -) - -// removeResourcePayload is sent via NATS REMOVE_RESOURCE so the receiver can -// verify the delete order comes from the original scheduler session. 
-type removeResourcePayload struct { - ID string `json:"id"` - SchedulerPeerID string `json:"scheduler_peer_id"` - ExecutionsID string `json:"executions_id"` -} - -// --------------------------------------------------------------------------- -// DB helpers — objects are found via executions_id -// --------------------------------------------------------------------------- - -func sessionIDFilter(field, id string) *dbs.Filters { - return &dbs.Filters{ - And: map[string][]dbs.Filter{ - field: {{Operator: dbs.EQUAL.String(), Value: id}}, - }, - } -} - -func loadSession(executionsID string, dt tools.DataType) []scheduling.SchedulerObject { - results := oclib.NewRequestAdmin(oclib.LibDataEnum(dt), nil).Search( - sessionIDFilter("executions_id", executionsID), "", true) - out := make([]scheduling.SchedulerObject, 0, len(results.Data)) - for _, obj := range results.Data { - out = append(out, scheduling.ToSchedulerObject(dt, obj)) - } - return out -} - -func loadSessionExecs(executionsID string) []*workflow_execution.WorkflowExecution { - adminReq := &tools.APIRequest{Admin: true} - results, _, _ := workflow_execution.NewAccessor(adminReq).Search( - sessionIDFilter("executions_id", executionsID), "", true) - out := make([]*workflow_execution.WorkflowExecution, 0, len(results)) - for _, obj := range results { - if exec, ok := obj.(*workflow_execution.WorkflowExecution); ok { - out = append(out, exec) - } - } - return out -} - -func loadSessionOrder(executionsID string) *order.Order { - adminReq := &tools.APIRequest{Admin: true} - results, _, _ := order.NewAccessor(adminReq).Search( - sessionIDFilter("executions_id", executionsID), "", true) - for _, obj := range results { - if o, ok := obj.(*order.Order); ok { - return o - } - } - return nil -} - -// --------------------------------------------------------------------------- -// Session upsert -// --------------------------------------------------------------------------- - -// UpsertSessionDrafts creates or updates draft 
bookings/purchases/executions for a -// Check session. Existing objects are found via the DB (executions_id). -// Called on first successful check and on user date changes. -// -// - bookings/purchases: upserted by (resourceID, instanceID); stale ones deleted -// - executions: replaced on every call (dates may have changed) -// - order: created once, updated on subsequent calls -func (ws *WorkflowSchedule) UpsertSessionDrafts(wfID, executionsID string, selfID *peer.Peer, request *tools.APIRequest) { - _, _, execs, purchases, bookings, err := ws.GetBuyAndBook(wfID, request) - if err != nil { - return - } - - adminReq := &tools.APIRequest{Admin: true} - - // --- bookings --- - existing := map[string]scheduling.SchedulerObject{} - seen := map[string]bool{} - for dt, datas := range map[tools.DataType][]scheduling.SchedulerObject{ - tools.BOOKING: bookings, tools.PURCHASE_RESOURCE: purchases, - } { - for _, bk := range loadSession(executionsID, dt) { - existing[bk.GetKey()] = bk - } - upsertSessionDrafts(dt, datas, existing, seen, selfID, executionsID, request) - for key, prev := range existing { - if !seen[key] { - deleteScheduling(dt, prev, selfID, request) - } - } - - } - // --- executions: replace on every call (dates may have changed) --- - for _, old := range loadSessionExecs(executionsID) { - UnregisterExecLock(old.GetID()) - workflow_execution.NewAccessor(adminReq).DeleteOne(old.GetID()) - } - for _, exec := range execs { - exec.ExecutionsID = executionsID - exec.IsDraft = true - ex, _, err := utils.GenericStoreOne(exec, workflow_execution.NewAccessor(adminReq)) - if err == nil { - RegisterExecLock(ex.GetID()) - go WatchExecDeadline( - ex.GetID(), executionsID, exec.ExecDate, selfID, request) - } - } - - // --- order: create once, update on subsequent calls --- - if existing := loadSessionOrder(executionsID); existing == nil { - ws.GenerateOrder(purchases, bookings, executionsID, request) - } else { - for _, purch := range purchases { - existing.Purchases = 
append( - existing.Purchases, scheduling.FromSchedulerObject(tools.PURCHASE_RESOURCE, purch).(*purchase_resource.PurchaseResource)) - } - for _, b := range bookings { - existing.Bookings = append( - existing.Bookings, scheduling.FromSchedulerObject(tools.BOOKING, b).(*booking.Booking)) - } - utils.GenericRawUpdateOne(existing, existing.GetID(), order.NewAccessor(adminReq)) - } -} - -// --------------------------------------------------------------------------- -// Session lifecycle -// --------------------------------------------------------------------------- - -func upsertSessionDrafts(dt tools.DataType, datas []scheduling.SchedulerObject, existing map[string]scheduling.SchedulerObject, - seen map[string]bool, selfID *peer.Peer, - executionsID string, request *tools.APIRequest) { - fmt.Println("UpsertSessionDrafts", len(datas), len(existing)) - for _, bk := range datas { - bk.SetSchedulerPeerID(selfID.PeerID) - bk.SetExecutionsID(executionsID) - seen[bk.GetKey()] = true - if prev, ok := existing[bk.GetKey()]; ok { - bk.SetID(prev.GetID()) - bk.SetIsDraft(false) - // Convert to concrete type (Booking/PurchaseResource) so that - // GenericRawUpdateOne serializes the real struct, not the wrapper. - propagateWriteResource( - scheduling.FromSchedulerDBObject(dt, bk), bk.GetDestPeer(), dt, selfID, request) - } else { - errCh := make(chan error, 1) - propagateResource(scheduling.FromSchedulerDBObject(dt, bk), bk.GetDestPeer(), dt, selfID, request, errCh) - <-errCh - } - } -} - -// CleanupSession deletes all draft bookings/purchases/executions/order for a -// session (called when the WebSocket closes without a confirm). 
-func CleanupSession(self *peer.Peer, executionsID string, selfID *peer.Peer, request *tools.APIRequest) { - adminReq := &tools.APIRequest{Admin: true} - for _, exec := range loadSessionExecs(executionsID) { - UnscheduleExecution(exec.GetID(), selfID, request) - workflow_execution.NewAccessor(adminReq).DeleteOne(exec.GetID()) - } - if o := loadSessionOrder(executionsID); o != nil { - order.NewAccessor(adminReq).DeleteOne(o.GetID()) - } -} - -// ConfirmSession flips all session drafts to IsDraft=false and propagates them. -// The considers mechanism then transitions executions to IsDraft=false once -// all remote peers acknowledge. -func ConfirmSession(executionsID string, selfID *peer.Peer, request *tools.APIRequest) error { - for _, dt := range []tools.DataType{tools.BOOKING, tools.PURCHASE_RESOURCE} { - for _, bk := range loadSession(executionsID, dt) { - bk.SetIsDraft(false) - propagateWriteResource( - scheduling.FromSchedulerDBObject(dt, bk), bk.GetDestPeer(), dt, selfID, request) - } - } - return nil -} - -// confirmSessionOrder sets the order IsDraft=false once all considers are received. 
-func confirmSessionOrder(executionsID string, adminReq *tools.APIRequest) { - if o := loadSessionOrder(executionsID); o != nil { - o.IsDraft = false - utils.GenericRawUpdateOne(o, o.GetID(), order.NewAccessor(adminReq)) - } -} - -// --------------------------------------------------------------------------- -// Propagation -// --------------------------------------------------------------------------- - -// propagateWriteResource routes a booking/purchase write to its destination: -// - local peer → DB upsert; emits considers on confirm (IsDraft=false) -// - remote peer → NATS CREATE_RESOURCE (receiver upserts) -func propagateWriteResource(obj utils.DBObject, destPeerID string, dt tools.DataType, selfID *peer.Peer, request *tools.APIRequest) { - if destPeerID == selfID.GetID() { - if _, _, err := utils.GenericRawUpdateOne(obj, obj.GetID(), obj.GetAccessor(request)); err != nil { - fmt.Printf("propagateWriteResource: local update failed for %s %s: %v\n", dt, obj.GetID(), err) - return - } - if dt == tools.BOOKING { - go refreshSelfPlanner(selfID.PeerID, request) - } - fmt.Println("IS DRAFTED", obj.IsDrafted()) - if !obj.IsDrafted() { - if payload, err := json.Marshal(&executionConsidersPayload{ - ID: obj.GetID(), - }); err == nil { - go updateExecutionState(payload, dt) - } - } - return - } - payload, err := json.Marshal(obj) - if err != nil { - return - } - tools.NewNATSCaller().SetNATSPub(tools.CREATE_RESOURCE, tools.NATSResponse{ - FromApp: "oc-scheduler", - Datatype: dt, - Method: int(tools.CREATE_RESOURCE), - Payload: payload, - }) -} - -// deleteBooking deletes a booking from its destination peer (local DB or NATS). 
-func deleteScheduling(dt tools.DataType, bk scheduling.SchedulerObject, selfID *peer.Peer, request *tools.APIRequest) { - if bk.GetDestPeer() == selfID.GetID() { - oclib.NewRequestAdmin(oclib.LibDataEnum(dt), nil).DeleteOne(bk.GetID()) - go refreshSelfPlanner(selfID.PeerID, request) - return - } - emitNATSRemove(bk.GetID(), bk.GetPeerSession(), bk.GetExecutionsId(), dt) -} - -// emitNATSRemove sends a REMOVE_RESOURCE event to the remote peer carrying -// auth fields so the receiver can verify the delete is legitimate. -func emitNATSRemove(id, schedulerPeerID, executionsID string, dt tools.DataType) { - payload, _ := json.Marshal(removeResourcePayload{ - ID: id, - SchedulerPeerID: schedulerPeerID, - ExecutionsID: executionsID, - }) - tools.NewNATSCaller().SetNATSPub(tools.REMOVE_RESOURCE, tools.NATSResponse{ - FromApp: "oc-scheduler", - Datatype: dt, - Method: int(tools.REMOVE_RESOURCE), - Payload: payload, - }) -} - -// --------------------------------------------------------------------------- -// Deadline watchers -// --------------------------------------------------------------------------- - -// WatchExecDeadline fires one minute before the execution start date. -// If the execution is still a draft it is purged; otherwise the namespace -// is created and a WatchExecEnd watcher is armed. -// If the deadline has already passed (e.g. after a process restart), it fires immediately. 
-func WatchExecDeadline(executionID string, ns string, execDate time.Time, selfID *peer.Peer, request *tools.APIRequest) { - fmt.Println("WatchExecDeadline") - delay := time.Until(execDate.UTC().Add(-1 * time.Minute)) - if delay <= 0 { - go handleExecDeadline(executionID, ns, selfID, request) - return - } - time.AfterFunc(delay, func() { handleExecDeadline(executionID, ns, selfID, request) }) -} - -func handleExecDeadline(executionID string, ns string, selfID *peer.Peer, request *tools.APIRequest) { - adminReq := &tools.APIRequest{Admin: true} - res, _, err := workflow_execution.NewAccessor(adminReq).LoadOne(executionID) - if err != nil || res == nil { - fmt.Printf("handleExecDeadline: execution %s not found\n", executionID) - return - } - exec := res.(*workflow_execution.WorkflowExecution) - if exec.IsDraft { - UnscheduleExecution(executionID, selfID, request) - workflow_execution.NewAccessor(adminReq).DeleteOne(executionID) - fmt.Printf("handleExecDeadline: purged draft execution %s\n", executionID) - return - } - if serv, err := tools.NewKubernetesService( - conf.GetConfig().KubeHost+":"+conf.GetConfig().KubePort, - conf.GetConfig().KubeCA, conf.GetConfig().KubeCert, conf.GetConfig().KubeData); err != nil { - fmt.Printf("handleExecDeadline: k8s init failed for %s: %v\n", executionID, err) - } else if err := serv.ProvisionExecutionNamespace(context.Background(), ns); err != nil { - fmt.Printf("handleExecDeadline: failed to provision namespace for %s: %v\n", ns, err) - } - go WatchExecEnd(executionID, ns, exec.EndDate, exec.ExecDate) -} - -// WatchExecEnd fires at the execution end date (ExecDate+1h when EndDate is nil) -// and deletes the Kubernetes namespace associated with the execution. 
-func WatchExecEnd(executionID string, ns string, endDate *time.Time, execDate time.Time) { - var end time.Time - if endDate != nil { - end = *endDate - } else { - end = execDate.UTC().Add(time.Hour) - } - delay := time.Until(end.UTC()) - fire := func() { - serv, err := tools.NewKubernetesService( - conf.GetConfig().KubeHost+":"+conf.GetConfig().KubePort, - conf.GetConfig().KubeCA, conf.GetConfig().KubeCert, conf.GetConfig().KubeData) - if err != nil { - fmt.Printf("WatchExecEnd: k8s init failed for %s: %v\n", executionID, err) - return - } - if err := serv.TeardownExecutionNamespace(context.Background(), ns); err != nil { - fmt.Printf("WatchExecEnd: failed to teardown namespace %s: %v\n", ns, err) - } - } - if delay <= 0 { - go fire() - return - } - time.AfterFunc(delay, fire) -} - -// RecoverDraftExecutions is called at startup to restore deadline watchers for -// draft executions that survived a process restart. Executions already past -// their deadline are purged immediately. -func RecoverDraftExecutions() { - adminReq := &tools.APIRequest{Admin: true} - var selfID *peer.Peer - for selfID == nil { - selfID, _ = oclib.GetMySelf() - if selfID == nil { - time.Sleep(5 * time.Second) - } - } - results, _, _ := workflow_execution.NewAccessor(adminReq).Search(nil, "*", true) - for _, obj := range results { - exec, ok := obj.(*workflow_execution.WorkflowExecution) - if !ok { - continue - } - RegisterExecLock(exec.GetID()) - go WatchExecDeadline(exec.GetID(), exec.ExecutionsID, exec.ExecDate, selfID, adminReq) - } - fmt.Printf("RecoverDraftExecutions: recovered %d draft executions\n", len(results)) -} - -// --------------------------------------------------------------------------- -// Unschedule -// --------------------------------------------------------------------------- - -// UnscheduleExecution deletes all bookings for an execution (via PeerBookByGraph) -// then deletes the execution itself. 
-func UnscheduleExecution(executionID string, selfID *peer.Peer, request *tools.APIRequest) error { - fmt.Println("UnscheduleExecution") - adminReq := &tools.APIRequest{Admin: true} - res, _, err := workflow_execution.NewAccessor(adminReq).LoadOne(executionID) - if err != nil || res == nil { - return fmt.Errorf("execution %s not found: %w", executionID, err) - } - exec := res.(*workflow_execution.WorkflowExecution) - for _, byResource := range exec.PeerBookByGraph { - for _, bookingIDs := range byResource { - for _, bkID := range bookingIDs { - bkRes, _, loadErr := booking.NewAccessor(adminReq).LoadOne(bkID) - fmt.Println("UnscheduleExecution", bkID, loadErr) - if loadErr != nil || bkRes == nil { - continue - } - deleteScheduling(tools.BOOKING, scheduling.ToSchedulerObject(tools.BOOKING, bkRes), selfID, request) - } - } - } - workflow_execution.NewAccessor(adminReq).DeleteOne(executionID) - UnregisterExecLock(executionID) - return nil -} diff --git a/infrastructure/session/session.go b/infrastructure/session/session.go new file mode 100644 index 0000000..c4ab668 --- /dev/null +++ b/infrastructure/session/session.go @@ -0,0 +1,233 @@ +package session + +import ( + "encoding/json" + "fmt" + "oc-scheduler/infrastructure/execution" + "oc-scheduler/infrastructure/scheduling_resources" + "sync" + "time" + + "cloud.o-forge.io/core/oc-lib/dbs" + "cloud.o-forge.io/core/oc-lib/models/bill" + "cloud.o-forge.io/core/oc-lib/models/booking" + "cloud.o-forge.io/core/oc-lib/models/common/enum" + "cloud.o-forge.io/core/oc-lib/models/order" + "cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource" + "cloud.o-forge.io/core/oc-lib/models/utils" + "cloud.o-forge.io/core/oc-lib/models/workflow_execution" + "cloud.o-forge.io/core/oc-lib/tools" + + oclib "cloud.o-forge.io/core/oc-lib" +) + +type SessionExecutionsService struct { + Mu sync.RWMutex + ExecutionsSessionID string +} + +func NewSessionExecutionsService(sessionID string) *SessionExecutionsService { + return 
&SessionExecutionsService{ExecutionsSessionID: sessionID} +} + +// --------------------------------------------------------------------------- +// DB helpers +// --------------------------------------------------------------------------- + +func (s *SessionExecutionsService) sessionIDFilter(field, id string) *dbs.Filters { + return &dbs.Filters{ + And: map[string][]dbs.Filter{ + field: {{Operator: dbs.EQUAL.String(), Value: id}}, + }, + } +} + +func (s *SessionExecutionsService) loadSession(dt tools.DataType) []scheduling_resources.SchedulerObject { + results := oclib.NewRequestAdmin(oclib.LibDataEnum(dt), nil).Search( + s.sessionIDFilter("executions_id", s.ExecutionsSessionID), "", true) + out := make([]scheduling_resources.SchedulerObject, 0, len(results.Data)) + for _, obj := range results.Data { + out = append(out, scheduling_resources.ToSchedulerObject(dt, obj)) + } + return out +} + +func (s *SessionExecutionsService) LoadSessionExecs() []*workflow_execution.WorkflowExecution { + adminReq := &tools.APIRequest{Admin: true} + results, _, _ := workflow_execution.NewAccessor(adminReq).Search( + s.sessionIDFilter("executions_id", s.ExecutionsSessionID), "", true) + out := make([]*workflow_execution.WorkflowExecution, 0) + for _, obj := range results { + if exec, ok := obj.(*workflow_execution.WorkflowExecution); ok { + out = append(out, exec) + } + } + return out +} + +func (s *SessionExecutionsService) loadSessionOrder() *order.Order { + adminReq := &tools.APIRequest{Admin: true} + results, _, _ := order.NewAccessor(adminReq).Search( + s.sessionIDFilter("executions_id", s.ExecutionsSessionID), "", true) + for _, obj := range results { + if o, ok := obj.(*order.Order); ok { + return o + } + } + return nil +} + +// --------------------------------------------------------------------------- +// Session upsert +// --------------------------------------------------------------------------- + +func (s *SessionExecutionsService) UpsertSessionDrafts( + purchases, 
bookings []scheduling_resources.SchedulerObject, + execs []*workflow_execution.WorkflowExecution, + request *tools.APIRequest, +) { + adminReq := &tools.APIRequest{Admin: true} + + for dt, datas := range map[tools.DataType][]scheduling_resources.SchedulerObject{ + tools.BOOKING: bookings, + tools.PURCHASE_RESOURCE: purchases, + } { + existing := map[string]scheduling_resources.SchedulerObject{} + seen := map[string]bool{} + for _, bk := range s.loadSession(dt) { + existing[bk.GetKey()] = bk + } + s.upsertDrafts(dt, datas, existing, seen, request) + for key, prev := range existing { + if !seen[key] { + scheduling_resources.GetService().Delete(dt, prev, request) + } + } + } + + for _, old := range s.LoadSessionExecs() { + execution.UnregisterExecLock(old.GetID()) + workflow_execution.NewAccessor(adminReq).DeleteOne(old.GetID()) + } + for _, exec := range execs { + exec.ExecutionsID = s.ExecutionsSessionID + exec.IsDraft = true + ex, _, err := utils.GenericStoreOne(exec, workflow_execution.NewAccessor(adminReq)) + if err == nil { + execution.RegisterExecLock(ex.GetID()) + go execution.WatchDeadline(ex.GetID(), s.ExecutionsSessionID, exec.ExecDate, request) + } + } + + if existing := s.loadSessionOrder(); existing == nil { + GenerateOrder(purchases, bookings, s.ExecutionsSessionID, request) + } else { + for _, purch := range purchases { + existing.Purchases = append(existing.Purchases, + scheduling_resources.FromSchedulerObject(tools.PURCHASE_RESOURCE, purch).(*purchase_resource.PurchaseResource)) + } + for _, b := range bookings { + existing.Bookings = append(existing.Bookings, + scheduling_resources.FromSchedulerObject(tools.BOOKING, b).(*booking.Booking)) + } + utils.GenericRawUpdateOne(existing, existing.GetID(), order.NewAccessor(adminReq)) + } +} + +func (s *SessionExecutionsService) upsertDrafts( + dt tools.DataType, + datas []scheduling_resources.SchedulerObject, + existing map[string]scheduling_resources.SchedulerObject, + seen map[string]bool, + request 
*tools.APIRequest, +) { + self := scheduling_resources.GetService().Self() + fmt.Println("upsertDrafts", len(datas), len(existing)) + for _, bk := range datas { + if self != nil { + bk.SetSchedulerPeerID(self.PeerID) + } + bk.SetExecutionsID(s.ExecutionsSessionID) + seen[bk.GetKey()] = true + if prev, ok := existing[bk.GetKey()]; ok { + bk.SetID(prev.GetID()) + bk.SetIsDraft(false) + needsConsiders := scheduling_resources.GetService().PropagateWrite( + scheduling_resources.FromSchedulerDBObject(dt, bk), bk.GetDestPeer(), dt, request) + if needsConsiders { + if payload, err := json.Marshal(execution.ConsidersPayload{ID: bk.GetID()}); err == nil { + go execution.UpdateExecutionState(payload, dt) + } + } + } else { + errCh := make(chan error, 1) + scheduling_resources.GetService().PropagateCreate( + scheduling_resources.FromSchedulerDBObject(dt, bk), bk.GetDestPeer(), dt, request, errCh) + <-errCh + } + } +} + +// --------------------------------------------------------------------------- +// Session lifecycle +// --------------------------------------------------------------------------- + +func (s *SessionExecutionsService) CleanupSession(request *tools.APIRequest) { + adminReq := &tools.APIRequest{Admin: true} + for _, exec := range s.LoadSessionExecs() { + execution.Unschedule(exec.GetID(), request) + workflow_execution.NewAccessor(adminReq).DeleteOne(exec.GetID()) + } + if o := s.loadSessionOrder(); o != nil { + order.NewAccessor(adminReq).DeleteOne(o.GetID()) + } +} + +func GenerateOrder( + purchases, bookings []scheduling_resources.SchedulerObject, + executionsID string, + request *tools.APIRequest, +) (string, error) { + newOrder := &order.Order{ + AbstractObject: utils.AbstractObject{ + Name: "order_" + request.PeerID + "_" + time.Now().UTC().Format("2006-01-02T15:04:05"), + IsDraft: true, + }, + ExecutionsID: executionsID, + Purchases: []*purchase_resource.PurchaseResource{}, + Bookings: []*booking.Booking{}, + Status: enum.PENDING, + } + for _, purch := 
range purchases { + newOrder.Purchases = append(newOrder.Purchases, + scheduling_resources.FromSchedulerObject(tools.PURCHASE_RESOURCE, purch).(*purchase_resource.PurchaseResource)) + } + for _, b := range bookings { + newOrder.Bookings = append(newOrder.Bookings, + scheduling_resources.FromSchedulerObject(tools.BOOKING, b).(*booking.Booking)) + } + res, _, err := order.NewAccessor(request).StoreOne(newOrder) + if err != nil { + return "", err + } + if _, err := bill.DraftFirstBill(res.(*order.Order), request); err != nil { + return res.GetID(), err + } + return res.GetID(), nil +} + +func (s *SessionExecutionsService) ConfirmSession(request *tools.APIRequest) error { + for _, dt := range []tools.DataType{tools.BOOKING, tools.PURCHASE_RESOURCE} { + for _, bk := range s.loadSession(dt) { + bk.SetIsDraft(false) + needsConsiders := scheduling_resources.GetService().PropagateWrite( + scheduling_resources.FromSchedulerDBObject(dt, bk), bk.GetDestPeer(), dt, request) + if needsConsiders { + if payload, err := json.Marshal(execution.ConsidersPayload{ID: bk.GetID()}); err == nil { + go execution.UpdateExecutionState(payload, dt) + } + } + } + } + return nil +} diff --git a/infrastructure/utils/utils.go b/infrastructure/utils/utils.go new file mode 100644 index 0000000..3ad4dd9 --- /dev/null +++ b/infrastructure/utils/utils.go @@ -0,0 +1,186 @@ +package utils + +import ( + "encoding/json" + "errors" + "sync" + "time" + + oclib "cloud.o-forge.io/core/oc-lib" + "cloud.o-forge.io/core/oc-lib/models/resources" + "cloud.o-forge.io/core/oc-lib/models/workflow" + "cloud.o-forge.io/core/oc-lib/tools" +) + +type BookingResource struct { + ID string // resource MongoDB _id + PeerPID string // peer public PeerID (PID) — PlannerCache key + InstanceID string // resolved from WorkflowSchedule.SelectedInstances +} + +// collectBookingResources returns unique storage and compute resources from the +// workflow graph. 
For each resource the selected instance ID is resolved from +// selectedInstances (the scheduler's SelectedInstances ConfigItem) so the planner +// check targets the exact instance chosen by the user. +func CollectBookingResources(wf *workflow.Workflow, selectedInstances workflow.ConfigItem) map[string]BookingResource { + if wf.Graph == nil { + return nil + } + seen := map[string]bool{} + result := map[string]BookingResource{} + + // Resolve MongoDB peer _id (DID) → public PeerID (PID) used as PlannerCache key. + peerAccess := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil) + didToPID := map[string]string{} + resolvePID := func(did string) string { + if pid, ok := didToPID[did]; ok { + return pid + } + if data := peerAccess.LoadOne(did); data.Data != nil { + if p := data.ToPeer(); p != nil { + didToPID[did] = p.PeerID + return p.PeerID + } + } + return "" + } + + resolveInstanceID := func(res interface { + GetID() string + GetCreatorID() string + }) string { + idx := selectedInstances.Get(res.GetID()) + switch r := res.(type) { + case *resources.StorageResource: + if inst := r.GetSelectedInstance(idx); inst != nil { + return inst.GetID() + } + case *resources.ComputeResource: + if inst := r.GetSelectedInstance(idx); inst != nil { + return inst.GetID() + } + } + return "" + } + + for _, item := range wf.GetGraphItems(wf.Graph.IsStorage) { + _, res := item.GetResource() + if res == nil { + continue + } + id := res.GetID() + if seen[id] { + continue + } + pid := resolvePID(res.GetCreatorID()) + if pid == "" { + continue + } + seen[id] = true + result[pid] = BookingResource{ + ID: id, + PeerPID: pid, + InstanceID: resolveInstanceID(res), + } + } + + for _, item := range wf.GetGraphItems(wf.Graph.IsCompute) { + _, res := item.GetResource() + if res == nil { + continue + } + id := res.GetID() + if seen[id] { + continue + } + pid := resolvePID(res.GetCreatorID()) + if pid == "" { + continue + } + seen[id] = true + result[pid] = BookingResource{ + ID: id, + 
PeerPID: pid, + InstanceID: resolveInstanceID(res), + } + } + + return result +} + +// GetWorkflowPeerIDs loads the workflow and returns the deduplicated list of +// creator peer IDs for all its storage and compute resources. +// These are the peers whose planners must be watched by a check stream. +func GetWorkflowPeerIDs(wfID string, request *tools.APIRequest) ([]string, error) { + obj, code, err := workflow.NewAccessor(request).LoadOne(wfID) + if code != 200 || err != nil { + msg := "could not load workflow " + wfID + if err != nil { + msg += ": " + err.Error() + } + return nil, errors.New(msg) + } + wf := obj.(*workflow.Workflow) + if wf.Graph == nil { + return nil, nil + } + seen := map[string]bool{} + var peerIDs []string + for _, item := range wf.GetGraphItems(wf.Graph.IsStorage) { + _, res := item.GetResource() + if res == nil { + continue + } + if id := res.GetCreatorID(); id != "" && !seen[id] { + seen[id] = true + peerIDs = append(peerIDs, id) + } + } + for _, item := range wf.GetGraphItems(wf.Graph.IsCompute) { + _, res := item.GetResource() + if res == nil { + continue + } + if id := res.GetCreatorID(); id != "" && !seen[id] { + seen[id] = true + peerIDs = append(peerIDs, id) + } + } + realPeersID := []string{} + access := oclib.NewRequestAdmin(oclib.LibDataEnum(tools.PEER), nil) + for _, id := range peerIDs { + if data := access.LoadOne(id); data.Data != nil { + realPeersID = append(realPeersID, data.ToPeer().PeerID) + } + } + return realPeersID, nil +} + +func FormatOptTime(t *time.Time) string { + if t == nil { + return "open" + } + return t.Format(time.RFC3339) +} + +func Notify[T interface{}](mu *sync.RWMutex, registry map[string][]chan T, key string, toAdd T) { + mu.RLock() + subs := registry[key] + mu.RUnlock() + for _, ch := range subs { + select { + case ch <- toAdd: + default: + } + } +} + +func Propalgate(peerID string, message tools.PropalgationMessage) { + b, _ := json.Marshal(message) + 
tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{ + FromApp: "oc-scheduler", + Datatype: -1, + Method: int(tools.PROPALGATION_EVENT), + Payload: b, + }) +} diff --git a/logs.log b/logs.log new file mode 100644 index 0000000..9e545fd --- /dev/null +++ b/logs.log @@ -0,0 +1,1118 @@ +init global config instance failed. If you do not use this, just ignore it. open conf/app.conf: no such file or directory +{"level":"info","time":"2026-03-23T07:15:00Z","message":"Config file found : /etc/oc/scheduler.json"} +{"level":"info","time":"2026-03-23T07:15:00Z","message":"Config file found : /etc/oc/scheduler.json"} +2026-03-23T07:15:00Z INF Connecting tomongodb://mongo:27017/ +zerolog: could not write event: failed to send log to Loki: Post "http://loki:3100/loki/api/v1/push": dial tcp: lookup loki on 127.0.0.11:53: server misbehaving +2026-03-23T07:15:10Z INF ExecutionWatchdog: started +2026/03/23 07:15:10.497 [I] http server Running on http://:8080 +zerolog: could not write event: failed to send log to Loki: Post "http://loki:3100/loki/api/v1/push": dial tcp: lookup loki on 127.0.0.11:53: server misbehaving +NATS Connect err dial tcp: lookup nats on 127.0.0.11:53: server misbehaving +2026-03-23T07:15:10Z ERR Could not connect to NATS +2026-03-23T07:15:10Z ERR Could not connect to NATS +zerolog: could not write event: failed to send log to Loki: Post "http://loki:3100/loki/api/v1/push": dial tcp: lookup loki on 127.0.0.11:53: server misbehaving +zerolog: could not write event: failed to send log to Loki: Post "http://loki:3100/loki/api/v1/push": dial tcp: lookup loki on 127.0.0.11:53: server misbehaving +InitSelfPlanner: self peer not found yet, retrying in 15s... 
+panic: runtime error: invalid memory address or nil pointer dereference +[signal SIGSEGV: segmentation violation code=0x1 addr=0x58 pc=0x936569] + +goroutine 198 [running]: +go.mongodb.org/mongo-driver/mongo.(*Collection).find(0x0, {0x2037a58, 0x7238244e5b0}, {0x1b7c580?, 0x723826b2c30?}, 0x1, {0x72382703df0, 0x1, 0x1}) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/mongo/collection.go:1239 +0x89 +go.mongodb.org/mongo-driver/mongo.(*Collection).Find(0x0, {0x2037a58?, 0x7238244e5b0?}, {0x1b7c580, 0x723826b2c30}, {0x72382703df0, 0x1, 0x1}) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/mongo/collection.go:1229 +0xae +cloud.o-forge.io/core/oc-lib/dbs/mongo.(*MongoDB).Search(0x31ed760, 0x723823cb980, {0x1e2dccd, 0x4}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/dbs/mongo/mongo.go:294 +0x1b6 +cloud.o-forge.io/core/oc-lib/models/utils.GenericSearch[...](0x0, {0x0?, 0x7af72f83c028?}, 0x7af72fa0ba00?, 0x72382703eb0?, 0x0, {0x204bc60, 0x72382bb40c0}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:185 +0xa7 +cloud.o-forge.io/core/oc-lib/models/utils.(*AbstractAccessor[...]).Search(0x205b2e0, 0x723823cb980, {0x0, 0x0}, 0x0) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/abstracts.go:267 +0xba +cloud.o-forge.io/core/oc-lib/models/utils.GetMySelf({0x204bbc0, 0x72382bb40c0}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:201 +0x130 +cloud.o-forge.io/core/oc-lib.GetMySelf() + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/entrypoint.go:70 +0x36 +oc-scheduler/infrastructure.InitSelfPlanner() + /oc-scheduler/infrastructure/planner.go:306 +0x65 +created by main.main in goroutine 1 + /oc-scheduler/main.go:26 +0x1fd +init global config instance failed. If you do not use this, just ignore it. 
open conf/app.conf: no such file or directory +{"level":"info","time":"2026-03-23T07:28:11Z","message":"Config file found : /etc/oc/scheduler.json"} +{"level":"info","time":"2026-03-23T07:28:11Z","message":"Config file found : /etc/oc/scheduler.json"} +2026-03-23T07:28:11Z INF Connecting tomongodb://mongo:27017/ +2026-03-23T07:28:11Z INF Connecting mongo client to db DC_myDC +2026-03-23T07:28:11Z INF Database is READY +2026-03-23T07:28:11Z INF ExecutionWatchdog: started +2026-03-23T07:28:11Z INF Listening to confirm_event +2026-03-23T07:28:11Z INF Listening to discovery +2026-03-23T07:28:11Z INF Listening to planner_execution +Published on discovery +2026-03-23T07:28:11Z INF Listening to workflow_done_event +2026-03-23T07:28:11Z INF Listening to create_resource +2026-03-23T07:28:11Z INF Listening to considers_event +2026-03-23T07:28:11Z INF Listening to workflow_started_event +2026-03-23T07:28:11Z INF Listening to workflow_step_done_event +2026-03-23T07:28:11Z INF Listening to remove_resource +RecoverDraftExecutions: recovered 0 draft executions +2026/03/23 07:28:11.800 [I] http server Running on http://:8080 +Published on propalgation_event +2026-03-23T07:35:16Z INF Catching discovery... /app -  +2026-03-23T07:36:16Z INF Catching workflow step done event... 
oc-datacenter - invalid +runtime: goroutine stack exceeds 1000000000-byte limit +runtime: sp=0x2f56693c0a30 stack=[0x2f56693c0000, 0x2f56893c0000] +fatal error: stack overflow + +runtime stack: +runtime.throw({0x1e4bc16?, 0x2f554ee80160?}) + /usr/local/go/src/runtime/panic.go:1229 +0x48 fp=0x2f554ec07e98 sp=0x2f554ec07e68 pc=0x48c6a8 +runtime.newstack() + /usr/local/go/src/runtime/stack.go:1178 +0x5fd fp=0x2f554ec07fc8 sp=0x2f554ec07e98 pc=0x47119d +runtime.morestack() + /usr/local/go/src/runtime/asm_amd64.s:681 +0x7d fp=0x2f554ec07fd0 sp=0x2f554ec07fc8 pc=0x4926bd + +goroutine 182 gp=0x2f554e9b70e0 m=13 mp=0x2f554e501008 [running]: +go.mongodb.org/mongo-driver/x/mongo/driver.filterDeprioritizedServers({0x2f557a32b040, 0x1, 0x1}, {0x0, 0x0, 0x0?}) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/operation.go:341 +0x55f fp=0x2f56693c0a40 sp=0x2f56693c0a38 pc=0x8761bf +go.mongodb.org/mongo-driver/x/mongo/driver.(*opServerSelector).SelectServer(0x2f557a325620, {{0x2f556c6cd040, 0x1, 0x1}, {0x0, 0x0}, 0x1, 0x1e, 0x2f554e4f1260, {0x0, ...}}, ...) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/operation.go:390 +0x92 fp=0x2f56693c0ab8 sp=0x2f56693c0a40 pc=0x876292 +go.mongodb.org/mongo-driver/x/mongo/driver/topology.(*Topology).selectServerFromDescription(0x4?, {{0x2f556c6cd040, 0x1, 0x1}, {0x0, 0x0}, 0x1, 0x1e, 0x2f554e4f1260, {0x0, ...}}, ...) 
+ /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/topology.go:791 +0x3dc fp=0x2f56693c0dc8 sp=0x2f56693c0ab8 pc=0x920cbc +go.mongodb.org/mongo-driver/x/mongo/driver/topology.(*Topology).SelectServer(0x2f554e6f35f0, {0x20379b0, 0x2f557a325680}, {0x202a5c0, 0x2f557a325620}) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/topology.go:571 +0x5df fp=0x2f56693c17c0 sp=0x2f56693c0dc8 pc=0x91e9ff +go.mongodb.org/mongo-driver/x/mongo/driver.Operation.selectServer({0x2f557a332e20, {0x2f554e7c5628, 0x7}, {0x202ff00, 0x2f554e6f35f0}, 0x2f557a332e30, {0x2028920, 0x2f557a3255f0}, 0x2f554ebee3f0, 0x2f554e7afef0, ...}, ...) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/operation.go:425 +0x47a fp=0x2f56693c1960 sp=0x2f56693c17c0 pc=0x87675a +go.mongodb.org/mongo-driver/x/mongo/driver.Operation.getServerAndConnection({0x2f557a332e20, {0x2f554e7c5628, 0x7}, {0x202ff00, 0x2f554e6f35f0}, 0x2f557a332e30, {0x2028920, 0x2f557a3255f0}, 0x2f554ebee3f0, 0x2f554e7afef0, ...}, ...) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/operation.go:434 +0x88 fp=0x2f56693c1f30 sp=0x2f56693c1960 pc=0x876868 +go.mongodb.org/mongo-driver/x/mongo/driver.Operation.Execute({0x2f557a332e20, {0x2f554e7c5628, 0x7}, {0x202ff00, 0x2f554e6f35f0}, 0x2f557a332e30, {0x2028920, 0x2f557a3255f0}, 0x2f554ebee3f0, 0x2f554e7afef0, ...}, ...) 
+ /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/operation.go:639 +0x6e5 fp=0x2f56693c3108 sp=0x2f56693c1f30 pc=0x877605 +go.mongodb.org/mongo-driver/x/mongo/driver/operation.(*Find).Execute(0x2f557a330d88, {0x2037a58, 0x2f55813e7180}) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/operation/find.go:117 +0x2c5 fp=0x2f56693c3338 sp=0x2f56693c3108 pc=0x88c405 +go.mongodb.org/mongo-driver/mongo.(*Collection).find(0x2f554e62d0a0, {0x2037a58, 0x2f55813e7180}, {0x1c06200?, 0x2f557a325560?}, 0x0, {0x2f55792b9dc0, 0x1, 0x1}) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/mongo/collection.go:1395 +0x132b fp=0x2f56693c3558 sp=0x2f56693c3338 pc=0x93780b +go.mongodb.org/mongo-driver/mongo.(*Collection).FindOne(0x2f554e62d0a0, {0x2037a58?, 0x2f55813e7180?}, {0x1c06200, 0x2f557a325560}, {0x0, 0x0, 0x489ee5?}) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/mongo/collection.go:1452 +0x553 fp=0x2f56693c3660 sp=0x2f56693c3558 pc=0x938393 +cloud.o-forge.io/core/oc-lib/dbs/mongo.(*MongoDB).LoadOne(0x31ed760, {0x2f557a3288d0, 0x24}, {0x1e39ad3, 0x7}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/dbs/mongo/mongo.go:273 +0x18d fp=0x2f56693c36d0 sp=0x2f56693c3660 pc=0x947d8d +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f557a3288d0, 0x24}, 0x2f557a31ea80, 0x2f56693c3750, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:143 +0x86 fp=0x2f56693c3718 sp=0x2f56693c36d0 pc=0x183ac26 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f557a3288d0, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56693c3780 sp=0x2f56693c3718 pc=0x1839aae +cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f557a3288d0, 0x24}, {0x204b800, 0x2f554e536000}) + 
/go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56693c3828 sp=0x2f56693c3780 pc=0x18379e9 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne.func1({0x204d6e0, 0x2f557a250780}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:47 +0x15c fp=0x2f56693c38c8 sp=0x2f56693c3828 pc=0x1839c3c +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f557a22bb90, 0x24}, 0x2f557a250780, 0x2f56693c3948, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb fp=0x2f56693c3910 sp=0x2f56693c38c8 pc=0x183ad6b +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f557a22bb90, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56693c3978 sp=0x2f56693c3910 pc=0x1839aae +cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f557a22bb90, 0x24}, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56693c3a20 sp=0x2f56693c3978 pc=0x18379e9 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne.func1({0x204d6e0, 0x2f557a250600}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:47 +0x15c fp=0x2f56693c3ac0 sp=0x2f56693c3a20 pc=0x1839c3c +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f557a22b980, 0x24}, 0x2f557a250600, 0x2f56693c3b40, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb fp=0x2f56693c3b08 sp=0x2f56693c3ac0 pc=0x183ad6b 
+cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f557a22b980, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56693c3b70 sp=0x2f56693c3b08 pc=0x1839aae +cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f557a22b980, 0x24}, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56693c3c18 sp=0x2f56693c3b70 pc=0x18379e9 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne.func1({0x204d6e0, 0x2f557a31e900}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:47 +0x15c fp=0x2f56693c3cb8 sp=0x2f56693c3c18 pc=0x1839c3c +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f557a3286c0, 0x24}, 0x2f557a31e900, 0x2f56693c3d38, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb fp=0x2f56693c3d00 sp=0x2f56693c3cb8 pc=0x183ad6b +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f557a3286c0, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56693c3d68 sp=0x2f56693c3d00 pc=0x1839aae +cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f557a3286c0, 0x24}, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56693c3e10 sp=0x2f56693c3d68 pc=0x18379e9 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne.func1({0x204d6e0, 0x2f557a250480}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:47 +0x15c fp=0x2f56693c3eb0 
sp=0x2f56693c3e10 pc=0x1839c3c +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f557a22b770, 0x24}, 0x2f557a250480, 0x2f56693c3f30, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb fp=0x2f56693c3ef8 sp=0x2f56693c3eb0 pc=0x183ad6b +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f557a22b770, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56693c3f60 sp=0x2f56693c3ef8 pc=0x1839aae +cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f557a22b770, 0x24}, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56693c4008 sp=0x2f56693c3f60 pc=0x18379e9 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne.func1({0x204d6e0, 0x2f557a2b2d80}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:47 +0x15c fp=0x2f56693c40a8 sp=0x2f56693c4008 pc=0x1839c3c +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f557a2ab530, 0x24}, 0x2f557a2b2d80, 0x2f56693c4128, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb fp=0x2f56693c40f0 sp=0x2f56693c40a8 pc=0x183ad6b +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f557a2ab530, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56693c4158 sp=0x2f56693c40f0 pc=0x1839aae +cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f557a2ab530, 0x24}, {0x204b800, 0x2f554e536000}) + 
/go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56693c4200 sp=0x2f56693c4158 pc=0x18379e9 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne.func1({0x204d6e0, 0x2f557a31e780}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:47 +0x15c fp=0x2f56693c42a0 sp=0x2f56693c4200 pc=0x1839c3c +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f557a3284b0, 0x24}, 0x2f557a31e780, 0x2f56693c4320, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb fp=0x2f56693c42e8 sp=0x2f56693c42a0 pc=0x183ad6b +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f557a3284b0, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56693c4350 sp=0x2f56693c42e8 pc=0x1839aae +cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f557a3284b0, 0x24}, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56693c43f8 sp=0x2f56693c4350 pc=0x18379e9 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne.func1({0x204d6e0, 0x2f557a2b2c00}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:47 +0x15c fp=0x2f56693c4498 sp=0x2f56693c43f8 pc=0x1839c3c +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f557a2ab320, 0x24}, 0x2f557a2b2c00, 0x2f56693c4518, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb fp=0x2f56693c44e0 sp=0x2f56693c4498 pc=0x183ad6b 
+cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f557a2ab320, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56693c4548 sp=0x2f56693c44e0 pc=0x1839aae +cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f557a2ab320, 0x24}, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56693c45f0 sp=0x2f56693c4548 pc=0x18379e9 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne.func1({0x204d6e0, 0x2f557a250300}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:47 +0x15c fp=0x2f56693c4690 sp=0x2f56693c45f0 pc=0x1839c3c +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f557a22b560, 0x24}, 0x2f557a250300, 0x2f56693c4710, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb fp=0x2f56693c46d8 sp=0x2f56693c4690 pc=0x183ad6b +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f557a22b560, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56693c4740 sp=0x2f56693c46d8 pc=0x1839aae +cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f557a22b560, 0x24}, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56693c47e8 sp=0x2f56693c4740 pc=0x18379e9 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne.func1({0x204d6e0, 0x2f557a31e600}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:47 +0x15c fp=0x2f56693c4888 
sp=0x2f56693c47e8 pc=0x1839c3c +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f557a3282a0, 0x24}, 0x2f557a31e600, 0x2f56693c4908, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb fp=0x2f56693c48d0 sp=0x2f56693c4888 pc=0x183ad6b +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f557a3282a0, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56693c4938 sp=0x2f56693c48d0 pc=0x1839aae +cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f557a3282a0, 0x24}, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56693c49e0 sp=0x2f56693c4938 pc=0x18379e9 +...4260677 frames elided... +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f554e9f0030, 0x24}, 0x2f554e740000, 0x2f554eaf2678, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb fp=0x2f56893be640 sp=0x2f56893be5f8 pc=0x183ad6b +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f554e9f0030, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56893be6a8 sp=0x2f56893be640 pc=0x1839aae +cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f554e9f0030, 0x24}, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56893be750 sp=0x2f56893be6a8 pc=0x18379e9 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne.func1({0x204d6e0, 0x2f554eb82600}) + 
/go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:47 +0x15c fp=0x2f56893be7f0 sp=0x2f56893be750 pc=0x1839c3c +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f554ec1c6f0, 0x24}, 0x2f554eb82600, 0x2f554eaf2870, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb fp=0x2f56893be838 sp=0x2f56893be7f0 pc=0x183ad6b +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f554ec1c6f0, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56893be8a0 sp=0x2f56893be838 pc=0x1839aae +cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f554ec1c6f0, 0x24}, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56893be948 sp=0x2f56893be8a0 pc=0x18379e9 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne.func1({0x204d6e0, 0x2f554e3e0480}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:47 +0x15c fp=0x2f56893be9e8 sp=0x2f56893be948 pc=0x1839c3c +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f554eaae6f0, 0x24}, 0x2f554e3e0480, 0x2f554eaf2a68, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb fp=0x2f56893bea30 sp=0x2f56893be9e8 pc=0x183ad6b +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f554eaae6f0, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56893bea98 sp=0x2f56893bea30 pc=0x1839aae 
+cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f554eaae6f0, 0x24}, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56893beb40 sp=0x2f56893bea98 pc=0x18379e9 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne.func1({0x204d6e0, 0x2f554eb82480}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:47 +0x15c fp=0x2f56893bebe0 sp=0x2f56893beb40 pc=0x1839c3c +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f554ec1c4e0, 0x24}, 0x2f554eb82480, 0x2f554eaf2c60, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb fp=0x2f56893bec28 sp=0x2f56893bebe0 pc=0x183ad6b +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f554ec1c4e0, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56893bec90 sp=0x2f56893bec28 pc=0x1839aae +cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f554ec1c4e0, 0x24}, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56893bed38 sp=0x2f56893bec90 pc=0x18379e9 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne.func1({0x204d6e0, 0x2f554e4fd680}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:47 +0x15c fp=0x2f56893bedd8 sp=0x2f56893bed38 pc=0x1839c3c +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f554e946600, 0x24}, 0x2f554e4fd680, 0x2f554eaf2e58, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb 
fp=0x2f56893bee20 sp=0x2f56893bedd8 pc=0x183ad6b +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f554e946600, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56893bee88 sp=0x2f56893bee20 pc=0x1839aae +cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f554e946600, 0x24}, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56893bef30 sp=0x2f56893bee88 pc=0x18379e9 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne.func1({0x204d6e0, 0x2f554e3e0300}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:47 +0x15c fp=0x2f56893befd0 sp=0x2f56893bef30 pc=0x1839c3c +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f554eaae4e0, 0x24}, 0x2f554e3e0300, 0x2f554eaf3050, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb fp=0x2f56893bf018 sp=0x2f56893befd0 pc=0x183ad6b +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f554eaae4e0, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56893bf080 sp=0x2f56893bf018 pc=0x1839aae +cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f554eaae4e0, 0x24}, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56893bf128 sp=0x2f56893bf080 pc=0x18379e9 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne.func1({0x204d6e0, 0x2f554eb82300}) + 
/go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:47 +0x15c fp=0x2f56893bf1c8 sp=0x2f56893bf128 pc=0x1839c3c +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f554ec1c2d0, 0x24}, 0x2f554eb82300, 0x2f554eaf3248, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb fp=0x2f56893bf210 sp=0x2f56893bf1c8 pc=0x183ad6b +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f554ec1c2d0, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56893bf278 sp=0x2f56893bf210 pc=0x1839aae +cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f554ec1c2d0, 0x24}, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56893bf320 sp=0x2f56893bf278 pc=0x18379e9 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne.func1({0x204d6e0, 0x2f554e3e0180}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:47 +0x15c fp=0x2f56893bf3c0 sp=0x2f56893bf320 pc=0x1839c3c +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f554eaae2d0, 0x24}, 0x2f554e3e0180, 0x2f554eaf3440, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb fp=0x2f56893bf408 sp=0x2f56893bf3c0 pc=0x183ad6b +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f554eaae2d0, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56893bf470 sp=0x2f56893bf408 pc=0x1839aae 
+cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f554eaae2d0, 0x24}, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56893bf518 sp=0x2f56893bf470 pc=0x18379e9 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne.func1({0x204d6e0, 0x2f554eb82180}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:47 +0x15c fp=0x2f56893bf5b8 sp=0x2f56893bf518 pc=0x1839c3c +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f554ec1c0c0, 0x24}, 0x2f554eb82180, 0x2f554eaf3638, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb fp=0x2f56893bf600 sp=0x2f56893bf5b8 pc=0x183ad6b +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f554ec1c0c0, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56893bf668 sp=0x2f56893bf600 pc=0x1839aae +cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f554ec1c0c0, 0x24}, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56893bf710 sp=0x2f56893bf668 pc=0x18379e9 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne.func1({0x204d6e0, 0x2f554e3e0000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:47 +0x15c fp=0x2f56893bf7b0 sp=0x2f56893bf710 pc=0x1839c3c +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f554eaae030, 0x24}, 0x2f554e3e0000, 0x2f554e513830, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb 
fp=0x2f56893bf7f8 sp=0x2f56893bf7b0 pc=0x183ad6b +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f554eaae030, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56893bf860 sp=0x2f56893bf7f8 pc=0x1839aae +cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f554eaae030, 0x24}, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56893bf908 sp=0x2f56893bf860 pc=0x18379e9 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne.func1({0x204d6e0, 0x2f554e4fcc00}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:47 +0x15c fp=0x2f56893bf9a8 sp=0x2f56893bf908 pc=0x1839c3c +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f554e946390, 0x24}, 0x2f554e4fcc00, 0x2f554e513a28, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb fp=0x2f56893bf9f0 sp=0x2f56893bf9a8 pc=0x183ad6b +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f554e946390, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56893bfa58 sp=0x2f56893bf9f0 pc=0x1839aae +cloud.o-forge.io/core/oc-lib/models/utils.GenericDeleteOne({0x2f554e946390, 0x24}, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:71 +0x49 fp=0x2f56893bfb00 sp=0x2f56893bfa58 pc=0x18379e9 +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne.func1({0x204d6e0, 0x2f554eb82000}) + 
/go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:47 +0x15c fp=0x2f56893bfba0 sp=0x2f56893bfb00 pc=0x1839c3c +cloud.o-forge.io/core/oc-lib/models/utils.GenericLoadOne[...]({0x2f554ec1c090, 0x24}, 0x2f554eb82000, 0x2f554e513c20, {0x204b800, 0x2f554e536000}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/utils/common.go:154 +0x1cb fp=0x2f56893bfbe8 sp=0x2f56893bfba0 pc=0x183ad6b +cloud.o-forge.io/core/oc-lib/models/booking.(*BookingMongoAccessor).LoadOne(0x2f554e536000, {0x2f554ec1c090, 0x24}) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/models/booking/booking_mongo_accessor.go:43 +0x6e fp=0x2f56893bfc50 sp=0x2f56893bfbe8 pc=0x1839aae +oc-scheduler/infrastructure.handleWorkflowStepDone({{0x2f554e650020, 0xd}, 0x0, {0x0, 0x0}, {0x0, 0x0, 0x0}, 0xf, {0x0, ...}, ...}) + /oc-scheduler/infrastructure/nats_handlers.go:224 +0xaf fp=0x2f56893bfdc8 sp=0x2f56893bfc50 pc=0x194134f +cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).listenForChange(0x0?, {{0x202e4b8, 0x2f554e939608}, 0xff, {0x0, 0x0}, {0x2f554e519200, 0x1, 0x1f4}, {0x2f554e7afa10, ...}, ...}, ...) 
+ /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:168 +0x2e3 fp=0x2f56893bff38 sp=0x2f56893bfdc8 pc=0x17c8003 +cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).ListenNats.gowrap2() + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:113 +0x8a fp=0x2f56893bffe0 sp=0x2f56893bff38 pc=0x17c77ea +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f56893bffe8 sp=0x2f56893bffe0 pc=0x494001 +created by cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).ListenNats in goroutine 37 + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:113 +0x2be + +goroutine 1 gp=0x2f554e3c21e0 m=nil [chan receive, 10 minutes]: +runtime.gopark(0x0?, 0x5000000000080?, 0xa8?, 0x5?, 0x76bcc088f498?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554ea4dd18 sp=0x2f554ea4dcf8 pc=0x48c7ce +runtime.chanrecv(0x2f554e550080, 0x0, 0x1) + /usr/local/go/src/runtime/chan.go:667 +0x4ae fp=0x2f554ea4dd90 sp=0x2f554ea4dd18 pc=0x41ca2e +runtime.chanrecv1(0x2f554ef87e70?, 0x1?) 
+ /usr/local/go/src/runtime/chan.go:509 +0x12 fp=0x2f554ea4ddb8 sp=0x2f554ea4dd90 pc=0x41c552 +github.com/beego/beego/v2/server/web.(*HttpServer).Run(0x2f554e6e4900, {0x0, 0x0}, {0x0, 0x0, 0x19674b5?}) + /go/pkg/mod/github.com/beego/beego/v2@v2.3.8/server/web/server.go:304 +0x767 fp=0x2f554ea4dec0 sp=0x2f554ea4ddb8 pc=0xb6c6c7 +github.com/beego/beego/v2/server/web.Run({0x0?, 0x1e3f33a?, 0x0?}) + /go/pkg/mod/github.com/beego/beego/v2@v2.3.8/server/web/beego.go:54 +0x55 fp=0x2f554ea4df00 sp=0x2f554ea4dec0 pc=0xb596b5 +main.main() + /oc-scheduler/main.go:29 +0x225 fp=0x2f554ea4df48 sp=0x2f554ea4df00 pc=0x19674c5 +runtime.main() + /usr/local/go/src/runtime/proc.go:290 +0x2d5 fp=0x2f554ea4dfe0 sp=0x2f554ea4df48 pc=0x456955 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554ea4dfe8 sp=0x2f554ea4dfe0 pc=0x494001 + +goroutine 2 gp=0x2f554e3c2780 m=nil [force gc (idle), 4 minutes]: +runtime.gopark(0x32db7098806?, 0x0?, 0x0?, 0x0?, 0x0?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e464fa8 sp=0x2f554e464f88 pc=0x48c7ce +runtime.goparkunlock(...) + /usr/local/go/src/runtime/proc.go:468 +runtime.forcegchelper() + /usr/local/go/src/runtime/proc.go:375 +0xb3 fp=0x2f554e464fe0 sp=0x2f554e464fa8 pc=0x456c73 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e464fe8 sp=0x2f554e464fe0 pc=0x494001 +created by runtime.init.7 in goroutine 1 + /usr/local/go/src/runtime/proc.go:363 +0x1a + +goroutine 3 gp=0x2f554e3c2d20 m=nil [GC sweep wait]: +runtime.gopark(0x1?, 0x0?, 0x0?, 0x0?, 0x0?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e465788 sp=0x2f554e465768 pc=0x48c7ce +runtime.goparkunlock(...) 
+ /usr/local/go/src/runtime/proc.go:468 +runtime.bgsweep(0x2f554e47c000) + /usr/local/go/src/runtime/mgcsweep.go:324 +0x151 fp=0x2f554e4657c8 sp=0x2f554e465788 pc=0x43ddb1 +runtime.gcenable.gowrap1() + /usr/local/go/src/runtime/mgc.go:214 +0x17 fp=0x2f554e4657e0 sp=0x2f554e4657c8 pc=0x42f157 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e4657e8 sp=0x2f554e4657e0 pc=0x494001 +created by runtime.gcenable in goroutine 1 + /usr/local/go/src/runtime/mgc.go:214 +0x66 + +goroutine 4 gp=0x2f554e3c2f00 m=nil [GC scavenge wait]: +runtime.gopark(0x10000?, 0xfeabb?, 0x0?, 0x0?, 0x0?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e465f78 sp=0x2f554e465f58 pc=0x48c7ce +runtime.goparkunlock(...) + /usr/local/go/src/runtime/proc.go:468 +runtime.(*scavengerState).park(0x31ed980) + /usr/local/go/src/runtime/mgcscavenge.go:425 +0x49 fp=0x2f554e465fa8 sp=0x2f554e465f78 pc=0x43b829 +runtime.bgscavenge(0x2f554e47c000) + /usr/local/go/src/runtime/mgcscavenge.go:658 +0x59 fp=0x2f554e465fc8 sp=0x2f554e465fa8 pc=0x43bdb9 +runtime.gcenable.gowrap2() + /usr/local/go/src/runtime/mgc.go:215 +0x17 fp=0x2f554e465fe0 sp=0x2f554e465fc8 pc=0x42f117 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e465fe8 sp=0x2f554e465fe0 pc=0x494001 +created by runtime.gcenable in goroutine 1 + /usr/local/go/src/runtime/mgc.go:215 +0xa5 + +goroutine 5 gp=0x2f554e3c34a0 m=nil [GOMAXPROCS updater (idle), 10 minutes]: +runtime.gopark(0x0?, 0x0?, 0x0?, 0x0?, 0x0?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e466788 sp=0x2f554e466768 pc=0x48c7ce +runtime.goparkunlock(...) 
+ /usr/local/go/src/runtime/proc.go:468 +runtime.updateMaxProcsGoroutine() + /usr/local/go/src/runtime/proc.go:7095 +0xe7 fp=0x2f554e4667e0 sp=0x2f554e466788 pc=0x465247 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e4667e8 sp=0x2f554e4667e0 pc=0x494001 +created by runtime.defaultGOMAXPROCSUpdateEnable in goroutine 1 + /usr/local/go/src/runtime/proc.go:7083 +0x37 + +goroutine 6 gp=0x2f554e3c3680 m=nil [finalizer wait, 10 minutes]: +runtime.gopark(0x466295?, 0x3213e80?, 0x13?, 0x0?, 0x2f554e464670?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e464620 sp=0x2f554e464600 pc=0x48c7ce +runtime.runFinalizers() + /usr/local/go/src/runtime/mfinal.go:210 +0x107 fp=0x2f554e4647e0 sp=0x2f554e464620 pc=0x42e0c7 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e4647e8 sp=0x2f554e4647e0 pc=0x494001 +created by runtime.createfing in goroutine 1 + /usr/local/go/src/runtime/mfinal.go:172 +0x3d + +goroutine 7 gp=0x2f554e60c780 m=nil [cleanup wait, 10 minutes]: +runtime.gopark(0x0?, 0x0?, 0x0?, 0x0?, 0x0?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e466f68 sp=0x2f554e466f48 pc=0x48c7ce +runtime.goparkunlock(...) + /usr/local/go/src/runtime/proc.go:468 +runtime.(*cleanupQueue).dequeue(0x31ede80) + /usr/local/go/src/runtime/mcleanup.go:522 +0xd4 fp=0x2f554e466fa0 sp=0x2f554e466f68 pc=0x42a734 +runtime.runCleanups() + /usr/local/go/src/runtime/mcleanup.go:718 +0x45 fp=0x2f554e466fe0 sp=0x2f554e466fa0 pc=0x42ada5 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e466fe8 sp=0x2f554e466fe0 pc=0x494001 +created by runtime.(*cleanupQueue).createGs in goroutine 1 + /usr/local/go/src/runtime/mcleanup.go:672 +0xa5 + +goroutine 17 gp=0x2f554e60d0e0 m=nil [GC worker (idle)]: +runtime.gopark(0x34b409dc0f5?, 0x2f554e518000?, 0x1c?, 0xa?, 0x0?) 
+ /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e467740 sp=0x2f554e467720 pc=0x48c7ce +runtime.gcBgMarkWorker(0x2f554e7d85b0) + /usr/local/go/src/runtime/mgc.go:1791 +0xeb fp=0x2f554e4677c8 sp=0x2f554e467740 pc=0x431c2b +runtime.gcBgMarkStartWorkers.gowrap1() + /usr/local/go/src/runtime/mgc.go:1695 +0x17 fp=0x2f554e4677e0 sp=0x2f554e4677c8 pc=0x431b17 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e4677e8 sp=0x2f554e4677e0 pc=0x494001 +created by runtime.gcBgMarkStartWorkers in goroutine 1 + /usr/local/go/src/runtime/mgc.go:1695 +0x105 + +goroutine 33 gp=0x2f554e5043c0 m=nil [GC worker (idle)]: +runtime.gopark(0x34b409da959?, 0x3?, 0xc5?, 0x1b?, 0x0?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e460740 sp=0x2f554e460720 pc=0x48c7ce +runtime.gcBgMarkWorker(0x2f554e7d85b0) + /usr/local/go/src/runtime/mgc.go:1791 +0xeb fp=0x2f554e4607c8 sp=0x2f554e460740 pc=0x431c2b +runtime.gcBgMarkStartWorkers.gowrap1() + /usr/local/go/src/runtime/mgc.go:1695 +0x17 fp=0x2f554e4607e0 sp=0x2f554e4607c8 pc=0x431b17 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e4607e8 sp=0x2f554e4607e0 pc=0x494001 +created by runtime.gcBgMarkStartWorkers in goroutine 1 + /usr/local/go/src/runtime/mgc.go:1695 +0x105 + +goroutine 10 gp=0x2f554e60d2c0 m=nil [GC worker (idle)]: +runtime.gopark(0x34b2bbc5cd2?, 0x1?, 0x84?, 0x78?, 0x0?) 
+ /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e467f40 sp=0x2f554e467f20 pc=0x48c7ce +runtime.gcBgMarkWorker(0x2f554e7d85b0) + /usr/local/go/src/runtime/mgc.go:1791 +0xeb fp=0x2f554e467fc8 sp=0x2f554e467f40 pc=0x431c2b +runtime.gcBgMarkStartWorkers.gowrap1() + /usr/local/go/src/runtime/mgc.go:1695 +0x17 fp=0x2f554e467fe0 sp=0x2f554e467fc8 pc=0x431b17 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e467fe8 sp=0x2f554e467fe0 pc=0x494001 +created by runtime.gcBgMarkStartWorkers in goroutine 1 + /usr/local/go/src/runtime/mgc.go:1695 +0x105 + +goroutine 11 gp=0x2f554e60d4a0 m=nil [GC worker (idle)]: +runtime.gopark(0x34b2bbc55be?, 0x3?, 0xb0?, 0xa9?, 0x0?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e750740 sp=0x2f554e750720 pc=0x48c7ce +runtime.gcBgMarkWorker(0x2f554e7d85b0) + /usr/local/go/src/runtime/mgc.go:1791 +0xeb fp=0x2f554e7507c8 sp=0x2f554e750740 pc=0x431c2b +runtime.gcBgMarkStartWorkers.gowrap1() + /usr/local/go/src/runtime/mgc.go:1695 +0x17 fp=0x2f554e7507e0 sp=0x2f554e7507c8 pc=0x431b17 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e7507e8 sp=0x2f554e7507e0 pc=0x494001 +created by runtime.gcBgMarkStartWorkers in goroutine 1 + /usr/local/go/src/runtime/mgc.go:1695 +0x105 + +goroutine 12 gp=0x2f554e60d680 m=nil [GC worker (idle)]: +runtime.gopark(0x34b2bbd8a2c?, 0x1?, 0x57?, 0x48?, 0x0?) 
+ /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e750f40 sp=0x2f554e750f20 pc=0x48c7ce +runtime.gcBgMarkWorker(0x2f554e7d85b0) + /usr/local/go/src/runtime/mgc.go:1791 +0xeb fp=0x2f554e750fc8 sp=0x2f554e750f40 pc=0x431c2b +runtime.gcBgMarkStartWorkers.gowrap1() + /usr/local/go/src/runtime/mgc.go:1695 +0x17 fp=0x2f554e750fe0 sp=0x2f554e750fc8 pc=0x431b17 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e750fe8 sp=0x2f554e750fe0 pc=0x494001 +created by runtime.gcBgMarkStartWorkers in goroutine 1 + /usr/local/go/src/runtime/mgc.go:1695 +0x105 + +goroutine 13 gp=0x2f554e60d860 m=nil [GC worker (idle)]: +runtime.gopark(0x34b409d983b?, 0x3?, 0x78?, 0x1c?, 0x0?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e751740 sp=0x2f554e751720 pc=0x48c7ce +runtime.gcBgMarkWorker(0x2f554e7d85b0) + /usr/local/go/src/runtime/mgc.go:1791 +0xeb fp=0x2f554e7517c8 sp=0x2f554e751740 pc=0x431c2b +runtime.gcBgMarkStartWorkers.gowrap1() + /usr/local/go/src/runtime/mgc.go:1695 +0x17 fp=0x2f554e7517e0 sp=0x2f554e7517c8 pc=0x431b17 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e7517e8 sp=0x2f554e7517e0 pc=0x494001 +created by runtime.gcBgMarkStartWorkers in goroutine 1 + /usr/local/go/src/runtime/mgc.go:1695 +0x105 + +goroutine 14 gp=0x2f554e60da40 m=nil [GC worker (idle)]: +runtime.gopark(0x3213e80?, 0x1?, 0x49?, 0xc4?, 0x0?) 
+ /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e751f40 sp=0x2f554e751f20 pc=0x48c7ce +runtime.gcBgMarkWorker(0x2f554e7d85b0) + /usr/local/go/src/runtime/mgc.go:1791 +0xeb fp=0x2f554e751fc8 sp=0x2f554e751f40 pc=0x431c2b +runtime.gcBgMarkStartWorkers.gowrap1() + /usr/local/go/src/runtime/mgc.go:1695 +0x17 fp=0x2f554e751fe0 sp=0x2f554e751fc8 pc=0x431b17 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e751fe8 sp=0x2f554e751fe0 pc=0x494001 +created by runtime.gcBgMarkStartWorkers in goroutine 1 + /usr/local/go/src/runtime/mgc.go:1695 +0x105 + +goroutine 15 gp=0x2f554e60dc20 m=nil [GC worker (idle)]: +runtime.gopark(0x34250cc3176?, 0x3?, 0xca?, 0x7a?, 0x0?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e752740 sp=0x2f554e752720 pc=0x48c7ce +runtime.gcBgMarkWorker(0x2f554e7d85b0) + /usr/local/go/src/runtime/mgc.go:1791 +0xeb fp=0x2f554e7527c8 sp=0x2f554e752740 pc=0x431c2b +runtime.gcBgMarkStartWorkers.gowrap1() + /usr/local/go/src/runtime/mgc.go:1695 +0x17 fp=0x2f554e7527e0 sp=0x2f554e7527c8 pc=0x431b17 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e7527e8 sp=0x2f554e7527e0 pc=0x494001 +created by runtime.gcBgMarkStartWorkers in goroutine 1 + /usr/local/go/src/runtime/mgc.go:1695 +0x105 + +goroutine 18 gp=0x2f554e784d20 m=nil [GC worker (idle)]: +runtime.gopark(0x34b409f4057?, 0x1?, 0x1a?, 0x12?, 0x0?) 
+ /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e74c740 sp=0x2f554e74c720 pc=0x48c7ce +runtime.gcBgMarkWorker(0x2f554e7d85b0) + /usr/local/go/src/runtime/mgc.go:1791 +0xeb fp=0x2f554e74c7c8 sp=0x2f554e74c740 pc=0x431c2b +runtime.gcBgMarkStartWorkers.gowrap1() + /usr/local/go/src/runtime/mgc.go:1695 +0x17 fp=0x2f554e74c7e0 sp=0x2f554e74c7c8 pc=0x431b17 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e74c7e8 sp=0x2f554e74c7e0 pc=0x494001 +created by runtime.gcBgMarkStartWorkers in goroutine 1 + /usr/local/go/src/runtime/mgc.go:1695 +0x105 + +goroutine 34 gp=0x2f554e5045a0 m=nil [GC worker (idle)]: +runtime.gopark(0x34b409dba58?, 0x3?, 0x7f?, 0xaf?, 0x0?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e460f40 sp=0x2f554e460f20 pc=0x48c7ce +runtime.gcBgMarkWorker(0x2f554e7d85b0) + /usr/local/go/src/runtime/mgc.go:1791 +0xeb fp=0x2f554e460fc8 sp=0x2f554e460f40 pc=0x431c2b +runtime.gcBgMarkStartWorkers.gowrap1() + /usr/local/go/src/runtime/mgc.go:1695 +0x17 fp=0x2f554e460fe0 sp=0x2f554e460fc8 pc=0x431b17 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e460fe8 sp=0x2f554e460fe0 pc=0x494001 +created by runtime.gcBgMarkStartWorkers in goroutine 1 + /usr/local/go/src/runtime/mgc.go:1695 +0x105 + +goroutine 16 gp=0x2f554e754000 m=nil [GC worker (idle)]: +runtime.gopark(0x34b2bbc550a?, 0x1?, 0x2b?, 0x3f?, 0x0?) 
+ /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e752f40 sp=0x2f554e752f20 pc=0x48c7ce +runtime.gcBgMarkWorker(0x2f554e7d85b0) + /usr/local/go/src/runtime/mgc.go:1791 +0xeb fp=0x2f554e752fc8 sp=0x2f554e752f40 pc=0x431c2b +runtime.gcBgMarkStartWorkers.gowrap1() + /usr/local/go/src/runtime/mgc.go:1695 +0x17 fp=0x2f554e752fe0 sp=0x2f554e752fc8 pc=0x431b17 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e752fe8 sp=0x2f554e752fe0 pc=0x494001 +created by runtime.gcBgMarkStartWorkers in goroutine 1 + /usr/local/go/src/runtime/mgc.go:1695 +0x105 + +goroutine 19 gp=0x2f554e784f00 m=nil [GC worker (idle)]: +runtime.gopark(0x34b2bbd7e95?, 0x3?, 0x84?, 0x5f?, 0x0?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e74cf40 sp=0x2f554e74cf20 pc=0x48c7ce +runtime.gcBgMarkWorker(0x2f554e7d85b0) + /usr/local/go/src/runtime/mgc.go:1791 +0xeb fp=0x2f554e74cfc8 sp=0x2f554e74cf40 pc=0x431c2b +runtime.gcBgMarkStartWorkers.gowrap1() + /usr/local/go/src/runtime/mgc.go:1695 +0x17 fp=0x2f554e74cfe0 sp=0x2f554e74cfc8 pc=0x431b17 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e74cfe8 sp=0x2f554e74cfe0 pc=0x494001 +created by runtime.gcBgMarkStartWorkers in goroutine 1 + /usr/local/go/src/runtime/mgc.go:1695 +0x105 + +goroutine 20 gp=0x2f554e7850e0 m=nil [GC worker (idle)]: +runtime.gopark(0x34b409c632c?, 0x1?, 0xda?, 0x32?, 0x0?) 
+ /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e74d740 sp=0x2f554e74d720 pc=0x48c7ce +runtime.gcBgMarkWorker(0x2f554e7d85b0) + /usr/local/go/src/runtime/mgc.go:1791 +0xeb fp=0x2f554e74d7c8 sp=0x2f554e74d740 pc=0x431c2b +runtime.gcBgMarkStartWorkers.gowrap1() + /usr/local/go/src/runtime/mgc.go:1695 +0x17 fp=0x2f554e74d7e0 sp=0x2f554e74d7c8 pc=0x431b17 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e74d7e8 sp=0x2f554e74d7e0 pc=0x494001 +created by runtime.gcBgMarkStartWorkers in goroutine 1 + /usr/local/go/src/runtime/mgc.go:1695 +0x105 + +goroutine 21 gp=0x2f554e7852c0 m=nil [GC worker (idle)]: +runtime.gopark(0x34b2bbc1b66?, 0x3?, 0x3?, 0x3a?, 0x0?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e74df40 sp=0x2f554e74df20 pc=0x48c7ce +runtime.gcBgMarkWorker(0x2f554e7d85b0) + /usr/local/go/src/runtime/mgc.go:1791 +0xeb fp=0x2f554e74dfc8 sp=0x2f554e74df40 pc=0x431c2b +runtime.gcBgMarkStartWorkers.gowrap1() + /usr/local/go/src/runtime/mgc.go:1695 +0x17 fp=0x2f554e74dfe0 sp=0x2f554e74dfc8 pc=0x431b17 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e74dfe8 sp=0x2f554e74dfe0 pc=0x494001 +created by runtime.gcBgMarkStartWorkers in goroutine 1 + /usr/local/go/src/runtime/mgc.go:1695 +0x105 + +goroutine 22 gp=0x2f554e7854a0 m=nil [GC worker (idle)]: +runtime.gopark(0x34b409d9717?, 0x1?, 0xb3?, 0xe6?, 0x0?) 
+ /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e74e740 sp=0x2f554e74e720 pc=0x48c7ce +runtime.gcBgMarkWorker(0x2f554e7d85b0) + /usr/local/go/src/runtime/mgc.go:1791 +0xeb fp=0x2f554e74e7c8 sp=0x2f554e74e740 pc=0x431c2b +runtime.gcBgMarkStartWorkers.gowrap1() + /usr/local/go/src/runtime/mgc.go:1695 +0x17 fp=0x2f554e74e7e0 sp=0x2f554e74e7c8 pc=0x431b17 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e74e7e8 sp=0x2f554e74e7e0 pc=0x494001 +created by runtime.gcBgMarkStartWorkers in goroutine 1 + /usr/local/go/src/runtime/mgc.go:1695 +0x105 + +goroutine 23 gp=0x2f554e785680 m=nil [GC worker (idle)]: +runtime.gopark(0x34b409c6416?, 0x3?, 0x1e?, 0xa3?, 0x0?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e74ef40 sp=0x2f554e74ef20 pc=0x48c7ce +runtime.gcBgMarkWorker(0x2f554e7d85b0) + /usr/local/go/src/runtime/mgc.go:1791 +0xeb fp=0x2f554e74efc8 sp=0x2f554e74ef40 pc=0x431c2b +runtime.gcBgMarkStartWorkers.gowrap1() + /usr/local/go/src/runtime/mgc.go:1695 +0x17 fp=0x2f554e74efe0 sp=0x2f554e74efc8 pc=0x431b17 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e74efe8 sp=0x2f554e74efe0 pc=0x494001 +created by runtime.gcBgMarkStartWorkers in goroutine 1 + /usr/local/go/src/runtime/mgc.go:1695 +0x105 + +goroutine 24 gp=0x2f554e785860 m=nil [GC worker (idle)]: +runtime.gopark(0x34b2bbc1ae3?, 0x1?, 0x8e?, 0x77?, 0x0?) 
+ /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e74f740 sp=0x2f554e74f720 pc=0x48c7ce +runtime.gcBgMarkWorker(0x2f554e7d85b0) + /usr/local/go/src/runtime/mgc.go:1791 +0xeb fp=0x2f554e74f7c8 sp=0x2f554e74f740 pc=0x431c2b +runtime.gcBgMarkStartWorkers.gowrap1() + /usr/local/go/src/runtime/mgc.go:1695 +0x17 fp=0x2f554e74f7e0 sp=0x2f554e74f7c8 pc=0x431b17 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e74f7e8 sp=0x2f554e74f7e0 pc=0x494001 +created by runtime.gcBgMarkStartWorkers in goroutine 1 + /usr/local/go/src/runtime/mgc.go:1695 +0x105 + +goroutine 25 gp=0x2f554e785a40 m=nil [GC worker (idle)]: +runtime.gopark(0x34b2bbc55cf?, 0x1?, 0x8d?, 0x95?, 0x0?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e74ff40 sp=0x2f554e74ff20 pc=0x48c7ce +runtime.gcBgMarkWorker(0x2f554e7d85b0) + /usr/local/go/src/runtime/mgc.go:1791 +0xeb fp=0x2f554e74ffc8 sp=0x2f554e74ff40 pc=0x431c2b +runtime.gcBgMarkStartWorkers.gowrap1() + /usr/local/go/src/runtime/mgc.go:1695 +0x17 fp=0x2f554e74ffe0 sp=0x2f554e74ffc8 pc=0x431b17 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e74ffe8 sp=0x2f554e74ffe0 pc=0x494001 +created by runtime.gcBgMarkStartWorkers in goroutine 1 + /usr/local/go/src/runtime/mgc.go:1695 +0x105 + +goroutine 26 gp=0x2f554e785c20 m=nil [GC worker (idle)]: +runtime.gopark(0x34b409c62e4?, 0x1?, 0x74?, 0x45?, 0x0?) 
+ /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e894740 sp=0x2f554e894720 pc=0x48c7ce +runtime.gcBgMarkWorker(0x2f554e7d85b0) + /usr/local/go/src/runtime/mgc.go:1791 +0xeb fp=0x2f554e8947c8 sp=0x2f554e894740 pc=0x431c2b +runtime.gcBgMarkStartWorkers.gowrap1() + /usr/local/go/src/runtime/mgc.go:1695 +0x17 fp=0x2f554e8947e0 sp=0x2f554e8947c8 pc=0x431b17 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e8947e8 sp=0x2f554e8947e0 pc=0x494001 +created by runtime.gcBgMarkStartWorkers in goroutine 1 + /usr/local/go/src/runtime/mgc.go:1695 +0x105 + +goroutine 27 gp=0x2f554e898000 m=nil [GC worker (idle)]: +runtime.gopark(0x34b2bbc512e?, 0x1?, 0xb6?, 0xb2?, 0x0?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e894f40 sp=0x2f554e894f20 pc=0x48c7ce +runtime.gcBgMarkWorker(0x2f554e7d85b0) + /usr/local/go/src/runtime/mgc.go:1791 +0xeb fp=0x2f554e894fc8 sp=0x2f554e894f40 pc=0x431c2b +runtime.gcBgMarkStartWorkers.gowrap1() + /usr/local/go/src/runtime/mgc.go:1695 +0x17 fp=0x2f554e894fe0 sp=0x2f554e894fc8 pc=0x431b17 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e894fe8 sp=0x2f554e894fe0 pc=0x494001 +created by runtime.gcBgMarkStartWorkers in goroutine 1 + /usr/local/go/src/runtime/mgc.go:1695 +0x105 + +goroutine 50 gp=0x2f554e504b40 m=nil [select, 10 minutes]: +runtime.gopark(0x2f554e461f78?, 0x2?, 0x0?, 0x0?, 0x2f554e461f64?) 
+ /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e461e00 sp=0x2f554e461de0 pc=0x48c7ce +runtime.selectgo(0x2f554e461f78, 0x2f554e461f60, 0x0?, 0x0, 0x0?, 0x1) + /usr/local/go/src/runtime/select.go:351 +0xaa5 fp=0x2f554e461f30 sp=0x2f554e461e00 pc=0x46a105 +github.com/goraz/onion.(*Onion).watchLayer(0x2f554ebd2870, {0x2037898, 0x3211ac0}, {0x202e558, 0x2f554e9395d8}) + /go/pkg/mod/github.com/goraz/onion@v0.1.3/onion.go:44 +0xdc fp=0x2f554e461fa8 sp=0x2f554e461f30 pc=0x77fd5c +github.com/goraz/onion.(*Onion).AddLayersContext.gowrap1() + /go/pkg/mod/github.com/goraz/onion@v0.1.3/onion.go:90 +0x27 fp=0x2f554e461fe0 sp=0x2f554e461fa8 pc=0x780247 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e461fe8 sp=0x2f554e461fe0 pc=0x494001 +created by github.com/goraz/onion.(*Onion).AddLayersContext in goroutine 1 + /go/pkg/mod/github.com/goraz/onion@v0.1.3/onion.go:90 +0x152 + +goroutine 53 gp=0x2f554e504f00 m=nil [select, 10 minutes]: +runtime.gopark(0x2f554e462f78?, 0x2?, 0x40?, 0xbe?, 0x2f554e462f64?) 
+ /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e462e00 sp=0x2f554e462de0 pc=0x48c7ce +runtime.selectgo(0x2f554e462f78, 0x2f554e462f60, 0x2f554e462f9c?, 0x0, 0x2?, 0x1) + /usr/local/go/src/runtime/select.go:351 +0xaa5 fp=0x2f554e462f30 sp=0x2f554e462e00 pc=0x46a105 +github.com/goraz/onion.(*Onion).watchLayer(0x2f554ebd2910, {0x2037898, 0x3211ac0}, {0x202e558, 0x2f554e9395f0}) + /go/pkg/mod/github.com/goraz/onion@v0.1.3/onion.go:44 +0xdc fp=0x2f554e462fa8 sp=0x2f554e462f30 pc=0x77fd5c +github.com/goraz/onion.(*Onion).AddLayersContext.gowrap1() + /go/pkg/mod/github.com/goraz/onion@v0.1.3/onion.go:90 +0x27 fp=0x2f554e462fe0 sp=0x2f554e462fa8 pc=0x780247 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e462fe8 sp=0x2f554e462fe0 pc=0x494001 +created by github.com/goraz/onion.(*Onion).AddLayersContext in goroutine 1 + /go/pkg/mod/github.com/goraz/onion@v0.1.3/onion.go:90 +0x152 + +goroutine 37 gp=0x2f554e5050e0 m=nil [sync.WaitGroup.Wait, 10 minutes]: +runtime.gopark(0x2f554e513cf0?, 0x425334?, 0x30?, 0x82?, 0x18?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554ea4ac90 sp=0x2f554ea4ac70 pc=0x48c7ce +runtime.goparkunlock(...) + /usr/local/go/src/runtime/proc.go:468 +runtime.semacquire1(0x2f554e6e2438, 0x0, 0x1, 0x0, 0x19) + /usr/local/go/src/runtime/sema.go:192 +0x232 fp=0x2f554ea4acf8 sp=0x2f554ea4ac90 pc=0x46af92 +sync.runtime_SemacquireWaitGroup(0x2f554e513d48?, 0xd3?) 
+ /usr/local/go/src/runtime/sema.go:114 +0x2e fp=0x2f554ea4ad30 sp=0x2f554ea4acf8 pc=0x48df8e +sync.(*WaitGroup).Wait(0x2f554e6e2430) + /usr/local/go/src/sync/waitgroup.go:206 +0x85 fp=0x2f554ea4ad58 sp=0x2f554ea4ad30 pc=0x49a945 +cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).ListenNats(0x3211ac0, 0x2f554e513fa0) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:115 +0x405 fp=0x2f554ea4af00 sp=0x2f554ea4ad58 pc=0x17c7725 +oc-scheduler/infrastructure.ListenNATS() + /oc-scheduler/infrastructure/nats.go:38 +0x257 fp=0x2f554ea4afe0 sp=0x2f554ea4af00 pc=0x193ee77 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554ea4afe8 sp=0x2f554ea4afe0 pc=0x494001 +created by main.main in goroutine 1 + /oc-scheduler/main.go:25 +0x1f1 + +goroutine 59 gp=0x2f554e505a40 m=nil [sync.Cond.Wait, 10 minutes]: +runtime.gopark(0x2f554e9b2538?, 0x41c480?, 0x20?, 0x4d?, 0x3?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554eb89d10 sp=0x2f554eb89cf0 pc=0x48c7ce +runtime.goparkunlock(...) + /usr/local/go/src/runtime/proc.go:468 +sync.runtime_notifyListWait(0x2f554e7cae50, 0x3) + /usr/local/go/src/runtime/sema.go:617 +0x1b3 fp=0x2f554eb89d60 sp=0x2f554eb89d10 pc=0x48e213 +sync.(*Cond).Wait(0x0?) + /usr/local/go/src/sync/cond.go:71 +0x73 fp=0x2f554eb89d98 sp=0x2f554eb89d60 pc=0x498c13 +go.mongodb.org/mongo-driver/x/mongo/driver/topology.(*pool).createConnections.func2() + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/pool.go:1165 +0xcb fp=0x2f554eb89e38 sp=0x2f554eb89d98 pc=0x9106cb +go.mongodb.org/mongo-driver/x/mongo/driver/topology.(*pool).createConnections(0x2f554eb83800, {0x20379e8, 0x2f554ebd2cd0}, 0x2f554e95cdc0?) 
+ /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/pool.go:1187 +0x104 fp=0x2f554eb89fb0 sp=0x2f554eb89e38 pc=0x910064 +go.mongodb.org/mongo-driver/x/mongo/driver/topology.newPool.gowrap1() + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/pool.go:243 +0x25 fp=0x2f554eb89fe0 sp=0x2f554eb89fb0 pc=0x90a745 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554eb89fe8 sp=0x2f554eb89fe0 pc=0x494001 +created by go.mongodb.org/mongo-driver/x/mongo/driver/topology.newPool in goroutine 1 + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/pool.go:243 +0x705 + +goroutine 60 gp=0x2f554e505c20 m=nil [sync.Cond.Wait, 10 minutes]: +runtime.gopark(0x0?, 0x912120?, 0x40?, 0x98?, 0x2f554e883d78?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554f00cd10 sp=0x2f554f00ccf0 pc=0x48c7ce +runtime.goparkunlock(...) + /usr/local/go/src/runtime/proc.go:468 +sync.runtime_notifyListWait(0x2f554e7cae50, 0x4) + /usr/local/go/src/runtime/sema.go:617 +0x1b3 fp=0x2f554f00cd60 sp=0x2f554f00cd10 pc=0x48e213 +sync.(*Cond).Wait(0x0?) + /usr/local/go/src/sync/cond.go:71 +0x73 fp=0x2f554f00cd98 sp=0x2f554f00cd60 pc=0x498c13 +go.mongodb.org/mongo-driver/x/mongo/driver/topology.(*pool).createConnections.func2() + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/pool.go:1165 +0xcb fp=0x2f554f00ce38 sp=0x2f554f00cd98 pc=0x9106cb +go.mongodb.org/mongo-driver/x/mongo/driver/topology.(*pool).createConnections(0x2f554eb83800, {0x20379e8, 0x2f554ebd2cd0}, 0x0?) 
+ /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/pool.go:1187 +0x104 fp=0x2f554f00cfb0 sp=0x2f554f00ce38 pc=0x910064 +go.mongodb.org/mongo-driver/x/mongo/driver/topology.newPool.gowrap1() + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/pool.go:243 +0x25 fp=0x2f554f00cfe0 sp=0x2f554f00cfb0 pc=0x90a745 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554f00cfe8 sp=0x2f554f00cfe0 pc=0x494001 +created by go.mongodb.org/mongo-driver/x/mongo/driver/topology.newPool in goroutine 1 + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/pool.go:243 +0x705 + +goroutine 61 gp=0x2f554e8981e0 m=nil [select]: +runtime.gopark(0x2f554e528f08?, 0x3?, 0x0?, 0x0?, 0x2f554e528ea2?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e528d28 sp=0x2f554e528d08 pc=0x48c7ce +runtime.selectgo(0x2f554e528f08, 0x2f554e528e9c, 0x0?, 0x0, 0x0?, 0x1) + /usr/local/go/src/runtime/select.go:351 +0xaa5 fp=0x2f554e528e58 sp=0x2f554e528d28 pc=0x46a105 +go.mongodb.org/mongo-driver/x/mongo/driver/topology.(*pool).maintain(0x2f554eb83800, {0x20379e8, 0x2f554ebd2cd0}, 0x0?) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/pool.go:1299 +0x20d fp=0x2f554e528fb0 sp=0x2f554e528e58 pc=0x910d0d +go.mongodb.org/mongo-driver/x/mongo/driver/topology.newPool.gowrap2() + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/pool.go:250 +0x25 fp=0x2f554e528fe0 sp=0x2f554e528fb0 pc=0x90a705 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e528fe8 sp=0x2f554e528fe0 pc=0x494001 +created by go.mongodb.org/mongo-driver/x/mongo/driver/topology.newPool in goroutine 1 + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/pool.go:250 +0x877 + +goroutine 62 gp=0x2f554e8983c0 m=nil [IO wait]: +runtime.gopark(0xee152e4f8?, 0x0?, 0x0?, 0x0?, 0xb?) 
+ /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e8c8120 sp=0x2f554e8c8100 pc=0x48c7ce +runtime.netpollblock(0x4e38d8?, 0x41a946?, 0x0?) + /usr/local/go/src/runtime/netpoll.go:575 +0xf7 fp=0x2f554e8c8158 sp=0x2f554e8c8120 pc=0x44f497 +internal/poll.runtime_pollWait(0x76bc79c66600, 0x72) + /usr/local/go/src/runtime/netpoll.go:351 +0x85 fp=0x2f554e8c8178 sp=0x2f554e8c8158 pc=0x48b9a5 +internal/poll.(*pollDesc).wait(0x2f554e94c200?, 0x2f556c6a5a14?, 0x0) + /usr/local/go/src/internal/poll/fd_poll_runtime.go:84 +0x27 fp=0x2f554e8c81a0 sp=0x2f554e8c8178 pc=0x503087 +internal/poll.(*pollDesc).waitRead(...) + /usr/local/go/src/internal/poll/fd_poll_runtime.go:89 +internal/poll.(*FD).Read(0x2f554e94c200, {0x2f556c6a5a14, 0x4, 0x4}) + /usr/local/go/src/internal/poll/fd_unix.go:165 +0x2ae fp=0x2f554e8c8238 sp=0x2f554e8c81a0 pc=0x5042ae +net.(*netFD).Read(0x2f554e94c200, {0x2f556c6a5a14?, 0x30?, 0x2f554e501008?}) + /usr/local/go/src/net/fd_posix.go:68 +0x25 fp=0x2f554e8c8280 sp=0x2f554e8c8238 pc=0x5e03a5 +net.(*conn).Read(0x2f554e902160, {0x2f556c6a5a14?, 0x2f554e8c8300?, 0x425d85?}) + /usr/local/go/src/net/net.go:196 +0x45 fp=0x2f554e8c82c8 sp=0x2f554e8c8280 pc=0x5f10e5 +io.ReadAtLeast({0x20277c0, 0x2f554e902160}, {0x2f556c6a5a14, 0x4, 0x4}, 0x4) + /usr/local/go/src/io/io.go:335 +0x8e fp=0x2f554e8c8310 sp=0x2f554e8c82c8 pc=0x4d4bae +io.ReadFull(...) 
+ /usr/local/go/src/io/io.go:354 +go.mongodb.org/mongo-driver/x/mongo/driver/topology.(*connection).read(0x2f554e584588, {0x20379e8, 0x2f554e666000}) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/connection.go:523 +0x231 fp=0x2f554e8c8440 sp=0x2f554e8c8310 pc=0x901ed1 +go.mongodb.org/mongo-driver/x/mongo/driver/topology.(*connection).readWireMessage(0x2f554e584588, {0x20379e8, 0x2f554e666000}) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/connection.go:452 +0x186 fp=0x2f554e8c8548 sp=0x2f554e8c8440 pc=0x9018c6 +go.mongodb.org/mongo-driver/x/mongo/driver/topology.initConnection.ReadWireMessage({0x0?}, {0x20379e8?, 0x2f554e666000?}) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/connection.go:651 +0x1d fp=0x2f554e8c8570 sp=0x2f554e8c8548 pc=0x902a9d +go.mongodb.org/mongo-driver/x/mongo/driver.Operation.readWireMessage({0x2f556c6d45b0, {0x1e34834, 0x5}, {0x2033c20, 0x2f556c6d45a0}, 0x2f556c6d45c0, {0x0, 0x0}, 0x0, 0x0, ...}, ...) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/operation.go:1103 +0x4c fp=0x2f554e8c86f0 sp=0x2f554e8c8570 pc=0x87bfcc +go.mongodb.org/mongo-driver/x/mongo/driver.Operation.ExecuteExhaust({0x2f556c6d45b0, {0x1e34834, 0x5}, {0x2033c20, 0x2f556c6d45a0}, 0x2f556c6d45c0, {0x0, 0x0}, 0x0, 0x0, ...}, ...) 
+ /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/operation_exhaust.go:21 +0x10f fp=0x2f554e8c8ac0 sp=0x2f554e8c86f0 pc=0x883eaf +go.mongodb.org/mongo-driver/x/mongo/driver/operation.(*Hello).StreamResponse(0x2f554e8de000, {0x20379e8, 0x2f554e666000}, {0x2048ca8, 0x2f554e584588}) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/operation/hello.go:572 +0x1e6 fp=0x2f554e8c8d00 sp=0x2f554e8c8ac0 pc=0x88f286 +go.mongodb.org/mongo-driver/x/mongo/driver/topology.(*Server).check(_) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/server.go:862 +0x925 fp=0x2f554e8c95b0 sp=0x2f554e8c8d00 pc=0x919705 +go.mongodb.org/mongo-driver/x/mongo/driver/topology.(*Server).update(0x2f554e87bc20) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/server.go:605 +0x2f5 fp=0x2f554e8c9fc8 sp=0x2f554e8c95b0 pc=0x916c35 +go.mongodb.org/mongo-driver/x/mongo/driver/topology.(*Server).Connect.gowrap1() + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/server.go:252 +0x17 fp=0x2f554e8c9fe0 sp=0x2f554e8c9fc8 pc=0x914517 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e8c9fe8 sp=0x2f554e8c9fe0 pc=0x494001 +created by go.mongodb.org/mongo-driver/x/mongo/driver/topology.(*Server).Connect in goroutine 1 + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/server.go:252 +0x1bb + +goroutine 28 gp=0x2f554e904d20 m=nil [select]: +runtime.gopark(0x2f554e8fdea8?, 0x2?, 0xb8?, 0xdd?, 0x2f554e8fde5c?) 
+ /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e8fdcf0 sp=0x2f554e8fdcd0 pc=0x48c7ce +runtime.selectgo(0x2f554e8fdea8, 0x2f554e8fde58, 0x31ed5e0?, 0x0, 0x0?, 0x1) + /usr/local/go/src/runtime/select.go:351 +0xaa5 fp=0x2f554e8fde20 sp=0x2f554e8fdcf0 pc=0x46a105 +go.mongodb.org/mongo-driver/x/mongo/driver/topology.(*rttMonitor).runHellos(0x2f554e4c8c60, 0x2f554e640008) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/rtt_monitor.go:156 +0x10f fp=0x2f554e8fdef0 sp=0x2f554e8fde20 pc=0x9129af +go.mongodb.org/mongo-driver/x/mongo/driver/topology.(*rttMonitor).start(0x2f554e4c8c60) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/rtt_monitor.go:130 +0xf8 fp=0x2f554e8fdfa8 sp=0x2f554e8fdef0 pc=0x912718 +go.mongodb.org/mongo-driver/x/mongo/driver/topology.(*rttMonitor).connect.func1() + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/rtt_monitor.go:90 +0x49 fp=0x2f554e8fdfe0 sp=0x2f554e8fdfa8 pc=0x912469 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e8fdfe8 sp=0x2f554e8fdfe0 pc=0x494001 +created by go.mongodb.org/mongo-driver/x/mongo/driver/topology.(*rttMonitor).connect in goroutine 62 + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/rtt_monitor.go:87 +0xb8 + +goroutine 35 gp=0x2f554e9050e0 m=nil [sync.WaitGroup.Wait, 10 minutes]: +runtime.gopark(0x2f554e8fddb0?, 0x4253ef?, 0x0?, 0x7?, 0x18?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554ebecd50 sp=0x2f554ebecd30 pc=0x48c7ce +runtime.goparkunlock(...) + /usr/local/go/src/runtime/proc.go:468 +runtime.semacquire1(0x2f554ec8c068, 0x0, 0x1, 0x0, 0x19) + /usr/local/go/src/runtime/sema.go:192 +0x232 fp=0x2f554ebecdb8 sp=0x2f554ebecd50 pc=0x46af92 +sync.runtime_SemacquireWaitGroup(0x2f554e8fde08?, 0xd3?) 
+ /usr/local/go/src/runtime/sema.go:114 +0x2e fp=0x2f554ebecdf0 sp=0x2f554ebecdb8 pc=0x48df8e +sync.(*WaitGroup).Wait(0x2f554ec8c060) + /usr/local/go/src/sync/waitgroup.go:206 +0x85 fp=0x2f554ebece18 sp=0x2f554ebecdf0 pc=0x49a945 +cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).ListenNats(0x3211ac0, 0x2f554e52e600) + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:115 +0x405 fp=0x2f554ebecfc0 sp=0x2f554ebece18 pc=0x17c7725 +cloud.o-forge.io/core/oc-lib/tools.(*API).ListenRouter.gowrap1() + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/api.go:97 +0x1b fp=0x2f554ebecfe0 sp=0x2f554ebecfc0 pc=0x17c4b3b +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554ebecfe8 sp=0x2f554ebecfe0 pc=0x494001 +created by cloud.o-forge.io/core/oc-lib/tools.(*API).ListenRouter in goroutine 1 + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/api.go:97 +0xa5 + +goroutine 40 gp=0x2f554e784960 m=nil [chan receive]: +runtime.gopark(0x2f554e670be0?, 0x2f554e670b60?, 0x0?, 0x58?, 0x7?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554eb87dd0 sp=0x2f554eb87db0 pc=0x48c7ce +runtime.chanrecv(0x2f554e670b60, 0x2f554e8fdfa0, 0x1) + /usr/local/go/src/runtime/chan.go:667 +0x4ae fp=0x2f554eb87e48 sp=0x2f554eb87dd0 pc=0x41ca2e +runtime.chanrecv2(0xdf8475800?, 0x1e67eaa?) + /usr/local/go/src/runtime/chan.go:514 +0x12 fp=0x2f554eb87e70 sp=0x2f554eb87e48 pc=0x41c572 +oc-scheduler/infrastructure.WatchExecutions() + /oc-scheduler/infrastructure/watchdog.go:41 +0x15e fp=0x2f554eb87fe0 sp=0x2f554eb87e70 pc=0x194c23e +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554eb87fe8 sp=0x2f554eb87fe0 pc=0x494001 +created by main.main in goroutine 1 + /oc-scheduler/main.go:28 +0x215 + +goroutine 45 gp=0x2f554e9b63c0 m=nil [sync.Cond.Wait, 10 minutes]: +runtime.gopark(0x0?, 0x0?, 0x0?, 0x0?, 0x0?) 
+ /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554ed86f18 sp=0x2f554ed86ef8 pc=0x48c7ce +runtime.goparkunlock(...) + /usr/local/go/src/runtime/proc.go:468 +sync.runtime_notifyListWait(0x2f554e64a390, 0x0) + /usr/local/go/src/runtime/sema.go:617 +0x1b3 fp=0x2f554ed86f68 sp=0x2f554ed86f18 pc=0x48e213 +sync.(*Cond).Wait(0x0?) + /usr/local/go/src/sync/cond.go:71 +0x73 fp=0x2f554ed86fa0 sp=0x2f554ed86f68 pc=0x498c13 +github.com/nats-io/nats%2ego.(*asyncCallbacksHandler).asyncCBDispatcher(0x2f554e622220) + /go/pkg/mod/github.com/nats-io/nats.go@v1.44.0/nats.go:3103 +0x5a fp=0x2f554ed86fc8 sp=0x2f554ed86fa0 pc=0x17a211a +github.com/nats-io/nats%2ego.Options.Connect.gowrap1() + /go/pkg/mod/github.com/nats-io/nats.go@v1.44.0/nats.go:1664 +0x17 fp=0x2f554ed86fe0 sp=0x2f554ed86fc8 pc=0x179acd7 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554ed86fe8 sp=0x2f554ed86fe0 pc=0x494001 +created by github.com/nats-io/nats%2ego.Options.Connect in goroutine 35 + /go/pkg/mod/github.com/nats-io/nats.go@v1.44.0/nats.go:1664 +0x3ba + +goroutine 46 gp=0x2f554e9b6780 m=nil [chan receive, 4 minutes]: +runtime.gopark(0x53a559?, 0x2f554e5381b0?, 0xc0?, 0x8b?, 0x2f554e502058?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554ef8fd28 sp=0x2f554ef8fd08 pc=0x48c7ce +runtime.chanrecv(0x2f554e63a380, 0x2f554e58de88, 0x1) + /usr/local/go/src/runtime/chan.go:667 +0x4ae fp=0x2f554ef8fda0 sp=0x2f554ef8fd28 pc=0x41ca2e +runtime.chanrecv2(0x2f554ed0e248?, 0x4?) + /usr/local/go/src/runtime/chan.go:514 +0x12 fp=0x2f554ef8fdc8 sp=0x2f554ef8fda0 pc=0x41c572 +cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).listenForChange(0x90a705?, {{0x202e4b8, 0x2f554e939608}, 0xff, {0x0, 0x0}, {0x2f554e519200, 0x1, 0x1f4}, {0x2f554e7afa10, ...}, ...}, ...) 
+ /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:164 +0x2f8 fp=0x2f554ef8ff38 sp=0x2f554ef8fdc8 pc=0x17c8018 +cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).ListenNats.gowrap2() + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:113 +0x8a fp=0x2f554ef8ffe0 sp=0x2f554ef8ff38 pc=0x17c77ea +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554ef8ffe8 sp=0x2f554ef8ffe0 pc=0x494001 +created by cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).ListenNats in goroutine 35 + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:113 +0x2be + +goroutine 43 gp=0x2f554e56ab40 m=nil [IO wait]: +runtime.gopark(0x142?, 0x2f554e56af00?, 0x0?, 0x0?, 0xb?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554ea4bd80 sp=0x2f554ea4bd60 pc=0x48c7ce +runtime.netpollblock(0x4e38d8?, 0x41a946?, 0x0?) + /usr/local/go/src/runtime/netpoll.go:575 +0xf7 fp=0x2f554ea4bdb8 sp=0x2f554ea4bd80 pc=0x44f497 +internal/poll.runtime_pollWait(0x76bc79961000, 0x72) + /usr/local/go/src/runtime/netpoll.go:351 +0x85 fp=0x2f554ea4bdd8 sp=0x2f554ea4bdb8 pc=0x48b9a5 +internal/poll.(*pollDesc).wait(0x2f554ed1c000?, 0x2f554ea96000?, 0x0) + /usr/local/go/src/internal/poll/fd_poll_runtime.go:84 +0x27 fp=0x2f554ea4be00 sp=0x2f554ea4bdd8 pc=0x503087 +internal/poll.(*pollDesc).waitRead(...) 
+ /usr/local/go/src/internal/poll/fd_poll_runtime.go:89 +internal/poll.(*FD).Read(0x2f554ed1c000, {0x2f554ea96000, 0x8000, 0x8000}) + /usr/local/go/src/internal/poll/fd_unix.go:165 +0x2ae fp=0x2f554ea4be98 sp=0x2f554ea4be00 pc=0x5042ae +net.(*netFD).Read(0x2f554ed1c000, {0x2f554ea96000?, 0xa002f554ed85f20?, 0x489eec?}) + /usr/local/go/src/net/fd_posix.go:68 +0x25 fp=0x2f554ea4bee0 sp=0x2f554ea4be98 pc=0x5e03a5 +net.(*conn).Read(0x2f554ed20020, {0x2f554ea96000?, 0x1d6c460?, 0x1?}) + /usr/local/go/src/net/net.go:196 +0x45 fp=0x2f554ea4bf28 sp=0x2f554ea4bee0 pc=0x5f10e5 +github.com/nats-io/nats%2ego.(*natsReader).Read(0x2f554ec08040) + /go/pkg/mod/github.com/nats-io/nats.go@v1.44.0/nats.go:2052 +0x7f fp=0x2f554ea4bf58 sp=0x2f554ea4bf28 pc=0x179c1bf +github.com/nats-io/nats%2ego.(*Conn).readLoop(0x2f554e63d508) + /go/pkg/mod/github.com/nats-io/nats.go@v1.44.0/nats.go:3178 +0xef fp=0x2f554ea4bfc8 sp=0x2f554ea4bf58 pc=0x17a24cf +github.com/nats-io/nats%2ego.(*Conn).processConnectInit.gowrap2() + /go/pkg/mod/github.com/nats-io/nats.go@v1.44.0/nats.go:2487 +0x17 fp=0x2f554ea4bfe0 sp=0x2f554ea4bfc8 pc=0x179f177 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554ea4bfe8 sp=0x2f554ea4bfe0 pc=0x494001 +created by github.com/nats-io/nats%2ego.(*Conn).processConnectInit in goroutine 35 + /go/pkg/mod/github.com/nats-io/nats.go@v1.44.0/nats.go:2487 +0x2b3 + +goroutine 177 gp=0x2f554e56ad20 m=nil [IO wait]: +runtime.gopark(0x12b?, 0x2f554e9b6960?, 0x0?, 0x0?, 0xb?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554ef89d80 sp=0x2f554ef89d60 pc=0x48c7ce +runtime.netpollblock(0x4e38d8?, 0x41a946?, 0x0?) 
+ /usr/local/go/src/runtime/netpoll.go:575 +0xf7 fp=0x2f554ef89db8 sp=0x2f554ef89d80 pc=0x44f497 +internal/poll.runtime_pollWait(0x76bc79c66000, 0x72) + /usr/local/go/src/runtime/netpoll.go:351 +0x85 fp=0x2f554ef89dd8 sp=0x2f554ef89db8 pc=0x48b9a5 +internal/poll.(*pollDesc).wait(0x2f554e9b8180?, 0x2f554ea8a000?, 0x0) + /usr/local/go/src/internal/poll/fd_poll_runtime.go:84 +0x27 fp=0x2f554ef89e00 sp=0x2f554ef89dd8 pc=0x503087 +internal/poll.(*pollDesc).waitRead(...) + /usr/local/go/src/internal/poll/fd_poll_runtime.go:89 +internal/poll.(*FD).Read(0x2f554e9b8180, {0x2f554ea8a000, 0x8000, 0x8000}) + /usr/local/go/src/internal/poll/fd_unix.go:165 +0x2ae fp=0x2f554ef89e98 sp=0x2f554ef89e00 pc=0x5042ae +net.(*netFD).Read(0x2f554e9b8180, {0x2f554ea8a000?, 0xa002f554ed82720?, 0x489eec?}) + /usr/local/go/src/net/fd_posix.go:68 +0x25 fp=0x2f554ef89ee0 sp=0x2f554ef89e98 pc=0x5e03a5 +net.(*conn).Read(0x2f554e902188, {0x2f554ea8a000?, 0x1d6c460?, 0x1?}) + /usr/local/go/src/net/net.go:196 +0x45 fp=0x2f554ef89f28 sp=0x2f554ef89ee0 pc=0x5f10e5 +github.com/nats-io/nats%2ego.(*natsReader).Read(0x2f554ec08000) + /go/pkg/mod/github.com/nats-io/nats.go@v1.44.0/nats.go:2052 +0x7f fp=0x2f554ef89f58 sp=0x2f554ef89f28 pc=0x179c1bf +github.com/nats-io/nats%2ego.(*Conn).readLoop(0x2f554e4b9c08) + /go/pkg/mod/github.com/nats-io/nats.go@v1.44.0/nats.go:3178 +0xef fp=0x2f554ef89fc8 sp=0x2f554ef89f58 pc=0x17a24cf +github.com/nats-io/nats%2ego.(*Conn).processConnectInit.gowrap2() + /go/pkg/mod/github.com/nats-io/nats.go@v1.44.0/nats.go:2487 +0x17 fp=0x2f554ef89fe0 sp=0x2f554ef89fc8 pc=0x179f177 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554ef89fe8 sp=0x2f554ef89fe0 pc=0x494001 +created by github.com/nats-io/nats%2ego.(*Conn).processConnectInit in goroutine 37 + /go/pkg/mod/github.com/nats-io/nats.go@v1.44.0/nats.go:2487 +0x2b3 + +goroutine 44 gp=0x2f554e56af00 m=nil [chan receive, 10 minutes]: +runtime.gopark(0x77?, 0x2f554ed866b8?, 0xd3?, 0x89?, 0x2f554ed86700?) 
+ /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554ed866a0 sp=0x2f554ed86680 pc=0x48c7ce +runtime.chanrecv(0x2f554e522070, 0x2f554ed86760, 0x1) + /usr/local/go/src/runtime/chan.go:667 +0x4ae fp=0x2f554ed86718 sp=0x2f554ed866a0 pc=0x41ca2e +runtime.chanrecv2(0x2f554e63d530?, 0x0?) + /usr/local/go/src/runtime/chan.go:514 +0x12 fp=0x2f554ed86740 sp=0x2f554ed86718 pc=0x41c572 +github.com/nats-io/nats%2ego.(*Conn).flusher(0x2f554e63d508) + /go/pkg/mod/github.com/nats-io/nats.go@v1.44.0/nats.go:3601 +0xe5 fp=0x2f554ed867c8 sp=0x2f554ed86740 pc=0x17a43a5 +github.com/nats-io/nats%2ego.(*Conn).processConnectInit.gowrap3() + /go/pkg/mod/github.com/nats-io/nats.go@v1.44.0/nats.go:2488 +0x17 fp=0x2f554ed867e0 sp=0x2f554ed867c8 pc=0x179f137 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554ed867e8 sp=0x2f554ed867e0 pc=0x494001 +created by github.com/nats-io/nats%2ego.(*Conn).processConnectInit in goroutine 35 + /go/pkg/mod/github.com/nats-io/nats.go@v1.44.0/nats.go:2488 +0x2f9 + +goroutine 178 gp=0x2f554e9b6960 m=nil [chan receive, 10 minutes]: +runtime.gopark(0x77?, 0x2f554ed82eb8?, 0xd3?, 0x89?, 0x2f554ed82f00?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554ed82ea0 sp=0x2f554ed82e80 pc=0x48c7ce +runtime.chanrecv(0x2f554e494850, 0x2f554ed82f60, 0x1) + /usr/local/go/src/runtime/chan.go:667 +0x4ae fp=0x2f554ed82f18 sp=0x2f554ed82ea0 pc=0x41ca2e +runtime.chanrecv2(0x2f554e4b9c30?, 0x0?) 
+ /usr/local/go/src/runtime/chan.go:514 +0x12 fp=0x2f554ed82f40 sp=0x2f554ed82f18 pc=0x41c572 +github.com/nats-io/nats%2ego.(*Conn).flusher(0x2f554e4b9c08) + /go/pkg/mod/github.com/nats-io/nats.go@v1.44.0/nats.go:3601 +0xe5 fp=0x2f554ed82fc8 sp=0x2f554ed82f40 pc=0x17a43a5 +github.com/nats-io/nats%2ego.(*Conn).processConnectInit.gowrap3() + /go/pkg/mod/github.com/nats-io/nats.go@v1.44.0/nats.go:2488 +0x17 fp=0x2f554ed82fe0 sp=0x2f554ed82fc8 pc=0x179f137 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554ed82fe8 sp=0x2f554ed82fe0 pc=0x494001 +created by github.com/nats-io/nats%2ego.(*Conn).processConnectInit in goroutine 37 + /go/pkg/mod/github.com/nats-io/nats.go@v1.44.0/nats.go:2488 +0x2f9 + +goroutine 179 gp=0x2f554e9b6b40 m=nil [sync.Cond.Wait, 10 minutes]: +runtime.gopark(0x0?, 0x0?, 0x0?, 0x0?, 0x0?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554ed87718 sp=0x2f554ed876f8 pc=0x48c7ce +runtime.goparkunlock(...) + /usr/local/go/src/runtime/proc.go:468 +sync.runtime_notifyListWait(0x2f554e93c290, 0x0) + /usr/local/go/src/runtime/sema.go:617 +0x1b3 fp=0x2f554ed87768 sp=0x2f554ed87718 pc=0x48e213 +sync.(*Cond).Wait(0x0?) 
+ /usr/local/go/src/sync/cond.go:71 +0x73 fp=0x2f554ed877a0 sp=0x2f554ed87768 pc=0x498c13 +github.com/nats-io/nats%2ego.(*asyncCallbacksHandler).asyncCBDispatcher(0x2f554e93e2e0) + /go/pkg/mod/github.com/nats-io/nats.go@v1.44.0/nats.go:3103 +0x5a fp=0x2f554ed877c8 sp=0x2f554ed877a0 pc=0x17a211a +github.com/nats-io/nats%2ego.Options.Connect.gowrap1() + /go/pkg/mod/github.com/nats-io/nats.go@v1.44.0/nats.go:1664 +0x17 fp=0x2f554ed877e0 sp=0x2f554ed877c8 pc=0x179acd7 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554ed877e8 sp=0x2f554ed877e0 pc=0x494001 +created by github.com/nats-io/nats%2ego.Options.Connect in goroutine 37 + /go/pkg/mod/github.com/nats-io/nats.go@v1.44.0/nats.go:1664 +0x3ba + +goroutine 180 gp=0x2f554e9b6d20 m=nil [chan receive, 10 minutes]: +runtime.gopark(0x0?, 0x2f554e4b9c08?, 0x30?, 0x80?, 0xd?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554eb8ad28 sp=0x2f554eb8ad08 pc=0x48c7ce +runtime.chanrecv(0x2f554e642460, 0x2f554ea17e88, 0x1) + /usr/local/go/src/runtime/chan.go:667 +0x4ae fp=0x2f554eb8ada0 sp=0x2f554eb8ad28 pc=0x41ca2e +runtime.chanrecv2(0x0?, 0x2f554e4b9c08?) + /usr/local/go/src/runtime/chan.go:514 +0x12 fp=0x2f554eb8adc8 sp=0x2f554eb8ada0 pc=0x41c572 +cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).listenForChange(0x0?, {{0x202e4b8, 0x2f554e939608}, 0xff, {0x0, 0x0}, {0x2f554e519200, 0x1, 0x1f4}, {0x2f554e7afa10, ...}, ...}, ...) 
+ /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:164 +0x2f8 fp=0x2f554eb8af38 sp=0x2f554eb8adc8 pc=0x17c8018 +cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).ListenNats.gowrap2() + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:113 +0x8a fp=0x2f554eb8afe0 sp=0x2f554eb8af38 pc=0x17c77ea +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554eb8afe8 sp=0x2f554eb8afe0 pc=0x494001 +created by cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).ListenNats in goroutine 37 + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:113 +0x2be + +goroutine 181 gp=0x2f554e9b6f00 m=nil [chan receive, 10 minutes]: +runtime.gopark(0x0?, 0x2f554e4b9c08?, 0x30?, 0x60?, 0x16?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554ea4cd28 sp=0x2f554ea4cd08 pc=0x48c7ce +runtime.chanrecv(0x2f554e670cb0, 0x2f554eaa3e88, 0x1) + /usr/local/go/src/runtime/chan.go:667 +0x4ae fp=0x2f554ea4cda0 sp=0x2f554ea4cd28 pc=0x41ca2e +runtime.chanrecv2(0x0?, 0x2f554e4b9c08?) + /usr/local/go/src/runtime/chan.go:514 +0x12 fp=0x2f554ea4cdc8 sp=0x2f554ea4cda0 pc=0x41c572 +cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).listenForChange(0x31eb970?, {{0x202e4b8, 0x2f554e939608}, 0xff, {0x0, 0x0}, {0x2f554e519200, 0x1, 0x1f4}, {0x2f554e7afa10, ...}, ...}, ...) 
+ /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:164 +0x2f8 fp=0x2f554ea4cf38 sp=0x2f554ea4cdc8 pc=0x17c8018 +cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).ListenNats.gowrap2() + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:113 +0x8a fp=0x2f554ea4cfe0 sp=0x2f554ea4cf38 pc=0x17c77ea +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554ea4cfe8 sp=0x2f554ea4cfe0 pc=0x494001 +created by cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).ListenNats in goroutine 37 + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:113 +0x2be + +goroutine 183 gp=0x2f554e9b72c0 m=nil [chan receive, 10 minutes]: +runtime.gopark(0x0?, 0x2f554e4b9c08?, 0x8?, 0x61?, 0x13?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554eb88d28 sp=0x2f554eb88d08 pc=0x48c7ce +runtime.chanrecv(0x2f554e62ddc0, 0x2f554e499e88, 0x1) + /usr/local/go/src/runtime/chan.go:667 +0x4ae fp=0x2f554eb88da0 sp=0x2f554eb88d28 pc=0x41ca2e +runtime.chanrecv2(0x0?, 0x2f554e4b9c08?) + /usr/local/go/src/runtime/chan.go:514 +0x12 fp=0x2f554eb88dc8 sp=0x2f554eb88da0 pc=0x41c572 +cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).listenForChange(0x0?, {{0x202e4b8, 0x2f554e939608}, 0xff, {0x0, 0x0}, {0x2f554e519200, 0x1, 0x1f4}, {0x2f554e7afa10, ...}, ...}, ...) 
+ /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:164 +0x2f8 fp=0x2f554eb88f38 sp=0x2f554eb88dc8 pc=0x17c8018 +cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).ListenNats.gowrap2() + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:113 +0x8a fp=0x2f554eb88fe0 sp=0x2f554eb88f38 pc=0x17c77ea +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554eb88fe8 sp=0x2f554eb88fe0 pc=0x494001 +created by cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).ListenNats in goroutine 37 + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:113 +0x2be + +goroutine 184 gp=0x2f554e9b74a0 m=nil [chan receive, 10 minutes]: +runtime.gopark(0x0?, 0x2f554e4b9c08?, 0x18?, 0x0?, 0x11?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554f00dd28 sp=0x2f554f00dd08 pc=0x48c7ce +runtime.chanrecv(0x2f554e6384d0, 0x2f554ea15e88, 0x1) + /usr/local/go/src/runtime/chan.go:667 +0x4ae fp=0x2f554f00dda0 sp=0x2f554f00dd28 pc=0x41ca2e +runtime.chanrecv2(0x0?, 0x2f554e4b9c08?) + /usr/local/go/src/runtime/chan.go:514 +0x12 fp=0x2f554f00ddc8 sp=0x2f554f00dda0 pc=0x41c572 +cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).listenForChange(0x0?, {{0x202e4b8, 0x2f554e939608}, 0xff, {0x0, 0x0}, {0x2f554e519200, 0x1, 0x1f4}, {0x2f554e7afa10, ...}, ...}, ...) 
+ /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:164 +0x2f8 fp=0x2f554f00df38 sp=0x2f554f00ddc8 pc=0x17c8018 +cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).ListenNats.gowrap2() + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:113 +0x8a fp=0x2f554f00dfe0 sp=0x2f554f00df38 pc=0x17c77ea +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554f00dfe8 sp=0x2f554f00dfe0 pc=0x494001 +created by cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).ListenNats in goroutine 37 + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:113 +0x2be + +goroutine 185 gp=0x2f554e9b7680 m=nil [chan receive, 10 minutes]: +runtime.gopark(0x0?, 0x2f554e4b9c08?, 0xc0?, 0x84?, 0xf?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554eb8bd28 sp=0x2f554eb8bd08 pc=0x48c7ce +runtime.chanrecv(0x2f554e67a850, 0x2f554e49be88, 0x1) + /usr/local/go/src/runtime/chan.go:667 +0x4ae fp=0x2f554eb8bda0 sp=0x2f554eb8bd28 pc=0x41ca2e +runtime.chanrecv2(0x0?, 0x2f554e4b9c08?) + /usr/local/go/src/runtime/chan.go:514 +0x12 fp=0x2f554eb8bdc8 sp=0x2f554eb8bda0 pc=0x41c572 +cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).listenForChange(0x0?, {{0x202e4b8, 0x2f554e939608}, 0xff, {0x0, 0x0}, {0x2f554e519200, 0x1, 0x1f4}, {0x2f554e7afa10, ...}, ...}, ...) 
+ /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:164 +0x2f8 fp=0x2f554eb8bf38 sp=0x2f554eb8bdc8 pc=0x17c8018 +cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).ListenNats.gowrap2() + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:113 +0x8a fp=0x2f554eb8bfe0 sp=0x2f554eb8bf38 pc=0x17c77ea +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554eb8bfe8 sp=0x2f554eb8bfe0 pc=0x494001 +created by cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).ListenNats in goroutine 37 + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:113 +0x2be + +goroutine 186 gp=0x2f554e9b7860 m=nil [chan receive, 10 minutes]: +runtime.gopark(0x0?, 0x2f554e4b9c08?, 0x0?, 0x0?, 0xf?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554f00bd28 sp=0x2f554f00bd08 pc=0x48c7ce +runtime.chanrecv(0x2f554e930000, 0x2f554e58de88, 0x1) + /usr/local/go/src/runtime/chan.go:667 +0x4ae fp=0x2f554f00bda0 sp=0x2f554f00bd28 pc=0x41ca2e +runtime.chanrecv2(0x0?, 0x2f554e4b9c08?) + /usr/local/go/src/runtime/chan.go:514 +0x12 fp=0x2f554f00bdc8 sp=0x2f554f00bda0 pc=0x41c572 +cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).listenForChange(0x0?, {{0x202e4b8, 0x2f554e939608}, 0xff, {0x0, 0x0}, {0x2f554e519200, 0x1, 0x1f4}, {0x2f554e7afa10, ...}, ...}, ...) 
+ /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:164 +0x2f8 fp=0x2f554f00bf38 sp=0x2f554f00bdc8 pc=0x17c8018 +cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).ListenNats.gowrap2() + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:113 +0x8a fp=0x2f554f00bfe0 sp=0x2f554f00bf38 pc=0x17c77ea +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554f00bfe8 sp=0x2f554f00bfe0 pc=0x494001 +created by cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).ListenNats in goroutine 37 + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:113 +0x2be + +goroutine 187 gp=0x2f554e9b7a40 m=nil [chan receive, 10 minutes]: +runtime.gopark(0x0?, 0x2f554e4b9c08?, 0x0?, 0xc0?, 0xf?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554ebebd28 sp=0x2f554ebebd08 pc=0x48c7ce +runtime.chanrecv(0x2f554e6382a0, 0x2f554e58be88, 0x1) + /usr/local/go/src/runtime/chan.go:667 +0x4ae fp=0x2f554ebebda0 sp=0x2f554ebebd28 pc=0x41ca2e +runtime.chanrecv2(0x0?, 0x2f554e4b9c08?) + /usr/local/go/src/runtime/chan.go:514 +0x12 fp=0x2f554ebebdc8 sp=0x2f554ebebda0 pc=0x41c572 +cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).listenForChange(0x0?, {{0x202e4b8, 0x2f554e939608}, 0xff, {0x0, 0x0}, {0x2f554e519200, 0x1, 0x1f4}, {0x2f554e7afa10, ...}, ...}, ...) 
+ /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:164 +0x2f8 fp=0x2f554ebebf38 sp=0x2f554ebebdc8 pc=0x17c8018 +cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).ListenNats.gowrap2() + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:113 +0x8a fp=0x2f554ebebfe0 sp=0x2f554ebebf38 pc=0x17c77ea +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554ebebfe8 sp=0x2f554ebebfe0 pc=0x494001 +created by cloud.o-forge.io/core/oc-lib/tools.(*natsCaller).ListenNats in goroutine 37 + /go/pkg/mod/cloud.o-forge.io/core/oc-lib@v0.0.0-20260323071124-ea2a98d84aec/tools/nats_caller.go:113 +0x2be + +goroutine 121 gp=0x2f554e755680 m=nil [IO wait, 10 minutes]: +runtime.gopark(0x0?, 0x0?, 0x0?, 0x0?, 0x0?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554f009b98 sp=0x2f554f009b78 pc=0x48c7ce +runtime.netpollblock(0x48a173?, 0x41a946?, 0x0?) + /usr/local/go/src/runtime/netpoll.go:575 +0xf7 fp=0x2f554f009bd0 sp=0x2f554f009b98 pc=0x44f497 +internal/poll.runtime_pollWait(0x76bc79960600, 0x72) + /usr/local/go/src/runtime/netpoll.go:351 +0x85 fp=0x2f554f009bf0 sp=0x2f554f009bd0 pc=0x48b9a5 +internal/poll.(*pollDesc).wait(0x2f554ed3c080?, 0x370016?, 0x0) + /usr/local/go/src/internal/poll/fd_poll_runtime.go:84 +0x27 fp=0x2f554f009c18 sp=0x2f554f009bf0 pc=0x503087 +internal/poll.(*pollDesc).waitRead(...) 
+ /usr/local/go/src/internal/poll/fd_poll_runtime.go:89 +internal/poll.(*FD).Accept(0x2f554ed3c080) + /usr/local/go/src/internal/poll/fd_unix.go:613 +0x28c fp=0x2f554f009cc0 sp=0x2f554f009c18 pc=0x50810c +net.(*netFD).accept(0x2f554ed3c080) + /usr/local/go/src/net/fd_unix.go:150 +0x29 fp=0x2f554f009d78 sp=0x2f554f009cc0 pc=0x5e20e9 +net.(*TCPListener).accept(0x2f554e93c000) + /usr/local/go/src/net/tcpsock_posix.go:159 +0x1b fp=0x2f554f009dc8 sp=0x2f554f009d78 pc=0x5fc03b +net.(*TCPListener).Accept(0x2f554e93c000) + /usr/local/go/src/net/tcpsock.go:387 +0x30 fp=0x2f554f009df8 sp=0x2f554f009dc8 pc=0x5fb2d0 +net/http.(*onceCloseListener).Accept(0x2037898?) + :1 +0x1b fp=0x2f554f009e10 sp=0x2f554f009df8 pc=0x77b59b +net/http.(*Server).Serve(0x2f554e51e200, {0x2035530, 0x2f554e93c000}) + /usr/local/go/src/net/http/server.go:3434 +0x30c fp=0x2f554f009f40 sp=0x2f554f009e10 pc=0x75382c +net/http.(*Server).ListenAndServe(0x2f554e51e200) + /usr/local/go/src/net/http/server.go:3360 +0x72 fp=0x2f554f009f70 sp=0x2f554f009f40 pc=0x753412 +github.com/beego/beego/v2/server/web.(*HttpServer).Run.func5() + /go/pkg/mod/github.com/beego/beego/v2@v2.3.8/server/web/server.go:296 +0x234 fp=0x2f554f009fe0 sp=0x2f554f009f70 pc=0xb6ccd4 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554f009fe8 sp=0x2f554f009fe0 pc=0x494001 +created by github.com/beego/beego/v2/server/web.(*HttpServer).Run in goroutine 1 + /go/pkg/mod/github.com/beego/beego/v2@v2.3.8/server/web/server.go:278 +0x758 + +goroutine 211 gp=0x2f554e755860 m=nil [sleep, 10 minutes]: +runtime.gopark(0x2bdda798f7c?, 0x0?, 0x0?, 0x0?, 0x0?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f554e892f10 sp=0x2f554e892ef0 pc=0x48c7ce +time.Sleep(0x4e94914f0000) + /usr/local/go/src/runtime/time.go:363 +0x165 fp=0x2f554e892f68 sp=0x2f554e892f10 pc=0x4909e5 +oc-scheduler/infrastructure.evictAfter({0x2f554e9a20c0, 0x34}, 0x0?) 
+ /oc-scheduler/infrastructure/planner.go:169 +0x25 fp=0x2f554e892fb8 sp=0x2f554e892f68 pc=0x1943885 +oc-scheduler/infrastructure.storePlanner.gowrap1() + /oc-scheduler/infrastructure/planner.go:157 +0x25 fp=0x2f554e892fe0 sp=0x2f554e892fb8 pc=0x1943845 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f554e892fe8 sp=0x2f554e892fe0 pc=0x494001 +created by oc-scheduler/infrastructure.storePlanner in goroutine 38 + /oc-scheduler/infrastructure/planner.go:157 +0x19f + +goroutine 3160911 gp=0x2f554ea69a40 m=nil [select]: +runtime.gopark(0x2f55a7fbff80?, 0x2?, 0xe0?, 0xe9?, 0x2f55a7fbff7c?) + /usr/local/go/src/runtime/proc.go:462 +0xce fp=0x2f55a7fbfe18 sp=0x2f55a7fbfdf8 pc=0x48c7ce +runtime.selectgo(0x2f55a7fbff80, 0x2f55a7fbff78, 0x2037898?, 0x0, 0x202db68?, 0x1) + /usr/local/go/src/runtime/select.go:351 +0xaa5 fp=0x2f55a7fbff48 sp=0x2f55a7fbfe18 pc=0x46a105 +go.mongodb.org/mongo-driver/x/mongo/driver/topology.(*cancellListener).Listen(0x2f554e93a1b0, {0x20379e8, 0x2f554e666000}, 0x2f556c6d45d0) + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/connection.go:928 +0x74 fp=0x2f55a7fbffb0 sp=0x2f55a7fbff48 pc=0x904914 +go.mongodb.org/mongo-driver/x/mongo/driver/topology.(*connection).read.gowrap1() + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/connection.go:495 +0x28 fp=0x2f55a7fbffe0 sp=0x2f55a7fbffb0 pc=0x902608 +runtime.goexit({}) + /usr/local/go/src/runtime/asm_amd64.s:1771 +0x1 fp=0x2f55a7fbffe8 sp=0x2f55a7fbffe0 pc=0x494001 +created by go.mongodb.org/mongo-driver/x/mongo/driver/topology.(*connection).read in goroutine 62 + /go/pkg/mod/go.mongodb.org/mongo-driver@v1.17.4/x/mongo/driver/topology/connection.go:495 +0x156 diff --git a/main.go b/main.go index 1b86153..9642a68 100644 --- a/main.go +++ b/main.go @@ -25,5 +25,6 @@ func main() { go infrastructure.ListenNATS() go infrastructure.InitSelfPlanner() go infrastructure.RecoverDraftExecutions() + go infrastructure.WatchExecutions() 
beego.Run() } diff --git a/ws.go b/ws.go index a265880..2b7045c 100644 --- a/ws.go +++ b/ws.go @@ -27,7 +27,7 @@ func main() { token := "" // Body JSON envoyé comme premier message WebSocket (WorkflowSchedule). // Seuls start + duration_s sont requis si as_possible=true. - body := `{"start":"` + time.Now().UTC().Format(time.RFC3339) + `","duration_s":3600}` + body := `{"start":"` + time.Now().UTC().Format(time.RFC3339) + `"}` if len(args) >= 1 { url = args[0] @@ -104,7 +104,7 @@ func main() { return case <-dateChangeTick.C: newStart := time.Now().UTC().Add(3 * time.Minute) - update := `{"start":"` + newStart.Format(time.RFC3339) + `","duration_s":3600}` + update := `{"start":"` + newStart.Format(time.RFC3339) + `"}` fmt.Printf("\n[sim] Envoi mise à jour de date → %s\n\n", update) if err := websocket.Message.Send(ws, update); err != nil { fmt.Printf("Erreur envoi mise à jour : %v\n", err)