Compare commits


71 Commits

Author SHA1 Message Date
pb   88c88cac5b  testing simplyfied urlFormat() method which works thanks to traefik  2025-03-13 16:57:27 +01:00
pb   1ae38c98ad  correct path for ADMIRALTY_NODESAPI  2025-03-13 11:58:57 +01:00
pb   2d517cc594  Correcting an error in CallGet()  2025-03-12 15:41:10 +01:00
pb   a9c82bd261  Replaced the return of Call[Method]() by the stored value of the resp.Body  2025-03-12 15:37:03 +01:00
pb   79aec86f5f  Replaced the return of Call[Method]() by the stored value of the resp.Body  2025-03-12 15:26:00 +01:00
pb   9b3dfc7576  fixing how last result in stored in httpcaller  2025-03-12 12:13:55 +01:00
pb   037ae74782  modified the way HTTPCaller store last resposne  2025-03-12 12:09:55 +01:00
pb   b81c60a3ce  modified the way HTTPCaller store last resposne  2025-03-12 12:00:32 +01:00
pb   363ac94c47  debug instructions  2025-03-12 11:35:25 +01:00
pb   378f9e5095  Added a new http.Response field to HTTPCaller to store results for each call  2025-03-12 10:39:20 +01:00
pb   659b494ee4  Added a new field to HTTPCaller to store results for each call  2025-03-12 09:45:17 +01:00
pb   92965c6af2  Added more information in error when LaunchPeerExecution method doesn't match caller's  2025-03-11 16:48:05 +01:00
pb   70cb5aec9f  changed some variable name for better understanding of process in LaunchPeerExecution  2025-03-11 12:03:35 +01:00
pb   d59e77d5a2  changed how url is consructed in LaunchPeerExecution by placing meth after peer url + dt  2025-03-05 16:42:22 +01:00
pb   ff1b857ab0  removed caller from checkPeerStatus() parameters by adding the path to url  2025-03-05 11:54:14 +01:00
pb   dbdccdb920  Corrected urlFormat() from peer_cache which constructed url with API's name at the end  2025-03-05 11:01:46 +01:00
pb   fd3fef72d3  correct DefaultAPI list  2025-03-03 10:55:00 +01:00
pb   1890fd4f71  added the resources used for admiralty in datacenter API  2025-03-03 10:33:37 +01:00
pb   95af3cb515  removed caller from checkPeerStatus() parameters by adding the path to url  2025-03-03 10:32:52 +01:00
pb   3acebc451e  Added the required tag to ExecutionsID  2025-02-26 11:00:37 +01:00
mr   5111c9c8be  discovery clear view  2025-02-19 15:29:42 +01:00
mr   3ecb0e9d96  set up auth for workspace  2025-02-19 11:41:52 +01:00
mr   b4a1766677  better load & peear cache for traefik  2025-02-19 11:03:12 +01:00
mr   241c6a5a08  casual debug, time added before change of state bookin & exec  2025-02-19 08:55:11 +01:00
mr   7c30633bde  verifyAuthAction for instance  2025-02-18 14:02:29 +01:00
mr   81d3406305  verifyAuthAction for instance  2025-02-18 12:55:49 +01:00
mr   04f7537066  save  2025-02-18 12:39:16 +01:00
mr   6bf058ab5c  save  2025-02-18 11:11:40 +01:00
mr   b771b5d25e  save  2025-02-18 10:25:08 +01:00
mr   6e6ed4ea2c  debug  2025-02-18 09:53:55 +01:00
mr   a098f0a672  conf  2025-02-18 09:01:21 +01:00
mr   cafadec146  missing res access  2025-02-17 08:25:19 +01:00
mr   0940b63961  correct  2025-02-14 16:16:25 +01:00
mr   a2dca94dca  correct  2025-02-14 15:01:49 +01:00
mr   085a8718e0  correct  2025-02-13 15:11:23 +01:00
mr   271cc2caa0  workflow scheduler create booking with a booking execution lot id  2025-02-13 09:50:18 +01:00
mr   42b60ca5cd  workflow scheduler create booking with a booking execution lot id  2025-02-13 09:10:24 +01:00
mr   4920322d0a  workflow scheduler create booking with a booking execution lot id  2025-02-13 08:26:26 +01:00
mr   c7c1535ba9  workflow scheduler create booking with a booking execution lot id  2025-02-12 16:08:15 +01:00
mr   576f53f81b  workflow scheduler create booking with a booking execution lot id  2025-02-12 15:45:03 +01:00
mr   c0e6247fb8  workflow scheduler create booking with a booking execution lot id  2025-02-12 15:27:05 +01:00
mr   3e85fdc779  workflow scheduler create booking with a booking execution lot id  2025-02-12 14:14:28 +01:00
mr   4833bcb710  workflow scheduler create booking with a booking execution lot id  2025-02-12 14:08:57 +01:00
mr   7d69d65dd2  workflow scheduler create booking with a booking execution lot id  2025-02-12 11:41:34 +01:00
mr   a098b3797a  workflow scheduler create booking with a booking execution lot id  2025-02-11 15:33:01 +01:00
mr   7d03676ac2  workflow scheduler create booking with a booking execution lot id  2025-02-11 14:11:12 +01:00
mr   945b7a893e  workflow scheduler create booking with a booking execution lot id  2025-02-11 13:58:24 +01:00
mr   ef028cb2b9  workflow scheduler create booking with a booking execution lot id  2025-02-11 13:54:06 +01:00
mr   4cfd0a1789  workflow scheduler create booking with a booking execution lot id  2025-02-11 12:28:04 +01:00
mr   7c57cf34a8  workflow scheduler create booking with a booking execution lot id  2025-02-11 12:13:34 +01:00
mr   019b590b4f  workflow scheduler create booking with a booking execution lot id  2025-02-11 11:26:02 +01:00
mr   d82ae166a1  add purchase resource in model catalog  2025-02-11 09:16:18 +01:00
mr   ffaa67fb5d  add purchase resource in model catalog  2025-02-11 08:30:38 +01:00
mr   a573a4ce71  add purchase resource in model catalog  2025-02-11 07:55:15 +01:00
mr   52d5a1fbf9  add purchase resource in model catalog  2025-02-10 13:10:42 +01:00
mr   4ad32401fd  add purchase resource in model catalog  2025-02-10 13:04:13 +01:00
mr   f663ec80f5  add purchase resource in model catalog  2025-02-10 11:32:55 +01:00
mr   e55727d9e2  add purchase resource in model catalog  2025-02-10 10:42:37 +01:00
mr   4a178d01e3  add purchase resource in model catalog  2025-02-10 09:58:46 +01:00
mr   3d13833572  workflow execution evolved  2025-02-07 11:41:12 +01:00
mr   31ec352b57  workflow execution evolved  2025-02-07 08:29:57 +01:00
mr   940ef17f7b  workflow execution evolved  2025-02-06 12:56:51 +01:00
mr   ad3293da9d  workflow execution evolved  2025-02-06 11:13:06 +01:00
mr   3ffff7d32c  workflow execution evolved  2025-02-06 09:56:00 +01:00
mr   e646cfef0b  workflow execution evolved  2025-02-06 09:08:35 +01:00
plm  5255ffc2f7  Merging issue#4  2025-01-10 17:43:31 +01:00
plm  fd1c579ec4  Removing required field on PeerId, see #7  2025-01-10 17:39:58 +01:00
plm  0f4adeea86  Same prefix for all builtin microservices in opencloud  2025-01-08 16:55:42 +01:00
plm  245f3adea3  Merge branch 'issue#4'  2024-12-16 09:18:58 +01:00
plm  21d08204b5  Fixing env based layer; not using onion builtin mechanism to preserve opencloud conf key/value format  2024-12-16 09:17:54 +01:00
plm  1de4888599  Remove extra underscore character; it breaks the env var loading  2024-12-10 14:01:47 +01:00
28 changed files with 339 additions and 175 deletions

View File

@@ -26,12 +26,12 @@ import (
 func GetConfLoader() *onion.Onion {
 	logger := zerolog.New(os.Stdout).With().Timestamp().Logger()
 	AppName := GetAppName()
-	EnvPrefix := strings.ToUpper(AppName[0:2]+AppName[3:]) + "_"
+	EnvPrefix := "OC_"
 	defaultConfigFile := "/etc/oc/" + AppName[3:] + ".json"
 	localConfigFile := "./" + AppName[3:] + ".json"
 	var configFile string
 	var o *onion.Onion
-	l3 := onion.NewEnvLayerPrefix("_", EnvPrefix)
+	l3 := GetEnvVarLayer(EnvPrefix)
 	l2, err := onion.NewFileLayer(localConfigFile, nil)
 	if err == nil {
 		logger.Info().Msg("Local config file found " + localConfigFile + ", overriding default file")
@@ -54,3 +54,17 @@ func GetConfLoader() *onion.Onion {
 	}
 	return o
 }
+
+func GetEnvVarLayer(prefix string) onion.Layer {
+	envVars := make(map[string]interface{})
+	for _, e := range os.Environ() {
+		pair := strings.SplitN(e, "=", 2)
+		key := pair[0]
+		if strings.HasPrefix(key, prefix) {
+			envVars[strings.TrimPrefix(key, prefix)] = pair[1]
+		}
+	}
+	return onion.NewMapLayer(envVars)
+}

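Note: a minimal, self-contained sketch of how the new OC_ env layer behaves, assuming the filtering shown above; the variable name OC_MONGO_URL is purely illustrative and not a documented key.

package main

import (
	"fmt"
	"os"
	"strings"
)

// Mirrors GetEnvVarLayer's filtering: every variable prefixed with "OC_"
// becomes a config key with the prefix stripped (OC_MONGO_URL -> MONGO_URL).
func main() {
	os.Setenv("OC_MONGO_URL", "mongodb://localhost:27017") // illustrative only
	prefix := "OC_"
	envVars := make(map[string]interface{})
	for _, e := range os.Environ() {
		pair := strings.SplitN(e, "=", 2)
		if strings.HasPrefix(pair[0], prefix) {
			envVars[strings.TrimPrefix(pair[0], prefix)] = pair[1]
		}
	}
	fmt.Println(envVars["MONGO_URL"]) // mongodb://localhost:27017
}
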
View File

@@ -261,7 +261,7 @@ func (r *Request) Schedule(wfID string, scheduler *workflow_execution.WorkflowSc
 }
 
 func (r *Request) CheckBooking(wfID string, start string, end string, durationInS float64, cron string) bool {
-	ok, _, _, err := workflow_execution.NewScheduler(start, end, durationInS, cron).CheckBooking(wfID, &tools.APIRequest{
+	ok, _, _, _, err := workflow_execution.NewScheduler(start, end, durationInS, cron).CheckBooking(wfID, &tools.APIRequest{
 		Caller:   r.caller,
 		Username: r.user,
 		PeerID:   r.peerID,
@@ -563,9 +563,9 @@ func (l *LibData) ToRule() *rule.Rule {
 	return nil
 }
 
-func (l *LibData) ToWorkflowExecution() *workflow_execution.WorkflowExecutions {
+func (l *LibData) ToWorkflowExecution() *workflow_execution.WorkflowExecution {
 	if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW_EXECUTION {
-		return l.Data.(*workflow_execution.WorkflowExecutions)
+		return l.Data.(*workflow_execution.WorkflowExecution)
 	}
 	return nil
 }

View File

@@ -15,6 +15,7 @@ import (
 */
 type Booking struct {
 	utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
+	ExecutionsID string `json:"executions_id,omitempty" bson:"executions_id,omitempty" validate:"required"` // ExecutionsID is the ID of the executions
 	DestPeerID   string `json:"dest_peer_id,omitempty"` // DestPeerID is the ID of the destination peer
 	WorkflowID   string `json:"workflow_id,omitempty" bson:"workflow_id,omitempty"` // WorkflowID is the ID of the workflow
 	ExecutionID  string `json:"execution_id,omitempty" bson:"execution_id,omitempty" validate:"required"`
@@ -80,10 +81,6 @@ func (d *Booking) GetDelayOnDuration() time.Duration {
 	return d.GetRealDuration() - d.GetUsualDuration()
 }
 
-func (d *Booking) GetName() string {
-	return d.GetID() + "_" + d.ExpectedStartDate.String()
-}
-
 func (d *Booking) GetAccessor(request *tools.APIRequest) utils.Accessor {
 	return NewAccessor(request) // Create a new instance of the accessor
 }
@@ -93,7 +90,7 @@ func (d *Booking) VerifyAuth(request *tools.APIRequest) bool {
 }
 
 func (r *Booking) StoreDraftDefault() {
-	r.IsDraft = true
+	r.IsDraft = false
 }
 
 func (r *Booking) CanUpdate(set utils.DBObject) (bool, utils.DBObject) {

View File

@@ -1,6 +1,7 @@
 package booking
 
 import (
+	"errors"
 	"time"
 
 	"cloud.o-forge.io/core/oc-lib/dbs"
@@ -33,7 +34,11 @@ func (a *bookingMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error)
 }
 
 func (a *bookingMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
-	return utils.GenericUpdateOne(set, id, a, &Booking{})
+	if set.(*Booking).State == 0 {
+		return nil, 400, errors.New("state is required")
+	}
+	realSet := &Booking{State: set.(*Booking).State}
+	return utils.GenericUpdateOne(realSet, id, a, &Booking{})
 }
 
 func (a *bookingMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
@@ -46,13 +51,15 @@ func (a *bookingMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int
 func (a *bookingMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
 	return utils.GenericLoadOne[*Booking](id, func(d utils.DBObject) (utils.DBObject, int, error) {
-		if d.(*Booking).State == enum.DRAFT && time.Now().UTC().After(d.(*Booking).ExpectedStartDate) {
+		now := time.Now()
+		now = now.Add(time.Second * -60)
+		if d.(*Booking).State == enum.DRAFT && now.UTC().After(d.(*Booking).ExpectedStartDate) {
 			return utils.GenericDeleteOne(d.GetID(), a)
 		}
 		if (d.(*Booking).ExpectedEndDate) == nil {
 			d.(*Booking).State = enum.FORGOTTEN
 			utils.GenericRawUpdateOne(d, id, a)
-		} else if d.(*Booking).State == enum.SCHEDULED && time.Now().UTC().After(*&d.(*Booking).ExpectedStartDate) {
+		} else if d.(*Booking).State == enum.SCHEDULED && now.UTC().After(d.(*Booking).ExpectedStartDate) {
 			d.(*Booking).State = enum.DELAYED
 			utils.GenericRawUpdateOne(d, id, a)
 		}
@@ -70,11 +77,13 @@ func (a *bookingMongoAccessor) Search(filters *dbs.Filters, search string, isDra
 func (a *bookingMongoAccessor) getExec() func(utils.DBObject) utils.ShallowDBObject {
 	return func(d utils.DBObject) utils.ShallowDBObject {
-		if d.(*Booking).State == enum.DRAFT && time.Now().UTC().After(d.(*Booking).ExpectedStartDate) {
+		now := time.Now()
+		now = now.Add(time.Second * -60)
+		if d.(*Booking).State == enum.DRAFT && now.UTC().After(d.(*Booking).ExpectedStartDate) {
			utils.GenericDeleteOne(d.GetID(), a)
 			return nil
 		}
-		if d.(*Booking).State == enum.SCHEDULED && time.Now().UTC().After(*&d.(*Booking).ExpectedStartDate) {
+		if d.(*Booking).State == enum.SCHEDULED && now.UTC().After(d.(*Booking).ExpectedStartDate) {
 			d.(*Booking).State = enum.DELAYED
 			utils.GenericRawUpdateOne(d, d.GetID(), a)
 		}

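Note: the 60-second offset above gives bookings (and, further down, workflow executions) a grace period before the start date is treated as missed. A reduced sketch of just that check, with simplified types rather than the real accessor:

package main

import (
	"fmt"
	"time"
)

// expired reports whether a DRAFT item should be purged: the clock is shifted
// back by 60s so an item starting "right now" is not deleted immediately.
func expired(expectedStart time.Time) bool {
	now := time.Now().Add(-60 * time.Second)
	return now.UTC().After(expectedStart)
}

func main() {
	fmt.Println(expired(time.Now().Add(30 * time.Second))) // false: still inside the grace window
	fmt.Println(expired(time.Now().Add(-2 * time.Minute))) // true: start date is well in the past
}
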
View File

@@ -42,13 +42,15 @@ const (
 	S3
 	MEMORY
 	HARDWARE
+	AZURE
+	GCS
 )
 
 // String() - Returns the string representation of the storage type
 func (t StorageType) String() string {
-	return [...]string{"FILE", "STREAM", "API", "DATABASE", "S3", "MEMORY", "HARDWARE"}[t]
+	return [...]string{"FILE", "STREAM", "API", "DATABASE", "S3", "MEMORY", "HARDWARE", "AZURE", "GCS"}[t]
 }
 
 func TypeList() []StorageType {
-	return []StorageType{FILE, STREAM, API, DATABASE, S3, MEMORY, HARDWARE}
+	return []StorageType{FILE, STREAM, API, DATABASE, S3, MEMORY, HARDWARE, AZURE, GCS}
 }

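Note: String() and TypeList() have to stay in step with the const block, which is why all three are touched together above. A reduced, standalone sketch of why (this is not the real enum, just the same pattern):

package main

import "fmt"

type StorageType int

const (
	S3 StorageType = iota
	MEMORY
	HARDWARE
	AZURE
	GCS
)

// The array index is the enum value, so adding a constant without a matching
// entry here would make String() panic with an index out of range.
func (t StorageType) String() string {
	return [...]string{"S3", "MEMORY", "HARDWARE", "AZURE", "GCS"}[t]
}

func main() {
	fmt.Println(AZURE, GCS) // AZURE GCS
}
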
View File

@@ -7,7 +7,7 @@ import (
 	"cloud.o-forge.io/core/oc-lib/tools"
 )
 
-func GetPlannerNearestStart(start time.Time, planned map[tools.DataType][]pricing.PricedItemITF, request *tools.APIRequest) float64 {
+func GetPlannerNearestStart(start time.Time, planned map[tools.DataType]map[string]pricing.PricedItemITF, request *tools.APIRequest) float64 {
 	near := float64(10000000000) // set a high value
 	for _, items := range planned { // loop through the planned items
 		for _, priced := range items { // loop through the priced items
@@ -23,7 +23,7 @@ func GetPlannerNearestStart(start time.Time, planned map[tools.DataType][]pricin
 	return near
 }
 
-func GetPlannerLongestTime(end *time.Time, planned map[tools.DataType][]pricing.PricedItemITF, request *tools.APIRequest) float64 {
+func GetPlannerLongestTime(end *time.Time, planned map[tools.DataType]map[string]pricing.PricedItemITF, request *tools.APIRequest) float64 {
 	if end == nil {
 		return -1
 	}

View File

@@ -28,7 +28,7 @@ var models = map[string]func() utils.DBObject{
 	tools.STORAGE_RESOURCE.String():    func() utils.DBObject { return &resource.StorageResource{} },
 	tools.PROCESSING_RESOURCE.String(): func() utils.DBObject { return &resource.ProcessingResource{} },
 	tools.WORKFLOW.String():            func() utils.DBObject { return &w2.Workflow{} },
-	tools.WORKFLOW_EXECUTION.String():  func() utils.DBObject { return &workflow_execution.WorkflowExecutions{} },
+	tools.WORKFLOW_EXECUTION.String():  func() utils.DBObject { return &workflow_execution.WorkflowExecution{} },
 	tools.WORKSPACE.String():           func() utils.DBObject { return &w3.Workspace{} },
 	tools.PEER.String():                func() utils.DBObject { return &peer.Peer{} },
 	tools.COLLABORATIVE_AREA.String():  func() utils.DBObject { return &collaborative_area.CollaborativeArea{} },

View File

@@ -68,10 +68,10 @@ func (o *Order) Pay(scheduler *workflow_execution.WorkflowSchedule, request *too
 	} else {
 		o.IsDraft = false
 	}
-	for _, exec := range scheduler.WorkflowExecutions {
+	for _, exec := range scheduler.WorkflowExecution {
 		exec.IsDraft = false
 		_, code, err := utils.GenericUpdateOne(exec, exec.GetID(),
-			workflow_execution.NewAccessor(request), &workflow_execution.WorkflowExecutions{})
+			workflow_execution.NewAccessor(request), &workflow_execution.WorkflowExecution{})
 		if code != 200 || err != nil {
 			return errors.New("could not update the workflow execution" + fmt.Sprintf("%v", err))
 		}
@@ -100,7 +100,7 @@ func (o *Order) draftStoreFromModel(scheduler *workflow_execution.WorkflowSchedu
 	o.IsDraft = true
 	o.OrderBy = request.PeerID
 	o.WorkflowExecutionIDs = []string{} // create an array of ids
-	for _, exec := range scheduler.WorkflowExecutions {
+	for _, exec := range scheduler.WorkflowExecution {
 		o.WorkflowExecutionIDs = append(o.WorkflowExecutionIDs, exec.GetID())
 	}
 	// set the name of the order
@@ -166,12 +166,12 @@ func (o *Order) draftBookOrder(scheduler *workflow_execution.WorkflowSchedule, r
 	if request == nil {
 		return draftedBookings, errors.New("no request found")
 	}
-	for _, exec := range scheduler.WorkflowExecutions {
+	for _, exec := range scheduler.WorkflowExecution {
 		_, priceds, _, err := scheduler.Workflow.Planify(exec.ExecDate, exec.EndDate, request)
 		if err != nil {
 			return draftedBookings, errors.New("could not planify the workflow" + fmt.Sprintf("%v", err))
 		}
-		bookings := exec.Book(scheduler.Workflow.UUID, priceds)
+		bookings := exec.Book(scheduler.UUID, scheduler.Workflow.UUID, priceds)
 		for _, booking := range bookings {
 			_, err := (&peer.Peer{}).LaunchPeerExecution(booking.DestPeerID, "",
 				tools.BOOKING, tools.POST, booking.Serialize(booking), request.Caller)

View File

@@ -4,7 +4,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"regexp"
 	"strings"
 
 	"cloud.o-forge.io/core/oc-lib/tools"
@@ -29,47 +28,40 @@ type PeerCache struct {
 }
 
 // urlFormat formats the URL of the peer with the data type API function
-func (p *PeerCache) urlFormat(url string, dt tools.DataType) string {
+func (p *PeerCache) urlFormat(hostUrl string, dt tools.DataType) string {
 	// localhost is replaced by the local peer URL
 	// because localhost must collide on a web request security protocol
-	localhost := ""
-	if strings.Contains(url, "localhost") {
+	/*localhost := ""
+	if strings.Contains(hostUrl, "localhost") {
 		localhost = "localhost"
 	}
-	if strings.Contains(url, "127.0.0.1") {
+	if strings.Contains(hostUrl, "127.0.0.1") {
 		localhost = "127.0.0.1"
 	}
 	if localhost != "" {
 		r := regexp.MustCompile("(" + localhost + ":[0-9]+)")
-		t := r.FindString(url)
+		t := r.FindString(hostUrl)
 		if t != "" {
-			url = strings.Replace(url, t, dt.API()+":8080/oc", -1)
+			hostUrl = strings.Replace(hostUrl, t, dt.API()+":8080/oc", -1)
 		} else {
-			url = strings.ReplaceAll(url, localhost, dt.API()+":8080/oc")
+			hostUrl = strings.ReplaceAll(hostUrl, localhost, dt.API()+":8080/oc")
 		}
-	} else {
-		url = url + "/" + dt.API()
-	}
-	return url
+	} else {*/
+	hostUrl = hostUrl + "/" + strings.ReplaceAll(dt.API(), "oc-", "")
+	//}
+	fmt.Println("Contacting", hostUrl)
+	return hostUrl
 }
 
 // checkPeerStatus checks the status of a peer
-func (p *PeerCache) checkPeerStatus(peerID string, appName string, caller *tools.HTTPCaller) (*Peer, bool) {
+func (p *PeerCache) checkPeerStatus(peerID string, appName string) (*Peer, bool) {
 	api := tools.API{}
 	access := NewShallowAccessor()
 	res, code, _ := access.LoadOne(peerID) // Load the peer from db
 	if code != 200 { // no peer no party
 		return nil, false
 	}
-	methods := caller.URLS[tools.PEER] // Get the methods url of the peer
-	if methods == nil {
-		return res.(*Peer), false
-	}
-	meth := methods[tools.POST] // Get the POST method to check status
-	if meth == "" {
-		return res.(*Peer), false
-	}
-	url := p.urlFormat(res.(*Peer).Url, tools.PEER) + meth // Format the URL
+	url := p.urlFormat(res.(*Peer).Url, tools.PEER) + "/status" // Format the URL
 	state, services := api.CheckRemotePeer(url)
 	res.(*Peer).ServicesState = services // Update the services states of the peer
 	access.UpdateOne(res, peerID) // Update the peer in the db
@@ -77,23 +69,24 @@ func (p *PeerCache) checkPeerStatus(peerID string, appName string, caller *tools
 }
 
 // LaunchPeerExecution launches an execution on a peer
+// The method contacts the path described by : peer.Url + datatype path (from enums) + replacement of id by dataID
 func (p *PeerCache) LaunchPeerExecution(peerID string, dataID string,
 	dt tools.DataType, method tools.METHOD, body interface{}, caller *tools.HTTPCaller) (*PeerExecution, error) {
 	fmt.Println("Launching peer execution on", caller.URLS, dt, method)
 	methods := caller.URLS[dt] // Get the methods url of the data type
 	if m, ok := methods[method]; !ok || m == "" {
-		return nil, errors.New("no path found")
+		return nil, errors.New("Requested method " + method.String() + " not declared in HTTPCaller")
 	}
-	meth := methods[method] // Get the method url to execute
-	meth = strings.ReplaceAll(meth, ":id", dataID) // Replace the id in the url in case of a DELETE / UPDATE method (it's a standard naming in OC)
+	path := methods[method] // Get the path corresponding to the action we want to execute
+	path = strings.ReplaceAll(path, ":id", dataID) // Replace the id in the path in case of a DELETE / UPDATE method (it's a standard naming in OC)
 	url := ""
 	// Check the status of the peer
-	if mypeer, ok := p.checkPeerStatus(peerID, dt.API(), caller); !ok && mypeer != nil {
+	if mypeer, ok := p.checkPeerStatus(peerID, dt.API()); !ok && mypeer != nil {
 		// If the peer is not reachable, add the execution to the failed executions list
 		pexec := &PeerExecution{
 			Method:   method.String(),
-			Url:      p.urlFormat((mypeer.Url)+meth, dt),
+			Url:      p.urlFormat((mypeer.Url), dt) + path, // the url is constitued of : host URL + resource path + action path (ex : mypeer.com/datacenter/resourcetype/path/to/action)
 			Body:     body,
 			DataType: dt.EnumIndex(),
 			DataID:   dataID,
@@ -106,7 +99,7 @@ func (p *PeerCache) LaunchPeerExecution(peerID string, dataID string,
 		return nil, errors.New("peer not found")
 	}
 	// If the peer is reachable, launch the execution
-	url = p.urlFormat((mypeer.Url)+meth, dt) // Format the URL
+	url = p.urlFormat((mypeer.Url), dt) + path // Format the URL
 	tmp := mypeer.FailedExecution // Get the failed executions list
 	mypeer.FailedExecution = []PeerExecution{} // Reset the failed executions list
 	NewShallowAccessor().UpdateOne(mypeer, peerID) // Update the peer in the db

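Note: a rough illustration of the URL that urlFormat plus the caller path now produce. The host, API name and action path below are made-up values; in the library the path actually comes from caller.URLS and dt.API().

package main

import (
	"fmt"
	"strings"
)

func main() {
	hostUrl := "http://peer.example.com" // hypothetical peer URL stored in the Peer object
	api := "oc-datacenter"               // what dt.API() could return for the datacenter type
	path := "/admiralty/:id/nodes"       // hypothetical action path registered in the HTTPCaller
	dataID := "42"

	path = strings.ReplaceAll(path, ":id", dataID)
	url := hostUrl + "/" + strings.ReplaceAll(api, "oc-", "") + path
	fmt.Println(url) // http://peer.example.com/datacenter/admiralty/42/nodes
	// Traefik is then expected to route /datacenter/... to the oc-datacenter service.
}
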
View File

@@ -17,7 +17,7 @@ import (
 * it defines the resource compute
 */
 type ComputeResource struct {
-	AbstractIntanciatedResource[*ComputeResourceInstance]
+	AbstractInstanciatedResource[*ComputeResourceInstance]
 	Architecture   string                  `json:"architecture,omitempty" bson:"architecture,omitempty"` // Architecture is the architecture
 	Infrastructure enum.InfrastructureType `json:"infrastructure" bson:"infrastructure" default:"-1"`    // Infrastructure is the infrastructure
 }
@@ -35,7 +35,7 @@ func (abs *ComputeResource) ConvertToPricedResource(
 	if t != tools.COMPUTE_RESOURCE {
 		return nil
 	}
-	p := abs.AbstractIntanciatedResource.ConvertToPricedResource(t, request)
+	p := abs.AbstractInstanciatedResource.ConvertToPricedResource(t, request)
 	priced := p.(*PricedResource)
 	return &PricedComputeResource{
 		PricedResource: *priced,
@@ -52,6 +52,7 @@ type ComputeNode struct {
 type ComputeResourceInstance struct {
 	ResourceInstance[*ComputeResourcePartnership]
+	Source             string   `json:"source,omitempty" bson:"source,omitempty"` // Source is the source of the resource
 	SecurityLevel      string   `json:"security_level,omitempty" bson:"security_level,omitempty"`
 	PowerSources       []string `json:"power_sources,omitempty" bson:"power_sources,omitempty"`
 	AnnualCO2Emissions float64  `json:"annual_co2_emissions,omitempty" bson:"co2_emissions,omitempty"`

View File

@@ -16,7 +16,7 @@ import (
 * it defines the resource data
 */
 type DataResource struct {
-	AbstractIntanciatedResource[*DataInstance]
+	AbstractInstanciatedResource[*DataInstance]
 	Type     string `bson:"type,omitempty" json:"type,omitempty"`
 	Quality  string `bson:"quality,omitempty" json:"quality,omitempty"`
 	OpenData bool   `bson:"open_data" json:"open_data" default:"false"` // Type is the type of the storage
@@ -42,7 +42,7 @@ func (abs *DataResource) ConvertToPricedResource(
 	if t != tools.DATA_RESOURCE {
 		return nil
 	}
-	p := abs.AbstractIntanciatedResource.ConvertToPricedResource(t, request)
+	p := abs.AbstractInstanciatedResource.ConvertToPricedResource(t, request)
 	priced := p.(*PricedResource)
 	return &PricedDataResource{
 		PricedResource: *priced,

View File

@@ -12,6 +12,7 @@ type ResourceInterface interface {
 	ConvertToPricedResource(t tools.DataType, request *tools.APIRequest) pricing.PricedItemITF
 	GetType() string
 	GetSelectedInstance() utils.DBObject
+	ClearEnv() utils.DBObject
 	SetAllowedInstances(request *tools.APIRequest)
 }
@@ -20,6 +21,7 @@ type ResourceInstanceITF interface {
 	GetID() string
 	GetName() string
 	StoreDraftDefault()
+	ClearEnv()
 	GetPricingsProfiles(peerID string, groups []string) []pricing.PricingProfileITF
 	GetPeerGroups() ([]ResourcePartnerITF, []map[string][]string)
 	ClearPeerGroups()

View File

@@ -25,7 +25,7 @@ type ProcessingUsage struct {
 * it defines the resource processing
 */
 type ProcessingResource struct {
-	AbstractIntanciatedResource[*ProcessingInstance]
+	AbstractInstanciatedResource[*ProcessingInstance]
 	Infrastructure enum.InfrastructureType `json:"infrastructure" bson:"infrastructure" default:"-1"` // Infrastructure is the infrastructure
 	IsService      bool                    `json:"is_service,omitempty" bson:"is_service,omitempty"`  // IsService is a flag that indicates if the processing is a service
 	Usage          *ProcessingUsage        `bson:"usage,omitempty" json:"usage,omitempty"`            // Usage is the usage of the processing

View File

@@ -47,12 +47,12 @@ func (r *AbstractResource) CanDelete() bool {
 	return r.IsDraft // only draft bookings can be deleted
 }
 
-type AbstractIntanciatedResource[T ResourceInstanceITF] struct {
+type AbstractInstanciatedResource[T ResourceInstanceITF] struct {
 	AbstractResource // AbstractResource contains the basic fields of an object (id, name)
 	Instances []T `json:"instances,omitempty" bson:"instances,omitempty"` // Bill is the bill of the resource // Bill is the bill of the resource
 }
 
-func (abs *AbstractIntanciatedResource[T]) ConvertToPricedResource(
+func (abs *AbstractInstanciatedResource[T]) ConvertToPricedResource(
 	t tools.DataType, request *tools.APIRequest) pricing.PricedItemITF {
 	instances := map[string]string{}
 	profiles := []pricing.PricingProfileITF{}
@@ -71,7 +71,14 @@ func (abs *AbstractIntanciatedResource[T]) ConvertToPricedResource(
 	}
 }
 
-func (r *AbstractIntanciatedResource[T]) GetSelectedInstance() utils.DBObject {
+func (abs *AbstractInstanciatedResource[T]) ClearEnv() utils.DBObject {
+	for _, instance := range abs.Instances {
+		instance.ClearEnv()
+	}
+	return abs
+}
+
+func (r *AbstractInstanciatedResource[T]) GetSelectedInstance() utils.DBObject {
 	if r.SelectedInstanceIndex != nil && len(r.Instances) > *r.SelectedInstanceIndex {
 		return r.Instances[*r.SelectedInstanceIndex]
 	}
@@ -81,11 +88,14 @@ func (r *AbstractIntanciatedResource[T]) GetSelectedInstance() utils.DBObject {
 	return nil
 }
 
-func (abs *AbstractIntanciatedResource[T]) SetAllowedInstances(request *tools.APIRequest) {
+func (abs *AbstractInstanciatedResource[T]) SetAllowedInstances(request *tools.APIRequest) {
+	if request != nil && request.PeerID == abs.CreatorID && request.PeerID != "" {
+		return
+	}
 	abs.Instances = verifyAuthAction[T](abs.Instances, request)
 }
 
-func (d *AbstractIntanciatedResource[T]) Trim() {
+func (d *AbstractInstanciatedResource[T]) Trim() {
 	d.Type = d.GetType()
 	if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: d.CreatorID}}).IsMySelf(); !ok {
 		for _, instance := range d.Instances {
@@ -94,7 +104,7 @@ func (d *AbstractIntanciatedResource[T]) Trim() {
 		}
 	}
 }
 
-func (abs *AbstractIntanciatedResource[T]) VerifyAuth(request *tools.APIRequest) bool {
+func (abs *AbstractInstanciatedResource[T]) VerifyAuth(request *tools.APIRequest) bool {
 	return len(verifyAuthAction[T](abs.Instances, request)) > 0 || abs.AbstractObject.VerifyAuth(request)
 }
@@ -127,6 +137,11 @@ type GeoPoint struct {
 	Longitude float64 `json:"longitude,omitempty" bson:"longitude,omitempty"`
 }
 
+type Credentials struct {
+	Login string `json:"login,omitempty" bson:"login,omitempty"`
+	Pass  string `json:"password,omitempty" bson:"password,omitempty"`
+}
+
 type ResourceInstance[T ResourcePartnerITF] struct {
 	utils.AbstractObject
 	Location GeoPoint `json:"location,omitempty" bson:"location,omitempty"`
@@ -138,6 +153,12 @@ type ResourceInstance[T ResourcePartnerITF] struct {
 	Partnerships []T `json:"partnerships,omitempty" bson:"partnerships,omitempty"`
 }
 
+func (ri *ResourceInstance[T]) ClearEnv() {
+	ri.Env = []models.Param{}
+	ri.Inputs = []models.Param{}
+	ri.Outputs = []models.Param{}
+}
+
 func (ri *ResourceInstance[T]) GetPricingsProfiles(peerID string, groups []string) []pricing.PricingProfileITF {
 	pricings := []pricing.PricingProfileITF{}
 	for _, p := range ri.Partnerships {

View File

@@ -65,6 +65,12 @@ func (wfa *resourceMongoAccessor[T]) LoadAll(isDraft bool) ([]utils.ShallowDBObj
 }
 
 func (wfa *resourceMongoAccessor[T]) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) {
+	if filters == nil && search == "*" {
+		return utils.GenericLoadAll[T](func(d utils.DBObject) utils.ShallowDBObject {
+			d.(T).SetAllowedInstances(wfa.Request)
+			return d
+		}, isDraft, wfa)
+	}
 	return utils.GenericSearch[T](filters, search, wfa.getResourceFilter(search),
 		func(d utils.DBObject) utils.ShallowDBObject {
 			d.(T).SetAllowedInstances(wfa.Request)
@@ -73,9 +79,6 @@ func (wfa *resourceMongoAccessor[T]) Search(filters *dbs.Filters, search string,
 }
 
 func (abs *resourceMongoAccessor[T]) getResourceFilter(search string) *dbs.Filters {
-	if search == "*" {
-		search = ""
-	}
 	return &dbs.Filters{
 		Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
 			"abstractintanciatedresource.abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},

View File

@@ -17,7 +17,7 @@ import (
 * it defines the resource storage
 */
 type StorageResource struct {
-	AbstractIntanciatedResource[*StorageResourceInstance] // AbstractResource contains the basic fields of an object (id, name)
+	AbstractInstanciatedResource[*StorageResourceInstance] // AbstractResource contains the basic fields of an object (id, name)
 	StorageType enum.StorageType `bson:"storage_type" json:"storage_type" default:"-1"` // Type is the type of the storage
 	Acronym     string           `bson:"acronym,omitempty" json:"acronym,omitempty"`    // Acronym is the acronym of the storage
 }
@@ -35,7 +35,7 @@ func (abs *StorageResource) ConvertToPricedResource(
 	if t != tools.STORAGE_RESOURCE {
 		return nil
 	}
-	p := abs.AbstractIntanciatedResource.ConvertToPricedResource(t, request)
+	p := abs.AbstractInstanciatedResource.ConvertToPricedResource(t, request)
 	priced := p.(*PricedResource)
 	return &PricedStorageResource{
 		PricedResource: *priced,
@@ -44,6 +44,7 @@ func (abs *StorageResource) ConvertToPricedResource(
 type StorageResourceInstance struct {
 	ResourceInstance[*StorageResourcePartnership]
+	Credentials   *Credentials `json:"credentials,omitempty" bson:"credentials,omitempty"`
 	Source        string `bson:"source,omitempty" json:"source,omitempty"` // Source is the source of the storage
 	Local         bool   `bson:"local" json:"local"`
 	SecurityLevel string `bson:"security_level,omitempty" json:"security_level,omitempty"`
@@ -54,6 +55,13 @@ type StorageResourceInstance struct {
 	Throughput string `bson:"throughput,omitempty" json:"throughput,omitempty"` // Throughput is the throughput of the storage
 }
 
+func (ri *StorageResourceInstance) ClearEnv() {
+	ri.Credentials = nil
+	ri.Env = []models.Param{}
+	ri.Inputs = []models.Param{}
+	ri.Outputs = []models.Param{}
+}
+
 func (ri *StorageResourceInstance) StoreDraftDefault() {
 	found := false
 	for _, p := range ri.ResourceInstance.Env {

View File

@@ -23,6 +23,10 @@ func (r *WorkflowResource) GetType() string {
 	return tools.WORKFLOW_RESOURCE.String()
 }
 
+func (d *WorkflowResource) ClearEnv() utils.DBObject {
+	return d
+}
+
 func (d *WorkflowResource) Trim() {
 	/* EMPTY */
 }

View File

@@ -91,7 +91,7 @@ func (ao *AbstractObject) UpToDate(user string, peer string, create bool) {
 }
 
 func (ao *AbstractObject) VerifyAuth(request *tools.APIRequest) bool {
-	return ao.AccessMode == Public || (request != nil && ao.CreatorID == request.PeerID)
+	return ao.AccessMode == Public || (request != nil && ao.CreatorID == request.PeerID && request.PeerID != "")
 }
 
 func (ao *AbstractObject) GetObjectFilters(search string) *dbs.Filters {

View File

@@ -66,7 +66,6 @@ func GenericDeleteOne(id string, a Accessor) (DBObject, int, error) {
 		return nil, 403, errors.New("you are not allowed to delete :" + a.GetType().String())
 	}
 	if err != nil {
-		a.GetLogger().Error().Msg("Could not retrieve " + id + " to db. Error: " + err.Error())
 		return nil, code, err
 	}
 	if a.ShouldVerifyAuth() && !res.VerifyAuth(a.GetRequest()) {
@@ -114,7 +113,6 @@ func GenericLoadOne[T DBObject](id string, f func(DBObject) (DBObject, int, erro
 	var data T
 	res_mongo, code, err := mongo.MONGOService.LoadOne(id, a.GetType().String())
 	if err != nil {
-		a.GetLogger().Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
 		return nil, code, err
 	}
 	res_mongo.Decode(&data)
@@ -128,7 +126,6 @@ func genericLoadAll[T DBObject](res *mgb.Cursor, code int, err error, onlyDraft
 	objs := []ShallowDBObject{}
 	var results []T
 	if err != nil {
-		a.GetLogger().Error().Msg("Could not retrieve any from db. Error: " + err.Error())
 		return nil, code, err
 	}
 	if err = res.All(mongo.MngoCtx, &results); err != nil {

View File

@@ -128,8 +128,8 @@ func (wfa *Workflow) CheckBooking(caller *tools.HTTPCaller) (bool, error) {
 	return true, nil
 }
 
-func (wf *Workflow) Planify(start time.Time, end *time.Time, request *tools.APIRequest) (float64, map[tools.DataType][]pricing.PricedItemITF, *Workflow, error) {
-	priceds := map[tools.DataType][]pricing.PricedItemITF{}
+func (wf *Workflow) Planify(start time.Time, end *time.Time, request *tools.APIRequest) (float64, map[tools.DataType]map[string]pricing.PricedItemITF, *Workflow, error) {
+	priceds := map[tools.DataType]map[string]pricing.PricedItemITF{}
 	ps, priceds, err := plan[*resources.ProcessingResource](tools.PROCESSING_RESOURCE, wf, priceds, request, wf.Graph.IsProcessing,
 		func(res resources.ResourceInterface, priced pricing.PricedItemITF) (time.Time, float64) {
 			return start.Add(time.Duration(wf.Graph.GetAverageTimeProcessingBeforeStart(0, res.GetID(), request)) * time.Second), priced.GetExplicitDurationInS()
@@ -189,12 +189,12 @@ func (wf *Workflow) Planify(start time.Time, end *time.Time, request *tools.APIR
 	return longest, priceds, wf, nil
 }
 
-func plan[T resources.ResourceInterface](dt tools.DataType, wf *Workflow, priceds map[tools.DataType][]pricing.PricedItemITF, request *tools.APIRequest,
-	f func(graph.GraphItem) bool, start func(resources.ResourceInterface, pricing.PricedItemITF) (time.Time, float64), end func(time.Time, float64) *time.Time) ([]T, map[tools.DataType][]pricing.PricedItemITF, error) {
+func plan[T resources.ResourceInterface](dt tools.DataType, wf *Workflow, priceds map[tools.DataType]map[string]pricing.PricedItemITF, request *tools.APIRequest,
+	f func(graph.GraphItem) bool, start func(resources.ResourceInterface, pricing.PricedItemITF) (time.Time, float64), end func(time.Time, float64) *time.Time) ([]T, map[tools.DataType]map[string]pricing.PricedItemITF, error) {
 	resources := []T{}
 	for _, item := range wf.GetGraphItems(f) {
 		if priceds[dt] == nil {
-			priceds[dt] = []pricing.PricedItemITF{}
+			priceds[dt] = map[string]pricing.PricedItemITF{}
 		}
 		dt, realItem := item.GetResource()
 		if realItem == nil {
@@ -212,7 +212,7 @@ func plan[T resources.ResourceInterface](dt tools.DataType, wf *Workflow, priced
 			priced.SetLocationEnd(*e)
 		}
 		resources = append(resources, realItem.(T))
-		priceds[dt] = append(priceds[dt], priced)
+		priceds[dt][item.ID] = priced
 	}
 	return resources, priceds, nil
 }

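Note: the planner result changed from a slice per data type to a map keyed by graph item ID, so a priced item can later be looked up by the item that produced it. A simplified sketch of the shape (a plain string stands in for the PricedItemITF values; the IDs are invented):

package main

import "fmt"

type DataType int

const (
	PROCESSING_RESOURCE DataType = iota
	STORAGE_RESOURCE
)

func main() {
	// Old shape: priceds[dt] was a slice, so an item could not be retrieved
	// again by its graph ID. New shape keys each priced item by graph item ID,
	// which is what bookEach's PeerBookByGraph bookkeeping relies on.
	priceds := map[DataType]map[string]string{}
	if priceds[PROCESSING_RESOURCE] == nil {
		priceds[PROCESSING_RESOURCE] = map[string]string{}
	}
	priceds[PROCESSING_RESOURCE]["graph-item-1"] = "priced item for graph-item-1"
	fmt.Println(priceds)
}
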
View File

@@ -95,12 +95,12 @@ func (a *workflowMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.
 	if set.(*Workflow).Graph != nil && set.(*Workflow).Graph.Partial {
 		return nil, 403, errors.New("you are not allowed to update a partial workflow")
 	}
-	res, code, err := utils.GenericUpdateOne(a.verifyResource(set), id, a, &Workflow{})
+	res, code, err := utils.GenericUpdateOne(set, id, a, &Workflow{})
 	if code != 200 {
 		return nil, code, err
 	}
 	workflow := res.(*Workflow)
-	a.execute(workflow, false, false) // update the workspace for the workflow
+	a.execute(workflow, false, true) // update the workspace for the workflow
 	a.share(workflow, false, a.GetCaller()) // share the update to the peers
 	return res, code, nil
 }
@@ -119,12 +119,19 @@ func (a *workflowMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, i
 	workflow := res.(*Workflow)
 	a.share(workflow, false, a.GetCaller()) // share the creation to the peers
-	a.execute(workflow, false, false) // store the workspace for the workflow
+	a.execute(workflow, false, true) // store the workspace for the workflow
 	return res, code, nil
 }
 
 // CopyOne copies a workflow in the database
 func (a *workflowMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
+	wf := data.(*Workflow)
+	for _, item := range wf.Graph.Items {
+		_, obj := item.GetResource()
+		if obj != nil {
+			obj.ClearEnv()
+		}
+	}
 	return utils.GenericStoreOne(data, a)
 }
@@ -207,10 +214,10 @@ func (a *workflowMongoAccessor) verifyResource(obj utils.DBObject) utils.DBObjec
 	} else if t == tools.DATA_RESOURCE {
 		access = resources.NewAccessor[*resources.DataResource](t, a.GetRequest(), func() utils.DBObject { return &resources.DataResource{} })
 	} else {
-		wf.Graph.Clear(item.Data.GetID())
+		wf.Graph.Clear(resource.GetID())
 	}
 	if error := utils.VerifyAccess(access, resource.GetID()); error != nil {
-		wf.Graph.Clear(item.Data.GetID())
+		wf.Graph.Clear(resource.GetID())
 	}
 	}
 	return wf

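Note: CopyOne now scrubs every graph resource through ClearEnv before storing the copy, so env variables, inputs/outputs and storage credentials do not leak into the duplicated workflow. A reduced model of that cascade, with simplified types that only illustrate the pattern:

package main

import "fmt"

type instance struct{ Env, Inputs, Outputs []string }

// ClearEnv wipes everything environment-related on one instance.
func (i *instance) ClearEnv() { i.Env, i.Inputs, i.Outputs = nil, nil, nil }

type resource struct{ Instances []*instance }

// ClearEnv forwards the call to every instance, as the real resources do.
func (r *resource) ClearEnv() *resource {
	for _, inst := range r.Instances {
		inst.ClearEnv()
	}
	return r
}

func main() {
	r := &resource{Instances: []*instance{{Env: []string{"SECRET=x"}}}}
	fmt.Println(len(r.ClearEnv().Instances[0].Env)) // 0
}
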
View File

@ -15,39 +15,41 @@ import (
) )
/* /*
* WorkflowExecutions is a struct that represents a list of workflow executions * WorkflowExecution is a struct that represents a list of workflow executions
* Warning: No user can write (del, post, put) a workflow execution, it is only used by the system * Warning: No user can write (del, post, put) a workflow execution, it is only used by the system
* workflows generate their own executions * workflows generate their own executions
*/ */
type WorkflowExecutions struct { type WorkflowExecution struct {
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name) utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
PeerBookByGraph map[string]map[string][]string `json:"peer_book_by_graph,omitempty" bson:"peer_book_by_graph,omitempty"` // BookByResource is a map of the resource id and the list of the booking id
ExecutionsID string `json:"executions_id,omitempty" bson:"executions_id,omitempty"`
ExecDate time.Time `json:"execution_date,omitempty" bson:"execution_date,omitempty" validate:"required"` // ExecDate is the execution date of the workflow, is required ExecDate time.Time `json:"execution_date,omitempty" bson:"execution_date,omitempty" validate:"required"` // ExecDate is the execution date of the workflow, is required
EndDate *time.Time `json:"end_date,omitempty" bson:"end_date,omitempty"` // EndDate is the end date of the workflow EndDate *time.Time `json:"end_date,omitempty" bson:"end_date,omitempty"` // EndDate is the end date of the workflow
State enum.BookingStatus `json:"state" bson:"state" default:"0"` // TEMPORARY TODO DEFAULT 1 -> 0 State is the state of the workflow State enum.BookingStatus `json:"state" bson:"state" default:"0"` // TEMPORARY TODO DEFAULT 1 -> 0 State is the state of the workflow
WorkflowID string `json:"workflow_id" bson:"workflow_id,omitempty"` // WorkflowID is the ID of the workflow WorkflowID string `json:"workflow_id" bson:"workflow_id,omitempty"` // WorkflowID is the ID of the workflow
} }
func (r *WorkflowExecutions) StoreDraftDefault() { func (r *WorkflowExecution) StoreDraftDefault() {
r.IsDraft = true // TODO: TEMPORARY r.IsDraft = false // TODO: TEMPORARY
r.State = enum.DRAFT r.State = enum.SCHEDULED
} }
func (r *WorkflowExecutions) CanUpdate(set utils.DBObject) (bool, utils.DBObject) { func (r *WorkflowExecution) CanUpdate(set utils.DBObject) (bool, utils.DBObject) {
if r.State != set.(*WorkflowExecutions).State { if r.State != set.(*WorkflowExecution).State {
return true, &WorkflowExecutions{State: set.(*WorkflowExecutions).State} // only state can be updated return true, &WorkflowExecution{State: set.(*WorkflowExecution).State} // only state can be updated
} }
return !r.IsDraft, set // only draft buying can be updated return !r.IsDraft, set // only draft buying can be updated
} }
func (r *WorkflowExecutions) CanDelete() bool { func (r *WorkflowExecution) CanDelete() bool {
return r.IsDraft // only draft bookings can be deleted return r.IsDraft // only draft bookings can be deleted
} }
func (wfa *WorkflowExecutions) Equals(we *WorkflowExecutions) bool { func (wfa *WorkflowExecution) Equals(we *WorkflowExecution) bool {
return wfa.ExecDate.Equal(we.ExecDate) && wfa.WorkflowID == we.WorkflowID return wfa.ExecDate.Equal(we.ExecDate) && wfa.WorkflowID == we.WorkflowID
} }
func (ws *WorkflowExecutions) PurgeDraft(request *tools.APIRequest) error { func (ws *WorkflowExecution) PurgeDraft(request *tools.APIRequest) error {
if ws.EndDate == nil { if ws.EndDate == nil {
// if no end... then Book like a savage // if no end... then Book like a savage
e := ws.ExecDate.Add(time.Hour) e := ws.ExecDate.Add(time.Hour)
@ -74,7 +76,7 @@ func (ws *WorkflowExecutions) PurgeDraft(request *tools.APIRequest) error {
} }
 // tool to transform the argo status to a state
-func (wfa *WorkflowExecutions) ArgoStatusToState(status string) *WorkflowExecutions {
+func (wfa *WorkflowExecution) ArgoStatusToState(status string) *WorkflowExecution {
     status = strings.ToLower(status)
     switch status {
     case "succeeded": // Succeeded
@@ -89,38 +91,54 @@ func (wfa *WorkflowExecutions) ArgoStatusToState(status string) *WorkflowExecuti
     return wfa
 }
-func (r *WorkflowExecutions) GenerateID() {
+func (r *WorkflowExecution) GenerateID() {
+    if r.UUID == "" {
         r.UUID = uuid.New().String()
+    }
 }
-func (d *WorkflowExecutions) GetName() string {
+func (d *WorkflowExecution) GetName() string {
     return d.UUID + "_" + d.ExecDate.String()
 }
-func (d *WorkflowExecutions) GetAccessor(request *tools.APIRequest) utils.Accessor {
+func (d *WorkflowExecution) GetAccessor(request *tools.APIRequest) utils.Accessor {
     return NewAccessor(request) // Create a new instance of the accessor
 }
-func (d *WorkflowExecutions) VerifyAuth(request *tools.APIRequest) bool {
+func (d *WorkflowExecution) VerifyAuth(request *tools.APIRequest) bool {
     return true
 }
-func (d *WorkflowExecutions) Book(wfID string, priceds map[tools.DataType][]pricing.PricedItemITF) []*booking.Booking {
-    booking := d.bookEach(wfID, tools.STORAGE_RESOURCE, priceds[tools.STORAGE_RESOURCE])
-    booking = append(booking, d.bookEach(wfID, tools.PROCESSING_RESOURCE, priceds[tools.PROCESSING_RESOURCE])...)
+func (d *WorkflowExecution) Book(executionsID string, wfID string, priceds map[tools.DataType]map[string]pricing.PricedItemITF) []*booking.Booking {
+    booking := d.bookEach(executionsID, wfID, tools.STORAGE_RESOURCE, priceds[tools.STORAGE_RESOURCE])
+    booking = append(booking, d.bookEach(executionsID, wfID, tools.PROCESSING_RESOURCE, priceds[tools.PROCESSING_RESOURCE])...)
     return booking
 }
-func (d *WorkflowExecutions) bookEach(wfID string, dt tools.DataType, priceds []pricing.PricedItemITF) []*booking.Booking {
+func (d *WorkflowExecution) bookEach(executionsID string, wfID string, dt tools.DataType, priceds map[string]pricing.PricedItemITF) []*booking.Booking {
     items := []*booking.Booking{}
-    for _, priced := range priceds {
+    for itemID, priced := range priceds {
+        if d.PeerBookByGraph == nil {
+            d.PeerBookByGraph = map[string]map[string][]string{}
+        }
+        if d.PeerBookByGraph[priced.GetCreatorID()] == nil {
+            d.PeerBookByGraph[priced.GetCreatorID()] = map[string][]string{}
+        }
+        if d.PeerBookByGraph[priced.GetCreatorID()][itemID] == nil {
+            d.PeerBookByGraph[priced.GetCreatorID()][itemID] = []string{}
+        }
         start := d.ExecDate
         if s := priced.GetLocationStart(); s != nil {
             start = *s
         }
        end := start.Add(time.Duration(priced.GetExplicitDurationInS()) * time.Second)
        bookingItem := &booking.Booking{
-            State: enum.DRAFT,
+            AbstractObject: utils.AbstractObject{
+                UUID: uuid.New().String(),
+                Name: d.GetName() + "_" + executionsID + "_" + wfID,
+            },
+            ExecutionsID: executionsID,
+            State: enum.SCHEDULED,
             ResourceID: priced.GetID(),
             ResourceType: dt,
             DestPeerID: priced.GetCreatorID(),
@@ -130,6 +148,8 @@ func (d *WorkflowExecutions) bookEach(wfID string, dt tools.DataType, priceds []
             ExpectedEndDate: &end,
         }
         items = append(items, bookingItem)
+        d.PeerBookByGraph[priced.GetCreatorID()][itemID] = append(
+            d.PeerBookByGraph[priced.GetCreatorID()][itemID], bookingItem.GetID())
     }
     return items
 }
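
Note (not part of the diff): a minimal, self-contained sketch of the index that the new bookEach() fills, where PeerBookByGraph maps a peer ID to a workflow graph item ID to the booking IDs created for that item. All identifiers below are hypothetical placeholders.

    package main

    import "fmt"

    // Hypothetical illustration of the structure bookEach() builds:
    // peer ID -> workflow graph item ID -> booking IDs created for that item.
    func main() {
        peerBookByGraph := map[string]map[string][]string{}

        record := func(peerID, itemID, bookingID string) {
            if peerBookByGraph[peerID] == nil {
                peerBookByGraph[peerID] = map[string][]string{}
            }
            peerBookByGraph[peerID][itemID] = append(peerBookByGraph[peerID][itemID], bookingID)
        }

        record("peer-A", "graph-item-1", "booking-1")
        record("peer-A", "graph-item-2", "booking-2")
        record("peer-B", "graph-item-3", "booking-3")

        fmt.Println(peerBookByGraph)
    }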

View File

@@ -43,11 +43,11 @@ func (wfa *workflowExecutionMongoAccessor) DeleteOne(id string) (utils.DBObject,
 }
 func (wfa *workflowExecutionMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
-    if set.(*WorkflowExecutions).State == 0 {
+    if set.(*WorkflowExecution).State == 0 {
         return nil, 400, errors.New("state is required")
     }
-    realSet := WorkflowExecutions{State: set.(*WorkflowExecutions).State}
-    return utils.GenericUpdateOne(&realSet, id, wfa, &WorkflowExecutions{})
+    realSet := WorkflowExecution{State: set.(*WorkflowExecution).State}
+    return utils.GenericUpdateOne(&realSet, id, wfa, &WorkflowExecution{})
 }
 func (wfa *workflowExecutionMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
@@ -59,13 +59,15 @@ func (wfa *workflowExecutionMongoAccessor) CopyOne(data utils.DBObject) (utils.D
 }
 func (a *workflowExecutionMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
-    return utils.GenericLoadOne[*WorkflowExecutions](id, func(d utils.DBObject) (utils.DBObject, int, error) {
-        if d.(*WorkflowExecutions).State == enum.DRAFT && !a.shallow && time.Now().UTC().After(d.(*WorkflowExecutions).ExecDate) {
+    return utils.GenericLoadOne[*WorkflowExecution](id, func(d utils.DBObject) (utils.DBObject, int, error) {
+        now := time.Now()
+        now = now.Add(time.Second * -60)
+        if d.(*WorkflowExecution).State == enum.DRAFT && !a.shallow && now.UTC().After(d.(*WorkflowExecution).ExecDate) {
             utils.GenericDeleteOne(d.GetID(), newShallowAccessor(a.Request))
             return nil, 404, errors.New("not found")
         }
-        if d.(*WorkflowExecutions).State == enum.SCHEDULED && !a.shallow && time.Now().UTC().After(d.(*WorkflowExecutions).ExecDate) {
-            d.(*WorkflowExecutions).State = enum.FORGOTTEN
+        if d.(*WorkflowExecution).State == enum.SCHEDULED && !a.shallow && now.UTC().After(d.(*WorkflowExecution).ExecDate) {
+            d.(*WorkflowExecution).State = enum.FORGOTTEN
             utils.GenericRawUpdateOne(d, id, newShallowAccessor(a.Request))
         }
         return d, 200, nil
@@ -73,21 +75,23 @@ func (a *workflowExecutionMongoAccessor) LoadOne(id string) (utils.DBObject, int
 }
 func (a *workflowExecutionMongoAccessor) LoadAll(isDraft bool) ([]utils.ShallowDBObject, int, error) {
-    return utils.GenericLoadAll[*WorkflowExecutions](a.getExec(), isDraft, a)
+    return utils.GenericLoadAll[*WorkflowExecution](a.getExec(), isDraft, a)
 }
 func (a *workflowExecutionMongoAccessor) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) {
-    return utils.GenericSearch[*WorkflowExecutions](filters, search, a.GetExecFilters(search), a.getExec(), isDraft, a)
+    return utils.GenericSearch[*WorkflowExecution](filters, search, a.GetExecFilters(search), a.getExec(), isDraft, a)
 }
 func (a *workflowExecutionMongoAccessor) getExec() func(utils.DBObject) utils.ShallowDBObject {
     return func(d utils.DBObject) utils.ShallowDBObject {
-        if d.(*WorkflowExecutions).State == enum.DRAFT && time.Now().UTC().After(d.(*WorkflowExecutions).ExecDate) {
+        now := time.Now()
+        now = now.Add(time.Second * -60)
+        if d.(*WorkflowExecution).State == enum.DRAFT && now.UTC().After(d.(*WorkflowExecution).ExecDate) {
             utils.GenericDeleteOne(d.GetID(), newShallowAccessor(a.Request))
             return nil
         }
-        if d.(*WorkflowExecutions).State == enum.SCHEDULED && time.Now().UTC().After(d.(*WorkflowExecutions).ExecDate) {
-            d.(*WorkflowExecutions).State = enum.FORGOTTEN
+        if d.(*WorkflowExecution).State == enum.SCHEDULED && now.UTC().After(d.(*WorkflowExecution).ExecDate) {
+            d.(*WorkflowExecution).State = enum.FORGOTTEN
             utils.GenericRawUpdateOne(d, d.GetID(), newShallowAccessor(a.Request))
             return d
         }
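
Note (not part of the diff): the accessor now shifts the reference clock back by 60 seconds before comparing against ExecDate, so an execution is only purged (DRAFT) or marked FORGOTTEN (SCHEDULED) once it is more than a minute past due. A standalone sketch of that check, with an illustrative execution date:

    package main

    import (
        "fmt"
        "time"
    )

    // isExpired mirrors the grace-window check above: the reference time is moved
    // 60 seconds into the past, so an ExecDate only counts as expired once it is
    // more than a minute old.
    func isExpired(execDate time.Time) bool {
        now := time.Now()
        now = now.Add(time.Second * -60)
        return now.UTC().After(execDate)
    }

    func main() {
        recent := time.Now().UTC().Add(-30 * time.Second) // hypothetical execution date
        fmt.Println(isExpired(recent))                    // false: still inside the 60-second window
    }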

View File

@@ -6,11 +6,13 @@ import (
     "strings"
     "time"
+    "cloud.o-forge.io/core/oc-lib/models/booking"
     "cloud.o-forge.io/core/oc-lib/models/common/enum"
     "cloud.o-forge.io/core/oc-lib/models/peer"
     "cloud.o-forge.io/core/oc-lib/models/utils"
     "cloud.o-forge.io/core/oc-lib/models/workflow"
     "cloud.o-forge.io/core/oc-lib/tools"
+    "github.com/google/uuid"
     "github.com/robfig/cron"
 )
@@ -20,8 +22,9 @@
 */
 // it's a flying object only use in a session time. It's not stored in the database
 type WorkflowSchedule struct {
+    UUID string `json:"id" validate:"required"` // ExecutionsID is the list of the executions id of the workflow
     Workflow *workflow.Workflow `json:"workflow,omitempty"` // Workflow is the workflow dependancy of the schedule
-    WorkflowExecutions []*WorkflowExecutions `json:"workflow_executions,omitempty"` // WorkflowExecutions is the list of executions of the workflow
+    WorkflowExecution []*WorkflowExecution `json:"workflow_executions,omitempty"` // WorkflowExecution is the list of executions of the workflow
     Message string `json:"message,omitempty"` // Message is the message of the schedule
     Warning string `json:"warning,omitempty"` // Warning is the warning message of the schedule
     Start time.Time `json:"start" validate:"required,ltfield=End"` // Start is the start time of the schedule, is required and must be less than the End time
@@ -36,6 +39,7 @@ func NewScheduler(start string, end string, durationInS float64, cron string) *W
         return nil
     }
     ws := &WorkflowSchedule{
+        UUID: uuid.New().String(),
         Start: s,
         DurationS: durationInS,
         Cron: cron,
@@ -47,19 +51,19 @@ func NewScheduler(start string, end string, durationInS float64, cron string) *W
     return ws
 }
-func (ws *WorkflowSchedule) CheckBooking(wfID string, request *tools.APIRequest) (bool, *workflow.Workflow, []*WorkflowExecutions, error) {
+func (ws *WorkflowSchedule) CheckBooking(wfID string, request *tools.APIRequest) (bool, *workflow.Workflow, []*WorkflowExecution, []*booking.Booking, error) {
     if request.Caller == nil && request.Caller.URLS == nil && request.Caller.URLS[tools.BOOKING] == nil || request.Caller.URLS[tools.BOOKING][tools.GET] == "" {
-        return false, nil, []*WorkflowExecutions{}, errors.New("no caller defined")
+        return false, nil, []*WorkflowExecution{}, []*booking.Booking{}, errors.New("no caller defined")
     }
     access := workflow.NewAccessor(request)
     res, code, err := access.LoadOne(wfID)
     if code != 200 {
-        return false, nil, []*WorkflowExecutions{}, errors.New("could not load the workflow with id: " + err.Error())
+        return false, nil, []*WorkflowExecution{}, []*booking.Booking{}, errors.New("could not load the workflow with id: " + err.Error())
     }
     wf := res.(*workflow.Workflow)
     longest, priceds, wf, err := wf.Planify(ws.Start, ws.End, request)
     if err != nil {
-        return false, wf, []*WorkflowExecutions{}, err
+        return false, wf, []*WorkflowExecution{}, []*booking.Booking{}, err
     }
     ws.DurationS = longest
     ws.Message = "We estimate that the workflow will start at " + ws.Start.String() + " and last " + fmt.Sprintf("%v", ws.DurationS) + " seconds."
@@ -68,10 +72,11 @@ func (ws *WorkflowSchedule) CheckBooking(wfID string, request *tools.APIRequest)
     }
     execs, err := ws.getExecutions(wf)
     if err != nil {
-        return false, wf, []*WorkflowExecutions{}, err
+        return false, wf, []*WorkflowExecution{}, []*booking.Booking{}, err
     }
+    bookings := []*booking.Booking{}
     for _, exec := range execs {
-        bookings := exec.Book(wfID, priceds)
+        bookings = append(bookings, exec.Book(ws.UUID, wfID, priceds)...)
         for _, b := range bookings {
             meth := request.Caller.URLS[tools.BOOKING][tools.GET]
             meth = strings.ReplaceAll(meth, ":id", b.ResourceID)
@@ -80,43 +85,49 @@ func (ws *WorkflowSchedule) CheckBooking(wfID string, request *tools.APIRequest)
             request.Caller.URLS[tools.BOOKING][tools.GET] = meth
             _, err := (&peer.Peer{}).LaunchPeerExecution(b.DestPeerID, b.ResourceID, tools.BOOKING, tools.GET, nil, request.Caller)
             if err != nil {
-                return false, wf, execs, err
+                return false, wf, execs, bookings, err
             }
         }
     }
-    return true, wf, execs, nil
+    return true, wf, execs, bookings, nil
 }
-func (ws *WorkflowSchedule) Schedules(wfID string, request *tools.APIRequest) (*WorkflowSchedule, *workflow.Workflow, []*WorkflowExecutions, error) {
+func (ws *WorkflowSchedule) Schedules(wfID string, request *tools.APIRequest) (*WorkflowSchedule, *workflow.Workflow, []*WorkflowExecution, error) {
     if request == nil {
-        return ws, nil, []*WorkflowExecutions{}, errors.New("no request found")
+        return ws, nil, []*WorkflowExecution{}, errors.New("no request found")
     }
     c := request.Caller
     if c == nil || c.URLS == nil || c.URLS[tools.BOOKING] == nil {
-        return ws, nil, []*WorkflowExecutions{}, errors.New("no caller defined")
+        return ws, nil, []*WorkflowExecution{}, errors.New("no caller defined")
     }
     methods := c.URLS[tools.BOOKING]
     if _, ok := methods[tools.GET]; !ok {
-        return ws, nil, []*WorkflowExecutions{}, errors.New("no path found")
+        return ws, nil, []*WorkflowExecution{}, errors.New("no path found")
     }
-    ok, wf, executions, err := ws.CheckBooking(wfID, request)
+    ok, wf, executions, bookings, err := ws.CheckBooking(wfID, request)
+    ws.WorkflowExecution = executions
     if !ok || err != nil {
-        return ws, nil, []*WorkflowExecutions{}, errors.New("could not book the workflow : " + fmt.Sprintf("%v", err))
+        return ws, nil, executions, errors.New("could not book the workflow : " + fmt.Sprintf("%v", err))
     }
     ws.Workflow = wf
-    ws.WorkflowExecutions = executions
+    for _, booking := range bookings {
+        _, err := (&peer.Peer{}).LaunchPeerExecution(booking.DestPeerID, "",
+            tools.BOOKING, tools.POST, booking.Serialize(booking), request.Caller)
+        if err != nil {
+            return ws, wf, executions, errors.New("could not launch the peer execution : " + fmt.Sprintf("%v", err))
+        }
+    }
+    fmt.Println("Schedules")
     for _, exec := range executions {
         err := exec.PurgeDraft(request)
         if err != nil {
-            return ws, nil, []*WorkflowExecutions{}, errors.New("purge draft" + fmt.Sprintf("%v", err))
+            return ws, nil, []*WorkflowExecution{}, errors.New("purge draft" + fmt.Sprintf("%v", err))
         }
-        exec.GenerateID()
         exec.StoreDraftDefault()
+        // Should DELETE the previous execution2
         utils.GenericStoreOne(exec, NewAccessor(request))
     }
-    fmt.Println("Schedules")
     return ws, wf, executions, nil
 }
@@ -132,17 +143,19 @@ VERIFY THAT WE HANDLE DIFFERENCE BETWEEN LOCATION TIME && BOOKING
 * getExecutions is a function that returns the executions of a workflow
 * it returns an array of workflow_execution.WorkflowExecution
 */
-func (ws *WorkflowSchedule) getExecutions(workflow *workflow.Workflow) ([]*WorkflowExecutions, error) {
+func (ws *WorkflowSchedule) getExecutions(workflow *workflow.Workflow) ([]*WorkflowExecution, error) {
-    workflows_executions := []*WorkflowExecutions{}
+    workflows_executions := []*WorkflowExecution{}
     dates, err := ws.getDates()
     if err != nil {
         return workflows_executions, err
     }
     for _, date := range dates {
-        obj := &WorkflowExecutions{
+        obj := &WorkflowExecution{
             AbstractObject: utils.AbstractObject{
+                UUID: uuid.New().String(), // set the uuid of the execution
                 Name: workflow.Name + "_execution_" + date.Start.String(), // set the name of the execution
             },
+            ExecutionsID: ws.UUID,
             ExecDate: date.Start, // set the execution date
             EndDate: date.End, // set the end date
             State: enum.DRAFT, // set the state to 1 (scheduled)
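
Note (not part of the diff): the schedule now carries its own UUID, generated in NewScheduler and copied into every execution (and booking) as ExecutionsID, which is what allows everything created by one scheduling request to be grouped later. A simplified, self-contained sketch of that propagation using stand-in types rather than the oc-lib ones:

    package main

    import (
        "fmt"

        "github.com/google/uuid"
    )

    // Stand-in types: only the fields relevant to the ID propagation are kept.
    type execution struct{ UUID, ExecutionsID string }
    type bookingStub struct{ UUID, ExecutionsID string }

    func main() {
        scheduleID := uuid.New().String() // set once, as in NewScheduler

        execs := []execution{
            {UUID: uuid.New().String(), ExecutionsID: scheduleID},
            {UUID: uuid.New().String(), ExecutionsID: scheduleID},
        }
        books := []bookingStub{
            {UUID: uuid.New().String(), ExecutionsID: scheduleID},
        }

        // Everything created by one scheduling request shares the same ExecutionsID.
        fmt.Println(scheduleID, len(execs), len(books))
    }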

View File

@@ -73,8 +73,10 @@ func (a *workspaceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject,
     filters := &dbs.Filters{
         Or: map[string][]dbs.Filter{
             "abstractobject.name": {{Operator: dbs.LIKE.String(), Value: data.GetName() + "_workspace"}},
+            "abstractobject.creator_id": {{Operator: dbs.EQUAL.String(), Value: a.GetPeerID()}},
         },
     }
+    // filters *dbs.Filters, word string, isDraft bool
     res, _, err := a.Search(filters, "", true) // Search for the workspace
     if err == nil && len(res) > 0 { // If the workspace already exists, return an error
         return nil, 409, errors.New("a workspace with the same name already exists")

View File

@@ -115,8 +115,8 @@ func (a *API) SubscribeRouter(infos []*beego.ControllerInfo) {
 // CheckRemotePeer checks the state of a remote peer
 func (a *API) CheckRemotePeer(url string) (State, map[string]int) {
     // Check if the database is up
-    caller := NewHTTPCaller(map[DataType]map[METHOD]string{}) // Create a new http caller
     var resp APIStatusResponse
+    caller := NewHTTPCaller(map[DataType]map[METHOD]string{}) // Create a new http caller
     b, err := caller.CallPost(url, "", map[string]interface{}{}) // Call the status endpoint of the peer
     if err != nil {
         return DEAD, map[string]int{} // If the peer is not reachable, return dead

View File

@@ -21,6 +21,11 @@ const (
     WORKSPACE_HISTORY
     ORDER
     PURCHASE_RESOURCE
+    ADMIRALTY_SOURCE
+    ADMIRALTY_TARGET
+    ADMIRALTY_SECRET
+    ADMIRALTY_KUBECONFIG
+    ADMIRALTY_NODES
 )
 var NOAPI = ""
@@ -30,6 +35,11 @@ var WORKFLOWAPI = "oc-workflow"
 var WORKSPACEAPI = "oc-workspace"
 var PEERSAPI = "oc-peer"
 var DATACENTERAPI = "oc-datacenter"
+var ADMIRALTY_SOURCEAPI = DATACENTERAPI+"/admiralty/source"
+var ADMIRALTY_TARGETAPI = DATACENTERAPI+"/admiralty/target"
+var ADMIRALTY_SECRETAPI = DATACENTERAPI+"/admiralty/secret"
+var ADMIRALTY_KUBECONFIGAPI = DATACENTERAPI+"/admiralty/kubeconfig"
+var ADMIRALTY_NODESAPI = DATACENTERAPI+"/admiralty/node"
 // Bind the standard API name to the data type
 var DefaultAPI = [...]string{
@@ -50,6 +60,11 @@ var DefaultAPI = [...]string{
     NOAPI,
     NOAPI,
     NOAPI,
+    ADMIRALTY_SOURCEAPI,
+    ADMIRALTY_TARGETAPI,
+    ADMIRALTY_SECRETAPI,
+    ADMIRALTY_KUBECONFIGAPI,
+    ADMIRALTY_NODESAPI,
 }
 // Bind the standard data name to the data type
@@ -71,6 +86,11 @@ var Str = [...]string{
     "workspace_history",
     "order",
     "purchase_resource",
+    "admiralty_source",
+    "admiralty_target",
+    "admiralty_secret",
+    "admiralty_kubeconfig",
+    "admiralty_node",
 }
 func FromInt(i int) string {
@@ -91,5 +111,5 @@ func (d DataType) EnumIndex() int {
 }
 func DataTypeList() []DataType {
-    return []DataType{DATA_RESOURCE, PROCESSING_RESOURCE, STORAGE_RESOURCE, COMPUTE_RESOURCE, WORKFLOW_RESOURCE, WORKFLOW, WORKFLOW_EXECUTION, WORKSPACE, PEER, COLLABORATIVE_AREA, RULE, BOOKING, WORKFLOW_HISTORY, WORKSPACE_HISTORY, ORDER, PURCHASE_RESOURCE}
+    return []DataType{DATA_RESOURCE, PROCESSING_RESOURCE, STORAGE_RESOURCE, COMPUTE_RESOURCE, WORKFLOW_RESOURCE, WORKFLOW, WORKFLOW_EXECUTION, WORKSPACE, PEER, COLLABORATIVE_AREA, RULE, BOOKING, WORKFLOW_HISTORY, WORKSPACE_HISTORY, ORDER, PURCHASE_RESOURCE, ADMIRALTY_SOURCE, ADMIRALTY_TARGET, ADMIRALTY_SECRET, ADMIRALTY_KUBECONFIG, ADMIRALTY_NODES}
 }
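
Note (not part of the diff): the new ADMIRALTY_* data types resolve to sub-routes of the datacenter API. A reduced sketch of the enum-to-route lookup; the indices here are illustrative and do not match oc-lib's actual enum values:

    package main

    import "fmt"

    // Reduced enum and lookup table; only the new admiralty entries are shown.
    type DataType int

    const (
        ADMIRALTY_SOURCE DataType = iota
        ADMIRALTY_TARGET
        ADMIRALTY_SECRET
        ADMIRALTY_KUBECONFIG
        ADMIRALTY_NODES
    )

    var DATACENTERAPI = "oc-datacenter"

    var defaultAPI = [...]string{
        DATACENTERAPI + "/admiralty/source",
        DATACENTERAPI + "/admiralty/target",
        DATACENTERAPI + "/admiralty/secret",
        DATACENTERAPI + "/admiralty/kubeconfig",
        DATACENTERAPI + "/admiralty/node",
    }

    func main() {
        fmt.Println(defaultAPI[ADMIRALTY_NODES]) // oc-datacenter/admiralty/node
    }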

View File

@@ -3,6 +3,7 @@ package tools
 import (
     "bytes"
     "encoding/json"
+    "fmt"
     "io"
     "net/http"
     "net/url"
@@ -51,6 +52,7 @@ var HTTPCallerInstance = &HTTPCaller{} // Singleton instance of the HTTPCaller
 type HTTPCaller struct {
     URLS map[DataType]map[METHOD]string // Map of the different methods and their urls
     Disabled bool // Disabled flag
+    LastResults map[string]interface{} // Used to store information regarding the last execution of a given method on a given data type
 }
 // NewHTTPCaller creates a new instance of the HTTP Caller
@@ -76,17 +78,33 @@ func (caller *HTTPCaller) CallGet(url string, subpath string, types ...string) (
         return nil, err
     }
     defer resp.Body.Close()
-    return io.ReadAll(resp.Body)
+    err = caller.StoreResp(resp)
+    if err != nil {
+        return nil, err
+    }
+    return caller.LastResults["body"].([]byte), nil
 }
 // CallPut calls the DELETE method on the HTTP server
 func (caller *HTTPCaller) CallDelete(url string, subpath string) ([]byte, error) {
-    resp, err := http.NewRequest("DELETE", url+subpath, nil)
+    req, err := http.NewRequest("DELETE", url+subpath, nil)
-    if err != nil || resp == nil || resp.Body == nil {
+    if err != nil {
+        return nil, err
+    }
+    client := &http.Client{}
+    resp, err := client.Do(req)
+    if err != nil || req == nil || req.Body == nil {
         return nil, err
     }
     defer resp.Body.Close()
-    return io.ReadAll(resp.Body)
+    err = caller.StoreResp(resp)
+    if err != nil {
+        return nil, err
+    }
+    return caller.LastResults["body"].([]byte), nil
 }
 // CallPost calls the POST method on the HTTP server
@@ -105,7 +123,12 @@ func (caller *HTTPCaller) CallPost(url string, subpath string, body interface{},
         return nil, err
     }
     defer resp.Body.Close()
-    return io.ReadAll(resp.Body)
+    err = caller.StoreResp(resp)
+    if err != nil {
+        return nil, err
+    }
+    return caller.LastResults["body"].([]byte), nil
 }
 // CallPost calls the POST method on the HTTP server
@@ -123,7 +146,12 @@ func (caller *HTTPCaller) CallPut(url string, subpath string, body map[string]in
         return nil, err
     }
     defer resp.Body.Close()
-    return io.ReadAll(resp.Body)
+    err = caller.StoreResp(resp)
+    if err != nil {
+        return nil, err
+    }
+    return caller.LastResults["body"].([]byte), nil
 }
 // CallRaw calls the Raw method on the HTTP server
@@ -143,7 +171,12 @@ func (caller *HTTPCaller) CallRaw(method string, url string, subpath string,
         req.AddCookie(c)
     }
     client := &http.Client{}
-    return client.Do(req)
+    resp, err := client.Do(req)
+    if err != nil {
+        return nil, err
+    }
+    return resp, nil
 }
 // CallRaw calls the Raw method on the HTTP server
@@ -163,3 +196,17 @@ func (caller *HTTPCaller) CallForm(method string, url string, subpath string,
     client := &http.Client{}
     return client.Do(req)
 }
+func (caller *HTTPCaller) StoreResp(resp *http.Response) error {
+    caller.LastResults = make(map[string]interface{})
+    caller.LastResults["header"] = resp.Header
+    caller.LastResults["code"] = resp.StatusCode
+    data, err := io.ReadAll(resp.Body)
+    if err != nil {
+        fmt.Println("Error reading the body of the last request")
+        return err
+    }
+    caller.LastResults["body"] = data
+    return nil
+}
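
Note (not part of the diff): a usage sketch of the new LastResults field, assuming the oc-lib tools package as changed above; the URL and subpath are placeholders. After any Call* method, the status code, headers, and body of the last response stay available on the caller:

    package main

    import (
        "fmt"
        "net/http"

        "cloud.o-forge.io/core/oc-lib/tools"
    )

    func main() {
        caller := tools.NewHTTPCaller(map[tools.DataType]map[tools.METHOD]string{})
        body, err := caller.CallGet("http://localhost:8080", "/status") // placeholder URL
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(string(body))
        // The same response's metadata was stored on the caller by StoreResp().
        fmt.Println(caller.LastResults["code"].(int))
        fmt.Println(caller.LastResults["header"].(http.Header))
    }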