Compare commits: 78157b80d2...bugfix/lag (218 commits)
@@ -26,12 +26,12 @@ import (
func GetConfLoader() *onion.Onion {
logger := zerolog.New(os.Stdout).With().Timestamp().Logger()
AppName := GetAppName()
EnvPrefix := strings.ToUpper(AppName[0:2]+AppName[3:]) + "_"
EnvPrefix := "OC_"
defaultConfigFile := "/etc/oc/" + AppName[3:] + ".json"
localConfigFile := "./" + AppName[3:] + ".json"
var configFile string
var o *onion.Onion
l3 := onion.NewEnvLayerPrefix("_", EnvPrefix)
l3 := GetEnvVarLayer(EnvPrefix)
l2, err := onion.NewFileLayer(localConfigFile, nil)
if err == nil {
logger.Info().Msg("Local config file found " + localConfigFile + ", overriding default file")
@@ -54,3 +54,17 @@ func GetConfLoader() *onion.Onion {
}
return o
}

func GetEnvVarLayer(prefix string) onion.Layer {
envVars := make(map[string]interface{})

for _, e := range os.Environ() {
pair := strings.SplitN(e, "=", 2)
key := pair[0]
if strings.HasPrefix(key, prefix) {
envVars[strings.TrimPrefix(key, prefix)] = pair[1]
}
}

return onion.NewMapLayer(envVars)
}
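A minimal illustrative sketch (not part of the diff): how an environment variable carrying the new "OC_" prefix would surface as a config key once the prefix is stripped, mirroring the `GetEnvVarLayer` logic added above. The variable name `OC_MONGOURL` is an assumed example, not taken from the PR.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// envVarsWithPrefix mirrors the map-building step of GetEnvVarLayer shown in the hunk above.
func envVarsWithPrefix(prefix string) map[string]interface{} {
	envVars := make(map[string]interface{})
	for _, e := range os.Environ() {
		pair := strings.SplitN(e, "=", 2)
		if strings.HasPrefix(pair[0], prefix) {
			envVars[strings.TrimPrefix(pair[0], prefix)] = pair[1]
		}
	}
	return envVars
}

func main() {
	os.Setenv("OC_MONGOURL", "mongodb://localhost:27017") // assumed example variable
	fmt.Println(envVarsWithPrefix("OC_"))                 // map[MONGOURL:mongodb://localhost:27017]
}
```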
@@ -3,7 +3,6 @@ package mongo
import (
"context"
"errors"
"fmt"
"slices"
"time"

@@ -49,7 +48,7 @@ func (m *MongoDB) Init(collections []string, config MongoConf) {
mngoCollections = collections
mngoConfig = config
if err := m.createClient(config.GetUrl(), false); err != nil {
m.Logger.Error().Msg(err.Error())
// m.Logger.Error().Msg(err.Error())
}
}

@@ -171,12 +170,12 @@ func (m *MongoDB) DeleteOne(id string, collection_name string) (int64, int, erro
filter := bson.M{"_id": id}
targetDBCollection := CollectionMap[collection_name]
opts := options.Delete().SetHint(bson.D{{Key: "_id", Value: 1}})
MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
//defer cancel()

result, err := targetDBCollection.DeleteOne(MngoCtx, filter, opts)
if err != nil {
m.Logger.Error().Msg("Couldn't insert resource: " + err.Error())
// m.Logger.Error().Msg("Couldn't insert resource: " + err.Error())
return 0, 404, err
}
return result.DeletedCount, 200, nil
@@ -192,12 +191,12 @@ func (m *MongoDB) DeleteMultiple(f map[string]interface{}, collection_name strin
}
targetDBCollection := CollectionMap[collection_name]
opts := options.Delete().SetHint(bson.D{{Key: "_id", Value: 1}})
MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
//defer cancel()

result, err := targetDBCollection.DeleteMany(MngoCtx, filter, opts)
if err != nil {
m.Logger.Error().Msg("Couldn't insert resource: " + err.Error())
// m.Logger.Error().Msg("Couldn't insert resource: " + err.Error())
return 0, 404, err
}
return result.DeletedCount, 200, nil
@@ -215,11 +214,11 @@ func (m *MongoDB) UpdateMultiple(set interface{}, filter map[string]interface{},
f = append(f, bson.E{Key: k, Value: v})
}
targetDBCollection := CollectionMap[collection_name]
MngoCtx, cancel = context.WithTimeout(context.Background(), 50*time.Second)
defer cancel()
MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
//defer cancel()
res, err := targetDBCollection.UpdateMany(MngoCtx, f, dbs.InputToBson(doc, true))
if err != nil {
m.Logger.Error().Msg("Couldn't update resource: " + err.Error())
// m.Logger.Error().Msg("Couldn't update resource: " + err.Error())
return 0, 404, err
}
return res.UpsertedCount, 200, nil
@@ -234,11 +233,11 @@ func (m *MongoDB) UpdateOne(set interface{}, id string, collection_name string)
bson.Unmarshal(b, &doc)
filter := bson.M{"_id": id}
targetDBCollection := CollectionMap[collection_name]
MngoCtx, cancel = context.WithTimeout(context.Background(), 50*time.Second)
defer cancel()
MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
//defer cancel()
_, err := targetDBCollection.UpdateOne(MngoCtx, filter, dbs.InputToBson(doc, true))
if err != nil {
m.Logger.Error().Msg("Couldn't update resource: " + err.Error())
// m.Logger.Error().Msg("Couldn't update resource: " + err.Error())
return "", 404, err
}
return id, 200, nil
@@ -253,12 +252,12 @@ func (m *MongoDB) StoreOne(obj interface{}, id string, collection_name string) (
bson.Unmarshal(b, &doc)
doc["_id"] = id
targetDBCollection := CollectionMap[collection_name]
MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
//defer cancel()

_, err := targetDBCollection.InsertOne(MngoCtx, doc)
if err != nil {
m.Logger.Error().Msg("Couldn't insert resource: " + err.Error())
// m.Logger.Error().Msg("Couldn't insert resource: " + err.Error())
return "", 409, err
}

@@ -271,12 +270,12 @@ func (m *MongoDB) LoadOne(id string, collection_name string) (*mongo.SingleResul
}
filter := bson.M{"_id": id}
targetDBCollection := CollectionMap[collection_name]
MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
//defer cancel()

res := targetDBCollection.FindOne(MngoCtx, filter)
if res.Err() != nil {
m.Logger.Error().Msg("Couldn't find resource " + id + ". Error : " + res.Err().Error())
// m.Logger.Error().Msg("Couldn't find resource " + id + ". Error : " + res.Err().Error())
err := res.Err()
return nil, 404, err
}
@@ -288,8 +287,7 @@ func (m *MongoDB) Search(filters *dbs.Filters, collection_name string) (*mongo.C
return nil, 503, err
}
opts := options.Find()
opts.SetLimit(100)
fmt.Println("Filters: ", CollectionMap, collection_name)
opts.SetLimit(1000)
targetDBCollection := CollectionMap[collection_name]
orList := bson.A{}
andList := bson.A{}
@@ -315,8 +313,8 @@ func (m *MongoDB) Search(filters *dbs.Filters, collection_name string) (*mongo.C
}
}

MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
// defer cancel()
if cursor, err := targetDBCollection.Find(
MngoCtx,
f,
@@ -338,12 +336,12 @@ func (m *MongoDB) LoadFilter(filter map[string]interface{}, collection_name stri
}
targetDBCollection := CollectionMap[collection_name]

MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
//defer cancel()

res, err := targetDBCollection.Find(MngoCtx, f)
if err != nil {
m.Logger.Error().Msg("Couldn't find any resources. Error : " + err.Error())
// m.Logger.Error().Msg("Couldn't find any resources. Error : " + err.Error())
return nil, 404, err
}
return res, 200, nil
@@ -355,12 +353,12 @@ func (m *MongoDB) LoadAll(collection_name string) (*mongo.Cursor, int, error) {
}
targetDBCollection := CollectionMap[collection_name]

MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
//defer cancel()

res, err := targetDBCollection.Find(MngoCtx, bson.D{})
if err != nil {
m.Logger.Error().Msg("Couldn't find any resources. Error : " + err.Error())
// m.Logger.Error().Msg("Couldn't find any resources. Error : " + err.Error())
return nil, 404, err
}
return res, 200, nil
@@ -20,7 +20,6 @@ import (
"cloud.o-forge.io/core/oc-lib/models/order"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
w2 "cloud.o-forge.io/core/oc-lib/models/workflow"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
@@ -197,48 +196,6 @@ func SetConfig(mongoUrl string, database string, natsUrl string, lokiUrl string,
}()
logs.CreateLogger("main")
mongo.MONGOService.Init(models.GetModelsNames(), config.GetConfig()) // init the mongo service
/*
Here we will check if the resource model is already stored in the database
If not we will store it
Resource model is the model that will define the structure of the resources
*/
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
for _, model := range []string{tools.DATA_RESOURCE.String(), tools.PROCESSING_RESOURCE.String(), tools.STORAGE_RESOURCE.String(), tools.COMPUTE_RESOURCE.String(), tools.WORKFLOW_RESOURCE.String()} {
data, code, _ := accessor.Search(nil, model, true)
if code == 404 || len(data) == 0 {
refs := map[string]string{}
m := map[string]resource_model.Model{}
// for now only processing is specified here (not an elegant way)
if model == tools.DATA_RESOURCE.String() || model == tools.STORAGE_RESOURCE.String() {
refs["path"] = "string"
}
if model == tools.PROCESSING_RESOURCE.String() {
m["command"] = resource_model.Model{
Type: "string",
ReadOnly: false,
}
m["args"] = resource_model.Model{
Type: "string",
ReadOnly: false,
}
m["env"] = resource_model.Model{
Type: "string",
ReadOnly: false,
}
m["volumes"] = resource_model.Model{
Type: "map[string]string",
ReadOnly: false,
}
}
accessor.StoreOne(&resource_model.ResourceModel{
ResourceType: model,
VarRefs: refs,
Model: map[string]map[string]resource_model.Model{
"container": m,
},
})
}
}
return cfg
}

@@ -289,21 +246,22 @@ func ToScheduler(m interface{}) (n *workflow_execution.WorkflowSchedule) {
return m.(*workflow_execution.WorkflowSchedule)
}

func (r *Request) Schedule(wfID string, start string, end string, durationInS float64, cron string) (*workflow_execution.WorkflowSchedule, error) {
scheduler := workflow_execution.NewScheduler(start, end, durationInS, cron)
if _, _, err := scheduler.Schedules(wfID, &tools.APIRequest{
func (r *Request) Schedule(wfID string, scheduler *workflow_execution.WorkflowSchedule) (*workflow_execution.WorkflowSchedule, error) {
ws, _, _, err := scheduler.Schedules(wfID, &tools.APIRequest{
Caller: r.caller,
Username: r.user,
PeerID: r.peerID,
Groups: r.groups,
}); err != nil {
})
if err != nil {
return nil, err
}
return scheduler, nil
fmt.Println("BAM", ws)
return ws, nil
}

func (r *Request) CheckBooking(wfID string, start string, end string, durationInS float64, cron string) bool {
ok, _, _, err := workflow_execution.NewScheduler(start, end, durationInS, cron).CheckBooking(wfID, &tools.APIRequest{
ok, _, _, _, err := workflow_execution.NewScheduler(start, end, durationInS, cron).CheckBooking(wfID, &tools.APIRequest{
Caller: r.caller,
Username: r.user,
PeerID: r.peerID,
@@ -605,9 +563,9 @@ func (l *LibData) ToRule() *rule.Rule {
return nil
}

func (l *LibData) ToWorkflowExecution() *workflow_execution.WorkflowExecutions {
func (l *LibData) ToWorkflowExecution() *workflow_execution.WorkflowExecution {
if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW_EXECUTION {
return l.Data.(*workflow_execution.WorkflowExecutions)
return l.Data.(*workflow_execution.WorkflowExecution)
}
return nil
}
go.mod (1 change)
@@ -38,7 +38,6 @@ require (
github.com/klauspost/compress v1.17.9 // indirect
github.com/kr/text v0.1.0 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/marcinwyszynski/geopoint v0.0.0-20140302213024-cf2a6f750c5b
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/montanaflynn/stats v0.7.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
go.sum (2 changes)
@@ -55,8 +55,6 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/marcinwyszynski/geopoint v0.0.0-20140302213024-cf2a6f750c5b h1:XBF8THPBy28s2ryI7+/Jf/847unLWxYMpJveX5Kox+0=
github.com/marcinwyszynski/geopoint v0.0.0-20140302213024-cf2a6f750c5b/go.mod h1:z1oqhOuuYpPHmUmAK2aNygKFlPdb4o3PppQnVTRFdrI=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
@@ -4,7 +4,7 @@ import (
"time"

"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/models/common"
"cloud.o-forge.io/core/oc-lib/models/common/enum"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
"go.mongodb.org/mongo-driver/bson/primitive"
@@ -14,13 +14,14 @@ import (
* Booking is a struct that represents a booking
*/
type Booking struct {
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
DestPeerID string `json:"dest_peer_id,omitempty"` // DestPeerID is the ID of the destination peer
WorkflowID string `json:"workflow_id,omitempty" bson:"workflow_id,omitempty"` // WorkflowID is the ID of the workflow
ExecutionID string `json:"execution_id,omitempty" bson:"execution_id,omitempty" validate:"required"`
State common.ScheduledType `json:"state,omitempty" bson:"state,omitempty" validate:"required"` // State is the state of the booking
ExpectedStartDate time.Time `json:"expected_start_date,omitempty" bson:"expected_start_date,omitempty" validate:"required"` // ExpectedStartDate is the expected start date of the booking
ExpectedEndDate *time.Time `json:"expected_end_date,omitempty" bson:"expected_end_date,omitempty" validate:"required"` // ExpectedEndDate is the expected end date of the booking
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
ExecutionsID string `json:"executions_id,omitempty" bson:"executions_id,omitempty" validate:"required"` // ExecutionsID is the ID of the executions
DestPeerID string `json:"dest_peer_id,omitempty"` // DestPeerID is the ID of the destination peer
WorkflowID string `json:"workflow_id,omitempty" bson:"workflow_id,omitempty"` // WorkflowID is the ID of the workflow
ExecutionID string `json:"execution_id,omitempty" bson:"execution_id,omitempty" validate:"required"`
State enum.BookingStatus `json:"state,omitempty" bson:"state,omitempty" validate:"required"` // State is the state of the booking
ExpectedStartDate time.Time `json:"expected_start_date,omitempty" bson:"expected_start_date,omitempty" validate:"required"` // ExpectedStartDate is the expected start date of the booking
ExpectedEndDate *time.Time `json:"expected_end_date,omitempty" bson:"expected_end_date,omitempty" validate:"required"` // ExpectedEndDate is the expected end date of the booking

RealStartDate *time.Time `json:"real_start_date,omitempty" bson:"real_start_date,omitempty"` // RealStartDate is the real start date of the booking
RealEndDate *time.Time `json:"real_end_date,omitempty" bson:"real_end_date,omitempty"` // RealEndDate is the real end date of the booking
@@ -41,7 +42,7 @@ func (wfa *Booking) Check(id string, start time.Time, end *time.Time, parrallelA
res, code, err := accessor.Search(&dbs.Filters{
And: map[string][]dbs.Filter{ // check if there is a booking on the same compute resource by filtering on the compute_resource_id, the state and the execution date
"resource_id": {{Operator: dbs.EQUAL.String(), Value: id}},
"state": {{Operator: dbs.EQUAL.String(), Value: common.DRAFT.EnumIndex()}},
"state": {{Operator: dbs.EQUAL.String(), Value: enum.DRAFT.EnumIndex()}},
"expected_start_date": {
{Operator: dbs.LTE.String(), Value: primitive.NewDateTimeFromTime(*end)},
{Operator: dbs.GTE.String(), Value: primitive.NewDateTimeFromTime(start)},
@@ -80,10 +81,6 @@ func (d *Booking) GetDelayOnDuration() time.Duration {
return d.GetRealDuration() - d.GetUsualDuration()
}

func (d *Booking) GetName() string {
return d.GetID() + "_" + d.ExpectedStartDate.String()
}

func (d *Booking) GetAccessor(request *tools.APIRequest) utils.Accessor {
return NewAccessor(request) // Create a new instance of the accessor
}
@@ -93,7 +90,7 @@ func (d *Booking) VerifyAuth(request *tools.APIRequest) bool {
}

func (r *Booking) StoreDraftDefault() {
r.IsDraft = true
r.IsDraft = false
}

func (r *Booking) CanUpdate(set utils.DBObject) (bool, utils.DBObject) {
@@ -1,11 +1,12 @@
package booking

import (
"errors"
"time"

"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/common"
"cloud.o-forge.io/core/oc-lib/models/common/enum"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)
@@ -33,7 +34,11 @@ func (a *bookingMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error)
}

func (a *bookingMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
return utils.GenericUpdateOne(set, id, a, &Booking{})
if set.(*Booking).State == 0 {
return nil, 400, errors.New("state is required")
}
realSet := &Booking{State: set.(*Booking).State}
return utils.GenericUpdateOne(realSet, id, a, &Booking{})
}

func (a *bookingMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
@@ -46,11 +51,16 @@ func (a *bookingMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int

func (a *bookingMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
return utils.GenericLoadOne[*Booking](id, func(d utils.DBObject) (utils.DBObject, int, error) {
now := time.Now()
now = now.Add(time.Second * -60)
if d.(*Booking).State == enum.DRAFT && now.UTC().After(d.(*Booking).ExpectedStartDate) {
return utils.GenericDeleteOne(d.GetID(), a)
}
if (d.(*Booking).ExpectedEndDate) == nil {
d.(*Booking).State = common.FORGOTTEN
d.(*Booking).State = enum.FORGOTTEN
utils.GenericRawUpdateOne(d, id, a)
} else if d.(*Booking).State == common.SCHEDULED && time.Now().UTC().After(*&d.(*Booking).ExpectedStartDate) {
d.(*Booking).State = common.DELAYED
} else if d.(*Booking).State == enum.SCHEDULED && now.UTC().After(d.(*Booking).ExpectedStartDate) {
d.(*Booking).State = enum.DELAYED
utils.GenericRawUpdateOne(d, id, a)
}
return d, 200, nil
@@ -67,8 +77,14 @@ func (a *bookingMongoAccessor) Search(filters *dbs.Filters, search string, isDra

func (a *bookingMongoAccessor) getExec() func(utils.DBObject) utils.ShallowDBObject {
return func(d utils.DBObject) utils.ShallowDBObject {
if d.(*Booking).State == common.SCHEDULED && time.Now().UTC().After(*&d.(*Booking).ExpectedStartDate) {
d.(*Booking).State = common.DELAYED
now := time.Now()
now = now.Add(time.Second * -60)
if d.(*Booking).State == enum.DRAFT && now.UTC().After(d.(*Booking).ExpectedStartDate) {
utils.GenericDeleteOne(d.GetID(), a)
return nil
}
if d.(*Booking).State == enum.SCHEDULED && now.UTC().After(d.(*Booking).ExpectedStartDate) {
d.(*Booking).State = enum.DELAYED
utils.GenericRawUpdateOne(d, d.GetID(), a)
}
return d
@@ -1,7 +1,6 @@
package collaborative_area

import (
"fmt"
"slices"
"time"

@@ -34,7 +33,7 @@ type CollaborativeArea struct {
Attributes map[string]interface{} `json:"attributes,omitempty" bson:"attributes,omitempty"` // Attributes is the attributes of the workspace (TODO)
Workspaces []string `json:"workspaces" bson:"workspaces"` // Workspaces is the workspaces of the workspace
Workflows []string `json:"workflows" bson:"workflows"` // Workflows is the workflows of the workspace
AllowedPeersGroup map[string][]string `json:"allowed_peers_group,omitempty" bson:"allowed_peers_group,omitempty"` // AllowedPeersGroup is the group of allowed peers
AllowedPeersGroup map[string][]string `json:"allowed_peers_group" bson:"allowed_peers_group"` // AllowedPeersGroup is the group of allowed peers
Rules []string `json:"rules" bson:"rules,omitempty"` // Rules is the rules of the workspace

SharedRules []*rule.Rule `json:"shared_rules,omitempty" bson:"-"` // SharedRules is the shared rules of the workspace
@@ -44,6 +43,9 @@ type CollaborativeArea struct {
}

func (ao *CollaborativeArea) Clear(peerID string) {
if ao.AllowedPeersGroup == nil {
ao.AllowedPeersGroup = map[string][]string{}
}
ao.CreatorID = peerID
if config.GetConfig().Whitelist {
ao.AllowedPeersGroup[peerID] = []string{"*"}
@@ -72,7 +74,6 @@ func (ao *CollaborativeArea) Clear(peerID string) {
func (ao *CollaborativeArea) VerifyAuth(request *tools.APIRequest) bool {
if (ao.AllowedPeersGroup != nil || config.GetConfig().Whitelist) && request != nil {
if grps, ok := ao.AllowedPeersGroup[request.PeerID]; ok || config.GetConfig().Whitelist {
fmt.Println("grps", grps, "ok", ok, "config.GetConfig().Whitelist", config.GetConfig().Whitelist)
if slices.Contains(grps, "*") || (!ok && config.GetConfig().Whitelist) {
return true
}
@@ -91,8 +92,12 @@ func (d *CollaborativeArea) GetAccessor(request *tools.APIRequest) utils.Accesso
}

func (d *CollaborativeArea) Trim() *CollaborativeArea {
if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: d.CreatorID}}).IsMySelf(); !ok {
d.AllowedPeersGroup = map[string][]string{}
}
return d
}

func (d *CollaborativeArea) StoreDraftDefault() {
d.AllowedPeersGroup = map[string][]string{
d.CreatorID: []string{"*"},
}
d.IsDraft = false
}
@@ -1,7 +1,6 @@
package collaborative_area

import (
"errors"
"fmt"
"slices"

@@ -33,10 +32,10 @@ func NewAccessor(request *tools.APIRequest) *collaborativeAreaMongoAccessor {
Request: request,
Type: tools.COLLABORATIVE_AREA,
},
workspaceAccessor: (&workspace.Workspace{}).GetAccessor(nil),
workflowAccessor: (&w.Workflow{}).GetAccessor(nil),
peerAccessor: (&peer.Peer{}).GetAccessor(nil),
ruleAccessor: (&rule.Rule{}).GetAccessor(nil),
workspaceAccessor: (&workspace.Workspace{}).GetAccessor(request),
workflowAccessor: (&w.Workflow{}).GetAccessor(request),
peerAccessor: (&peer.Peer{}).GetAccessor(request),
ruleAccessor: (&rule.Rule{}).GetAccessor(request),
}
}

@@ -67,11 +66,10 @@ func (a *collaborativeAreaMongoAccessor) StoreOne(data utils.DBObject) (utils.DB
_, id := (&peer.Peer{}).IsMySelf() // get the local peer
data.(*CollaborativeArea).Clear(id) // set the creator
// retrieve or proper peer
dd, code, err := a.peerAccessor.Search(nil, "0", true)
if code != 200 || len(dd) == 0 {
return nil, code, errors.New("Could not retrieve the peer" + err.Error())
if data.(*CollaborativeArea).CollaborativeAreaRule != nil {
data.(*CollaborativeArea).CollaborativeAreaRule = &CollaborativeAreaRule{}
}
data.(*CollaborativeArea).CollaborativeAreaRule.Creator = dd[0].GetID()
data.(*CollaborativeArea).CollaborativeAreaRule.Creator = id
d, code, err := utils.GenericStoreOne(data.(*CollaborativeArea).Trim(), a)
if code == 200 {
a.sharedWorkflow(d.(*CollaborativeArea), d.GetID()) // create all shared workflows
@@ -93,6 +91,7 @@ func filterEnrich[T utils.ShallowDBObject](arr []string, isDrafted bool, a utils
"abstractobject.id": {{Operator: dbs.IN.String(), Value: arr}},
},
}, "", isDrafted)
fmt.Println(res, arr, isDrafted, a)
if code == 200 {
for _, r := range res {
new = append(new, r.(T))
@@ -102,38 +101,46 @@ func filterEnrich[T utils.ShallowDBObject](arr []string, isDrafted bool, a utils
}

// enrich is a function that enriches the CollaborativeArea with the shared objects
func (a *collaborativeAreaMongoAccessor) enrich(sharedWorkspace *CollaborativeArea, isDrafted bool) *CollaborativeArea {
sharedWorkspace.SharedWorkspaces = append(sharedWorkspace.SharedWorkspaces,
filterEnrich[*workspace.Workspace](sharedWorkspace.Workspaces, isDrafted, a.workspaceAccessor)...)
sharedWorkspace.SharedWorkflows = append(sharedWorkspace.SharedWorkflows,
filterEnrich[*workflow.Workflow](sharedWorkspace.Workflows, isDrafted, a.workflowAccessor)...)
func (a *collaborativeAreaMongoAccessor) enrich(sharedWorkspace *CollaborativeArea, isDrafted bool, request *tools.APIRequest) *CollaborativeArea {
sharedWorkspace.SharedWorkspaces = filterEnrich[*workspace.Workspace](sharedWorkspace.Workspaces, isDrafted, a.workspaceAccessor)
sharedWorkspace.SharedWorkflows = filterEnrich[*workflow.Workflow](sharedWorkspace.Workflows, isDrafted, a.workflowAccessor)
peerskey := []string{}
for k := range sharedWorkspace.AllowedPeersGroup {
peerskey = append(peerskey, k)
fmt.Println("PEERS 1", sharedWorkspace.AllowedPeersGroup)
for k, v := range sharedWorkspace.AllowedPeersGroup {
canFound := false
for _, t := range request.Groups {
if slices.Contains(v, t) {
canFound = true
break
}
}
fmt.Println("PEERS 2", canFound, v)
if slices.Contains(v, "*") || canFound {
peerskey = append(peerskey, k)
}
}
sharedWorkspace.SharedPeers = append(sharedWorkspace.SharedPeers,
filterEnrich[*peer.Peer](peerskey, isDrafted, a.peerAccessor)...)
sharedWorkspace.SharedRules = append(sharedWorkspace.SharedRules,
filterEnrich[*rule.Rule](sharedWorkspace.Rules, isDrafted, a.ruleAccessor)...)
fmt.Println("PEERS", peerskey)
sharedWorkspace.SharedPeers = filterEnrich[*peer.Peer](peerskey, isDrafted, a.peerAccessor)
sharedWorkspace.SharedRules = filterEnrich[*rule.Rule](sharedWorkspace.Rules, isDrafted, a.ruleAccessor)
return sharedWorkspace
}

func (a *collaborativeAreaMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
return utils.GenericLoadOne[*CollaborativeArea](id, func(d utils.DBObject) (utils.DBObject, int, error) {
return a.enrich(d.(*CollaborativeArea), true), 200, nil
return a.enrich(d.(*CollaborativeArea), false, a.Request), 200, nil
}, a)
}

func (a *collaborativeAreaMongoAccessor) LoadAll(isDrafted bool) ([]utils.ShallowDBObject, int, error) {
return utils.GenericLoadAll[*CollaborativeArea](func(d utils.DBObject) utils.ShallowDBObject {
return a.enrich(d.(*CollaborativeArea), true)
return a.enrich(d.(*CollaborativeArea), isDrafted, a.Request)
}, isDrafted, a)
}

func (a *collaborativeAreaMongoAccessor) Search(filters *dbs.Filters, search string, isDrafted bool) ([]utils.ShallowDBObject, int, error) {
return utils.GenericSearch[*CollaborativeArea](filters, search, (&CollaborativeArea{}).GetObjectFilters(search),
func(d utils.DBObject) utils.ShallowDBObject {
return a.enrich(d.(*CollaborativeArea), true)
return a.enrich(d.(*CollaborativeArea), isDrafted, a.Request)
}, isDrafted, a)
}
@@ -1,33 +0,0 @@
package common

// CPU is a struct that represents a CPU
type CPU struct {
Model string `bson:"platform,omitempty" json:"platform,omitempty"`
FrequencyGhz float64 `bson:"frenquency,omitempty" json:"frenquency,omitempty"`
Cores int `bson:"cores,omitempty" json:"cores,omitempty"`
Architecture string `bson:"architecture,omitempty" json:"architecture,omitempty"`
}

type RAM struct {
SizeGb float64 `bson:"size,omitempty" json:"size,omitempty" description:"Units in MB"`
Ecc bool `bson:"ecc" json:"ecc" default:"true"`
}

type GPU struct {
Model string `bson:"platform,omitempty" json:"platform,omitempty"`
MemoryGb float64 `bson:"memory,omitempty" json:"memory,omitempty" description:"Units in MB"`
}

type InfrastructureType int

const (
DOCKER InfrastructureType = iota
KUBERNETES
SLURM
HW
CONDOR
)

func (t InfrastructureType) String() string {
return [...]string{"DOCKER", "KUBERNETES", "SLURM", "HW", "CONDOR"}[t]
}
models/common/enum/infrastructure.go (new file, 20 lines)
@@ -0,0 +1,20 @@
package enum

type InfrastructureType int

const (
DOCKER InfrastructureType = iota
KUBERNETES
SLURM
HW
CONDOR
)

func (t InfrastructureType) String() string {
return [...]string{"DOCKER", "KUBERNETES", "SLURM", "HW", "CONDOR"}[t]
}

// get list of all infrastructure types
func InfrastructureList() []InfrastructureType {
return []InfrastructureType{DOCKER, KUBERNETES, SLURM, HW, CONDOR}
}
@@ -1,4 +1,4 @@
package common
package enum

type StorageSize int

@@ -7,12 +7,23 @@ const (
GB StorageSize = iota
MB
KB
TB
)

var argoType = [...]string{
"Gi",
"Mi",
"Ki",
"Ti",
}

// Size to string
func (t StorageSize) String() string {
return [...]string{"GB", "MB", "KB", "TB"}[t]
}

func SizeList() []StorageSize {
return []StorageSize{GB, MB, KB, TB}
}

// New creates a new instance of the StorageResource struct
@@ -31,9 +42,15 @@ const (
S3
MEMORY
HARDWARE
AZURE
GCS
)

// String() - Returns the string representation of the storage type
func (t StorageType) String() string {
return [...]string{"FILE", "STREAM", "API", "DATABASE", "S3", "MEMORY", "HARDWARE"}[t]
return [...]string{"FILE", "STREAM", "API", "DATABASE", "S3", "MEMORY", "HARDWARE", "AZURE", "GCS"}[t]
}

func TypeList() []StorageType {
return []StorageType{FILE, STREAM, API, DATABASE, S3, MEMORY, HARDWARE, AZURE, GCS}
}
models/common/enum/status.go (new file, 64 lines)
@@ -0,0 +1,64 @@
package enum

type CompletionStatus int

const (
DRAFTED CompletionStatus = iota
PENDING
CANCEL
PARTIAL
PAID
DISPUTED
OVERDUE
REFUND
)

func (d CompletionStatus) String() string {
return [...]string{"drafted", "pending", "cancel", "partial", "paid", "disputed", "overdue", "refund"}[d]
}

func CompletionStatusList() []CompletionStatus {
return []CompletionStatus{DRAFTED, PENDING, CANCEL, PARTIAL, PAID, DISPUTED, OVERDUE, REFUND}
}

type BookingStatus int

const (
DRAFT BookingStatus = iota
SCHEDULED
STARTED
FAILURE
SUCCESS
FORGOTTEN
DELAYED
CANCELLED
)

var str = [...]string{
"draft",
"scheduled",
"started",
"failure",
"success",
"forgotten",
"delayed",
"cancelled",
}

func FromInt(i int) string {
return str[i]
}

func (d BookingStatus) String() string {
return str[d]
}

// EnumIndex - Creating common behavior - give the type a EnumIndex functio
func (d BookingStatus) EnumIndex() int {
return int(d)
}

// List
func StatusList() []BookingStatus {
return []BookingStatus{DRAFT, SCHEDULED, STARTED, FAILURE, SUCCESS, FORGOTTEN, DELAYED, CANCELLED}
}
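A small sketch of how the new `enum.BookingStatus` values behave, based only on the definitions added above; the import path is the one that appears in the booking diffs.

```go
package main

import (
	"fmt"

	"cloud.o-forge.io/core/oc-lib/models/common/enum"
)

func main() {
	s := enum.SCHEDULED
	fmt.Println(s.String())      // "scheduled"
	fmt.Println(s.EnumIndex())   // 1
	fmt.Println(enum.FromInt(6)) // "delayed"
}
```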
@@ -1,4 +1,4 @@
package common
package models

type Container struct {
Image string `json:"image,omitempty" bson:"image,omitempty"` // Image is the container image TEMPO
models/common/models/devices.go (new file, 20 lines)
@@ -0,0 +1,20 @@
package models

// CPU is a struct that represents a CPU
type CPU struct {
Model string `bson:"model,omitempty" json:"model,omitempty"`
FrequencyGhz float64 `bson:"frequency,omitempty" json:"frequency,omitempty"`
Cores int `bson:"cores,omitempty" json:"cores,omitempty"`
Architecture string `bson:"architecture,omitempty" json:"architecture,omitempty"`
}

type RAM struct {
SizeGb float64 `bson:"size,omitempty" json:"size,omitempty" description:"Units in MB"`
Ecc bool `bson:"ecc" json:"ecc" default:"true"`
}

type GPU struct {
Model string `bson:"model,omitempty" json:"model,omitempty"`
MemoryGb float64 `bson:"memory,omitempty" json:"memory,omitempty" description:"Units in MB"`
Cores map[string]int `bson:"cores,omitempty" json:"cores,omitempty"`
}
models/common/models/inoutputs.go (new file, 21 lines)
@@ -0,0 +1,21 @@
package models

type Artifact struct {
AttrPath string `json:"attr_path,omitempty" bson:"attr_path,omitempty" validate:"required"`
AttrFrom string `json:"from_path,omitempty" bson:"from_path,omitempty"`
Readonly bool `json:"readonly" bson:"readonly" default:"true"`
}

type Param struct {
Name string `json:"name" bson:"name" validate:"required"`
Attr string `json:"attr,omitempty" bson:"attr,omitempty"`
Value string `json:"value,omitempty" bson:"value,omitempty"`
Origin string `json:"origin,omitempty" bson:"origin,omitempty"`
Readonly bool `json:"readonly" bson:"readonly" default:"true"`
Optionnal bool `json:"optionnal" bson:"optionnal" default:"true"`
}

type InOutputs struct {
Params []Param `json:"parameters" bson:"parameters"`
Artifacts []Artifact `json:"artifacts" bson:"artifacts"`
}
models/common/planner.go (new file, 42 lines)
@@ -0,0 +1,42 @@
package common

import (
"time"

"cloud.o-forge.io/core/oc-lib/models/common/pricing"
"cloud.o-forge.io/core/oc-lib/tools"
)

func GetPlannerNearestStart(start time.Time, planned map[tools.DataType]map[string]pricing.PricedItemITF, request *tools.APIRequest) float64 {
near := float64(10000000000) // set a high value
for _, items := range planned { // loop through the planned items
for _, priced := range items { // loop through the priced items
if priced.GetLocationStart() == nil { // if the start is nil,
continue // skip the iteration
}
newS := priced.GetLocationStart() // get the start
if newS.Sub(start).Seconds() < near { // if the difference between the start and the new start is less than the nearest start
near = newS.Sub(start).Seconds()
}
}
}
return near
}

func GetPlannerLongestTime(end *time.Time, planned map[tools.DataType]map[string]pricing.PricedItemITF, request *tools.APIRequest) float64 {
if end == nil {
return -1
}
longestTime := float64(0)
for _, priced := range planned[tools.PROCESSING_RESOURCE] {
if priced.GetLocationEnd() == nil {
continue
}
newS := priced.GetLocationEnd()
if end == nil && longestTime < newS.Sub(*end).Seconds() {
longestTime = newS.Sub(*end).Seconds()
}
// get the nearest start from start var
}
return longestTime
}
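A standalone sketch of the "nearest start" computation that `GetPlannerNearestStart` above performs, using plain `time.Time` values instead of `PricedItemITF`; the large sentinel value mirrors the one in the new file.

```go
package main

import (
	"fmt"
	"time"
)

// nearestStart returns the smallest offset, in seconds, between the reference
// start and any planned start time.
func nearestStart(start time.Time, planned []time.Time) float64 {
	near := float64(10000000000) // high sentinel, as in the diff
	for _, s := range planned {
		if diff := s.Sub(start).Seconds(); diff < near {
			near = diff
		}
	}
	return near
}

func main() {
	now := time.Now()
	starts := []time.Time{now.Add(90 * time.Second), now.Add(30 * time.Second)}
	fmt.Println(nearestStart(now, starts)) // 30
}
```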
@@ -5,7 +5,6 @@ import (
)

type PricingProfileITF interface {
GetID() string
GetPrice(quantity float64, val float64, start time.Time, end time.Time, params ...string) (float64, error)
IsPurchased() bool
GetOverrideStrategyValue() int
@@ -19,15 +18,18 @@ const (
REFUND_ON_EARLY_END
)

type AccessPricingProfile[T Strategy] struct { // only use for acces such as : DATA && PROCESSING
ID string `json:"id,omitempty" bson:"id,omitempty"` // ID is the ID of the pricing
Pricing PricingStrategy[T] `json:"price,omitempty" bson:"price,omitempty"` // Price is the price of the resource
DefaultRefund RefundType `json:"default_refund" bson:"default_refund"` // DefaultRefund is the default refund type of the pricing
RefundRatio int32 `json:"refund_ratio" bson:"refund_ratio" default:"0"` // RefundRatio is the refund ratio if missing
func (t RefundType) String() string {
return [...]string{"REFUND ON DEAD END", "REFUND ON ERROR", "REFUND ON EARLY END"}[t]
}

func (b *AccessPricingProfile[T]) GetID() string {
return b.ID
func RefundTypeList() []RefundType {
return []RefundType{REFUND_DEAD_END, REFUND_ON_ERROR, REFUND_ON_EARLY_END}
}

type AccessPricingProfile[T Strategy] struct { // only use for acces such as : DATA && PROCESSING
Pricing PricingStrategy[T] `json:"pricing,omitempty" bson:"pricing,omitempty"` // Price is the price of the resource
DefaultRefund RefundType `json:"default_refund" bson:"default_refund"` // DefaultRefund is the default refund type of the pricing
RefundRatio int32 `json:"refund_ratio" bson:"refund_ratio" default:"0"` // RefundRatio is the refund ratio if missing
}

func (b *AccessPricingProfile[T]) GetOverrideStrategyValue() int {
@@ -42,17 +44,21 @@ const (
GARANTED
)

func ExploitPrivilegeStrategyList() []ExploitPrivilegeStrategy {
return []ExploitPrivilegeStrategy{BASIC, GARANTED_ON_DELAY, GARANTED}
}

func (t ExploitPrivilegeStrategy) String() string {
return [...]string{"BASIC", "GARANTED_ON_DELAY", "GARANTED"}[t]
return [...]string{"NO GARANTY", "GARANTED ON SPECIFIC DELAY", "GARANTED"}[t]
}

type ExploitPricingProfile[T Strategy] struct { // only use for exploit such as : STORAGE, COMPUTE, WORKFLOW
AccessPricingProfile[T]
AdditionnalRefundTypes []RefundType `json:"refund_types" bson:"refund_types"` // RefundTypes is the refund types of the pricing

PrivilegeStrategy ExploitPrivilegeStrategy `json:"privilege_strategy,omitempty" bson:"privilege_strategy,omitempty"` // Strategy is the strategy of the pricing
GarantedDelaySecond uint
PrivilegeStrategy ExploitPrivilegeStrategy `json:"privilege_strategy,omitempty" bson:"privilege_strategy,omitempty"` // Strategy is the strategy of the pricing
GarantedDelaySecond uint `json:"garanted_delay_second,omitempty" bson:"garanted_delay_second,omitempty"` // GarantedDelaySecond is the garanted delay of the pricing

Exceeding bool
Exceeding bool `json:"exceeding" bson:"exceeding"` // Exceeding is the exceeding of the bill
ExceedingRatio int32 `json:"exceeding_ratio" bson:"exceeding_ratio" default:"0"` // ExceedingRatio is the exceeding ratio of the bill
}
@@ -15,6 +15,14 @@ const (
PAY_PER_USE
)

func (t BuyingStrategy) String() string {
return [...]string{"UNLIMITED", "SUBSCRIPTION", "PAY PER USE"}[t]
}

func BuyingStrategyList() []BuyingStrategy {
return []BuyingStrategy{UNLIMITED, SUBSCRIPTION, PAY_PER_USE}
}

type Strategy interface {
GetStrategy() string
GetStrategyValue() int
@@ -32,6 +40,14 @@ const (
PER_MONTH
)

func (t TimePricingStrategy) String() string {
return [...]string{"ONCE", "PER SECOND", "PER MINUTE", "PER HOUR", "PER DAY", "PER WEEK", "PER MONTH"}[t]
}

func TimePricingStrategyList() []TimePricingStrategy {
return []TimePricingStrategy{ONCE, PER_SECOND, PER_MINUTE, PER_HOUR, PER_DAY, PER_WEEK, PER_MONTH}
}

func (t TimePricingStrategy) GetStrategy() string {
return [...]string{"ONCE", "PER_SECOND", "PER_MINUTE", "PER_HOUR", "PER_DAY", "PER_WEEK", "PER_MONTH"}[t]
}
@@ -82,38 +98,17 @@ func BookingEstimation(t TimePricingStrategy, price float64, locationDurationInS
case PER_MONTH:
return p * float64(locationDurationInSecond/2592000), nil
}
return 0, errors.New("Pricing strategy not found")
return 0, errors.New("pricing strategy not found")
}

// hmmmm
type PricingStrategy[T Strategy] struct {
Price float64 `json:"Price" bson:"Price" default:"0"` // Price is the Price of the pricing
Price float64 `json:"price" bson:"price" default:"0"` // Price is the Price of the pricing
Currency string `json:"currency" bson:"currency" default:"USD"` // Currency is the currency of the pricing
BuyingStrategy BuyingStrategy `json:"buying_strategy" bson:"buying_strategy" default:"0"` // BuyingStrategy is the buying strategy of the pricing
TimePricingStrategy TimePricingStrategy `json:"time_pricing_strategy" bson:"time_pricing_strategy" default:"0"` // TimePricingStrategy is the time pricing strategy of the pricing
OverrideStrategy T `json:"override_strategy" bson:"override_strategy" default:"-1"` // Modulation is the modulation of the pricing
}

func (p PricingStrategy[T]) SetStrategy(Price float64, BuyingStrategy BuyingStrategy, TimePricingStrategy TimePricingStrategy) error {
if TimePricingStrategy == ONCE && (BuyingStrategy != UNLIMITED || BuyingStrategy != PAY_PER_USE) {
return errors.New("time pricing strategy can only be set to ONCE if buying strategy is UNLIMITED or PAY_PER_USE")
} else if BuyingStrategy == SUBSCRIPTION && (TimePricingStrategy == ONCE) {
return errors.New("subscription duration in second must be set if buying strategy is SUBSCRIPTION")
}
p.Price = Price
p.BuyingStrategy = BuyingStrategy
p.TimePricingStrategy = TimePricingStrategy
return nil
}

func (p PricingStrategy[T]) SetSpecificPerUseStrategy(strategy T) error {
if p.BuyingStrategy == UNLIMITED {
return errors.New("UNLIMITED buying strategy can't have a specific strategy, Price is set on buying")
}
p.OverrideStrategy = strategy
return nil
}

// QUANTITY can be how many of gb core per example
func (p PricingStrategy[T]) GetPrice(amountOfData float64, bookingTimeDuration float64, start time.Time, end *time.Time) (float64, error) {
if p.BuyingStrategy == SUBSCRIPTION {
return BookingEstimation(p.GetTimePricingStrategy(), p.Price*float64(amountOfData), bookingTimeDuration, start, end)
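A rough sketch of the per-duration estimation pattern visible in the `BookingEstimation` hunk above (a unit price multiplied by the number of billing units in the booked duration). The unit table below is an assumption for illustration; only the PER_MONTH divisor of 2592000 seconds appears in the diff, and the real function switches on `TimePricingStrategy` rather than taking the divisor as a parameter.

```go
package main

import "fmt"

// estimate applies the price per billing unit to a booked duration,
// expressed in seconds, given how many seconds one unit covers (assumed helper).
func estimate(pricePerUnit float64, durationInSecond float64, secondsPerUnit float64) float64 {
	return pricePerUnit * (durationInSecond / secondsPerUnit)
}

func main() {
	// e.g. 0.05 per hour for a 2-hour booking
	fmt.Println(estimate(0.05, 7200, 3600)) // 0.1
}
```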
@@ -1,38 +0,0 @@
package common

type ScheduledType int

const (
DRAFT ScheduledType = iota
SCHEDULED
STARTED
FAILURE
SUCCESS
FORGOTTEN
DELAYED
CANCELLED
)

var str = [...]string{
"draft",
"scheduled",
"started",
"failure",
"success",
"forgotten",
"delayed",
"cancelled",
}

func FromInt(i int) string {
return str[i]
}

func (d ScheduledType) String() string {
return str[d]
}

// EnumIndex - Creating common behavior - give the type a EnumIndex functio
func (d ScheduledType) EnumIndex() int {
return int(d)
}
@@ -3,6 +3,7 @@ package models
import (
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/order"
"cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource"
"cloud.o-forge.io/core/oc-lib/tools"

"cloud.o-forge.io/core/oc-lib/models/booking"
@@ -10,7 +11,6 @@ import (
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
"cloud.o-forge.io/core/oc-lib/models/peer"
resource "cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
w2 "cloud.o-forge.io/core/oc-lib/models/workflow"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
@@ -28,9 +28,8 @@ var models = map[string]func() utils.DBObject{
tools.STORAGE_RESOURCE.String(): func() utils.DBObject { return &resource.StorageResource{} },
tools.PROCESSING_RESOURCE.String(): func() utils.DBObject { return &resource.ProcessingResource{} },
tools.WORKFLOW.String(): func() utils.DBObject { return &w2.Workflow{} },
tools.WORKFLOW_EXECUTION.String(): func() utils.DBObject { return &workflow_execution.WorkflowExecutions{} },
tools.WORKFLOW_EXECUTION.String(): func() utils.DBObject { return &workflow_execution.WorkflowExecution{} },
tools.WORKSPACE.String(): func() utils.DBObject { return &w3.Workspace{} },
tools.RESOURCE_MODEL.String(): func() utils.DBObject { return &resource_model.ResourceModel{} },
tools.PEER.String(): func() utils.DBObject { return &peer.Peer{} },
tools.COLLABORATIVE_AREA.String(): func() utils.DBObject { return &collaborative_area.CollaborativeArea{} },
tools.RULE.String(): func() utils.DBObject { return &rule.Rule{} },
@@ -38,6 +37,7 @@ var models = map[string]func() utils.DBObject{
tools.WORKFLOW_HISTORY.String(): func() utils.DBObject { return &w2.WorkflowHistory{} },
tools.WORKSPACE_HISTORY.String(): func() utils.DBObject { return &w3.WorkspaceHistory{} },
tools.ORDER.String(): func() utils.DBObject { return &order.Order{} },
tools.PURCHASE_RESOURCE.String(): func() utils.DBObject { return &purchase_resource.PurchaseResource{} },
}

// Model returns the model object based on the model type
@@ -8,6 +8,7 @@ import (
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/models/booking"
|
||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
||||
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
|
||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource"
|
||||
@@ -20,24 +21,12 @@ import (
|
||||
* Booking is a struct that represents a booking
|
||||
*/
|
||||
|
||||
type OrderStatus = int
|
||||
|
||||
const (
|
||||
DRAFT OrderStatus = iota
|
||||
PENDING
|
||||
CANCELLED
|
||||
PARTIAL
|
||||
PAID
|
||||
DISPUTED
|
||||
OVERDUE
|
||||
REFUND
|
||||
)
|
||||
|
||||
type Order struct {
|
||||
utils.AbstractObject
|
||||
OrderBy string `json:"order_by" bson:"order_by" validate:"required"`
|
||||
WorkflowID string `json:"workflow_id" bson:"workflow_id" validate:"required"`
|
||||
WorkflowExecutionIDs []string `json:"workflow_execution_ids" bson:"workflow_execution_ids" validate:"required"`
|
||||
Status OrderStatus `json:"status" bson:"status" default:"0"`
|
||||
Status enum.CompletionStatus `json:"status" bson:"status" default:"0"`
|
||||
SubOrders map[string]*PeerOrder `json:"sub_orders" bson:"sub_orders"`
|
||||
Total float64 `json:"total" bson:"total" validate:"required"`
|
||||
}
@@ -69,7 +58,7 @@ func (o *Order) Pay(scheduler *workflow_execution.WorkflowSchedule, request *too
if _, err := o.draftBookOrder(scheduler, request); err != nil {
return err
}
o.Status = PENDING
o.Status = enum.PENDING
_, code, err := o.GetAccessor(request).UpdateOne(o, o.GetID())
if code != 200 || err != nil {
return errors.New("could not update the order" + fmt.Sprintf("%v", err))
@@ -79,10 +68,10 @@ func (o *Order) Pay(scheduler *workflow_execution.WorkflowSchedule, request *too
} else {
o.IsDraft = false
}
for _, exec := range scheduler.WorkflowExecutions {
for _, exec := range scheduler.WorkflowExecution {
exec.IsDraft = false
_, code, err := utils.GenericUpdateOne(exec, exec.GetID(),
workflow_execution.NewAccessor(request), &workflow_execution.WorkflowExecutions{})
workflow_execution.NewAccessor(request), &workflow_execution.WorkflowExecution{})
if code != 200 || err != nil {
return errors.New("could not update the workflow execution" + fmt.Sprintf("%v", err))
}
@@ -102,23 +91,25 @@ func (o *Order) draftStoreFromModel(scheduler *workflow_execution.WorkflowSchedu
if request == nil {
return errors.New("no request found")
}
if scheduler.Workflow.Graph == nil { // if the workflow has no graph, return an error
fmt.Println("Drafting order", scheduler.Workflow)
if scheduler.Workflow == nil || scheduler.Workflow.Graph == nil { // if the workflow has no graph, return an error
return errors.New("no graph found")
}
o.SetName()
o.WorkflowID = scheduler.Workflow.GetID()
o.IsDraft = true
o.OrderBy = request.Username
o.OrderBy = request.PeerID
o.WorkflowExecutionIDs = []string{} // create an array of ids
for _, exec := range scheduler.WorkflowExecutions {
for _, exec := range scheduler.WorkflowExecution {
o.WorkflowExecutionIDs = append(o.WorkflowExecutionIDs, exec.GetID())
}
// set the name of the order
resourcesByPeer := map[string][]pricing.PricedItemITF{} // create a map of resources by peer

processings := scheduler.Workflow.GetPricedItem(scheduler.Workflow.IsProcessing, request) // get the processing items
datas := scheduler.Workflow.GetPricedItem(scheduler.Workflow.IsData, request) // get the data items
storages := scheduler.Workflow.GetPricedItem(scheduler.Workflow.IsStorage, request) // get the storage items
workflows := scheduler.Workflow.GetPricedItem(scheduler.Workflow.IsWorkflow, request) // get the workflow items
processings := scheduler.Workflow.GetPricedItem(scheduler.Workflow.Graph.IsProcessing, request) // get the processing items
datas := scheduler.Workflow.GetPricedItem(scheduler.Workflow.Graph.IsData, request) // get the data items
storages := scheduler.Workflow.GetPricedItem(scheduler.Workflow.Graph.IsStorage, request) // get the storage items
workflows := scheduler.Workflow.GetPricedItem(scheduler.Workflow.Graph.IsWorkflow, request) // get the workflow items
for _, items := range []map[string]pricing.PricedItemITF{processings, datas, storages, workflows} {
for _, item := range items {
if _, ok := resourcesByPeer[item.GetCreatorID()]; !ok {
@@ -129,13 +120,16 @@ func (o *Order) draftStoreFromModel(scheduler *workflow_execution.WorkflowSchedu
}
for peerID, resources := range resourcesByPeer {
peerOrder := &PeerOrder{
Status: DRAFT,
Status: enum.DRAFTED,
PeerID: peerID,
}
peerOrder.GenerateID()
for _, resource := range resources {
peerOrder.AddItem(resource, len(resources)) // TODO SPECIALS REF ADDITIONALS NOTES
}
if o.SubOrders == nil {
o.SubOrders = map[string]*PeerOrder{}
}
o.SubOrders[peerOrder.GetID()] = peerOrder
}
// search an order with same user name and same session id
@@ -146,7 +140,8 @@ func (o *Order) draftStoreFromModel(scheduler *workflow_execution.WorkflowSchedu
// should store the order
res, code, err := o.GetAccessor(request).Search(&dbs.Filters{
And: map[string][]dbs.Filter{
"order_by": {{Operator: dbs.EQUAL.String(), Value: request.Username}},
"workflow_id": {{Operator: dbs.EQUAL.String(), Value: o.WorkflowID}},
"order_by": {{Operator: dbs.EQUAL.String(), Value: request.PeerID}},
},
}, "", o.IsDraft)
if code != 200 || err != nil {
@@ -171,12 +166,12 @@ func (o *Order) draftBookOrder(scheduler *workflow_execution.WorkflowSchedule, r
if request == nil {
return draftedBookings, errors.New("no request found")
}
for _, exec := range scheduler.WorkflowExecutions {
for _, exec := range scheduler.WorkflowExecution {
_, priceds, _, err := scheduler.Workflow.Planify(exec.ExecDate, exec.EndDate, request)
if err != nil {
return draftedBookings, errors.New("could not planify the workflow" + fmt.Sprintf("%v", err))
}
bookings := exec.Book(scheduler.Workflow.UUID, priceds)
bookings := exec.Book(scheduler.UUID, scheduler.Workflow.UUID, priceds)
for _, booking := range bookings {
_, err := (&peer.Peer{}).LaunchPeerExecution(booking.DestPeerID, "",
tools.BOOKING, tools.POST, booking.Serialize(booking), request.Caller)
@@ -231,13 +226,13 @@ func (d *Order) pay(request *tools.APIRequest) error {
if res.Error != "" {
errs += res.Error
}
if res.Status != PAID {
if res.Status != enum.PAID {
gotAnUnpaid = true
}
d.Status = PARTIAL
d.Status = enum.PARTIAL
d.SubOrders[res.GetID()] = res
if count == len(d.SubOrders) && !gotAnUnpaid {
d.Status = PAID
d.Status = enum.PAID
}
}
}
@@ -250,20 +245,20 @@ func (d *Order) pay(request *tools.APIRequest) error {

type PeerOrder struct {
utils.AbstractObject
Error string `json:"error,omitempty" bson:"error,omitempty"`
PeerID string `json:"peer_id,omitempty" bson:"peer_id,omitempty"`
Status OrderStatus `json:"status" bson:"status" default:"0"`
BillingAddress string `json:"billing_address,omitempty" bson:"billing_address,omitempty"`
Items []*PeerItemOrder `json:"items,omitempty" bson:"items,omitempty"`
Total float64 `json:"total,omitempty" bson:"total,omitempty"`
Error string `json:"error,omitempty" bson:"error,omitempty"`
PeerID string `json:"peer_id,omitempty" bson:"peer_id,omitempty"`
Status enum.CompletionStatus `json:"status" bson:"status" default:"0"`
BillingAddress string `json:"billing_address,omitempty" bson:"billing_address,omitempty"`
Items []*PeerItemOrder `json:"items,omitempty" bson:"items,omitempty"`
Total float64 `json:"total,omitempty" bson:"total,omitempty"`
}

func (d *PeerOrder) Pay(request *tools.APIRequest, response chan *PeerOrder, wg *sync.WaitGroup) {
d.Status = PENDING
d.Status = enum.PENDING
go func() {
// DO SOMETHING TO PAY ON BLOCKCHAIN OR WHATEVER ON RETURN UPDATE STATUS
d.Status = PAID // TO REMOVE LATER IT'S A MOCK
if d.Status == PAID {
d.Status = enum.PAID // TO REMOVE LATER IT'S A MOCK
if d.Status == enum.PAID {
for _, b := range d.Items {
if !b.Item.IsPurchased() {
continue
@@ -277,7 +272,7 @@ func (d *PeerOrder) Pay(request *tools.APIRequest, response chan *PeerOrder, wg
}
}

if d.Status != PENDING {
if d.Status != enum.PENDING {
response <- d
}
wg.Done()
@@ -333,4 +328,5 @@ func (d *PeerItemOrder) GetPrice(request *tools.APIRequest) (float64, error) {
return p * float64(d.Quantity), nil
}

// WTF HOW TO SELECT THE RIGHT PRICE ???
// SHOULD SET A BUYING STATUS WHEN PAYMENT IS VALIDATED

@@ -37,6 +37,10 @@ type Peer struct {
FailedExecution []PeerExecution `json:"failed_execution" bson:"failed_execution"` // FailedExecution is the list of failed executions, to be retried
}

func (ao *Peer) VerifyAuth(request *tools.APIRequest) bool {
return true
}

// AddExecution adds an execution to the list of failed executions
func (ao *Peer) AddExecution(exec PeerExecution) {
found := false
@@ -73,7 +77,7 @@ func (p *Peer) IsMySelf() (bool, string) {
}

// LaunchPeerExecution launches an execution on a peer
func (p *Peer) LaunchPeerExecution(peerID string, dataID string, dt tools.DataType, method tools.METHOD, body map[string]interface{}, caller *tools.HTTPCaller) (*PeerExecution, error) {
func (p *Peer) LaunchPeerExecution(peerID string, dataID string, dt tools.DataType, method tools.METHOD, body interface{}, caller *tools.HTTPCaller) (*PeerExecution, error) {
p.UUID = peerID
return cache.LaunchPeerExecution(peerID, dataID, dt, method, body, caller) // Launch the execution on the peer through the cache
}

@@ -4,7 +4,6 @@ import (
"encoding/json"
"errors"
"fmt"
"regexp"
"strings"

"cloud.o-forge.io/core/oc-lib/tools"
@@ -15,11 +14,11 @@ import (
* it defines the execution data
*/
type PeerExecution struct {
Method string `json:"method" bson:"method"`
Url string `json:"url" bson:"url"`
Body map[string]interface{} `json:"body" bson:"body"`
DataType int `json:"data_type" bson:"data_type"`
DataID string `json:"data_id" bson:"data_id"`
Method string `json:"method" bson:"method"`
Url string `json:"url" bson:"url"`
Body interface{} `json:"body" bson:"body"`
DataType int `json:"data_type" bson:"data_type"`
DataID string `json:"data_id" bson:"data_id"`
}
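
Editor's note: the Body field widens from map[string]interface{} to interface{}, so any JSON-serializable payload can be forwarded to a peer. A minimal, standalone sketch (stdlib only; the struct below is a stand-in, not the library's type):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in for the widened Body field: a struct body or a map body both
// round-trip through JSON the same way.
type peerExecution struct {
	Method string      `json:"method"`
	Url    string      `json:"url"`
	Body   interface{} `json:"body"`
}

func main() {
	for _, body := range []interface{}{
		map[string]interface{}{"booking_id": "42"},     // map payload
		struct{ DestPeerID string }{DestPeerID: "p-a"}, // struct payload
	} {
		b, _ := json.Marshal(peerExecution{Method: "POST", Url: "/booking", Body: body})
		fmt.Println(string(b))
	}
}
```
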

var cache = &PeerCache{} // Singleton instance of the peer cache
@@ -29,86 +28,78 @@ type PeerCache struct {
}

// urlFormat formats the URL of the peer with the data type API function
func (p *PeerCache) urlFormat(url string, dt tools.DataType) string {
func (p *PeerCache) urlFormat(hostUrl string, dt tools.DataType) string {
// localhost is replaced by the local peer URL
// because localhost must collide on a web request security protocol
localhost := ""
if strings.Contains(url, "localhost") {
/*localhost := ""
if strings.Contains(hostUrl, "localhost") {
localhost = "localhost"
}
if strings.Contains(url, "127.0.0.1") {
if strings.Contains(hostUrl, "127.0.0.1") {
localhost = "127.0.0.1"
}
if localhost != "" {
r := regexp.MustCompile("(" + localhost + ":[0-9]+)")
t := r.FindString(url)
t := r.FindString(hostUrl)
if t != "" {
url = strings.Replace(url, t, dt.API()+":8080/oc", -1)
hostUrl = strings.Replace(hostUrl, t, dt.API()+":8080/oc", -1)
} else {
url = strings.ReplaceAll(url, localhost, dt.API()+":8080/oc")
hostUrl = strings.ReplaceAll(hostUrl, localhost, dt.API()+":8080/oc")
}
} else {
url = url + "/" + dt.API()
}
return url
} else {*/
hostUrl = hostUrl + "/" + strings.ReplaceAll(dt.API(), "oc-", "")
//}
fmt.Println("Contacting", hostUrl)
return hostUrl
}
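
Editor's note: with the localhost branch commented out, urlFormat reduces to appending the datatype API name (with its "oc-" prefix stripped) to the peer host URL. A minimal sketch of just that string manipulation; "oc-datacenter" and the host are hypothetical example values, not values taken from the library:

```go
package main

import (
	"fmt"
	"strings"
)

// Active branch of the rewritten urlFormat: host URL + "/" + API name without
// the "oc-" prefix.
func urlFormat(hostUrl, api string) string {
	return hostUrl + "/" + strings.ReplaceAll(api, "oc-", "")
}

func main() {
	fmt.Println(urlFormat("https://mypeer.com", "oc-datacenter"))
	// https://mypeer.com/datacenter
}
```
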

// checkPeerStatus checks the status of a peer
func (p *PeerCache) checkPeerStatus(peerID string, appName string, caller *tools.HTTPCaller) (*Peer, bool) {
func (p *PeerCache) checkPeerStatus(peerID string, appName string) (*Peer, bool) {
api := tools.API{}
access := NewShallowAccessor()
res, code, _ := access.LoadOne(peerID) // Load the peer from db
if code != 200 { // no peer no party
return nil, false
}
methods := caller.URLS[tools.PEER] // Get the methods url of the peer
if methods == nil {
return res.(*Peer), false
}
meth := methods[tools.POST] // Get the POST method to check status
if meth == "" {
return res.(*Peer), false
}
url := p.urlFormat(res.(*Peer).Url, tools.PEER) + meth // Format the URL
fmt.Println("Checking peer status on", url, "...")
url := p.urlFormat(res.(*Peer).Url, tools.PEER) + "/status" // Format the URL
state, services := api.CheckRemotePeer(url)
fmt.Println("Checking peer status on", url, state, services) // Check the status of the peer
res.(*Peer).ServicesState = services // Update the services states of the peer
access.UpdateOne(res, peerID) // Update the peer in the db
return res.(*Peer), state != tools.DEAD && services[appName] == 0 // Return the peer and its status
}

// LaunchPeerExecution launches an execution on a peer
// The method contacts the path described by : peer.Url + datatype path (from enums) + replacement of id by dataID
func (p *PeerCache) LaunchPeerExecution(peerID string, dataID string,
dt tools.DataType, method tools.METHOD, body map[string]interface{}, caller *tools.HTTPCaller) (*PeerExecution, error) {
dt tools.DataType, method tools.METHOD, body interface{}, caller *tools.HTTPCaller) (*PeerExecution, error) {
fmt.Println("Launching peer execution on", caller.URLS, dt, method)
methods := caller.URLS[dt] // Get the methods url of the data type
if m, ok := methods[method]; !ok || m == "" {
return nil, errors.New("no path found")
return nil, errors.New("Requested method " + method.String() + " not declared in HTTPCaller")
}
meth := methods[method] // Get the method url to execute
meth = strings.ReplaceAll(meth, ":id", dataID) // Replace the id in the url in case of a DELETE / UPDATE method (it's a standard naming in OC)
path := methods[method] // Get the path corresponding to the action we want to execute
path = strings.ReplaceAll(path, ":id", dataID) // Replace the id in the path in case of a DELETE / UPDATE method (it's a standard naming in OC)
url := ""

// Check the status of the peer
if mypeer, ok := p.checkPeerStatus(peerID, dt.API(), caller); !ok && mypeer != nil {
if mypeer, ok := p.checkPeerStatus(peerID, dt.API()); !ok && mypeer != nil {
// If the peer is not reachable, add the execution to the failed executions list
pexec := &PeerExecution{
Method: method.String(),
Url: p.urlFormat((mypeer.Url)+meth, dt),
Url: p.urlFormat((mypeer.Url), dt) + path, // the url is constituted of : host URL + resource path + action path (ex : mypeer.com/datacenter/resourcetype/path/to/action)
Body: body,
DataType: dt.EnumIndex(),
DataID: dataID,
}
mypeer.AddExecution(*pexec)
NewShallowAccessor().UpdateOne(mypeer, peerID) // Update the peer in the db
return nil, errors.New("peer is not reachable")
return nil, errors.New("peer is " + peerID + " not reachable")
} else {
if mypeer == nil {
return nil, errors.New("peer not found")
return nil, errors.New("peer " + peerID + " not found")
}
// If the peer is reachable, launch the execution
url = p.urlFormat((mypeer.Url)+meth, dt) // Format the URL
url = p.urlFormat((mypeer.Url), dt) + path // Format the URL
tmp := mypeer.FailedExecution // Get the failed executions list
mypeer.FailedExecution = []PeerExecution{} // Reset the failed executions list
NewShallowAccessor().UpdateOne(mypeer, peerID) // Update the peer in the db
@@ -116,12 +107,11 @@ func (p *PeerCache) LaunchPeerExecution(peerID string, dataID string,
go p.exec(v.Url, tools.ToMethod(v.Method), v.Body, caller)
}
}
fmt.Println("URL exec", url)
return nil, p.exec(url, method, body, caller) // Execute the method
}
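
Editor's note: the change splits URL construction into a formatted host (urlFormat) plus the action path registered in the HTTPCaller, with ":id" replaced by the data ID. A minimal, standalone sketch of that composition; the host, route and ID below are illustrative values, not the library's actual route table:

```go
package main

import (
	"fmt"
	"strings"
)

// buildExecURL mirrors the construction done in LaunchPeerExecution:
// formatted host URL + action path with ":id" substituted.
func buildExecURL(formattedHost, path, dataID string) string {
	return formattedHost + strings.ReplaceAll(path, ":id", dataID)
}

func main() {
	fmt.Println(buildExecURL("https://mypeer.com/datacenter", "/booking/:id", "42"))
	// https://mypeer.com/datacenter/booking/42
}
```
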

// exec executes the method on the peer
func (p *PeerCache) exec(url string, method tools.METHOD, body map[string]interface{}, caller *tools.HTTPCaller) error {
func (p *PeerCache) exec(url string, method tools.METHOD, body interface{}, caller *tools.HTTPCaller) error {
var b []byte
var err error
if method == tools.POST { // Execute the POST method if it's a POST method
@@ -133,8 +123,11 @@ func (p *PeerCache) exec(url string, method tools.METHOD, body map[string]interf
if method == tools.DELETE { // Execute the DELETE method if it's a DELETE method
b, err = caller.CallDelete(url, "")
}
if err != nil {
return err
}
var m map[string]interface{}
json.Unmarshal(b, &m)
err = json.Unmarshal(b, &m)
if err != nil {
return err
}

@@ -11,12 +11,14 @@ import (

type peerMongoAccessor struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
overrideAuth bool
}

// New creates a new instance of the peerMongoAccessor
func NewShallowAccessor() *peerMongoAccessor {
return &peerMongoAccessor{
utils.AbstractAccessor{
overrideAuth: true,
AbstractAccessor: utils.AbstractAccessor{
Logger: logs.CreateLogger(tools.PEER.String()), // Create a logger with the data type
Type: tools.PEER,
},
@@ -25,7 +27,8 @@ func NewShallowAccessor() *peerMongoAccessor {

func NewAccessor(request *tools.APIRequest) *peerMongoAccessor {
return &peerMongoAccessor{
utils.AbstractAccessor{
overrideAuth: false,
AbstractAccessor: utils.AbstractAccessor{
Logger: logs.CreateLogger(tools.PEER.String()), // Create a logger with the data type
Request: request,
Type: tools.PEER,
@@ -33,6 +36,10 @@ func NewAccessor(request *tools.APIRequest) *peerMongoAccessor {
}
}

func (wfa *peerMongoAccessor) ShouldVerifyAuth() bool {
return !wfa.overrideAuth
}
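
Editor's note: the new overrideAuth flag lets the internal "shallow" accessor skip auth verification while the request-bound accessor keeps it. A minimal stand-in (stdlib only) of that flag's effect, not the library's actual accessor type:

```go
package main

import "fmt"

// accessor is a stand-in for peerMongoAccessor: the shallow variant opts out
// of auth checks, the regular variant keeps them.
type accessor struct{ overrideAuth bool }

func (a accessor) ShouldVerifyAuth() bool { return !a.overrideAuth }

func main() {
	shallow := accessor{overrideAuth: true}  // as built by NewShallowAccessor()
	regular := accessor{overrideAuth: false} // as built by NewAccessor(request)
	fmt.Println(shallow.ShouldVerifyAuth(), regular.ShouldVerifyAuth()) // false true
}
```
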

/*
* Nothing special here, just the basic CRUD operations
*/
@@ -72,14 +79,16 @@ func (wfa *peerMongoAccessor) Search(filters *dbs.Filters, search string, isDraf
}, isDraft, wfa)
}
func (a *peerMongoAccessor) getDefaultFilter(search string) *dbs.Filters {
s, err := strconv.Atoi(search)
if err == nil {
if i, err := strconv.Atoi(search); err == nil {
return &dbs.Filters{
Or: map[string][]dbs.Filter{ // search by name if no filters are provided
"state": {{Operator: dbs.EQUAL.String(), Value: s}},
"state": {{Operator: dbs.EQUAL.String(), Value: i}},
},
}
} else {
if search == "*" {
search = ""
}
return &dbs.Filters{
Or: map[string][]dbs.Filter{ // search by name if no filters are provided
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},

@@ -5,7 +5,8 @@ import (
"strings"
"time"

"cloud.o-forge.io/core/oc-lib/models/common"
"cloud.o-forge.io/core/oc-lib/models/common/enum"
"cloud.o-forge.io/core/oc-lib/models/common/models"
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
@@ -16,22 +17,48 @@ import (
* it defines the resource compute
*/
type ComputeResource struct {
AbstractResource[*ComputeResourceInstance]
Architecture string `json:"architecture,omitempty" bson:"architecture,omitempty"` // Architecture is the architecture
Infrastructure common.InfrastructureType `json:"infrastructure,omitempty" bson:"infrastructure,omitempty"`
AbstractInstanciatedResource[*ComputeResourceInstance]
Architecture string `json:"architecture,omitempty" bson:"architecture,omitempty"` // Architecture is the architecture
Infrastructure enum.InfrastructureType `json:"infrastructure" bson:"infrastructure" default:"-1"` // Infrastructure is the infrastructure
}

func (d *ComputeResource) GetAccessor(request *tools.APIRequest) utils.Accessor {
return NewAccessor[*ComputeResource](tools.COMPUTE_RESOURCE, request, func() utils.DBObject { return &ComputeResource{} })
}

func (r *ComputeResource) GetType() string {
return tools.COMPUTE_RESOURCE.String()
}

func (abs *ComputeResource) ConvertToPricedResource(
t tools.DataType, request *tools.APIRequest) pricing.PricedItemITF {
if t != tools.COMPUTE_RESOURCE {
return nil
}
p := abs.AbstractInstanciatedResource.ConvertToPricedResource(t, request)
priced := p.(*PricedResource)
return &PricedComputeResource{
PricedResource: *priced,
}
}

type ComputeNode struct {
Name string `json:"name,omitempty" bson:"name,omitempty"`
Quantity int64 `json:"quantity" bson:"quantity" default:"1"`
RAM *models.RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
CPUs map[string]int64 `bson:"cpus,omitempty" json:"cpus,omitempty"` // CPUs is the list of CPUs key is model
GPUs map[string]int64 `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs key is model
}

type ComputeResourceInstance struct {
ResourceInstance[*ComputeResourcePartnership]
SecurityLevel string `json:"security_level,omitempty" bson:"security_level,omitempty"`
PowerSource string `json:"power_source,omitempty" bson:"power_source,omitempty"`
CPUs map[string]*common.CPU `bson:"cpus,omitempty" json:"cpus,omitempty"` // CPUs is the list of CPUs key is model
GPUs map[string]*common.GPU `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs key is model
RAM *common.RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
Source string `json:"source,omitempty" bson:"source,omitempty"` // Source is the source of the resource
SecurityLevel string `json:"security_level,omitempty" bson:"security_level,omitempty"`
PowerSources []string `json:"power_sources,omitempty" bson:"power_sources,omitempty"`
AnnualCO2Emissions float64 `json:"annual_co2_emissions,omitempty" bson:"co2_emissions,omitempty"`
CPUs map[string]*models.CPU `bson:"cpus,omitempty" json:"cpus,omitempty"` // CPUs is the list of CPUs key is model
GPUs map[string]*models.GPU `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs key is model
Nodes []*ComputeNode `json:"nodes,omitempty" bson:"nodes,omitempty"`
}

type ComputeResourcePartnership struct {
@@ -41,23 +68,14 @@ type ComputeResourcePartnership struct {
MaxAllowedRAMSize float64 `json:"allowed_ram,omitempty" bson:"allowed_ram,omitempty"`
}

type ComputeResourcePricingProfileOptions struct {
CPUCore int `json:"cpu_core" bson:"cpu_core" default:"1"`
GPUMemoryGB float64 `json:"gpu_memory_gb" bson:"gpu_memory_gb" default:"1"`
RAMSizeGB float64 `json:"ram_size_gb" bson:"ram_size_gb" default:"1"`
}

type ComputeResourcePricingProfile struct {
pricing.ExploitPricingProfile[pricing.TimePricingStrategy]
Options ComputeResourcePricingProfileOptions `json:"options,omitempty" bson:"options,omitempty"` // Options is the options of the pricing profile
// ExploitPricingProfile is the pricing profile of a compute it means that we exploit the resource for an amount of continuous time
OverrideCPUsPrices map[string]float64 `json:"cpus_prices,omitempty" bson:"cpus_prices,omitempty"` // CPUsPrices is the prices of the CPUs
OverrideGPUsPrices map[string]float64 `json:"gpus_prices,omitempty" bson:"gpus_prices,omitempty"` // GPUsPrices is the prices of the GPUs
OverrideRAMPrice float64 `json:"ram_price" bson:"ram_price" default:"-1"` // RAMPrice is the price of the RAM
CPUsPrices map[string]float64 `json:"cpus_prices,omitempty" bson:"cpus_prices,omitempty"` // CPUsPrices is the prices of the CPUs
GPUsPrices map[string]float64 `json:"gpus_prices,omitempty" bson:"gpus_prices,omitempty"` // GPUsPrices is the prices of the GPUs
RAMPrice float64 `json:"ram_price" bson:"ram_price" default:"-1"` // RAMPrice is the price of the RAM
}

// PROBLEM

func (p *ComputeResourcePricingProfile) IsPurchased() bool {
return p.Pricing.BuyingStrategy != pricing.PAY_PER_USE
}
@@ -75,10 +93,10 @@ func (p *ComputeResourcePricingProfile) GetPrice(amountOfData float64, explicitD
pp := float64(0)
model := params[1]
if strings.Contains(params[0], "cpus") && len(params) > 1 {
if _, ok := p.OverrideCPUsPrices[model]; ok {
p.Pricing.Price = p.OverrideCPUsPrices[model]
if _, ok := p.CPUsPrices[model]; ok {
p.Pricing.Price = p.CPUsPrices[model]
}
r, err := p.Pricing.GetPrice(amountOfData/float64(p.Options.CPUCore), explicitDuration, start, &end)
r, err := p.Pricing.GetPrice(amountOfData, explicitDuration, start, &end)
if err != nil {
return 0, err
}
@@ -86,20 +104,20 @@ func (p *ComputeResourcePricingProfile) GetPrice(amountOfData float64, explicitD

}
if strings.Contains(params[0], "gpus") && len(params) > 1 {
if _, ok := p.OverrideGPUsPrices[model]; ok {
p.Pricing.Price = p.OverrideGPUsPrices[model]
if _, ok := p.GPUsPrices[model]; ok {
p.Pricing.Price = p.GPUsPrices[model]
}
r, err := p.Pricing.GetPrice(amountOfData/float64(p.Options.GPUMemoryGB), explicitDuration, start, &end)
r, err := p.Pricing.GetPrice(amountOfData, explicitDuration, start, &end)
if err != nil {
return 0, err
}
pp += r
}
if strings.Contains(params[0], "ram") {
if p.OverrideRAMPrice >= 0 {
p.Pricing.Price = p.OverrideRAMPrice
if p.RAMPrice >= 0 {
p.Pricing.Price = p.RAMPrice
}
r, err := p.Pricing.GetPrice(float64(amountOfData)/p.Options.RAMSizeGB, explicitDuration, start, &end)
r, err := p.Pricing.GetPrice(float64(amountOfData), explicitDuration, start, &end)
if err != nil {
return 0, err
}
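
Editor's note: the hunk above replaces the per-core/per-GB normalisation (the dropped Options struct) with plain per-model price overrides. A minimal sketch of just the override lookup, with made-up prices and model names:

```go
package main

import "fmt"

// effectiveCPUPrice mirrors the override step now used in
// ComputeResourcePricingProfile.GetPrice: a per-model price, when registered,
// replaces the base price before the time-based strategy is applied.
func effectiveCPUPrice(base float64, cpusPrices map[string]float64, model string) float64 {
	if p, ok := cpusPrices[model]; ok {
		return p
	}
	return base
}

func main() {
	prices := map[string]float64{"epyc-7763": 0.12} // hypothetical model/price
	fmt.Println(effectiveCPUPrice(0.05, prices, "epyc-7763")) // 0.12
	fmt.Println(effectiveCPUPrice(0.05, prices, "unknown"))   // 0.05
}
```
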
@@ -121,11 +139,19 @@ func (r *PricedComputeResource) GetType() tools.DataType {
}

func (r *PricedComputeResource) GetPrice() (float64, error) {
if r.UsageStart == nil || r.UsageEnd == nil {
return 0, errors.New("Usage start and end must be set")
now := time.Now()
if r.UsageStart == nil {
r.UsageStart = &now
}
if r.UsageEnd == nil {
add := r.UsageStart.Add(time.Duration(1 * time.Hour))
r.UsageEnd = &add
}
if r.SelectedPricing == nil {
return 0, errors.New("Selected pricing must be set")
if len(r.PricingProfiles) == 0 {
return 0, errors.New("pricing profile must be set on Priced Compute" + r.ResourceID)
}
r.SelectedPricing = &r.PricingProfiles[0]
}
pricing := *r.SelectedPricing
price := float64(0)

@@ -2,36 +2,29 @@ package resources

import (
"errors"
"fmt"
"time"

"cloud.o-forge.io/core/oc-lib/models/common/models"
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)

// enum of public private or licenced data
type DataLicense int

const (
PUBLIC DataLicense = iota
PRIVATE
LICENCED
)

/*
* DataResource is a struct that represents a data resource
* it defines the resource data
*/
type DataResource struct {
AbstractResource[*ResourceInstance[*DataResourcePartnership]]
Type string `bson:"type,omitempty" json:"type,omitempty"`
Quality string `bson:"quality,omitempty" json:"quality,omitempty"`
OpenData bool `bson:"open_data" json:"open_data" default:"false"` // Type is the type of the storage
Static bool `bson:"static" json:"static" default:"false"`
UpdatePeriod time.Time `bson:"update_period,omitempty" json:"update_period,omitempty"`
PersonalData bool `bson:"personal_data,omitempty" json:"personal_data,omitempty"`
AnonymizedPersonalData bool `bson:"anonymized_personal_data,omitempty" json:"anonymized_personal_data,omitempty"`
SizeGB float64 `json:"size_gb,omitempty" bson:"size_gb,omitempty"` // SizeGB is the size of the data License DataLicense `json:"license" bson:"license" description:"license of the data" default:"0"` // License is the license of the data
AbstractInstanciatedResource[*DataInstance]
Type string `bson:"type,omitempty" json:"type,omitempty"`
Quality string `bson:"quality,omitempty" json:"quality,omitempty"`
OpenData bool `bson:"open_data" json:"open_data" default:"false"` // Type is the type of the storage
Static bool `bson:"static" json:"static" default:"false"`
UpdatePeriod *time.Time `bson:"update_period,omitempty" json:"update_period,omitempty"`
PersonalData bool `bson:"personal_data,omitempty" json:"personal_data,omitempty"`
AnonymizedPersonalData bool `bson:"anonymized_personal_data,omitempty" json:"anonymized_personal_data,omitempty"`
SizeGB float64 `json:"size,omitempty" bson:"size,omitempty"` // SizeGB is the size of the data License DataLicense `json:"license" bson:"license" description:"license of the data" default:"0"` // License is the license of the data
// ? Interest DataLicense `json:"interest" bson:"interest" description:"interest of the data" default:"0"` // Interest is the interest of the data
Example string `json:"example,omitempty" bson:"example,omitempty" description:"base64 encoded data"` // Example is an example of the data
}
@@ -40,6 +33,45 @@ func (d *DataResource) GetAccessor(request *tools.APIRequest) utils.Accessor {
return NewAccessor[*DataResource](tools.DATA_RESOURCE, request, func() utils.DBObject { return &DataResource{} }) // Create a new instance of the accessor
}

func (r *DataResource) GetType() string {
return tools.DATA_RESOURCE.String()
}

func (abs *DataResource) ConvertToPricedResource(
t tools.DataType, request *tools.APIRequest) pricing.PricedItemITF {
if t != tools.DATA_RESOURCE {
return nil
}
p := abs.AbstractInstanciatedResource.ConvertToPricedResource(t, request)
priced := p.(*PricedResource)
return &PricedDataResource{
PricedResource: *priced,
}
}

type DataInstance struct {
ResourceInstance[*DataResourcePartnership]
Source string `json:"source,omitempty" bson:"source,omitempty"` // Source is the source of the data
}

func (ri *DataInstance) StoreDraftDefault() {
found := false
for _, p := range ri.ResourceInstance.Env {
if p.Attr == "source" {
found = true
break
}
}
if !found {
ri.ResourceInstance.Env = append(ri.ResourceInstance.Env, models.Param{
Attr: "source",
Value: ri.Source,
Readonly: true,
})
}
ri.ResourceInstance.StoreDraftDefault()
}
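
Editor's note: the new DataInstance.StoreDraftDefault injects the instance's Source as a read-only "source" env parameter only when one is not already present. A minimal, standalone sketch of that guard; the param type and the example URL are stand-ins:

```go
package main

import "fmt"

// param is a stand-in for models.Param as used in this diff.
type param struct {
	Attr     string
	Value    string
	Readonly bool
}

// ensureSource appends a read-only "source" parameter unless it already exists.
func ensureSource(env []param, source string) []param {
	for _, p := range env {
		if p.Attr == "source" {
			return env // already set: leave the environment untouched
		}
	}
	return append(env, param{Attr: "source", Value: source, Readonly: true})
}

func main() {
	env := ensureSource(nil, "s3://bucket/dataset") // hypothetical source URL
	fmt.Println(ensureSource(env, "other"))         // unchanged: source already present
}
```
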

type DataResourcePartnership struct {
ResourcePartnerShip[*DataResourcePricingProfile]
MaxDownloadableGbAllowed float64 `json:"allowed_gb,omitempty" bson:"allowed_gb,omitempty"`
@@ -57,6 +89,14 @@ const (
PER_KB_DOWNLOADED
)

func (t DataResourcePricingStrategy) String() string {
return [...]string{"PER DOWNLOAD", "PER TB DOWNLOADED", "PER GB DOWNLOADED", "PER MB DOWNLOADED", "PER KB DOWNLOADED"}[t]
}

func DataResourcePricingStrategyList() []DataResourcePricingStrategy {
return []DataResourcePricingStrategy{PER_DOWNLOAD, PER_TB_DOWNLOADED, PER_GB_DOWNLOADED, PER_MB_DOWNLOADED, PER_KB_DOWNLOADED}
}

func ToDataResourcePricingStrategy(i int) DataResourcePricingStrategy {
return DataResourcePricingStrategy(i)
}
@@ -82,7 +122,7 @@ func (t DataResourcePricingStrategy) GetQuantity(amountOfDataGB float64) (float6
case PER_KB_DOWNLOADED:
return amountOfDataGB / 1000000, nil
}
return 0, errors.New("Pricing strategy not found")
return 0, errors.New("pricing strategy not found")
}
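
Editor's note: the new String() method maps the integer strategy stored in Mongo onto a human-readable label by index. A self-contained sketch of the same pattern with local stand-in identifiers (lower-cased here so it does not shadow the library's exported constants):

```go
package main

import "fmt"

type strategy int

const (
	perDownload strategy = iota
	perTBDownloaded
	perGBDownloaded
	perMBDownloaded
	perKBDownloaded
)

// String maps the stored integer onto its label, exactly by index.
func (s strategy) String() string {
	return [...]string{"PER DOWNLOAD", "PER TB DOWNLOADED", "PER GB DOWNLOADED",
		"PER MB DOWNLOADED", "PER KB DOWNLOADED"}[s]
}

func main() {
	fmt.Println(perGBDownloaded) // PER GB DOWNLOADED
}
```
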

type DataResourcePricingProfile struct {
@@ -111,11 +151,20 @@ func (r *PricedDataResource) GetType() tools.DataType {
}

func (r *PricedDataResource) GetPrice() (float64, error) {
if r.UsageStart == nil || r.UsageEnd == nil {
return 0, errors.New("Usage start and end must be set")
fmt.Println("GetPrice", r.UsageStart, r.UsageEnd)
now := time.Now()
if r.UsageStart == nil {
r.UsageStart = &now
}
if r.UsageEnd == nil {
add := r.UsageStart.Add(time.Duration(1 * time.Hour))
r.UsageEnd = &add
}
if r.SelectedPricing == nil {
return 0, errors.New("Selected pricing must be set")
if len(r.PricingProfiles) == 0 {
return 0, errors.New("pricing profile must be set on Priced Data" + r.ResourceID)
}
r.SelectedPricing = &r.PricingProfiles[0]
}
pricing := *r.SelectedPricing
var err error

@@ -1,40 +1,27 @@
package resources

import (
"time"

"cloud.o-forge.io/core/oc-lib/models/common/pricing"
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)

type ShallowResourceInterface interface {
utils.DBObject
GetType() tools.DataType
GetCreatorID() string
GetPricingID() string
GetLocationStart() *time.Time
GetLocationEnd() *time.Time
GetExplicitDurationInS() float64
SetStartUsage(start time.Time)
SetEndUsage(end time.Time)
GetPartnership(request *tools.APIRequest) ResourcePartnerITF
SetResourceModel(model *resource_model.ResourceModel)
}

type ResourceInterface interface {
utils.DBObject
Trim()
Transform() utils.DBObject
ConvertToPricedResource(t tools.DataType, request *tools.APIRequest) pricing.PricedItemITF
GetType() string
GetSelectedInstance() utils.DBObject
ClearEnv() utils.DBObject
SetAllowedInstances(request *tools.APIRequest)
SetResourceModel(model *resource_model.ResourceModel)
}

type ResourceInstanceITF interface {
utils.DBObject
GetID() string
GetName() string
StoreDraftDefault()
ClearEnv()
GetPricingsProfiles(peerID string, groups []string) []pricing.PricingProfileITF
GetPeerGroups() ([]ResourcePartnerITF, []map[string][]string)
ClearPeerGroups()

@@ -28,6 +28,7 @@ func (r *ResourceSet) Clear() {
}

func (r *ResourceSet) Fill(request *tools.APIRequest) {
r.Clear()
for k, v := range map[utils.DBObject][]string{
(&DataResource{}): r.Datas,
(&ComputeResource{}): r.Computes,

@@ -2,6 +2,7 @@ package resources

import (
"errors"
"fmt"
"time"

"cloud.o-forge.io/core/oc-lib/models/common/pricing"
@@ -9,17 +10,17 @@ import (
)

type PricedResource struct {
Name string `json:"name,omitempty" bson:"name,omitempty"`
Logo string `json:"logo,omitempty" bson:"logo,omitempty"`
InstancesRefs map[string]string `json:"instances_refs,omitempty" bson:"instances_refs,omitempty"`
PricingProfiles map[string][]pricing.PricingProfileITF `json:"pricing_profiles,omitempty" bson:"pricing_profiles,omitempty"`
SelectedPricing *pricing.PricingProfileITF `json:"selected_pricing,omitempty" bson:"selected_pricing,omitempty"`
ExplicitBookingDurationS float64 `json:"explicit_location_duration_s,omitempty" bson:"explicit_location_duration_s,omitempty"`
UsageStart *time.Time `json:"start,omitempty" bson:"start,omitempty"`
UsageEnd *time.Time `json:"end,omitempty" bson:"end,omitempty"`
CreatorID string `json:"peer_id,omitempty" bson:"peer_id,omitempty"`
ResourceID string `json:"resource_id,omitempty" bson:"resource_id,omitempty"`
ResourceType tools.DataType `json:"resource_type,omitempty" bson:"resource_type,omitempty"`
Name string `json:"name,omitempty" bson:"name,omitempty"`
Logo string `json:"logo,omitempty" bson:"logo,omitempty"`
InstancesRefs map[string]string `json:"instances_refs,omitempty" bson:"instances_refs,omitempty"`
PricingProfiles []pricing.PricingProfileITF `json:"pricing_profiles,omitempty" bson:"pricing_profiles,omitempty"`
SelectedPricing *pricing.PricingProfileITF `json:"selected_pricing,omitempty" bson:"selected_pricing,omitempty"`
ExplicitBookingDurationS float64 `json:"explicit_location_duration_s,omitempty" bson:"explicit_location_duration_s,omitempty"`
UsageStart *time.Time `json:"start,omitempty" bson:"start,omitempty"`
UsageEnd *time.Time `json:"end,omitempty" bson:"end,omitempty"`
CreatorID string `json:"peer_id,omitempty" bson:"peer_id,omitempty"`
ResourceID string `json:"resource_id,omitempty" bson:"resource_id,omitempty"`
ResourceType tools.DataType `json:"resource_type,omitempty" bson:"resource_type,omitempty"`
}

func (abs *PricedResource) GetID() string {
@@ -34,18 +35,6 @@ func (abs *PricedResource) GetCreatorID() string {
return abs.CreatorID
}

func (abs *PricedResource) SetStartUsage(start time.Time) {
if abs.UsageStart == nil {
abs.UsageStart = &start
}
}

func (abs *PricedResource) SetEndUsage(end time.Time) {
if abs.UsageEnd == nil {
abs.UsageEnd = &end
}
}

func (abs *PricedResource) IsPurchased() bool {
if abs.SelectedPricing == nil {
return false
@@ -71,20 +60,34 @@ func (abs *PricedResource) SetLocationEnd(end time.Time) {

func (abs *PricedResource) GetExplicitDurationInS() float64 {
if abs.ExplicitBookingDurationS == 0 {
if abs.UsageEnd == nil || abs.UsageStart == nil {
if abs.UsageEnd == nil && abs.UsageStart == nil {
return time.Duration(1 * time.Hour).Seconds()
}
if abs.UsageEnd == nil {
add := abs.UsageStart.Add(time.Duration(1 * time.Hour))
abs.UsageEnd = &add
}
return abs.UsageEnd.Sub(*abs.UsageStart).Seconds()
}
return abs.ExplicitBookingDurationS
}

func (r *PricedResource) GetPrice() (float64, error) {
if r.UsageStart == nil || r.UsageEnd == nil {
return 0, errors.New("Usage start and end must be set")
fmt.Println("GetPrice", r.UsageStart, r.UsageEnd)
now := time.Now()
if r.UsageStart == nil {
r.UsageStart = &now
}
if r.UsageEnd == nil {
add := r.UsageStart.Add(time.Duration(1 * time.Hour))
r.UsageEnd = &add
}
if r.SelectedPricing == nil {
return 0, errors.New("Selected pricing must be set")
if len(r.PricingProfiles) == 0 {
return 0, errors.New("pricing profile must be set on Priced Resource " + r.ResourceID)
}
r.SelectedPricing = &r.PricingProfiles[0]
}
return (*r.SelectedPricing).GetPrice(1, 0, *r.UsageStart, *r.UsageEnd)
pricing := *r.SelectedPricing
return pricing.GetPrice(1, 0, *r.UsageStart, *r.UsageEnd)
}
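
Editor's note: instead of erroring when the usage window is missing, the reworked GetPrice methods above now default to a one-hour window starting "now" and fall back to the first pricing profile. A minimal, standalone sketch of just the window defaulting:

```go
package main

import (
	"fmt"
	"time"
)

// usageWindow mirrors the defaulting used in GetPrice: a nil start becomes
// time.Now(), a nil end becomes start + 1h, so a draft can still be quoted.
func usageWindow(start, end *time.Time) (time.Time, time.Time) {
	if start == nil {
		now := time.Now()
		start = &now
	}
	if end == nil {
		e := start.Add(1 * time.Hour)
		end = &e
	}
	return *start, *end
}

func main() {
	s, e := usageWindow(nil, nil)
	fmt.Println(e.Sub(s)) // 1h0m0s
}
```
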

@@ -3,16 +3,17 @@ package resources
import (
"time"

"cloud.o-forge.io/core/oc-lib/models/common"
"cloud.o-forge.io/core/oc-lib/models/common/enum"
"cloud.o-forge.io/core/oc-lib/models/common/models"
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)

type ProcessingUsage struct {
CPUs map[string]*common.CPU `bson:"cpus,omitempty" json:"cpus,omitempty"` // CPUs is the list of CPUs key is model
GPUs map[string]*common.GPU `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs key is model
RAM *common.RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
CPUs map[string]*models.CPU `bson:"cpus,omitempty" json:"cpus,omitempty"` // CPUs is the list of CPUs key is model
GPUs map[string]*models.GPU `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs key is model
RAM *models.RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM

StorageGb float64 `bson:"storage,omitempty" json:"storage,omitempty"` // Storage is the storage
Hypothesis string `bson:"hypothesis,omitempty" json:"hypothesis,omitempty"`
@@ -24,14 +25,26 @@ type ProcessingUsage struct {
* it defines the resource processing
*/
type ProcessingResource struct {
AbstractResource[*ResourceInstance[*ResourcePartnerShip[*ProcessingResourcePricingProfile]]]
Infrastructure common.InfrastructureType `json:"infrastructure,omitempty" bson:"infrastructure,omitempty"`
IsService bool `json:"is_service,omitempty" bson:"is_service,omitempty"` // IsService is a flag that indicates if the processing is a service
Usage *ProcessingUsage `bson:"usage,omitempty" json:"usage,omitempty"` // Usage is the usage of the processing
OpenSource bool `json:"open_source" bson:"open_source" default:"false"`
License string `json:"license,omitempty" bson:"license,omitempty"`
Maturity string `json:"maturity,omitempty" bson:"maturity,omitempty"`
Container *common.Container `json:"container,omitempty" bson:"container,omitempty"` // Container is the container
AbstractInstanciatedResource[*ProcessingInstance]
Infrastructure enum.InfrastructureType `json:"infrastructure" bson:"infrastructure" default:"-1"` // Infrastructure is the infrastructure
IsService bool `json:"is_service,omitempty" bson:"is_service,omitempty"` // IsService is a flag that indicates if the processing is a service
Usage *ProcessingUsage `bson:"usage,omitempty" json:"usage,omitempty"` // Usage is the usage of the processing
OpenSource bool `json:"open_source" bson:"open_source" default:"false"`
License string `json:"license,omitempty" bson:"license,omitempty"`
Maturity string `json:"maturity,omitempty" bson:"maturity,omitempty"`
}

func (r *ProcessingResource) GetType() string {
return tools.PROCESSING_RESOURCE.String()
}

type ProcessingResourceAccess struct {
Container *models.Container `json:"container,omitempty" bson:"container,omitempty"` // Container is the container
}

type ProcessingInstance struct {
ResourceInstance[*ResourcePartnerShip[*ProcessingResourcePricingProfile]]
Access *ProcessingResourceAccess `json:"access,omitempty" bson:"access,omitempty"` // Access is the access
}

type PricedProcessingResource struct {

@@ -17,9 +17,9 @@ type purchaseResourceMongoAccessor struct {
func NewAccessor(request *tools.APIRequest) *purchaseResourceMongoAccessor {
return &purchaseResourceMongoAccessor{
AbstractAccessor: utils.AbstractAccessor{
Logger: logs.CreateLogger(tools.BUYING_STATUS.String()), // Create a logger with the data type
Logger: logs.CreateLogger(tools.PURCHASE_RESOURCE.String()), // Create a logger with the data type
Request: request,
Type: tools.BUYING_STATUS,
Type: tools.PURCHASE_RESOURCE,
},
}
}

@@ -4,69 +4,61 @@ import (
"slices"

"cloud.o-forge.io/core/oc-lib/config"
"cloud.o-forge.io/core/oc-lib/models/common/models"
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
"github.com/biter777/countries"
)

// AbstractResource is the struct containing all of the attributes commons to all ressources

// Resource is the interface to be implemented by all classes inheriting from Resource to have the same behavior

// http://www.inanzzz.com/index.php/post/wqbs/a-basic-usage-of-int-and-string-enum-types-in-golang
/*
* AbstractResource is a struct that represents a resource
* it defines the resource data
*/

type AbstractResource[T ResourceInstanceITF] struct {
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
Logo string `json:"logo,omitempty" bson:"logo,omitempty" validate:"required"` // Logo is the logo of the resource
Description string `json:"description,omitempty" bson:"description,omitempty"` // Description is the description of the resource
ShortDescription string `json:"short_description,omitempty" bson:"short_description,omitempty" validate:"required"` // ShortDescription is the short description of the resource
Owners []utils.Owner `json:"owners,omitempty" bson:"owners,omitempty"` // Owners is the list of owners of the resource
ResourceModel *resource_model.ResourceModel `json:"resource_model,omitempty" bson:"resource_model,omitempty"` // ResourceModel is the model of the resource
UsageRestrictions string `bson:"usage_restrictions,omitempty" json:"usage_restrictions,omitempty"`
SelectedInstanceIndex int `json:"selected_instance_index,omitempty" bson:"selected_instance_index,omitempty"` // SelectedInstance is the selected instance
Instances []T `json:"instances,omitempty" bson:"instances,omitempty"` // Bill is the bill of the resource // Bill is the bill of the resource
type AbstractResource struct {
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
Type string `json:"type,omitempty" bson:"type,omitempty"` // Type is the type of the resource
Logo string `json:"logo,omitempty" bson:"logo,omitempty" validate:"required"` // Logo is the logo of the resource
Description string `json:"description,omitempty" bson:"description,omitempty"` // Description is the description of the resource
ShortDescription string `json:"short_description,omitempty" bson:"short_description,omitempty" validate:"required"` // ShortDescription is the short description of the resource
Owners []utils.Owner `json:"owners,omitempty" bson:"owners,omitempty"` // Owners is the list of owners of the resource
UsageRestrictions string `bson:"usage_restrictions,omitempty" json:"usage_restrictions,omitempty"`
SelectedInstanceIndex *int `json:"selected_instance_index,omitempty" bson:"selected_instance_index,omitempty"` // SelectedInstance is the selected instance
}

func (r *AbstractResource[T]) Transform() utils.DBObject {
return r
func (r *AbstractResource) GetSelectedInstance() utils.DBObject {
return nil
}

func (r *AbstractResource[T]) StoreDraftDefault() {
func (r *AbstractResource) GetType() string {
return tools.INVALID.String()
}

func (r *AbstractResource) StoreDraftDefault() {
r.IsDraft = true
}
func (r *AbstractResource[T]) CanUpdate(set utils.DBObject) (bool, utils.DBObject) {

func (r *AbstractResource) CanUpdate(set utils.DBObject) (bool, utils.DBObject) {
if r.IsDraft != set.IsDrafted() && set.IsDrafted() {
return true, set // only state can be updated
}
return r.IsDraft != set.IsDrafted() && set.IsDrafted(), set
}

func (r *AbstractResource[T]) CanDelete() bool {
func (r *AbstractResource) CanDelete() bool {
return r.IsDraft // only draft bookings can be deleted
}

func (ao *AbstractResource[T]) GetAccessor(request *tools.APIRequest) utils.Accessor {
return nil
type AbstractInstanciatedResource[T ResourceInstanceITF] struct {
AbstractResource // AbstractResource contains the basic fields of an object (id, name)
Instances []T `json:"instances,omitempty" bson:"instances,omitempty"` // Bill is the bill of the resource // Bill is the bill of the resource
}

func (abs *AbstractResource[T]) SetResourceModel(model *resource_model.ResourceModel) {
abs.ResourceModel = model
}

func (abs *AbstractResource[T]) ConvertToPricedResource(
func (abs *AbstractInstanciatedResource[T]) ConvertToPricedResource(
t tools.DataType, request *tools.APIRequest) pricing.PricedItemITF {
instances := map[string]string{}
profiles := map[string][]pricing.PricingProfileITF{}
profiles := []pricing.PricingProfileITF{}
for _, instance := range abs.Instances {
instances[instance.GetID()] = instance.GetName()
profiles[instance.GetID()] = instance.GetPricingsProfiles(request.PeerID, request.Groups)
profiles = instance.GetPricingsProfiles(request.PeerID, request.Groups)
}
return &PricedResource{
Name: abs.Name,
@@ -79,11 +71,32 @@ func (abs *AbstractResource[T]) ConvertToPricedResource(
}
}

func (abs *AbstractResource[T]) SetAllowedInstances(request *tools.APIRequest) {
func (abs *AbstractInstanciatedResource[T]) ClearEnv() utils.DBObject {
for _, instance := range abs.Instances {
instance.ClearEnv()
}
return abs
}

func (r *AbstractInstanciatedResource[T]) GetSelectedInstance() utils.DBObject {
if r.SelectedInstanceIndex != nil && len(r.Instances) > *r.SelectedInstanceIndex {
return r.Instances[*r.SelectedInstanceIndex]
}
if len(r.Instances) > 0 {
return r.Instances[0]
}
return nil
}
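
Editor's note: SelectedInstanceIndex becomes a *int, so "no selection" (nil) is distinguishable from "index 0", and the accessor falls back to the first instance. A minimal, standalone sketch of that selection rule with string stand-ins for instances:

```go
package main

import "fmt"

// selectInstance mirrors GetSelectedInstance: use the pointed-to index when it
// is set and in range, otherwise fall back to the first instance.
func selectInstance(instances []string, idx *int) (string, bool) {
	if idx != nil && len(instances) > *idx {
		return instances[*idx], true
	}
	if len(instances) > 0 {
		return instances[0], true
	}
	return "", false
}

func main() {
	one := 1
	fmt.Println(selectInstance([]string{"a", "b"}, &one)) // b true
	fmt.Println(selectInstance([]string{"a", "b"}, nil))  // a true
}
```
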

func (abs *AbstractInstanciatedResource[T]) SetAllowedInstances(request *tools.APIRequest) {
if request != nil && request.PeerID == abs.CreatorID && request.PeerID != "" {
return
}
abs.Instances = verifyAuthAction[T](abs.Instances, request)
}

func (d *AbstractResource[T]) Trim() {
func (d *AbstractInstanciatedResource[T]) Trim() {
d.Type = d.GetType()
if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: d.CreatorID}}).IsMySelf(); !ok {
for _, instance := range d.Instances {
instance.ClearPeerGroups()
@@ -91,7 +104,7 @@ func (d *AbstractResource[T]) Trim() {
}
}

func (abs *AbstractResource[T]) VerifyAuth(request *tools.APIRequest) bool {
func (abs *AbstractInstanciatedResource[T]) VerifyAuth(request *tools.APIRequest) bool {
return len(verifyAuthAction[T](abs.Instances, request)) > 0 || abs.AbstractObject.VerifyAuth(request)
}

@@ -106,6 +119,7 @@ func verifyAuthAction[T ResourceInstanceITF](baseInstance []T, request *tools.AP
if grps, ok := peers[request.PeerID]; ok || config.GetConfig().Whitelist {
if (ok && slices.Contains(grps, "*")) || (!ok && config.GetConfig().Whitelist) {
instances = append(instances, instance)
continue
}
for _, grp := range grps {
if slices.Contains(request.Groups, grp) {
@@ -123,21 +137,26 @@ type GeoPoint struct {
Longitude float64 `json:"longitude,omitempty" bson:"longitude,omitempty"`
}

type Credentials struct {
Login string `json:"login,omitempty" bson:"login,omitempty"`
Pass string `json:"password,omitempty" bson:"password,omitempty"`
}

type ResourceInstance[T ResourcePartnerITF] struct {
UUID string `json:"id,omitempty" bson:"id,omitempty"`
Name string `json:"name,omitempty" bson:"name,omitempty"`
utils.AbstractObject
Location GeoPoint `json:"location,omitempty" bson:"location,omitempty"`
Country countries.CountryCode `json:"country,omitempty" bson:"country,omitempty"`
AccessProtocol string `json:"access_protocol,omitempty" bson:"access_protocol,omitempty"`
Partnerships []T `json:"partner_resource,omitempty" bson:"partner_resource,omitempty"`
Env []models.Param `json:"env,omitempty" bson:"env,omitempty"`
Inputs []models.Param `json:"inputs,omitempty" bson:"inputs,omitempty"`
Outputs []models.Param `json:"outputs,omitempty" bson:"outputs,omitempty"`
Partnerships []T `json:"partnerships,omitempty" bson:"partnerships,omitempty"`
}

func (ri *ResourceInstance[T]) GetID() string {
return ri.UUID
}

func (ri *ResourceInstance[T]) GetName() string {
return ri.Name
func (ri *ResourceInstance[T]) ClearEnv() {
ri.Env = []models.Param{}
ri.Inputs = []models.Param{}
ri.Outputs = []models.Param{}
}

func (ri *ResourceInstance[T]) GetPricingsProfiles(peerID string, groups []string) []pricing.PricingProfileITF {
@@ -167,14 +186,23 @@ func (ri *ResourceInstance[T]) ClearPeerGroups() {
type ResourcePartnerShip[T pricing.PricingProfileITF] struct {
Namespace string `json:"namespace" bson:"namespace" default:"default-namespace"`
PeerGroups map[string][]string `json:"peer_groups,omitempty" bson:"peer_groups,omitempty"`
PricingProfiles map[string]T `json:"pricing,omitempty" bson:"pricing,omitempty"`
PricingProfiles []T `json:"pricing_profiles,omitempty" bson:"pricing_profiles,omitempty"`
}

func (ri *ResourcePartnerShip[T]) GetPricingsProfiles(peerID string, groups []string) []pricing.PricingProfileITF {
profiles := []pricing.PricingProfileITF{}
if ri.PeerGroups[peerID] != nil {
for _, ri := range ri.PricingProfiles {
profiles = append(profiles, ri)
}
if slices.Contains(groups, "*") {
for _, ri := range ri.PricingProfiles {
profiles = append(profiles, ri)
}
return profiles
}
for _, p := range ri.PeerGroups[peerID] {
if slices.Contains(groups, p) {
profiles := []pricing.PricingProfileITF{}
for _, ri := range ri.PricingProfiles {
profiles = append(profiles, ri)
}
@@ -182,7 +210,7 @@ func (ri *ResourcePartnerShip[T]) GetPricingsProfiles(peerID string, groups []st
}
}
}
return []pricing.PricingProfileITF{}
return profiles
}
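
Editor's note: the reworked GetPricingsProfiles exposes a partnership's profiles when the caller carries the "*" wildcard group, or when one of its groups is registered for its peer ID. A minimal, standalone sketch of just that access decision; peer IDs and group names are illustrative:

```go
package main

import (
	"fmt"
	"slices"
)

// allowed mirrors the group check in GetPricingsProfiles: profiles are visible
// if the peer is known and the caller either has the "*" wildcard group or
// shares at least one registered group.
func allowed(peerGroups map[string][]string, peerID string, groups []string) bool {
	if peerGroups[peerID] == nil {
		return false
	}
	if slices.Contains(groups, "*") {
		return true
	}
	for _, g := range peerGroups[peerID] {
		if slices.Contains(groups, g) {
			return true
		}
	}
	return false
}

func main() {
	pg := map[string][]string{"peer-a": {"research"}}
	fmt.Println(allowed(pg, "peer-a", []string{"*"}))        // true
	fmt.Println(allowed(pg, "peer-a", []string{"research"})) // true
	fmt.Println(allowed(pg, "peer-b", []string{"research"})) // false
}
```
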
|
||||
|
||||
func (rp *ResourcePartnerShip[T]) GetPeerGroups() map[string][]string {
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/logs"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
)
|
||||
@@ -22,10 +21,9 @@ func NewAccessor[T ResourceInterface](t tools.DataType, request *tools.APIReques
|
||||
}
|
||||
return &resourceMongoAccessor[T]{
|
||||
AbstractAccessor: utils.AbstractAccessor{
|
||||
ResourceModelAccessor: resource_model.NewAccessor(),
|
||||
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
|
||||
Request: request,
|
||||
Type: t,
|
||||
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
|
||||
Request: request,
|
||||
Type: t,
|
||||
},
|
||||
generateData: g,
|
||||
}
|
||||
@@ -34,19 +32,16 @@ func NewAccessor[T ResourceInterface](t tools.DataType, request *tools.APIReques
|
||||
/*
|
||||
* Nothing special here, just the basic CRUD operations
|
||||
*/
|
||||
|
||||
func (dca *resourceMongoAccessor[T]) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||
return utils.GenericDeleteOne(id, dca)
|
||||
}
|
||||
|
||||
func (dca *resourceMongoAccessor[T]) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||
set.(T).SetResourceModel(nil)
|
||||
set.(T).Trim()
|
||||
return utils.GenericUpdateOne(set, id, dca, dca.generateData())
|
||||
}
|
||||
|
||||
func (dca *resourceMongoAccessor[T]) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
data.(T).SetResourceModel(nil)
|
||||
data.(T).Trim()
|
||||
return utils.GenericStoreOne(data, dca)
|
||||
}
|
||||
@@ -57,37 +52,28 @@ func (dca *resourceMongoAccessor[T]) CopyOne(data utils.DBObject) (utils.DBObjec
|
||||
|
||||
func (dca *resourceMongoAccessor[T]) LoadOne(id string) (utils.DBObject, int, error) {
|
||||
return utils.GenericLoadOne[T](id, func(d utils.DBObject) (utils.DBObject, int, error) {
|
||||
resources, _, err := dca.ResourceModelAccessor.Search(nil, dca.GetType().String(), false)
|
||||
if err == nil && len(resources) > 0 {
|
||||
d.(T).SetResourceModel(resources[0].(*resource_model.ResourceModel))
|
||||
}
|
||||
d.(T).SetAllowedInstances(dca.Request)
|
||||
d = d.(T).Transform()
|
||||
return d, 200, nil
|
||||
}, dca)
|
||||
}
|
||||
|
||||
func (wfa *resourceMongoAccessor[T]) LoadAll(isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
||||
resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType().String(), isDraft)
|
||||
return utils.GenericLoadAll[T](func(d utils.DBObject) utils.ShallowDBObject {
|
||||
if err == nil && len(resources) > 0 {
|
||||
d.(T).SetResourceModel(resources[0].(*resource_model.ResourceModel))
|
||||
}
|
||||
d.(T).SetAllowedInstances(wfa.Request)
|
||||
d = d.(T).Transform()
|
||||
return d
|
||||
}, isDraft, wfa)
|
||||
}
|
||||
|
||||
func (wfa *resourceMongoAccessor[T]) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
||||
resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType().String(), false)
|
||||
if filters == nil && search == "*" {
|
||||
return utils.GenericLoadAll[T](func(d utils.DBObject) utils.ShallowDBObject {
|
||||
d.(T).SetAllowedInstances(wfa.Request)
|
||||
return d
|
||||
}, isDraft, wfa)
|
||||
}
|
||||
return utils.GenericSearch[T](filters, search, wfa.getResourceFilter(search),
|
||||
func(d utils.DBObject) utils.ShallowDBObject {
|
||||
if err == nil && len(resources) > 0 {
|
||||
d.(T).SetResourceModel(resources[0].(*resource_model.ResourceModel))
|
||||
}
|
||||
d.(T).SetAllowedInstances(wfa.Request)
|
||||
d = d.(T).Transform()
|
||||
return d
|
||||
}, isDraft, wfa)
|
||||
}
|
||||
@@ -95,11 +81,12 @@ func (wfa *resourceMongoAccessor[T]) Search(filters *dbs.Filters, search string,
|
||||
func (abs *resourceMongoAccessor[T]) getResourceFilter(search string) *dbs.Filters {
|
||||
return &dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
|
||||
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractintanciatedresource.abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractintanciatedresource.abstractresource.type": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractintanciatedresource.abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractintanciatedresource.abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractintanciatedresource.abstractresource.owners.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractintanciatedresource.abstractresource.abstractobject.creator_id": {{Operator: dbs.EQUAL.String(), Value: search}},
|
||||
},
|
||||
}
|
||||
}
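For orientation, a minimal self-contained sketch of the filter shape getResourceFilter builds: one LIKE clause per searchable field ORed with an exact match on the creator id. The Filter/Filters types and the shortened field paths below are illustrative stand-ins, not the real dbs package.

package main

import "fmt"

// Illustrative stand-ins; the real dbs.Filter carries Operator/Value the same way.
type Filter struct {
	Operator string
	Value    string
}
type Filters struct {
	Or map[string][]Filter
}

// likeEverywhere mirrors the idea of getResourceFilter above: LIKE on the
// descriptive fields, EQUAL on the creator id.
func likeEverywhere(search string) *Filters {
	or := map[string][]Filter{}
	for _, field := range []string{
		"abstractresource.abstractobject.name",
		"abstractresource.short_description",
		"abstractresource.description",
	} {
		or[field] = []Filter{{Operator: "LIKE", Value: search}}
	}
	or["abstractresource.abstractobject.creator_id"] = []Filter{{Operator: "EQUAL", Value: search}}
	return &Filters{Or: or}
}

func main() {
	f := likeEverywhere("gpu")
	fmt.Println(len(f.Or), "OR clauses") // 4 OR clauses
}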
|
||||
|
||||
@@ -1,67 +0,0 @@
|
||||
package resource_model
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
)
|
||||
|
||||
type WebResource struct {
|
||||
Protocol string `bson:"protocol,omitempty" json:"protocol,omitempty"` // Protocol is the protocol of the URL
|
||||
Path string `bson:"path,omitempty" json:"path,omitempty"` // Path is the path of the URL
|
||||
}
|
||||
|
||||
type Model struct {
|
||||
Type string `json:"type,omitempty" bson:"type,omitempty"` // Type is the type of the model
|
||||
ReadOnly bool `json:"readonly,omitempty" bson:"readonly,omitempty"` // ReadOnly is the readonly of the model
|
||||
}
|
||||
|
||||
/*
|
||||
* ResourceModel is a struct that represents a resource model
|
||||
* it defines the resource metadata and specificity
|
||||
* Warning: This struct is not user available, it is only used by the system
|
||||
*/
|
||||
type ResourceModel struct {
|
||||
utils.AbstractObject
|
||||
ResourceType string `json:"resource_type,omitempty" bson:"resource_type,omitempty" validate:"required"`
|
||||
VarRefs map[string]string `json:"var_refs,omitempty" bson:"var_refs,omitempty"` // VarRefs is the variable references of the model
|
||||
Model map[string]map[string]Model `json:"model,omitempty" bson:"model,omitempty"`
|
||||
}
|
||||
|
||||
func (d *ResourceModel) StoreDraftDefault() {
|
||||
d.Name = d.ResourceType + " Resource Model"
|
||||
d.IsDraft = false
|
||||
}
|
||||
|
||||
func (abs *ResourceModel) VerifyAuth(request *tools.APIRequest) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *ResourceModel) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
||||
return &ResourceModelMongoAccessor{
|
||||
utils.AbstractAccessor{
|
||||
Type: tools.RESOURCE_MODEL,
|
||||
Request: request,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (dma *ResourceModel) Deserialize(j map[string]interface{}, obj utils.DBObject) utils.DBObject {
|
||||
b, err := json.Marshal(j)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, obj)
|
||||
return obj
|
||||
}
|
||||
|
||||
func (dma *ResourceModel) Serialize(obj utils.DBObject) map[string]interface{} {
|
||||
var m map[string]interface{}
|
||||
b, err := json.Marshal(obj)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, &m)
|
||||
return m
|
||||
}
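Serialize and Deserialize above are a marshal-to-map round trip through encoding/json. A self-contained sketch of that same pattern with a toy struct (not the oc-lib types):

package main

import (
	"encoding/json"
	"fmt"
)

type toy struct {
	Name string `json:"name,omitempty"`
	Kind string `json:"kind,omitempty"`
}

// serialize converts a struct to a generic map via JSON, as Serialize does above.
func serialize(obj any) map[string]interface{} {
	b, err := json.Marshal(obj)
	if err != nil {
		return nil
	}
	var m map[string]interface{}
	if err := json.Unmarshal(b, &m); err != nil {
		return nil
	}
	return m
}

// deserialize fills obj back from a generic map, as Deserialize does above.
func deserialize(j map[string]interface{}, obj any) any {
	b, err := json.Marshal(j)
	if err != nil {
		return nil
	}
	if err := json.Unmarshal(b, obj); err != nil {
		return nil
	}
	return obj
}

func main() {
	m := serialize(&toy{Name: "model-a", Kind: "storage"})
	out := &toy{}
	deserialize(m, out)
	fmt.Printf("%+v\n", *out) // {Name:model-a Kind:storage}
}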
|
||||
@@ -1,62 +0,0 @@
|
||||
package resource_model
|
||||
|
||||
import (
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/logs"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
)
|
||||
|
||||
type ResourceModelMongoAccessor struct {
|
||||
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
|
||||
}
|
||||
|
||||
/*
|
||||
* Nothing special here, just the basic CRUD operations
|
||||
*/
|
||||
|
||||
func NewAccessor() *ResourceModelMongoAccessor {
|
||||
return &ResourceModelMongoAccessor{
|
||||
utils.AbstractAccessor{
|
||||
Type: tools.RESOURCE_MODEL,
|
||||
Logger: logs.CreateLogger(tools.RESOURCE_MODEL.String()),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (wfa *ResourceModelMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||
return utils.GenericDeleteOne(id, wfa)
|
||||
}
|
||||
|
||||
func (wfa *ResourceModelMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||
return utils.GenericUpdateOne(set, id, wfa, &ResourceModel{})
|
||||
}
|
||||
|
||||
func (wfa *ResourceModelMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
return utils.GenericStoreOne(data, wfa)
|
||||
}
|
||||
|
||||
func (wfa *ResourceModelMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
return utils.GenericStoreOne(data, wfa)
|
||||
}
|
||||
|
||||
func (a *ResourceModelMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||
return utils.GenericLoadOne[*ResourceModel](id, func(d utils.DBObject) (utils.DBObject, int, error) {
|
||||
return d, 200, nil
|
||||
}, a)
|
||||
}
|
||||
|
||||
func (a *ResourceModelMongoAccessor) LoadAll(isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
||||
return utils.GenericLoadAll[*ResourceModel](func(d utils.DBObject) utils.ShallowDBObject {
|
||||
return d
|
||||
}, isDraft, a)
|
||||
}
|
||||
|
||||
func (a *ResourceModelMongoAccessor) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
||||
return utils.GenericSearch[*ResourceModel](filters, search,
|
||||
&dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{
|
||||
"resource_type": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
},
|
||||
}, func(d utils.DBObject) utils.ShallowDBObject { return d }, isDraft, a)
|
||||
}
|
||||
@@ -2,9 +2,11 @@ package resources
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/common"
|
||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
||||
"cloud.o-forge.io/core/oc-lib/models/common/models"
|
||||
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
@@ -15,34 +17,67 @@ import (
|
||||
* it defines the resource storage
|
||||
*/
|
||||
type StorageResource struct {
|
||||
AbstractResource[*StorageResourceInstance] // AbstractResource contains the basic fields of an object (id, name)
|
||||
Type common.StorageType `bson:"type,omitempty"` // Type is the type of the storage
|
||||
TypeJSON string `json:"type,omitempty"`
|
||||
Acronym string `bson:"acronym,omitempty" json:"acronym,omitempty"` // Acronym is the acronym of the storage
|
||||
AbstractInstanciatedResource[*StorageResourceInstance] // AbstractResource contains the basic fields of an object (id, name)
|
||||
StorageType enum.StorageType `bson:"storage_type" json:"storage_type" default:"-1"` // Type is the type of the storage
|
||||
Acronym string `bson:"acronym,omitempty" json:"acronym,omitempty"` // Acronym is the acronym of the storage
|
||||
}
|
||||
|
||||
func (d *StorageResource) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
||||
return NewAccessor[*StorageResource](tools.STORAGE_RESOURCE, request, func() utils.DBObject { return &StorageResource{} }) // Create a new instance of the accessor
|
||||
}
|
||||
|
||||
func (r *StorageResource) Transform() utils.DBObject {
|
||||
r.TypeJSON = r.Type.String()
|
||||
return r
|
||||
func (r *StorageResource) GetType() string {
|
||||
return tools.STORAGE_RESOURCE.String()
|
||||
}
|
||||
|
||||
func (abs *StorageResource) ConvertToPricedResource(
|
||||
t tools.DataType, request *tools.APIRequest) pricing.PricedItemITF {
|
||||
if t != tools.STORAGE_RESOURCE {
|
||||
return nil
|
||||
}
|
||||
p := abs.AbstractInstanciatedResource.ConvertToPricedResource(t, request)
|
||||
priced := p.(*PricedResource)
|
||||
return &PricedStorageResource{
|
||||
PricedResource: *priced,
|
||||
}
|
||||
}
|
||||
|
||||
type StorageResourceInstance struct {
|
||||
ResourceInstance[*StorageResourcePartnership]
|
||||
Local bool `bson:"local" json:"local"`
|
||||
SecurityLevel string `bson:"security_level,omitempty" json:"security_level,omitempty"`
|
||||
SizeType common.StorageSize `bson:"size_type" json:"size_type" default:"0"` // SizeType is the type of the storage size
|
||||
SizeGB uint `bson:"size,omitempty" json:"size,omitempty"` // Size is the size of the storage
|
||||
Encryption bool `bson:"encryption,omitempty" json:"encryption,omitempty"` // Encryption is a flag that indicates if the storage is encrypted
|
||||
Redundancy string `bson:"redundancy,omitempty" json:"redundancy,omitempty"` // Redundancy is the redundancy of the storage
|
||||
Throughput string `bson:"throughput,omitempty" json:"throughput,omitempty"` // Throughput is the throughput of the storage
|
||||
Credentials *Credentials `json:"credentials,omitempty" bson:"credentials,omitempty"`
|
||||
Source string `bson:"source,omitempty" json:"source,omitempty"` // Source is the source of the storage
|
||||
Local bool `bson:"local" json:"local"`
|
||||
SecurityLevel string `bson:"security_level,omitempty" json:"security_level,omitempty"`
|
||||
SizeType enum.StorageSize `bson:"size_type" json:"size_type" default:"0"` // SizeType is the type of the storage size
|
||||
SizeGB int64 `bson:"size,omitempty" json:"size,omitempty"` // Size is the size of the storage
|
||||
Encryption bool `bson:"encryption,omitempty" json:"encryption,omitempty"` // Encryption is a flag that indicates if the storage is encrypted
|
||||
Redundancy string `bson:"redundancy,omitempty" json:"redundancy,omitempty"` // Redundancy is the redundancy of the storage
|
||||
Throughput string `bson:"throughput,omitempty" json:"throughput,omitempty"` // Throughput is the throughput of the storage
|
||||
}
|
||||
|
||||
func (i *StorageResourceInstance) GetID() string {
|
||||
return i.UUID
|
||||
func (ri *StorageResourceInstance) ClearEnv() {
|
||||
ri.Credentials = nil
|
||||
ri.Env = []models.Param{}
|
||||
ri.Inputs = []models.Param{}
|
||||
ri.Outputs = []models.Param{}
|
||||
}
|
||||
|
||||
func (ri *StorageResourceInstance) StoreDraftDefault() {
|
||||
found := false
|
||||
for _, p := range ri.ResourceInstance.Env {
|
||||
if p.Attr == "source" {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
ri.ResourceInstance.Env = append(ri.ResourceInstance.Env, models.Param{
|
||||
Attr: "source",
|
||||
Value: ri.Source,
|
||||
Readonly: true,
|
||||
})
|
||||
}
|
||||
ri.ResourceInstance.StoreDraftDefault()
|
||||
}
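StoreDraftDefault above only appends the read-only "source" param when it is not already present in the instance environment. A compact standalone sketch of that ensure-once pattern (Param here is a hypothetical local type, not models.Param):

package main

import "fmt"

type Param struct {
	Attr     string
	Value    string
	Readonly bool
}

// ensureParam appends a read-only param at most once, keyed by its Attr.
func ensureParam(env []Param, attr, value string) []Param {
	for _, p := range env {
		if p.Attr == attr {
			return env // already there, nothing to do
		}
	}
	return append(env, Param{Attr: attr, Value: value, Readonly: true})
}

func main() {
	env := []Param{}
	env = ensureParam(env, "source", "s3://bucket/data") // added
	env = ensureParam(env, "source", "s3://bucket/data") // no duplicate
	fmt.Println(len(env)) // 1
}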
|
||||
|
||||
type StorageResourcePartnership struct {
|
||||
@@ -59,8 +94,12 @@ const (
|
||||
GARANTED_STORAGE
|
||||
)
|
||||
|
||||
func PrivilegeStoragePricingStrategyList() []PrivilegeStoragePricingStrategy {
|
||||
return []PrivilegeStoragePricingStrategy{BASIC_STORAGE, GARANTED_ON_DELAY_STORAGE, GARANTED_STORAGE}
|
||||
}
|
||||
|
||||
func (t PrivilegeStoragePricingStrategy) String() string {
|
||||
return [...]string{"BASIC_STORAGE", "GARANTED_ON_DELAY_STORAGE", "GARANTED_STORAGE"}[t]
|
||||
return [...]string{"NO MEMORY HOLDING", "KEEPED ON MEMORY GARANTED DURING DELAY", "KEEPED ON MEMORY GARANTED"}[t]
|
||||
}
|
||||
|
||||
type StorageResourcePricingStrategy int
|
||||
@@ -73,6 +112,14 @@ const (
|
||||
PER_KB_STORED
|
||||
)
|
||||
|
||||
func StorageResourcePricingStrategyList() []StorageResourcePricingStrategy {
|
||||
return []StorageResourcePricingStrategy{PER_DATA_STORED, PER_TB_STORED, PER_GB_STORED, PER_MB_STORED, PER_KB_STORED}
|
||||
}
|
||||
|
||||
func (t StorageResourcePricingStrategy) String() string {
|
||||
return [...]string{"PER DATA STORED", "PER TB STORED", "PER GB STORED", "PER MB STORED", "PER KB STORED"}[t]
|
||||
}
|
||||
|
||||
func (t StorageResourcePricingStrategy) GetStrategy() string {
|
||||
return [...]string{"PER_DATA_STORED", "PER_GB_STORED", "PER_MB_STORED", "PER_KB_STORED"}[t]
|
||||
}
|
||||
@@ -98,7 +145,7 @@ func (t StorageResourcePricingStrategy) GetQuantity(amountOfDataGB float64) (flo
|
||||
case PER_KB_STORED:
|
||||
return amountOfDataGB * 1000000, nil
|
||||
}
|
||||
return 0, errors.New("Pricing strategy not found")
|
||||
return 0, errors.New("pricing strategy not found")
|
||||
}
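GetQuantity converts an amount expressed in GB into the unit the strategy bills in. Only the PER_KB branch (GB × 1,000,000) is visible in this hunk, so the other factors in the sketch below are assumed decimal conversions for illustration, not the library's actual values.

package main

import (
	"errors"
	"fmt"
)

type strategy int

const (
	perTB strategy = iota
	perGB
	perMB
	perKB
)

// quantity converts a GB amount into the billing unit. Only the KB factor
// (x1_000_000) is taken from the diff above; TB/GB/MB are assumptions.
func quantity(s strategy, amountGB float64) (float64, error) {
	switch s {
	case perTB:
		return amountGB / 1000, nil
	case perGB:
		return amountGB, nil
	case perMB:
		return amountGB * 1000, nil
	case perKB:
		return amountGB * 1000000, nil
	}
	return 0, errors.New("pricing strategy not found")
}

func main() {
	q, _ := quantity(perKB, 2) // 2 GB billed per KB
	fmt.Println(q)             // 2e+06
}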
|
||||
|
||||
type StorageResourcePricingProfile struct {
|
||||
@@ -123,11 +170,20 @@ func (r *PricedStorageResource) GetType() tools.DataType {
|
||||
}
|
||||
|
||||
func (r *PricedStorageResource) GetPrice() (float64, error) {
|
||||
if r.UsageStart == nil || r.UsageEnd == nil {
|
||||
return 0, errors.New("Usage start and end must be set")
|
||||
fmt.Println("GetPrice", r.UsageStart, r.UsageEnd)
|
||||
now := time.Now()
|
||||
if r.UsageStart == nil {
|
||||
r.UsageStart = &now
|
||||
}
|
||||
if r.UsageEnd == nil {
|
||||
add := r.UsageStart.Add(time.Duration(1 * time.Hour))
|
||||
r.UsageEnd = &add
|
||||
}
|
||||
if r.SelectedPricing == nil {
|
||||
return 0, errors.New("Selected pricing must be set")
|
||||
if len(r.PricingProfiles) == 0 {
|
||||
return 0, errors.New("pricing profile must be set on Priced Storage" + r.ResourceID)
|
||||
}
|
||||
r.SelectedPricing = &r.PricingProfiles[0]
|
||||
}
|
||||
pricing := *r.SelectedPricing
|
||||
var err error
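The hunk above cuts off mid-function, but the new defaulting is visible: a missing usage window now falls back to now → now + 1h, and a missing SelectedPricing falls back to the first pricing profile instead of returning an error immediately. A hedged standalone sketch of that fallback (local types, not the oc-lib ones):

package main

import (
	"errors"
	"fmt"
	"time"
)

type window struct {
	Start, End *time.Time
	Profiles   []string // stand-in for the pricing profiles
	Selected   *string
}

// defaults fills the usage window and selected profile the way the diff above does.
func defaults(w *window) error {
	now := time.Now()
	if w.Start == nil {
		w.Start = &now
	}
	if w.End == nil {
		end := w.Start.Add(1 * time.Hour)
		w.End = &end
	}
	if w.Selected == nil {
		if len(w.Profiles) == 0 {
			return errors.New("pricing profile must be set")
		}
		w.Selected = &w.Profiles[0]
	}
	return nil
}

func main() {
	w := &window{Profiles: []string{"per-gb"}}
	if err := defaults(w); err == nil {
		fmt.Println(*w.Selected, w.End.Sub(*w.Start)) // per-gb 1h0m0s
	}
}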
|
||||
|
||||
@@ -2,74 +2,45 @@ package resources
|
||||
|
||||
import (
|
||||
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
)
|
||||
|
||||
// we don't have any information about the accessor
|
||||
type abstractWorkflowResource struct {
|
||||
WorkflowID string `bson:"workflow_id,omitempty" json:"workflow_id,omitempty"` // WorkflowID is the ID of the native workflow
|
||||
}
|
||||
type WorkflowResourcePricingProfile struct{}
|
||||
|
||||
// WorkflowResource is a struct that represents a workflow resource
|
||||
// it defines the resource workflow
|
||||
type WorkflowResource struct {
|
||||
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
|
||||
Logo string `json:"logo,omitempty" bson:"logo,omitempty" validate:"required"` // Logo is the logo of the resource
|
||||
Description string `json:"description,omitempty" bson:"description,omitempty"` // Description is the description of the resource
|
||||
ShortDescription string `json:"short_description,omitempty" bson:"short_description,omitempty" validate:"required"` // ShortDescription is the short description of the resource
|
||||
Owners []utils.Owner `json:"owners,omitempty" bson:"owners,omitempty"` // Owners is the list of owners of the resource
|
||||
ResourceModel *resource_model.ResourceModel `json:"resource_model,omitempty" bson:"resource_model,omitempty"` // ResourceModel is the model of the resource
|
||||
UsageRestrictions string `bson:"usage_restrictions,omitempty" json:"usage_restrictions,omitempty"`
|
||||
abstractWorkflowResource
|
||||
AbstractResource
|
||||
WorkflowID string `bson:"workflow_id,omitempty" json:"workflow_id,omitempty"` // WorkflowID is the ID of the native workflow
|
||||
}
|
||||
|
||||
func (r *WorkflowResource) Transform() utils.DBObject {
|
||||
return r
|
||||
func (d *WorkflowResource) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
||||
return NewAccessor[*ComputeResource](tools.WORKFLOW_RESOURCE, request, func() utils.DBObject { return &WorkflowResource{} })
|
||||
}
|
||||
|
||||
func (r *WorkflowResource) StoreDraftDefault() {
|
||||
r.IsDraft = true
|
||||
}
|
||||
func (r *WorkflowResource) CanUpdate(set utils.DBObject) (bool, utils.DBObject) {
|
||||
if r.IsDraft != set.IsDrafted() && set.IsDrafted() {
|
||||
return true, set // only state can be updated
|
||||
}
|
||||
return r.IsDraft != set.IsDrafted() && set.IsDrafted(), set
|
||||
func (r *WorkflowResource) GetType() string {
|
||||
return tools.WORKFLOW_RESOURCE.String()
|
||||
}
|
||||
|
||||
func (r *WorkflowResource) CanDelete() bool {
|
||||
return r.IsDraft // only draft bookings can be deleted
|
||||
func (d *WorkflowResource) ClearEnv() utils.DBObject {
|
||||
return d
|
||||
}
|
||||
|
||||
func (ao *WorkflowResource) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
||||
return nil
|
||||
func (d *WorkflowResource) Trim() {
|
||||
/* EMPTY */
|
||||
}
|
||||
|
||||
func (abs *WorkflowResource) SetResourceModel(model *resource_model.ResourceModel) {
|
||||
abs.ResourceModel = model
|
||||
}
|
||||
|
||||
func (w *WorkflowResource) Trim() {
|
||||
/*EMPTY AND PROUD TO BE*/
|
||||
}
|
||||
|
||||
func (w *WorkflowResource) SetAllowedInstances(request *tools.APIRequest) {
|
||||
/*EMPTY AND PROUD TO BE*/
|
||||
/* EMPTY */
|
||||
}
|
||||
|
||||
func (w *WorkflowResource) ConvertToPricedResource(
|
||||
t tools.DataType, request *tools.APIRequest) pricing.PricedItemITF {
|
||||
instances := map[string]string{}
|
||||
profiles := map[string][]pricing.PricingProfileITF{}
|
||||
return &PricedResource{
|
||||
Name: w.Name,
|
||||
Logo: w.Logo,
|
||||
ResourceID: w.UUID,
|
||||
ResourceType: t,
|
||||
InstancesRefs: instances,
|
||||
PricingProfiles: profiles,
|
||||
CreatorID: w.CreatorID,
|
||||
Name: w.Name,
|
||||
Logo: w.Logo,
|
||||
ResourceID: w.UUID,
|
||||
ResourceType: t,
|
||||
CreatorID: w.CreatorID,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,15 +27,20 @@ const (
|
||||
* every data in base root model should inherit from this struct (only exception is the ResourceModel)
|
||||
*/
|
||||
type AbstractObject struct {
|
||||
UUID string `json:"id,omitempty" bson:"id,omitempty" validate:"required"`
|
||||
Name string `json:"name,omitempty" bson:"name,omitempty" validate:"required"`
|
||||
IsDraft bool `json:"is_draft" bson:"is_draft" default:"false"`
|
||||
CreatorID string `json:"creator_id" bson:"creator_id" default:"unknown"`
|
||||
UUID string `json:"id,omitempty" bson:"id,omitempty" validate:"required"`
|
||||
Name string `json:"name,omitempty" bson:"name,omitempty" validate:"required"`
|
||||
IsDraft bool `json:"is_draft" bson:"is_draft" default:"false"`
|
||||
CreatorID string `json:"creator_id,omitempty" bson:"creator_id,omitempty"`
|
||||
UserCreatorID string `json:"user_creator_id,omitempty" bson:"user_creator_id,omitempty"`
|
||||
CreationDate time.Time `json:"creation_date,omitempty" bson:"creation_date,omitempty"`
|
||||
UpdateDate time.Time `json:"update_date,omitempty" bson:"update_date,omitempty"`
|
||||
UpdaterID string `json:"updater_id,omitempty" bson:"updater_id,omitempty"`
|
||||
UserUpdaterID string `json:"user_updater_id,omitempty" bson:"user_updater_id,omitempty"`
|
||||
AccessMode AccessMode `json:"access_mode" bson:"access_mode" default:"0"`
|
||||
}
|
||||
|
||||
CreationDate time.Time `json:"creation_date" bson:"creation_date"`
|
||||
UpdateDate time.Time `json:"update_date" bson:"update_date"`
|
||||
UpdaterID string `json:"updater_id" bson:"updater_id"`
|
||||
AccessMode AccessMode `json:"access_mode" bson:"access_mode" default:"0"`
|
||||
func (ri *AbstractObject) GetAccessor(request *tools.APIRequest) Accessor {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *AbstractObject) GenerateID() {
|
||||
@@ -74,20 +79,25 @@ func (ao *AbstractObject) GetCreatorID() string {
|
||||
return ao.CreatorID
|
||||
}
|
||||
|
||||
func (ao *AbstractObject) UpToDate(user string, create bool) {
|
||||
func (ao *AbstractObject) UpToDate(user string, peer string, create bool) {
|
||||
ao.UpdateDate = time.Now()
|
||||
ao.UpdaterID = user
|
||||
ao.UpdaterID = peer
|
||||
ao.UserUpdaterID = user
|
||||
if create {
|
||||
ao.CreationDate = time.Now()
|
||||
ao.CreatorID = user
|
||||
ao.CreatorID = peer
|
||||
ao.UserCreatorID = user
|
||||
}
|
||||
}
|
||||
|
||||
func (ao *AbstractObject) VerifyAuth(request *tools.APIRequest) bool {
|
||||
return ao.AccessMode == Public || (request != nil && ao.CreatorID == request.Username)
|
||||
return ao.AccessMode == Public || (request != nil && ao.CreatorID == request.PeerID && request.PeerID != "")
|
||||
}
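Together with the new UpToDate(user, peer, create) stamping above, authorization now compares the stored CreatorID against the request's PeerID instead of the username, and an empty peer id never passes. A minimal sketch of that rule with illustrative local types:

package main

import "fmt"

type accessMode int

const (
	private accessMode = iota
	public
)

type object struct {
	CreatorID string
	Access    accessMode
}

type apiRequest struct {
	PeerID string
}

// verifyAuth mirrors the new rule: public objects pass, otherwise the
// requesting peer must be the creator and must actually carry a peer id.
func verifyAuth(o object, r *apiRequest) bool {
	return o.Access == public || (r != nil && o.CreatorID == r.PeerID && r.PeerID != "")
}

func main() {
	o := object{CreatorID: "peer-a", Access: private}
	fmt.Println(verifyAuth(o, &apiRequest{PeerID: "peer-a"})) // true
	fmt.Println(verifyAuth(o, &apiRequest{PeerID: ""}))       // false
	fmt.Println(verifyAuth(o, nil))                           // false
}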
|
||||
|
||||
func (ao *AbstractObject) GetObjectFilters(search string) *dbs.Filters {
|
||||
if search == "*" {
|
||||
search = ""
|
||||
}
|
||||
return &dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
|
||||
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
@@ -120,6 +130,10 @@ type AbstractAccessor struct {
|
||||
ResourceModelAccessor Accessor
|
||||
}
|
||||
|
||||
func (r *AbstractAccessor) ShouldVerifyAuth() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *AbstractAccessor) GetRequest() *tools.APIRequest {
|
||||
return r.Request
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package utils
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||
@@ -14,11 +13,22 @@ type Owner struct {
|
||||
Logo string `json:"logo,omitempty" bson:"logo,omitempty"`
|
||||
}
|
||||
|
||||
func VerifyAccess(a Accessor, id string) error {
|
||||
data, _, err := a.LoadOne(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if a.ShouldVerifyAuth() && !data.VerifyAuth(a.GetRequest()) {
|
||||
return errors.New("you are not allowed to access :" + a.GetType().String())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GenericLoadOne loads one object from the database (generic)
|
||||
func GenericStoreOne(data DBObject, a Accessor) (DBObject, int, error) {
|
||||
data.GenerateID()
|
||||
data.StoreDraftDefault()
|
||||
data.UpToDate(a.GetUser(), true)
|
||||
data.UpToDate(a.GetUser(), a.GetPeerID(), true)
|
||||
f := dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{
|
||||
"abstractresource.abstractobject.name": {{
|
||||
@@ -31,8 +41,8 @@ func GenericStoreOne(data DBObject, a Accessor) (DBObject, int, error) {
|
||||
}},
|
||||
},
|
||||
}
|
||||
if !data.VerifyAuth(a.GetRequest()) {
|
||||
return nil, 403, errors.New("you are not allowed to access this collaborative area")
|
||||
if a.ShouldVerifyAuth() && !data.VerifyAuth(a.GetRequest()) {
|
||||
return nil, 403, errors.New("you are not allowed to access : " + a.GetType().String())
|
||||
}
|
||||
if cursor, _, _ := a.Search(&f, "", data.IsDrafted()); len(cursor) > 0 {
|
||||
return nil, 409, errors.New(a.GetType().String() + " with name " + data.GetName() + " already exists")
|
||||
@@ -53,14 +63,13 @@ func GenericStoreOne(data DBObject, a Accessor) (DBObject, int, error) {
|
||||
func GenericDeleteOne(id string, a Accessor) (DBObject, int, error) {
|
||||
res, code, err := a.LoadOne(id)
|
||||
if !res.CanDelete() {
|
||||
return nil, 403, errors.New("you are not allowed to delete this collaborative area")
|
||||
return nil, 403, errors.New("you are not allowed to delete :" + a.GetType().String())
|
||||
}
|
||||
if err != nil {
|
||||
a.GetLogger().Error().Msg("Could not retrieve " + id + " to db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
if !res.VerifyAuth(a.GetRequest()) {
|
||||
return nil, 403, errors.New("you are not allowed to access this collaborative area")
|
||||
if a.ShouldVerifyAuth() && !res.VerifyAuth(a.GetRequest()) {
|
||||
return nil, 403, errors.New("you are not allowed to access " + a.GetType().String())
|
||||
}
|
||||
_, code, err = mongo.MONGOService.DeleteOne(id, a.GetType().String())
|
||||
if err != nil {
|
||||
@@ -79,12 +88,12 @@ func GenericUpdateOne(set DBObject, id string, a Accessor, new DBObject) (DBObje
|
||||
}
|
||||
ok, newSet := r.CanUpdate(set)
|
||||
if !ok {
|
||||
return nil, 403, errors.New("you are not allowed to delete this collaborative area")
|
||||
return nil, 403, errors.New("you are not allowed to delete :" + a.GetType().String())
|
||||
}
|
||||
set = newSet
|
||||
r.UpToDate(a.GetUser(), false)
|
||||
if !r.VerifyAuth(a.GetRequest()) {
|
||||
return nil, 403, errors.New("you are not allowed to access this collaborative area")
|
||||
r.UpToDate(a.GetUser(), a.GetPeerID(), false)
|
||||
if a.ShouldVerifyAuth() && !r.VerifyAuth(a.GetRequest()) {
|
||||
return nil, 403, errors.New("you are not allowed to access :" + a.GetType().String())
|
||||
}
|
||||
change := set.Serialize(set) // get the changes
|
||||
loaded := r.Serialize(r) // get the loaded object
|
||||
@@ -104,12 +113,11 @@ func GenericLoadOne[T DBObject](id string, f func(DBObject) (DBObject, int, erro
|
||||
var data T
|
||||
res_mongo, code, err := mongo.MONGOService.LoadOne(id, a.GetType().String())
|
||||
if err != nil {
|
||||
a.GetLogger().Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
res_mongo.Decode(&data)
|
||||
if !data.VerifyAuth(a.GetRequest()) {
|
||||
return nil, 403, errors.New("you are not allowed to access this collaborative area")
|
||||
if a.ShouldVerifyAuth() && !data.VerifyAuth(a.GetRequest()) {
|
||||
return nil, 403, errors.New("you are not allowed to access :" + a.GetType().String())
|
||||
}
|
||||
return f(data)
|
||||
}
|
||||
@@ -118,14 +126,13 @@ func genericLoadAll[T DBObject](res *mgb.Cursor, code int, err error, onlyDraft
|
||||
objs := []ShallowDBObject{}
|
||||
var results []T
|
||||
if err != nil {
|
||||
a.GetLogger().Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
if err = res.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
for _, r := range results {
|
||||
if !r.VerifyAuth(a.GetRequest()) || f(r) == nil || (onlyDraft && !r.IsDrafted()) || (!onlyDraft && r.IsDrafted()) {
|
||||
if (a.ShouldVerifyAuth() && !r.VerifyAuth(a.GetRequest())) || f(r) == nil || (onlyDraft && !r.IsDrafted()) || (!onlyDraft && r.IsDrafted()) {
|
||||
continue
|
||||
}
|
||||
objs = append(objs, f(r))
|
||||
@@ -135,13 +142,12 @@ func genericLoadAll[T DBObject](res *mgb.Cursor, code int, err error, onlyDraft
|
||||
|
||||
func GenericLoadAll[T DBObject](f func(DBObject) ShallowDBObject, onlyDraft bool, wfa Accessor) ([]ShallowDBObject, int, error) {
|
||||
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType().String())
|
||||
fmt.Println("res_mongo", res_mongo)
|
||||
return genericLoadAll[T](res_mongo, code, err, onlyDraft, f, wfa)
|
||||
}
|
||||
|
||||
func GenericSearch[T DBObject](filters *dbs.Filters, search string, defaultFilters *dbs.Filters,
|
||||
f func(DBObject) ShallowDBObject, onlyDraft bool, wfa Accessor) ([]ShallowDBObject, int, error) {
|
||||
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||
if filters == nil && search != "" {
|
||||
filters = defaultFilters
|
||||
}
|
||||
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType().String())
|
||||
|
||||
@@ -11,8 +11,8 @@ type ShallowDBObject interface {
|
||||
GenerateID()
|
||||
GetID() string
|
||||
GetName() string
|
||||
Deserialize(j map[string]interface{}, obj DBObject) DBObject
|
||||
Serialize(obj DBObject) map[string]interface{}
|
||||
Deserialize(j map[string]interface{}, obj DBObject) DBObject
|
||||
}
|
||||
|
||||
// DBObject is an interface that defines the basic methods for a DBObject
|
||||
@@ -20,32 +20,33 @@ type DBObject interface {
|
||||
GenerateID()
|
||||
GetID() string
|
||||
GetName() string
|
||||
GetCreatorID() string
|
||||
IsDrafted() bool
|
||||
StoreDraftDefault()
|
||||
CanUpdate(set DBObject) (bool, DBObject)
|
||||
CanDelete() bool
|
||||
UpToDate(user string, create bool)
|
||||
StoreDraftDefault()
|
||||
GetCreatorID() string
|
||||
UpToDate(user string, peer string, create bool)
|
||||
CanUpdate(set DBObject) (bool, DBObject)
|
||||
VerifyAuth(request *tools.APIRequest) bool
|
||||
Deserialize(j map[string]interface{}, obj DBObject) DBObject
|
||||
Serialize(obj DBObject) map[string]interface{}
|
||||
GetAccessor(request *tools.APIRequest) Accessor
|
||||
Deserialize(j map[string]interface{}, obj DBObject) DBObject
|
||||
}
|
||||
|
||||
// Accessor is an interface that defines the basic methods for an Accessor
|
||||
type Accessor interface {
|
||||
GetRequest() *tools.APIRequest
|
||||
GetType() tools.DataType
|
||||
GetUser() string
|
||||
GetPeerID() string
|
||||
GetGroups() []string
|
||||
ShouldVerifyAuth() bool
|
||||
GetType() tools.DataType
|
||||
GetLogger() *zerolog.Logger
|
||||
GetCaller() *tools.HTTPCaller
|
||||
Search(filters *dbs.Filters, search string, isDraft bool) ([]ShallowDBObject, int, error)
|
||||
LoadAll(isDraft bool) ([]ShallowDBObject, int, error)
|
||||
GetRequest() *tools.APIRequest
|
||||
LoadOne(id string) (DBObject, int, error)
|
||||
DeleteOne(id string) (DBObject, int, error)
|
||||
CopyOne(data DBObject) (DBObject, int, error)
|
||||
StoreOne(data DBObject) (DBObject, int, error)
|
||||
LoadAll(isDraft bool) ([]ShallowDBObject, int, error)
|
||||
UpdateOne(set DBObject, id string) (DBObject, int, error)
|
||||
Search(filters *dbs.Filters, search string, isDraft bool) ([]ShallowDBObject, int, error)
|
||||
}
|
||||
|
||||
@@ -9,9 +9,49 @@ import (
|
||||
|
||||
// Graph is a struct that represents a graph
|
||||
type Graph struct {
|
||||
Zoom float64 `bson:"zoom" json:"zoom" default:"1"` // Zoom is the graphical zoom of the graph
|
||||
Items map[string]GraphItem `bson:"items" json:"items" default:"{}" validate:"required"` // Items is the list of elements in the graph
|
||||
Links []GraphLink `bson:"links" json:"links" default:"{}" validate:"required"` // Links is the list of links between elements in the graph
|
||||
Partial bool `json:"partial" default:"false"` // Partial is a flag that indicates if the graph is partial
|
||||
Zoom float64 `bson:"zoom" json:"zoom" default:"1"` // Zoom is the graphical zoom of the graph
|
||||
Items map[string]GraphItem `bson:"items" json:"items" default:"{}" validate:"required"` // Items is the list of elements in the graph
|
||||
Links []GraphLink `bson:"links" json:"links" default:"{}" validate:"required"` // Links is the list of links between elements in the graph
|
||||
}
|
||||
|
||||
func (g *Graph) Clear(id string) {
|
||||
realItems := map[string]GraphItem{}
|
||||
for k, it := range g.Items {
|
||||
if k == id {
|
||||
realinks := []GraphLink{}
|
||||
for _, link := range g.Links {
|
||||
if link.Source.ID != id && link.Destination.ID != id {
|
||||
realinks = append(realinks, link)
|
||||
}
|
||||
}
|
||||
g.Links = realinks
|
||||
g.Partial = true
|
||||
} else {
|
||||
realItems[k] = it
|
||||
}
|
||||
}
|
||||
g.Items = realItems
|
||||
}
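Clear above removes one item, drops every link touching it, and flags the graph as Partial so it can no longer be stored as a full workflow. A standalone sketch with simplified types (string labels stand in for GraphItem):

package main

import "fmt"

type link struct{ Source, Destination string }

type graph struct {
	Items   map[string]string // id -> label, stand-in for GraphItem
	Links   []link
	Partial bool
}

// clear drops the item with the given id and every link that touches it.
func (g *graph) clear(id string) {
	if _, ok := g.Items[id]; !ok {
		return
	}
	delete(g.Items, id)
	kept := []link{}
	for _, l := range g.Links {
		if l.Source != id && l.Destination != id {
			kept = append(kept, l)
		}
	}
	g.Links = kept
	g.Partial = true
}

func main() {
	g := &graph{
		Items: map[string]string{"a": "proc", "b": "storage"},
		Links: []link{{Source: "a", Destination: "b"}},
	}
	g.clear("b")
	fmt.Println(len(g.Items), len(g.Links), g.Partial) // 1 0 true
}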
|
||||
|
||||
func (wf *Graph) IsProcessing(item GraphItem) bool {
|
||||
return item.Processing != nil
|
||||
}
|
||||
|
||||
func (wf *Graph) IsCompute(item GraphItem) bool {
|
||||
return item.Compute != nil
|
||||
}
|
||||
|
||||
func (wf *Graph) IsData(item GraphItem) bool {
|
||||
return item.Data != nil
|
||||
}
|
||||
|
||||
func (wf *Graph) IsStorage(item GraphItem) bool {
|
||||
return item.Storage != nil
|
||||
}
|
||||
|
||||
func (wf *Graph) IsWorkflow(item GraphItem) bool {
|
||||
return item.Workflow != nil
|
||||
}
|
||||
|
||||
func (g *Graph) GetAverageTimeRelatedToProcessingActivity(start time.Time, processings []*resources.ProcessingResource, resource resources.ResourceInterface,
|
||||
@@ -105,71 +145,3 @@ func (g *Graph) GetResource(id string) (tools.DataType, resources.ResourceInterf
|
||||
}
|
||||
return tools.INVALID, nil
|
||||
}
|
||||
|
||||
// GraphItem is a struct that represents an item in a graph
|
||||
type GraphItem struct {
|
||||
ID string `bson:"id" json:"id" validate:"required"` // ID is the unique identifier of the item
|
||||
Width float64 `bson:"width" json:"width" validate:"required"` // Width is the graphical width of the item
|
||||
Height float64 `bson:"height" json:"height" validate:"required"` // Height is the graphical height of the item
|
||||
Position Position `bson:"position" json:"position" validate:"required"` // Position is the graphical position of the item
|
||||
*resources.ItemResource // ItemResource is the resource of the item affected to the item
|
||||
}
|
||||
|
||||
func (g *GraphItem) GetResource() (tools.DataType, resources.ResourceInterface) {
|
||||
if g.Data != nil {
|
||||
return tools.DATA_RESOURCE, g.Data
|
||||
} else if g.Compute != nil {
|
||||
return tools.COMPUTE_RESOURCE, g.Compute
|
||||
} else if g.Workflow != nil {
|
||||
return tools.WORKFLOW_RESOURCE, g.Workflow
|
||||
} else if g.Processing != nil {
|
||||
return tools.PROCESSING_RESOURCE, g.Processing
|
||||
} else if g.Storage != nil {
|
||||
return tools.STORAGE_RESOURCE, g.Storage
|
||||
}
|
||||
return tools.INVALID, nil
|
||||
}
|
||||
|
||||
// GraphLink is a struct that represents a link between two items in a graph
|
||||
type GraphLink struct {
|
||||
Source Position `bson:"source" json:"source" validate:"required"` // Source is the source graphical position of the link
|
||||
Destination Position `bson:"destination" json:"destination" validate:"required"` // Destination is the destination graphical position of the link
|
||||
Style *GraphLinkStyle `bson:"style,omitempty" json:"style,omitempty"` // Style is the graphical style of the link
|
||||
}
|
||||
|
||||
// tool function to check if a link is a link between a compute and a resource
|
||||
func (l *GraphLink) IsComputeLink(g Graph) (bool, string) {
|
||||
if g.Items == nil {
|
||||
return false, ""
|
||||
}
|
||||
if d, ok := g.Items[l.Source.ID]; ok && d.Compute != nil {
|
||||
return true, d.Compute.UUID
|
||||
}
|
||||
if d, ok := g.Items[l.Destination.ID]; ok && d.Compute != nil {
|
||||
return true, d.Compute.UUID
|
||||
}
|
||||
return false, ""
|
||||
}
|
||||
|
||||
// GraphLinkStyle is a struct that represents the style of a link in a graph
|
||||
type GraphLinkStyle struct {
|
||||
Color int64 `bson:"color" json:"color"` // Color is the graphical color of the link (int description of a color, can be transposed to hex)
|
||||
Stroke float64 `bson:"stroke" json:"stroke"` // Stroke is the graphical stroke of the link
|
||||
Tension float64 `bson:"tension" json:"tension"` // Tension is the graphical tension of the link
|
||||
HeadRadius float64 `bson:"head_radius" json:"head_radius"` // graphical pin radius
|
||||
DashWidth float64 `bson:"dash_width" json:"dash_width"` // DashWidth is the graphical dash width of the link
|
||||
DashSpace float64 `bson:"dash_space" json:"dash_space"` // DashSpace is the graphical dash space of the link
|
||||
EndArrow Position `bson:"end_arrow" json:"end_arrow"` // EndArrow is the graphical end arrow of the link
|
||||
StartArrow Position `bson:"start_arrow" json:"start_arrow"` // StartArrow is the graphical start arrow of the link
|
||||
ArrowStyle int64 `bson:"arrow_style" json:"arrow_style"` // ArrowStyle is the graphical arrow style of the link (enum available in the UI)
|
||||
ArrowDirection int64 `bson:"arrow_direction" json:"arrow_direction"` // ArrowDirection is the graphical arrow direction of the link (enum available in the UI)
|
||||
StartArrowWidth float64 `bson:"start_arrow_width" json:"start_arrow_width"` // StartArrowWidth is the graphical start arrow width of the link
|
||||
EndArrowWidth float64 `bson:"end_arrow_width" json:"end_arrow_width"` // EndArrowWidth is the graphical end arrow width of the link
|
||||
}
|
||||
|
||||
// Position is a struct that represents a graphical position
|
||||
type Position struct {
|
||||
ID string `json:"id" bson:"id"` // ID represents the ItemID (optional)
|
||||
X float64 `json:"x" bson:"x" validate:"required"` // X is the graphical x position
|
||||
Y float64 `json:"y" bson:"y" validate:"required"` // Y is the graphical y position
|
||||
}
|
||||
|
||||
models/workflow/graph/item.go (new file, 38 lines)
@@ -0,0 +1,38 @@
|
||||
package graph
|
||||
|
||||
import (
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
)
|
||||
|
||||
// GraphItem is a struct that represents an item in a graph
|
||||
type GraphItem struct {
|
||||
ID string `bson:"id" json:"id" validate:"required"` // ID is the unique identifier of the item
|
||||
Width float64 `bson:"width" json:"width" validate:"required"` // Width is the graphical width of the item
|
||||
Height float64 `bson:"height" json:"height" validate:"required"` // Height is the graphical height of the item
|
||||
Position Position `bson:"position" json:"position" validate:"required"` // Position is the graphical position of the item
|
||||
*resources.ItemResource // ItemResource is the resource of the item affected to the item
|
||||
}
|
||||
|
||||
func (g *GraphItem) GetResource() (tools.DataType, resources.ResourceInterface) {
|
||||
if g.Data != nil {
|
||||
return tools.DATA_RESOURCE, g.Data
|
||||
} else if g.Compute != nil {
|
||||
return tools.COMPUTE_RESOURCE, g.Compute
|
||||
} else if g.Workflow != nil {
|
||||
return tools.WORKFLOW_RESOURCE, g.Workflow
|
||||
} else if g.Processing != nil {
|
||||
return tools.PROCESSING_RESOURCE, g.Processing
|
||||
} else if g.Storage != nil {
|
||||
return tools.STORAGE_RESOURCE, g.Storage
|
||||
}
|
||||
return tools.INVALID, nil
|
||||
}
|
||||
|
||||
func (g *GraphItem) Clear() {
|
||||
g.Data = nil
|
||||
g.Compute = nil
|
||||
g.Workflow = nil
|
||||
g.Processing = nil
|
||||
g.Storage = nil
|
||||
}
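Clear above nulls every resource slot of an item, so a subsequent GetResource falls through to the INVALID branch. A toy version of that slot dispatch on a reduced set of slots:

package main

import "fmt"

type item struct {
	Data, Compute, Storage *string
}

// kind returns which slot is populated, or "invalid" when all are nil,
// mirroring GetResource above on fewer slots.
func (i *item) kind() string {
	switch {
	case i.Data != nil:
		return "data"
	case i.Compute != nil:
		return "compute"
	case i.Storage != nil:
		return "storage"
	}
	return "invalid"
}

func (i *item) clear() { i.Data, i.Compute, i.Storage = nil, nil, nil }

func main() {
	s := "minio"
	it := &item{Storage: &s}
	fmt.Println(it.kind()) // storage
	it.clear()
	fmt.Println(it.kind()) // invalid
}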
|
||||
models/workflow/graph/link.go (new file, 56 lines)
@@ -0,0 +1,56 @@
|
||||
package graph
|
||||
|
||||
import "cloud.o-forge.io/core/oc-lib/models/common/models"
|
||||
|
||||
type StorageProcessingGraphLink struct {
|
||||
Write bool `json:"write" bson:"write"`
|
||||
Source string `json:"source" bson:"source"`
|
||||
Destination string `json:"destination" bson:"destination"`
|
||||
FileName string `json:"filename" bson:"filename"`
|
||||
}
|
||||
|
||||
// GraphLink is a struct that represents a link between two items in a graph
|
||||
type GraphLink struct {
|
||||
Source Position `bson:"source" json:"source" validate:"required"` // Source is the source graphical position of the link
|
||||
Destination Position `bson:"destination" json:"destination" validate:"required"` // Destination is the destination graphical position of the link
|
||||
Style *GraphLinkStyle `bson:"style,omitempty" json:"style,omitempty"` // Style is the graphical style of the link
|
||||
StorageLinkInfos []StorageProcessingGraphLink `bson:"storage_link_infos,omitempty" json:"storage_link_infos,omitempty"` // StorageLinkInfo is the storage link info
|
||||
Env []models.Param `json:"env" bson:"env"`
|
||||
}
|
||||
|
||||
// tool function to check if a link is a link between a compute and a resource
|
||||
func (l *GraphLink) IsComputeLink(g Graph) (bool, string) {
|
||||
if g.Items == nil {
|
||||
return false, ""
|
||||
}
|
||||
if d, ok := g.Items[l.Source.ID]; ok && d.Compute != nil {
|
||||
return true, d.Compute.UUID
|
||||
}
|
||||
if d, ok := g.Items[l.Destination.ID]; ok && d.Compute != nil {
|
||||
return true, d.Compute.UUID
|
||||
}
|
||||
return false, ""
|
||||
}
|
||||
|
||||
// GraphLinkStyle is a struct that represents the style of a link in a graph
|
||||
type GraphLinkStyle struct {
|
||||
Color int64 `bson:"color" json:"color"` // Color is the graphical color of the link (int description of a color, can be transposed to hex)
|
||||
Stroke float64 `bson:"stroke" json:"stroke"` // Stroke is the graphical stroke of the link
|
||||
Tension float64 `bson:"tension" json:"tension"` // Tension is the graphical tension of the link
|
||||
HeadRadius float64 `bson:"head_radius" json:"head_radius"` // graphical pin radius
|
||||
DashWidth float64 `bson:"dash_width" json:"dash_width"` // DashWidth is the graphical dash width of the link
|
||||
DashSpace float64 `bson:"dash_space" json:"dash_space"` // DashSpace is the graphical dash space of the link
|
||||
EndArrow Position `bson:"end_arrow" json:"end_arrow"` // EndArrow is the graphical end arrow of the link
|
||||
StartArrow Position `bson:"start_arrow" json:"start_arrow"` // StartArrow is the graphical start arrow of the link
|
||||
ArrowStyle int64 `bson:"arrow_style" json:"arrow_style"` // ArrowStyle is the graphical arrow style of the link (enum available in the UI)
|
||||
ArrowDirection int64 `bson:"arrow_direction" json:"arrow_direction"` // ArrowDirection is the graphical arrow direction of the link (enum available in the UI)
|
||||
StartArrowWidth float64 `bson:"start_arrow_width" json:"start_arrow_width"` // StartArrowWidth is the graphical start arrow width of the link
|
||||
EndArrowWidth float64 `bson:"end_arrow_width" json:"end_arrow_width"` // EndArrowWidth is the graphical end arrow width of the link
|
||||
}
|
||||
|
||||
// Position is a struct that represents a graphical position
|
||||
type Position struct {
|
||||
ID string `json:"id" bson:"id"` // ID represents the ItemID (optional)
|
||||
X float64 `json:"x" bson:"x" validate:"required"` // X is the graphical x position
|
||||
Y float64 `json:"y" bson:"y" validate:"required"` // Y is the graphical y position
|
||||
}
|
||||
@@ -2,10 +2,10 @@ package workflow
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
|
||||
"cloud.o-forge.io/core/oc-lib/models/common"
|
||||
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
|
||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources"
|
||||
@@ -15,24 +15,23 @@ import (
|
||||
)
|
||||
|
||||
/*
|
||||
* AbstractWorkflow is a struct that represents a workflow for resource or native workflow
|
||||
* Warning: there is 2 types of workflows, the resource workflow and the native workflow
|
||||
* native workflow is the one that you create to schedule an execution
|
||||
* resource workflow is the one that is created to set our native workflow in catalog
|
||||
* Workflow is a struct that represents a workflow
|
||||
* it defines the native workflow
|
||||
*/
|
||||
type AbstractWorkflow struct {
|
||||
type Workflow struct {
|
||||
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
|
||||
resources.ResourceSet
|
||||
Graph *graph.Graph `bson:"graph,omitempty" json:"graph,omitempty"` // Graph UI & logic representation of the workflow
|
||||
ScheduleActive bool `json:"schedule_active" bson:"schedule_active"` // ScheduleActive is a flag that indicates if the schedule is active, if not the workflow is not scheduled and no execution or booking will be set
|
||||
// Schedule *WorkflowSchedule `bson:"schedule,omitempty" json:"schedule,omitempty"` // Schedule is the schedule of the workflow
|
||||
Shared []string `json:"shared,omitempty" bson:"shared,omitempty"` // Shared is the ID of the shared workflow
|
||||
Shared []string `json:"shared,omitempty" bson:"shared,omitempty"` // Shared is the ID of the shared workflow // AbstractWorkflow contains the basic fields of a workflow
|
||||
}
|
||||
|
||||
func (d *Workflow) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
||||
return NewAccessor(request) // Create a new instance of the accessor
|
||||
}
|
||||
|
||||
func (w *AbstractWorkflow) GetGraphItems(f func(item graph.GraphItem) bool) (list_datas []graph.GraphItem) {
|
||||
func (w *Workflow) GetGraphItems(f func(item graph.GraphItem) bool) (list_datas []graph.GraphItem) {
|
||||
for _, item := range w.Graph.Items {
|
||||
if f(item) {
|
||||
list_datas = append(list_datas, item)
|
||||
@@ -41,18 +40,7 @@ func (w *AbstractWorkflow) GetGraphItems(f func(item graph.GraphItem) bool) (lis
|
||||
return
|
||||
}
|
||||
|
||||
func (w *AbstractWorkflow) GetResources(f func(item graph.GraphItem) bool) map[string]resources.ResourceInterface {
|
||||
list_datas := map[string]resources.ResourceInterface{}
|
||||
for _, item := range w.Graph.Items {
|
||||
if f(item) {
|
||||
_, res := item.GetResource()
|
||||
list_datas[res.GetID()] = res
|
||||
}
|
||||
}
|
||||
return list_datas
|
||||
}
|
||||
|
||||
func (w *AbstractWorkflow) GetPricedItem(f func(item graph.GraphItem) bool, request *tools.APIRequest) map[string]pricing.PricedItemITF {
|
||||
func (w *Workflow) GetPricedItem(f func(item graph.GraphItem) bool, request *tools.APIRequest) map[string]pricing.PricedItemITF {
|
||||
list_datas := map[string]pricing.PricedItemITF{}
|
||||
for _, item := range w.Graph.Items {
|
||||
if f(item) {
|
||||
@@ -64,8 +52,13 @@ func (w *AbstractWorkflow) GetPricedItem(f func(item graph.GraphItem) bool, requ
|
||||
return list_datas
|
||||
}
|
||||
|
||||
func (w *AbstractWorkflow) GetByRelatedProcessing(processingID string, g func(item graph.GraphItem) bool) []resources.ResourceInterface {
|
||||
storages := []resources.ResourceInterface{}
|
||||
type Related struct {
|
||||
Node resources.ResourceInterface
|
||||
Links []graph.GraphLink
|
||||
}
|
||||
|
||||
func (w *Workflow) GetByRelatedProcessing(processingID string, g func(item graph.GraphItem) bool) map[string]Related {
|
||||
related := map[string]Related{}
|
||||
for _, link := range w.Graph.Links {
|
||||
nodeID := link.Destination.ID
|
||||
var node resources.ResourceInterface
|
||||
@@ -79,47 +72,16 @@ func (w *AbstractWorkflow) GetByRelatedProcessing(processingID string, g func(it
|
||||
_, node = item.GetResource() // we are looking for the storage as destination
|
||||
}
|
||||
if processingID == nodeID && node != nil { // if the storage is linked to the processing
|
||||
storages = append(storages, node)
|
||||
if _, ok := related[processingID]; !ok {
|
||||
related[processingID] = Related{}
|
||||
}
|
||||
rel := related[node.GetID()]
|
||||
rel.Node = node
|
||||
rel.Links = append(rel.Links, link)
|
||||
related[processingID] = rel
|
||||
}
|
||||
}
|
||||
return storages
|
||||
}
|
||||
|
||||
func (wf *AbstractWorkflow) IsProcessing(item graph.GraphItem) bool {
|
||||
return item.Processing != nil
|
||||
}
|
||||
|
||||
func (wf *AbstractWorkflow) IsCompute(item graph.GraphItem) bool {
|
||||
return item.Compute != nil
|
||||
}
|
||||
|
||||
func (wf *AbstractWorkflow) IsData(item graph.GraphItem) bool {
|
||||
return item.Data != nil
|
||||
}
|
||||
|
||||
func (wf *AbstractWorkflow) IsStorage(item graph.GraphItem) bool {
|
||||
return item.Storage != nil
|
||||
}
|
||||
|
||||
func (wf *AbstractWorkflow) IsWorkflow(item graph.GraphItem) bool {
|
||||
return item.Workflow != nil
|
||||
}
|
||||
|
||||
/*
|
||||
* Workflow is a struct that represents a workflow
|
||||
* it defines the native workflow
|
||||
*/
|
||||
type Workflow struct {
|
||||
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
|
||||
AbstractWorkflow // AbstractWorkflow contains the basic fields of a workflow
|
||||
}
|
||||
|
||||
func (w *Workflow) getPricedItem(item graph.GraphItem, request *tools.APIRequest) pricing.PricedItemITF {
|
||||
dt, res := item.GetResource()
|
||||
if dt == tools.INVALID {
|
||||
return nil
|
||||
}
|
||||
return res.ConvertToPricedResource(dt, request)
|
||||
return related
|
||||
}
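The reworked GetByRelatedProcessing now pairs each related resource with the links that connect it to the processing, instead of returning a flat resource slice. A simplified sketch of that grouping (local stand-in types, keyed here by the neighbouring node id):

package main

import "fmt"

type link struct{ Source, Destination string }

type related struct {
	Node  string // stand-in for the related resource
	Links []link
}

// relatedTo collects, for one processing id, the neighbouring nodes and the links involved.
func relatedTo(processingID string, links []link, label func(id string) string) map[string]related {
	out := map[string]related{}
	for _, l := range links {
		var other string
		switch processingID {
		case l.Destination:
			other = l.Source
		case l.Source:
			other = l.Destination
		default:
			continue
		}
		r := out[other]
		r.Node = label(other)
		r.Links = append(r.Links, l)
		out[other] = r
	}
	return out
}

func main() {
	links := []link{{Source: "storage-1", Destination: "proc-1"}, {Source: "proc-1", Destination: "storage-2"}}
	rel := relatedTo("proc-1", links, func(id string) string { return "resource:" + id })
	fmt.Println(len(rel)) // 2
}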
|
||||
|
||||
func (ao *Workflow) VerifyAuth(request *tools.APIRequest) bool {
|
||||
@@ -129,8 +91,9 @@ func (ao *Workflow) VerifyAuth(request *tools.APIRequest) bool {
|
||||
shared, code, _ := shallow_collaborative_area.NewAccessor(request).LoadOne(shared)
|
||||
if code != 200 || shared == nil {
|
||||
isAuthorized = false
|
||||
} else {
|
||||
isAuthorized = shared.VerifyAuth(request)
|
||||
}
|
||||
isAuthorized = shared.VerifyAuth(request)
|
||||
}
|
||||
}
|
||||
return ao.AbstractObject.VerifyAuth(request) || isAuthorized
|
||||
@@ -165,124 +128,91 @@ func (wfa *Workflow) CheckBooking(caller *tools.HTTPCaller) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (wf *Workflow) Planify(start time.Time, end *time.Time, request *tools.APIRequest) (float64, map[tools.DataType][]pricing.PricedItemITF, *Workflow, error) {
|
||||
processings := []*resources.ProcessingResource{}
|
||||
priceds := map[tools.DataType][]pricing.PricedItemITF{}
|
||||
priceds[tools.PROCESSING_RESOURCE] = []pricing.PricedItemITF{}
|
||||
for _, item := range wf.GetGraphItems(wf.IsProcessing) {
|
||||
dt, realItem := item.GetResource()
|
||||
if realItem == nil {
|
||||
return 0, priceds, nil, errors.New("could not load the processing resource")
|
||||
}
|
||||
priced := realItem.ConvertToPricedResource(dt, request)
|
||||
timeFromStartS := wf.Graph.GetAverageTimeProcessingBeforeStart(0, realItem.GetID(), request)
|
||||
started := start.Add(time.Duration(timeFromStartS) * time.Second)
|
||||
priced.SetLocationStart(started)
|
||||
priced.SetLocationEnd(started.Add(time.Duration(priced.GetExplicitDurationInS())))
|
||||
processings = append(processings, realItem.(*resources.ProcessingResource))
|
||||
priceds[tools.PROCESSING_RESOURCE] = append(priceds[tools.PROCESSING_RESOURCE], priced)
|
||||
func (wf *Workflow) Planify(start time.Time, end *time.Time, request *tools.APIRequest) (float64, map[tools.DataType]map[string]pricing.PricedItemITF, *Workflow, error) {
|
||||
priceds := map[tools.DataType]map[string]pricing.PricedItemITF{}
|
||||
ps, priceds, err := plan[*resources.ProcessingResource](tools.PROCESSING_RESOURCE, wf, priceds, request, wf.Graph.IsProcessing,
|
||||
func(res resources.ResourceInterface, priced pricing.PricedItemITF) (time.Time, float64) {
|
||||
return start.Add(time.Duration(wf.Graph.GetAverageTimeProcessingBeforeStart(0, res.GetID(), request)) * time.Second), priced.GetExplicitDurationInS()
|
||||
}, func(started time.Time, duration float64) *time.Time {
|
||||
s := started.Add(time.Duration(duration))
|
||||
return &s
|
||||
})
|
||||
if err != nil {
|
||||
return 0, priceds, nil, err
|
||||
}
|
||||
priceds[tools.DATA_RESOURCE] = []pricing.PricedItemITF{}
|
||||
for _, item := range wf.GetGraphItems(wf.IsData) {
|
||||
dt, realItem := item.GetResource()
|
||||
if realItem == nil {
|
||||
continue
|
||||
}
|
||||
priced := realItem.ConvertToPricedResource(dt, request)
|
||||
priced.SetLocationStart(start)
|
||||
priced.SetLocationEnd(*end)
|
||||
priceds[tools.PROCESSING_RESOURCE] = append(priceds[tools.PROCESSING_RESOURCE], priced)
|
||||
if _, priceds, err = plan[resources.ResourceInterface](tools.DATA_RESOURCE, wf, priceds, request, wf.Graph.IsData,
|
||||
func(res resources.ResourceInterface, priced pricing.PricedItemITF) (time.Time, float64) {
|
||||
return start, 0
|
||||
}, func(started time.Time, duration float64) *time.Time {
|
||||
return end
|
||||
}); err != nil {
|
||||
return 0, priceds, nil, err
|
||||
}
|
||||
for _, f := range []func(graph.GraphItem) bool{wf.IsStorage, wf.IsCompute} {
|
||||
for _, item := range wf.GetGraphItems(f) {
|
||||
dt, r := item.GetResource()
|
||||
if r == nil {
|
||||
continue
|
||||
}
|
||||
if priceds[dt] == nil {
|
||||
priceds[dt] = []pricing.PricedItemITF{}
|
||||
}
|
||||
priced := r.ConvertToPricedResource(dt, request)
|
||||
nearestStart, longestDuration := wf.Graph.GetAverageTimeRelatedToProcessingActivity(start, processings, r,
|
||||
func(i graph.GraphItem) resources.ResourceInterface {
|
||||
for k, f := range map[tools.DataType]func(graph.GraphItem) bool{tools.STORAGE_RESOURCE: wf.Graph.IsStorage, tools.COMPUTE_RESOURCE: wf.Graph.IsCompute} {
|
||||
if _, priceds, err = plan[resources.ResourceInterface](k, wf, priceds, request, f,
|
||||
func(res resources.ResourceInterface, priced pricing.PricedItemITF) (time.Time, float64) {
|
||||
nearestStart, longestDuration := wf.Graph.GetAverageTimeRelatedToProcessingActivity(start, ps, res, func(i graph.GraphItem) (r resources.ResourceInterface) {
|
||||
if f(i) {
|
||||
_, r := i.GetResource()
|
||||
return r
|
||||
} else {
|
||||
return nil
|
||||
_, r = i.GetResource()
|
||||
}
|
||||
return r
|
||||
}, request)
|
||||
started := start.Add(time.Duration(nearestStart) * time.Second)
|
||||
priced.SetLocationStart(started)
|
||||
if longestDuration >= 0 {
|
||||
priced.SetLocationEnd(started.Add(time.Duration(longestDuration)))
|
||||
}
|
||||
priceds[dt] = append(priceds[dt], priced)
|
||||
return start.Add(time.Duration(nearestStart) * time.Second), longestDuration
|
||||
}, func(started time.Time, duration float64) *time.Time {
|
||||
s := started.Add(time.Duration(duration))
|
||||
return &s
|
||||
}); err != nil {
|
||||
return 0, priceds, nil, err
|
||||
}
|
||||
}
|
||||
longest := wf.getLongestTime(end, priceds, request)
|
||||
priceds[tools.WORKFLOW_RESOURCE] = []pricing.PricedItemITF{}
|
||||
for _, item := range wf.GetGraphItems(wf.IsWorkflow) {
|
||||
access := NewAccessor(nil)
|
||||
_, r := item.GetResource()
|
||||
if r == nil {
|
||||
return 0, priceds, nil, errors.New("could not load the workflow")
|
||||
}
|
||||
priced := r.ConvertToPricedResource(tools.WORKFLOW_RESOURCE, request)
|
||||
res, code, err := access.LoadOne(r.GetID())
|
||||
if code != 200 || err != nil {
|
||||
return 0, priceds, nil, errors.New("could not load the workflow with id: " + fmt.Sprintf("%v", err.Error()))
|
||||
}
|
||||
neoLongest := float64(0)
|
||||
innerWF := res.(*Workflow)
|
||||
neoLongest, _, innerWF, err = innerWF.Planify(start, end, request)
|
||||
if neoLongest > longest {
|
||||
longest = neoLongest
|
||||
}
|
||||
started := start.Add(time.Duration(wf.getNearestStart(start, priceds, request)) * time.Second)
|
||||
priced.SetLocationStart(started)
|
||||
durationE := time.Duration(longest)
|
||||
if durationE < 0 {
|
||||
continue
|
||||
}
|
||||
ended := start.Add(durationE * time.Second)
|
||||
priced.SetLocationEnd(ended)
|
||||
priceds[tools.WORKFLOW_RESOURCE] = append(priceds[tools.WORKFLOW_RESOURCE], priced)
|
||||
longest := common.GetPlannerLongestTime(end, priceds, request)
|
||||
if _, priceds, err = plan[resources.ResourceInterface](tools.WORKFLOW_RESOURCE, wf, priceds, request, wf.Graph.IsWorkflow,
|
||||
func(res resources.ResourceInterface, priced pricing.PricedItemITF) (time.Time, float64) {
|
||||
start := start.Add(time.Duration(common.GetPlannerNearestStart(start, priceds, request)) * time.Second)
|
||||
longest := float64(-1)
|
||||
r, code, err := res.GetAccessor(request).LoadOne(res.GetID())
|
||||
if code != 200 || err != nil {
|
||||
return start, longest
|
||||
}
|
||||
if neoLongest, _, _, err := r.(*Workflow).Planify(start, end, request); err != nil {
|
||||
return start, longest
|
||||
} else if neoLongest > longest {
|
||||
longest = neoLongest
|
||||
}
|
||||
return start.Add(time.Duration(common.GetPlannerNearestStart(start, priceds, request)) * time.Second), longest
|
||||
}, func(start time.Time, longest float64) *time.Time {
|
||||
s := start.Add(time.Duration(longest) * time.Second)
|
||||
return &s
|
||||
}); err != nil {
|
||||
return 0, priceds, nil, err
|
||||
}
|
||||
return longest, priceds, wf, nil
|
||||
}
|
||||
|
||||
func (wf *Workflow) getNearestStart(start time.Time, priceds map[tools.DataType][]pricing.PricedItemITF, request *tools.APIRequest) float64 {
	near := float64(10000000000)
	// get the nearest start from start var
	for _, items := range priceds {
		for _, priced := range items {
			if priced.GetLocationStart() == nil {
				continue
			}
			newS := priced.GetLocationStart()
			if newS.Sub(start).Seconds() < near {
				near = newS.Sub(start).Seconds()
			}
		}
	}
	return near
}

func (wf *Workflow) getLongestTime(end *time.Time, priceds map[tools.DataType][]pricing.PricedItemITF, request *tools.APIRequest) float64 {
	if end == nil {
		return -1
	}
	longestTime := float64(0)
	// get the longest end among the scheduled processing resources
	for _, priced := range priceds[tools.PROCESSING_RESOURCE] {
		if priced.GetLocationEnd() == nil {
			continue
		}
		newS := priced.GetLocationEnd()
		if longestTime < newS.Sub(*end).Seconds() {
			longestTime = newS.Sub(*end).Seconds()
		}
	}
	return longestTime
}

func plan[T resources.ResourceInterface](dt tools.DataType, wf *Workflow, priceds map[tools.DataType]map[string]pricing.PricedItemITF, request *tools.APIRequest,
	f func(graph.GraphItem) bool, start func(resources.ResourceInterface, pricing.PricedItemITF) (time.Time, float64), end func(time.Time, float64) *time.Time) ([]T, map[tools.DataType]map[string]pricing.PricedItemITF, error) {
	resources := []T{}
	for _, item := range wf.GetGraphItems(f) {
		if priceds[dt] == nil {
			priceds[dt] = map[string]pricing.PricedItemITF{}
		}
		dt, realItem := item.GetResource()
		if realItem == nil {
			return resources, priceds, errors.New("could not load the processing resource")
		}
		priced := realItem.ConvertToPricedResource(dt, request)
		started, duration := start(realItem, priced)
		priced.SetLocationStart(started)
		if duration >= 0 {
			if e := end(started, duration); e != nil {
				priced.SetLocationEnd(*e)
			}
		}
		if e := end(started, priced.GetExplicitDurationInS()); e != nil {
			priced.SetLocationEnd(*e)
		}
		resources = append(resources, realItem.(T))
		priceds[dt][item.ID] = priced
	}
	return resources, priceds, nil
}
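
For orientation, here is a minimal sketch of how the generic plan helper above could be driven for another resource kind; the predicate name wf.Graph.IsProcessing and the surrounding wf, start, end, priceds and request values are illustrative assumptions, not code from this commit.

// Hypothetical call site: plan every processing item of the graph, starting
// each one at the planner's nearest available slot and ending it after its
// explicitly priced duration.
procs, priceds, err := plan[resources.ResourceInterface](
	tools.PROCESSING_RESOURCE, wf, priceds, request, wf.Graph.IsProcessing,
	func(res resources.ResourceInterface, priced pricing.PricedItemITF) (time.Time, float64) {
		s := start.Add(time.Duration(common.GetPlannerNearestStart(start, priceds, request)) * time.Second)
		return s, priced.GetExplicitDurationInS()
	},
	func(s time.Time, d float64) *time.Time {
		e := s.Add(time.Duration(d) * time.Second)
		return &e
	})
if err != nil {
	return 0, priceds, nil, err
}
_ = procs // the typed slice is available for further per-resource handling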
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package workflow
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/logs"
|
||||
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
|
||||
@@ -47,7 +49,7 @@ func (a *workflowMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error
|
||||
a.execute(res.(*Workflow), true, false) // up to date the workspace for the workflow
|
||||
a.share(res.(*Workflow), true, a.GetCaller())
|
||||
}
|
||||
return res, code, err
|
||||
return a.verifyResource(res), code, err
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -89,19 +91,27 @@ func (a *workflowMongoAccessor) share(realData *Workflow, delete bool, caller *t
|
||||
// UpdateOne updates a workflow in the database
|
||||
func (a *workflowMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||
// avoid the update if the schedule is the same
|
||||
set = a.verifyResource(set)
|
||||
if set.(*Workflow).Graph != nil && set.(*Workflow).Graph.Partial {
|
||||
return nil, 403, errors.New("you are not allowed to update a partial workflow")
|
||||
}
|
||||
res, code, err := utils.GenericUpdateOne(set, id, a, &Workflow{})
|
||||
if code != 200 {
|
||||
return nil, code, err
|
||||
}
|
||||
workflow := res.(*Workflow)
|
||||
a.execute(workflow, false, false) // update the workspace for the workflow
|
||||
a.execute(workflow, false, true) // update the workspace for the workflow
|
||||
a.share(workflow, false, a.GetCaller()) // share the update to the peers
|
||||
return res, code, nil
|
||||
}
|
||||
|
||||
// StoreOne stores a workflow in the database
|
||||
func (a *workflowMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
data = a.verifyResource(data)
|
||||
d := data.(*Workflow)
|
||||
if d.Graph != nil && d.Graph.Partial {
|
||||
return nil, 403, errors.New("you are not allowed to update a partial workflow")
|
||||
}
|
||||
res, code, err := utils.GenericStoreOne(d, a)
|
||||
if err != nil || code != 200 {
|
||||
return nil, code, err
|
||||
@@ -109,19 +119,25 @@ func (a *workflowMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, i
|
||||
workflow := res.(*Workflow)
|
||||
|
||||
a.share(workflow, false, a.GetCaller()) // share the creation to the peers
|
||||
a.execute(workflow, false, false) // store the workspace for the workflow
|
||||
a.execute(workflow, false, true) // store the workspace for the workflow
|
||||
return res, code, nil
|
||||
}
|
||||
|
||||
// CopyOne copies a workflow in the database
|
||||
func (a *workflowMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
wf := data.(*Workflow)
|
||||
for _, item := range wf.Graph.Items {
|
||||
_, obj := item.GetResource()
|
||||
if obj != nil {
|
||||
obj.ClearEnv()
|
||||
}
|
||||
}
|
||||
return utils.GenericStoreOne(data, a)
|
||||
}
|
||||
|
||||
// execute is a function that executes a workflow
|
||||
// it stores the workflow resources in a specific workspace to never have a conflict in UI and logic
|
||||
func (a *workflowMongoAccessor) execute(workflow *Workflow, delete bool, active bool) {
|
||||
|
||||
filters := &dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{ // filter by standard workspace name attached to a workflow
|
||||
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: workflow.Name + "_workspace"}},
|
||||
@@ -173,5 +189,36 @@ func (a *workflowMongoAccessor) LoadAll(isDraft bool) ([]utils.ShallowDBObject,
|
||||
}
|
||||
|
||||
func (a *workflowMongoAccessor) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
||||
return utils.GenericSearch[*Workflow](filters, search, (&Workflow{}).GetObjectFilters(search), func(d utils.DBObject) utils.ShallowDBObject { return d }, isDraft, a)
|
||||
return utils.GenericSearch[*Workflow](filters, search, (&Workflow{}).GetObjectFilters(search), func(d utils.DBObject) utils.ShallowDBObject { return a.verifyResource(d) }, isDraft, a)
|
||||
}
|
||||
|
||||
func (a *workflowMongoAccessor) verifyResource(obj utils.DBObject) utils.DBObject {
	wf := obj.(*Workflow)
	if wf.Graph == nil {
		return wf
	}
	for _, item := range wf.Graph.Items {
		t, resource := item.GetResource()
		if resource == nil {
			continue
		}
		var access utils.Accessor
		if t == tools.COMPUTE_RESOURCE {
			access = resources.NewAccessor[*resources.ComputeResource](t, a.GetRequest(), func() utils.DBObject { return &resources.ComputeResource{} })
		} else if t == tools.PROCESSING_RESOURCE {
			access = resources.NewAccessor[*resources.ProcessingResource](t, a.GetRequest(), func() utils.DBObject { return &resources.ProcessingResource{} })
		} else if t == tools.STORAGE_RESOURCE {
			access = resources.NewAccessor[*resources.StorageResource](t, a.GetRequest(), func() utils.DBObject { return &resources.StorageResource{} })
		} else if t == tools.WORKFLOW_RESOURCE {
			access = resources.NewAccessor[*resources.WorkflowResource](t, a.GetRequest(), func() utils.DBObject { return &resources.WorkflowResource{} })
		} else if t == tools.DATA_RESOURCE {
			access = resources.NewAccessor[*resources.DataResource](t, a.GetRequest(), func() utils.DBObject { return &resources.DataResource{} })
		} else {
			wf.Graph.Clear(resource.GetID())
		}
		if error := utils.VerifyAccess(access, resource.GetID()); error != nil {
			wf.Graph.Clear(resource.GetID())
		}
	}
	return wf
}
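
The if/else chain above that picks an accessor per resource type could equally be written as a lookup table; the sketch below is an editorial illustration of that alternative, not part of the commit, and reuses only constructors already visible above.

// Sketch: type-to-accessor dispatch as a table; unknown types still fall
// through to Graph.Clear exactly as in the branch version.
var accessorFactories = map[tools.DataType]func(*tools.APIRequest) utils.Accessor{
	tools.COMPUTE_RESOURCE: func(r *tools.APIRequest) utils.Accessor {
		return resources.NewAccessor[*resources.ComputeResource](tools.COMPUTE_RESOURCE, r, func() utils.DBObject { return &resources.ComputeResource{} })
	},
	tools.PROCESSING_RESOURCE: func(r *tools.APIRequest) utils.Accessor {
		return resources.NewAccessor[*resources.ProcessingResource](tools.PROCESSING_RESOURCE, r, func() utils.DBObject { return &resources.ProcessingResource{} })
	},
	// storage, workflow and data resources follow the same pattern
}

// inside the loop of verifyResource:
if factory, ok := accessorFactories[t]; ok {
	access = factory(a.GetRequest())
} else {
	wf.Graph.Clear(resource.GetID())
}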
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/models/booking"
|
||||
"cloud.o-forge.io/core/oc-lib/models/common"
|
||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
||||
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
@@ -15,38 +15,41 @@ import (
|
||||
)
|
||||
|
||||
/*
* WorkflowExecutions is a struct that represents a list of workflow executions
* WorkflowExecution is a struct that represents a single workflow execution
* Warning: no user can write (del, post, put) a workflow execution; it is only used by the system
* workflows generate their own executions
*/
|
||||
type WorkflowExecutions struct {
|
||||
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
|
||||
ExecDate time.Time `json:"execution_date,omitempty" bson:"execution_date,omitempty" validate:"required"` // ExecDate is the execution date of the workflow, is required
|
||||
EndDate *time.Time `json:"end_date,omitempty" bson:"end_date,omitempty"` // EndDate is the end date of the workflow
|
||||
State common.ScheduledType `json:"state" bson:"state" default:"0"` // State is the state of the workflow
|
||||
WorkflowID string `json:"workflow_id" bson:"workflow_id,omitempty"` // WorkflowID is the ID of the workflow
|
||||
type WorkflowExecution struct {
|
||||
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
|
||||
PeerBookByGraph map[string]map[string][]string `json:"peer_book_by_graph,omitempty" bson:"peer_book_by_graph,omitempty"` // BookByResource is a map of the resource id and the list of the booking id
|
||||
ExecutionsID string `json:"executions_id,omitempty" bson:"executions_id,omitempty"`
|
||||
ExecDate time.Time `json:"execution_date,omitempty" bson:"execution_date,omitempty" validate:"required"` // ExecDate is the execution date of the workflow, is required
|
||||
EndDate *time.Time `json:"end_date,omitempty" bson:"end_date,omitempty"` // EndDate is the end date of the workflow
|
||||
State enum.BookingStatus `json:"state" bson:"state" default:"0"` // TEMPORARY TODO DEFAULT 1 -> 0 State is the state of the workflow
|
||||
WorkflowID string `json:"workflow_id" bson:"workflow_id,omitempty"` // WorkflowID is the ID of the workflow
|
||||
}
|
||||
|
||||
func (r *WorkflowExecutions) StoreDraftDefault() {
|
||||
r.IsDraft = true
|
||||
func (r *WorkflowExecution) StoreDraftDefault() {
|
||||
r.IsDraft = false // TODO: TEMPORARY
|
||||
r.State = enum.SCHEDULED
|
||||
}
|
||||
|
||||
func (r *WorkflowExecutions) CanUpdate(set utils.DBObject) (bool, utils.DBObject) {
|
||||
if r.State != set.(*WorkflowExecutions).State {
|
||||
return true, &WorkflowExecutions{State: set.(*WorkflowExecutions).State} // only state can be updated
|
||||
func (r *WorkflowExecution) CanUpdate(set utils.DBObject) (bool, utils.DBObject) {
|
||||
if r.State != set.(*WorkflowExecution).State {
|
||||
return true, &WorkflowExecution{State: set.(*WorkflowExecution).State} // only state can be updated
|
||||
}
|
||||
return r.IsDraft, set // only draft buying can be updated
|
||||
return !r.IsDraft, set // only draft buying can be updated
|
||||
}
|
||||
|
||||
func (r *WorkflowExecutions) CanDelete() bool {
|
||||
func (r *WorkflowExecution) CanDelete() bool {
|
||||
return r.IsDraft // only draft bookings can be deleted
|
||||
}
|
||||
|
||||
func (wfa *WorkflowExecutions) Equals(we *WorkflowExecutions) bool {
|
||||
func (wfa *WorkflowExecution) Equals(we *WorkflowExecution) bool {
|
||||
return wfa.ExecDate.Equal(we.ExecDate) && wfa.WorkflowID == we.WorkflowID
|
||||
}
|
||||
|
||||
func (ws *WorkflowExecutions) PurgeDraft(request *tools.APIRequest) error {
|
||||
func (ws *WorkflowExecution) PurgeDraft(request *tools.APIRequest) error {
|
||||
if ws.EndDate == nil {
|
||||
// if no end... then Book like a savage
|
||||
e := ws.ExecDate.Add(time.Hour)
|
||||
@@ -55,7 +58,7 @@ func (ws *WorkflowExecutions) PurgeDraft(request *tools.APIRequest) error {
|
||||
accessor := ws.GetAccessor(request)
|
||||
res, code, err := accessor.Search(&dbs.Filters{
|
||||
And: map[string][]dbs.Filter{ // check if there is a booking on the same compute resource by filtering on the compute_resource_id, the state and the execution date
|
||||
"state": {{Operator: dbs.EQUAL.String(), Value: common.DRAFT.EnumIndex()}},
|
||||
"state": {{Operator: dbs.EQUAL.String(), Value: enum.DRAFT.EnumIndex()}},
|
||||
"workflow_id": {{Operator: dbs.EQUAL.String(), Value: ws.WorkflowID}},
|
||||
"execution_date": {
|
||||
{Operator: dbs.LTE.String(), Value: primitive.NewDateTimeFromTime(*ws.EndDate)},
|
||||
@@ -73,53 +76,71 @@ func (ws *WorkflowExecutions) PurgeDraft(request *tools.APIRequest) error {
|
||||
}
|
||||
|
||||
// tool to transform the argo status to a state
func (wfa *WorkflowExecutions) ArgoStatusToState(status string) *WorkflowExecutions {
func (wfa *WorkflowExecution) ArgoStatusToState(status string) *WorkflowExecution {
	status = strings.ToLower(status)
	switch status {
	case "succeeded": // Succeeded
		wfa.State = common.SUCCESS
		wfa.State = enum.SUCCESS
	case "pending": // Pending
		wfa.State = common.SCHEDULED
		wfa.State = enum.SCHEDULED
	case "running": // Running
		wfa.State = common.STARTED
		wfa.State = enum.STARTED
	default: // Failed
		wfa.State = common.FAILURE
		wfa.State = enum.FAILURE
	}
	return wfa
}
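
A quick usage note (identifiers invented for the example): the mapping above is fed with the raw phase string reported by Argo and is case-insensitive.

exec := &WorkflowExecution{WorkflowID: "wf-123"} // hypothetical execution
exec.ArgoStatusToState("Succeeded")              // -> enum.SUCCESS
exec.ArgoStatusToState("Running")                // -> enum.STARTED
exec.ArgoStatusToState("Error")                  // any other phase -> enum.FAILURE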
|
||||
|
||||
func (r *WorkflowExecutions) GenerateID() {
|
||||
r.UUID = uuid.New().String()
|
||||
func (r *WorkflowExecution) GenerateID() {
|
||||
if r.UUID == "" {
|
||||
r.UUID = uuid.New().String()
|
||||
}
|
||||
}
|
||||
|
||||
func (d *WorkflowExecutions) GetName() string {
|
||||
func (d *WorkflowExecution) GetName() string {
|
||||
return d.UUID + "_" + d.ExecDate.String()
|
||||
}
|
||||
|
||||
func (d *WorkflowExecutions) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
||||
func (d *WorkflowExecution) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
||||
return NewAccessor(request) // Create a new instance of the accessor
|
||||
}
|
||||
|
||||
func (d *WorkflowExecutions) VerifyAuth(request *tools.APIRequest) bool {
|
||||
func (d *WorkflowExecution) VerifyAuth(request *tools.APIRequest) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *WorkflowExecutions) Book(wfID string, priceds map[tools.DataType][]pricing.PricedItemITF) []*booking.Booking {
	booking := d.bookEach(wfID, tools.STORAGE_RESOURCE, priceds[tools.STORAGE_RESOURCE])
	booking = append(booking, d.bookEach(wfID, tools.PROCESSING_RESOURCE, priceds[tools.PROCESSING_RESOURCE])...)
func (d *WorkflowExecution) Book(executionsID string, wfID string, priceds map[tools.DataType]map[string]pricing.PricedItemITF) []*booking.Booking {
	booking := d.bookEach(executionsID, wfID, tools.STORAGE_RESOURCE, priceds[tools.STORAGE_RESOURCE])
	booking = append(booking, d.bookEach(executionsID, wfID, tools.PROCESSING_RESOURCE, priceds[tools.PROCESSING_RESOURCE])...)
	booking = append(booking, d.bookEach(executionsID, wfID, tools.COMPUTE_RESOURCE, priceds[tools.COMPUTE_RESOURCE])...)
	booking = append(booking, d.bookEach(executionsID, wfID, tools.DATA_RESOURCE, priceds[tools.DATA_RESOURCE])...)
	return booking
}

func (d *WorkflowExecutions) bookEach(wfID string, dt tools.DataType, priceds []pricing.PricedItemITF) []*booking.Booking {
func (d *WorkflowExecution) bookEach(executionsID string, wfID string, dt tools.DataType, priceds map[string]pricing.PricedItemITF) []*booking.Booking {
	items := []*booking.Booking{}
	for _, priced := range priceds {
	for itemID, priced := range priceds {
		if d.PeerBookByGraph == nil {
			d.PeerBookByGraph = map[string]map[string][]string{}
		}
		if d.PeerBookByGraph[priced.GetCreatorID()] == nil {
			d.PeerBookByGraph[priced.GetCreatorID()] = map[string][]string{}
		}
		if d.PeerBookByGraph[priced.GetCreatorID()][itemID] == nil {
			d.PeerBookByGraph[priced.GetCreatorID()][itemID] = []string{}
		}
		start := d.ExecDate
		if s := priced.GetLocationStart(); s != nil {
			start = *s
		}
		end := start.Add(time.Duration(priced.GetExplicitDurationInS()) * time.Second)
		bookingItem := &booking.Booking{
			State: common.DRAFT,
			AbstractObject: utils.AbstractObject{
				UUID: uuid.New().String(),
				Name: d.GetName() + "_" + executionsID + "_" + wfID,
			},
			ExecutionsID: executionsID,
			State:        enum.SCHEDULED,
			ResourceID:   priced.GetID(),
			ResourceType: dt,
			DestPeerID:   priced.GetCreatorID(),
@@ -129,6 +150,8 @@ func (d *WorkflowExecutions) bookEach(wfID string, dt tools.DataType, priceds []
			ExpectedEndDate: &end,
		}
		items = append(items, bookingItem)
		d.PeerBookByGraph[priced.GetCreatorID()][itemID] = append(
			d.PeerBookByGraph[priced.GetCreatorID()][itemID], bookingItem.GetID())
	}
	return items
}
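
Illustrative only: how a scheduler is expected to turn the priced items of one execution into bookings. Every identifier and literal below is invented, and somePricedItem stands in for a real pricing.PricedItemITF.

priceds := map[tools.DataType]map[string]pricing.PricedItemITF{
	tools.PROCESSING_RESOURCE: {"graph-item-1": somePricedItem},
}
bookings := exec.Book("executions-uuid", "workflow-uuid", priceds)
for _, b := range bookings {
	// each booking carries the destination peer and the expected time window
	fmt.Println(b.DestPeerID, b.ExpectedStartDate, b.ExpectedEndDate)
}
// exec.PeerBookByGraph now maps peer ID -> graph item ID -> booking IDs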
|
||||
|
||||
@@ -6,18 +6,31 @@ import (
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/logs"
|
||||
"cloud.o-forge.io/core/oc-lib/models/common"
|
||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
)
|
||||
|
||||
type workflowExecutionMongoAccessor struct {
|
||||
utils.AbstractAccessor
|
||||
shallow bool
|
||||
}
|
||||
|
||||
func newShallowAccessor(request *tools.APIRequest) *workflowExecutionMongoAccessor {
|
||||
return &workflowExecutionMongoAccessor{
|
||||
shallow: true,
|
||||
AbstractAccessor: utils.AbstractAccessor{
|
||||
Logger: logs.CreateLogger(tools.WORKFLOW_EXECUTION.String()), // Create a logger with the data type
|
||||
Request: request,
|
||||
Type: tools.WORKFLOW_EXECUTION,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func NewAccessor(request *tools.APIRequest) *workflowExecutionMongoAccessor {
|
||||
return &workflowExecutionMongoAccessor{
|
||||
utils.AbstractAccessor{
|
||||
shallow: false,
|
||||
AbstractAccessor: utils.AbstractAccessor{
|
||||
Logger: logs.CreateLogger(tools.WORKFLOW_EXECUTION.String()), // Create a logger with the data type
|
||||
Request: request,
|
||||
Type: tools.WORKFLOW_EXECUTION,
|
||||
@@ -30,7 +43,11 @@ func (wfa *workflowExecutionMongoAccessor) DeleteOne(id string) (utils.DBObject,
|
||||
}
|
||||
|
||||
func (wfa *workflowExecutionMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||
return nil, 404, errors.New("not implemented")
|
||||
if set.(*WorkflowExecution).State == 0 {
|
||||
return nil, 400, errors.New("state is required")
|
||||
}
|
||||
realSet := WorkflowExecution{State: set.(*WorkflowExecution).State}
|
||||
return utils.GenericUpdateOne(&realSet, id, wfa, &WorkflowExecution{})
|
||||
}
|
||||
|
||||
func (wfa *workflowExecutionMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
@@ -42,37 +59,49 @@ func (wfa *workflowExecutionMongoAccessor) CopyOne(data utils.DBObject) (utils.D
|
||||
}
|
||||
|
||||
func (a *workflowExecutionMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||
return utils.GenericLoadOne[*WorkflowExecutions](id, func(d utils.DBObject) (utils.DBObject, int, error) {
|
||||
if d.(*WorkflowExecutions).State == common.DRAFT && time.Now().UTC().After(d.(*WorkflowExecutions).ExecDate) {
|
||||
utils.GenericDeleteOne(d.GetID(), a)
|
||||
return nil, 404, errors.New("Not found")
|
||||
return utils.GenericLoadOne[*WorkflowExecution](id, func(d utils.DBObject) (utils.DBObject, int, error) {
|
||||
now := time.Now()
|
||||
now = now.Add(time.Second * -60)
|
||||
if d.(*WorkflowExecution).State == enum.DRAFT && !a.shallow && now.UTC().After(d.(*WorkflowExecution).ExecDate) {
|
||||
utils.GenericDeleteOne(d.GetID(), newShallowAccessor(a.Request))
|
||||
return nil, 404, errors.New("not found")
|
||||
}
|
||||
if d.(*WorkflowExecutions).State == common.SCHEDULED && time.Now().UTC().After(d.(*WorkflowExecutions).ExecDate) {
|
||||
d.(*WorkflowExecutions).State = common.FORGOTTEN
|
||||
utils.GenericRawUpdateOne(d, id, a)
|
||||
if d.(*WorkflowExecution).State == enum.SCHEDULED && !a.shallow && now.UTC().After(d.(*WorkflowExecution).ExecDate) {
|
||||
d.(*WorkflowExecution).State = enum.FORGOTTEN
|
||||
utils.GenericRawUpdateOne(d, id, newShallowAccessor(a.Request))
|
||||
}
|
||||
return d, 200, nil
|
||||
}, a)
|
||||
}
|
||||
|
||||
func (a *workflowExecutionMongoAccessor) LoadAll(isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
||||
return utils.GenericLoadAll[*WorkflowExecutions](a.getExec(), isDraft, a)
|
||||
return utils.GenericLoadAll[*WorkflowExecution](a.getExec(), isDraft, a)
|
||||
}
|
||||
|
||||
func (a *workflowExecutionMongoAccessor) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
||||
return utils.GenericSearch[*WorkflowExecutions](filters, search, (&WorkflowExecutions{}).GetObjectFilters(search), a.getExec(), isDraft, a)
|
||||
return utils.GenericSearch[*WorkflowExecution](filters, search, a.GetExecFilters(search), a.getExec(), isDraft, a)
|
||||
}
|
||||
|
||||
func (a *workflowExecutionMongoAccessor) getExec() func(utils.DBObject) utils.ShallowDBObject {
|
||||
return func(d utils.DBObject) utils.ShallowDBObject {
|
||||
if d.(*WorkflowExecutions).State == common.DRAFT && time.Now().UTC().After(d.(*WorkflowExecutions).ExecDate) {
|
||||
utils.GenericDeleteOne(d.GetID(), a)
|
||||
now := time.Now()
|
||||
now = now.Add(time.Second * -60)
|
||||
if d.(*WorkflowExecution).State == enum.DRAFT && now.UTC().After(d.(*WorkflowExecution).ExecDate) {
|
||||
utils.GenericDeleteOne(d.GetID(), newShallowAccessor(a.Request))
|
||||
return nil
|
||||
}
|
||||
if d.(*WorkflowExecutions).State == common.SCHEDULED && time.Now().UTC().After(d.(*WorkflowExecutions).ExecDate) {
|
||||
d.(*WorkflowExecutions).State = common.FORGOTTEN
|
||||
utils.GenericRawUpdateOne(d, d.GetID(), a)
|
||||
if d.(*WorkflowExecution).State == enum.SCHEDULED && now.UTC().After(d.(*WorkflowExecution).ExecDate) {
|
||||
d.(*WorkflowExecution).State = enum.FORGOTTEN
|
||||
utils.GenericRawUpdateOne(d, d.GetID(), newShallowAccessor(a.Request))
|
||||
return d
|
||||
}
|
||||
return d
|
||||
}
|
||||
}
|
||||
|
||||
func (a *workflowExecutionMongoAccessor) GetExecFilters(search string) *dbs.Filters {
	return &dbs.Filters{
		Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
			"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search + "_execution"}},
		}}
}
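
Example (workflow name invented): executions generated for a workflow called climate-sim are stored under names like climate-sim_execution_<date>, so searching on that name builds a LIKE filter that matches them all; accessor stands in for a *workflowExecutionMongoAccessor.

filters := accessor.GetExecFilters("climate-sim")
// handed to GenericSearch by Search() above; matches "climate-sim_execution_..."
_ = filters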
|
||||
|
||||
@@ -4,14 +4,19 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/common"
|
||||
"cloud.o-forge.io/core/oc-lib/logs"
|
||||
"cloud.o-forge.io/core/oc-lib/models/booking"
|
||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/models/workflow"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
"github.com/google/uuid"
|
||||
"github.com/robfig/cron"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
/*
|
||||
@@ -20,14 +25,15 @@ import (
|
||||
*/
|
||||
// WorkflowSchedule is a transient object that only lives for the duration of a session; it is never stored in the database
type WorkflowSchedule struct {
	Workflow           *workflow.Workflow    `json:"workflow,omitempty"`            // Workflow is the workflow dependency of the schedule
	WorkflowExecutions []*WorkflowExecutions `json:"workflow_executions,omitempty"` // WorkflowExecutions is the list of executions of the workflow
	Message            string                `json:"message,omitempty"`             // Message is the message of the schedule
	Warning            string                `json:"warning,omitempty"`             // Warning is the warning message of the schedule
	Start              time.Time             `json:"start" validate:"required,ltfield=End"` // Start is the start time of the schedule, is required and must be before the End time
	End                *time.Time            `json:"end,omitempty"`                 // End is the optional end time of the schedule; when set it must be after Start
	DurationS          float64               `json:"duration_s" default:"-1"`       // DurationS is the estimated duration of the schedule in seconds
	Cron               string                `json:"cron,omitempty"`                // here the cron format : ss mm hh dd MM dw task
	UUID               string                `json:"id" validate:"required"`        // UUID identifies the schedule; it is propagated to the generated executions as their ExecutionsID
	Workflow           *workflow.Workflow    `json:"workflow,omitempty"`            // Workflow is the workflow dependency of the schedule
	WorkflowExecution  []*WorkflowExecution  `json:"workflow_executions,omitempty"` // WorkflowExecution is the list of executions of the workflow
	Message            string                `json:"message,omitempty"`             // Message is the message of the schedule
	Warning            string                `json:"warning,omitempty"`             // Warning is the warning message of the schedule
	Start              time.Time             `json:"start" validate:"required,ltfield=End"` // Start is the start time of the schedule, is required and must be before the End time
	End                *time.Time            `json:"end,omitempty"`                 // End is the optional end time of the schedule; when set it must be after Start
	DurationS          float64               `json:"duration_s" default:"-1"`       // DurationS is the estimated duration of the schedule in seconds
	Cron               string                `json:"cron,omitempty"`                // here the cron format : ss mm hh dd MM dw task
}
|
||||
|
||||
func NewScheduler(start string, end string, durationInS float64, cron string) *WorkflowSchedule {
|
||||
@@ -36,6 +42,7 @@ func NewScheduler(start string, end string, durationInS float64, cron string) *W
|
||||
return nil
|
||||
}
|
||||
ws := &WorkflowSchedule{
|
||||
UUID: uuid.New().String(),
|
||||
Start: s,
|
||||
DurationS: durationInS,
|
||||
Cron: cron,
|
||||
@@ -47,72 +54,159 @@ func NewScheduler(start string, end string, durationInS float64, cron string) *W
|
||||
return ws
|
||||
}
|
||||
|
||||
func (ws *WorkflowSchedule) CheckBooking(wfID string, request *tools.APIRequest) (bool, *workflow.Workflow, []*WorkflowExecutions, error) {
|
||||
if request.Caller == nil && request.Caller.URLS == nil && request.Caller.URLS[tools.BOOKING] == nil || request.Caller.URLS[tools.BOOKING][tools.POST] == "" {
|
||||
return false, nil, []*WorkflowExecutions{}, errors.New("no caller defined")
|
||||
func (ws *WorkflowSchedule) CheckBooking(wfID string, request *tools.APIRequest) (bool, *workflow.Workflow, []*WorkflowExecution, []*booking.Booking, error) {
|
||||
l := logs.GetLogger().With().Str("SchedulerID", ws.UUID).Logger()
|
||||
l.Debug().Msg("Checking booking")
|
||||
if request.Caller == nil && request.Caller.URLS == nil && request.Caller.URLS[tools.BOOKING] == nil || request.Caller.URLS[tools.BOOKING][tools.GET] == "" {
|
||||
return false, nil, []*WorkflowExecution{}, []*booking.Booking{}, errors.New("no caller defined")
|
||||
}
|
||||
access := workflow.NewAccessor(nil)
|
||||
access := workflow.NewAccessor(request)
|
||||
res, code, err := access.LoadOne(wfID)
|
||||
if code != 200 {
|
||||
return false, nil, []*WorkflowExecutions{}, errors.New("could not load the workflow with id: " + err.Error())
|
||||
return false, nil, []*WorkflowExecution{}, []*booking.Booking{}, errors.New("could not load the workflow with id: " + err.Error())
|
||||
}
|
||||
wf := res.(*workflow.Workflow)
|
||||
longest, priceds, wf, err := wf.Planify(ws.Start, ws.End, request)
|
||||
if err != nil {
|
||||
return false, wf, []*WorkflowExecutions{}, err
|
||||
return false, wf, []*WorkflowExecution{}, []*booking.Booking{}, err
|
||||
}
|
||||
ws.DurationS = longest
|
||||
ws.Message = "We estimate that the workflow will start at " + ws.Start.String() + " and last " + fmt.Sprintf("%v", ws.DurationS) + "seconds."
|
||||
ws.Message = "We estimate that the workflow will start at " + ws.Start.String() + " and last " + fmt.Sprintf("%v", ws.DurationS) + " seconds."
|
||||
if ws.End != nil && ws.Start.Add(time.Duration(longest)*time.Second).After(*ws.End) {
|
||||
ws.Warning = "The workflow may be too long to be executed in the given time frame, we will try to book it anyway\n"
|
||||
}
|
||||
l.Debug().Msg("Getting executions")
|
||||
execs, err := ws.getExecutions(wf)
|
||||
if err != nil {
|
||||
return false, wf, []*WorkflowExecutions{}, err
|
||||
return false, wf, []*WorkflowExecution{}, []*booking.Booking{}, err
|
||||
}
|
||||
for _, exec := range execs {
|
||||
bookings := exec.Book(wfID, priceds)
|
||||
for _, booking := range bookings {
|
||||
_, err := (&peer.Peer{}).LaunchPeerExecution(booking.DestPeerID, "",
|
||||
tools.BOOKING, tools.POSTCHECK, booking.Serialize(booking), request.Caller)
|
||||
if err != nil {
|
||||
return false, wf, execs, err
|
||||
}
|
||||
bookings := []*booking.Booking{}
|
||||
for i, exec := range execs {
|
||||
l.Debug().Msg("looping throughs execs : " + string(i))
|
||||
bookings = append(bookings, exec.Book(ws.UUID, wfID, priceds)...)
|
||||
}
|
||||
|
||||
errCh := make(chan error, len(bookings))
|
||||
var m sync.Mutex
|
||||
|
||||
for _, b := range bookings {
|
||||
go getBooking(l, b, request, wf, execs, bookings, errCh, &m)
|
||||
}
|
||||
|
||||
for i := 0; i < len(bookings); i++ {
|
||||
if err := <-errCh; err != nil {
|
||||
return false, wf, execs, bookings, err
|
||||
}
|
||||
}
|
||||
return true, wf, execs, nil
|
||||
|
||||
return true, wf, execs, bookings, nil
|
||||
}
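
The check above fans one GET per booking out to goroutines and joins on a buffered error channel; stripped to its essentials the pattern looks like the sketch below, where checkOneBooking stands in for getBooking.

errCh := make(chan error, len(bookings))
for _, b := range bookings {
	go func(b *booking.Booking) {
		errCh <- checkOneBooking(b) // one result per goroutine, never blocks
	}(b)
}
for range bookings {
	if err := <-errCh; err != nil {
		return false, wf, execs, bookings, err // first error aborts the schedule
	}
}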
|
||||
|
||||
func (ws *WorkflowSchedule) Schedules(wfID string, request *tools.APIRequest) (*workflow.Workflow, []*WorkflowExecutions, error) {
|
||||
func getBooking(l zerolog.Logger, b *booking.Booking, request *tools.APIRequest, wf *workflow.Workflow, execs []*WorkflowExecution, bookings []*booking.Booking, errCh chan error, m *sync.Mutex) {
|
||||
|
||||
m.Lock()
|
||||
c, err := getCallerCopy(request, errCh)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
m.Unlock()
|
||||
|
||||
bl := l.With().Str("booking", b.UUID).Logger()
|
||||
meth := request.Caller.URLS[tools.BOOKING][tools.GET]
|
||||
meth = strings.ReplaceAll(meth, ":id", b.ResourceID)
|
||||
meth = strings.ReplaceAll(meth, ":start_date", b.ExpectedStartDate.Format("2006-01-02T15:04:05"))
|
||||
meth = strings.ReplaceAll(meth, ":end_date", b.ExpectedEndDate.Format("2006-01-02T15:04:05"))
|
||||
request.Caller.URLS[tools.BOOKING][tools.GET] = meth
|
||||
bl.Debug().Msg("Get booking " + b.UUID + " on " + b.DestPeerID)
|
||||
_, err = (&peer.Peer{}).LaunchPeerExecution(b.DestPeerID, b.ResourceID, tools.BOOKING, tools.GET, nil, &c)
|
||||
bl.Debug().Msg("Received response from Get booking " + b.UUID + " on " + b.DestPeerID)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
|
||||
errCh <- nil
|
||||
}
|
||||
|
||||
func getCallerCopy(request *tools.APIRequest, errCh chan error) (tools.HTTPCaller, error) {
|
||||
var c tools.HTTPCaller
|
||||
err := request.Caller.DeepCopy(c)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return tools.HTTPCaller{}, err
|
||||
}
|
||||
c.URLS = request.Caller.URLS
|
||||
return c, err
|
||||
}
|
||||
|
||||
func (ws *WorkflowSchedule) Schedules(wfID string, request *tools.APIRequest) (*WorkflowSchedule, *workflow.Workflow, []*WorkflowExecution, error) {
|
||||
if request == nil {
|
||||
return nil, []*WorkflowExecutions{}, errors.New("no request found")
|
||||
return ws, nil, []*WorkflowExecution{}, errors.New("no request found")
|
||||
}
|
||||
c := request.Caller
|
||||
if c == nil || c.URLS == nil || c.URLS[tools.BOOKING] == nil {
|
||||
return nil, []*WorkflowExecutions{}, errors.New("no caller defined")
|
||||
return ws, nil, []*WorkflowExecution{}, errors.New("no caller defined")
|
||||
}
|
||||
methods := c.URLS[tools.BOOKING]
|
||||
if _, ok := methods[tools.POST]; !ok {
|
||||
return nil, []*WorkflowExecutions{}, errors.New("no path found")
|
||||
if _, ok := methods[tools.GET]; !ok {
|
||||
return ws, nil, []*WorkflowExecution{}, errors.New("no path found")
|
||||
}
|
||||
ok, wf, executions, err := ws.CheckBooking(wfID, request)
|
||||
ok, wf, executions, bookings, err := ws.CheckBooking(wfID, request)
|
||||
ws.WorkflowExecution = executions
|
||||
if !ok || err != nil {
|
||||
return nil, []*WorkflowExecutions{}, errors.New("could not book the workflow" + fmt.Sprintf("%v", err))
|
||||
return ws, nil, executions, errors.New("could not book the workflow : " + fmt.Sprintf("%v", err))
|
||||
}
|
||||
ws.Workflow = wf
|
||||
|
||||
var errCh = make(chan error, len(bookings))
|
||||
var m sync.Mutex
|
||||
|
||||
for _, booking := range bookings {
|
||||
go ws.BookExecs(booking, request, errCh, &m)
|
||||
}
|
||||
|
||||
for i := 0; i < len(bookings); i++ {
|
||||
if err := <- errCh ; err != nil {
|
||||
return ws, wf, executions, errors.New("could not launch the peer execution : " + fmt.Sprintf("%v", err))
|
||||
}
|
||||
}
|
||||
|
||||
ws.Workflow = wf
|
||||
ws.WorkflowExecutions = executions
|
||||
|
||||
fmt.Println("Schedules")
|
||||
for _, exec := range executions {
|
||||
err := exec.PurgeDraft(request)
|
||||
if err != nil {
|
||||
return nil, []*WorkflowExecutions{}, errors.New("could not book the workflow" + fmt.Sprintf("%v", err))
|
||||
return ws, nil, []*WorkflowExecution{}, errors.New("purge draft" + fmt.Sprintf("%v", err))
|
||||
}
|
||||
exec.GenerateID()
|
||||
// Should DELETE the previous execution2
|
||||
exec.StoreDraftDefault()
|
||||
utils.GenericStoreOne(exec, NewAccessor(request))
|
||||
}
|
||||
return wf, executions, nil
|
||||
fmt.Println("Schedules")
|
||||
return ws, wf, executions, nil
|
||||
}
|
||||
|
||||
func (ws *WorkflowSchedule) BookExecs(booking *booking.Booking, request *tools.APIRequest, errCh chan error, m *sync.Mutex) {
|
||||
|
||||
m.Lock()
|
||||
c, err := getCallerCopy(request, errCh)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
m.Unlock()
|
||||
|
||||
l := logs.GetLogger().With().Str("SchedulerID", ws.UUID).Logger()
|
||||
l.Debug().Msg("Booking " + booking.UUID + " on " + booking.DestPeerID)
|
||||
_, err = (&peer.Peer{}).LaunchPeerExecution(booking.DestPeerID, "",
|
||||
tools.BOOKING, tools.POST, booking.Serialize(booking), &c)
|
||||
l.Debug().Msg("Received answer for booking " + booking.UUID + " on " + booking.DestPeerID)
|
||||
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
|
||||
errCh <- nil
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -127,21 +221,23 @@ VERIFY THAT WE HANDLE DIFFERENCE BETWEEN LOCATION TIME && BOOKING
|
||||
* getExecutions is a function that returns the executions of a workflow
|
||||
* it returns an array of workflow_execution.WorkflowExecution
|
||||
*/
|
||||
func (ws *WorkflowSchedule) getExecutions(workflow *workflow.Workflow) ([]*WorkflowExecutions, error) {
|
||||
workflows_executions := []*WorkflowExecutions{}
|
||||
func (ws *WorkflowSchedule) getExecutions(workflow *workflow.Workflow) ([]*WorkflowExecution, error) {
|
||||
workflows_executions := []*WorkflowExecution{}
|
||||
dates, err := ws.getDates()
|
||||
if err != nil {
|
||||
return workflows_executions, err
|
||||
}
|
||||
for _, date := range dates {
|
||||
obj := &WorkflowExecutions{
|
||||
obj := &WorkflowExecution{
|
||||
AbstractObject: utils.AbstractObject{
|
||||
UUID: uuid.New().String(), // set the uuid of the execution
|
||||
Name: workflow.Name + "_execution_" + date.Start.String(), // set the name of the execution
|
||||
},
|
||||
ExecDate: date.Start, // set the execution date
|
||||
EndDate: date.End, // set the end date
|
||||
State: common.DRAFT, // set the state to 1 (scheduled)
|
||||
WorkflowID: workflow.GetID(), // set the workflow id dependancy of the execution
|
||||
ExecutionsID: ws.UUID,
|
||||
ExecDate: date.Start, // set the execution date
|
||||
EndDate: date.End, // set the end date
|
||||
State: enum.DRAFT, // set the state to 1 (scheduled)
|
||||
WorkflowID: workflow.GetID(), // set the workflow id dependancy of the execution
|
||||
}
|
||||
workflows_executions = append(workflows_executions, obj)
|
||||
}
|
||||
@@ -152,7 +248,7 @@ func (ws *WorkflowSchedule) getDates() ([]Schedule, error) {
	schedule := []Schedule{}
	if len(ws.Cron) > 0 { // if cron is set then end date should be set
		if ws.End == nil {
			return schedule, errors.New("a cron task should have an end date.")
			return schedule, errors.New("a cron task should have an end date")
		}
		if ws.DurationS <= 0 {
			ws.DurationS = ws.End.Sub(ws.Start).Seconds()
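
For context, a minimal sketch (not taken from this commit) of how the cron expression and the Start/End window can be expanded into concrete dates with the robfig/cron parser this package already imports; the Schedule literal's fields are assumed from their use in getExecutions.

// Assumes robfig/cron v1, whose Parse accepts the six-field "ss mm hh dd MM dw" spec.
sched, err := cron.Parse(ws.Cron)
if err != nil {
	return schedule, err
}
for d := sched.Next(ws.Start); ws.End != nil && d.Before(*ws.End); d = sched.Next(d) {
	e := d.Add(time.Duration(ws.DurationS) * time.Second)
	schedule = append(schedule, Schedule{Start: d, End: &e})
}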
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
package workspace
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
@@ -23,15 +21,12 @@ func (d *Workspace) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
||||
}
|
||||
|
||||
func (ao *Workspace) VerifyAuth(request *tools.APIRequest) bool {
|
||||
fmt.Println("Workspace.VerifyAuth", ao.Shared)
|
||||
if ao.Shared != "" {
|
||||
shared, code, _ := shallow_collaborative_area.NewAccessor(request).LoadOne(ao.Shared)
|
||||
fmt.Println("Workspace.VerifyAuth", shared, code)
|
||||
if code != 200 || shared == nil {
|
||||
return false
|
||||
}
|
||||
return shared.VerifyAuth(request)
|
||||
}
|
||||
fmt.Println("Workspace.VerifyAuth", ao.AbstractObject.VerifyAuth(request))
|
||||
return ao.AbstractObject.VerifyAuth(request)
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package workspace
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/logs"
|
||||
@@ -73,12 +72,14 @@ func (a *workspaceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils
|
||||
func (a *workspaceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
filters := &dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{
|
||||
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: data.GetName() + "_workspace"}},
|
||||
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: data.GetName() + "_workspace"}},
|
||||
"abstractobject.creator_id": {{Operator: dbs.EQUAL.String(), Value: a.GetPeerID()}},
|
||||
},
|
||||
}
|
||||
// filters *dbs.Filters, word string, isDraft bool
|
||||
res, _, err := a.Search(filters, "", true) // Search for the workspace
|
||||
if err == nil && len(res) > 0 { // If the workspace already exists, return an error
|
||||
return nil, 409, errors.New("A workspace with the same name already exists")
|
||||
return nil, 409, errors.New("a workspace with the same name already exists")
|
||||
}
|
||||
// reset the resources
|
||||
d := data.(*Workspace)
|
||||
@@ -116,7 +117,6 @@ func (a *workspaceMongoAccessor) Search(filters *dbs.Filters, search string, isD
|
||||
This function is used to share the workspace with the peers
|
||||
*/
|
||||
func (a *workspaceMongoAccessor) share(realData *Workspace, method tools.METHOD, caller *tools.HTTPCaller) {
|
||||
fmt.Println("Sharing workspace", realData, caller)
|
||||
if realData == nil || realData.Shared == "" || caller == nil || caller.Disabled {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package tools
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/config"
|
||||
@@ -116,8 +115,8 @@ func (a *API) SubscribeRouter(infos []*beego.ControllerInfo) {
|
||||
// CheckRemotePeer checks the state of a remote peer
|
||||
func (a *API) CheckRemotePeer(url string) (State, map[string]int) {
|
||||
// Check if the database is up
|
||||
caller := NewHTTPCaller(map[DataType]map[METHOD]string{}) // Create a new http caller
|
||||
var resp APIStatusResponse
|
||||
caller := NewHTTPCaller(map[DataType]map[METHOD]string{}) // Create a new http caller
|
||||
b, err := caller.CallPost(url, "", map[string]interface{}{}) // Call the status endpoint of the peer
|
||||
if err != nil {
|
||||
return DEAD, map[string]int{} // If the peer is not reachable, return dead
|
||||
@@ -150,7 +149,6 @@ func (a *API) CheckRemoteAPIs(apis []DataType) (State, map[string]string, error)
|
||||
continue
|
||||
}
|
||||
json.Unmarshal(b, &resp)
|
||||
fmt.Println(string(b))
|
||||
if resp.Data == nil { //
|
||||
state = REDUCED_SERVICE // If the response is empty, return reduced service
|
||||
continue
|
||||
|
||||
@@ -13,7 +13,6 @@ const (
|
||||
WORKFLOW
|
||||
WORKFLOW_EXECUTION
|
||||
WORKSPACE
|
||||
RESOURCE_MODEL
|
||||
PEER
|
||||
COLLABORATIVE_AREA
|
||||
RULE
|
||||
@@ -21,7 +20,12 @@ const (
|
||||
WORKFLOW_HISTORY
|
||||
WORKSPACE_HISTORY
|
||||
ORDER
|
||||
BUYING_STATUS
|
||||
PURCHASE_RESOURCE
|
||||
ADMIRALTY_SOURCE
|
||||
ADMIRALTY_TARGET
|
||||
ADMIRALTY_SECRET
|
||||
ADMIRALTY_KUBECONFIG
|
||||
ADMIRALTY_NODES
|
||||
)
|
||||
|
||||
var NOAPI = ""
|
||||
@@ -31,6 +35,11 @@ var WORKFLOWAPI = "oc-workflow"
|
||||
var WORKSPACEAPI = "oc-workspace"
|
||||
var PEERSAPI = "oc-peer"
|
||||
var DATACENTERAPI = "oc-datacenter"
|
||||
var ADMIRALTY_SOURCEAPI = DATACENTERAPI+"/admiralty/source"
|
||||
var ADMIRALTY_TARGETAPI = DATACENTERAPI+"/admiralty/target"
|
||||
var ADMIRALTY_SECRETAPI = DATACENTERAPI+"/admiralty/secret"
|
||||
var ADMIRALTY_KUBECONFIGAPI = DATACENTERAPI+"/admiralty/kubeconfig"
|
||||
var ADMIRALTY_NODESAPI = DATACENTERAPI+"/admiralty/node"
|
||||
|
||||
// Bind the standard API name to the data type
|
||||
var DefaultAPI = [...]string{
|
||||
@@ -43,7 +52,6 @@ var DefaultAPI = [...]string{
|
||||
WORKFLOWAPI,
|
||||
NOAPI,
|
||||
WORKSPACEAPI,
|
||||
CATALOGAPI,
|
||||
PEERSAPI,
|
||||
SHAREDAPI,
|
||||
SHAREDAPI,
|
||||
@@ -52,6 +60,11 @@ var DefaultAPI = [...]string{
|
||||
NOAPI,
|
||||
NOAPI,
|
||||
NOAPI,
|
||||
ADMIRALTY_SOURCEAPI,
|
||||
ADMIRALTY_TARGETAPI,
|
||||
ADMIRALTY_SECRETAPI,
|
||||
ADMIRALTY_KUBECONFIGAPI,
|
||||
ADMIRALTY_NODESAPI,
|
||||
}
|
||||
|
||||
// Bind the standard data name to the data type
|
||||
@@ -65,7 +78,6 @@ var Str = [...]string{
|
||||
"workflow",
|
||||
"workflow_execution",
|
||||
"workspace",
|
||||
"resource_model",
|
||||
"peer",
|
||||
"collaborative_area",
|
||||
"rule",
|
||||
@@ -73,7 +85,12 @@ var Str = [...]string{
|
||||
"workflow_history",
|
||||
"workspace_history",
|
||||
"order",
|
||||
"buying_status",
|
||||
"purchase_resource",
|
||||
"admiralty_source",
|
||||
"admiralty_target",
|
||||
"admiralty_secret",
|
||||
"admiralty_kubeconfig",
|
||||
"admiralty_node",
|
||||
}
|
||||
|
||||
func FromInt(i int) string {
|
||||
@@ -92,3 +109,7 @@ func (d DataType) String() string { // String - Returns the string name of the d
|
||||
func (d DataType) EnumIndex() int {
|
||||
return int(d)
|
||||
}
|
||||
|
||||
func DataTypeList() []DataType {
|
||||
return []DataType{DATA_RESOURCE, PROCESSING_RESOURCE, STORAGE_RESOURCE, COMPUTE_RESOURCE, WORKFLOW_RESOURCE, WORKFLOW, WORKFLOW_EXECUTION, WORKSPACE, PEER, COLLABORATIVE_AREA, RULE, BOOKING, WORKFLOW_HISTORY, WORKSPACE_HISTORY, ORDER, PURCHASE_RESOURCE,ADMIRALTY_SOURCE,ADMIRALTY_TARGET,ADMIRALTY_SECRET,ADMIRALTY_KUBECONFIG,ADMIRALTY_NODES}
|
||||
}
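
For orientation (outputs hedged, since they depend on the tables staying index-aligned): a DataType resolves both a display name and a default API route through EnumIndex().

dt := tools.WORKFLOW
fmt.Println(dt.String())                      // "workflow"
fmt.Println(tools.DefaultAPI[dt.EnumIndex()]) // the API bound to that type
for _, t := range tools.DataTypeList() {
	fmt.Println(t.EnumIndex(), t.String())
}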
|
||||
|
||||
@@ -3,6 +3,7 @@ package tools
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -49,8 +50,9 @@ func ToMethod(str string) METHOD {
|
||||
var HTTPCallerInstance = &HTTPCaller{} // Singleton instance of the HTTPCaller
|
||||
|
||||
type HTTPCaller struct {
|
||||
URLS map[DataType]map[METHOD]string // Map of the different methods and their urls
|
||||
Disabled bool // Disabled flag
|
||||
URLS map[DataType]map[METHOD]string // Map of the different methods and their urls
|
||||
Disabled bool // Disabled flag
|
||||
LastResults map[string]interface{} // Used to store information regarding the last execution of a given method on a given data type
|
||||
}
|
||||
|
||||
// NewHTTPCaller creates a new instance of the HTTP Caller
|
||||
@@ -61,6 +63,16 @@ func NewHTTPCaller(urls map[DataType]map[METHOD]string) *HTTPCaller {
|
||||
}
|
||||
}
|
||||
|
||||
// Creates a copy of the current caller, in order to have parallelized executions without race condition
func (c *HTTPCaller) DeepCopy(dst HTTPCaller) error {
	bytes, err := json.Marshal(c)
	if err != nil {
		return err
	}

	return json.Unmarshal(bytes, &dst)
}
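
One caveat worth noting: dst is passed by value, so the unmarshalled fields land in a local copy the caller never sees (which is why getCallerCopy re-assigns c.URLS by hand afterwards). A pointer-based variant, shown only as a sketch, would make the copy visible to the caller:

// Sketch, not part of this commit.
func (c *HTTPCaller) DeepCopyInto(dst *HTTPCaller) error {
	raw, err := json.Marshal(c)
	if err != nil {
		return err
	}
	return json.Unmarshal(raw, dst) // dst is a pointer, so the caller sees the result
}

// usage:
//   var cp HTTPCaller
//   if err := request.Caller.DeepCopyInto(&cp); err != nil { ... }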
|
||||
|
||||
// CallGet calls the GET method on the HTTP server
|
||||
func (caller *HTTPCaller) CallGet(url string, subpath string, types ...string) ([]byte, error) {
|
||||
req, err := http.NewRequest(http.MethodGet, url+subpath, bytes.NewBuffer([]byte("")))
|
||||
@@ -76,22 +88,41 @@ func (caller *HTTPCaller) CallGet(url string, subpath string, types ...string) (
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return io.ReadAll(resp.Body)
|
||||
err = caller.StoreResp(resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return caller.LastResults["body"].([]byte), nil
|
||||
}
|
||||
|
||||
// CallPut calls the DELETE method on the HTTP server
|
||||
func (caller *HTTPCaller) CallDelete(url string, subpath string) ([]byte, error) {
|
||||
resp, err := http.NewRequest("DELETE", url+subpath, nil)
|
||||
if err != nil || resp == nil || resp.Body == nil {
|
||||
req, err := http.NewRequest("DELETE", url+subpath, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := &http.Client{}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil || resp == nil || resp.Body == nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return io.ReadAll(resp.Body)
|
||||
|
||||
err = caller.StoreResp(resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return caller.LastResults["body"].([]byte), nil
|
||||
}
|
||||
|
||||
// CallPost calls the POST method on the HTTP server
|
||||
func (caller *HTTPCaller) CallPost(url string, subpath string, body map[string]interface{}, types ...string) ([]byte, error) {
|
||||
postBody, _ := json.Marshal(body)
|
||||
func (caller *HTTPCaller) CallPost(url string, subpath string, body interface{}, types ...string) ([]byte, error) {
|
||||
postBody, err := json.Marshal(body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
responseBody := bytes.NewBuffer(postBody)
|
||||
contentType := "application/json"
|
||||
if len(types) > 0 {
|
||||
@@ -102,7 +133,12 @@ func (caller *HTTPCaller) CallPost(url string, subpath string, body map[string]i
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return io.ReadAll(resp.Body)
|
||||
err = caller.StoreResp(resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return caller.LastResults["body"].([]byte), nil
|
||||
}
|
||||
|
||||
// CallPost calls the POST method on the HTTP server
|
||||
@@ -120,7 +156,12 @@ func (caller *HTTPCaller) CallPut(url string, subpath string, body map[string]in
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return io.ReadAll(resp.Body)
|
||||
err = caller.StoreResp(resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return caller.LastResults["body"].([]byte), nil
|
||||
}
|
||||
|
||||
// CallRaw calls the Raw method on the HTTP server
|
||||
@@ -140,7 +181,12 @@ func (caller *HTTPCaller) CallRaw(method string, url string, subpath string,
|
||||
req.AddCookie(c)
|
||||
}
|
||||
client := &http.Client{}
|
||||
return client.Do(req)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// CallRaw calls the Raw method on the HTTP server
|
||||
@@ -160,3 +206,17 @@ func (caller *HTTPCaller) CallForm(method string, url string, subpath string,
|
||||
client := &http.Client{}
|
||||
return client.Do(req)
|
||||
}
|
||||
|
||||
func (caller *HTTPCaller) StoreResp(resp *http.Response) error {
	caller.LastResults = make(map[string]interface{})
	caller.LastResults["header"] = resp.Header
	caller.LastResults["code"] = resp.StatusCode
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("Error reading the body of the last request")
		return err
	}

	caller.LastResults["body"] = data
	return nil
}
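
A short usage note (URL invented): after any of the Call* helpers, the status code and headers of the last response can be read back from LastResults alongside the returned body.

body, err := caller.CallGet("http://peer.example", "/status")
if err != nil {
	return err
}
code := caller.LastResults["code"].(int)              // HTTP status of the last call
headers := caller.LastResults["header"].(http.Header) // response headers
fmt.Println(code, headers.Get("Content-Type"), len(body))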