Compare commits

...

29 Commits

Author  SHA1        Message                              Date
plm     245f3adea3  Merge branch 'issue#4'               2024-12-16 09:18:58 +01:00
mr      fbbce7817b  set up                               2024-12-05 09:21:03 +01:00
mr      1fcbc7c08a  add username to our trip             2024-12-04 12:14:55 +01:00
mr      fd01f535a1  add username to our trip             2024-12-04 11:33:08 +01:00
mr      6681c455d8  debug                                2024-12-03 12:51:41 +01:00
mr      4b88da8ff6  debug                                2024-12-03 11:57:51 +01:00
mr      ea55c94c73  debug                                2024-12-03 10:57:28 +01:00
mr      471e0c9d9b  debug                                2024-12-03 10:01:10 +01:00
mr      2924ccd23b  debug                                2024-12-03 09:25:27 +01:00
mr      6042d47700  debug                                2024-12-03 08:33:36 +01:00
mr      599a614480  test                                 2024-12-02 16:59:08 +01:00
mr      e2ddd7e4e6  test                                 2024-12-02 16:26:44 +01:00
mr      9a2ed2351d  test                                 2024-12-02 14:48:51 +01:00
mr      2ec6899a18  test                                 2024-12-02 13:19:23 +01:00
mr      f17f48921d  test                                 2024-12-02 13:18:12 +01:00
mr      cb21db672b  correct loadone                      2024-12-02 09:11:45 +01:00
mr      b591533ba4  test                                 2024-11-29 10:25:42 +01:00
mr      8d701e67d8  lightest + clearest code             2024-11-28 16:49:41 +01:00
mr      8a11805fe8  some                                 2024-11-28 14:31:25 +01:00
mr      68bb4a51f1  some                                 2024-11-28 14:20:50 +01:00
mr      30be88c8ce  some                                 2024-11-28 14:04:58 +01:00
mr      e524c865ea  some                                 2024-11-28 13:41:50 +01:00
mr      9ea342c4c4  some                                 2024-11-28 13:19:01 +01:00
mr      b388e43f30  some                                 2024-11-28 12:51:23 +01:00
mr      e560f34215  some                                 2024-11-28 12:35:56 +01:00
mr      11a3767255  some                                 2024-11-28 12:04:20 +01:00
mr      0349ad9a29  some                                 2024-11-28 11:49:20 +01:00
mr      195f5b0794  some                                 2024-11-28 11:17:02 +01:00
mr      2816e3ea35  test of a lightest formula of code   2024-11-28 11:05:54 +01:00
51 changed files with 1299 additions and 2368 deletions

View File

@@ -16,6 +16,7 @@ type Config struct {
 	Port      string
 	LokiUrl   string
 	LogLevel  string
+	Whitelist bool
 }
 
 func (c Config) GetUrl() string {
@@ -37,19 +38,11 @@ func GetConfig() *Config {
 }
 
 func SetConfig(mongoUrl string, database string, natsUrl string, lokiUrl string, logLevel string) *Config {
-	/*once.Do(func() {
-		instance = &Config{
-			MongoUrl:      mongoUrl,
-			MongoDatabase: database,
-			NATSUrl:       natsUrl,
-			LokiUrl:       lokiUrl,
-			LogLevel:      logLevel,
-		}
-	})*/
 	GetConfig().MongoUrl = mongoUrl
 	GetConfig().MongoDatabase = database
 	GetConfig().NATSUrl = natsUrl
 	GetConfig().LokiUrl = lokiUrl
 	GetConfig().LogLevel = logLevel
+	GetConfig().Whitelist = true
 	return GetConfig()
 }
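The new Whitelist flag is forced to true by SetConfig. A minimal sketch of reading it downstream, assuming the file above belongs to the config package that later hunks import as cloud.o-forge.io/core/oc-lib/config; everything else here is illustrative:

package example

import (
	"fmt"

	"cloud.o-forge.io/core/oc-lib/config"
)

// reportWhitelist is a hypothetical helper: it only shows how the new flag is read.
func reportWhitelist() {
	if config.GetConfig().Whitelist {
		fmt.Println("peer whitelisting is enabled") // always the case right after SetConfig, which sets Whitelist = true
	} else {
		fmt.Println("peer whitelisting is disabled")
	}
}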

View File

@@ -3,6 +3,7 @@ package mongo
 import (
 	"context"
 	"errors"
+	"fmt"
 	"slices"
 	"time"
@@ -288,6 +289,7 @@ func (m *MongoDB) Search(filters *dbs.Filters, collection_name string) (*mongo.C
 	}
 	opts := options.Find()
 	opts.SetLimit(100)
+	fmt.Println("Filters: ", CollectionMap, collection_name)
 	targetDBCollection := CollectionMap[collection_name]
 	orList := bson.A{}
 	andList := bson.A{}

View File

@@ -1,8 +1,11 @@
 package oclib
 
 import (
+	"encoding/base64"
+	"encoding/json"
 	"errors"
 	"fmt"
+	"net/http"
 	"strings"
 
 	"runtime/debug"
@@ -15,12 +18,8 @@ import (
 	"cloud.o-forge.io/core/oc-lib/models/collaborative_area"
 	"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
 	"cloud.o-forge.io/core/oc-lib/models/peer"
-	"cloud.o-forge.io/core/oc-lib/models/resource_model"
-	"cloud.o-forge.io/core/oc-lib/models/resources/compute"
-	"cloud.o-forge.io/core/oc-lib/models/resources/data"
-	"cloud.o-forge.io/core/oc-lib/models/resources/processing"
-	"cloud.o-forge.io/core/oc-lib/models/resources/storage"
-	w "cloud.o-forge.io/core/oc-lib/models/resources/workflow"
+	"cloud.o-forge.io/core/oc-lib/models/resources"
+	"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
 	"cloud.o-forge.io/core/oc-lib/models/utils"
 	w2 "cloud.o-forge.io/core/oc-lib/models/workflow"
 	"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
@@ -48,7 +47,7 @@
 	WORKSPACE          = tools.WORKSPACE
 	WORKFLOW_EXECUTION = tools.WORKFLOW_EXECUTION
 	PEER               = tools.PEER
-	SHARED_WORKSPACE   = tools.COLLABORATIVE_AREA
+	COLLABORATIVE_AREA = tools.COLLABORATIVE_AREA
 	RULE               = tools.RULE
 	BOOKING            = tools.BOOKING
 )
@@ -118,6 +117,49 @@ func InitDaemon(appName string) {
 	beego.BConfig.WebConfig.StaticDir["/swagger"] = "swagger"
 }
 
+type IDTokenClaims struct {
+	UserID string   `json:"user_id"`
+	PeerID string   `json:"peer_id"`
+	Groups []string `json:"groups"`
+}
+
+// SessionClaims struct
+type SessionClaims struct {
+	AccessToken map[string]interface{} `json:"access_token"`
+	IDToken     IDTokenClaims          `json:"id_token"`
+}
+
+// Claims struct
+type Claims struct {
+	Session SessionClaims `json:"session"`
+}
+
+func ExtractTokenInfo(request http.Request) (string, string, []string) {
+	reqToken := request.Header.Get("Authorization")
+	splitToken := strings.Split(reqToken, "Bearer ")
+	if len(splitToken) < 2 {
+		reqToken = ""
+	} else {
+		reqToken = splitToken[1]
+	}
+	if reqToken != "" {
+		token := strings.Split(reqToken, ".")
+		if len(token) > 2 {
+			bytes, err := base64.StdEncoding.DecodeString(token[2])
+			if err != nil {
+				return "", "", []string{}
+			}
+			var c Claims
+			err = json.Unmarshal(bytes, &c)
+			if err != nil {
+				return "", "", []string{}
+			}
+			return c.Session.IDToken.UserID, c.Session.IDToken.PeerID, c.Session.IDToken.Groups
+		}
+	}
+	return "", "", []string{}
+}
+
 func Init(appName string) {
 	InitDaemon(appName)
 	api := &tools.API{}
@@ -158,7 +200,7 @@ func SetConfig(mongoUrl string, database string, natsUrl string, lokiUrl string,
 	If not we will store it
 	Resource model is the model that will define the structure of the resources
 */
-	accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
+	accessor := (&resource_model.ResourceModel{}).GetAccessor("", "", []string{}, nil)
 	for _, model := range []string{tools.DATA_RESOURCE.String(), tools.PROCESSING_RESOURCE.String(), tools.STORAGE_RESOURCE.String(), tools.COMPUTE_RESOURCE.String(), tools.WORKFLOW_RESOURCE.String()} {
 		data, code, _ := accessor.Search(nil, model)
 		if code == 404 || len(data) == 0 {
@@ -225,6 +267,18 @@ func GetConfLoader() *onion.Onion {
 	return config.GetConfLoader()
 }
 
+type Request struct {
+	collection LibDataEnum
+	user       string
+	peerID     string
+	groups     []string
+	caller     *tools.HTTPCaller
+}
+
+func NewRequest(collection LibDataEnum, user string, peerID string, groups []string, caller *tools.HTTPCaller) *Request {
+	return &Request{collection: collection, user: user, peerID: peerID, groups: groups, caller: caller}
+}
+
 /*
 * Search will search for the data in the database
 * @param filters *dbs.Filters
@@ -233,18 +287,14 @@
 * @param c ...*tools.HTTPCaller
 * @return data LibDataShallow
 */
-func Search(filters *dbs.Filters, word string, collection LibDataEnum, c ...*tools.HTTPCaller) (data LibDataShallow) {
+func (r *Request) Search(filters *dbs.Filters, word string) (data LibDataShallow) {
 	defer func() { // recover the panic
 		if r := recover(); r != nil {
 			tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in Search : "+fmt.Sprintf("%v", r)))
 			data = LibDataShallow{Data: nil, Code: 500, Err: "Panic recovered in LoadAll : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
 		}
 	}()
-	var caller *tools.HTTPCaller // define the caller
-	if len(c) > 0 {
-		caller = c[0]
-	}
-	d, code, err := models.Model(collection.EnumIndex()).GetAccessor(caller).Search(filters, word)
+	d, code, err := models.Model(r.collection.EnumIndex()).GetAccessor(r.user, r.peerID, r.groups, r.caller).Search(filters, word)
 	if err != nil {
 		data = LibDataShallow{Data: d, Code: code, Err: err.Error()}
 		return
@@ -259,18 +309,14 @@ func Search(filters *dbs.Filters, word string, collection LibDataEnum, c ...*too
 * @param c ...*tools.HTTPCaller
 * @return data LibDataShallow
 */
-func LoadAll(collection LibDataEnum, c ...*tools.HTTPCaller) (data LibDataShallow) {
+func (r *Request) LoadAll() (data LibDataShallow) {
 	defer func() { // recover the panic
 		if r := recover(); r != nil {
 			tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in LoadAll : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
 			data = LibDataShallow{Data: nil, Code: 500, Err: "Panic recovered in LoadAll : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
 		}
 	}()
-	var caller *tools.HTTPCaller // define the caller
-	if len(c) > 0 {
-		caller = c[0]
-	}
-	d, code, err := models.Model(collection.EnumIndex()).GetAccessor(caller).LoadAll()
+	d, code, err := models.Model(r.collection.EnumIndex()).GetAccessor(r.user, r.peerID, r.groups, r.caller).LoadAll()
 	if err != nil {
 		data = LibDataShallow{Data: d, Code: code, Err: err.Error()}
 		return
@@ -286,18 +332,14 @@ func LoadAll(collection LibDataEnum, c ...*tools.HTTPCaller) (data LibDataShallo
 * @param c ...*tools.HTTPCaller
 * @return data LibData
 */
-func LoadOne(collection LibDataEnum, id string, c ...*tools.HTTPCaller) (data LibData) {
+func (r *Request) LoadOne(id string) (data LibData) {
 	defer func() { // recover the panic
 		if r := recover(); r != nil {
 			tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in LoadOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
 			data = LibData{Data: nil, Code: 500, Err: "Panic recovered in LoadOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
 		}
 	}()
-	var caller *tools.HTTPCaller // define the caller
-	if len(c) > 0 {
-		caller = c[0]
-	}
-	d, code, err := models.Model(collection.EnumIndex()).GetAccessor(caller).LoadOne(id)
+	d, code, err := models.Model(r.collection.EnumIndex()).GetAccessor(r.user, r.peerID, r.groups, r.caller).LoadOne(id)
 	if err != nil {
 		data = LibData{Data: d, Code: code, Err: err.Error()}
 		return
@@ -314,19 +356,15 @@ func LoadOne(collection LibDataEnum, id string, c ...*tools.HTTPCaller) (data Li
 * @param c ...*tools.HTTPCaller
 * @return data LibData
 */
-func UpdateOne(collection LibDataEnum, set map[string]interface{}, id string, c ...*tools.HTTPCaller) (data LibData) {
+func (r *Request) UpdateOne(set map[string]interface{}, id string) (data LibData) {
 	defer func() { // recover the panic
 		if r := recover(); r != nil {
 			tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in UpdateOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
 			data = LibData{Data: nil, Code: 500, Err: "Panic recovered in UpdateOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
 		}
 	}()
-	var caller *tools.HTTPCaller // define the caller
-	if len(c) > 0 {
-		caller = c[0]
-	}
-	model := models.Model(collection.EnumIndex())
-	d, code, err := model.GetAccessor(caller).UpdateOne(model.Deserialize(set), id)
+	model := models.Model(r.collection.EnumIndex())
+	d, code, err := model.GetAccessor(r.user, r.peerID, r.groups, r.caller).UpdateOne(model.Deserialize(set, model), id)
 	if err != nil {
 		data = LibData{Data: d, Code: code, Err: err.Error()}
 		return
@@ -342,18 +380,14 @@ func UpdateOne(collection LibDataEnum, set map[string]interface{}, id string, c
 * @param c ...*tools.HTTPCaller
 * @return data LibData
 */
-func DeleteOne(collection LibDataEnum, id string, c ...*tools.HTTPCaller) (data LibData) {
+func (r *Request) DeleteOne(id string) (data LibData) {
 	defer func() { // recover the panic
 		if r := recover(); r != nil {
 			tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in DeleteOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
 			data = LibData{Data: nil, Code: 500, Err: "Panic recovered in DeleteOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
 		}
 	}()
-	var caller *tools.HTTPCaller // define the caller
-	if len(c) > 0 {
-		caller = c[0]
-	}
-	d, code, err := models.Model(collection.EnumIndex()).GetAccessor(caller).DeleteOne(id)
+	d, code, err := models.Model(r.collection.EnumIndex()).GetAccessor(r.user, r.peerID, r.groups, r.caller).DeleteOne(id)
 	if err != nil {
 		data = LibData{Data: d, Code: code, Err: err.Error()}
 		return
@@ -369,19 +403,15 @@ func DeleteOne(collection LibDataEnum, id string, c ...*tools.HTTPCaller) (data
 * @param c ...*tools.HTTPCaller
 * @return data LibData
 */
-func StoreOne(collection LibDataEnum, object map[string]interface{}, c ...*tools.HTTPCaller) (data LibData) {
+func (r *Request) StoreOne(object map[string]interface{}) (data LibData) {
 	defer func() { // recover the panic
 		if r := recover(); r != nil {
 			tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in StoreOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
 			data = LibData{Data: nil, Code: 500, Err: "Panic recovered in StoreOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
 		}
 	}()
-	var caller *tools.HTTPCaller // define the caller
-	if len(c) > 0 {
-		caller = c[0]
-	}
-	model := models.Model(collection.EnumIndex())
-	d, code, err := model.GetAccessor(caller).StoreOne(model.Deserialize(object))
+	model := models.Model(r.collection.EnumIndex())
+	d, code, err := model.GetAccessor(r.user, r.peerID, r.groups, r.caller).StoreOne(model.Deserialize(object, model))
 	if err != nil {
 		data = LibData{Data: d, Code: code, Err: err.Error()}
 		return
@@ -397,19 +427,15 @@ func StoreOne(collection LibDataEnum, object map[string]interface{}, c ...*tools
 * @param c ...*tools.HTTPCaller
 * @return data LibData
 */
-func CopyOne(collection LibDataEnum, object map[string]interface{}, c ...*tools.HTTPCaller) (data LibData) {
+func (r *Request) CopyOne(object map[string]interface{}) (data LibData) {
 	defer func() { // recover the panic
 		if r := recover(); r != nil {
 			tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in CopyOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
 			data = LibData{Data: nil, Code: 500, Err: "Panic recovered in UpdateOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
 		}
 	}()
-	var caller *tools.HTTPCaller // define the caller
-	if len(c) > 0 {
-		caller = c[0]
-	}
-	model := models.Model(collection.EnumIndex())
-	d, code, err := model.GetAccessor(caller).CopyOne(model.Deserialize(object))
+	model := models.Model(r.collection.EnumIndex())
+	d, code, err := model.GetAccessor(r.user, r.peerID, r.groups, r.caller).CopyOne(model.Deserialize(object, model))
 	if err != nil {
 		data = LibData{Data: d, Code: code, Err: err.Error()}
 		return
@@ -420,73 +446,73 @@
 // ================ CAST ========================= //
 
-func (l *LibData) ToDataResource() *data.DataResource {
-	if l.Data.GetAccessor(nil).GetType() == tools.DATA_RESOURCE.String() {
-		return l.Data.(*data.DataResource)
+func (l *LibData) ToDataResource() *resources.DataResource {
+	if l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.DATA_RESOURCE {
+		return l.Data.(*resources.DataResource)
 	}
 	return nil
 }
 
-func (l *LibData) ToComputeResource() *compute.ComputeResource {
-	if l.Data != nil && l.Data.GetAccessor(nil).GetType() == tools.COMPUTE_RESOURCE.String() {
-		return l.Data.(*compute.ComputeResource)
+func (l *LibData) ToComputeResource() *resources.ComputeResource {
+	if l.Data != nil && l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.COMPUTE_RESOURCE {
+		return l.Data.(*resources.ComputeResource)
 	}
 	return nil
 }
 
-func (l *LibData) ToStorageResource() *storage.StorageResource {
-	if l.Data.GetAccessor(nil).GetType() == tools.STORAGE_RESOURCE.String() {
-		return l.Data.(*storage.StorageResource)
+func (l *LibData) ToStorageResource() *resources.StorageResource {
-	if l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.STORAGE_RESOURCE {
+		return l.Data.(*resources.StorageResource)
 	}
 	return nil
 }
 
-func (l *LibData) ToProcessingResource() *processing.ProcessingResource {
-	if l.Data.GetAccessor(nil).GetType() == tools.PROCESSING_RESOURCE.String() {
-		return l.Data.(*processing.ProcessingResource)
+func (l *LibData) ToProcessingResource() *resources.ProcessingResource {
+	if l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.PROCESSING_RESOURCE {
+		return l.Data.(*resources.ProcessingResource)
 	}
 	return nil
 }
 
-func (l *LibData) ToWorkflowResource() *w.WorkflowResource {
-	if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW_RESOURCE.String() {
-		return l.Data.(*w.WorkflowResource)
+func (l *LibData) ToWorkflowResource() *resources.WorkflowResource {
+	if l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.WORKFLOW_RESOURCE {
+		return l.Data.(*resources.WorkflowResource)
 	}
 	return nil
 }
 
 func (l *LibData) ToPeer() *peer.Peer {
-	if l.Data.GetAccessor(nil).GetType() == tools.PEER.String() {
+	if l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.PEER {
 		return l.Data.(*peer.Peer)
 	}
 	return nil
 }
 
 func (l *LibData) ToWorkflow() *w2.Workflow {
-	if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW.String() {
+	if l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.WORKFLOW {
 		return l.Data.(*w2.Workflow)
 	}
 	return nil
 }
 
 func (l *LibData) ToWorkspace() *workspace.Workspace {
-	if l.Data.GetAccessor(nil).GetType() == tools.WORKSPACE.String() {
+	if l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.WORKSPACE {
 		return l.Data.(*workspace.Workspace)
 	}
 	return nil
 }
 
 func (l *LibData) ToCollaborativeArea() *collaborative_area.CollaborativeArea {
-	if l.Data.GetAccessor(nil).GetType() == tools.COLLABORATIVE_AREA.String() {
+	if l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.COLLABORATIVE_AREA {
 		return l.Data.(*collaborative_area.CollaborativeArea)
 	}
 	return nil
 }
 
 func (l *LibData) ToRule() *rule.Rule {
-	if l.Data.GetAccessor(nil).GetType() == tools.COLLABORATIVE_AREA.String() {
+	if l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.COLLABORATIVE_AREA {
 		return l.Data.(*rule.Rule)
 	}
 	return nil
}
 
 func (l *LibData) ToWorkflowExecution() *workflow_execution.WorkflowExecution {
-	if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW_EXECUTION.String() {
+	if l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.WORKFLOW_EXECUTION {
 		return l.Data.(*workflow_execution.WorkflowExecution)
 	}
 	return nil
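The package-level helpers above (Search, LoadAll, LoadOne, ...) are replaced by methods on Request, built from the identity that ExtractTokenInfo pulls out of the bearer token. A hedged usage sketch; the oclib import path, the WORKFLOW enum value and the handler wiring are assumptions, not shown in this diff:

package example

import (
	"net/http"

	oclib "cloud.o-forge.io/core/oc-lib" // assumed import path for the package above
)

// getWorkflow is a hypothetical HTTP handler illustrating the new call sequence.
func getWorkflow(w http.ResponseWriter, r *http.Request) {
	// Decode user, peer and groups from the "Authorization: Bearer ..." header.
	user, peerID, groups := oclib.ExtractTokenInfo(*r)

	// Bind the identity once instead of threading an optional *tools.HTTPCaller through every call.
	req := oclib.NewRequest(oclib.WORKFLOW, user, peerID, groups, nil)
	res := req.LoadOne(r.URL.Query().Get("id"))
	if res.Code != 200 {
		http.Error(w, res.Err, res.Code)
		return
	}
	_ = res.ToWorkflow() // typed cast helper from the CAST section above
}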

go.mod
View File

@@ -44,6 +44,7 @@ require (
 	github.com/prometheus/client_model v0.5.0 // indirect
 	github.com/prometheus/common v0.48.0 // indirect
 	github.com/prometheus/procfs v0.12.0 // indirect
+	github.com/robfig/cron v1.2.0
 	github.com/shiena/ansicolor v0.0.0-20200904210342-c7312218db18 // indirect
 	github.com/xdg-go/pbkdf2 v1.0.0 // indirect
 	github.com/xdg-go/scram v1.1.2 // indirect
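The go.mod change pulls in github.com/robfig/cron v1.2.0 next to the existing cron/v3; none of the visible hunks show where it is wired in, so this is only a sketch of that library's standard v1 API:

package example

import (
	"log"

	"github.com/robfig/cron"
)

// startScheduler is illustrative: it registers one job with the v1 cron API.
func startScheduler() *cron.Cron {
	c := cron.New()
	if err := c.AddFunc("@every 1h", func() { log.Println("periodic task") }); err != nil {
		log.Fatal(err)
	}
	c.Start() // runs jobs in their own goroutines until Stop() is called
	return c
}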

go.sum
View File

@@ -87,6 +87,8 @@ github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSz
 github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
 github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
 github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
+github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
 github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
 github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=

View File

@@ -1,14 +1,12 @@
 package booking
 
 import (
-	"encoding/json"
 	"time"
 
 	"cloud.o-forge.io/core/oc-lib/dbs"
 	"cloud.o-forge.io/core/oc-lib/models/utils"
 	"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
 	"cloud.o-forge.io/core/oc-lib/tools"
-	"github.com/google/uuid"
 	"go.mongodb.org/mongo-driver/bson/primitive"
 )
@@ -28,7 +26,7 @@ func (wfa *Booking) CheckBooking(id string, start time.Time, end *time.Time) (bo
 		return true, nil
 	}
 	e := *end
-	accessor := wfa.GetAccessor(nil)
+	accessor := New(tools.BOOKING, "", "", nil, nil)
 	res, code, err := accessor.Search(&dbs.Filters{
 		And: map[string][]dbs.Filter{ // check if there is a booking on the same compute resource by filtering on the compute_resource_id, the state and the execution date
 			"compute_resource_id": {{Operator: dbs.EQUAL.String(), Value: id}},
@@ -51,41 +49,14 @@ func (wfa *Booking) ArgoStatusToState(status string) *Booking {
 	return wfa
 }
 
-func (ao *Booking) GetID() string {
-	return ao.UUID
-}
-
-func (r *Booking) GenerateID() {
-	if r.UUID == "" {
-		r.UUID = uuid.New().String()
-	}
-}
-
 func (d *Booking) GetName() string {
 	return d.UUID + "_" + d.ExecDate.String()
 }
 
-func (d *Booking) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
-	data := New() // Create a new instance of the accessor
-	data.Init(tools.BOOKING, caller) // Initialize the accessor with the BOOKING model type
-	return data
+func (d *Booking) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
+	return New(tools.BOOKING, username, peerID, groups, caller) // Create a new instance of the accessor
 }
 
-func (dma *Booking) Deserialize(j map[string]interface{}) utils.DBObject {
-	b, err := json.Marshal(j)
-	if err != nil {
-		return nil
-	}
-	json.Unmarshal(b, dma)
-	return dma
-}
-
-func (dma *Booking) Serialize() map[string]interface{} {
-	var m map[string]interface{}
-	b, err := json.Marshal(dma)
-	if err != nil {
-		return nil
-	}
-	json.Unmarshal(b, &m)
-	return m
+func (d *Booking) VerifyAuth(username string, peerID string, groups []string) bool {
+	return true
 }
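GetAccessor now carries the caller's identity and VerifyAuth is the per-model authorization hook (Booking accepts everyone). A hedged sketch of the calling convention; the import path and the identity values are placeholders:

package example

import (
	"time"

	"cloud.o-forge.io/core/oc-lib/models/booking" // assumed import path for the package above
)

// isSlotFree is a hypothetical helper; only the method signatures come from the diff.
func isSlotFree(computeResourceID string, start time.Time, end *time.Time) (bool, error) {
	b := &booking.Booking{}
	// Booking.VerifyAuth always returns true, so any identity passes here.
	_ = b.VerifyAuth("someone", "my-peer-id", []string{"users"})
	// The accessor is now bound to a user, a peer and its groups rather than just an HTTP caller.
	_ = b.GetAccessor("someone", "my-peer-id", []string{"users"}, nil)
	return b.CheckBooking(computeResourceID, start, end)
}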

View File

@@ -4,9 +4,10 @@ import (
 	"time"
 
 	"cloud.o-forge.io/core/oc-lib/dbs"
-	"cloud.o-forge.io/core/oc-lib/dbs/mongo"
+	"cloud.o-forge.io/core/oc-lib/logs"
 	"cloud.o-forge.io/core/oc-lib/models/utils"
 	"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
+	"cloud.o-forge.io/core/oc-lib/tools"
 )
@@ -14,90 +15,62 @@
 }
 
 // New creates a new instance of the bookingMongoAccessor
-func New() *bookingMongoAccessor {
-	return &bookingMongoAccessor{}
+func New(t tools.DataType, username string, peerID string, groups []string, caller *tools.HTTPCaller) *bookingMongoAccessor {
+	return &bookingMongoAccessor{
+		AbstractAccessor: utils.AbstractAccessor{
+			Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
+			Caller: caller,
+			PeerID: peerID,
+			Groups: groups,
+			User:   username, // Set the caller
+			Type:   t,
+		},
+	}
 }
 
 /*
 * Nothing special here, just the basic CRUD operations
 */
-func (wfa *bookingMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
-	return wfa.GenericDeleteOne(id, wfa)
+func (a *bookingMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
+	return utils.GenericDeleteOne(id, a)
 }
 
-func (wfa *bookingMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
-	return wfa.GenericUpdateOne(set, id, wfa, &Booking{})
+func (a *bookingMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
+	return utils.GenericUpdateOne(set, id, a, &Booking{})
 }
 
-func (wfa *bookingMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
-	return wfa.GenericStoreOne(data, wfa)
+func (a *bookingMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
+	return utils.GenericStoreOne(data, a)
 }
 
-func (wfa *bookingMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
-	return wfa.GenericStoreOne(data, wfa)
+func (a *bookingMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
	return utils.GenericStoreOne(data, a)
 }
 
-func (wfa *bookingMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
-	var workflow Booking
-	res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
-	if err != nil {
-		wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
-		return nil, code, err
-	}
-	res_mongo.Decode(&workflow)
-	if workflow.State == workflow_execution.SCHEDULED && time.Now().UTC().After(*workflow.ExecDate) {
-		workflow.State = workflow_execution.FORGOTTEN
-		wfa.GenericRawUpdateOne(&workflow, id, wfa)
-	}
-	return &workflow, 200, nil
-}
-
-func (wfa *bookingMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
-	objs := []utils.ShallowDBObject{}
-	res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
-	if err != nil {
-		wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
-		return nil, code, err
-	}
-	var results []Booking
-	if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
-		return nil, 404, err
-	}
-	for _, r := range results {
-		if r.State == workflow_execution.SCHEDULED && time.Now().UTC().After(*r.ExecDate) {
-			r.State = workflow_execution.FORGOTTEN
-			wfa.GenericRawUpdateOne(&r, r.UUID, wfa)
-		}
-		objs = append(objs, &r.AbstractObject) // Warning only AbstractObject is returned
-	}
-	return objs, 200, nil
-}
-
-// Search is a function that searches for a booking in the database
-func (wfa *bookingMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
-	objs := []utils.ShallowDBObject{}
-	if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
-		filters = &dbs.Filters{
-			Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
-				"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
-			},
-		}
-	}
-	res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
-	if err != nil {
-		wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
-		return nil, code, err
-	}
-	var results []Booking
-	if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
-		return nil, 404, err
-	}
-	for _, r := range results {
-		if r.State == workflow_execution.SCHEDULED && time.Now().UTC().After(*r.ExecDate) {
-			r.State = workflow_execution.FORGOTTEN
-			wfa.GenericRawUpdateOne(&r, r.UUID, wfa)
-		}
-		objs = append(objs, &r)
-	}
-	return objs, 200, nil
+func (a *bookingMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
+	return utils.GenericLoadOne[*Booking](id, func(d utils.DBObject) (utils.DBObject, int, error) {
+		if d.(*Booking).State == workflow_execution.SCHEDULED && time.Now().UTC().After(*d.(*Booking).ExecDate) {
+			d.(*Booking).State = workflow_execution.FORGOTTEN
+			utils.GenericRawUpdateOne(d, id, a)
+		}
+		return d, 200, nil
+	}, a)
+}
+
+func (a *bookingMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
+	return utils.GenericLoadAll[*Booking](a.getExec(), a)
+}
+
+func (a *bookingMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
+	return utils.GenericSearch[*Booking](filters, search, (&Booking{}).GetObjectFilters(search), a.getExec(), a)
+}
+
+func (a *bookingMongoAccessor) getExec() func(utils.DBObject) utils.ShallowDBObject {
+	return func(d utils.DBObject) utils.ShallowDBObject {
+		if d.(*Booking).State == workflow_execution.SCHEDULED && time.Now().UTC().After(*d.(*Booking).ExecDate) {
+			d.(*Booking).State = workflow_execution.FORGOTTEN
+			utils.GenericRawUpdateOne(d, d.GetID(), a)
+		}
+		return d
+	}
 }
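The hand-rolled Mongo code above is replaced by generic helpers (utils.GenericLoadOne, GenericLoadAll, GenericSearch); the accessor only supplies a post-load hook. A sketch of that hook shape in isolation, assuming it lives in the same booking package; expireHook itself is hypothetical:

// expireHook returns the kind of callback passed to utils.GenericLoadOne above:
// it receives the decoded document, may patch and persist it, and returns what the caller sees.
func expireHook(a *bookingMongoAccessor, id string) func(utils.DBObject) (utils.DBObject, int, error) {
	return func(d utils.DBObject) (utils.DBObject, int, error) {
		b := d.(*Booking)
		if b.State == workflow_execution.SCHEDULED && time.Now().UTC().After(*b.ExecDate) {
			b.State = workflow_execution.FORGOTTEN // stale scheduled bookings are marked forgotten
			utils.GenericRawUpdateOne(d, id, a)    // write the state change back before returning
		}
		return d, 200, nil
	}
}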

View File

@@ -1,16 +1,17 @@
 package collaborative_area
 
 import (
-	"encoding/json"
+	"fmt"
+	"slices"
 	"time"
 
+	"cloud.o-forge.io/core/oc-lib/config"
 	"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
 	"cloud.o-forge.io/core/oc-lib/models/peer"
 	"cloud.o-forge.io/core/oc-lib/models/utils"
 	w "cloud.o-forge.io/core/oc-lib/models/workflow"
 	"cloud.o-forge.io/core/oc-lib/models/workspace"
 	"cloud.o-forge.io/core/oc-lib/tools"
-	"github.com/google/uuid"
 )
@@ -27,14 +28,13 @@ type CollaborativeAreaRule struct {
 type CollaborativeArea struct {
 	utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
 	IsSent                bool                   `json:"is_sent" bson:"-"` // IsSent is a flag that indicates if the workspace is sent
-	CreatorID             string                 `json:"peer_id,omitempty" bson:"peer_id,omitempty" validate:"required"` // CreatorID is the ID of the creator
 	Version               string                 `json:"version,omitempty" bson:"version,omitempty"` // Version is the version of the workspace
 	Description           string                 `json:"description,omitempty" bson:"description,omitempty" validate:"required"` // Description is the description of the workspace
 	CollaborativeAreaRule *CollaborativeAreaRule `json:"collaborative_area,omitempty" bson:"collaborative_area,omitempty"` // CollaborativeArea is the collaborative area of the workspace
 	Attributes            map[string]interface{} `json:"attributes,omitempty" bson:"attributes,omitempty"` // Attributes is the attributes of the workspace (TODO)
 	Workspaces            []string               `json:"workspaces" bson:"workspaces"` // Workspaces is the workspaces of the workspace
 	Workflows             []string               `json:"workflows" bson:"workflows"` // Workflows is the workflows of the workspace
-	Peers                 []string               `json:"peers" bson:"peers"` // Peers is the peers of the workspace
+	AllowedPeersGroup     map[string][]string    `json:"allowed_peers_group,omitempty" bson:"allowed_peers_group,omitempty"` // AllowedPeersGroup is the group of allowed peers
 	Rules                 []string               `json:"rules" bson:"rules,omitempty"` // Rules is the rules of the workspace
 	SharedRules           []*rule.Rule           `json:"shared_rules,omitempty" bson:"-"` // SharedRules is the shared rules of the workspace
@@ -43,41 +43,56 @@
 	SharedPeers []*peer.Peer `json:"shared_peers,omitempty" bson:"-"` // SharedPeers is the shared peers of the workspace
 }
 
-func (ao *CollaborativeArea) GetID() string {
-	return ao.UUID
-}
-
-func (r *CollaborativeArea) GenerateID() {
-	if r.UUID == "" {
-		r.UUID = uuid.New().String()
-	}
-}
-
-func (d *CollaborativeArea) GetName() string {
-	return d.Name
-}
-
-func (d *CollaborativeArea) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
-	data := New() // Create a new instance of the accessor
-	data.Init(tools.COLLABORATIVE_AREA, caller) // Initialize the accessor with the SHARED_WORKSPACE model type
-	return data
-}
-
-func (dma *CollaborativeArea) Deserialize(j map[string]interface{}) utils.DBObject {
-	b, err := json.Marshal(j)
-	if err != nil {
-		return nil
-	}
-	json.Unmarshal(b, dma)
-	return dma
-}
-
-func (dma *CollaborativeArea) Serialize() map[string]interface{} {
-	var m map[string]interface{}
-	b, err := json.Marshal(dma)
-	if err != nil {
-		return nil
-	}
-	json.Unmarshal(b, &m)
-	return m
+func (ao *CollaborativeArea) Clear(peerID string) {
+	ao.CreatorID = peerID
+	if config.GetConfig().Whitelist {
+		ao.AllowedPeersGroup[peerID] = []string{"*"}
+	} else {
+		ao.AllowedPeersGroup[peerID] = []string{}
+	}
+	// then reset the shared fields
+	if ao.Workspaces == nil {
+		ao.Workspaces = []string{}
+	}
+	if ao.Workflows == nil {
+		ao.Workflows = []string{}
+	}
+	if ao.Rules == nil {
+		ao.Rules = []string{}
+	}
+	if ao.CollaborativeAreaRule == nil {
+		ao.CollaborativeAreaRule = &CollaborativeAreaRule{
+			ShareMode:   "private",
+			ExploitedBy: "collaborators only",
+		}
+	}
+	ao.CollaborativeAreaRule.CreatedAt = time.Now().UTC()
+}
+
+func (ao *CollaborativeArea) VerifyAuth(username string, peerID string, groups []string) bool {
+	if ao.AllowedPeersGroup != nil || config.GetConfig().Whitelist {
+		if grps, ok := ao.AllowedPeersGroup[peerID]; ok || config.GetConfig().Whitelist {
+			fmt.Println("grps", grps, "ok", ok, "config.GetConfig().Whitelist", config.GetConfig().Whitelist)
+			if slices.Contains(grps, "*") || (!ok && config.GetConfig().Whitelist) {
+				return true
+			}
+			for _, grp := range grps {
+				if slices.Contains(groups, grp) {
+					return true
+				}
+			}
+		}
+	}
+	return ao.AbstractObject.VerifyAuth(username, peerID, groups)
+}
+
+func (d *CollaborativeArea) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
+	return New(tools.COLLABORATIVE_AREA, username, peerID, groups, caller) // Create a new instance of the accessor
+}
+
+func (d *CollaborativeArea) Trim() *CollaborativeArea {
+	if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: d.CreatorID}}).IsMySelf(); !ok {
+		d.AllowedPeersGroup = map[string][]string{}
+	}
+	return d
 }
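VerifyAuth now derives access from AllowedPeersGroup plus the global whitelist flag. A hedged sketch of the resulting behaviour; the import path and the example values are invented:

package example

import (
	"cloud.o-forge.io/core/oc-lib/models/collaborative_area" // assumed import path for the package above
)

// accessExamples is illustrative only: which calls VerifyAuth grants for a given AllowedPeersGroup.
func accessExamples() []bool {
	ca := &collaborative_area.CollaborativeArea{
		AllowedPeersGroup: map[string][]string{
			"peer-a": {"*"},      // any group coming from peer-a is accepted
			"peer-b": {"admins"}, // peer-b callers must carry the "admins" group
		},
	}
	return []bool{
		ca.VerifyAuth("alice", "peer-a", []string{"users"}), // true via the "*" wildcard
		ca.VerifyAuth("bob", "peer-b", []string{"admins"}),  // true via the group match
		ca.VerifyAuth("eve", "peer-b", []string{"users"}),   // falls through to AbstractObject.VerifyAuth
		// With config.GetConfig().Whitelist set, a peer missing from the map is also accepted (!ok branch).
	}
}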

View File

@@ -4,13 +4,13 @@ import (
 	"errors"
 	"fmt"
 	"slices"
-	"time"
 
 	"cloud.o-forge.io/core/oc-lib/dbs"
-	"cloud.o-forge.io/core/oc-lib/dbs/mongo"
+	"cloud.o-forge.io/core/oc-lib/logs"
 	"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
 	"cloud.o-forge.io/core/oc-lib/models/peer"
 	"cloud.o-forge.io/core/oc-lib/models/utils"
+	"cloud.o-forge.io/core/oc-lib/models/workflow"
 	w "cloud.o-forge.io/core/oc-lib/models/workflow"
 	"cloud.o-forge.io/core/oc-lib/models/workspace"
 	"cloud.o-forge.io/core/oc-lib/tools"
@@ -19,43 +19,144 @@
 // SharedWorkspace is a struct that represents a collaborative area
 type collaborativeAreaMongoAccessor struct {
 	utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
+	workspaceAccessor utils.Accessor
+	workflowAccessor  utils.Accessor
+	peerAccessor      utils.Accessor
+	ruleAccessor      utils.Accessor
 }
 
-// New creates a new instance of the collaborativeAreaMongoAccessor
-func New() *collaborativeAreaMongoAccessor {
-	return &collaborativeAreaMongoAccessor{}
+func New(t tools.DataType, username string, peerID string, groups []string, caller *tools.HTTPCaller) *collaborativeAreaMongoAccessor {
+	return &collaborativeAreaMongoAccessor{
+		AbstractAccessor: utils.AbstractAccessor{
+			Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
+			Caller: caller,
+			PeerID: peerID,
+			Groups: groups, // Set the caller
+			Type:   t,
+		},
+		workspaceAccessor: (&workspace.Workspace{}).GetAccessor(username, peerID, groups, nil),
+		workflowAccessor:  (&w.Workflow{}).GetAccessor(username, peerID, groups, nil),
+		peerAccessor:      (&peer.Peer{}).GetAccessor(username, peerID, groups, nil),
+		ruleAccessor:      (&rule.Rule{}).GetAccessor(username, peerID, groups, nil),
+	}
 }
 
 // DeleteOne deletes a collaborative area from the database, given its ID, it automatically share to peers if the workspace is shared
-func (wfa *collaborativeAreaMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
-	set, code, _ := wfa.LoadOne(id)
-	if code == 200 { // always delete on peers than recreate
-		wfa.deleteToPeer(set.(*CollaborativeArea))
+func (a *collaborativeAreaMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
+	set, code, err := a.LoadOne(id)
+	if code != 200 {
+		return nil, code, err
 	}
-	wfa.sharedWorkflow(&CollaborativeArea{}, id) // create all shared workflows
-	wfa.sharedWorkspace(&CollaborativeArea{}, id) // create all collaborative areas
-	return wfa.GenericDeleteOne(id, wfa) // then add on yours
+	a.deleteToPeer(set.(*CollaborativeArea))
+	a.sharedWorkflow(&CollaborativeArea{}, id) // create all shared workflows
+	a.sharedWorkspace(&CollaborativeArea{}, id) // create all collaborative areas
+	return utils.GenericDeleteOne(id, a) // then add on yours
 }
 
+// UpdateOne updates a collaborative area in the database, given its ID and the new data, it automatically share to peers if the workspace is shared
+func (a *collaborativeAreaMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
+	res, code, err := utils.GenericUpdateOne(set.(*CollaborativeArea).Trim(), id, a, &CollaborativeArea{})
+	// a.deleteToPeer(res.(*CollaborativeArea)) // delete the collaborative area on the peer
+	a.sharedWorkflow(res.(*CollaborativeArea), id) // replace all shared workflows
+	a.sharedWorkspace(res.(*CollaborativeArea), id) // replace all collaborative areas (not shared worspace obj but workspace one)
+	// a.sendToPeer(res.(*CollaborativeArea)) // send the collaborative area (collaborative area object) to the peers
+	return res, code, err
+}
+
+// StoreOne stores a collaborative area in the database, it automatically share to peers if the workspace is shared
+func (a *collaborativeAreaMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
+	_, id := (&peer.Peer{}).IsMySelf() // get the local peer
+	data.(*CollaborativeArea).Clear(id) // set the creator
+	// retrieve or proper peer
+	dd, code, err := a.peerAccessor.Search(nil, "0")
+	if code != 200 || len(dd) == 0 {
+		return nil, code, errors.New("Could not retrieve the peer" + err.Error())
+	}
+	data.(*CollaborativeArea).CollaborativeAreaRule.Creator = dd[0].GetID()
+	d, code, err := utils.GenericStoreOne(data.(*CollaborativeArea).Trim(), a)
+	if code == 200 {
+		a.sharedWorkflow(d.(*CollaborativeArea), d.GetID()) // create all shared workflows
+		a.sharedWorkspace(d.(*CollaborativeArea), d.GetID()) // create all collaborative areas
+		a.sendToPeer(d.(*CollaborativeArea)) // send the collaborative area (collaborative area object) to the peers
+	}
+	return data, code, err
+}
+
+// CopyOne copies a CollaborativeArea in the database
+func (a *collaborativeAreaMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
+	return a.StoreOne(data)
+}
+
+func filterEnrich[T utils.ShallowDBObject](arr []string, a utils.Accessor) []T {
+	var new []T
+	res, code, _ := a.Search(&dbs.Filters{
+		Or: map[string][]dbs.Filter{
+			"abstractobject.id": {{Operator: dbs.IN.String(), Value: arr}},
+		},
+	}, "")
+	if code == 200 {
+		for _, r := range res {
+			new = append(new, r.(T))
+		}
+	}
+	return new
+}
+
+// enrich is a function that enriches the CollaborativeArea with the shared objects
+func (a *collaborativeAreaMongoAccessor) enrich(sharedWorkspace *CollaborativeArea) *CollaborativeArea {
+	sharedWorkspace.SharedWorkspaces = append(sharedWorkspace.SharedWorkspaces,
+		filterEnrich[*workspace.Workspace](sharedWorkspace.Workspaces, a.workspaceAccessor)...)
+	sharedWorkspace.SharedWorkflows = append(sharedWorkspace.SharedWorkflows,
+		filterEnrich[*workflow.Workflow](sharedWorkspace.Workflows, a.workflowAccessor)...)
+	peerskey := []string{}
+	for k := range sharedWorkspace.AllowedPeersGroup {
+		peerskey = append(peerskey, k)
+	}
+	sharedWorkspace.SharedPeers = append(sharedWorkspace.SharedPeers,
+		filterEnrich[*peer.Peer](peerskey, a.peerAccessor)...)
+	sharedWorkspace.SharedRules = append(sharedWorkspace.SharedRules,
+		filterEnrich[*rule.Rule](sharedWorkspace.Rules, a.ruleAccessor)...)
+	return sharedWorkspace
+}
+
+func (a *collaborativeAreaMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
+	return utils.GenericLoadOne[*CollaborativeArea](id, func(d utils.DBObject) (utils.DBObject, int, error) {
+		return a.enrich(d.(*CollaborativeArea)), 200, nil
+	}, a)
+}
+
+func (a *collaborativeAreaMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
+	return utils.GenericLoadAll[*CollaborativeArea](func(d utils.DBObject) utils.ShallowDBObject {
+		return a.enrich(d.(*CollaborativeArea))
+	}, a)
+}
+
+func (a *collaborativeAreaMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
+	return utils.GenericSearch[*CollaborativeArea](filters, search, (&CollaborativeArea{}).GetObjectFilters(search),
+		func(d utils.DBObject) utils.ShallowDBObject {
+			return a.enrich(d.(*CollaborativeArea))
+		}, a)
+}
+
 /*
 sharedWorkspace is a function that shares the collaborative area to the peers
 */
-func (wfa *collaborativeAreaMongoAccessor) sharedWorkspace(shared *CollaborativeArea, id string) {
-	eldest, code, _ := wfa.LoadOne(id) // get the eldest
-	accessor := (&workspace.Workspace{}).GetAccessor(nil)
+func (a *collaborativeAreaMongoAccessor) sharedWorkspace(shared *CollaborativeArea, id string) {
+	eldest, code, _ := a.LoadOne(id) // get the eldest
 	if code == 200 {
 		eld := eldest.(*CollaborativeArea)
 		if eld.Workspaces != nil { // update all your workspaces in the eldest by replacing shared ref by an empty string
 			for _, v := range eld.Workspaces {
-				accessor.UpdateOne(&workspace.Workspace{Shared: ""}, v)
-				if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKSPACE] == nil {
+				a.workspaceAccessor.UpdateOne(&workspace.Workspace{Shared: ""}, v)
+				if a.Caller != nil || a.Caller.URLS == nil || a.Caller.URLS[tools.WORKSPACE] == nil {
 					continue
 				}
 				paccess := (&peer.Peer{}) // send to all peers
-				for _, p := range shared.Peers { // delete the collaborative area on the peer
-					b, err := paccess.LaunchPeerExecution(p, v, tools.WORKSPACE, tools.DELETE, nil, wfa.Caller)
+				for k := range shared.AllowedPeersGroup { // delete the collaborative area on the peer
+					b, err := paccess.LaunchPeerExecution(k, v, tools.WORKSPACE, tools.DELETE, nil, a.Caller)
 					if err != nil && b == nil {
-						wfa.Logger.Error().Msg("Could not send to peer " + p + ". Error: " + err.Error())
+						a.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
 					}
 				}
 			}
@@ -63,20 +164,20 @@ func (wfa *collaborativeAreaMongoAccessor) sharedWorkspace(shared *Collaborative
 		}
 	}
 	if shared.Workspaces != nil {
 		for _, v := range shared.Workspaces { // update all the collaborative areas
-			workspace, code, _ := accessor.UpdateOne(&workspace.Workspace{Shared: shared.UUID}, v) // add the shared ref to workspace
-			if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKSPACE] == nil {
+			workspace, code, _ := a.workspaceAccessor.UpdateOne(&workspace.Workspace{Shared: shared.UUID}, v) // add the shared ref to workspace
+			if a.Caller != nil || a.Caller.URLS == nil || a.Caller.URLS[tools.WORKSPACE] == nil {
 				continue
 			}
-			for _, p := range shared.Peers {
+			for k := range shared.AllowedPeersGroup {
 				if code != 200 {
 					continue
 				}
 				paccess := (&peer.Peer{}) // send to all peers, add the collaborative area on the peer
-				s := workspace.Serialize()
-				s["name"] = fmt.Sprintf("%v", s["name"]) + "_" + p
-				b, err := paccess.LaunchPeerExecution(p, v, tools.WORKSPACE, tools.POST, s, wfa.Caller)
+				s := workspace.Serialize(workspace)
+				s["name"] = fmt.Sprintf("%v", s["name"]) + "_" + k
+				b, err := paccess.LaunchPeerExecution(k, v, tools.WORKSPACE, tools.POST, s, a.Caller)
 				if err != nil && b == nil {
-					wfa.Logger.Error().Msg("Could not send to peer " + p + ". Error: " + err.Error())
+					a.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
 				}
 			}
 		}
@@ -86,14 +187,13 @@ func (wfa *collaborativeAreaMongoAccessor) sharedWorkspace(shared *Collaborative
 }
 
 // sharedWorkflow is a function that shares the shared workflow to the peers
-func (wfa *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeArea, id string) {
-	accessor := (&w.Workflow{}).GetAccessor(nil)
-	eldest, code, _ := wfa.LoadOne(id) // get the eldest
+func (a *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeArea, id string) {
+	eldest, code, _ := a.LoadOne(id) // get the eldest
 	if code == 200 {
 		eld := eldest.(*CollaborativeArea)
 		if eld.Workflows != nil {
 			for _, v := range eld.Workflows {
-				data, code, _ := accessor.LoadOne(v)
+				data, code, _ := a.workflowAccessor.LoadOne(v)
 				if code == 200 {
 					s := data.(*w.Workflow)
 					new := []string{}
@@ -104,15 +204,15 @@ func (wfa *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeA
 					} // kick the shared reference in your old shared workflow
 					n := &w.Workflow{}
 					n.Shared = new
-					accessor.UpdateOne(n, v)
-					if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKFLOW] == nil {
+					a.workflowAccessor.UpdateOne(n, v)
+					if a.Caller != nil || a.Caller.URLS == nil || a.Caller.URLS[tools.WORKFLOW] == nil {
 						continue
 					}
 					paccess := (&peer.Peer{}) // send to all peers
-					for _, p := range shared.Peers { // delete the shared workflow on the peer
-						b, err := paccess.LaunchPeerExecution(p, v, tools.WORKFLOW, tools.DELETE, nil, wfa.Caller)
+					for k := range shared.AllowedPeersGroup { // delete the shared workflow on the peer
+						b, err := paccess.LaunchPeerExecution(k, v, tools.WORKFLOW, tools.DELETE, nil, a.Caller)
 						if err != nil && b == nil {
-							wfa.Logger.Error().Msg("Could not send to peer " + p + ". Error: " + err.Error())
+							a.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
 						}
 					}
 				}
@@ -121,23 +221,23 @@ func (wfa *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeA
 	}
 	if shared.Workflows != nil { // update all the shared workflows
 		for _, v := range shared.Workflows {
-			data, code, _ := accessor.LoadOne(v)
+			data, code, _ := a.workflowAccessor.LoadOne(v)
 			if code == 200 {
 				s := data.(*w.Workflow)
 				if !slices.Contains(s.Shared, id) {
 					s.Shared = append(s.Shared, id)
-					workflow, code, _ := accessor.UpdateOne(s, v)
-					if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKFLOW] == nil {
+					workflow, code, _ := a.workflowAccessor.UpdateOne(s, v)
+					if a.Caller != nil || a.Caller.URLS == nil || a.Caller.URLS[tools.WORKFLOW] == nil {
 						continue
 					}
 					paccess := (&peer.Peer{})
-					for _, p := range shared.Peers { // send to all peers
+					for k := range shared.AllowedPeersGroup { // send to all peers
 						if code == 200 {
-							s := workflow.Serialize() // add the shared workflow on the peer
-							s["name"] = fmt.Sprintf("%v", s["name"]) + "_" + p
-							b, err := paccess.LaunchPeerExecution(p, shared.UUID, tools.WORKFLOW, tools.POST, s, wfa.Caller)
+							s := workflow.Serialize(workflow) // add the shared workflow on the peer
+							s["name"] = fmt.Sprintf("%v", s["name"]) + "_" + k
+							b, err := paccess.LaunchPeerExecution(k, shared.UUID, tools.WORKFLOW, tools.POST, s, a.Caller)
 							if err != nil && b == nil {
-								wfa.Logger.Error().Msg("Could not send to peer " + p + ". Error: " + err.Error())
+								a.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
 							}
 						}
 					}
@ -150,194 +250,29 @@ func (wfa *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeA
} }
// sharedWorkspace is a function that shares the collaborative area to the peers // sharedWorkspace is a function that shares the collaborative area to the peers
func (wfa *collaborativeAreaMongoAccessor) deleteToPeer(shared *CollaborativeArea) { func (a *collaborativeAreaMongoAccessor) deleteToPeer(shared *CollaborativeArea) {
if wfa.Caller == nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.COLLABORATIVE_AREA] == nil || wfa.Caller.Disabled { a.contactPeer(shared, tools.POST)
return
}
paccess := (&peer.Peer{})
for _, v := range shared.Peers {
if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: v}}).IsMySelf(); ok {
continue
}
b, err := paccess.LaunchPeerExecution(v, shared.UUID, tools.COLLABORATIVE_AREA, tools.DELETE, nil, wfa.Caller)
if err != nil && b == nil {
wfa.Logger.Error().Msg("Could not send to peer " + v + ". Error: " + err.Error())
}
}
} }
// sharedWorkspace is a function that shares the collaborative area to the peers // sharedWorkspace is a function that shares the collaborative area to the peers
func (wfa *collaborativeAreaMongoAccessor) sendToPeer(shared *CollaborativeArea) { func (a *collaborativeAreaMongoAccessor) sendToPeer(shared *CollaborativeArea) {
if wfa.Caller == nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.COLLABORATIVE_AREA] == nil || wfa.Caller.Disabled { a.contactPeer(shared, tools.POST)
}
func (a *collaborativeAreaMongoAccessor) contactPeer(shared *CollaborativeArea, meth tools.METHOD) {
if a.Caller == nil || a.Caller.URLS == nil || a.Caller.URLS[tools.COLLABORATIVE_AREA] == nil || a.Caller.Disabled {
return return
} }
paccess := (&peer.Peer{}) paccess := (&peer.Peer{})
for _, v := range shared.Peers { for k := range shared.AllowedPeersGroup {
if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: v}}).IsMySelf(); ok || shared.IsSent { if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: k}}).IsMySelf(); ok || (shared.IsSent && meth == tools.POST) || (!shared.IsSent && meth != tools.POST) {
continue continue
} }
shared.IsSent = true shared.IsSent = meth == tools.POST
b, err := paccess.LaunchPeerExecution(v, v, tools.COLLABORATIVE_AREA, tools.POST, shared.Serialize(), wfa.Caller) b, err := paccess.LaunchPeerExecution(k, k, tools.COLLABORATIVE_AREA, meth, shared.Serialize(shared), a.Caller)
if err != nil && b == nil { if err != nil && b == nil {
wfa.Logger.Error().Msg("Could not send to peer " + v + ". Error: " + err.Error()) a.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
} }
} }
} }
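The consolidated contactPeer above now backs both sendToPeer and deleteToPeer; the per-peer skip rule it applies is compact but easy to misread. A minimal restatement of that guard as a standalone helper, assuming the tools.METHOD type and tools.POST constant used in the hunk (the package and function name here are illustrative):

package example // illustrative sketch, not part of the changeset

import "cloud.o-forge.io/core/oc-lib/tools"

// shouldSkip restates contactPeer's per-peer guard: skip our own peer,
// skip re-sharing an area that was already sent, and skip non-POST
// notifications for an area that was never sent in the first place.
func shouldSkip(isMyself, alreadySent bool, meth tools.METHOD) bool {
	return isMyself || (alreadySent && meth == tools.POST) || (!alreadySent && meth != tools.POST)
}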
// UpdateOne updates a collaborative area in the database, given its ID and the new data, it automatically share to peers if the workspace is shared
func (wfa *collaborativeAreaMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
res, code, err := wfa.GenericUpdateOne(set.(*CollaborativeArea), id, wfa, &CollaborativeArea{})
fmt.Println("UpdateOne", set, res, code, err)
// wfa.deleteToPeer(res.(*CollaborativeArea)) // delete the collaborative area on the peer
wfa.sharedWorkflow(res.(*CollaborativeArea), id) // replace all shared workflows
wfa.sharedWorkspace(res.(*CollaborativeArea), id) // replace all collaborative areas (not shared workspace obj but workspace one)
// wfa.sendToPeer(res.(*CollaborativeArea)) // send the collaborative area (collaborative area object) to the peers
return res, code, err
}
// StoreOne stores a collaborative area in the database, it automatically share to peers if the workspace is shared
func (wfa *collaborativeAreaMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
_, id := (&peer.Peer{}).IsMySelf() // get the local peer
data.(*CollaborativeArea).CreatorID = id // set the creator id
data.(*CollaborativeArea).Peers = append(data.(*CollaborativeArea).Peers, id) // add the creator id to the peers
// then reset the shared fields
if data.(*CollaborativeArea).Workspaces == nil {
data.(*CollaborativeArea).Workspaces = []string{}
}
if data.(*CollaborativeArea).Workflows == nil {
data.(*CollaborativeArea).Workflows = []string{}
}
if data.(*CollaborativeArea).Rules == nil {
data.(*CollaborativeArea).Rules = []string{}
}
if data.(*CollaborativeArea).CollaborativeAreaRule == nil {
data.(*CollaborativeArea).CollaborativeAreaRule = &CollaborativeAreaRule{
ShareMode: "private",
ExploitedBy: "collaborators only",
}
}
data.(*CollaborativeArea).CollaborativeAreaRule.CreatedAt = time.Now().UTC()
// retrieve our own peer
dd, code, err := (&peer.Peer{}).GetAccessor(nil).Search(nil, "0")
if code != 200 || len(dd) == 0 {
return nil, code, errors.New("Could not retrieve the peer" + err.Error())
}
data.(*CollaborativeArea).CollaborativeAreaRule.Creator = dd[0].GetID()
d, code, err := wfa.GenericStoreOne(data.(*CollaborativeArea), wfa)
if code == 200 {
wfa.sharedWorkflow(d.(*CollaborativeArea), d.GetID()) // create all shared workflows
wfa.sharedWorkspace(d.(*CollaborativeArea), d.GetID()) // create all collaborative areas
wfa.sendToPeer(d.(*CollaborativeArea)) // send the collaborative area (collaborative area object) to the peers
}
return data, code, err
}
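StoreOne above backfills empty lists and a default CollaborativeAreaRule before persisting. A small sketch of that default, assuming CollaborativeAreaRule is exported from the collaborative_area package as the imports elsewhere in this changeset suggest:

package example // illustrative sketch

import (
	"time"

	"cloud.o-forge.io/core/oc-lib/models/collaborative_area"
)

// defaultRule restates the rule StoreOne applies when none is supplied:
// a private area, exploited by collaborators only, stamped at creation.
func defaultRule() *collaborative_area.CollaborativeAreaRule {
	return &collaborative_area.CollaborativeAreaRule{
		ShareMode:   "private",
		ExploitedBy: "collaborators only",
		CreatedAt:   time.Now().UTC(),
	}
}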
// CopyOne copies a CollaborativeArea in the database
func (wfa *collaborativeAreaMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return wfa.StoreOne(data)
}
// enrich is a function that enriches the CollaborativeArea with the shared objects
func (wfa *collaborativeAreaMongoAccessor) enrich(sharedWorkspace *CollaborativeArea) *CollaborativeArea {
access := (&workspace.Workspace{}).GetAccessor(nil)
res, code, _ := access.Search(&dbs.Filters{
Or: map[string][]dbs.Filter{
"abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Workspaces}},
},
}, "")
if code == 200 {
for _, r := range res {
sharedWorkspace.SharedWorkspaces = append(sharedWorkspace.SharedWorkspaces, r.(*workspace.Workspace))
}
}
access = (&w.Workflow{}).GetAccessor(nil)
res, code, _ = access.Search(&dbs.Filters{
Or: map[string][]dbs.Filter{
"abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Workflows}},
},
}, "")
if code == 200 {
for _, r := range res {
sharedWorkspace.SharedWorkflows = append(sharedWorkspace.SharedWorkflows, r.(*w.Workflow))
}
}
access = (&peer.Peer{}).GetAccessor(nil)
res, code, _ = access.Search(&dbs.Filters{
Or: map[string][]dbs.Filter{
"abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Peers}},
},
}, "")
if code == 200 {
for _, r := range res {
sharedWorkspace.SharedPeers = append(sharedWorkspace.SharedPeers, r.(*peer.Peer))
}
}
access = (&rule.Rule{}).GetAccessor(nil)
res, code, _ = access.Search(&dbs.Filters{
Or: map[string][]dbs.Filter{
"abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Rules}},
},
}, "")
if code == 200 {
for _, r := range res {
sharedWorkspace.SharedRules = append(sharedWorkspace.SharedRules, r.(*rule.Rule))
}
}
return sharedWorkspace
}
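enrich above re-attaches the referenced workspaces, workflows, peers and rules by searching each collection with the same filter shape. A minimal sketch of that filter, assuming the dbs API as used in the hunk; the helper name is illustrative:

package example // illustrative sketch

import "cloud.o-forge.io/core/oc-lib/dbs"

// idFilter mirrors the IN-filter enrich builds for each related collection:
// match any object whose abstractobject.id is in ids.
func idFilter(ids []string) *dbs.Filters {
	return &dbs.Filters{
		Or: map[string][]dbs.Filter{
			"abstractobject.id": {{Operator: dbs.IN.String(), Value: ids}},
		},
	}
}

enrich then hands this filter to each accessor's Search together with an empty search string.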
// LoadOne loads a collaborative area from the database, given its ID and enrich it
func (wfa *collaborativeAreaMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
var sharedWorkspace CollaborativeArea
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}
res_mongo.Decode(&sharedWorkspace)
return wfa.enrich(&sharedWorkspace), 200, nil // enrich the collaborative area
}
// LoadAll loads all the collaborative areas from the database and enrich them
func (wfa collaborativeAreaMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
var results []CollaborativeArea
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
objs = append(objs, wfa.enrich(&r)) // enrich the collaborative area
}
return objs, 200, nil
}
// Search searches for collaborative areas in the database, given some filters OR a search string
func (wfa *collaborativeAreaMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = &dbs.Filters{
Or: map[string][]dbs.Filter{ // search by name only by default, can be overridden
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
},
}
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err
}
var results []CollaborativeArea
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
objs = append(objs, wfa.enrich(&r)) // enrich the collaborative area
}
return objs, 200, nil
}

View File

@ -1,8 +1,6 @@
package rule package rule
import ( import (
"encoding/json"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
"github.com/google/uuid" "github.com/google/uuid"
@ -18,39 +16,14 @@ type Rule struct {
Actions []string `json:"actions,omitempty" bson:"actions,omitempty"` // NOT DEFINITIVE TO SPECIFICATION Actions []string `json:"actions,omitempty" bson:"actions,omitempty"` // NOT DEFINITIVE TO SPECIFICATION
} }
func (ao *Rule) GetID() string {
return ao.UUID
}
func (r *Rule) GenerateID() { func (r *Rule) GenerateID() {
r.UUID = uuid.New().String() r.UUID = uuid.New().String()
} }
func (d *Rule) GetName() string { func (d *Rule) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return d.Name return New(tools.RULE, username, peerID, groups, caller)
} }
func (d *Rule) GetAccessor(caller *tools.HTTPCaller) utils.Accessor { func (d *Rule) VerifyAuth(username string, peerID string, groups []string) bool {
data := New() return true
data.Init(tools.RULE, caller)
return data
}
func (dma *Rule) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}
func (dma *Rule) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
} }

View File

@ -3,7 +3,9 @@ package rule
import ( import (
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo" "cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
) )
type ruleMongoAccessor struct { type ruleMongoAccessor struct {
@ -11,35 +13,44 @@ type ruleMongoAccessor struct {
} }
// New creates a new instance of the ruleMongoAccessor // New creates a new instance of the ruleMongoAccessor
func New() *ruleMongoAccessor { func New(t tools.DataType, username string, peerID string, groups []string, caller *tools.HTTPCaller) *ruleMongoAccessor {
return &ruleMongoAccessor{} return &ruleMongoAccessor{
AbstractAccessor: utils.AbstractAccessor{
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
Caller: caller,
PeerID: peerID,
Groups: groups, // Set the caller
User: username,
Type: t,
},
}
} }
// DeleteOne deletes a rule from the database // DeleteOne deletes a rule from the database
func (wfa *ruleMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) { func (a *ruleMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
return wfa.GenericDeleteOne(id, wfa) return utils.GenericDeleteOne(id, a)
} }
// UpdateOne updates a rule in the database // UpdateOne updates a rule in the database
func (wfa *ruleMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) { func (a *ruleMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
return wfa.GenericUpdateOne(set.(*Rule), id, wfa, &Rule{}) return utils.GenericUpdateOne(set.(*Rule), id, a, &Rule{})
} }
// StoreOne stores a rule in the database // StoreOne stores a rule in the database
func (wfa *ruleMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) { func (a *ruleMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
return wfa.GenericStoreOne(data.(*Rule), wfa) return utils.GenericStoreOne(data.(*Rule), a)
} }
func (wfa *ruleMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) { func (a *ruleMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return wfa.GenericStoreOne(data, wfa) return utils.GenericStoreOne(data, a)
} }
// LoadOne loads a rule from the database // LoadOne loads a rule from the database
func (wfa *ruleMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) { func (a *ruleMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
var rule Rule var rule Rule
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType()) res_mongo, code, err := mongo.MONGOService.LoadOne(id, a.GetType().String())
if err != nil { if err != nil {
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error()) a.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err return nil, code, err
} }
res_mongo.Decode(&rule) res_mongo.Decode(&rule)
@ -47,11 +58,11 @@ func (wfa *ruleMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
} }
// LoadAll loads all rules from the database // LoadAll loads all rules from the database
func (wfa ruleMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) { func (a ruleMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{} objs := []utils.ShallowDBObject{}
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType()) res_mongo, code, err := mongo.MONGOService.LoadAll(a.GetType().String())
if err != nil { if err != nil {
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error()) a.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err return nil, code, err
} }
var results []Rule var results []Rule
@ -65,7 +76,7 @@ func (wfa ruleMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
} }
// Search searches for rules in the database, given some filters OR a search string // Search searches for rules in the database, given some filters OR a search string
func (wfa *ruleMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) { func (a *ruleMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{} objs := []utils.ShallowDBObject{}
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" { if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = &dbs.Filters{ filters = &dbs.Filters{
@ -74,9 +85,9 @@ func (wfa *ruleMongoAccessor) Search(filters *dbs.Filters, search string) ([]uti
}, },
} }
} }
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType()) res_mongo, code, err := mongo.MONGOService.Search(filters, a.GetType().String())
if err != nil { if err != nil {
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error()) a.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err return nil, code, err
} }
var results []Rule var results []Rule

View File

@ -1,17 +1,13 @@
package shallow_collaborative_area package shallow_collaborative_area
import ( import (
"encoding/json"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
"github.com/google/uuid"
) )
type ShallowCollaborativeArea struct { type ShallowCollaborativeArea struct {
utils.AbstractObject utils.AbstractObject
IsSent bool `json:"is_sent" bson:"-"` IsSent bool `json:"is_sent" bson:"-"`
CreatorID string `json:"peer_id,omitempty" bson:"peer_id,omitempty" validate:"required"`
Version string `json:"version,omitempty" bson:"version,omitempty"` Version string `json:"version,omitempty" bson:"version,omitempty"`
Description string `json:"description,omitempty" bson:"description,omitempty" validate:"required"` Description string `json:"description,omitempty" bson:"description,omitempty" validate:"required"`
Attributes map[string]interface{} `json:"attributes,omitempty" bson:"attributes,omitempty"` Attributes map[string]interface{} `json:"attributes,omitempty" bson:"attributes,omitempty"`
@ -21,41 +17,6 @@ type ShallowCollaborativeArea struct {
Rules []string `json:"rules,omitempty" bson:"rules,omitempty"` Rules []string `json:"rules,omitempty" bson:"rules,omitempty"`
} }
func (ao *ShallowCollaborativeArea) GetID() string { func (d *ShallowCollaborativeArea) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return ao.UUID return New(tools.COLLABORATIVE_AREA, username, peerID, groups, caller)
}
func (r *ShallowCollaborativeArea) GenerateID() {
if r.UUID == "" {
r.UUID = uuid.New().String()
}
}
func (d *ShallowCollaborativeArea) GetName() string {
return d.Name
}
func (d *ShallowCollaborativeArea) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New()
data.Init(tools.COLLABORATIVE_AREA, caller)
return data
}
func (dma *ShallowCollaborativeArea) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}
func (dma *ShallowCollaborativeArea) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
} }

View File

@ -2,82 +2,58 @@ package shallow_collaborative_area
import ( import (
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo" "cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
) )
type shallowSharedWorkspaceMongoAccessor struct { type shallowSharedWorkspaceMongoAccessor struct {
utils.AbstractAccessor utils.AbstractAccessor
} }
func New() *shallowSharedWorkspaceMongoAccessor { func New(t tools.DataType, username string, peerID string, groups []string, caller *tools.HTTPCaller) *shallowSharedWorkspaceMongoAccessor {
return &shallowSharedWorkspaceMongoAccessor{} return &shallowSharedWorkspaceMongoAccessor{
AbstractAccessor: utils.AbstractAccessor{
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
Caller: caller,
PeerID: peerID,
User: username, // Set the caller
Groups: groups, // Set the caller
Type: t,
},
}
} }
func (wfa *shallowSharedWorkspaceMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) { func (a *shallowSharedWorkspaceMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
return wfa.GenericDeleteOne(id, wfa) return utils.GenericDeleteOne(id, a)
} }
func (wfa *shallowSharedWorkspaceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) { func (a *shallowSharedWorkspaceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
return wfa.GenericUpdateOne(set.(*ShallowCollaborativeArea), id, wfa, &ShallowCollaborativeArea{}) return utils.GenericUpdateOne(set.(*ShallowCollaborativeArea), id, a, &ShallowCollaborativeArea{})
} }
func (wfa *shallowSharedWorkspaceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) { func (a *shallowSharedWorkspaceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
return wfa.GenericStoreOne(data.(*ShallowCollaborativeArea), wfa) return utils.GenericStoreOne(data.(*ShallowCollaborativeArea), a)
} }
func (wfa *shallowSharedWorkspaceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) { func (a *shallowSharedWorkspaceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return wfa.StoreOne(data) return a.StoreOne(data)
} }
func (wfa *shallowSharedWorkspaceMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) { func (a *shallowSharedWorkspaceMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
var sharedWorkspace ShallowCollaborativeArea return utils.GenericLoadOne[*ShallowCollaborativeArea](id, func(d utils.DBObject) (utils.DBObject, int, error) {
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType()) return d, 200, nil
if err != nil { }, a)
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}
res_mongo.Decode(&sharedWorkspace)
return &sharedWorkspace, 200, nil
} }
func (wfa shallowSharedWorkspaceMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) { func (a *shallowSharedWorkspaceMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{} return utils.GenericLoadAll[*ShallowCollaborativeArea](func(d utils.DBObject) utils.ShallowDBObject {
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType()) return d
if err != nil { }, a)
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
var results []ShallowCollaborativeArea
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
objs = append(objs, &r)
}
return objs, 200, nil
} }
func (wfa *shallowSharedWorkspaceMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) { func (a *shallowSharedWorkspaceMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{} return utils.GenericSearch[*ShallowCollaborativeArea](filters, search, (&ShallowCollaborativeArea{}).GetObjectFilters(search), func(d utils.DBObject) utils.ShallowDBObject {
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" { return d
filters = &dbs.Filters{ }, a)
Or: map[string][]dbs.Filter{
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
},
}
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err
}
var results []ShallowCollaborativeArea
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
objs = append(objs, &r)
}
return objs, 200, nil
} }

View File

@ -8,12 +8,8 @@ import (
"cloud.o-forge.io/core/oc-lib/models/collaborative_area" "cloud.o-forge.io/core/oc-lib/models/collaborative_area"
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule" "cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
"cloud.o-forge.io/core/oc-lib/models/peer" "cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/resource_model" resource "cloud.o-forge.io/core/oc-lib/models/resources"
d "cloud.o-forge.io/core/oc-lib/models/resources/data" "cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
p "cloud.o-forge.io/core/oc-lib/models/resources/processing"
s "cloud.o-forge.io/core/oc-lib/models/resources/storage"
w "cloud.o-forge.io/core/oc-lib/models/resources/workflow"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
w2 "cloud.o-forge.io/core/oc-lib/models/workflow" w2 "cloud.o-forge.io/core/oc-lib/models/workflow"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution" "cloud.o-forge.io/core/oc-lib/models/workflow_execution"
@ -25,11 +21,11 @@ This package contains the models used in the application
It's used to create the models dynamically It's used to create the models dynamically
*/ */
var models = map[string]func() utils.DBObject{ var models = map[string]func() utils.DBObject{
tools.WORKFLOW_RESOURCE.String(): func() utils.DBObject { return &w.WorkflowResource{} }, tools.WORKFLOW_RESOURCE.String(): func() utils.DBObject { return &resource.WorkflowResource{} },
tools.DATA_RESOURCE.String(): func() utils.DBObject { return &d.DataResource{} }, tools.DATA_RESOURCE.String(): func() utils.DBObject { return &resource.DataResource{} },
tools.COMPUTE_RESOURCE.String(): func() utils.DBObject { return &compute.ComputeResource{} }, tools.COMPUTE_RESOURCE.String(): func() utils.DBObject { return &resource.ComputeResource{} },
tools.STORAGE_RESOURCE.String(): func() utils.DBObject { return &s.StorageResource{} }, tools.STORAGE_RESOURCE.String(): func() utils.DBObject { return &resource.StorageResource{} },
tools.PROCESSING_RESOURCE.String(): func() utils.DBObject { return &p.ProcessingResource{} }, tools.PROCESSING_RESOURCE.String(): func() utils.DBObject { return &resource.ProcessingResource{} },
tools.WORKFLOW.String(): func() utils.DBObject { return &w2.Workflow{} }, tools.WORKFLOW.String(): func() utils.DBObject { return &w2.Workflow{} },
tools.WORKFLOW_EXECUTION.String(): func() utils.DBObject { return &workflow_execution.WorkflowExecution{} }, tools.WORKFLOW_EXECUTION.String(): func() utils.DBObject { return &workflow_execution.WorkflowExecution{} },
tools.WORKSPACE.String(): func() utils.DBObject { return &w3.Workspace{} }, tools.WORKSPACE.String(): func() utils.DBObject { return &w3.Workspace{} },

View File

@ -1,12 +1,10 @@
package peer package peer
import ( import (
"encoding/json"
"fmt" "fmt"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
"github.com/google/uuid"
) )
// now write a go enum for the state partner with self, blacklist, partner // now write a go enum for the state partner with self, blacklist, partner
@ -65,7 +63,7 @@ func (ao *Peer) RemoveExecution(exec PeerExecution) {
// IsMySelf checks if the peer is the local peer // IsMySelf checks if the peer is the local peer
func (ao *Peer) IsMySelf() (bool, string) { func (ao *Peer) IsMySelf() (bool, string) {
d, code, err := ao.GetAccessor(nil).Search(nil, SELF.String()) d, code, err := New(tools.PEER, "", "", nil, nil).Search(nil, SELF.String())
if code != 200 || err != nil || len(d) == 0 { if code != 200 || err != nil || len(d) == 0 {
return false, "" return false, ""
} }
@ -78,42 +76,11 @@ func (p *Peer) LaunchPeerExecution(peerID string, dataID string, dt tools.DataTy
p.UUID = peerID p.UUID = peerID
return cache.LaunchPeerExecution(peerID, dataID, dt, method, body, caller) // Launch the execution on the peer through the cache return cache.LaunchPeerExecution(peerID, dataID, dt, method, body, caller) // Launch the execution on the peer through the cache
} }
func (d *Peer) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
func (ao *Peer) GetID() string { data := New(tools.PEER, username, peerID, groups, caller) // Create a new instance of the accessor
return ao.UUID
}
func (r *Peer) GenerateID() {
if r.UUID == "" {
r.UUID = uuid.New().String()
}
}
func (d *Peer) GetName() string {
return d.Name
}
func (d *Peer) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.PEER, caller) // Initialize the accessor with the PEER model type
return data return data
} }
func (dma *Peer) Deserialize(j map[string]interface{}) utils.DBObject { func (d *Peer) VerifyAuth(username string, peerID string, groups []string) bool {
b, err := json.Marshal(j) return true
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}
func (dma *Peer) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
} }
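Peer keeps its cache-backed LaunchPeerExecution while GetAccessor moves to the new constructor. A minimal sketch of pushing an already-serialized object to a remote peer, assuming the argument order shown in the calls above; peerID, dataID and the data type are placeholders:

package example // illustrative sketch

import (
	"cloud.o-forge.io/core/oc-lib/models/peer"
	"cloud.o-forge.io/core/oc-lib/tools"
)

// push forwards a serialized object to a remote peer through the peer cache.
func push(peerID, dataID string, body map[string]interface{}, caller *tools.HTTPCaller) error {
	p := &peer.Peer{}
	_, err := p.LaunchPeerExecution(peerID, dataID, tools.COLLABORATIVE_AREA, tools.POST, body, caller)
	return err
}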

View File

@ -56,7 +56,7 @@ func (p *PeerCache) urlFormat(url string, dt tools.DataType) string {
// checkPeerStatus checks the status of a peer // checkPeerStatus checks the status of a peer
func (p *PeerCache) checkPeerStatus(peerID string, appName string, caller *tools.HTTPCaller) (*Peer, bool) { func (p *PeerCache) checkPeerStatus(peerID string, appName string, caller *tools.HTTPCaller) (*Peer, bool) {
api := tools.API{} api := tools.API{}
access := (&Peer{}).GetAccessor(nil) access := NewShallow()
res, code, _ := access.LoadOne(peerID) // Load the peer from db res, code, _ := access.LoadOne(peerID) // Load the peer from db
if code != 200 { // no peer no party if code != 200 { // no peer no party
return nil, false return nil, false
@ -101,18 +101,18 @@ func (p *PeerCache) LaunchPeerExecution(peerID string, dataID string,
DataID: dataID, DataID: dataID,
} }
mypeer.AddExecution(*pexec) mypeer.AddExecution(*pexec)
mypeer.GetAccessor(nil).UpdateOne(mypeer, peerID) // Update the peer in the db NewShallow().UpdateOne(mypeer, peerID) // Update the peer in the db
return nil, errors.New("peer is not reachable") return nil, errors.New("peer is not reachable")
} else { } else {
if mypeer == nil { if mypeer == nil {
return nil, errors.New("peer not found") return nil, errors.New("peer not found")
} }
// If the peer is reachable, launch the execution // If the peer is reachable, launch the execution
url = p.urlFormat((mypeer.Url)+meth, dt) // Format the URL url = p.urlFormat((mypeer.Url)+meth, dt) // Format the URL
tmp := mypeer.FailedExecution // Get the failed executions list tmp := mypeer.FailedExecution // Get the failed executions list
mypeer.FailedExecution = []PeerExecution{} // Reset the failed executions list mypeer.FailedExecution = []PeerExecution{} // Reset the failed executions list
mypeer.GetAccessor(nil).UpdateOne(mypeer, peerID) // Update the peer in the db NewShallow().UpdateOne(mypeer, peerID) // Update the peer in the db
for _, v := range tmp { // Retry the failed executions for _, v := range tmp { // Retry the failed executions
go p.exec(v.Url, tools.ToMethod(v.Method), v.Body, caller) go p.exec(v.Url, tools.ToMethod(v.Method), v.Body, caller)
} }
} }

View File

@ -4,8 +4,9 @@ import (
"strconv" "strconv"
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo" "cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
) )
type peerMongoAccessor struct { type peerMongoAccessor struct {
@ -13,8 +14,26 @@ type peerMongoAccessor struct {
} }
// New creates a new instance of the peerMongoAccessor // New creates a new instance of the peerMongoAccessor
func New() *peerMongoAccessor { func NewShallow() *peerMongoAccessor {
return &peerMongoAccessor{} return &peerMongoAccessor{
utils.AbstractAccessor{
Logger: logs.CreateLogger(tools.PEER.String()), // Create a logger with the data type
Type: tools.PEER,
},
}
}
func New(t tools.DataType, username string, peerID string, groups []string, caller *tools.HTTPCaller) *peerMongoAccessor {
return &peerMongoAccessor{
utils.AbstractAccessor{
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
Caller: caller,
PeerID: peerID,
User: username,
Groups: groups, // Set the caller
Type: t,
},
}
} }
/* /*
@ -22,80 +41,53 @@ func New() *peerMongoAccessor {
*/ */
func (wfa *peerMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) { func (wfa *peerMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
return wfa.GenericDeleteOne(id, wfa) return utils.GenericDeleteOne(id, wfa)
} }
func (wfa *peerMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) { func (wfa *peerMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
return wfa.GenericUpdateOne(set.(*Peer), id, wfa, &Peer{}) return utils.GenericUpdateOne(set.(*Peer), id, wfa, &Peer{})
} }
func (wfa *peerMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) { func (wfa *peerMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
return wfa.GenericStoreOne(data.(*Peer), wfa) return utils.GenericStoreOne(data.(*Peer), wfa)
} }
func (wfa *peerMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) { func (wfa *peerMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return wfa.GenericStoreOne(data, wfa) return utils.GenericStoreOne(data, wfa)
} }
func (wfa *peerMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) { func (dca *peerMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
var peer Peer return utils.GenericLoadOne[*Peer](id, func(d utils.DBObject) (utils.DBObject, int, error) {
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType()) return d, 200, nil
if err != nil { }, dca)
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}
res_mongo.Decode(&peer)
return &peer, 200, nil
} }
func (wfa peerMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) { func (wfa *peerMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{} return utils.GenericLoadAll[*Peer](func(d utils.DBObject) utils.ShallowDBObject {
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType()) return d
if err != nil { }, wfa)
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
var results []Peer
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
objs = append(objs, &r)
}
return objs, 200, nil
} }
func (wfa *peerMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) { func (wfa *peerMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{} return utils.GenericSearch[*Peer](filters, search, wfa.getDefaultFilter(search),
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" { func(d utils.DBObject) utils.ShallowDBObject {
s, err := strconv.Atoi(search) return d
if err == nil { }, wfa)
filters = &dbs.Filters{ }
Or: map[string][]dbs.Filter{ // search by name if no filters are provided func (a *peerMongoAccessor) getDefaultFilter(search string) *dbs.Filters {
"state": {{Operator: dbs.EQUAL.String(), Value: s}}, s, err := strconv.Atoi(search)
}, if err == nil {
} return &dbs.Filters{
} else { Or: map[string][]dbs.Filter{ // search by name if no filters are provided
filters = &dbs.Filters{ "state": {{Operator: dbs.EQUAL.String(), Value: s}},
Or: map[string][]dbs.Filter{ // search by name if no filters are provided },
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}}, }
"url": {{Operator: dbs.LIKE.String(), Value: search}}, } else {
}, return &dbs.Filters{
} Or: map[string][]dbs.Filter{ // search by name if no filters are provided
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
"url": {{Operator: dbs.LIKE.String(), Value: search}},
},
} }
} }
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err
}
var results []Peer
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
objs = append(objs, &r)
}
return objs, 200, nil
} }
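getDefaultFilter above decides how a bare search string is interpreted. A short usage sketch, assuming GenericSearch falls back to this default filter when no explicit filters are given, as the Search call above suggests:

package example // illustrative sketch

import "cloud.o-forge.io/core/oc-lib/models/peer"

// searchPeers: a numeric search string matches the peer state exactly,
// anything else LIKE-matches the peer name and url.
func searchPeers() {
	acc := peer.NewShallow()
	byState, _, _ := acc.Search(nil, "1")         // state == 1
	byName, _, _ := acc.Search(nil, "my-partner") // name or url LIKE "my-partner"
	_, _ = byState, byName
}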

View File

@ -1,83 +0,0 @@
package resource_model
import (
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/models/utils"
)
type ResourceModelMongoAccessor struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
}
/*
* Nothing special here, just the basic CRUD operations
*/
func (wfa *ResourceModelMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
return wfa.GenericDeleteOne(id, wfa)
}
func (wfa *ResourceModelMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
return wfa.GenericUpdateOne(set, id, wfa, &ResourceModel{})
}
func (wfa *ResourceModelMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
return wfa.GenericStoreOne(data, wfa)
}
func (wfa *ResourceModelMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return wfa.GenericStoreOne(data, wfa)
}
func (wfa *ResourceModelMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
var workflow ResourceModel
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}
res_mongo.Decode(&workflow)
return &workflow, 200, nil
}
func (wfa ResourceModelMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
var results []ResourceModel
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
objs = append(objs, &r)
}
return objs, 200, nil
}
func (wfa *ResourceModelMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = &dbs.Filters{
Or: map[string][]dbs.Filter{
"resource_type": {{Operator: dbs.LIKE.String(), Value: search}},
},
}
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err
}
var results []ResourceModel
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
objs = append(objs, &r)
}
return objs, 200, nil
}

View File

@ -1,13 +1,53 @@
package compute package resources
import ( import (
"encoding/json" "cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
) )
/*
* ComputeResource is a struct that represents a compute resource
* it defines the resource compute
*/
type ComputeResource struct {
resource_model.AbstractResource
Technology TechnologyEnum `json:"technology" bson:"technology" default:"0"` // Technology is the technology
Architecture string `json:"architecture,omitempty" bson:"architecture,omitempty"` // Architecture is the architecture
Access AccessEnum `json:"access" bson:"access" default:"0"` // Access is the access
Localisation string `json:"localisation,omitempty" bson:"localisation,omitempty"` // Localisation is the localisation
CPUs []*CPU `bson:"cpus,omitempty" json:"cpus,omitempty"` // CPUs is the list of CPUs
RAM *RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
GPUs []*GPU `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs
}
func (d *ComputeResource) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return New[*ComputeResource](tools.COMPUTE_RESOURCE, username, peerID, groups, caller, func() utils.DBObject { return &ComputeResource{} })
}
// CPU is a struct that represents a CPU
type CPU struct {
Cores uint `bson:"cores,omitempty" json:"cores,omitempty"` //TODO: validate
Architecture string `bson:"architecture,omitempty" json:"architecture,omitempty"` //TODO: enum
Shared bool `bson:"shared,omitempty" json:"shared,omitempty"`
MinimumMemory uint `bson:"minimum_memory,omitempty" json:"minimum_memory,omitempty"`
Platform string `bson:"platform,omitempty" json:"platform,omitempty"`
}
type RAM struct {
Size uint `bson:"size,omitempty" json:"size,omitempty" description:"Units in MB"`
Ecc bool `bson:"ecc,omitempty" json:"ecc,omitempty"`
}
type GPU struct {
CudaCores uint `bson:"cuda_cores,omitempty" json:"cuda_cores,omitempty"`
Model string `bson:"model,omitempty" json:"model,omitempty"`
Memory uint `bson:"memory,omitempty" json:"memory,omitempty" description:"Units in MB"`
TensorCores uint `bson:"tensor_cores,omitempty" json:"tensor_cores,omitempty"`
}
type TechnologyEnum int type TechnologyEnum int
const ( const (
@ -36,66 +76,3 @@ const (
func (a AccessEnum) String() string { func (a AccessEnum) String() string {
return [...]string{"SSH", "SSH_KUBE_API", "SSH_SLURM", "SSH_DOCKER", "OPENCLOUD", "VPN"}[a] return [...]string{"SSH", "SSH_KUBE_API", "SSH_SLURM", "SSH_DOCKER", "OPENCLOUD", "VPN"}[a]
} }
/*
* ComputeResource is a struct that represents a compute resource
* it defines the resource compute
*/
type ComputeResource struct {
resource_model.AbstractResource
Technology TechnologyEnum `json:"technology" bson:"technology" default:"0"` // Technology is the technology
Architecture string `json:"architecture,omitempty" bson:"architecture,omitempty"` // Architecture is the architecture
Access AccessEnum `json:"access" bson:"access default:"0"` // Access is the access
Localisation string `json:"localisation,omitempty" bson:"localisation,omitempty"` // Localisation is the localisation
CPUs []*CPU `bson:"cpus,omitempty" json:"cpus,omitempty"` // CPUs is the list of CPUs
RAM *RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
GPUs []*GPU `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs
}
func (dma *ComputeResource) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}
func (dma *ComputeResource) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}
func (d *ComputeResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New()
data.Init(tools.COMPUTE_RESOURCE, caller)
return data
}
// CPU is a struct that represents a CPU
type CPU struct {
Cores uint `bson:"cores,omitempty" json:"cores,omitempty"` //TODO: validate
Architecture string `bson:"architecture,omitempty" json:"architecture,omitempty"` //TODO: enum
Shared bool `bson:"shared,omitempty" json:"shared,omitempty"`
MinimumMemory uint `bson:"minimum_memory,omitempty" json:"minimum_memory,omitempty"`
Platform string `bson:"platform,omitempty" json:"platform,omitempty"`
}
type RAM struct {
Size uint `bson:"size,omitempty" json:"size,omitempty" description:"Units in MB"`
Ecc bool `bson:"ecc,omitempty" json:"ecc,omitempty"`
}
type GPU struct {
CudaCores uint `bson:"cuda_cores,omitempty" json:"cuda_cores,omitempty"`
Model string `bson:"model,omitempty" json:"model,omitempty"`
Memory uint `bson:"memory,omitempty" json:"memory,omitempty" description:"Units in MB"`
TensorCores uint `bson:"tensor_cores,omitempty" json:"tensor_cores,omitempty"`
}
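The compute resource now lives in the consolidated resources package, with GetAccessor delegating to the generic New[*ComputeResource] constructor. A minimal sketch of building and storing one, assuming the Accessor interface exposes StoreOne as the concrete accessors in this changeset do; identity values and hardware figures are placeholders:

package example // illustrative sketch

import "cloud.o-forge.io/core/oc-lib/models/resources"

// storeCompute builds a ComputeResource with the hardware structs above.
func storeCompute() {
	cr := &resources.ComputeResource{
		CPUs: []*resources.CPU{{Cores: 8, Architecture: "x86_64"}},
		RAM:  &resources.RAM{Size: 16384},                      // MB
		GPUs: []*resources.GPU{{Model: "A100", Memory: 40960}}, // MB
	}
	acc := cr.GetAccessor("alice", "peer-0", []string{"dev"}, nil)
	_, _, _ = acc.StoreOne(cr)
}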

View File

@ -1,112 +0,0 @@
package compute
import (
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
)
type computeMongoAccessor struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
}
// New creates a new instance of the computeMongoAccessor
func New() *computeMongoAccessor {
return &computeMongoAccessor{}
}
/*
* Nothing special here, just the basic CRUD operations
*/
func (dca *computeMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
return dca.GenericDeleteOne(id, dca)
}
func (dca *computeMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
set.(*ComputeResource).ResourceModel = nil
return dca.GenericUpdateOne(set, id, dca, &ComputeResource{})
}
func (dca *computeMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
data.(*ComputeResource).ResourceModel = nil
return dca.GenericStoreOne(data, dca)
}
func (dca *computeMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return dca.GenericStoreOne(data, dca)
}
func (dca *computeMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
var compute ComputeResource
res_mongo, code, err := mongo.MONGOService.LoadOne(id, dca.GetType())
if err != nil {
dca.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}
res_mongo.Decode(&compute)
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, dca.GetType())
if err == nil && len(resources) > 0 {
compute.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
return &compute, 200, nil
}
func (wfa computeMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
var results []ComputeResource
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results {
if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
objs = append(objs, &r) // only get the abstract resource !
}
return objs, 200, nil
}
func (wfa *computeMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
},
}
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err
}
var results []ComputeResource
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results {
if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
objs = append(objs, &r) // only get the abstract resource !
}
return objs, 200, nil
}

View File

@ -1,46 +0,0 @@
package compute
import (
"testing"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
"github.com/stretchr/testify/assert"
)
func TestStoreOneCompute(t *testing.T) {
dc := ComputeResource{
AbstractResource: resource_model.AbstractResource{
AbstractObject: utils.AbstractObject{Name: "testCompute"},
Description: "Lorem Ipsum",
Logo: "azerty.com",
Owner: "toto",
OwnerLogo: "totoLogo",
SourceUrl: "azerty.fr",
},
}
dcma := New()
id, _, _ := dcma.StoreOne(&dc)
assert.NotEmpty(t, id)
}
func TestLoadOneCompute(t *testing.T) {
dc := ComputeResource{
AbstractResource: resource_model.AbstractResource{
AbstractObject: utils.AbstractObject{Name: "testCompute"},
Description: "Lorem Ipsum",
Logo: "azerty.com",
Owner: "toto",
OwnerLogo: "totoLogo",
SourceUrl: "azerty.fr",
},
}
dcma := New()
new_dc, _, _ := dcma.StoreOne(&dc)
assert.Equal(t, dc, new_dc)
}

View File

@ -1,9 +1,7 @@
package data package resources
import ( import (
"encoding/json" "cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
) )
@ -39,27 +37,6 @@ type DataResource struct {
Example string `json:"example,omitempty" bson:"example,omitempty" description:"base64 encoded data"` // Example is an example of the data Example string `json:"example,omitempty" bson:"example,omitempty" description:"base64 encoded data"` // Example is an example of the data
} }
func (dma *DataResource) Deserialize(j map[string]interface{}) utils.DBObject { func (d *DataResource) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
b, err := json.Marshal(j) return New[*DataResource](tools.DATA_RESOURCE, username, peerID, groups, caller, func() utils.DBObject { return &DataResource{} }) // Create a new instance of the accessor
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}
func (dma *DataResource) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}
func (d *DataResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.DATA_RESOURCE, caller) // Initialize the accessor with the DATA_RESOURCE model type
return data
} }

View File

@ -1,110 +0,0 @@
package data
import (
"cloud.o-forge.io/core/oc-lib/dbs"
mongo "cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
)
type dataMongoAccessor struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
}
// New creates a new instance of the dataMongoAccessor
func New() *dataMongoAccessor {
return &dataMongoAccessor{}
}
/*
* Nothing special here, just the basic CRUD operations
*/
func (dma *dataMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
return dma.GenericDeleteOne(id, dma)
}
func (dma *dataMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
set.(*DataResource).ResourceModel = nil
return dma.GenericUpdateOne(set, id, dma, &DataResource{})
}
func (dma *dataMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
data.(*DataResource).ResourceModel = nil
return dma.GenericStoreOne(data, dma)
}
func (dma *dataMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return dma.GenericStoreOne(data, dma)
}
func (dma *dataMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
var data DataResource
res_mongo, code, err := mongo.MONGOService.LoadOne(id, dma.GetType())
if err != nil {
dma.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}
res_mongo.Decode(&data)
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, dma.GetType())
if err == nil && len(resources) > 0 {
data.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
return &data, 200, nil
}
func (wfa dataMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
var results []DataResource
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results {
if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
objs = append(objs, &r) // only get the abstract resource !
}
return objs, 200, nil
}
func (wfa *dataMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
},
}
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err
}
var results []DataResource
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results {
if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
objs = append(objs, &r) // only get the abstract resource !
}
return objs, 200, nil
}

View File

@ -1,53 +0,0 @@
package data
import (
"testing"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
"github.com/stretchr/testify/assert"
)
func TestStoreOneData(t *testing.T) {
d := DataResource{
WebResource: resource_model.WebResource{
Protocol: "http", Path: "azerty.fr",
},
Example: "123456",
AbstractResource: resource_model.AbstractResource{
AbstractObject: utils.AbstractObject{Name: "testData"},
Description: "Lorem Ipsum",
Logo: "azerty.com",
Owner: "toto",
OwnerLogo: "totoLogo",
SourceUrl: "azerty.fr",
},
}
dma := New()
id, _, _ := dma.StoreOne(&d)
assert.NotEmpty(t, id)
}
func TestLoadOneDate(t *testing.T) {
d := DataResource{
WebResource: resource_model.WebResource{
Protocol: "http", Path: "azerty.fr",
},
Example: "123456",
AbstractResource: resource_model.AbstractResource{
AbstractObject: utils.AbstractObject{Name: "testData"},
Description: "Lorem Ipsum",
Logo: "azerty.com",
Owner: "toto",
OwnerLogo: "totoLogo",
SourceUrl: "azerty.fr",
},
}
dma := New()
new_d, _, _ := dma.StoreOne(&d)
assert.Equal(t, d, new_d)
}

View File

@ -0,0 +1,43 @@
package resources
import (
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)
type Container struct {
Image string `json:"image,omitempty" bson:"image,omitempty"` // Image is the container image
Command string `json:"command,omitempty" bson:"command,omitempty"` // Command is the container command
Args string `json:"args,omitempty" bson:"args,omitempty"` // Args is the container arguments
Env map[string]string `json:"env,omitempty" bson:"env,omitempty"` // Env is the container environment variables
Volumes map[string]string `json:"volumes,omitempty" bson:"volumes,omitempty"` // Volumes is the container volumes
}
type Expose struct {
Port int `json:"port,omitempty" bson:"port,omitempty"` // Port is the port
Reverse string `json:"reverse,omitempty" bson:"reverse,omitempty"` // Reverse is the reverse
PAT int `json:"pat,omitempty" bson:"pat,omitempty"` // PAT is the PAT
}
/*
* ProcessingResource is a struct that represents a processing resource
* it defines the resource processing
*/
type ProcessingResource struct {
resource_model.AbstractResource
IsService bool `json:"is_service,omitempty" bson:"is_service,omitempty"` // IsService is a flag that indicates if the processing is a service
CPUs []*CPU `bson:"cpus,omitempty" json:"cp_us,omitempty"` // CPUs is the list of CPUs
GPUs []*GPU `bson:"gpus,omitempty" json:"gp_us,omitempty"` // GPUs is the list of GPUs
RAM *RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
Storage uint `bson:"storage,omitempty" json:"storage,omitempty"` // Storage is the storage
Parallel bool `bson:"parallel,omitempty" json:"parallel,omitempty"` // Parallel is a flag that indicates if the processing is parallel
ScalingModel uint `bson:"scaling_model,omitempty" json:"scaling_model,omitempty"` // ScalingModel is the scaling model
DiskIO string `bson:"disk_io,omitempty" json:"disk_io,omitempty"` // DiskIO is the disk IO
Container *Container `bson:"container,omitempty" json:"container,omitempty"` // Container is the container
Expose []Expose `bson:"expose,omitempty" json:"expose,omitempty"` // Expose is the execution
}
func (d *ProcessingResource) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return New[*ProcessingResource](tools.PROCESSING_RESOURCE, username, peerID, groups, caller, func() utils.DBObject { return &ProcessingResource{} }) // Create a new instance of the accessor
}
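ProcessingResource now carries its own Container and Expose definitions inside the resources package. A minimal sketch of a containerised service step, assuming the field set shown above; image, command and port values are made up:

package example // illustrative sketch

import "cloud.o-forge.io/core/oc-lib/models/resources"

// containerisedStep describes a processing resource that runs as a service.
func containerisedStep() *resources.ProcessingResource {
	return &resources.ProcessingResource{
		IsService: true,
		Container: &resources.Container{
			Image:   "registry.example.org/ocr:latest",
			Command: "python",
			Args:    "main.py",
			Env:     map[string]string{"MODE": "service"},
		},
		Expose: []resources.Expose{{Port: 8080}},
	}
}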

View File

@ -1,67 +0,0 @@
package processing
import (
"encoding/json"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)
type Container struct {
Image string `json:"image,omitempty" bson:"image,omitempty"` // Image is the container image
Command string `json:"command,omitempty" bson:"command,omitempty"` // Command is the container command
Args string `json:"args,omitempty" bson:"args,omitempty"` // Args is the container arguments
Env map[string]string `json:"env,omitempty" bson:"env,omitempty"` // Env is the container environment variables
Volumes map[string]string `json:"volumes,omitempty" bson:"volumes,omitempty"` // Volumes is the container volumes
}
type Expose struct {
Port int `json:"port,omitempty" bson:"port,omitempty"` // Port is the port
Reverse string `json:"reverse,omitempty" bson:"reverse,omitempty"` // Reverse is the reverse
PAT int `json:"pat,omitempty" bson:"pat,omitempty"` // PAT is the PAT
}
/*
* ProcessingResource is a struct that represents a processing resource
* it defines the resource processing
*/
type ProcessingResource struct {
resource_model.AbstractResource
IsService bool `json:"is_service,omitempty" bson:"is_service,omitempty"` // IsService is a flag that indicates if the processing is a service
CPUs []*compute.CPU `bson:"cpus,omitempty" json:"cp_us,omitempty"` // CPUs is the list of CPUs
GPUs []*compute.GPU `bson:"gpus,omitempty" json:"gp_us,omitempty"` // GPUs is the list of GPUs
RAM *compute.RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
Storage uint `bson:"storage,omitempty" json:"storage,omitempty"` // Storage is the storage
Parallel bool `bson:"parallel,omitempty" json:"parallel,omitempty"` // Parallel is a flag that indicates if the processing is parallel
ScalingModel uint `bson:"scaling_model,omitempty" json:"scaling_model,omitempty"` // ScalingModel is the scaling model
DiskIO string `bson:"disk_io,omitempty" json:"disk_io,omitempty"` // DiskIO is the disk IO
Container *Container `bson:"container,omitempty" json:"container,omitempty"` // Container is the container
Expose []Expose `bson:"expose,omitempty" json:"expose,omitempty"` // Expose is the execution
}
func (dma *ProcessingResource) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}
func (dma *ProcessingResource) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}
func (d *ProcessingResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.PROCESSING_RESOURCE, caller) // Initialize the accessor with the PROCESSING_RESOURCE model type
return data
}

View File

@ -1,114 +0,0 @@
package processing
import (
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
)
type processingMongoAccessor struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
}
// New creates a new instance of the processingMongoAccessor
func New() *processingMongoAccessor {
return &processingMongoAccessor{}
}
/*
* Nothing special here, just the basic CRUD operations
*/
func (pma *processingMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
return pma.GenericDeleteOne(id, pma)
}
func (pma *processingMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
set.(*ProcessingResource).ResourceModel = nil
return pma.GenericUpdateOne(set, id, pma, &ProcessingResource{})
}
func (pma *processingMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
data.(*ProcessingResource).ResourceModel = nil
return pma.GenericStoreOne(data, pma)
}
func (pma *processingMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return pma.GenericStoreOne(data, pma)
}
func (pma *processingMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
var processing ProcessingResource
res_mongo, code, err := mongo.MONGOService.LoadOne(id, pma.GetType())
if err != nil {
pma.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}
res_mongo.Decode(&processing)
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, pma.GetType())
if err == nil && len(resources) > 0 {
processing.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
return &processing, 200, nil
}
func (wfa processingMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
var results []ProcessingResource
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results {
if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
objs = append(objs, &r) // only get the abstract resource !
}
return objs, 200, nil
}
// Search searches for processing resources in the database, given some filters OR a search string
func (wfa *processingMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
},
}
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err
}
var results []ProcessingResource
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results {
if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
objs = append(objs, &r) // only get the abstract resource !
}
return objs, 200, nil
}

View File

@ -1,38 +0,0 @@
package processing
/*
func TestStoreOneProcessing(t *testing.T) {
p := ProcessingResource{Container: "totoCont",
AbstractResource: resources.AbstractResource{
AbstractObject: utils.AbstractObject{Name: "testData"},
Description: "Lorem Ipsum",
Logo: "azerty.com",
Owner: "toto",
OwnerLogo: "totoLogo",
SourceUrl: "azerty.fr",
},
}
sma := ProcessingMongoAccessor{}
id, _, _ := sma.StoreOne(&p)
assert.NotEmpty(t, id)
}
func TestLoadOneProcessing(t *testing.T) {
p := ProcessingResource{Container: "totoCont",
AbstractResource: resources.AbstractResource{
AbstractObject: utils.AbstractObject{Name: "testData"},
Description: "Lorem Ipsum",
Logo: "azerty.com",
Owner: "toto",
OwnerLogo: "totoLogo",
SourceUrl: "azerty.fr",
},
}
sma := ProcessingMongoAccessor{}
new_s, _, _ := sma.StoreOne(&p)
assert.Equal(t, p, new_s)
}
*/

View File

@ -1,12 +1,8 @@
package resources package resources
import ( import (
"cloud.o-forge.io/core/oc-lib/models/resource_model" "cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/resources/data" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
"cloud.o-forge.io/core/oc-lib/models/resources/processing"
"cloud.o-forge.io/core/oc-lib/models/resources/storage"
w "cloud.o-forge.io/core/oc-lib/models/resources/workflow"
) )
// AbstractResource is the struct containing all of the attributes commons to all ressources // AbstractResource is the struct containing all of the attributes commons to all ressources
@ -14,44 +10,86 @@ import (
// Resource is the interface to be implemented by all classes inheriting from Resource to have the same behavior // Resource is the interface to be implemented by all classes inheriting from Resource to have the same behavior
// http://www.inanzzz.com/index.php/post/wqbs/a-basic-usage-of-int-and-string-enum-types-in-golang // http://www.inanzzz.com/index.php/post/wqbs/a-basic-usage-of-int-and-string-enum-types-in-golang
type ResourceInterface interface {
utils.DBObject
Trim() *resource_model.AbstractResource
SetResourceModel(model *resource_model.ResourceModel)
}
type ResourceSet struct { type ResourceSet struct {
Datas []string `bson:"datas,omitempty" json:"datas,omitempty"` Datas []string `bson:"datas,omitempty" json:"datas,omitempty"`
Storages []string `bson:"storages,omitempty" json:"storages,omitempty"` Storages []string `bson:"storages,omitempty" json:"storages,omitempty"`
Processings []string `bson:"processings,omitempty" json:"processings,omitempty"` Processings []string `bson:"processings,omitempty" json:"processings,omitempty"`
Computes []string `bson:"computes,omitempty" json:"computes,omitempty"` Computes []string `bson:"computes,omitempty" json:"computes,omitempty"`
Workflows []string `bson:"workflows,omitempty" json:"workflows,omitempty"` Workflows []string `bson:"workflows,omitempty" json:"workflows,omitempty"`
DataResources []*data.DataResource `bson:"-" json:"data_resources,omitempty"` DataResources []*DataResource `bson:"-" json:"data_resources,omitempty"`
StorageResources []*storage.StorageResource `bson:"-" json:"storage_resources,omitempty"` StorageResources []*StorageResource `bson:"-" json:"storage_resources,omitempty"`
ProcessingResources []*processing.ProcessingResource `bson:"-" json:"processing_resources,omitempty"` ProcessingResources []*ProcessingResource `bson:"-" json:"processing_resources,omitempty"`
ComputeResources []*compute.ComputeResource `bson:"-" json:"compute_resources,omitempty"` ComputeResources []*ComputeResource `bson:"-" json:"compute_resources,omitempty"`
WorkflowResources []*w.WorkflowResource `bson:"-" json:"workflow_resources,omitempty"` WorkflowResources []*WorkflowResource `bson:"-" json:"workflow_resources,omitempty"`
}
func (r *ResourceSet) Clear() {
r.DataResources = nil
r.StorageResources = nil
r.ProcessingResources = nil
r.ComputeResources = nil
r.WorkflowResources = nil
}
func (r *ResourceSet) Fill(username string, peerID string, groups []string) {
for k, v := range map[utils.DBObject][]string{
(&DataResource{}): r.Datas,
(&ComputeResource{}): r.Computes,
(&StorageResource{}): r.Storages,
(&ProcessingResource{}): r.Processings,
(&WorkflowResource{}): r.Workflows,
} {
for _, id := range v {
d, _, e := k.GetAccessor(username, peerID, groups, nil).LoadOne(id)
if e == nil {
switch k.(type) {
case *DataResource:
r.DataResources = append(r.DataResources, d.(*DataResource))
case *ComputeResource:
r.ComputeResources = append(r.ComputeResources, d.(*ComputeResource))
case *StorageResource:
r.StorageResources = append(r.StorageResources, d.(*StorageResource))
case *ProcessingResource:
r.ProcessingResources = append(r.ProcessingResources, d.(*ProcessingResource))
case *WorkflowResource:
r.WorkflowResources = append(r.WorkflowResources, d.(*WorkflowResource))
}
}
}
}
} }
type ItemResource struct { type ItemResource struct {
Data *data.DataResource `bson:"data,omitempty" json:"data,omitempty"` Data *DataResource `bson:"data,omitempty" json:"data,omitempty"`
Processing *processing.ProcessingResource `bson:"processing,omitempty" json:"processing,omitempty"` Processing *ProcessingResource `bson:"processing,omitempty" json:"processing,omitempty"`
Storage *storage.StorageResource `bson:"storage,omitempty" json:"storage,omitempty"` Storage *StorageResource `bson:"storage,omitempty" json:"storage,omitempty"`
Compute *compute.ComputeResource `bson:"compute,omitempty" json:"compute,omitempty"` Compute *ComputeResource `bson:"compute,omitempty" json:"compute,omitempty"`
Workflow *w.WorkflowResource `bson:"workflow,omitempty" json:"workflow,omitempty"` Workflow *WorkflowResource `bson:"workflow,omitempty" json:"workflow,omitempty"`
} }
func (i *ItemResource) GetAbstractRessource() *resource_model.AbstractResource { func (i *ItemResource) GetAbstractRessource() *resource_model.AbstractResource {
if(i.Data != nil){ if i.Data != nil {
return &i.Data.AbstractResource return &i.Data.AbstractResource
} }
if(i.Processing != nil){ if i.Processing != nil {
return &i.Processing.AbstractResource return &i.Processing.AbstractResource
} }
if(i.Storage != nil){ if i.Storage != nil {
return &i.Storage.AbstractResource return &i.Storage.AbstractResource
} }
if(i.Compute != nil){ if i.Compute != nil {
return &i.Compute.AbstractResource return &i.Compute.AbstractResource
} }
if(i.Workflow != nil){ if i.Workflow != nil {
return &i.Workflow.AbstractResource return &i.Workflow.AbstractResource
} }
return nil return nil
} }
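A sketch of how ResourceSet.Fill is intended to be used: stored IDs are resolved into concrete resources through each type's accessor (IDs and identity values below are hypothetical; a reachable database is assumed):

package main

import (
	"fmt"

	"cloud.o-forge.io/core/oc-lib/models/resources"
)

func main() {
	set := &resources.ResourceSet{
		Datas:       []string{"data-id-1"}, // hypothetical stored IDs
		Processings: []string{"processing-id-1"},
	}
	set.Fill("alice", "peer-123", []string{"dev"}) // loads each ID and appends it to the matching *Resources slice

	for _, d := range set.DataResources {
		fmt.Println("resolved data resource:", d.GetName())
	}
}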

View File

@ -0,0 +1,95 @@
package resources
import (
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)
type resourceMongoAccessor[T ResourceInterface] struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
generateData func() utils.DBObject
}
// New creates a new instance of the resourceMongoAccessor
func New[T ResourceInterface](t tools.DataType, username string, peerID string, groups []string, caller *tools.HTTPCaller, g func() utils.DBObject) *resourceMongoAccessor[T] {
return &resourceMongoAccessor[T]{
AbstractAccessor: utils.AbstractAccessor{
ResourceModelAccessor: resource_model.New(),
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
Caller: caller,
PeerID: peerID,
User: username, // Set the user
Groups: groups, // Set the groups
Type: t,
},
generateData: g,
}
}
/*
* Nothing special here, just the basic CRUD operations
*/
func (dca *resourceMongoAccessor[T]) DeleteOne(id string) (utils.DBObject, int, error) {
return utils.GenericDeleteOne(id, dca)
}
func (dca *resourceMongoAccessor[T]) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
set.(T).SetResourceModel(nil)
return utils.GenericUpdateOne(set.(T).Trim(), id, dca, dca.generateData()) // TODO CHANGE
}
func (dca *resourceMongoAccessor[T]) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
data.(T).SetResourceModel(nil)
return utils.GenericStoreOne(data.(T).Trim(), dca)
}
func (dca *resourceMongoAccessor[T]) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return dca.StoreOne(data)
}
func (dca *resourceMongoAccessor[T]) LoadOne(id string) (utils.DBObject, int, error) {
return utils.GenericLoadOne[T](id, func(d utils.DBObject) (utils.DBObject, int, error) {
resources, _, err := dca.ResourceModelAccessor.Search(nil, dca.GetType().String())
if err == nil && len(resources) > 0 {
d.(T).SetResourceModel(resources[0].(*resource_model.ResourceModel))
}
return d, 200, nil
}, dca)
}
func (wfa *resourceMongoAccessor[T]) LoadAll() ([]utils.ShallowDBObject, int, error) {
resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType().String())
return utils.GenericLoadAll[T](func(d utils.DBObject) utils.ShallowDBObject {
if err == nil && len(resources) > 0 {
d.(T).SetResourceModel(resources[0].(*resource_model.ResourceModel))
}
return d
}, wfa)
}
func (wfa *resourceMongoAccessor[T]) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType().String())
return utils.GenericSearch[T](filters, search, wfa.getResourceFilter(search),
func(d utils.DBObject) utils.ShallowDBObject {
if err == nil && len(resources) > 0 {
d.(T).SetResourceModel(resources[0].(*resource_model.ResourceModel))
}
return d
}, wfa)
}
func (abs *resourceMongoAccessor[T]) getResourceFilter(search string) *dbs.Filters {
return &dbs.Filters{
Or: map[string][]dbs.Filter{ // LIKE-match on name, short_description, description, owner and source_url when no filters are provided
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
},
}
}
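The same generic accessor now backs every resource type; the sketch below instantiates it directly for storage resources and runs a free-text search, which falls back to the LIKE filters built by getResourceFilter (identity values and the search term are hypothetical):

package main

import (
	"fmt"

	"cloud.o-forge.io/core/oc-lib/models/resources"
	"cloud.o-forge.io/core/oc-lib/models/utils"
	"cloud.o-forge.io/core/oc-lib/tools"
)

func main() {
	acc := resources.New[*resources.StorageResource](
		tools.STORAGE_RESOURCE, "alice", "peer-123", []string{"dev"}, nil,
		func() utils.DBObject { return &resources.StorageResource{} })

	hits, code, err := acc.Search(nil, "minio") // hypothetical search term
	fmt.Println(len(hits), code, err)
}

In practice this is exactly what (&resources.StorageResource{}).GetAccessor(...) wires up for you.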

View File

@ -2,7 +2,10 @@ package resource_model
import ( import (
"encoding/json" "encoding/json"
"slices"
"cloud.o-forge.io/core/oc-lib/config"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
"github.com/google/uuid" "github.com/google/uuid"
@ -26,9 +29,35 @@ type AbstractResource struct {
OwnerLogo string `json:"owner_logo,omitempty" bson:"owner_logo,omitempty"` // OwnerLogo is the owner logo of the resource OwnerLogo string `json:"owner_logo,omitempty" bson:"owner_logo,omitempty"` // OwnerLogo is the owner logo of the resource
SourceUrl string `json:"source_url,omitempty" bson:"source_url,omitempty" validate:"required"` // SourceUrl is the source URL of the resource SourceUrl string `json:"source_url,omitempty" bson:"source_url,omitempty" validate:"required"` // SourceUrl is the source URL of the resource
PeerID string `json:"peer_id,omitempty" bson:"peer_id,omitempty" validate:"required"` // PeerID is the ID of the peer getting this resource PeerID string `json:"peer_id,omitempty" bson:"peer_id,omitempty" validate:"required"` // PeerID is the ID of the peer getting this resource
Price string `json:"price,omitempty" bson:"price,omitempty"` // Price is the price of access to the resource
License string `json:"license,omitempty" bson:"license,omitempty"` // License is the license of the resource License string `json:"license,omitempty" bson:"license,omitempty"` // License is the license of the resource
ResourceModel *ResourceModel `json:"resource_model,omitempty" bson:"resource_model,omitempty"` // ResourceModel is the model of the resource ResourceModel *ResourceModel `json:"resource_model,omitempty" bson:"resource_model,omitempty"` // ResourceModel is the model of the resource
AllowedPeersGroup map[string][]string `json:"allowed_peers_group,omitempty" bson:"allowed_peers_group,omitempty"` // AllowedPeersGroup maps each allowed peer ID to the groups granted access
Price string `json:"price,omitempty" bson:"price,omitempty"` // Price is the price of access to the resource
Currency string `json:"currency,omitempty" bson:"currency,omitempty"` // Currency is the currency of the price
}
func (ao *AbstractResource) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return nil
}
func (abs *AbstractResource) SetResourceModel(model *ResourceModel) {
abs.ResourceModel = model
}
func (abs *AbstractResource) VerifyAuth(username string, peerID string, groups []string) bool {
if grps, ok := abs.AllowedPeersGroup[peerID]; ok || config.GetConfig().Whitelist {
if (ok && slices.Contains(grps, "*")) || (!ok && config.GetConfig().Whitelist) {
return true
}
for _, grp := range grps {
if slices.Contains(groups, grp) {
return true
}
}
}
return abs.AbstractObject.VerifyAuth(username, peerID, groups)
} }
/* /*
@ -68,6 +97,13 @@ func (abs *AbstractResource) GetModelReadOnly(cat string, key string) interface{
return abs.ResourceModel.Model[cat][key].ReadOnly return abs.ResourceModel.Model[cat][key].ReadOnly
} }
func (d *AbstractResource) Trim() *AbstractResource {
if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: d.PeerID}}).IsMySelf(); !ok {
d.AllowedPeersGroup = map[string][]string{}
}
return d
}
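To make the new whitelist semantics concrete, here is a small sketch of VerifyAuth; the peer ID, groups and ACL are hypothetical, and the outcome of the whitelist branch depends on the Whitelist flag in the global config:

package main

import (
	"fmt"

	"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
)

func main() {
	res := &resource_model.AbstractResource{
		// Hypothetical ACL: peer "peer-123" is allowed through group "dev" (or "*" for any group).
		AllowedPeersGroup: map[string][]string{"peer-123": {"dev"}},
	}

	// Granted: the caller's peer is listed and shares the "dev" group.
	fmt.Println(res.VerifyAuth("alice", "peer-123", []string{"dev"}))

	// Falls through to the base AbstractObject check (creator or Public access mode),
	// unless whitelist mode lets unknown peers in.
	fmt.Println(res.VerifyAuth("alice", "peer-999", []string{"ops"}))
}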
type Model struct { type Model struct {
Type string `json:"type,omitempty" bson:"type,omitempty"` // Type is the type of the model Type string `json:"type,omitempty" bson:"type,omitempty"` // Type is the type of the model
ReadOnly bool `json:"readonly,omitempty" bson:"readonly,omitempty"` // ReadOnly is the readonly of the model ReadOnly bool `json:"readonly,omitempty" bson:"readonly,omitempty"` // ReadOnly is the readonly of the model
@ -89,7 +125,7 @@ func (ao *ResourceModel) GetID() string {
return ao.UUID return ao.UUID
} }
func (ao *ResourceModel) UpToDate() {} func (ao *ResourceModel) UpToDate(user string, create bool) {}
func (r *ResourceModel) GenerateID() { func (r *ResourceModel) GenerateID() {
r.UUID = uuid.New().String() r.UUID = uuid.New().String()
@ -99,24 +135,34 @@ func (d *ResourceModel) GetName() string {
return d.UUID return d.UUID
} }
func (d *ResourceModel) GetAccessor(caller *tools.HTTPCaller) utils.Accessor { func (abs *ResourceModel) VerifyAuth(username string, peerID string, groups []string) bool {
data := &ResourceModelMongoAccessor{} return true
data.Init(tools.RESOURCE_MODEL, caller)
return data
} }
func (dma *ResourceModel) Deserialize(j map[string]interface{}) utils.DBObject { func (d *ResourceModel) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return &ResourceModelMongoAccessor{
utils.AbstractAccessor{
Type: tools.RESOURCE_MODEL,
PeerID: peerID,
Groups: groups,
User: username,
Caller: caller,
},
}
}
func (dma *ResourceModel) Deserialize(j map[string]interface{}, obj utils.DBObject) utils.DBObject {
b, err := json.Marshal(j) b, err := json.Marshal(j)
if err != nil { if err != nil {
return nil return nil
} }
json.Unmarshal(b, dma) json.Unmarshal(b, obj)
return dma return obj
} }
func (dma *ResourceModel) Serialize() map[string]interface{} { func (dma *ResourceModel) Serialize(obj utils.DBObject) map[string]interface{} {
var m map[string]interface{} var m map[string]interface{}
b, err := json.Marshal(dma) b, err := json.Marshal(obj)
if err != nil { if err != nil {
return nil return nil
} }

View File

@ -0,0 +1,62 @@
package resource_model
import (
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)
type ResourceModelMongoAccessor struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
}
/*
* Nothing special here, just the basic CRUD operations
*/
func New() *ResourceModelMongoAccessor {
return &ResourceModelMongoAccessor{
utils.AbstractAccessor{
Type: tools.RESOURCE_MODEL,
Logger: logs.CreateLogger(tools.RESOURCE_MODEL.String()),
},
}
}
func (wfa *ResourceModelMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
return utils.GenericDeleteOne(id, wfa)
}
func (wfa *ResourceModelMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
return utils.GenericUpdateOne(set, id, wfa, &ResourceModel{})
}
func (wfa *ResourceModelMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
return utils.GenericStoreOne(data, wfa)
}
func (wfa *ResourceModelMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return utils.GenericStoreOne(data, wfa)
}
func (a *ResourceModelMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
return utils.GenericLoadOne[*ResourceModel](id, func(d utils.DBObject) (utils.DBObject, int, error) {
return d, 200, nil
}, a)
}
func (a *ResourceModelMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
return utils.GenericLoadAll[*ResourceModel](func(d utils.DBObject) utils.ShallowDBObject {
return d
}, a)
}
func (a *ResourceModelMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
return utils.GenericSearch[*ResourceModel](filters, search,
&dbs.Filters{
Or: map[string][]dbs.Filter{
"resource_type": {{Operator: dbs.LIKE.String(), Value: search}},
},
}, func(d utils.DBObject) utils.ShallowDBObject { return d }, a)
}
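A short sketch of how this accessor is typically queried: the generic resource accessors look up the ResourceModel describing their data type by searching on resource_type (a populated RESOURCE_MODEL collection is assumed):

package main

import (
	"fmt"

	"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
	"cloud.o-forge.io/core/oc-lib/tools"
)

func main() {
	rm := resource_model.New()
	models, code, err := rm.Search(nil, tools.PROCESSING_RESOURCE.String()) // LIKE-match on resource_type
	if err == nil && len(models) > 0 {
		fmt.Println("model found, status", code)
	}
}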

View File

@ -1,9 +1,7 @@
package storage package resources
import ( import (
"encoding/json" "cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
) )
@ -58,27 +56,6 @@ type StorageResource struct {
Throughput string `bson:"throughput,omitempty" json:"throughput,omitempty"` // Throughput is the throughput of the storage Throughput string `bson:"throughput,omitempty" json:"throughput,omitempty"` // Throughput is the throughput of the storage
} }
func (dma *StorageResource) Deserialize(j map[string]interface{}) utils.DBObject { func (d *StorageResource) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
b, err := json.Marshal(j) return New[*StorageResource](tools.STORAGE_RESOURCE, username, peerID, groups, caller, func() utils.DBObject { return &StorageResource{} }) // Create a new instance of the accessor
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}
func (dma *StorageResource) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}
func (d *StorageResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.STORAGE_RESOURCE, caller) // Initialize the accessor with the STORAGE_RESOURCE model type
return data
} }

View File

@ -1,114 +0,0 @@
package storage
import (
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
)
type storageMongoAccessor struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
}
// New creates a new instance of the storageMongoAccessor
func New() *storageMongoAccessor {
return &storageMongoAccessor{}
}
/*
* Nothing special here, just the basic CRUD operations
*/
func (sma *storageMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
return sma.GenericDeleteOne(id, sma)
}
func (sma *storageMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
set.(*StorageResource).ResourceModel = nil
return sma.GenericUpdateOne(set, id, sma, &StorageResource{})
}
func (sma *storageMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
data.(*StorageResource).ResourceModel = nil
return sma.GenericStoreOne(data, sma)
}
func (sma *storageMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return sma.GenericStoreOne(data, sma)
}
func (sma *storageMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
var storage StorageResource
res_mongo, code, err := mongo.MONGOService.LoadOne(id, sma.GetType())
if err != nil {
sma.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}
res_mongo.Decode(&storage)
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, sma.GetType())
if err == nil && len(resources) > 0 {
storage.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
return &storage, 200, nil
}
func (wfa storageMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
var results []StorageResource
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results {
if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
objs = append(objs, &r) // only get the abstract resource !
}
return objs, 200, nil
}
// Search searches for storage resources in the database, given some filters OR a search string
func (wfa *storageMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
},
}
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err
}
var results []StorageResource
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results {
if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
objs = append(objs, &r) // only get the abstract resource !
}
return objs, 200, nil
}

View File

@ -1,46 +0,0 @@
package storage
import (
"testing"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
"github.com/stretchr/testify/assert"
)
func TestStoreOneStorage(t *testing.T) {
s := StorageResource{Size: 123, WebResource: resource_model.WebResource{Protocol: "http", Path: "azerty.fr"},
AbstractResource: resource_model.AbstractResource{
AbstractObject: utils.AbstractObject{Name: "testData"},
Description: "Lorem Ipsum",
Logo: "azerty.com",
Owner: "toto",
OwnerLogo: "totoLogo",
SourceUrl: "azerty.fr",
},
}
sma := New()
id, _, _ := sma.StoreOne(&s)
assert.NotEmpty(t, id)
}
func TestLoadOneStorage(t *testing.T) {
s := StorageResource{Size: 123, WebResource: resource_model.WebResource{Protocol: "http", Path: "azerty.fr"},
AbstractResource: resource_model.AbstractResource{
AbstractObject: utils.AbstractObject{Name: "testData"},
Description: "Lorem Ipsum",
Logo: "azerty.com",
Owner: "toto",
OwnerLogo: "totoLogo",
SourceUrl: "azerty.fr",
},
}
sma := New()
new_s, _, _ := sma.StoreOne(&s)
assert.Equal(t, s, new_s)
}

View File

@ -0,0 +1,18 @@
package resources
import (
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)
// WorkflowResource is a struct that represents a workflow resource
// it defines the resource workflow
type WorkflowResource struct {
resource_model.AbstractResource
WorkflowID string `bson:"workflow_id,omitempty" json:"workflow_id,omitempty"` // WorkflowID is the ID of the native workflow
}
func (d *WorkflowResource) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return New[*WorkflowResource](tools.WORKFLOW_RESOURCE, username, peerID, groups, caller, func() utils.DBObject { return &WorkflowResource{} }) // Create a new instance of the accessor
}
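A brief sketch of exposing a native workflow as a resource through the new accessor; the workflow ID and identity values are hypothetical:

package main

import (
	"fmt"

	"cloud.o-forge.io/core/oc-lib/models/resources"
)

func main() {
	wr := &resources.WorkflowResource{WorkflowID: "native-workflow-id"} // hypothetical native workflow
	wr.Name = "published pipeline"

	acc := wr.GetAccessor("alice", "peer-123", []string{"dev"}, nil)
	hits, _, _ := acc.Search(nil, "published") // LIKE search over the abstract-resource fields
	fmt.Println(len(hits), wr.WorkflowID)
}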

View File

@ -1,41 +0,0 @@
package oclib
import (
"encoding/json"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)
// WorkflowResource is a struct that represents a workflow resource
// it defines the resource workflow
type WorkflowResource struct {
resource_model.AbstractResource
WorkflowID string `bson:"workflow_id,omitempty" json:"workflow_id,omitempty"` // WorkflowID is the ID of the native workflow
}
func (d *WorkflowResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.WORKFLOW_RESOURCE, caller) // Initialize the accessor with the WORKFLOW_RESOURCE model type
return data
}
func (dma *WorkflowResource) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}
func (dma *WorkflowResource) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}

View File

@ -1,113 +0,0 @@
package oclib
import (
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
)
type workflowResourceMongoAccessor struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
}
func New() *workflowResourceMongoAccessor {
return &workflowResourceMongoAccessor{}
}
func (wfa *workflowResourceMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
return wfa.GenericDeleteOne(id, wfa)
}
func (wfa *workflowResourceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
set.(*WorkflowResource).ResourceModel = nil
return wfa.GenericUpdateOne(set, id, wfa, &WorkflowResource{})
}
func (wfa *workflowResourceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
data.(*WorkflowResource).ResourceModel = nil
return wfa.GenericStoreOne(data, wfa)
}
func (wfa *workflowResourceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
res, _, _ := wfa.LoadOne(data.GetID())
data.(*WorkflowResource).WorkflowID = data.GetID()
if res == nil {
return wfa.GenericStoreOne(data, wfa)
} else {
data.(*WorkflowResource).UUID = res.GetID()
return wfa.GenericUpdateOne(data, res.GetID(), wfa, &WorkflowResource{})
}
}
func (wfa *workflowResourceMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
var workflow WorkflowResource
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}
res_mongo.Decode(&workflow)
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, wfa.GetType())
if err == nil && len(resources) > 0 {
workflow.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
return &workflow, 200, nil
}
func (wfa workflowResourceMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
var results []WorkflowResource
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results {
if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
objs = append(objs, &r)
}
return objs, 200, nil
}
// Search searches for workflow resources in the database, given some filters OR a search string
func (wfa *workflowResourceMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
},
}
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err
}
var results []WorkflowResource
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results {
if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
objs = append(objs, &r)
}
return objs, 200, nil
}

View File

@ -1,43 +0,0 @@
package oclib
import (
"testing"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
"github.com/stretchr/testify/assert"
)
func TestStoreOneWorkflow(t *testing.T) {
w := WorkflowResource{AbstractResource: resource_model.AbstractResource{
AbstractObject: utils.AbstractObject{Name: "testWorkflow"},
Description: "Lorem Ipsum",
Logo: "azerty.com",
Owner: "toto",
OwnerLogo: "totoLogo",
SourceUrl: "azerty.fr",
},
}
wma := New()
id, _, _ := wma.StoreOne(&w)
assert.NotEmpty(t, id)
}
func TestLoadOneWorkflow(t *testing.T) {
w := WorkflowResource{AbstractResource: resource_model.AbstractResource{
AbstractObject: utils.AbstractObject{Name: "testWorkflow"},
Description: "Lorem Ipsum",
Logo: "azerty.com",
Owner: "toto",
OwnerLogo: "totoLogo",
SourceUrl: "azerty.fr",
},
}
wma := New()
new_w, _, _ := wma.StoreOne(&w)
assert.Equal(t, w, new_w)
}

View File

@ -3,69 +3,40 @@ package utils
import ( import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt"
"time" "time"
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo" "cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
"github.com/go-playground/validator/v10" "github.com/go-playground/validator/v10"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/rs/zerolog" "github.com/rs/zerolog"
mgb "go.mongodb.org/mongo-driver/mongo"
) )
// single instance of the validator used in every model Struct to validate the fields // single instance of the validator used in every model Struct to validate the fields
var validate = validator.New(validator.WithRequiredStructEnabled()) var validate = validator.New(validator.WithRequiredStructEnabled())
type AccessMode int
const (
Private AccessMode = iota
Public
)
/* /*
* AbstractObject is a struct that represents the basic fields of an object * AbstractObject is a struct that represents the basic fields of an object
* it defines the object id and name * it defines the object id and name
* every data in base root model should inherit from this struct (only exception is the ResourceModel) * every data in base root model should inherit from this struct (only exception is the ResourceModel)
*/ */
type AbstractObject struct { type AbstractObject struct {
UUID string `json:"id,omitempty" bson:"id,omitempty" validate:"required"` UUID string `json:"id,omitempty" bson:"id,omitempty" validate:"required"`
Name string `json:"name,omitempty" bson:"name,omitempty" validate:"required"` Name string `json:"name,omitempty" bson:"name,omitempty" validate:"required"`
UpdateDate time.Time `json:"update_date" bson:"update_date"` UpdateDate time.Time `json:"update_date" bson:"update_date"`
LastPeerWriter string `json:"last_peer_writer" bson:"last_peer_writer"` LastPeerWriter string `json:"last_peer_writer" bson:"last_peer_writer"`
} CreatorID string `json:"creator_id" bson:"creator_id" default:"unknown"`
AccessMode AccessMode `json:"access_mode" bson:"access_mode" default:"0"`
// GetID returns the id of the object (abstract)
func (ao *AbstractObject) GetID() string {
return ao.UUID
}
// GetName returns the name of the object (abstract)
func (ao *AbstractObject) GetName() string {
return ao.Name
}
func (ao *AbstractObject) UpToDate() {
ao.UpdateDate = time.Now()
// ao.LastPeerWriter, _ = static.GetMyLocalJsonPeer()
}
// GetAccessor returns the accessor of the object (abstract)
func (dma *AbstractObject) GetAccessor(caller *tools.HTTPCaller) Accessor {
return nil
}
func (dma *AbstractObject) Deserialize(j map[string]interface{}) DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}
func (dma *AbstractObject) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
} }
func (r *AbstractObject) GenerateID() { func (r *AbstractObject) GenerateID() {
@ -74,13 +45,83 @@ func (r *AbstractObject) GenerateID() {
} }
} }
type AbstractAccessor struct { // GetID implements ShallowDBObject.
Logger zerolog.Logger // Logger is the logger of the accessor, it's a specilized logger for the accessor func (ao AbstractObject) GetID() string {
Type string // Type is the data type of the accessor return ao.UUID
Caller *tools.HTTPCaller // Caller is the http caller of the accessor (optionnal) only need in a peer connection
} }
func (dma *AbstractAccessor) GetType() string { // GetName implements ShallowDBObject.
func (ao AbstractObject) GetName() string {
return ao.Name
}
func (ao *AbstractObject) UpToDate(user string, create bool) {
ao.UpdateDate = time.Now()
ao.LastPeerWriter = user
if create {
ao.CreatorID = user
}
}
func (ao *AbstractObject) VerifyAuth(username string, peerID string, groups []string) bool {
return ao.AccessMode == Public || ao.CreatorID == username
}
func (ao *AbstractObject) GetObjectFilters(search string) *dbs.Filters {
return &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
}}
}
func (dma *AbstractObject) Deserialize(j map[string]interface{}, obj DBObject) DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, obj)
return obj
}
func (dma *AbstractObject) Serialize(obj DBObject) map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(obj)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}
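The lifecycle of the new AbstractObject fields, as a sketch (names are hypothetical; the default access mode is Private):

package main

import (
	"fmt"

	"cloud.o-forge.io/core/oc-lib/models/utils"
)

func main() {
	obj := &utils.AbstractObject{Name: "example"}
	obj.GenerateID()            // assigns a fresh UUID
	obj.UpToDate("alice", true) // stamps UpdateDate, LastPeerWriter and, on creation, CreatorID

	fmt.Println(obj.VerifyAuth("alice", "peer-123", nil)) // true: alice is the creator
	fmt.Println(obj.VerifyAuth("bob", "peer-123", nil))   // false unless AccessMode is Public
}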
type AbstractAccessor struct {
Logger zerolog.Logger // Logger is the logger of the accessor, it's a specialized logger for the accessor
Type tools.DataType // Type is the data type of the accessor
Caller *tools.HTTPCaller // Caller is the http caller of the accessor (optional), only needed in a peer connection
PeerID string // PeerID is the id of the peer
Groups []string // Groups is the list of groups that can access the accessor
User string // User is the user that is using the accessor
ResourceModelAccessor Accessor
}
func (dma *AbstractAccessor) GetUser() string {
return dma.User
}
func (dma *AbstractAccessor) GetPeerID() string {
return dma.PeerID
}
func (dma *AbstractAccessor) GetGroups() []string {
return dma.Groups
}
func (dma *AbstractAccessor) GetLogger() *zerolog.Logger {
return &dma.Logger
}
func (dma *AbstractAccessor) VerifyAuth() string {
return ""
}
func (dma *AbstractAccessor) GetType() tools.DataType {
return dma.Type return dma.Type
} }
@ -88,16 +129,10 @@ func (dma *AbstractAccessor) GetCaller() *tools.HTTPCaller {
return dma.Caller return dma.Caller
} }
// Init initializes the accessor with the data type and the http caller
func (dma *AbstractAccessor) Init(t tools.DataType, caller *tools.HTTPCaller) {
dma.Logger = logs.CreateLogger(t.String()) // Create a logger with the data type
dma.Caller = caller // Set the caller
dma.Type = t.String() // Set the data type
}
// GenericLoadOne loads one object from the database (generic) // GenericLoadOne loads one object from the database (generic)
func (wfa *AbstractAccessor) GenericStoreOne(data DBObject, accessor Accessor) (DBObject, int, error) { func GenericStoreOne(data DBObject, a Accessor) (DBObject, int, error) {
data.GenerateID() data.GenerateID()
data.UpToDate(a.GetUser(), true)
f := dbs.Filters{ f := dbs.Filters{
Or: map[string][]dbs.Filter{ Or: map[string][]dbs.Filter{
"abstractresource.abstractobject.name": {{ "abstractresource.abstractobject.name": {{
@ -110,31 +145,37 @@ func (wfa *AbstractAccessor) GenericStoreOne(data DBObject, accessor Accessor) (
}}, }},
}, },
} }
if cursor, _, _ := accessor.Search(&f, ""); len(cursor) > 0 { if !data.VerifyAuth(a.GetUser(), a.GetPeerID(), a.GetGroups()) {
return nil, 409, errors.New(accessor.GetType() + " with name " + data.GetName() + " already exists") return nil, 403, errors.New("you are not allowed to access this resource")
}
if cursor, _, _ := a.Search(&f, ""); len(cursor) > 0 {
return nil, 409, errors.New(a.GetType().String() + " with name " + data.GetName() + " already exists")
} }
err := validate.Struct(data) err := validate.Struct(data)
if err != nil { if err != nil {
return nil, 422, err return nil, 422, err
} }
id, code, err := mongo.MONGOService.StoreOne(data, data.GetID(), wfa.GetType()) id, code, err := mongo.MONGOService.StoreOne(data, data.GetID(), a.GetType().String())
if err != nil { if err != nil {
wfa.Logger.Error().Msg("Could not store " + data.GetName() + " to db. Error: " + err.Error()) a.GetLogger().Error().Msg("Could not store " + data.GetName() + " to db. Error: " + err.Error())
return nil, code, err return nil, code, err
} }
return accessor.LoadOne(id) return a.LoadOne(id)
} }
// GenericLoadOne loads one object from the database (generic) // GenericLoadOne loads one object from the database (generic)
func (dma *AbstractAccessor) GenericDeleteOne(id string, accessor Accessor) (DBObject, int, error) { func GenericDeleteOne(id string, a Accessor) (DBObject, int, error) {
res, code, err := accessor.LoadOne(id) res, code, err := a.LoadOne(id)
if err != nil { if err != nil {
dma.Logger.Error().Msg("Could not retrieve " + id + " to db. Error: " + err.Error()) a.GetLogger().Error().Msg("Could not retrieve " + id + " to db. Error: " + err.Error())
return nil, code, err return nil, code, err
} }
_, code, err = mongo.MONGOService.DeleteOne(id, accessor.GetType()) if !res.VerifyAuth(a.GetUser(), a.GetPeerID(), a.GetGroups()) {
return nil, 403, errors.New("you are not allowed to access this resource")
}
_, code, err = mongo.MONGOService.DeleteOne(id, a.GetType().String())
if err != nil { if err != nil {
dma.Logger.Error().Msg("Could not delete " + id + " to db. Error: " + err.Error()) a.GetLogger().Error().Msg("Could not delete " + id + " to db. Error: " + err.Error())
return nil, code, err return nil, code, err
} }
return res, 200, nil return res, 200, nil
@ -142,32 +183,84 @@ func (dma *AbstractAccessor) GenericDeleteOne(id string, accessor Accessor) (DBO
// GenericLoadOne loads one object from the database (generic) // GenericLoadOne loads one object from the database (generic)
// json expected in entry is a flatted object no need to respect the inheritance hierarchy // json expected in entry is a flatted object no need to respect the inheritance hierarchy
func (dma *AbstractAccessor) GenericUpdateOne(set DBObject, id string, accessor Accessor, new DBObject) (DBObject, int, error) { func GenericUpdateOne(set DBObject, id string, a Accessor, new DBObject) (DBObject, int, error) {
r, c, err := accessor.LoadOne(id) r, c, err := a.LoadOne(id)
if err != nil { if err != nil {
return nil, c, err return nil, c, err
} }
change := set.Serialize() // get the changes r.UpToDate(a.GetUser(), false)
loaded := r.Serialize() // get the loaded object if !r.VerifyAuth(a.GetUser(), a.GetPeerID(), a.GetGroups()) {
return nil, 403, errors.New("you are not allowed to access this resource")
}
change := set.Serialize(set) // get the changes
loaded := r.Serialize(r) // get the loaded object
for k, v := range change { // apply the changes, with a flatten method for k, v := range change { // apply the changes, with a flatten method
loaded[k] = v loaded[k] = v
} }
id, code, err := mongo.MONGOService.UpdateOne(new.Deserialize(loaded), id, accessor.GetType()) id, code, err := mongo.MONGOService.UpdateOne(new.Deserialize(loaded, new), id, a.GetType().String())
if err != nil { if err != nil {
dma.Logger.Error().Msg("Could not update " + id + " to db. Error: " + err.Error()) a.GetLogger().Error().Msg("Could not update " + id + " to db. Error: " + err.Error())
return nil, code, err return nil, code, err
} }
return accessor.LoadOne(id) return a.LoadOne(id)
}
func GenericLoadOne[T DBObject](id string, f func(DBObject) (DBObject, int, error), a Accessor) (DBObject, int, error) {
var data T
res_mongo, code, err := mongo.MONGOService.LoadOne(id, a.GetType().String())
if err != nil {
a.GetLogger().Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}
res_mongo.Decode(&data)
if !data.VerifyAuth(a.GetUser(), a.GetPeerID(), a.GetGroups()) {
return nil, 403, errors.New("you are not allowed to access this resource")
}
return f(data)
}
func genericLoadAll[T DBObject](res *mgb.Cursor, code int, err error, f func(DBObject) ShallowDBObject, a Accessor) ([]ShallowDBObject, int, error) {
objs := []ShallowDBObject{}
var results []T
if err != nil {
a.GetLogger().Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
if err = res.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
if !r.VerifyAuth(a.GetUser(), a.GetPeerID(), a.GetGroups()) {
continue
}
objs = append(objs, f(r))
}
return objs, 200, nil
}
func GenericLoadAll[T DBObject](f func(DBObject) ShallowDBObject, wfa Accessor) ([]ShallowDBObject, int, error) {
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType().String())
fmt.Println("res_mongo", res_mongo)
return genericLoadAll[T](res_mongo, code, err, f, wfa)
}
func GenericSearch[T DBObject](filters *dbs.Filters, search string, defaultFilters *dbs.Filters,
f func(DBObject) ShallowDBObject, wfa Accessor) ([]ShallowDBObject, int, error) {
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = defaultFilters
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType().String())
return genericLoadAll[T](res_mongo, code, err, f, wfa)
} }
// GenericLoadOne loads one object from the database (generic) // GenericLoadOne loads one object from the database (generic)
// json expected in entry is a flatted object no need to respect the inheritance hierarchy // json expected in entry is a flatted object no need to respect the inheritance hierarchy
func (dma *AbstractAccessor) GenericRawUpdateOne(set DBObject, id string, accessor Accessor) (DBObject, int, error) { func GenericRawUpdateOne(set DBObject, id string, a Accessor) (DBObject, int, error) {
id, code, err := mongo.MONGOService.UpdateOne(set, id, accessor.GetType()) id, code, err := mongo.MONGOService.UpdateOne(set, id, a.GetType().String())
if err != nil { if err != nil {
dma.Logger.Error().Msg("Could not update " + id + " to db. Error: " + err.Error()) a.GetLogger().Error().Msg("Could not update " + id + " to db. Error: " + err.Error())
return nil, code, err return nil, code, err
} }
return accessor.LoadOne(id) return a.LoadOne(id)
} }
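The former AbstractAccessor methods are now package-level generics parameterized by the concrete type; any accessor that satisfies the Accessor interface can delegate to them. A minimal sketch using the resource-model accessor shown earlier in this change (a reachable database is assumed):

package main

import (
	"fmt"

	"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
	"cloud.o-forge.io/core/oc-lib/models/utils"
)

func main() {
	a := resource_model.New()
	// Decode every document of the accessor's collection into *ResourceModel,
	// skip entries the caller may not see, and return the shallow view.
	objs, code, err := utils.GenericLoadAll[*resource_model.ResourceModel](
		func(d utils.DBObject) utils.ShallowDBObject { return d }, a)
	fmt.Println(len(objs), code, err)
}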

View File

@ -3,6 +3,7 @@ package utils
import ( import (
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
"github.com/rs/zerolog"
) )
// ShallowDBObject is an interface that defines the basic methods shallowed version of a DBObject // ShallowDBObject is an interface that defines the basic methods shallowed version of a DBObject
@ -10,8 +11,8 @@ type ShallowDBObject interface {
GenerateID() GenerateID()
GetID() string GetID() string
GetName() string GetName() string
Deserialize(j map[string]interface{}) DBObject Deserialize(j map[string]interface{}, obj DBObject) DBObject
Serialize() map[string]interface{} Serialize(obj DBObject) map[string]interface{}
} }
// DBObject is an interface that defines the basic methods for a DBObject // DBObject is an interface that defines the basic methods for a DBObject
@ -19,16 +20,20 @@ type DBObject interface {
GenerateID() GenerateID()
GetID() string GetID() string
GetName() string GetName() string
UpToDate() UpToDate(user string, create bool)
Deserialize(j map[string]interface{}) DBObject VerifyAuth(username string, PeerID string, groups []string) bool
Serialize() map[string]interface{} Deserialize(j map[string]interface{}, obj DBObject) DBObject
GetAccessor(caller *tools.HTTPCaller) Accessor Serialize(obj DBObject) map[string]interface{}
GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) Accessor
} }
// Accessor is an interface that defines the basic methods for an Accessor // Accessor is an interface that defines the basic methods for an Accessor
type Accessor interface { type Accessor interface {
Init(t tools.DataType, caller *tools.HTTPCaller) GetType() tools.DataType
GetType() string GetUser() string
GetPeerID() string
GetGroups() []string
GetLogger() *zerolog.Logger
GetCaller() *tools.HTTPCaller GetCaller() *tools.HTTPCaller
Search(filters *dbs.Filters, search string) ([]ShallowDBObject, int, error) Search(filters *dbs.Filters, search string) ([]ShallowDBObject, int, error)
LoadAll() ([]ShallowDBObject, int, error) LoadAll() ([]ShallowDBObject, int, error)
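Serialize and Deserialize now receive the target object explicitly, so the embedded AbstractObject can (de)serialize the full concrete type rather than only its own fields. A small round-trip sketch (the workflow ID is hypothetical):

package main

import (
	"fmt"

	"cloud.o-forge.io/core/oc-lib/models/resources"
)

func main() {
	w := &resources.WorkflowResource{WorkflowID: "hypothetical-id"}
	w.Name = "example"

	m := w.Serialize(w)                                      // flatten to map[string]interface{}
	clone := w.Deserialize(m, &resources.WorkflowResource{}) // rebuild into a fresh instance
	fmt.Println(clone.GetName())
}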

View File

@ -1,15 +1,13 @@
package workflow package workflow
import ( import (
"encoding/json"
"errors" "errors"
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
"cloud.o-forge.io/core/oc-lib/models/peer" "cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/resources" "cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
"cloud.o-forge.io/core/oc-lib/models/resources/storage"
"cloud.o-forge.io/core/oc-lib/models/resources/workflow/graph"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/models/workflow/graph"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
) )
@ -36,8 +34,8 @@ func (w *AbstractWorkflow) GetWorkflows() (list_computings []graph.GraphItem) {
return return
} }
func (w *AbstractWorkflow) GetComputeByRelatedProcessing(processingID string) []*compute.ComputeResource { func (w *AbstractWorkflow) GetComputeByRelatedProcessing(processingID string) []*resources.ComputeResource {
storages := []*compute.ComputeResource{} storages := []*resources.ComputeResource{}
for _, link := range w.Graph.Links { for _, link := range w.Graph.Links {
nodeID := link.Destination.ID // we considers that the processing is the destination nodeID := link.Destination.ID // we considers that the processing is the destination
node := w.Graph.Items[link.Source.ID].Compute // we are looking for the storage as source node := w.Graph.Items[link.Source.ID].Compute // we are looking for the storage as source
@ -52,8 +50,8 @@ func (w *AbstractWorkflow) GetComputeByRelatedProcessing(processingID string) []
return storages return storages
} }
func (w *AbstractWorkflow) GetStoragesByRelatedProcessing(processingID string) []*storage.StorageResource { func (w *AbstractWorkflow) GetStoragesByRelatedProcessing(processingID string) []*resources.StorageResource {
storages := []*storage.StorageResource{} storages := []*resources.StorageResource{}
for _, link := range w.Graph.Links { for _, link := range w.Graph.Links {
nodeID := link.Destination.ID // we considers that the processing is the destination nodeID := link.Destination.ID // we considers that the processing is the destination
node := w.Graph.Items[link.Source.ID].Storage // we are looking for the storage as source node := w.Graph.Items[link.Source.ID].Storage // we are looking for the storage as source
@ -100,6 +98,20 @@ type Workflow struct {
AbstractWorkflow // AbstractWorkflow contains the basic fields of a workflow AbstractWorkflow // AbstractWorkflow contains the basic fields of a workflow
} }
func (ao *Workflow) VerifyAuth(username string, peerID string, groups []string) bool {
isAuthorized := false
if len(ao.Shared) > 0 {
for _, sharedID := range ao.Shared {
shared, code, _ := shallow_collaborative_area.New(tools.COLLABORATIVE_AREA, username, peerID, groups, nil).LoadOne(sharedID)
if code != 200 || shared == nil {
isAuthorized = false
continue // avoid dereferencing a nil shared area
}
isAuthorized = shared.VerifyAuth(username, peerID, groups)
}
}
return ao.AbstractObject.VerifyAuth(username, peerID, groups) || isAuthorized
}
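In other words, a workflow is readable either through the base object check (creator or Public access mode) or because one of its shared collaborative areas authorizes the caller. A sketch, assuming the import path cloud.o-forge.io/core/oc-lib/models/workflow and a reachable database for the collaborative-area lookup:

package main

import (
	"fmt"

	"cloud.o-forge.io/core/oc-lib/models/workflow"
)

func main() {
	wf := &workflow.Workflow{}
	wf.Shared = []string{"hypothetical-collaborative-area-id"}

	// Loads each shared area and accepts the caller if any of them grants access.
	fmt.Println(wf.VerifyAuth("bob", "peer-123", []string{"analysts"}))
}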
/* /*
* CheckBooking is a function that checks the booking of the workflow on peers (even ourselves) * CheckBooking is a function that checks the booking of the workflow on peers (even ourselves)
*/ */
@ -108,7 +120,7 @@ func (wfa *Workflow) CheckBooking(caller *tools.HTTPCaller) (bool, error) {
if wfa.Graph == nil { // no graph no booking if wfa.Graph == nil { // no graph no booking
return false, nil return false, nil
} }
accessor := (&compute.ComputeResource{}).GetAccessor(nil) accessor := (&resources.ComputeResource{}).GetAccessor("", "", []string{}, caller)
for _, link := range wfa.Graph.Links { for _, link := range wfa.Graph.Links {
if ok, dc_id := wfa.isDCLink(link); ok { // check if the link is a link between a compute and a resource if ok, dc_id := wfa.isDCLink(link); ok { // check if the link is a link between a compute and a resource
dc, code, _ := accessor.LoadOne(dc_id) dc, code, _ := accessor.LoadOne(dc_id)
@ -116,7 +128,7 @@ func (wfa *Workflow) CheckBooking(caller *tools.HTTPCaller) (bool, error) {
continue continue
} }
// CHECK BOOKING ON PEER, compute could be a remote one // CHECK BOOKING ON PEER, compute could be a remote one
peerID := dc.(*compute.ComputeResource).PeerID peerID := dc.(*resources.ComputeResource).PeerID
if peerID == "" { if peerID == "" {
return false, errors.New("no peer id") return false, errors.New("no peer id")
} // no peer id no booking, we need to know where to book } // no peer id no booking, we need to know where to book
@ -129,31 +141,6 @@ func (wfa *Workflow) CheckBooking(caller *tools.HTTPCaller) (bool, error) {
return true, nil return true, nil
} }
func (d *Workflow) GetName() string { func (d *Workflow) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return d.Name return New(tools.WORKFLOW, username, peerID, groups, caller) // Create a new instance of the accessor
}
func (d *Workflow) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.WORKFLOW, caller) // Initialize the accessor with the WORKFLOW model type
return data
}
func (dma *Workflow) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}
func (dma *Workflow) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
} }
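GetName, Serialize and Deserialize move to shared abstractions, and GetAccessor now carries the caller's identity. A hypothetical call-site migration — the username, peer ID and group values are illustrative only:

// Old call site:  accessor := (&Workflow{}).GetAccessor(caller)
// New call site, passing the requesting identity along with the caller:
func loadAsUser(id string) (utils.DBObject, int, error) {
	accessor := (&Workflow{}).GetAccessor("alice", "peer-1", []string{"dev"}, nil)
	return accessor.LoadOne(id)
}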

View File

@ -8,10 +8,8 @@ import (
type WorkflowHistory struct{ Workflow } type WorkflowHistory struct{ Workflow }
func (d *WorkflowHistory) GetAccessor(caller *tools.HTTPCaller) utils.Accessor { func (d *WorkflowHistory) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor return New(tools.WORKSPACE_HISTORY, username, peerID, groups, caller) // Create a new instance of the accessor
data.Init(tools.WORKSPACE_HISTORY, caller) // Initialize the accessor with the WORKSPACE model type
return data
} }
func (r *WorkflowHistory) GenerateID() { func (r *WorkflowHistory) GenerateID() {
r.UUID = uuid.New().String() r.UUID = uuid.New().String()

View File

@ -9,24 +9,42 @@ import (
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo" "cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area" "cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
"cloud.o-forge.io/core/oc-lib/models/peer" "cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/resources" "cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution" "cloud.o-forge.io/core/oc-lib/models/workflow_execution"
"cloud.o-forge.io/core/oc-lib/models/workspace" "cloud.o-forge.io/core/oc-lib/models/workspace"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
cron "github.com/robfig/cron/v3" cron "github.com/robfig/cron"
) )
type workflowMongoAccessor struct { type workflowMongoAccessor struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller) utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
computeResourceAccessor utils.Accessor
collaborativeAreaAccessor utils.Accessor
executionAccessor utils.Accessor
workspaceAccessor utils.Accessor
} }
// New creates a new instance of the workflowMongoAccessor // New creates a new instance of the workflowMongoAccessor
func New() *workflowMongoAccessor { func New(t tools.DataType, username string, peerID string, groups []string, caller *tools.HTTPCaller) *workflowMongoAccessor {
return &workflowMongoAccessor{} return &workflowMongoAccessor{
computeResourceAccessor: (&resources.ComputeResource{}).GetAccessor(username, peerID, groups, nil),
collaborativeAreaAccessor: (&shallow_collaborative_area.ShallowCollaborativeArea{}).GetAccessor(username, peerID, groups, nil),
executionAccessor: (&workflow_execution.WorkflowExecution{}).GetAccessor(username, peerID, groups, nil),
workspaceAccessor: (&workspace.Workspace{}).GetAccessor(username, peerID, groups, nil),
AbstractAccessor: utils.AbstractAccessor{
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
Caller: caller,
PeerID: peerID,
User: username,
Groups: groups, // Set the caller
Type: t,
},
}
} }
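The constructor now receives the caller identity and pre-builds the dependent accessors (compute, collaborative area, execution, workspace) once, instead of each method calling (&X{}).GetAccessor(nil) on the fly. The same pattern condensed, with placeholder names — only the AbstractAccessor fields and the Workspace GetAccessor signature are taken from this diff:

// exampleAccessor shows the dependency layout only; the type and field names
// are placeholders, not part of oc-lib.
type exampleAccessor struct {
	utils.AbstractAccessor
	workspaces utils.Accessor
}

func newExample(t tools.DataType, username, peerID string, groups []string, caller *tools.HTTPCaller) *exampleAccessor {
	return &exampleAccessor{
		workspaces: (&workspace.Workspace{}).GetAccessor(username, peerID, groups, nil),
		AbstractAccessor: utils.AbstractAccessor{
			Logger: logs.CreateLogger(t.String()),
			Caller: caller,
			PeerID: peerID,
			User:   username,
			Groups: groups,
			Type:   t,
		},
	}
}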
/* /*
@ -37,7 +55,7 @@ func New() *workflowMongoAccessor {
* getExecutions is a function that returns the executions of a workflow * getExecutions is a function that returns the executions of a workflow
* it returns an array of workflow_execution.WorkflowExecution * it returns an array of workflow_execution.WorkflowExecution
*/ */
func (wfa *workflowMongoAccessor) getExecutions(id string, data *Workflow) ([]*workflow_execution.WorkflowExecution, error) { func (a *workflowMongoAccessor) getExecutions(id string, data *Workflow) ([]*workflow_execution.WorkflowExecution, error) {
workflows_execution := []*workflow_execution.WorkflowExecution{} workflows_execution := []*workflow_execution.WorkflowExecution{}
if data.Schedule != nil { // only set execution on a scheduled workflow if data.Schedule != nil { // only set execution on a scheduled workflow
if data.Schedule.Start == nil { // if no start date, return an error if data.Schedule.Start == nil { // if no start date, return an error
@ -92,14 +110,14 @@ func (wfa *workflowMongoAccessor) getExecutions(id string, data *Workflow) ([]*w
} }
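getExecutions (collapsed in the hunk above) expands a workflow schedule into dated executions. A standalone sketch of that expansion with the robfig/cron v1 parser imported above; the 6-field spec and the expand helper are illustrative, not the library or accessor code:

package main

import (
	"fmt"
	"time"

	cron "github.com/robfig/cron"
)

// expand turns a cron spec into concrete execution dates between start and
// end, mirroring (as an assumption) what getExecutions does for a schedule.
func expand(spec string, start, end time.Time) ([]time.Time, error) {
	sched, err := cron.Parse(spec) // v1 parser: 6-field spec with seconds
	if err != nil {
		return nil, err
	}
	dates := []time.Time{}
	for t := sched.Next(start); !t.IsZero() && !t.After(end); t = sched.Next(t) {
		dates = append(dates, t)
	}
	return dates, nil
}

func main() {
	start := time.Now().UTC()
	dates, _ := expand("0 0 * * * *", start, start.Add(6*time.Hour)) // top of every hour
	fmt.Println(len(dates), "executions scheduled")
}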
// DeleteOne deletes a workflow from the database, delete depending executions and bookings // DeleteOne deletes a workflow from the database, delete depending executions and bookings
func (wfa *workflowMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) { func (a *workflowMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
wfa.execution(id, &Workflow{ a.execution(id, &Workflow{
AbstractWorkflow: AbstractWorkflow{ScheduleActive: false}, AbstractWorkflow: AbstractWorkflow{ScheduleActive: false},
}, true) // delete the executions }, true) // delete the executions
res, code, err := wfa.GenericDeleteOne(id, wfa) res, code, err := utils.GenericDeleteOne(id, a)
if res != nil && code == 200 { if res != nil && code == 200 {
wfa.execute(res.(*Workflow), true, false) // up to date the workspace for the workflow a.execute(res.(*Workflow), true, false) // up to date the workspace for the workflow
wfa.share(res.(*Workflow), true, wfa.Caller) a.share(res.(*Workflow), true, a.Caller)
} }
return res, code, err return res, code, err
} }
@ -109,15 +127,15 @@ func (wfa *workflowMongoAccessor) DeleteOne(id string) (utils.DBObject, int, err
* it takes the workflow id, the real data and the executions * it takes the workflow id, the real data and the executions
* it returns an error if the booking fails * it returns an error if the booking fails
*/ */
func (wfa *workflowMongoAccessor) book(id string, realData *Workflow, execs []*workflow_execution.WorkflowExecution) error { func (a *workflowMongoAccessor) book(id string, realData *Workflow, execs []*workflow_execution.WorkflowExecution) error {
if wfa.Caller == nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.BOOKING] == nil { if a.Caller == nil || a.Caller.URLS == nil || a.Caller.URLS[tools.BOOKING] == nil {
return errors.New("no caller defined") return errors.New("no caller defined")
} }
methods := wfa.Caller.URLS[tools.BOOKING] methods := a.Caller.URLS[tools.BOOKING]
if _, ok := methods[tools.POST]; !ok { if _, ok := methods[tools.POST]; !ok {
return errors.New("no path found") return errors.New("no path found")
} }
res, code, _ := wfa.LoadOne(id) res, code, _ := a.LoadOne(id)
if code != 200 { if code != 200 {
return errors.New("could not load workflow") return errors.New("could not load workflow")
} }
@ -127,7 +145,6 @@ func (wfa *workflowMongoAccessor) book(id string, realData *Workflow, execs []*w
g = realData.Graph g = realData.Graph
} }
if g != nil && g.Links != nil && len(g.Links) > 0 { // if the graph is set and has links then book the workflow (even on ourselves) if g != nil && g.Links != nil && len(g.Links) > 0 { // if the graph is set and has links then book the workflow (even on ourselves)
accessor := (&compute.ComputeResource{}).GetAccessor(nil)
isDCFound := []string{} isDCFound := []string{}
for _, link := range g.Links { for _, link := range g.Links {
if ok, dc_id := realData.isDCLink(link); ok { // check if the link is a link between a compute and a resource booking is only on compute if ok, dc_id := realData.isDCLink(link); ok { // check if the link is a link between a compute and a resource booking is only on compute
@ -135,12 +152,12 @@ func (wfa *workflowMongoAccessor) book(id string, realData *Workflow, execs []*w
continue continue
} // if the compute is already found, skip it } // if the compute is already found, skip it
isDCFound = append(isDCFound, dc_id) isDCFound = append(isDCFound, dc_id)
dc, code, _ := accessor.LoadOne(dc_id) dc, code, _ := a.computeResourceAccessor.LoadOne(dc_id)
if code != 200 { if code != 200 {
continue continue
} }
// CHECK BOOKING // CHECK BOOKING
peerID := dc.(*compute.ComputeResource).PeerID peerID := dc.(*resources.ComputeResource).PeerID
if peerID == "" { // no peer id no booking if peerID == "" { // no peer id no booking
continue continue
} }
@ -150,7 +167,7 @@ func (wfa *workflowMongoAccessor) book(id string, realData *Workflow, execs []*w
WorkflowID: id, // set the workflow id "WHO" WorkflowID: id, // set the workflow id "WHO"
ResourceID: dc_id, // set the compute id "WHERE" ResourceID: dc_id, // set the compute id "WHERE"
Executions: execs, // set the executions to book "WHAT" Executions: execs, // set the executions to book "WHAT"
}).Serialize(), wfa.Caller) }).Serialize(), a.Caller)
if err != nil { if err != nil {
fmt.Println("BOOKING", err) fmt.Println("BOOKING", err)
return err return err
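book walks the graph links and books each distinct compute once; the isDCFound slice above acts as a seen-set. The same dedup step as a standalone sketch — the graph.Graph and graph.GraphLink type names are assumed from the graph package, the helper itself is hypothetical:

// uniqueComputeIDs collects each compute ID at most once from the graph
// links, mirroring the isDCFound bookkeeping in book.
func uniqueComputeIDs(g *graph.Graph, isDCLink func(l graph.GraphLink) (bool, string)) []string {
	seen := map[string]bool{}
	ids := []string{}
	for _, l := range g.Links {
		if ok, id := isDCLink(l); ok && !seen[id] {
			seen[id] = true
			ids = append(ids, id)
		}
	}
	return ids
}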
@ -164,13 +181,12 @@ func (wfa *workflowMongoAccessor) book(id string, realData *Workflow, execs []*w
/* /*
* share is a function that shares a workflow to the peers if the workflow is shared * share is a function that shares a workflow to the peers if the workflow is shared
*/ */
func (wfa *workflowMongoAccessor) share(realData *Workflow, delete bool, caller *tools.HTTPCaller) { func (a *workflowMongoAccessor) share(realData *Workflow, delete bool, caller *tools.HTTPCaller) {
if realData == nil || realData.Shared == nil || len(realData.Shared) == 0 || caller == nil || caller.Disabled { // no shared no sharing if realData == nil || realData.Shared == nil || len(realData.Shared) == 0 || caller == nil || caller.Disabled { // no shared no sharing
return return
} }
for _, sharedID := range realData.Shared { // loop through the shared ids for _, sharedID := range realData.Shared { // loop through the shared ids
access := (&shallow_collaborative_area.ShallowCollaborativeArea{}).GetAccessor(nil) res, code, _ := a.collaborativeAreaAccessor.LoadOne(sharedID)
res, code, _ := access.LoadOne(sharedID)
if code != 200 { if code != 200 {
continue continue
} }
@ -186,11 +202,12 @@ func (wfa *workflowMongoAccessor) share(realData *Workflow, delete bool, caller
history.StoreOne(history.MapFromWorkflow(res.(*Workflow))) history.StoreOne(history.MapFromWorkflow(res.(*Workflow)))
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKFLOW, tools.DELETE, map[string]interface{}{}, caller) _, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKFLOW, tools.DELETE, map[string]interface{}{}, caller)
} else { // if the workflow is updated, share the update } else { // if the workflow is updated, share the update
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKFLOW, tools.PUT, res.Serialize(), caller) _, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKFLOW, tools.PUT,
res.Serialize(res), caller)
} }
} }
if err != nil { if err != nil {
wfa.Logger.Error().Msg(err.Error()) a.Logger.Error().Msg(err.Error())
} }
} }
} }
@ -198,35 +215,34 @@ func (wfa *workflowMongoAccessor) share(realData *Workflow, delete bool, caller
/* /*
* execution is a create or delete function for the workflow executions depending on the schedule of the workflow * execution is a create or delete function for the workflow executions depending on the schedule of the workflow
*/ */
func (wfa *workflowMongoAccessor) execution(id string, realData *Workflow, delete bool) (int, error) { func (a *workflowMongoAccessor) execution(id string, realData *Workflow, delete bool) (int, error) {
nats := tools.NewNATSCaller() // create a new nats caller because executions are sent to the nats for daemons nats := tools.NewNATSCaller() // create a new nats caller because executions are sent to the nats for daemons
mongo.MONGOService.DeleteMultiple(map[string]interface{}{ mongo.MONGOService.DeleteMultiple(map[string]interface{}{
"state": 1, // only delete the scheduled executions only scheduled if executions are in progress or ended, they should not be deleted for registration "state": 1, // only delete the scheduled executions only scheduled if executions are in progress or ended, they should not be deleted for registration
"workflow_id": id, "workflow_id": id,
}, tools.WORKFLOW_EXECUTION.String()) }, tools.WORKFLOW_EXECUTION.String())
err := wfa.book(id, realData, []*workflow_execution.WorkflowExecution{}) // delete the booking of the workflow on the peers err := a.book(id, realData, []*workflow_execution.WorkflowExecution{}) // delete the booking of the workflow on the peers
fmt.Println("DELETE BOOKING", err) fmt.Println("DELETE BOOKING", err)
nats.SetNATSPub(tools.WORKFLOW.String(), tools.REMOVE, realData) // send the deletion to the nats nats.SetNATSPub(tools.WORKFLOW.String(), tools.REMOVE, realData) // send the deletion to the nats
if err != nil { if err != nil {
return 409, err return 409, err
} }
accessor := (&workflow_execution.WorkflowExecution{}).GetAccessor(nil) execs, err := a.getExecutions(id, realData) // get the executions of the workflow
execs, err := wfa.getExecutions(id, realData) // get the executions of the workflow
if err != nil { if err != nil {
return 422, err return 422, err
} }
if !realData.ScheduleActive || delete { // if the schedule is not active, delete the executions if !realData.ScheduleActive || delete { // if the schedule is not active, delete the executions
execs = []*workflow_execution.WorkflowExecution{} execs = []*workflow_execution.WorkflowExecution{}
} }
err = wfa.book(id, realData, execs) // book the workflow on the peers err = a.book(id, realData, execs) // book the workflow on the peers
fmt.Println("BOOKING", err) fmt.Println("BOOKING", err)
if err != nil { if err != nil {
return 409, err // if the booking fails, return an error for integrity between peers return 409, err // if the booking fails, return an error for integrity between peers
} }
fmt.Println("BOOKING", delete) fmt.Println("BOOKING", delete)
for _, obj := range execs { for _, obj := range execs {
_, code, err := accessor.StoreOne(obj) _, code, err := a.executionAccessor.StoreOne(obj)
fmt.Println("EXEC", code, err) fmt.Println("EXEC", code, err)
if code != 200 { if code != 200 {
return code, err return code, err
@ -237,21 +253,21 @@ func (wfa *workflowMongoAccessor) execution(id string, realData *Workflow, delet
} }
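execution above also announces removals to the daemons over NATS before rebuilding the execution list. That publish call in isolation — the helper name is hypothetical, and the NATS URL is assumed to be wired through the global config:

// notifyRemoval publishes a workflow removal on NATS, reusing the two calls
// visible in execution above.
func notifyRemoval(wf *Workflow) {
	nats := tools.NewNATSCaller()
	nats.SetNATSPub(tools.WORKFLOW.String(), tools.REMOVE, wf)
}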
// UpdateOne updates a workflow in the database // UpdateOne updates a workflow in the database
func (wfa *workflowMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) { func (a *workflowMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
res, code, err := wfa.LoadOne(id) res, code, err := a.LoadOne(id)
if code != 200 { if code != 200 {
return nil, 409, err return nil, 409, err
} }
// avoid the update if the schedule is the same // avoid the update if the schedule is the same
avoid := set.(*Workflow).Schedule == nil || (res.(*Workflow).Schedule != nil && res.(*Workflow).ScheduleActive == set.(*Workflow).ScheduleActive && res.(*Workflow).Schedule.Start == set.(*Workflow).Schedule.Start && res.(*Workflow).Schedule.End == set.(*Workflow).Schedule.End && res.(*Workflow).Schedule.Cron == set.(*Workflow).Schedule.Cron) avoid := set.(*Workflow).Schedule == nil || (res.(*Workflow).Schedule != nil && res.(*Workflow).ScheduleActive == set.(*Workflow).ScheduleActive && res.(*Workflow).Schedule.Start == set.(*Workflow).Schedule.Start && res.(*Workflow).Schedule.End == set.(*Workflow).Schedule.End && res.(*Workflow).Schedule.Cron == set.(*Workflow).Schedule.Cron)
res, code, err = wfa.GenericUpdateOne(set, id, wfa, &Workflow{}) res, code, err = utils.GenericUpdateOne(set, id, a, &Workflow{})
if code != 200 { if code != 200 {
return nil, code, err return nil, code, err
} }
workflow := res.(*Workflow) workflow := res.(*Workflow)
if !avoid { // if the schedule is not avoided, update the executions if !avoid { // if the schedule is not avoided, update the executions
if code, err := wfa.execution(id, workflow, false); code != 200 { if code, err := a.execution(id, workflow, false); code != 200 {
return nil, code, errors.New("could not update the executions : " + err.Error()) return nil, code, errors.New("could not update the executions : " + err.Error())
} }
} }
@ -260,16 +276,16 @@ func (wfa *workflowMongoAccessor) UpdateOne(set utils.DBObject, id string) (util
now := time.Now().UTC() now := time.Now().UTC()
if (workflow.Schedule.End != nil && now.After(*workflow.Schedule.End)) || (workflow.Schedule.End == nil && workflow.Schedule.Start != nil && now.After(*workflow.Schedule.Start)) { // if the start date is passed, then you can book if (workflow.Schedule.End != nil && now.After(*workflow.Schedule.End)) || (workflow.Schedule.End == nil && workflow.Schedule.Start != nil && now.After(*workflow.Schedule.Start)) { // if the start date is passed, then you can book
workflow.ScheduleActive = false workflow.ScheduleActive = false
wfa.GenericRawUpdateOne(workflow, id, wfa) utils.GenericRawUpdateOne(workflow, id, a)
} // if the start date is passed, update the executions } // if the start date is passed, update the executions
} }
wfa.execute(workflow, false, false) // update the workspace for the workflow a.execute(workflow, false, false) // update the workspace for the workflow
wfa.share(workflow, false, wfa.Caller) // share the update to the peers a.share(workflow, false, a.Caller) // share the update to the peers
return res, code, nil return res, code, nil
} }
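The avoid condition above compares the stored and incoming schedules in one long expression. Purely as a readability sketch (not part of the changeset), the same check as a helper; note it compares the Start/End pointers, exactly like the inline version:

// sameSchedule reports whether an update leaves the schedule untouched,
// matching the inline comparison in UpdateOne.
func sameSchedule(prev, next *Workflow) bool {
	if next.Schedule == nil {
		return true
	}
	if prev.Schedule == nil || prev.ScheduleActive != next.ScheduleActive {
		return false
	}
	return prev.Schedule.Start == next.Schedule.Start &&
		prev.Schedule.End == next.Schedule.End &&
		prev.Schedule.Cron == next.Schedule.Cron
}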
// StoreOne stores a workflow in the database // StoreOne stores a workflow in the database
func (wfa *workflowMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) { func (a *workflowMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
d := data.(*Workflow) d := data.(*Workflow)
if d.ScheduleActive && d.Schedule != nil { // if the workflow is scheduled, update the executions if d.ScheduleActive && d.Schedule != nil { // if the workflow is scheduled, update the executions
now := time.Now().UTC() now := time.Now().UTC()
@ -277,45 +293,44 @@ func (wfa *workflowMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject,
d.ScheduleActive = false d.ScheduleActive = false
} // if the start date is passed, update the executions } // if the start date is passed, update the executions
} }
res, code, err := wfa.GenericStoreOne(d, wfa) res, code, err := utils.GenericStoreOne(d, a)
if err != nil || code != 200 { if err != nil || code != 200 {
return nil, code, err return nil, code, err
} }
workflow := res.(*Workflow) workflow := res.(*Workflow)
wfa.share(workflow, false, wfa.Caller) // share the creation to the peers a.share(workflow, false, a.Caller) // share the creation to the peers
//store the executions //store the executions
if code, err := wfa.execution(res.GetID(), workflow, false); err != nil { if code, err := a.execution(res.GetID(), workflow, false); err != nil {
return nil, code, err return nil, code, err
} }
wfa.execute(workflow, false, false) // store the workspace for the workflow a.execute(workflow, false, false) // store the workspace for the workflow
return res, code, nil return res, code, nil
} }
// CopyOne copies a workflow in the database // CopyOne copies a workflow in the database
func (wfa *workflowMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) { func (a *workflowMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return wfa.GenericStoreOne(data, wfa) return utils.GenericStoreOne(data, a)
} }
// execute is a function that executes a workflow // execute is a function that executes a workflow
// it stores the workflow resources in a specific workspace to never have a conflict in UI and logic // it stores the workflow resources in a specific workspace to never have a conflict in UI and logic
func (wfa *workflowMongoAccessor) execute(workflow *Workflow, delete bool, active bool) { func (a *workflowMongoAccessor) execute(workflow *Workflow, delete bool, active bool) {
accessor := (&workspace.Workspace{}).GetAccessor(nil)
filters := &dbs.Filters{ filters := &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by standard workspace name attached to a workflow Or: map[string][]dbs.Filter{ // filter by standard workspace name attached to a workflow
"abstractobject.name": {{dbs.LIKE.String(), workflow.Name + "_workspace"}}, "abstractobject.name": {{Operator: dbs.LIKE.String(), Value: workflow.Name + "_workspace"}},
}, },
} }
resource, _, err := accessor.Search(filters, "") resource, _, err := a.workspaceAccessor.Search(filters, "")
if delete { // if delete is set to true, delete the workspace if delete { // if delete is set to true, delete the workspace
for _, r := range resource { for _, r := range resource {
accessor.DeleteOne(r.GetID()) a.workspaceAccessor.DeleteOne(r.GetID())
} }
return return
} }
if err == nil && len(resource) > 0 { // if the workspace already exists, update it if err == nil && len(resource) > 0 { // if the workspace already exists, update it
accessor.UpdateOne(&workspace.Workspace{ a.workspaceAccessor.UpdateOne(&workspace.Workspace{
Active: active, Active: active,
ResourceSet: resources.ResourceSet{ ResourceSet: resources.ResourceSet{
Datas: workflow.Datas, Datas: workflow.Datas,
@ -326,7 +341,7 @@ func (wfa *workflowMongoAccessor) execute(workflow *Workflow, delete bool, activ
}, },
}, resource[0].GetID()) }, resource[0].GetID())
} else { // if the workspace does not exist, create it } else { // if the workspace does not exist, create it
accessor.StoreOne(&workspace.Workspace{ a.workspaceAccessor.StoreOne(&workspace.Workspace{
Active: active, Active: active,
AbstractObject: utils.AbstractObject{Name: workflow.Name + "_workspace"}, AbstractObject: utils.AbstractObject{Name: workflow.Name + "_workspace"},
ResourceSet: resources.ResourceSet{ ResourceSet: resources.ResourceSet{
@ -340,65 +355,25 @@ func (wfa *workflowMongoAccessor) execute(workflow *Workflow, delete bool, activ
} }
} }
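execute keeps a "<workflow name>_workspace" document in sync: search by name, update it when it exists, otherwise create it. The same search-then-upsert step as a generic sketch over the Accessor interface — the helper name is hypothetical, the methods and filter shape are the ones used above:

// upsertByName finds a document by name and updates it, or stores it when it
// does not exist yet — the step execute performs for "<name>_workspace".
func upsertByName(acc utils.Accessor, name string, obj utils.DBObject) {
	filters := &dbs.Filters{
		Or: map[string][]dbs.Filter{
			"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: name}},
		},
	}
	if found, _, err := acc.Search(filters, ""); err == nil && len(found) > 0 {
		acc.UpdateOne(obj, found[0].GetID())
		return
	}
	acc.StoreOne(obj)
}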
// LoadOne loads a workflow from the database func (a *workflowMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
func (wfa *workflowMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) { return utils.GenericLoadOne[*Workflow](id, func(d utils.DBObject) (utils.DBObject, int, error) {
var workflow Workflow w := d.(*Workflow)
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType()) if w.ScheduleActive && w.Schedule != nil { // if the workflow is scheduled, update the executions
if err != nil { now := time.Now().UTC()
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error()) if (w.Schedule.End != nil && now.After(*w.Schedule.End)) || (w.Schedule.End == nil && w.Schedule.Start != nil && now.After(*w.Schedule.Start)) { // if the start date is passed, then you can book
return nil, code, err w.ScheduleActive = false
} utils.GenericRawUpdateOne(d, id, a)
res_mongo.Decode(&workflow) } // if the start date is passed, update the executions
if workflow.ScheduleActive && workflow.Schedule != nil { // if the workflow is scheduled, update the executions
now := time.Now().UTC()
if (workflow.Schedule.End != nil && now.After(*workflow.Schedule.End)) || (workflow.Schedule.End == nil && workflow.Schedule.Start != nil && now.After(*workflow.Schedule.Start)) { // if the start date is passed, then you can book
workflow.ScheduleActive = false
wfa.GenericRawUpdateOne(&workflow, id, wfa)
} // if the start date is passed, update the executions
}
wfa.execute(&workflow, false, true) // if no workspace is attached to the workflow, create it
return &workflow, 200, nil
}
// LoadAll loads all the workflows from the database
func (wfa workflowMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
var results []Workflow
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
objs = append(objs, &r.AbstractObject) // only AbstractObject fields !
}
return objs, 200, nil
}
func (wfa *workflowMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
},
} }
} a.execute(w, false, true) // if no workspace is attached to the workflow, create it
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType()) return d, 200, nil
if err != nil { }, a)
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error()) }
return nil, code, err
} func (a *workflowMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
var results []Workflow return utils.GenericLoadAll[*Workflow](func(d utils.DBObject) utils.ShallowDBObject { return &d.(*Workflow).AbstractObject }, a)
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil { }
return nil, 404, err
} func (a *workflowMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
for _, r := range results { return utils.GenericSearch[*Workflow](filters, search, (&Workflow{}).GetObjectFilters(search), func(d utils.DBObject) utils.ShallowDBObject { return d }, a)
objs = append(objs, &r)
}
return objs, 200, nil
} }
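LoadOne, LoadAll and Search above now delegate to utils.GenericLoadOne / GenericLoadAll / GenericSearch, passing a callback that post-processes each decoded document (deactivating an expired schedule, attaching the workspace). A simplified stand-in — not the utils implementation — just to show the shape of that callback style:

// loadOneWith decodes a document and hands it to a post-processing callback,
// which is the contract the accessors above rely on.
func loadOneWith[T any](decode func(id string) (T, int, error), post func(T) (T, int, error), id string) (T, int, error) {
	obj, code, err := decode(id)
	if err != nil || code != 200 {
		return obj, code, err
	}
	return post(obj)
}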

View File

@ -4,6 +4,7 @@ import (
"testing" "testing"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@ -12,7 +13,7 @@ func TestStoreOneWorkflow(t *testing.T) {
AbstractObject: utils.AbstractObject{Name: "testWorkflow"}, AbstractObject: utils.AbstractObject{Name: "testWorkflow"},
} }
wma := New() wma := New(tools.WORKFLOW, "", "", nil, nil)
id, _, _ := wma.StoreOne(&w) id, _, _ := wma.StoreOne(&w)
assert.NotEmpty(t, id) assert.NotEmpty(t, id)
@ -23,7 +24,7 @@ func TestLoadOneWorkflow(t *testing.T) {
AbstractObject: utils.AbstractObject{Name: "testWorkflow"}, AbstractObject: utils.AbstractObject{Name: "testWorkflow"},
} }
wma := New() wma := New(tools.WORKFLOW, "", "", nil, nil)
new_w, _, _ := wma.StoreOne(&w) new_w, _, _ := wma.StoreOne(&w)
assert.Equal(t, w, new_w) assert.Equal(t, w, new_w)
} }
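A possible companion test for the new constructor signature, in the same style as the two above; it is not part of this changeset and, like them, assumes a reachable test database behind the accessor:

func TestSearchWorkflowByName(t *testing.T) {
	wma := New(tools.WORKFLOW, "", "", nil, nil)
	res, code, err := wma.Search(nil, "testWorkflow")
	assert.Nil(t, err)
	assert.Equal(t, 200, code)
	assert.NotNil(t, res)
}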

View File

@ -107,10 +107,6 @@ func (wfa *WorkflowExecution) ArgoStatusToState(status string) *WorkflowExecutio
return wfa return wfa
} }
func (ao *WorkflowExecution) GetID() string {
return ao.UUID
}
func (r *WorkflowExecution) GenerateID() { func (r *WorkflowExecution) GenerateID() {
r.UUID = uuid.New().String() r.UUID = uuid.New().String()
} }
@ -119,29 +115,10 @@ func (d *WorkflowExecution) GetName() string {
return d.UUID + "_" + d.ExecDate.String() return d.UUID + "_" + d.ExecDate.String()
} }
func (d *WorkflowExecution) GetAccessor(caller *tools.HTTPCaller) utils.Accessor { func (d *WorkflowExecution) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor return New(tools.WORKFLOW_EXECUTION, username, peerID, groups, caller) // Create a new instance of the accessor
data.Init(tools.WORKFLOW_EXECUTION, caller) // Initialize the accessor with the WORKFLOW_EXECUTION model type
return data
} }
// New creates a new instance of the WorkflowExecution from a map func (d *WorkflowExecution) VerifyAuth(username string, peerID string, groups []string) bool {
func (dma *WorkflowExecution) Deserialize(j map[string]interface{}) utils.DBObject { return true
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}
// Serialize returns the WorkflowExecution as a map
func (dma *WorkflowExecution) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
} }

View File

@ -4,95 +4,68 @@ import (
"time" "time"
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo" "cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
) )
type workflowExecutionMongoAccessor struct { type workflowExecutionMongoAccessor struct {
utils.AbstractAccessor utils.AbstractAccessor
} }
func New() *workflowExecutionMongoAccessor { func New(t tools.DataType, username string, peerID string, groups []string, caller *tools.HTTPCaller) *workflowExecutionMongoAccessor {
return &workflowExecutionMongoAccessor{} return &workflowExecutionMongoAccessor{
utils.AbstractAccessor{
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
Caller: caller,
PeerID: peerID,
User: username, // Set the caller
Groups: groups, // Set the caller
Type: t,
},
}
} }
func (wfa *workflowExecutionMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) { func (wfa *workflowExecutionMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
return wfa.GenericDeleteOne(id, wfa) return utils.GenericDeleteOne(id, wfa)
} }
func (wfa *workflowExecutionMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) { func (wfa *workflowExecutionMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
return wfa.GenericUpdateOne(set, id, wfa, &WorkflowExecution{}) return utils.GenericUpdateOne(set, id, wfa, &WorkflowExecution{})
} }
func (wfa *workflowExecutionMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) { func (wfa *workflowExecutionMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
return wfa.GenericStoreOne(data, wfa) return utils.GenericStoreOne(data, wfa)
} }
func (wfa *workflowExecutionMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) { func (wfa *workflowExecutionMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return wfa.GenericStoreOne(data, wfa) return utils.GenericStoreOne(data, wfa)
} }
func (wfa *workflowExecutionMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) { func (a *workflowExecutionMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
var workflow WorkflowExecution return utils.GenericLoadOne[*WorkflowExecution](id, func(d utils.DBObject) (utils.DBObject, int, error) {
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType()) if d.(*WorkflowExecution).State == SCHEDULED && time.Now().UTC().After(*d.(*WorkflowExecution).ExecDate) {
if err != nil { d.(*WorkflowExecution).State = FORGOTTEN
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error()) utils.GenericRawUpdateOne(d, id, a)
return nil, code, err }
} return d, 200, nil
res_mongo.Decode(&workflow) }, a)
if workflow.State == SCHEDULED && time.Now().UTC().After(*workflow.ExecDate) {
workflow.State = FORGOTTEN
wfa.GenericRawUpdateOne(&workflow, id, wfa)
}
return &workflow, 200, nil
} }
func (wfa *workflowExecutionMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) { func (a *workflowExecutionMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{} return utils.GenericLoadAll[*WorkflowExecution](a.getExec(), a)
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
var results []WorkflowExecution
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
if r.State == SCHEDULED && time.Now().UTC().After(*r.ExecDate) {
r.State = FORGOTTEN
wfa.GenericRawUpdateOne(&r, r.UUID, wfa)
}
objs = append(objs, &r.AbstractObject)
}
return objs, 200, nil
} }
// Search searches for workflow executions in the database, given some filters OR a search string func (a *workflowExecutionMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
func (wfa *workflowExecutionMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) { return utils.GenericSearch[*WorkflowExecution](filters, search, (&WorkflowExecution{}).GetObjectFilters(search), a.getExec(), a)
objs := []utils.ShallowDBObject{} }
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = &dbs.Filters{ func (a *workflowExecutionMongoAccessor) getExec() func(utils.DBObject) utils.ShallowDBObject {
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided return func(d utils.DBObject) utils.ShallowDBObject {
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}}, if d.(*WorkflowExecution).State == SCHEDULED && time.Now().UTC().After(*d.(*WorkflowExecution).ExecDate) {
}, d.(*WorkflowExecution).State = FORGOTTEN
} utils.GenericRawUpdateOne(d, d.GetID(), a)
} }
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType()) return d
if err != nil { }
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err
}
var results []WorkflowExecution
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
if r.State == SCHEDULED && time.Now().UTC().After(*r.ExecDate) {
r.State = FORGOTTEN
wfa.GenericRawUpdateOne(&r, r.UUID, wfa)
}
objs = append(objs, &r)
}
return objs, 200, nil
} }
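getExec above lazily flips overdue scheduled executions to FORGOTTEN whenever they pass through LoadAll or Search. The date test it applies, extracted as a sketch — the helper name is hypothetical and a nil guard on ExecDate is added here:

// isForgotten reports whether a scheduled execution's date is already past,
// i.e. the condition getExec checks before marking it FORGOTTEN.
func isForgotten(e *WorkflowExecution, now time.Time) bool {
	return e.State == SCHEDULED && e.ExecDate != nil && now.After(*e.ExecDate)
}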

View File

@ -1,12 +1,10 @@
package workspace package workspace
import ( import (
"encoding/json" "cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
"cloud.o-forge.io/core/oc-lib/models/resources" "cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
"github.com/google/uuid"
) )
// Workspace is a struct that represents a workspace // Workspace is a struct that represents a workspace
@ -18,43 +16,17 @@ type Workspace struct {
Shared string `json:"shared,omitempty" bson:"shared,omitempty"` // Shared is the ID of the shared workspace Shared string `json:"shared,omitempty" bson:"shared,omitempty"` // Shared is the ID of the shared workspace
} }
func (ao *Workspace) GetID() string { func (d *Workspace) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return ao.UUID return New(tools.WORKSPACE, username, peerID, groups, caller) // Create a new instance of the accessor
} }
func (r *Workspace) GenerateID() { func (ao *Workspace) VerifyAuth(username string, peerID string, groups []string) bool {
if r.UUID == "" { if ao.Shared != "" {
r.UUID = uuid.New().String() shared, code, _ := shallow_collaborative_area.New(tools.COLLABORATIVE_AREA, username, peerID, groups, nil).LoadOne(ao.Shared)
if code != 200 || shared == nil {
return false
}
return shared.VerifyAuth(username, peerID, groups)
} }
} return ao.AbstractObject.VerifyAuth(username, peerID, groups)
func (d *Workspace) GetName() string {
return d.Name
}
func (d *Workspace) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.WORKSPACE, caller) // Initialize the accessor with the WORKSPACE model type
return data
}
// New creates a new instance of the workspaceMongoAccessor from a map
func (dma *Workspace) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}
// Serialize returns the workspaceMongoAccessor as a map
func (dma *Workspace) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
} }
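The workspace accessor further down calls Clear() and Fill(username, peerID, groups) on this model, but neither body appears in this excerpt. The following is only an assumed sketch of their shape, inferred from the fill helper removed in that accessor; the field names come from this diff, while resources.DataResource and the loop structure are guesswork:

// Assumed shape only: Clear drops the resolved resource lists, Fill reloads
// them for the requesting identity. Not the actual oc-lib implementation.
func (w *Workspace) Clear() {
	w.DataResources, w.ComputeResources, w.StorageResources = nil, nil, nil
	w.ProcessingResources, w.WorkflowResources = nil, nil
}

func (w *Workspace) Fill(username string, peerID string, groups []string) {
	acc := (&resources.DataResource{}).GetAccessor(username, peerID, groups, nil)
	for _, id := range w.Datas {
		if d, _, err := acc.LoadOne(id); err == nil {
			w.DataResources = append(w.DataResources, d.(*resources.DataResource))
		}
	}
	// ...the compute, storage, processing and workflow lists follow the same pattern.
}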

View File

@ -8,10 +8,8 @@ import (
type WorkspaceHistory struct{ Workspace } type WorkspaceHistory struct{ Workspace }
func (d *WorkspaceHistory) GetAccessor(caller *tools.HTTPCaller) utils.Accessor { func (d *WorkspaceHistory) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor return New(tools.WORKFLOW_HISTORY, username, peerID, groups, caller) // Create a new instance of the accessor
data.Init(tools.WORKSPACE_HISTORY, caller) // Initialize the accessor with the WORKSPACE model type
return data
} }
func (r *WorkspaceHistory) GenerateID() { func (r *WorkspaceHistory) GenerateID() {
r.UUID = uuid.New().String() r.UUID = uuid.New().String()

View File

@ -5,14 +5,9 @@ import (
"fmt" "fmt"
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo" "cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area" "cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
"cloud.o-forge.io/core/oc-lib/models/peer" "cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
"cloud.o-forge.io/core/oc-lib/models/resources/data"
"cloud.o-forge.io/core/oc-lib/models/resources/processing"
"cloud.o-forge.io/core/oc-lib/models/resources/storage"
w "cloud.o-forge.io/core/oc-lib/models/resources/workflow"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
) )
@ -23,194 +18,104 @@ type workspaceMongoAccessor struct {
} }
// New creates a new instance of the workspaceMongoAccessor // New creates a new instance of the workspaceMongoAccessor
func New() *workspaceMongoAccessor { func New(t tools.DataType, username string, peerID string, groups []string, caller *tools.HTTPCaller) *workspaceMongoAccessor {
return &workspaceMongoAccessor{} return &workspaceMongoAccessor{
utils.AbstractAccessor{
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
Caller: caller,
PeerID: peerID,
User: username,
Groups: groups, // Set the caller
Type: t,
},
}
} }
// DeleteOne deletes a workspace from the database, given its ID, it automatically share to peers if the workspace is shared // DeleteOne deletes a workspace from the database, given its ID, it automatically share to peers if the workspace is shared
// it checks if a workspace with the same name already exists // it checks if a workspace with the same name already exists
func (wfa *workspaceMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) { func (a *workspaceMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
res, code, err := wfa.GenericDeleteOne(id, wfa) res, code, err := utils.GenericDeleteOne(id, a)
if code == 200 && res != nil { if code == 200 && res != nil {
wfa.share(res.(*Workspace), tools.DELETE, wfa.Caller) // Share the deletion to the peers a.share(res.(*Workspace), tools.DELETE, a.Caller) // Share the deletion to the peers
} }
return res, code, err return res, code, err
} }
// UpdateOne updates a workspace in the database, given its ID, it automatically share to peers if the workspace is shared // UpdateOne updates a workspace in the database, given its ID, it automatically share to peers if the workspace is shared
func (wfa *workspaceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) { func (a *workspaceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
d := set.(*Workspace) // Get the workspace from the set d := set.(*Workspace) // Get the workspace from the set
d.DataResources = nil // Reset the resources d.Clear()
d.ComputeResources = nil
d.StorageResources = nil
d.ProcessingResources = nil
d.WorkflowResources = nil
if d.Active { // If the workspace is active, deactivate all the other workspaces if d.Active { // If the workspace is active, deactivate all the other workspaces
res, _, err := wfa.LoadAll() res, _, err := a.LoadAll()
if err == nil { if err == nil {
for _, r := range res { for _, r := range res {
if r.GetID() != id { if r.GetID() != id {
r.(*Workspace).Active = false r.(*Workspace).Active = false
wfa.UpdateOne(r.(*Workspace), r.GetID()) a.UpdateOne(r.(*Workspace), r.GetID())
} }
} }
} }
} }
res, code, err := wfa.GenericUpdateOne(set, id, wfa, &Workspace{}) res, code, err := utils.GenericUpdateOne(set, id, a, &Workspace{})
if code == 200 && res != nil { if code == 200 && res != nil {
wfa.share(res.(*Workspace), tools.PUT, wfa.Caller) a.share(res.(*Workspace), tools.PUT, a.Caller)
} }
return res, code, err return res, code, err
} }
// StoreOne stores a workspace in the database, it checks if a workspace with the same name already exists // StoreOne stores a workspace in the database, it checks if a workspace with the same name already exists
func (wfa *workspaceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) { func (a *workspaceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
filters := &dbs.Filters{ filters := &dbs.Filters{
Or: map[string][]dbs.Filter{ Or: map[string][]dbs.Filter{
"abstractobject.name": {{dbs.LIKE.String(), data.GetName() + "_workspace"}}, "abstractobject.name": {{Operator: dbs.LIKE.String(), Value: data.GetName() + "_workspace"}},
}, },
} }
res, _, err := wfa.Search(filters, "") // Search for the workspace res, _, err := a.Search(filters, "") // Search for the workspace
if err == nil && len(res) > 0 { // If the workspace already exists, return an error if err == nil && len(res) > 0 { // If the workspace already exists, return an error
return nil, 409, errors.New("A workspace with the same name already exists") return nil, 409, errors.New("A workspace with the same name already exists")
} }
// reset the resources // reset the resources
d := data.(*Workspace) d := data.(*Workspace)
d.DataResources = nil d.Clear()
d.ComputeResources = nil return utils.GenericStoreOne(d, a)
d.StorageResources = nil
d.ProcessingResources = nil
d.WorkflowResources = nil
return wfa.GenericStoreOne(d, wfa)
} }
// CopyOne copies a workspace in the database // CopyOne copies a workspace in the database
func (wfa *workspaceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) { func (a *workspaceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return wfa.GenericStoreOne(data, wfa) return utils.GenericStoreOne(data, a)
} }
/* func (a *workspaceMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
This function is used to fill the workspace with the resources return utils.GenericLoadOne[*Workspace](id, func(d utils.DBObject) (utils.DBObject, int, error) {
*/ d.(*Workspace).Fill(a.GetUser(), a.PeerID, a.Groups)
func (wfa *workspaceMongoAccessor) fill(workflow *Workspace) *Workspace { return d, 200, nil
// Fill the workspace with the resources }, a)
if workflow.Datas != nil && len(workflow.Datas) > 0 {
dataAccessor := (&data.DataResource{}).GetAccessor(nil)
for _, id := range workflow.Datas {
d, _, e := dataAccessor.LoadOne(id)
if e == nil {
workflow.DataResources = append(workflow.DataResources, d.(*data.DataResource))
}
}
}
// Fill the workspace with the computes
if workflow.Computes != nil && len(workflow.Computes) > 0 {
dataAccessor := (&compute.ComputeResource{}).GetAccessor(nil)
for _, id := range workflow.Computes {
d, _, e := dataAccessor.LoadOne(id)
if e == nil {
workflow.ComputeResources = append(workflow.ComputeResources, d.(*compute.ComputeResource))
}
}
}
// Fill the workspace with the storages
if workflow.Storages != nil && len(workflow.Storages) > 0 {
dataAccessor := (&storage.StorageResource{}).GetAccessor(nil)
for _, id := range workflow.Storages {
d, _, e := dataAccessor.LoadOne(id)
if e == nil {
workflow.StorageResources = append(workflow.StorageResources, d.(*storage.StorageResource))
}
}
}
// Fill the workspace with the processings
if workflow.Processings != nil && len(workflow.Processings) > 0 {
dataAccessor := (&processing.ProcessingResource{}).GetAccessor(nil)
for _, id := range workflow.Processings {
d, _, e := dataAccessor.LoadOne(id)
if e == nil {
workflow.ProcessingResources = append(workflow.ProcessingResources, d.(*processing.ProcessingResource))
}
}
}
// Fill the workspace with the workflows
if workflow.Workflows != nil && len(workflow.Workflows) > 0 {
dataAccessor := (&w.WorkflowResource{}).GetAccessor(nil)
for _, id := range workflow.Workflows {
d, _, e := dataAccessor.LoadOne(id)
if e == nil {
workflow.WorkflowResources = append(workflow.WorkflowResources, d.(*w.WorkflowResource))
}
}
}
return workflow
} }
// LoadOne loads a workspace from the database, given its ID func (a *workspaceMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
func (wfa *workspaceMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) { return utils.GenericLoadAll[*Workspace](func(d utils.DBObject) utils.ShallowDBObject {
var workflow Workspace d.(*Workspace).Fill(a.GetUser(), a.PeerID, a.Groups)
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType()) return d
if err != nil { }, a)
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}
res_mongo.Decode(&workflow)
return wfa.fill(&workflow), 200, nil
} }
// LoadAll loads all the workspaces from the database func (a *workspaceMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
func (wfa workspaceMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) { return utils.GenericSearch[*Workspace](filters, search, (&Workspace{}).GetObjectFilters(search), func(d utils.DBObject) utils.ShallowDBObject {
objs := []utils.ShallowDBObject{} d.(*Workspace).Fill(a.GetUser(), a.PeerID, a.Groups)
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType()) return d
if err != nil { }, a)
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
var results []Workspace
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
objs = append(objs, wfa.fill(&r))
}
return objs, 200, nil
}
// Search searches for workspaces in the database, given some filters OR a search string
func (wfa *workspaceMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
},
}
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err
}
var results []Workspace
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
objs = append(objs, wfa.fill(&r))
}
return objs, 200, nil
} }
/* /*
This function is used to share the workspace with the peers This function is used to share the workspace with the peers
*/ */
func (wfa *workspaceMongoAccessor) share(realData *Workspace, method tools.METHOD, caller *tools.HTTPCaller) { func (a *workspaceMongoAccessor) share(realData *Workspace, method tools.METHOD, caller *tools.HTTPCaller) {
fmt.Println("Sharing workspace", realData, caller) fmt.Println("Sharing workspace", realData, caller)
if realData == nil || realData.Shared == "" || caller == nil || caller.Disabled { if realData == nil || realData.Shared == "" || caller == nil || caller.Disabled {
return return
} }
access := (&shallow_collaborative_area.ShallowCollaborativeArea{}).GetAccessor(nil) shallow := &shallow_collaborative_area.ShallowCollaborativeArea{}
access := (shallow).GetAccessor(a.GetUser(), a.PeerID, a.Groups, nil)
res, code, _ := access.LoadOne(realData.Shared) res, code, _ := access.LoadOne(realData.Shared)
if code != 200 { if code != 200 {
return return
@ -227,10 +132,10 @@ func (wfa *workspaceMongoAccessor) share(realData *Workspace, method tools.METHO
history.StoreOne(history.MapFromWorkspace(res.(*Workspace))) history.StoreOne(history.MapFromWorkspace(res.(*Workspace)))
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKSPACE, tools.DELETE, map[string]interface{}{}, caller) _, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKSPACE, tools.DELETE, map[string]interface{}{}, caller)
} else { // If the workspace is updated, share the update } else { // If the workspace is updated, share the update
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKSPACE, tools.PUT, res.Serialize(), caller) _, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKSPACE, tools.PUT, res.Serialize(res), caller)
} }
} }
if err != nil { if err != nil {
wfa.Logger.Error().Msg(err.Error()) a.Logger.Error().Msg(err.Error())
} }
} }