Test of a lighter formulation of the code

mr 2024-11-28 11:05:54 +01:00
parent 15ca06aba8
commit 2816e3ea35
36 changed files with 574 additions and 783 deletions

@ -16,6 +16,7 @@ type Config struct {
Port string Port string
LokiUrl string LokiUrl string
LogLevel string LogLevel string
Whitelist bool
} }
func (c Config) GetUrl() string { func (c Config) GetUrl() string {
@ -37,19 +38,11 @@ func GetConfig() *Config {
} }
func SetConfig(mongoUrl string, database string, natsUrl string, lokiUrl string, logLevel string) *Config { func SetConfig(mongoUrl string, database string, natsUrl string, lokiUrl string, logLevel string) *Config {
/*once.Do(func() {
instance = &Config{
MongoUrl: mongoUrl,
MongoDatabase: database,
NATSUrl: natsUrl,
LokiUrl: lokiUrl,
LogLevel: logLevel,
}
})*/
GetConfig().MongoUrl = mongoUrl GetConfig().MongoUrl = mongoUrl
GetConfig().MongoDatabase = database GetConfig().MongoDatabase = database
GetConfig().NATSUrl = natsUrl GetConfig().NATSUrl = natsUrl
GetConfig().LokiUrl = lokiUrl GetConfig().LokiUrl = lokiUrl
GetConfig().LogLevel = logLevel GetConfig().LogLevel = logLevel
GetConfig().Whitelist = true
return GetConfig() return GetConfig()
} }
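
Note: SetConfig hard-codes the new Whitelist flag to true, and the collaborative_area accessor further down in this commit branches on it to decide which groups the creating peer gets by default. A minimal sketch of that consumer side (not part of the commit), assuming only the config package shown here ("cloud.o-forge.io/core/oc-lib/config"):

	// creatorGroups mirrors the branch added to collaborative_area.StoreOne below.
	func creatorGroups() []string {
		if config.GetConfig().Whitelist {
			return []string{"*"} // whitelist mode: the creator peer gets the wildcard group
		}
		return []string{} // otherwise: start with no groups and grant them explicitly
	}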

@ -15,10 +15,10 @@ import (
"cloud.o-forge.io/core/oc-lib/models/collaborative_area" "cloud.o-forge.io/core/oc-lib/models/collaborative_area"
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule" "cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
"cloud.o-forge.io/core/oc-lib/models/peer" "cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/resources/compute" "cloud.o-forge.io/core/oc-lib/models/resources/compute"
"cloud.o-forge.io/core/oc-lib/models/resources/data" "cloud.o-forge.io/core/oc-lib/models/resources/data"
"cloud.o-forge.io/core/oc-lib/models/resources/processing" "cloud.o-forge.io/core/oc-lib/models/resources/processing"
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/resources/storage" "cloud.o-forge.io/core/oc-lib/models/resources/storage"
w "cloud.o-forge.io/core/oc-lib/models/resources/workflow" w "cloud.o-forge.io/core/oc-lib/models/resources/workflow"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
@ -48,7 +48,7 @@ const (
WORKSPACE = tools.WORKSPACE WORKSPACE = tools.WORKSPACE
WORKFLOW_EXECUTION = tools.WORKFLOW_EXECUTION WORKFLOW_EXECUTION = tools.WORKFLOW_EXECUTION
PEER = tools.PEER PEER = tools.PEER
SHARED_WORKSPACE = tools.COLLABORATIVE_AREA COLLABORATIVE_AREA = tools.COLLABORATIVE_AREA
RULE = tools.RULE RULE = tools.RULE
BOOKING = tools.BOOKING BOOKING = tools.BOOKING
) )
@ -158,7 +158,7 @@ func SetConfig(mongoUrl string, database string, natsUrl string, lokiUrl string,
If not we will store it If not we will store it
Resource model is the model that will define the structure of the resources Resource model is the model that will define the structure of the resources
*/ */
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil) accessor := (&resource_model.ResourceModel{}).GetAccessor("", []string{}, nil)
for _, model := range []string{tools.DATA_RESOURCE.String(), tools.PROCESSING_RESOURCE.String(), tools.STORAGE_RESOURCE.String(), tools.COMPUTE_RESOURCE.String(), tools.WORKFLOW_RESOURCE.String()} { for _, model := range []string{tools.DATA_RESOURCE.String(), tools.PROCESSING_RESOURCE.String(), tools.STORAGE_RESOURCE.String(), tools.COMPUTE_RESOURCE.String(), tools.WORKFLOW_RESOURCE.String()} {
data, code, _ := accessor.Search(nil, model) data, code, _ := accessor.Search(nil, model)
if code == 404 || len(data) == 0 { if code == 404 || len(data) == 0 {
@ -225,6 +225,17 @@ func GetConfLoader() *onion.Onion {
return config.GetConfLoader() return config.GetConfLoader()
} }
type Request struct {
collection LibDataEnum
peerID string
groups []string
caller *tools.HTTPCaller
}
func NewRequest(collection LibDataEnum, peerID string, groups []string, caller *tools.HTTPCaller) *Request {
return &Request{collection: collection, peerID: peerID, groups: groups, caller: caller}
}
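
The Request type above replaces the old package-level functions that took an optional *tools.HTTPCaller; peer identity and groups now travel with every call. A caller-side sketch of the change (not part of the commit; it assumes this package is imported as oclib and that WORKFLOW is one of the LibDataEnum constants declared in this file):

	func loadWorkflowSketch(peerID string, groups []string, caller *tools.HTTPCaller, id string) {
		// before this commit: oclib.LoadOne(oclib.WORKFLOW, id, caller)
		req := oclib.NewRequest(oclib.WORKFLOW, peerID, groups, caller)
		one := req.LoadOne(id)                          // LibData for a single object
		all := req.LoadAll()                            // LibDataShallow for the whole collection
		hits := req.Search(nil, "demo", oclib.WORKFLOW) // Search still takes the collection explicitly
		_, _, _ = one, all, hits
	}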
/* /*
* Search will search for the data in the database * Search will search for the data in the database
* @param filters *dbs.Filters * @param filters *dbs.Filters
@ -233,18 +244,14 @@ func GetConfLoader() *onion.Onion {
* @param c ...*tools.HTTPCaller * @param c ...*tools.HTTPCaller
* @return data LibDataShallow * @return data LibDataShallow
*/ */
func Search(filters *dbs.Filters, word string, collection LibDataEnum, c ...*tools.HTTPCaller) (data LibDataShallow) { func (r *Request) Search(filters *dbs.Filters, word string, collection LibDataEnum) (data LibDataShallow) {
defer func() { // recover the panic defer func() { // recover the panic
if r := recover(); r != nil { if r := recover(); r != nil {
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in Search : "+fmt.Sprintf("%v", r))) tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in Search : "+fmt.Sprintf("%v", r)))
data = LibDataShallow{Data: nil, Code: 500, Err: "Panic recovered in LoadAll : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())} data = LibDataShallow{Data: nil, Code: 500, Err: "Panic recovered in LoadAll : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
} }
}() }()
var caller *tools.HTTPCaller // define the caller d, code, err := models.Model(collection.EnumIndex()).GetAccessor(r.peerID, r.groups, r.caller).Search(filters, word)
if len(c) > 0 {
caller = c[0]
}
d, code, err := models.Model(collection.EnumIndex()).GetAccessor(caller).Search(filters, word)
if err != nil { if err != nil {
data = LibDataShallow{Data: d, Code: code, Err: err.Error()} data = LibDataShallow{Data: d, Code: code, Err: err.Error()}
return return
@ -259,18 +266,14 @@ func Search(filters *dbs.Filters, word string, collection LibDataEnum, c ...*too
* @param c ...*tools.HTTPCaller * @param c ...*tools.HTTPCaller
* @return data LibDataShallow * @return data LibDataShallow
*/ */
func LoadAll(collection LibDataEnum, c ...*tools.HTTPCaller) (data LibDataShallow) { func (r *Request) LoadAll() (data LibDataShallow) {
defer func() { // recover the panic defer func() { // recover the panic
if r := recover(); r != nil { if r := recover(); r != nil {
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in LoadAll : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack()))) tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in LoadAll : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
data = LibDataShallow{Data: nil, Code: 500, Err: "Panic recovered in LoadAll : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())} data = LibDataShallow{Data: nil, Code: 500, Err: "Panic recovered in LoadAll : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
} }
}() }()
var caller *tools.HTTPCaller // define the caller d, code, err := models.Model(r.collection.EnumIndex()).GetAccessor(r.peerID, r.groups, r.caller).LoadAll()
if len(c) > 0 {
caller = c[0]
}
d, code, err := models.Model(collection.EnumIndex()).GetAccessor(caller).LoadAll()
if err != nil { if err != nil {
data = LibDataShallow{Data: d, Code: code, Err: err.Error()} data = LibDataShallow{Data: d, Code: code, Err: err.Error()}
return return
@ -286,18 +289,14 @@ func LoadAll(collection LibDataEnum, c ...*tools.HTTPCaller) (data LibDataShallo
* @param c ...*tools.HTTPCaller * @param c ...*tools.HTTPCaller
* @return data LibData * @return data LibData
*/ */
func LoadOne(collection LibDataEnum, id string, c ...*tools.HTTPCaller) (data LibData) { func (r *Request) LoadOne(id string) (data LibData) {
defer func() { // recover the panic defer func() { // recover the panic
if r := recover(); r != nil { if r := recover(); r != nil {
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in LoadOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack()))) tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in LoadOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
data = LibData{Data: nil, Code: 500, Err: "Panic recovered in LoadOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())} data = LibData{Data: nil, Code: 500, Err: "Panic recovered in LoadOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
} }
}() }()
var caller *tools.HTTPCaller // define the caller d, code, err := models.Model(r.collection.EnumIndex()).GetAccessor(r.peerID, r.groups, r.caller).LoadOne(id)
if len(c) > 0 {
caller = c[0]
}
d, code, err := models.Model(collection.EnumIndex()).GetAccessor(caller).LoadOne(id)
if err != nil { if err != nil {
data = LibData{Data: d, Code: code, Err: err.Error()} data = LibData{Data: d, Code: code, Err: err.Error()}
return return
@ -314,19 +313,15 @@ func LoadOne(collection LibDataEnum, id string, c ...*tools.HTTPCaller) (data Li
* @param c ...*tools.HTTPCaller * @param c ...*tools.HTTPCaller
* @return data LibData * @return data LibData
*/ */
func UpdateOne(collection LibDataEnum, set map[string]interface{}, id string, c ...*tools.HTTPCaller) (data LibData) { func (r *Request) UpdateOne(set map[string]interface{}, id string) (data LibData) {
defer func() { // recover the panic defer func() { // recover the panic
if r := recover(); r != nil { if r := recover(); r != nil {
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in UpdateOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack()))) tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in UpdateOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
data = LibData{Data: nil, Code: 500, Err: "Panic recovered in UpdateOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())} data = LibData{Data: nil, Code: 500, Err: "Panic recovered in UpdateOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
} }
}() }()
var caller *tools.HTTPCaller // define the caller model := models.Model(r.collection.EnumIndex())
if len(c) > 0 { d, code, err := model.GetAccessor(r.peerID, r.groups, r.caller).UpdateOne(model.Deserialize(set, model), id)
caller = c[0]
}
model := models.Model(collection.EnumIndex())
d, code, err := model.GetAccessor(caller).UpdateOne(model.Deserialize(set), id)
if err != nil { if err != nil {
data = LibData{Data: d, Code: code, Err: err.Error()} data = LibData{Data: d, Code: code, Err: err.Error()}
return return
@ -342,18 +337,14 @@ func UpdateOne(collection LibDataEnum, set map[string]interface{}, id string, c
* @param c ...*tools.HTTPCaller * @param c ...*tools.HTTPCaller
* @return data LibData * @return data LibData
*/ */
func DeleteOne(collection LibDataEnum, id string, c ...*tools.HTTPCaller) (data LibData) { func (r *Request) DeleteOne(id string) (data LibData) {
defer func() { // recover the panic defer func() { // recover the panic
if r := recover(); r != nil { if r := recover(); r != nil {
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in DeleteOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack()))) tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in DeleteOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
data = LibData{Data: nil, Code: 500, Err: "Panic recovered in DeleteOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())} data = LibData{Data: nil, Code: 500, Err: "Panic recovered in DeleteOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
} }
}() }()
var caller *tools.HTTPCaller // define the caller d, code, err := models.Model(r.collection.EnumIndex()).GetAccessor(r.peerID, r.groups, r.caller).DeleteOne(id)
if len(c) > 0 {
caller = c[0]
}
d, code, err := models.Model(collection.EnumIndex()).GetAccessor(caller).DeleteOne(id)
if err != nil { if err != nil {
data = LibData{Data: d, Code: code, Err: err.Error()} data = LibData{Data: d, Code: code, Err: err.Error()}
return return
@ -369,19 +360,15 @@ func DeleteOne(collection LibDataEnum, id string, c ...*tools.HTTPCaller) (data
* @param c ...*tools.HTTPCaller * @param c ...*tools.HTTPCaller
* @return data LibData * @return data LibData
*/ */
func StoreOne(collection LibDataEnum, object map[string]interface{}, c ...*tools.HTTPCaller) (data LibData) { func (r *Request) StoreOne(object map[string]interface{}) (data LibData) {
defer func() { // recover the panic defer func() { // recover the panic
if r := recover(); r != nil { if r := recover(); r != nil {
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in StoreOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack()))) tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in StoreOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
data = LibData{Data: nil, Code: 500, Err: "Panic recovered in StoreOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())} data = LibData{Data: nil, Code: 500, Err: "Panic recovered in StoreOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
} }
}() }()
var caller *tools.HTTPCaller // define the caller model := models.Model(r.collection.EnumIndex())
if len(c) > 0 { d, code, err := model.GetAccessor(r.peerID, r.groups, r.caller).StoreOne(model.Deserialize(object, model))
caller = c[0]
}
model := models.Model(collection.EnumIndex())
d, code, err := model.GetAccessor(caller).StoreOne(model.Deserialize(object))
if err != nil { if err != nil {
data = LibData{Data: d, Code: code, Err: err.Error()} data = LibData{Data: d, Code: code, Err: err.Error()}
return return
@ -397,7 +384,7 @@ func StoreOne(collection LibDataEnum, object map[string]interface{}, c ...*tools
* @param c ...*tools.HTTPCaller * @param c ...*tools.HTTPCaller
* @return data LibData * @return data LibData
*/ */
func CopyOne(collection LibDataEnum, object map[string]interface{}, c ...*tools.HTTPCaller) (data LibData) { func CopyOne(collection LibDataEnum, object map[string]interface{}, peerID string, groups []string, c ...*tools.HTTPCaller) (data LibData) {
defer func() { // recover the panic defer func() { // recover the panic
if r := recover(); r != nil { if r := recover(); r != nil {
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in CopyOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack()))) tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in CopyOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
@ -409,7 +396,7 @@ func CopyOne(collection LibDataEnum, object map[string]interface{}, c ...*tools.
caller = c[0] caller = c[0]
} }
model := models.Model(collection.EnumIndex()) model := models.Model(collection.EnumIndex())
d, code, err := model.GetAccessor(caller).CopyOne(model.Deserialize(object)) d, code, err := model.GetAccessor(peerID, groups, caller).CopyOne(model.Deserialize(object, model))
if err != nil { if err != nil {
data = LibData{Data: d, Code: code, Err: err.Error()} data = LibData{Data: d, Code: code, Err: err.Error()}
return return
@ -421,72 +408,72 @@ func CopyOne(collection LibDataEnum, object map[string]interface{}, c ...*tools.
// ================ CAST ========================= // // ================ CAST ========================= //
func (l *LibData) ToDataResource() *data.DataResource { func (l *LibData) ToDataResource() *data.DataResource {
if l.Data.GetAccessor(nil).GetType() == tools.DATA_RESOURCE.String() { if l.Data.GetAccessor("", []string{}, nil).GetType() == tools.DATA_RESOURCE.String() {
return l.Data.(*data.DataResource) return l.Data.(*data.DataResource)
} }
return nil return nil
} }
func (l *LibData) ToComputeResource() *compute.ComputeResource { func (l *LibData) ToComputeResource() *compute.ComputeResource {
if l.Data != nil && l.Data.GetAccessor(nil).GetType() == tools.COMPUTE_RESOURCE.String() { if l.Data != nil && l.Data.GetAccessor("", []string{}, nil).GetType() == tools.COMPUTE_RESOURCE.String() {
return l.Data.(*compute.ComputeResource) return l.Data.(*compute.ComputeResource)
} }
return nil return nil
} }
func (l *LibData) ToStorageResource() *storage.StorageResource { func (l *LibData) ToStorageResource() *storage.StorageResource {
if l.Data.GetAccessor(nil).GetType() == tools.STORAGE_RESOURCE.String() { if l.Data.GetAccessor("", []string{}, nil).GetType() == tools.STORAGE_RESOURCE.String() {
return l.Data.(*storage.StorageResource) return l.Data.(*storage.StorageResource)
} }
return nil return nil
} }
func (l *LibData) ToProcessingResource() *processing.ProcessingResource { func (l *LibData) ToProcessingResource() *processing.ProcessingResource {
if l.Data.GetAccessor(nil).GetType() == tools.PROCESSING_RESOURCE.String() { if l.Data.GetAccessor("", []string{}, nil).GetType() == tools.PROCESSING_RESOURCE.String() {
return l.Data.(*processing.ProcessingResource) return l.Data.(*processing.ProcessingResource)
} }
return nil return nil
} }
func (l *LibData) ToWorkflowResource() *w.WorkflowResource { func (l *LibData) ToWorkflowResource() *w.WorkflowResource {
if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW_RESOURCE.String() { if l.Data.GetAccessor("", []string{}, nil).GetType() == tools.WORKFLOW_RESOURCE.String() {
return l.Data.(*w.WorkflowResource) return l.Data.(*w.WorkflowResource)
} }
return nil return nil
} }
func (l *LibData) ToPeer() *peer.Peer { func (l *LibData) ToPeer() *peer.Peer {
if l.Data.GetAccessor(nil).GetType() == tools.PEER.String() { if l.Data.GetAccessor("", []string{}, nil).GetType() == tools.PEER.String() {
return l.Data.(*peer.Peer) return l.Data.(*peer.Peer)
} }
return nil return nil
} }
func (l *LibData) ToWorkflow() *w2.Workflow { func (l *LibData) ToWorkflow() *w2.Workflow {
if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW.String() { if l.Data.GetAccessor("", []string{}, nil).GetType() == tools.WORKFLOW.String() {
return l.Data.(*w2.Workflow) return l.Data.(*w2.Workflow)
} }
return nil return nil
} }
func (l *LibData) ToWorkspace() *workspace.Workspace { func (l *LibData) ToWorkspace() *workspace.Workspace {
if l.Data.GetAccessor(nil).GetType() == tools.WORKSPACE.String() { if l.Data.GetAccessor("", []string{}, nil).GetType() == tools.WORKSPACE.String() {
return l.Data.(*workspace.Workspace) return l.Data.(*workspace.Workspace)
} }
return nil return nil
} }
func (l *LibData) ToCollaborativeArea() *collaborative_area.CollaborativeArea { func (l *LibData) ToCollaborativeArea() *collaborative_area.CollaborativeArea {
if l.Data.GetAccessor(nil).GetType() == tools.COLLABORATIVE_AREA.String() { if l.Data.GetAccessor("", []string{}, nil).GetType() == tools.COLLABORATIVE_AREA.String() {
return l.Data.(*collaborative_area.CollaborativeArea) return l.Data.(*collaborative_area.CollaborativeArea)
} }
return nil return nil
} }
func (l *LibData) ToRule() *rule.Rule { func (l *LibData) ToRule() *rule.Rule {
if l.Data.GetAccessor(nil).GetType() == tools.COLLABORATIVE_AREA.String() { if l.Data.GetAccessor("", []string{}, nil).GetType() == tools.COLLABORATIVE_AREA.String() {
return l.Data.(*rule.Rule) return l.Data.(*rule.Rule)
} }
return nil return nil
} }
func (l *LibData) ToWorkflowExecution() *workflow_execution.WorkflowExecution { func (l *LibData) ToWorkflowExecution() *workflow_execution.WorkflowExecution {
if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW_EXECUTION.String() { if l.Data.GetAccessor("", []string{}, nil).GetType() == tools.WORKFLOW_EXECUTION.String() {
return l.Data.(*workflow_execution.WorkflowExecution) return l.Data.(*workflow_execution.WorkflowExecution)
} }
return nil return nil

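The cast helpers above now pass an empty peer ID and group list when they only need the accessor's type. Typical caller-side use is unchanged; a short sketch, reusing the req built with NewRequest in the earlier example:

	res := req.LoadOne(id)
	if wf := res.ToWorkflow(); wf != nil {
		// res really holds a *w2.Workflow
	}
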
@ -1,14 +1,12 @@
package booking package booking
import ( import (
"encoding/json"
"time" "time"
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution" "cloud.o-forge.io/core/oc-lib/models/workflow_execution"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
"github.com/google/uuid"
"go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/bson/primitive"
) )
@ -28,7 +26,7 @@ func (wfa *Booking) CheckBooking(id string, start time.Time, end *time.Time) (bo
return true, nil return true, nil
} }
e := *end e := *end
accessor := wfa.GetAccessor(nil) accessor := New()
res, code, err := accessor.Search(&dbs.Filters{ res, code, err := accessor.Search(&dbs.Filters{
And: map[string][]dbs.Filter{ // check if there is a booking on the same compute resource by filtering on the compute_resource_id, the state and the execution date And: map[string][]dbs.Filter{ // check if there is a booking on the same compute resource by filtering on the compute_resource_id, the state and the execution date
"compute_resource_id": {{Operator: dbs.EQUAL.String(), Value: id}}, "compute_resource_id": {{Operator: dbs.EQUAL.String(), Value: id}},
@ -51,41 +49,12 @@ func (wfa *Booking) ArgoStatusToState(status string) *Booking {
return wfa return wfa
} }
func (ao *Booking) GetID() string {
return ao.UUID
}
func (r *Booking) GenerateID() {
if r.UUID == "" {
r.UUID = uuid.New().String()
}
}
func (d *Booking) GetName() string { func (d *Booking) GetName() string {
return d.UUID + "_" + d.ExecDate.String() return d.UUID + "_" + d.ExecDate.String()
} }
func (d *Booking) GetAccessor(caller *tools.HTTPCaller) utils.Accessor { func (d *Booking) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor data := New() // Create a new instance of the accessor
data.Init(tools.BOOKING, caller) // Initialize the accessor with the BOOKING model type data.Init(tools.BOOKING, peerID, groups, caller) // Initialize the accessor with the BOOKING model type
return data return data
} }
func (dma *Booking) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}
func (dma *Booking) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}
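
The GetID/GenerateID/Serialize/Deserialize boilerplate removed from Booking (and from the other models later in this commit) appears to be replaced by shared helpers: call sites elsewhere in the diff now read workflow.Serialize(workflow) and model.Deserialize(set, model). The helpers themselves are not shown in this diff; a minimal sketch of what they could look like, generalised from the removed code (uses encoding/json, as the removed methods did; names and exact receivers are assumptions):

	func Serialize(obj DBObject) map[string]interface{} {
		var m map[string]interface{}
		b, err := json.Marshal(obj)
		if err != nil {
			return nil
		}
		json.Unmarshal(b, &m)
		return m
	}

	func Deserialize(j map[string]interface{}, obj DBObject) DBObject {
		b, err := json.Marshal(j)
		if err != nil {
			return nil
		}
		json.Unmarshal(b, obj)
		return obj
	}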

@ -1,7 +1,7 @@
package collaborative_area package collaborative_area
import ( import (
"encoding/json" "slices"
"time" "time"
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule" "cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
@ -10,7 +10,6 @@ import (
w "cloud.o-forge.io/core/oc-lib/models/workflow" w "cloud.o-forge.io/core/oc-lib/models/workflow"
"cloud.o-forge.io/core/oc-lib/models/workspace" "cloud.o-forge.io/core/oc-lib/models/workspace"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
"github.com/google/uuid"
) )
type CollaborativeAreaRule struct { type CollaborativeAreaRule struct {
@ -34,7 +33,7 @@ type CollaborativeArea struct {
Attributes map[string]interface{} `json:"attributes,omitempty" bson:"attributes,omitempty"` // Attributes is the attributes of the workspace (TODO) Attributes map[string]interface{} `json:"attributes,omitempty" bson:"attributes,omitempty"` // Attributes is the attributes of the workspace (TODO)
Workspaces []string `json:"workspaces" bson:"workspaces"` // Workspaces is the workspaces of the workspace Workspaces []string `json:"workspaces" bson:"workspaces"` // Workspaces is the workspaces of the workspace
Workflows []string `json:"workflows" bson:"workflows"` // Workflows is the workflows of the workspace Workflows []string `json:"workflows" bson:"workflows"` // Workflows is the workflows of the workspace
Peers []string `json:"peers" bson:"peers"` // Peers is the peers of the workspace AllowedPeersGroup map[string][]string `json:"allowed_peers_group,omitempty" bson:"allowed_peers_group,omitempty"` // AllowedPeersGroup is the group of allowed peers
Rules []string `json:"rules" bson:"rules,omitempty"` // Rules is the rules of the workspace Rules []string `json:"rules" bson:"rules,omitempty"` // Rules is the rules of the workspace
SharedRules []*rule.Rule `json:"shared_rules,omitempty" bson:"-"` // SharedRules is the shared rules of the workspace SharedRules []*rule.Rule `json:"shared_rules,omitempty" bson:"-"` // SharedRules is the shared rules of the workspace
@ -43,41 +42,31 @@ type CollaborativeArea struct {
SharedPeers []*peer.Peer `json:"shared_peers,omitempty" bson:"-"` // SharedPeers is the shared peers of the workspace SharedPeers []*peer.Peer `json:"shared_peers,omitempty" bson:"-"` // SharedPeers is the shared peers of the workspace
} }
func (ao *CollaborativeArea) GetID() string { func (ao *CollaborativeArea) VerifyAuth(peerID string, groups []string) bool {
return ao.UUID if ao.AllowedPeersGroup != nil && len(ao.AllowedPeersGroup) > 0 {
} if grps, ok := ao.AllowedPeersGroup[peerID]; ok {
if slices.Contains(grps, "*") {
func (r *CollaborativeArea) GenerateID() { return true
if r.UUID == "" { }
r.UUID = uuid.New().String() for _, grp := range grps {
if slices.Contains(groups, grp) {
return true
}
}
}
} }
return false
} }
func (d *CollaborativeArea) GetName() string { func (d *CollaborativeArea) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return d.Name data := New(peerID, groups) // Create a new instance of the accessor
} data.Init(tools.COLLABORATIVE_AREA, peerID, groups, caller) // Initialize the accessor with the SHARED_WORKSPACE model type
func (d *CollaborativeArea) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.COLLABORATIVE_AREA, caller) // Initialize the accessor with the SHARED_WORKSPACE model type
return data return data
} }
func (dma *CollaborativeArea) Deserialize(j map[string]interface{}) utils.DBObject { func (d *CollaborativeArea) Trim() *CollaborativeArea {
b, err := json.Marshal(j) if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: d.CreatorID}}).IsMySelf(); !ok {
if err != nil { d.AllowedPeersGroup = map[string][]string{}
return nil
} }
json.Unmarshal(b, dma) return d
return dma
}
func (dma *CollaborativeArea) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
} }
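
A quick illustration of the new VerifyAuth rules, using the AllowedPeersGroup shape introduced above (peer IDs and groups here are made up):

	ca := &CollaborativeArea{AllowedPeersGroup: map[string][]string{
		"peer-a": {"*"},     // wildcard: any group of peer-a passes
		"peer-b": {"admin"}, // only the admin group of peer-b passes
	}}
	ca.VerifyAuth("peer-a", []string{"anything"}) // true
	ca.VerifyAuth("peer-b", []string{"admin"})    // true
	ca.VerifyAuth("peer-b", []string{"dev"})      // false
	ca.VerifyAuth("peer-c", nil)                  // false: peer not listed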

@ -6,6 +6,7 @@ import (
"slices" "slices"
"time" "time"
"cloud.o-forge.io/core/oc-lib/config"
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo" "cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule" "cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
@ -19,19 +20,30 @@ import (
// SharedWorkspace is a struct that represents a collaborative area // SharedWorkspace is a struct that represents a collaborative area
type collaborativeAreaMongoAccessor struct { type collaborativeAreaMongoAccessor struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller) utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
workspaceAccessor utils.Accessor
workflowAccessor utils.Accessor
peerAccessor utils.Accessor
ruleAccessor utils.Accessor
} }
// New creates a new instance of the collaborativeAreaMongoAccessor // New creates a new instance of the collaborativeAreaMongoAccessor
func New() *collaborativeAreaMongoAccessor { func New(peerID string, groups []string) *collaborativeAreaMongoAccessor {
return &collaborativeAreaMongoAccessor{} return &collaborativeAreaMongoAccessor{
workspaceAccessor: (&workspace.Workspace{}).GetAccessor(peerID, groups, nil),
workflowAccessor: (&w.Workflow{}).GetAccessor(peerID, groups, nil),
peerAccessor: (&peer.Peer{}).GetAccessor(peerID, groups, nil),
ruleAccessor: (&rule.Rule{}).GetAccessor(peerID, groups, nil),
}
} }
// DeleteOne deletes a collaborative area from the database, given its ID, it automatically share to peers if the workspace is shared // DeleteOne deletes a collaborative area from the database, given its ID, it automatically share to peers if the workspace is shared
func (wfa *collaborativeAreaMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) { func (wfa *collaborativeAreaMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
set, code, _ := wfa.LoadOne(id) set, code, err := wfa.LoadOne(id)
if code == 200 { // always delete on peers than recreate if code != 200 {
wfa.deleteToPeer(set.(*CollaborativeArea)) return nil, code, err
} }
wfa.deleteToPeer(set.(*CollaborativeArea))
wfa.sharedWorkflow(&CollaborativeArea{}, id) // create all shared workflows wfa.sharedWorkflow(&CollaborativeArea{}, id) // create all shared workflows
wfa.sharedWorkspace(&CollaborativeArea{}, id) // create all collaborative areas wfa.sharedWorkspace(&CollaborativeArea{}, id) // create all collaborative areas
return wfa.GenericDeleteOne(id, wfa) // then add on yours return wfa.GenericDeleteOne(id, wfa) // then add on yours
@ -42,20 +54,19 @@ sharedWorkspace is a function that shares the collaborative area to the peers
*/ */
func (wfa *collaborativeAreaMongoAccessor) sharedWorkspace(shared *CollaborativeArea, id string) { func (wfa *collaborativeAreaMongoAccessor) sharedWorkspace(shared *CollaborativeArea, id string) {
eldest, code, _ := wfa.LoadOne(id) // get the eldest eldest, code, _ := wfa.LoadOne(id) // get the eldest
accessor := (&workspace.Workspace{}).GetAccessor(nil)
if code == 200 { if code == 200 {
eld := eldest.(*CollaborativeArea) eld := eldest.(*CollaborativeArea)
if eld.Workspaces != nil { // update all your workspaces in the eldest by replacing shared ref by an empty string if eld.Workspaces != nil { // update all your workspaces in the eldest by replacing shared ref by an empty string
for _, v := range eld.Workspaces { for _, v := range eld.Workspaces {
accessor.UpdateOne(&workspace.Workspace{Shared: ""}, v) wfa.workspaceAccessor.UpdateOne(&workspace.Workspace{Shared: ""}, v)
if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKSPACE] == nil { if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKSPACE] == nil {
continue continue
} }
paccess := (&peer.Peer{}) // send to all peers paccess := (&peer.Peer{}) // send to all peers
for _, p := range shared.Peers { // delete the collaborative area on the peer for k := range shared.AllowedPeersGroup { // delete the collaborative area on the peer
b, err := paccess.LaunchPeerExecution(p, v, tools.WORKSPACE, tools.DELETE, nil, wfa.Caller) b, err := paccess.LaunchPeerExecution(k, v, tools.WORKSPACE, tools.DELETE, nil, wfa.Caller)
if err != nil && b == nil { if err != nil && b == nil {
wfa.Logger.Error().Msg("Could not send to peer " + p + ". Error: " + err.Error()) wfa.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
} }
} }
} }
@ -63,20 +74,20 @@ func (wfa *collaborativeAreaMongoAccessor) sharedWorkspace(shared *Collaborative
} }
if shared.Workspaces != nil { if shared.Workspaces != nil {
for _, v := range shared.Workspaces { // update all the collaborative areas for _, v := range shared.Workspaces { // update all the collaborative areas
workspace, code, _ := accessor.UpdateOne(&workspace.Workspace{Shared: shared.UUID}, v) // add the shared ref to workspace workspace, code, _ := wfa.workspaceAccessor.UpdateOne(&workspace.Workspace{Shared: shared.UUID}, v) // add the shared ref to workspace
if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKSPACE] == nil { if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKSPACE] == nil {
continue continue
} }
for _, p := range shared.Peers { for k := range shared.AllowedPeersGroup {
if code != 200 { if code != 200 {
continue continue
} }
paccess := (&peer.Peer{}) // send to all peers, add the collaborative area on the peer paccess := (&peer.Peer{}) // send to all peers, add the collaborative area on the peer
s := workspace.Serialize() s := workspace.Serialize(workspace)
s["name"] = fmt.Sprintf("%v", s["name"]) + "_" + p s["name"] = fmt.Sprintf("%v", s["name"]) + "_" + k
b, err := paccess.LaunchPeerExecution(p, v, tools.WORKSPACE, tools.POST, s, wfa.Caller) b, err := paccess.LaunchPeerExecution(k, v, tools.WORKSPACE, tools.POST, s, wfa.Caller)
if err != nil && b == nil { if err != nil && b == nil {
wfa.Logger.Error().Msg("Could not send to peer " + p + ". Error: " + err.Error()) wfa.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
} }
} }
} }
@ -87,13 +98,12 @@ func (wfa *collaborativeAreaMongoAccessor) sharedWorkspace(shared *Collaborative
// sharedWorkflow is a function that shares the shared workflow to the peers // sharedWorkflow is a function that shares the shared workflow to the peers
func (wfa *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeArea, id string) { func (wfa *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeArea, id string) {
accessor := (&w.Workflow{}).GetAccessor(nil)
eldest, code, _ := wfa.LoadOne(id) // get the eldest eldest, code, _ := wfa.LoadOne(id) // get the eldest
if code == 200 { if code == 200 {
eld := eldest.(*CollaborativeArea) eld := eldest.(*CollaborativeArea)
if eld.Workflows != nil { if eld.Workflows != nil {
for _, v := range eld.Workflows { for _, v := range eld.Workflows {
data, code, _ := accessor.LoadOne(v) data, code, _ := wfa.workflowAccessor.LoadOne(v)
if code == 200 { if code == 200 {
s := data.(*w.Workflow) s := data.(*w.Workflow)
new := []string{} new := []string{}
@ -104,15 +114,15 @@ func (wfa *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeA
} // kick the shared reference in your old shared workflow } // kick the shared reference in your old shared workflow
n := &w.Workflow{} n := &w.Workflow{}
n.Shared = new n.Shared = new
accessor.UpdateOne(n, v) wfa.workflowAccessor.UpdateOne(n, v)
if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKFLOW] == nil { if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKFLOW] == nil {
continue continue
} }
paccess := (&peer.Peer{}) // send to all peers paccess := (&peer.Peer{}) // send to all peers
for _, p := range shared.Peers { // delete the shared workflow on the peer for k := range shared.AllowedPeersGroup { // delete the shared workflow on the peer
b, err := paccess.LaunchPeerExecution(p, v, tools.WORKFLOW, tools.DELETE, nil, wfa.Caller) b, err := paccess.LaunchPeerExecution(k, v, tools.WORKFLOW, tools.DELETE, nil, wfa.Caller)
if err != nil && b == nil { if err != nil && b == nil {
wfa.Logger.Error().Msg("Could not send to peer " + p + ". Error: " + err.Error()) wfa.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
} }
} }
} }
@ -121,23 +131,23 @@ func (wfa *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeA
} }
if shared.Workflows != nil { // update all the shared workflows if shared.Workflows != nil { // update all the shared workflows
for _, v := range shared.Workflows { for _, v := range shared.Workflows {
data, code, _ := accessor.LoadOne(v) data, code, _ := wfa.workflowAccessor.LoadOne(v)
if code == 200 { if code == 200 {
s := data.(*w.Workflow) s := data.(*w.Workflow)
if !slices.Contains(s.Shared, id) { if !slices.Contains(s.Shared, id) {
s.Shared = append(s.Shared, id) s.Shared = append(s.Shared, id)
workflow, code, _ := accessor.UpdateOne(s, v) workflow, code, _ := wfa.workflowAccessor.UpdateOne(s, v)
if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKFLOW] == nil { if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKFLOW] == nil {
continue continue
} }
paccess := (&peer.Peer{}) paccess := (&peer.Peer{})
for _, p := range shared.Peers { // send to all peers for k := range shared.AllowedPeersGroup { // send to all peers
if code == 200 { if code == 200 {
s := workflow.Serialize() // add the shared workflow on the peer s := workflow.Serialize(workflow) // add the shared workflow on the peer
s["name"] = fmt.Sprintf("%v", s["name"]) + "_" + p s["name"] = fmt.Sprintf("%v", s["name"]) + "_" + k
b, err := paccess.LaunchPeerExecution(p, shared.UUID, tools.WORKFLOW, tools.POST, s, wfa.Caller) b, err := paccess.LaunchPeerExecution(k, shared.UUID, tools.WORKFLOW, tools.POST, s, wfa.Caller)
if err != nil && b == nil { if err != nil && b == nil {
wfa.Logger.Error().Msg("Could not send to peer " + p + ". Error: " + err.Error()) wfa.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
} }
} }
} }
@ -155,13 +165,13 @@ func (wfa *collaborativeAreaMongoAccessor) deleteToPeer(shared *CollaborativeAre
return return
} }
paccess := (&peer.Peer{}) paccess := (&peer.Peer{})
for _, v := range shared.Peers { for k, _ := range shared.AllowedPeersGroup {
if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: v}}).IsMySelf(); ok { if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: k}}).IsMySelf(); ok {
continue continue
} }
b, err := paccess.LaunchPeerExecution(v, shared.UUID, tools.COLLABORATIVE_AREA, tools.DELETE, nil, wfa.Caller) b, err := paccess.LaunchPeerExecution(k, shared.UUID, tools.COLLABORATIVE_AREA, tools.DELETE, nil, wfa.Caller)
if err != nil && b == nil { if err != nil && b == nil {
wfa.Logger.Error().Msg("Could not send to peer " + v + ". Error: " + err.Error()) wfa.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
} }
} }
} }
@ -173,22 +183,21 @@ func (wfa *collaborativeAreaMongoAccessor) sendToPeer(shared *CollaborativeArea)
} }
paccess := (&peer.Peer{}) paccess := (&peer.Peer{})
for _, v := range shared.Peers { for k := range shared.AllowedPeersGroup {
if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: v}}).IsMySelf(); ok || shared.IsSent { if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: k}}).IsMySelf(); ok || shared.IsSent {
continue continue
} }
shared.IsSent = true shared.IsSent = true
b, err := paccess.LaunchPeerExecution(v, v, tools.COLLABORATIVE_AREA, tools.POST, shared.Serialize(), wfa.Caller) b, err := paccess.LaunchPeerExecution(k, k, tools.COLLABORATIVE_AREA, tools.POST, shared.Serialize(shared), wfa.Caller)
if err != nil && b == nil { if err != nil && b == nil {
wfa.Logger.Error().Msg("Could not send to peer " + v + ". Error: " + err.Error()) wfa.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
} }
} }
} }
// UpdateOne updates a collaborative area in the database, given its ID and the new data, it automatically share to peers if the workspace is shared // UpdateOne updates a collaborative area in the database, given its ID and the new data, it automatically share to peers if the workspace is shared
func (wfa *collaborativeAreaMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) { func (wfa *collaborativeAreaMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
res, code, err := wfa.GenericUpdateOne(set.(*CollaborativeArea), id, wfa, &CollaborativeArea{}) res, code, err := wfa.GenericUpdateOne(set.(*CollaborativeArea).Trim(), id, wfa, &CollaborativeArea{})
fmt.Println("UpdateOne", set, res, code, err)
// wfa.deleteToPeer(res.(*CollaborativeArea)) // delete the collaborative area on the peer // wfa.deleteToPeer(res.(*CollaborativeArea)) // delete the collaborative area on the peer
wfa.sharedWorkflow(res.(*CollaborativeArea), id) // replace all shared workflows wfa.sharedWorkflow(res.(*CollaborativeArea), id) // replace all shared workflows
wfa.sharedWorkspace(res.(*CollaborativeArea), id) // replace all collaborative areas (not shared worspace obj but workspace one) wfa.sharedWorkspace(res.(*CollaborativeArea), id) // replace all collaborative areas (not shared worspace obj but workspace one)
@ -198,9 +207,14 @@ func (wfa *collaborativeAreaMongoAccessor) UpdateOne(set utils.DBObject, id stri
// StoreOne stores a collaborative area in the database, it automatically share to peers if the workspace is shared // StoreOne stores a collaborative area in the database, it automatically share to peers if the workspace is shared
func (wfa *collaborativeAreaMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) { func (wfa *collaborativeAreaMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
_, id := (&peer.Peer{}).IsMySelf() // get the local peer _, id := (&peer.Peer{}).IsMySelf() // get the local peer
data.(*CollaborativeArea).CreatorID = id // set the creator id data.(*CollaborativeArea).CreatorID = id // set the creator id
data.(*CollaborativeArea).Peers = append(data.(*CollaborativeArea).Peers, id) // add the creator id to the peers // add the creator id to the peers
if config.GetConfig().Whitelist {
data.(*CollaborativeArea).AllowedPeersGroup[id] = []string{"*"}
} else {
data.(*CollaborativeArea).AllowedPeersGroup[id] = []string{}
}
// then reset the shared fields // then reset the shared fields
if data.(*CollaborativeArea).Workspaces == nil { if data.(*CollaborativeArea).Workspaces == nil {
data.(*CollaborativeArea).Workspaces = []string{} data.(*CollaborativeArea).Workspaces = []string{}
@ -219,12 +233,12 @@ func (wfa *collaborativeAreaMongoAccessor) StoreOne(data utils.DBObject) (utils.
} }
data.(*CollaborativeArea).CollaborativeAreaRule.CreatedAt = time.Now().UTC() data.(*CollaborativeArea).CollaborativeAreaRule.CreatedAt = time.Now().UTC()
// retrieve or proper peer // retrieve or proper peer
dd, code, err := (&peer.Peer{}).GetAccessor(nil).Search(nil, "0") dd, code, err := wfa.peerAccessor.Search(nil, "0")
if code != 200 || len(dd) == 0 { if code != 200 || len(dd) == 0 {
return nil, code, errors.New("Could not retrieve the peer" + err.Error()) return nil, code, errors.New("Could not retrieve the peer" + err.Error())
} }
data.(*CollaborativeArea).CollaborativeAreaRule.Creator = dd[0].GetID() data.(*CollaborativeArea).CollaborativeAreaRule.Creator = dd[0].GetID()
d, code, err := wfa.GenericStoreOne(data.(*CollaborativeArea), wfa) d, code, err := wfa.GenericStoreOne(data.(*CollaborativeArea).Trim(), wfa)
if code == 200 { if code == 200 {
wfa.sharedWorkflow(d.(*CollaborativeArea), d.GetID()) // create all shared workflows wfa.sharedWorkflow(d.(*CollaborativeArea), d.GetID()) // create all shared workflows
wfa.sharedWorkspace(d.(*CollaborativeArea), d.GetID()) // create all collaborative areas wfa.sharedWorkspace(d.(*CollaborativeArea), d.GetID()) // create all collaborative areas
@ -240,8 +254,7 @@ func (wfa *collaborativeAreaMongoAccessor) CopyOne(data utils.DBObject) (utils.D
// enrich is a function that enriches the CollaborativeArea with the shared objects // enrich is a function that enriches the CollaborativeArea with the shared objects
func (wfa *collaborativeAreaMongoAccessor) enrich(sharedWorkspace *CollaborativeArea) *CollaborativeArea { func (wfa *collaborativeAreaMongoAccessor) enrich(sharedWorkspace *CollaborativeArea) *CollaborativeArea {
access := (&workspace.Workspace{}).GetAccessor(nil) res, code, _ := wfa.workspaceAccessor.Search(&dbs.Filters{
res, code, _ := access.Search(&dbs.Filters{
Or: map[string][]dbs.Filter{ Or: map[string][]dbs.Filter{
"abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Workspaces}}, "abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Workspaces}},
}, },
@ -251,8 +264,7 @@ func (wfa *collaborativeAreaMongoAccessor) enrich(sharedWorkspace *Collaborative
sharedWorkspace.SharedWorkspaces = append(sharedWorkspace.SharedWorkspaces, r.(*workspace.Workspace)) sharedWorkspace.SharedWorkspaces = append(sharedWorkspace.SharedWorkspaces, r.(*workspace.Workspace))
} }
} }
access = (&w.Workflow{}).GetAccessor(nil) res, code, _ = wfa.workflowAccessor.Search(&dbs.Filters{
res, code, _ = access.Search(&dbs.Filters{
Or: map[string][]dbs.Filter{ Or: map[string][]dbs.Filter{
"abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Workflows}}, "abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Workflows}},
}, },
@ -262,10 +274,13 @@ func (wfa *collaborativeAreaMongoAccessor) enrich(sharedWorkspace *Collaborative
sharedWorkspace.SharedWorkflows = append(sharedWorkspace.SharedWorkflows, r.(*w.Workflow)) sharedWorkspace.SharedWorkflows = append(sharedWorkspace.SharedWorkflows, r.(*w.Workflow))
} }
} }
access = (&peer.Peer{}).GetAccessor(nil) peerskey := []string{}
res, code, _ = access.Search(&dbs.Filters{ for k := range sharedWorkspace.AllowedPeersGroup {
peerskey = append(peerskey, k)
}
res, code, _ = wfa.peerAccessor.Search(&dbs.Filters{
Or: map[string][]dbs.Filter{ Or: map[string][]dbs.Filter{
"abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Peers}}, "abstractobject.id": {{Operator: dbs.IN.String(), Value: peerskey}},
}, },
}, "") }, "")
if code == 200 { if code == 200 {
@ -273,8 +288,7 @@ func (wfa *collaborativeAreaMongoAccessor) enrich(sharedWorkspace *Collaborative
sharedWorkspace.SharedPeers = append(sharedWorkspace.SharedPeers, r.(*peer.Peer)) sharedWorkspace.SharedPeers = append(sharedWorkspace.SharedPeers, r.(*peer.Peer))
} }
} }
access = (&rule.Rule{}).GetAccessor(nil) res, code, _ = wfa.ruleAccessor.Search(&dbs.Filters{
res, code, _ = access.Search(&dbs.Filters{
Or: map[string][]dbs.Filter{ Or: map[string][]dbs.Filter{
"abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Rules}}, "abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Rules}},
}, },
@ -296,6 +310,9 @@ func (wfa *collaborativeAreaMongoAccessor) LoadOne(id string) (utils.DBObject, i
return nil, code, err return nil, code, err
} }
res_mongo.Decode(&sharedWorkspace) res_mongo.Decode(&sharedWorkspace)
if !sharedWorkspace.VerifyAuth(wfa.PeerID, wfa.Groups) {
return nil, 403, errors.New("You are not allowed to access this collaborative area")
}
return wfa.enrich(&sharedWorkspace), 200, nil // enrich the collaborative area return wfa.enrich(&sharedWorkspace), 200, nil // enrich the collaborative area
} }
@ -312,6 +329,9 @@ func (wfa collaborativeAreaMongoAccessor) LoadAll() ([]utils.ShallowDBObject, in
return nil, 404, err return nil, 404, err
} }
for _, r := range results { for _, r := range results {
if !r.VerifyAuth(wfa.PeerID, wfa.Groups) {
continue
}
objs = append(objs, wfa.enrich(&r)) // enrich the collaborative area objs = append(objs, wfa.enrich(&r)) // enrich the collaborative area
} }
return objs, 200, nil return objs, 200, nil
@ -337,6 +357,9 @@ func (wfa *collaborativeAreaMongoAccessor) Search(filters *dbs.Filters, search s
return nil, 404, err return nil, 404, err
} }
for _, r := range results { for _, r := range results {
if !r.VerifyAuth(wfa.PeerID, wfa.Groups) {
continue
}
objs = append(objs, wfa.enrich(&r)) // enrich the collaborative area objs = append(objs, wfa.enrich(&r)) // enrich the collaborative area
} }
return objs, 200, nil return objs, 200, nil
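With the changes above, LoadOne, LoadAll and Search all gate results through VerifyAuth using the peer ID and groups stored in the accessor. A condensed caller-side sketch (someID is a placeholder; it assumes the stored area does not list peer-c):

	acc := (&CollaborativeArea{}).GetAccessor("peer-c", []string{"dev"}, nil)
	obj, code, err := acc.LoadOne(someID) // obj == nil, code == 403
	// err: "You are not allowed to access this collaborative area"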

@ -1,8 +1,6 @@
package rule package rule
import ( import (
"encoding/json"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
"github.com/google/uuid" "github.com/google/uuid"
@ -18,39 +16,12 @@ type Rule struct {
Actions []string `json:"actions,omitempty" bson:"actions,omitempty"` // NOT DEFINITIVE TO SPECIFICATION Actions []string `json:"actions,omitempty" bson:"actions,omitempty"` // NOT DEFINITIVE TO SPECIFICATION
} }
func (ao *Rule) GetID() string {
return ao.UUID
}
func (r *Rule) GenerateID() { func (r *Rule) GenerateID() {
r.UUID = uuid.New().String() r.UUID = uuid.New().String()
} }
func (d *Rule) GetName() string { func (d *Rule) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return d.Name
}
func (d *Rule) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() data := New()
data.Init(tools.RULE, caller) data.Init(tools.RULE, peerID, groups, caller)
return data return data
} }
func (dma *Rule) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}
func (dma *Rule) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}

@ -1,11 +1,8 @@
package shallow_collaborative_area package shallow_collaborative_area
import ( import (
"encoding/json"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
"github.com/google/uuid"
) )
type ShallowCollaborativeArea struct { type ShallowCollaborativeArea struct {
@ -21,41 +18,8 @@ type ShallowCollaborativeArea struct {
Rules []string `json:"rules,omitempty" bson:"rules,omitempty"` Rules []string `json:"rules,omitempty" bson:"rules,omitempty"`
} }
func (ao *ShallowCollaborativeArea) GetID() string { func (d *ShallowCollaborativeArea) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return ao.UUID
}
func (r *ShallowCollaborativeArea) GenerateID() {
if r.UUID == "" {
r.UUID = uuid.New().String()
}
}
func (d *ShallowCollaborativeArea) GetName() string {
return d.Name
}
func (d *ShallowCollaborativeArea) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() data := New()
data.Init(tools.COLLABORATIVE_AREA, caller) data.Init(tools.COLLABORATIVE_AREA, peerID, groups, caller)
return data return data
} }
func (dma *ShallowCollaborativeArea) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}
func (dma *ShallowCollaborativeArea) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}

@ -1,12 +1,10 @@
package peer package peer
import ( import (
"encoding/json"
"fmt" "fmt"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
"github.com/google/uuid"
) )
// now write a go enum for the state partner with self, blacklist, partner // now write a go enum for the state partner with self, blacklist, partner
@ -65,7 +63,7 @@ func (ao *Peer) RemoveExecution(exec PeerExecution) {
// IsMySelf checks if the peer is the local peer // IsMySelf checks if the peer is the local peer
func (ao *Peer) IsMySelf() (bool, string) { func (ao *Peer) IsMySelf() (bool, string) {
d, code, err := ao.GetAccessor(nil).Search(nil, SELF.String()) d, code, err := New().Search(nil, SELF.String())
if code != 200 || err != nil || len(d) == 0 { if code != 200 || err != nil || len(d) == 0 {
return false, "" return false, ""
} }
@ -78,42 +76,8 @@ func (p *Peer) LaunchPeerExecution(peerID string, dataID string, dt tools.DataTy
p.UUID = peerID p.UUID = peerID
return cache.LaunchPeerExecution(peerID, dataID, dt, method, body, caller) // Launch the execution on the peer through the cache return cache.LaunchPeerExecution(peerID, dataID, dt, method, body, caller) // Launch the execution on the peer through the cache
} }
func (d *Peer) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
func (ao *Peer) GetID() string { data := New() // Create a new instance of the accessor
return ao.UUID data.Init(tools.PEER, peerID, groups, caller) // Initialize the accessor with the PEER model type
}
func (r *Peer) GenerateID() {
if r.UUID == "" {
r.UUID = uuid.New().String()
}
}
func (d *Peer) GetName() string {
return d.Name
}
func (d *Peer) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.PEER, caller) // Initialize the accessor with the PEER model type
return data return data
} }
func (dma *Peer) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}
func (dma *Peer) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}

@ -56,7 +56,7 @@ func (p *PeerCache) urlFormat(url string, dt tools.DataType) string {
// checkPeerStatus checks the status of a peer // checkPeerStatus checks the status of a peer
func (p *PeerCache) checkPeerStatus(peerID string, appName string, caller *tools.HTTPCaller) (*Peer, bool) { func (p *PeerCache) checkPeerStatus(peerID string, appName string, caller *tools.HTTPCaller) (*Peer, bool) {
api := tools.API{} api := tools.API{}
access := (&Peer{}).GetAccessor(nil) access := New()
res, code, _ := access.LoadOne(peerID) // Load the peer from db res, code, _ := access.LoadOne(peerID) // Load the peer from db
if code != 200 { // no peer no party if code != 200 { // no peer no party
return nil, false return nil, false
@ -101,18 +101,18 @@ func (p *PeerCache) LaunchPeerExecution(peerID string, dataID string,
DataID: dataID, DataID: dataID,
} }
mypeer.AddExecution(*pexec) mypeer.AddExecution(*pexec)
mypeer.GetAccessor(nil).UpdateOne(mypeer, peerID) // Update the peer in the db New().UpdateOne(mypeer, peerID) // Update the peer in the db
return nil, errors.New("peer is not reachable") return nil, errors.New("peer is not reachable")
} else { } else {
if mypeer == nil { if mypeer == nil {
return nil, errors.New("peer not found") return nil, errors.New("peer not found")
} }
// If the peer is reachable, launch the execution // If the peer is reachable, launch the execution
url = p.urlFormat((mypeer.Url)+meth, dt) // Format the URL url = p.urlFormat((mypeer.Url)+meth, dt) // Format the URL
tmp := mypeer.FailedExecution // Get the failed executions list tmp := mypeer.FailedExecution // Get the failed executions list
mypeer.FailedExecution = []PeerExecution{} // Reset the failed executions list mypeer.FailedExecution = []PeerExecution{} // Reset the failed executions list
mypeer.GetAccessor(nil).UpdateOne(mypeer, peerID) // Update the peer in the db New().UpdateOne(mypeer, peerID) // Update the peer in the db
for _, v := range tmp { // Retry the failed executions for _, v := range tmp { // Retry the failed executions
go p.exec(v.Url, tools.ToMethod(v.Method), v.Body, caller) go p.exec(v.Url, tools.ToMethod(v.Method), v.Body, caller)
} }
} }

@ -1,9 +1,7 @@
package compute package compute
import ( import (
"encoding/json" "cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
) )
@ -54,28 +52,9 @@ type ComputeResource struct {
GPUs []*GPU `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs GPUs []*GPU `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs
} }
func (dma *ComputeResource) Deserialize(j map[string]interface{}) utils.DBObject { func (d *ComputeResource) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
b, err := json.Marshal(j) data := New(peerID, groups)
if err != nil { data.Init(tools.COMPUTE_RESOURCE, peerID, groups, caller)
return nil
}
json.Unmarshal(b, dma)
return dma
}
func (dma *ComputeResource) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}
func (d *ComputeResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New()
data.Init(tools.COMPUTE_RESOURCE, caller)
return data return data
} }

@ -2,8 +2,7 @@ package compute
import ( import (
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo" "cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
) )
@ -12,8 +11,12 @@ type computeMongoAccessor struct {
} }
// New creates a new instance of the computeMongoAccessor // New creates a new instance of the computeMongoAccessor
func New() *computeMongoAccessor { func New(peerID string, groups []string) *computeMongoAccessor {
return &computeMongoAccessor{} return &computeMongoAccessor{
utils.AbstractAccessor{
ResourceModelAccessor: resource_model.New(),
},
}
} }
/* /*
@ -26,87 +29,48 @@ func (dca *computeMongoAccessor) DeleteOne(id string) (utils.DBObject, int, erro
func (dca *computeMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) { func (dca *computeMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
set.(*ComputeResource).ResourceModel = nil set.(*ComputeResource).ResourceModel = nil
return dca.GenericUpdateOne(set, id, dca, &ComputeResource{}) return dca.GenericUpdateOne(set.(*ComputeResource).Trim(), id, dca, &ComputeResource{})
} }
func (dca *computeMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) { func (dca *computeMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
data.(*ComputeResource).ResourceModel = nil data.(*ComputeResource).ResourceModel = nil
return dca.GenericStoreOne(data, dca) return dca.GenericStoreOne(data.(*ComputeResource).Trim(), dca)
} }
func (dca *computeMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) { func (dca *computeMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return dca.GenericStoreOne(data, dca) return dca.StoreOne(data)
} }
func (dca *computeMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) { func (dca *computeMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
var compute ComputeResource return utils.GenericLoadOne[*ComputeResource](id, func(d utils.DBObject) (utils.DBObject, int, error) {
resources, _, err := dca.ResourceModelAccessor.Search(nil, dca.GetType())
res_mongo, code, err := mongo.MONGOService.LoadOne(id, dca.GetType()) if err == nil && len(resources) > 0 {
if err != nil { d.(*ComputeResource).ResourceModel = resources[0].(*resource_model.ResourceModel)
dca.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error()) }
return nil, code, err return d, 200, nil
} }, dca)
res_mongo.Decode(&compute)
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, dca.GetType())
if err == nil && len(resources) > 0 {
compute.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
return &compute, 200, nil
} }
func (wfa computeMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) { func (wfa *computeMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{} resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType()) return utils.GenericLoadAll[*ComputeResource](func(d utils.DBObject, a []utils.ShallowDBObject) []utils.ShallowDBObject {
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
var results []ComputeResource
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results {
if err == nil && len(resources) > 0 { if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel) d.(*ComputeResource).ResourceModel = resources[0].(*resource_model.ResourceModel)
} }
objs = append(objs, &r) // only get the abstract resource ! a = append(a, d) // only get the abstract resource !
} return a
return objs, 200, nil }, wfa)
} }
func (wfa *computeMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) { func (wfa *computeMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{} root := &ComputeResource{}
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" { resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
filters = &dbs.Filters{ return utils.GenericSearch[*ComputeResource](filters, search, root.GetResourceFilter(search),
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided func(d utils.DBObject, a []utils.ShallowDBObject) []utils.ShallowDBObject {
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}}, if err == nil && len(resources) > 0 {
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}}, d.(*ComputeResource).ResourceModel = resources[0].(*resource_model.ResourceModel)
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}}, }
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}}, a = append(a, d) // only get the abstract resource !
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}}, return a
}, }, wfa)
}
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err
}
var results []ComputeResource
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results {
if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
objs = append(objs, &r) // only get the abstract resource !
}
return objs, 200, nil
} }
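The write path now goes through Trim(): StoreOne and UpdateOne persist the trimmed resource, and CopyOne simply delegates to StoreOne. A hedged sketch of a helper in this package, assuming only the constructor and StoreOne shown above (the wildcard grant is just an example):

// Hypothetical helper in this package: the accessor now carries the caller's
// identity, and StoreOne persists the Trim()-ed resource, so the
// AllowedPeersGroup map is only kept when the storing peer is the local one.
func storeForPeer(peerID string, groups []string) (string, int, error) {
	acc := New(peerID, groups)
	res := &ComputeResource{}
	res.AllowedPeersGroup = map[string][]string{peerID: {"*"}} // grant every group of this peer
	obj, code, err := acc.StoreOne(res)
	if err != nil {
		return "", code, err
	}
	return obj.GetID(), code, nil
}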


@ -3,7 +3,7 @@ package compute
import ( import (
"testing" "testing"
"cloud.o-forge.io/core/oc-lib/models/resource_model" "cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@ -21,7 +21,7 @@ func TestStoreOneCompute(t *testing.T) {
}, },
} }
dcma := New() dcma := New("", nil)
id, _, _ := dcma.StoreOne(&dc) id, _, _ := dcma.StoreOne(&dc)
assert.NotEmpty(t, id) assert.NotEmpty(t, id)
@ -39,7 +39,7 @@ func TestLoadOneCompute(t *testing.T) {
}, },
} }
dcma := New() dcma := New("", nil)
new_dc, _, _ := dcma.StoreOne(&dc) new_dc, _, _ := dcma.StoreOne(&dc)
assert.Equal(t, dc, new_dc) assert.Equal(t, dc, new_dc)


@ -1,9 +1,7 @@
package data package data
import ( import (
"encoding/json" "cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
) )
@ -39,27 +37,8 @@ type DataResource struct {
Example string `json:"example,omitempty" bson:"example,omitempty" description:"base64 encoded data"` // Example is an example of the data Example string `json:"example,omitempty" bson:"example,omitempty" description:"base64 encoded data"` // Example is an example of the data
} }
func (dma *DataResource) Deserialize(j map[string]interface{}) utils.DBObject { func (d *DataResource) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
b, err := json.Marshal(j) data := New() // Create a new instance of the accessor
if err != nil { data.Init(tools.DATA_RESOURCE, peerID, groups, caller) // Initialize the accessor with the DATA_RESOURCE model type
return nil
}
json.Unmarshal(b, dma)
return dma
}
func (dma *DataResource) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}
func (d *DataResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.DATA_RESOURCE, caller) // Initialize the accessor with the DATA_RESOURCE model type
return data return data
} }


@ -1,9 +1,11 @@
package data package data
import ( import (
"errors"
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
mongo "cloud.o-forge.io/core/oc-lib/dbs/mongo" mongo "cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/models/resource_model" "cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
) )
@ -13,7 +15,11 @@ type dataMongoAccessor struct {
// New creates a new instance of the dataMongoAccessor // New creates a new instance of the dataMongoAccessor
func New() *dataMongoAccessor { func New() *dataMongoAccessor {
return &dataMongoAccessor{} return &dataMongoAccessor{
utils.AbstractAccessor{
ResourceModelAccessor: resource_model.New(),
},
}
} }
/* /*
@ -26,16 +32,16 @@ func (dma *dataMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error)
func (dma *dataMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) { func (dma *dataMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
set.(*DataResource).ResourceModel = nil set.(*DataResource).ResourceModel = nil
return dma.GenericUpdateOne(set, id, dma, &DataResource{}) return dma.GenericUpdateOne(set.(*DataResource).Trim(), id, dma, &DataResource{})
} }
func (dma *dataMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) { func (dma *dataMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
data.(*DataResource).ResourceModel = nil data.(*DataResource).ResourceModel = nil
return dma.GenericStoreOne(data, dma) return dma.GenericStoreOne(data.(*DataResource).Trim(), dma)
} }
func (dma *dataMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) { func (dma *dataMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return dma.GenericStoreOne(data, dma) return dma.StoreOne(data)
} }
func (dma *dataMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) { func (dma *dataMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
@ -46,8 +52,10 @@ func (dma *dataMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
return nil, code, err return nil, code, err
} }
res_mongo.Decode(&data) res_mongo.Decode(&data)
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil) if !data.VerifyAuth(dma.PeerID, dma.Groups) {
resources, _, err := accessor.Search(nil, dma.GetType()) return nil, 403, errors.New("You are not allowed to access this collaborative area")
}
resources, _, err := dma.ResourceModelAccessor.Search(nil, dma.GetType())
if err == nil && len(resources) > 0 { if err == nil && len(resources) > 0 {
data.ResourceModel = resources[0].(*resource_model.ResourceModel) data.ResourceModel = resources[0].(*resource_model.ResourceModel)
} }
@ -65,9 +73,11 @@ func (wfa dataMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil { if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err return nil, 404, err
} }
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil) resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results { for _, r := range results {
if !r.VerifyAuth(wfa.PeerID, wfa.Groups) {
continue
}
if err == nil && len(resources) > 0 { if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel) r.ResourceModel = resources[0].(*resource_model.ResourceModel)
} }
@ -98,9 +108,11 @@ func (wfa *dataMongoAccessor) Search(filters *dbs.Filters, search string) ([]uti
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil { if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err return nil, 404, err
} }
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil) resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results { for _, r := range results {
if !r.VerifyAuth(wfa.PeerID, wfa.Groups) {
continue
}
if err == nil && len(resources) > 0 { if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel) r.ResourceModel = resources[0].(*resource_model.ResourceModel)
} }
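LoadOne answers an unauthorized request with a 403, while LoadAll and Search quietly skip anything the caller is not allowed to see. The skip is just a predicate filter over the decoded results; a self-contained sketch, where the predicate stands in for VerifyAuth:

// keepAuthorized mirrors the loops above: entries failing the check are
// dropped from the listing, not reported as errors.
func keepAuthorized[T any](items []T, allowed func(T) bool) []T {
	out := make([]T, 0, len(items))
	for _, it := range items {
		if allowed(it) {
			out = append(out, it)
		}
	}
	return out
}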


@ -3,7 +3,7 @@ package data
import ( import (
"testing" "testing"
"cloud.o-forge.io/core/oc-lib/models/resource_model" "cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"


@ -1,10 +1,8 @@
package processing package processing
import ( import (
"encoding/json"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/resources/compute" "cloud.o-forge.io/core/oc-lib/models/resources/compute"
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
) )
@ -29,39 +27,20 @@ type Expose struct {
*/ */
type ProcessingResource struct { type ProcessingResource struct {
resource_model.AbstractResource resource_model.AbstractResource
IsService bool `json:"is_service,omitempty" bson:"is_service,omitempty"` // IsService is a flag that indicates if the processing is a service IsService bool `json:"is_service,omitempty" bson:"is_service,omitempty"` // IsService is a flag that indicates if the processing is a service
CPUs []*compute.CPU `bson:"cpus,omitempty" json:"cp_us,omitempty"` // CPUs is the list of CPUs CPUs []*compute.CPU `bson:"cpus,omitempty" json:"cp_us,omitempty"` // CPUs is the list of CPUs
GPUs []*compute.GPU `bson:"gpus,omitempty" json:"gp_us,omitempty"` // GPUs is the list of GPUs GPUs []*compute.GPU `bson:"gpus,omitempty" json:"gp_us,omitempty"` // GPUs is the list of GPUs
RAM *compute.RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM RAM *compute.RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
Storage uint `bson:"storage,omitempty" json:"storage,omitempty"` // Storage is the storage Storage uint `bson:"storage,omitempty" json:"storage,omitempty"` // Storage is the storage
Parallel bool `bson:"parallel,omitempty" json:"parallel,omitempty"` // Parallel is a flag that indicates if the processing is parallel Parallel bool `bson:"parallel,omitempty" json:"parallel,omitempty"` // Parallel is a flag that indicates if the processing is parallel
ScalingModel uint `bson:"scaling_model,omitempty" json:"scaling_model,omitempty"` // ScalingModel is the scaling model ScalingModel uint `bson:"scaling_model,omitempty" json:"scaling_model,omitempty"` // ScalingModel is the scaling model
DiskIO string `bson:"disk_io,omitempty" json:"disk_io,omitempty"` // DiskIO is the disk IO DiskIO string `bson:"disk_io,omitempty" json:"disk_io,omitempty"` // DiskIO is the disk IO
Container *Container `bson:"container,omitempty" json:"container,omitempty"` // Container is the container Container *Container `bson:"container,omitempty" json:"container,omitempty"` // Container is the container
Expose []Expose `bson:"expose,omitempty" json:"expose,omitempty"` // Expose is the execution Expose []Expose `bson:"expose,omitempty" json:"expose,omitempty"` // Expose is the execution
} }
func (dma *ProcessingResource) Deserialize(j map[string]interface{}) utils.DBObject { func (d *ProcessingResource) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
b, err := json.Marshal(j) data := New() // Create a new instance of the accessor
if err != nil { data.Init(tools.PROCESSING_RESOURCE, peerID, groups, caller) // Initialize the accessor with the PROCESSING_RESOURCE model type
return nil
}
json.Unmarshal(b, dma)
return dma
}
func (dma *ProcessingResource) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}
func (d *ProcessingResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.PROCESSING_RESOURCE, caller) // Initialize the accessor with the PROCESSING_RESOURCE model type
return data return data
} }


@ -1,9 +1,11 @@
package processing package processing
import ( import (
"errors"
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo" "cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/models/resource_model" "cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
) )
@ -13,7 +15,11 @@ type processingMongoAccessor struct {
// New creates a new instance of the processingMongoAccessor // New creates a new instance of the processingMongoAccessor
func New() *processingMongoAccessor { func New() *processingMongoAccessor {
return &processingMongoAccessor{} return &processingMongoAccessor{
utils.AbstractAccessor{
ResourceModelAccessor: resource_model.New(),
},
}
} }
/* /*
@ -26,16 +32,16 @@ func (pma *processingMongoAccessor) DeleteOne(id string) (utils.DBObject, int, e
func (pma *processingMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) { func (pma *processingMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
set.(*ProcessingResource).ResourceModel = nil set.(*ProcessingResource).ResourceModel = nil
return pma.GenericUpdateOne(set, id, pma, &ProcessingResource{}) return pma.GenericUpdateOne(set.(*ProcessingResource).Trim(), id, pma, &ProcessingResource{})
} }
func (pma *processingMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) { func (pma *processingMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
data.(*ProcessingResource).ResourceModel = nil data.(*ProcessingResource).ResourceModel = nil
return pma.GenericStoreOne(data, pma) return pma.GenericStoreOne(data.(*ProcessingResource).Trim(), pma)
} }
func (pma *processingMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) { func (pma *processingMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return pma.GenericStoreOne(data, pma) return pma.StoreOne(data)
} }
func (pma *processingMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) { func (pma *processingMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
@ -49,8 +55,10 @@ func (pma *processingMongoAccessor) LoadOne(id string) (utils.DBObject, int, err
} }
res_mongo.Decode(&processing) res_mongo.Decode(&processing)
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil) if !processing.VerifyAuth(pma.PeerID, pma.Groups) {
resources, _, err := accessor.Search(nil, pma.GetType()) return nil, 403, errors.New("You are not allowed to access this collaborative area")
}
resources, _, err := pma.ResourceModelAccessor.Search(nil, pma.GetType())
if err == nil && len(resources) > 0 { if err == nil && len(resources) > 0 {
processing.ResourceModel = resources[0].(*resource_model.ResourceModel) processing.ResourceModel = resources[0].(*resource_model.ResourceModel)
} }
@ -68,9 +76,11 @@ func (wfa processingMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, erro
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil { if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err return nil, 404, err
} }
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil) resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results { for _, r := range results {
if !r.VerifyAuth(wfa.PeerID, wfa.Groups) {
continue
}
if err == nil && len(resources) > 0 { if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel) r.ResourceModel = resources[0].(*resource_model.ResourceModel)
} }
@ -102,9 +112,11 @@ func (wfa *processingMongoAccessor) Search(filters *dbs.Filters, search string)
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil { if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err return nil, 404, err
} }
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil) resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results { for _, r := range results {
if !r.VerifyAuth(wfa.PeerID, wfa.Groups) {
continue
}
if err == nil && len(resources) > 0 { if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel) r.ResourceModel = resources[0].(*resource_model.ResourceModel)
} }


@ -1,12 +1,13 @@
package resources package resources
import ( import (
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/resources/data"
"cloud.o-forge.io/core/oc-lib/models/resources/compute" "cloud.o-forge.io/core/oc-lib/models/resources/compute"
"cloud.o-forge.io/core/oc-lib/models/resources/data"
"cloud.o-forge.io/core/oc-lib/models/resources/processing" "cloud.o-forge.io/core/oc-lib/models/resources/processing"
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/resources/storage" "cloud.o-forge.io/core/oc-lib/models/resources/storage"
w "cloud.o-forge.io/core/oc-lib/models/resources/workflow" w "cloud.o-forge.io/core/oc-lib/models/resources/workflow"
"cloud.o-forge.io/core/oc-lib/models/utils"
) )
// AbstractResource is the struct containing all of the attributes commons to all ressources // AbstractResource is the struct containing all of the attributes commons to all ressources
@ -18,40 +19,68 @@ type ResourceSet struct {
Datas []string `bson:"datas,omitempty" json:"datas,omitempty"` Datas []string `bson:"datas,omitempty" json:"datas,omitempty"`
Storages []string `bson:"storages,omitempty" json:"storages,omitempty"` Storages []string `bson:"storages,omitempty" json:"storages,omitempty"`
Processings []string `bson:"processings,omitempty" json:"processings,omitempty"` Processings []string `bson:"processings,omitempty" json:"processings,omitempty"`
Computes []string `bson:"computes,omitempty" json:"computes,omitempty"` Computes []string `bson:"computes,omitempty" json:"computes,omitempty"`
Workflows []string `bson:"workflows,omitempty" json:"workflows,omitempty"` Workflows []string `bson:"workflows,omitempty" json:"workflows,omitempty"`
DataResources []*data.DataResource `bson:"-" json:"data_resources,omitempty"` DataResources []*data.DataResource `bson:"-" json:"data_resources,omitempty"`
StorageResources []*storage.StorageResource `bson:"-" json:"storage_resources,omitempty"` StorageResources []*storage.StorageResource `bson:"-" json:"storage_resources,omitempty"`
ProcessingResources []*processing.ProcessingResource `bson:"-" json:"processing_resources,omitempty"` ProcessingResources []*processing.ProcessingResource `bson:"-" json:"processing_resources,omitempty"`
ComputeResources []*compute.ComputeResource `bson:"-" json:"compute_resources,omitempty"` ComputeResources []*compute.ComputeResource `bson:"-" json:"compute_resources,omitempty"`
WorkflowResources []*w.WorkflowResource `bson:"-" json:"workflow_resources,omitempty"` WorkflowResources []*w.WorkflowResource `bson:"-" json:"workflow_resources,omitempty"`
} }
func (r *ResourceSet) Fill(peerID string, groups []string) {
for k, v := range map[utils.DBObject][]string{
(&data.DataResource{}): r.Datas,
(&compute.ComputeResource{}): r.Computes,
(&storage.StorageResource{}): r.Storages,
(&processing.ProcessingResource{}): r.Processings,
(&w.WorkflowResource{}): r.Workflows,
} {
for _, id := range v {
d, _, e := k.GetAccessor(peerID, groups, nil).LoadOne(id)
if e == nil {
switch k.(type) {
case *data.DataResource:
r.DataResources = append(r.DataResources, d.(*data.DataResource))
case *compute.ComputeResource:
r.ComputeResources = append(r.ComputeResources, d.(*compute.ComputeResource))
case *storage.StorageResource:
r.StorageResources = append(r.StorageResources, d.(*storage.StorageResource))
case *processing.ProcessingResource:
r.ProcessingResources = append(r.ProcessingResources, d.(*processing.ProcessingResource))
case *w.WorkflowResource:
r.WorkflowResources = append(r.WorkflowResources, d.(*w.WorkflowResource))
}
}
}
}
}
type ItemResource struct { type ItemResource struct {
Data *data.DataResource `bson:"data,omitempty" json:"data,omitempty"` Data *data.DataResource `bson:"data,omitempty" json:"data,omitempty"`
Processing *processing.ProcessingResource `bson:"processing,omitempty" json:"processing,omitempty"` Processing *processing.ProcessingResource `bson:"processing,omitempty" json:"processing,omitempty"`
Storage *storage.StorageResource `bson:"storage,omitempty" json:"storage,omitempty"` Storage *storage.StorageResource `bson:"storage,omitempty" json:"storage,omitempty"`
Compute *compute.ComputeResource `bson:"compute,omitempty" json:"compute,omitempty"` Compute *compute.ComputeResource `bson:"compute,omitempty" json:"compute,omitempty"`
Workflow *w.WorkflowResource `bson:"workflow,omitempty" json:"workflow,omitempty"` Workflow *w.WorkflowResource `bson:"workflow,omitempty" json:"workflow,omitempty"`
} }
func (i *ItemResource) GetAbstractRessource() *resource_model.AbstractResource { func (i *ItemResource) GetAbstractRessource() *resource_model.AbstractResource {
if(i.Data != nil){ if i.Data != nil {
return &i.Data.AbstractResource return &i.Data.AbstractResource
} }
if(i.Processing != nil){ if i.Processing != nil {
return &i.Processing.AbstractResource return &i.Processing.AbstractResource
} }
if(i.Storage != nil){ if i.Storage != nil {
return &i.Storage.AbstractResource return &i.Storage.AbstractResource
} }
if(i.Compute != nil){ if i.Compute != nil {
return &i.Compute.AbstractResource return &i.Compute.AbstractResource
} }
if(i.Workflow != nil){ if i.Workflow != nil {
return &i.Workflow.AbstractResource return &i.Workflow.AbstractResource
} }
return nil return nil
} }
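Fill resolves the persisted ID lists into the typed resource slices, loading each entry with the caller's peer ID and groups, so unauthorized items are simply absent from the hydrated set. A usage sketch, assuming the package path matches the layout above and using placeholder UUIDs:

import "cloud.o-forge.io/core/oc-lib/models/resources"

// Hypothetical hydration of a stored set: only the ID slices are persisted,
// Fill resolves them into the typed slices with the caller's identity.
func hydrate(peerID string, groups []string) *resources.ResourceSet {
	set := &resources.ResourceSet{
		Datas:    []string{"data-uuid-1"},
		Computes: []string{"compute-uuid-1"},
	}
	set.Fill(peerID, groups)
	return set // DataResources / ComputeResources now hold what the caller may access
}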


@ -2,7 +2,10 @@ package resource_model
import ( import (
"encoding/json" "encoding/json"
"slices"
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
"github.com/google/uuid" "github.com/google/uuid"
@ -26,9 +29,39 @@ type AbstractResource struct {
OwnerLogo string `json:"owner_logo,omitempty" bson:"owner_logo,omitempty"` // OwnerLogo is the owner logo of the resource OwnerLogo string `json:"owner_logo,omitempty" bson:"owner_logo,omitempty"` // OwnerLogo is the owner logo of the resource
SourceUrl string `json:"source_url,omitempty" bson:"source_url,omitempty" validate:"required"` // SourceUrl is the source URL of the resource SourceUrl string `json:"source_url,omitempty" bson:"source_url,omitempty" validate:"required"` // SourceUrl is the source URL of the resource
PeerID string `json:"peer_id,omitempty" bson:"peer_id,omitempty" validate:"required"` // PeerID is the ID of the peer getting this resource PeerID string `json:"peer_id,omitempty" bson:"peer_id,omitempty" validate:"required"` // PeerID is the ID of the peer getting this resource
Price string `json:"price,omitempty" bson:"price,omitempty"` // Price is the price of access to the resource
License string `json:"license,omitempty" bson:"license,omitempty"` // License is the license of the resource License string `json:"license,omitempty" bson:"license,omitempty"` // License is the license of the resource
ResourceModel *ResourceModel `json:"resource_model,omitempty" bson:"resource_model,omitempty"` // ResourceModel is the model of the resource ResourceModel *ResourceModel `json:"resource_model,omitempty" bson:"resource_model,omitempty"` // ResourceModel is the model of the resource
AllowedPeersGroup map[string][]string `json:"allowed_peers_group,omitempty" bson:"allowed_peers_group,omitempty"` // AllowedPeersGroup is the group of allowed peers
Price string `json:"price,omitempty" bson:"price,omitempty"` // Price is the price of access to the resource
Currency string `json:"currency,omitempty" bson:"currency,omitempty"` // Currency is the currency of the price
}
func (abs *AbstractResource) VerifyAuth(peerID string, groups []string) bool {
if grps, ok := abs.AllowedPeersGroup[peerID]; ok {
if slices.Contains(grps, "*") {
return true
}
for _, grp := range grps {
if slices.Contains(groups, grp) {
return true
}
}
}
return false
}
func (abs *AbstractResource) GetResourceFilter(search string) *dbs.Filters {
return &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
},
}
} }
/* /*
@ -68,6 +101,13 @@ func (abs *AbstractResource) GetModelReadOnly(cat string, key string) interface{
return abs.ResourceModel.Model[cat][key].ReadOnly return abs.ResourceModel.Model[cat][key].ReadOnly
} }
func (d *AbstractResource) Trim() *AbstractResource {
if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: d.PeerID}}).IsMySelf(); !ok {
d.AllowedPeersGroup = map[string][]string{}
}
return d
}
type Model struct { type Model struct {
Type string `json:"type,omitempty" bson:"type,omitempty"` // Type is the type of the model Type string `json:"type,omitempty" bson:"type,omitempty"` // Type is the type of the model
ReadOnly bool `json:"readonly,omitempty" bson:"readonly,omitempty"` // ReadOnly is the readonly of the model ReadOnly bool `json:"readonly,omitempty" bson:"readonly,omitempty"` // ReadOnly is the readonly of the model
@ -99,24 +139,28 @@ func (d *ResourceModel) GetName() string {
return d.UUID return d.UUID
} }
func (d *ResourceModel) GetAccessor(caller *tools.HTTPCaller) utils.Accessor { func (abs *ResourceModel) VerifyAuth(peerID string, groups []string) bool {
return true
}
func (d *ResourceModel) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
data := &ResourceModelMongoAccessor{} data := &ResourceModelMongoAccessor{}
data.Init(tools.RESOURCE_MODEL, caller) data.Init(tools.RESOURCE_MODEL, peerID, groups, caller)
return data return data
} }
func (dma *ResourceModel) Deserialize(j map[string]interface{}) utils.DBObject { func (dma *ResourceModel) Deserialize(j map[string]interface{}, obj utils.DBObject) utils.DBObject {
b, err := json.Marshal(j) b, err := json.Marshal(j)
if err != nil { if err != nil {
return nil return nil
} }
json.Unmarshal(b, dma) json.Unmarshal(b, obj)
return dma return obj
} }
func (dma *ResourceModel) Serialize() map[string]interface{} { func (dma *ResourceModel) Serialize(obj utils.DBObject) map[string]interface{} {
var m map[string]interface{} var m map[string]interface{}
b, err := json.Marshal(dma) b, err := json.Marshal(obj)
if err != nil { if err != nil {
return nil return nil
} }
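VerifyAuth grants access when the requesting peer appears in AllowedPeersGroup with either a "*" wildcard or at least one group the caller belongs to, and Trim drops the map entirely when the storing peer is not the local one, so remote copies never carry someone else's grants. A worked example using only the field and method introduced above:

import "cloud.o-forge.io/core/oc-lib/models/resources/resource_model"

func authExamples() {
	res := &resource_model.AbstractResource{
		AllowedPeersGroup: map[string][]string{
			"peer-a": {"*"},     // any group presented by peer-a is accepted
			"peer-b": {"admin"}, // peer-b only gets in through its admin group
		},
	}
	_ = res.VerifyAuth("peer-a", []string{"guest"}) // true: wildcard entry
	_ = res.VerifyAuth("peer-b", []string{"guest"}) // false: no matching group
	_ = res.VerifyAuth("peer-c", []string{"admin"}) // false: peer not listed at all
}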


@ -14,6 +14,10 @@ type ResourceModelMongoAccessor struct {
* Nothing special here, just the basic CRUD operations * Nothing special here, just the basic CRUD operations
*/ */
func New() *ResourceModelMongoAccessor {
return &ResourceModelMongoAccessor{}
}
func (wfa *ResourceModelMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) { func (wfa *ResourceModelMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
return wfa.GenericDeleteOne(id, wfa) return wfa.GenericDeleteOne(id, wfa)
} }


@ -1,9 +1,7 @@
package storage package storage
import ( import (
"encoding/json" "cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
) )
@ -58,27 +56,8 @@ type StorageResource struct {
Throughput string `bson:"throughput,omitempty" json:"throughput,omitempty"` // Throughput is the throughput of the storage Throughput string `bson:"throughput,omitempty" json:"throughput,omitempty"` // Throughput is the throughput of the storage
} }
func (dma *StorageResource) Deserialize(j map[string]interface{}) utils.DBObject { func (d *StorageResource) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
b, err := json.Marshal(j) data := New() // Create a new instance of the accessor
if err != nil { data.Init(tools.STORAGE_RESOURCE, peerID, groups, caller) // Initialize the accessor with the STORAGE_RESOURCE model type
return nil
}
json.Unmarshal(b, dma)
return dma
}
func (dma *StorageResource) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}
func (d *StorageResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.STORAGE_RESOURCE, caller) // Initialize the accessor with the STORAGE_RESOURCE model type
return data return data
} }


@ -1,9 +1,11 @@
package storage package storage
import ( import (
"errors"
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo" "cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/models/resource_model" "cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
) )
@ -13,7 +15,11 @@ type storageMongoAccessor struct {
// New creates a new instance of the storageMongoAccessor // New creates a new instance of the storageMongoAccessor
func New() *storageMongoAccessor { func New() *storageMongoAccessor {
return &storageMongoAccessor{} return &storageMongoAccessor{
utils.AbstractAccessor{
ResourceModelAccessor: resource_model.New(),
},
}
} }
/* /*
@ -26,16 +32,16 @@ func (sma *storageMongoAccessor) DeleteOne(id string) (utils.DBObject, int, erro
func (sma *storageMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) { func (sma *storageMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
set.(*StorageResource).ResourceModel = nil set.(*StorageResource).ResourceModel = nil
return sma.GenericUpdateOne(set, id, sma, &StorageResource{}) return sma.GenericUpdateOne(set.(*StorageResource).Trim(), id, sma, &StorageResource{})
} }
func (sma *storageMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) { func (sma *storageMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
data.(*StorageResource).ResourceModel = nil data.(*StorageResource).ResourceModel = nil
return sma.GenericStoreOne(data, sma) return sma.GenericStoreOne(data.(*StorageResource).Trim(), sma)
} }
func (sma *storageMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) { func (sma *storageMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return sma.GenericStoreOne(data, sma) return sma.StoreOne(data)
} }
func (sma *storageMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) { func (sma *storageMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
@ -49,8 +55,10 @@ func (sma *storageMongoAccessor) LoadOne(id string) (utils.DBObject, int, error)
} }
res_mongo.Decode(&storage) res_mongo.Decode(&storage)
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil) if !storage.VerifyAuth(sma.PeerID, sma.Groups) {
resources, _, err := accessor.Search(nil, sma.GetType()) return nil, 403, errors.New("You are not allowed to access this collaborative area")
}
resources, _, err := sma.ResourceModelAccessor.Search(nil, sma.GetType())
if err == nil && len(resources) > 0 { if err == nil && len(resources) > 0 {
storage.ResourceModel = resources[0].(*resource_model.ResourceModel) storage.ResourceModel = resources[0].(*resource_model.ResourceModel)
} }
@ -68,9 +76,11 @@ func (wfa storageMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error)
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil { if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err return nil, 404, err
} }
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil) resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results { for _, r := range results {
if !r.VerifyAuth(wfa.PeerID, wfa.Groups) {
continue
}
if err == nil && len(resources) > 0 { if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel) r.ResourceModel = resources[0].(*resource_model.ResourceModel)
} }
@ -102,9 +112,11 @@ func (wfa *storageMongoAccessor) Search(filters *dbs.Filters, search string) ([]
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil { if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err return nil, 404, err
} }
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil) resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results { for _, r := range results {
if !r.VerifyAuth(wfa.PeerID, wfa.Groups) {
continue
}
if err == nil && len(resources) > 0 { if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel) r.ResourceModel = resources[0].(*resource_model.ResourceModel)
} }


@ -3,7 +3,7 @@ package storage
import ( import (
"testing" "testing"
"cloud.o-forge.io/core/oc-lib/models/resource_model" "cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"


@ -1,9 +1,7 @@
package oclib package oclib
import ( import (
"encoding/json" "cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
) )
@ -15,27 +13,8 @@ type WorkflowResource struct {
WorkflowID string `bson:"workflow_id,omitempty" json:"workflow_id,omitempty"` // WorkflowID is the ID of the native workflow WorkflowID string `bson:"workflow_id,omitempty" json:"workflow_id,omitempty"` // WorkflowID is the ID of the native workflow
} }
func (d *WorkflowResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor { func (d *WorkflowResource) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor data := New() // Create a new instance of the accessor
data.Init(tools.WORKFLOW_RESOURCE, caller) // Initialize the accessor with the WORKFLOW_RESOURCE model type data.Init(tools.WORKFLOW_RESOURCE, peerID, groups, caller) // Initialize the accessor with the WORKFLOW_RESOURCE model type
return data return data
} }
func (dma *WorkflowResource) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}
func (dma *WorkflowResource) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}


@ -1,9 +1,11 @@
package oclib package oclib
import ( import (
"errors"
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo" "cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/models/resource_model" "cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
) )
@ -12,7 +14,11 @@ type workflowResourceMongoAccessor struct {
} }
func New() *workflowResourceMongoAccessor { func New() *workflowResourceMongoAccessor {
return &workflowResourceMongoAccessor{} return &workflowResourceMongoAccessor{
utils.AbstractAccessor{
ResourceModelAccessor: resource_model.New(),
},
}
} }
func (wfa *workflowResourceMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) { func (wfa *workflowResourceMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
@ -21,22 +27,22 @@ func (wfa *workflowResourceMongoAccessor) DeleteOne(id string) (utils.DBObject,
func (wfa *workflowResourceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) { func (wfa *workflowResourceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
set.(*WorkflowResource).ResourceModel = nil set.(*WorkflowResource).ResourceModel = nil
return wfa.GenericUpdateOne(set, id, wfa, &WorkflowResource{}) return wfa.GenericUpdateOne(set.(*WorkflowResource).Trim(), id, wfa, &WorkflowResource{})
} }
func (wfa *workflowResourceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) { func (wfa *workflowResourceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
data.(*WorkflowResource).ResourceModel = nil data.(*WorkflowResource).ResourceModel = nil
return wfa.GenericStoreOne(data, wfa) return wfa.GenericStoreOne(data.(*WorkflowResource).Trim(), wfa)
} }
func (wfa *workflowResourceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) { func (wfa *workflowResourceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
res, _, _ := wfa.LoadOne(data.GetID()) res, _, _ := wfa.LoadOne(data.GetID())
data.(*WorkflowResource).WorkflowID = data.GetID() data.(*WorkflowResource).WorkflowID = data.GetID()
if res == nil { if res == nil {
return wfa.GenericStoreOne(data, wfa) return wfa.GenericStoreOne(data.(*WorkflowResource).Trim(), wfa)
} else { } else {
data.(*WorkflowResource).UUID = res.GetID() data.(*WorkflowResource).UUID = res.GetID()
return wfa.GenericUpdateOne(data, res.GetID(), wfa, &WorkflowResource{}) return wfa.GenericUpdateOne(data.(*WorkflowResource).Trim(), res.GetID(), wfa, &WorkflowResource{})
} }
} }
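CopyOne behaves as an upsert keyed on the source workflow's ID: unknown IDs are stored, known ones are updated under the existing UUID. The same control flow with the Mongo specifics stripped away:

// upsertByID mirrors the CopyOne flow above: store when the id is unknown,
// otherwise update the record that already carries it.
func upsertByID[T any](id string, load func(string) (T, bool), store func(T) error, update func(string, T) error, v T) error {
	if _, found := load(id); !found {
		return store(v) // nothing stored under this id yet
	}
	return update(id, v) // the real code also copies the existing UUID onto v before updating
}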
@ -48,8 +54,10 @@ func (wfa *workflowResourceMongoAccessor) LoadOne(id string) (utils.DBObject, in
return nil, code, err return nil, code, err
} }
res_mongo.Decode(&workflow) res_mongo.Decode(&workflow)
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil) if !workflow.VerifyAuth(wfa.PeerID, wfa.Groups) {
resources, _, err := accessor.Search(nil, wfa.GetType()) return nil, 403, errors.New("You are not allowed to access this collaborative area")
}
resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
if err == nil && len(resources) > 0 { if err == nil && len(resources) > 0 {
workflow.ResourceModel = resources[0].(*resource_model.ResourceModel) workflow.ResourceModel = resources[0].(*resource_model.ResourceModel)
} }
@ -67,9 +75,11 @@ func (wfa workflowResourceMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil { if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err return nil, 404, err
} }
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil) resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results { for _, r := range results {
if !r.VerifyAuth(wfa.PeerID, wfa.Groups) {
continue
}
if err == nil && len(resources) > 0 { if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel) r.ResourceModel = resources[0].(*resource_model.ResourceModel)
} }
@ -101,9 +111,11 @@ func (wfa *workflowResourceMongoAccessor) Search(filters *dbs.Filters, search st
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil { if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err return nil, 404, err
} }
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil) resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results { for _, r := range results {
if !r.VerifyAuth(wfa.PeerID, wfa.Groups) {
continue
}
if err == nil && len(resources) > 0 { if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel) r.ResourceModel = resources[0].(*resource_model.ResourceModel)
} }


@ -3,7 +3,7 @@ package oclib
import ( import (
"testing" "testing"
"cloud.o-forge.io/core/oc-lib/models/resource_model" "cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"


@ -45,22 +45,26 @@ func (ao *AbstractObject) UpToDate() {
} }
// GetAccessor returns the accessor of the object (abstract) // GetAccessor returns the accessor of the object (abstract)
func (dma *AbstractObject) GetAccessor(caller *tools.HTTPCaller) Accessor { func (dma *AbstractObject) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) Accessor {
return nil return nil
} }
func (dma *AbstractObject) Deserialize(j map[string]interface{}) DBObject { func (ao *AbstractObject) VerifyAuth(peerID string, groups []string) bool {
return true
}
func (dma *AbstractObject) Deserialize(j map[string]interface{}, obj DBObject) DBObject {
b, err := json.Marshal(j) b, err := json.Marshal(j)
if err != nil { if err != nil {
return nil return nil
} }
json.Unmarshal(b, dma) json.Unmarshal(b, obj)
return dma return obj
} }
func (dma *AbstractObject) Serialize() map[string]interface{} { func (dma *AbstractObject) Serialize(obj DBObject) map[string]interface{} {
var m map[string]interface{} var m map[string]interface{}
b, err := json.Marshal(dma) b, err := json.Marshal(obj)
if err != nil { if err != nil {
return nil return nil
} }
@ -78,6 +82,14 @@ type AbstractAccessor struct {
Logger zerolog.Logger // Logger is the logger of the accessor, it's a specilized logger for the accessor Logger zerolog.Logger // Logger is the logger of the accessor, it's a specilized logger for the accessor
Type string // Type is the data type of the accessor Type string // Type is the data type of the accessor
Caller *tools.HTTPCaller // Caller is the http caller of the accessor (optionnal) only need in a peer connection Caller *tools.HTTPCaller // Caller is the http caller of the accessor (optionnal) only need in a peer connection
PeerID string // PeerID is the id of the peer
Groups []string // Groups is the list of groups that can access the accessor
ResourceModelAccessor Accessor
}
func (dma *AbstractAccessor) VerifyAuth() string {
return ""
} }
func (dma *AbstractAccessor) GetType() string { func (dma *AbstractAccessor) GetType() string {
@ -89,10 +101,12 @@ func (dma *AbstractAccessor) GetCaller() *tools.HTTPCaller {
} }
// Init initializes the accessor with the data type and the http caller // Init initializes the accessor with the data type and the http caller
func (dma *AbstractAccessor) Init(t tools.DataType, caller *tools.HTTPCaller) { func (dma *AbstractAccessor) Init(t tools.DataType, peerID string, groups []string, caller *tools.HTTPCaller) {
dma.Logger = logs.CreateLogger(t.String()) // Create a logger with the data type dma.Logger = logs.CreateLogger(t.String()) // Create a logger with the data type
dma.Caller = caller // Set the caller dma.Caller = caller
dma.Type = t.String() // Set the data type dma.PeerID = peerID
dma.Groups = groups // Set the peer id and allowed groups
dma.Type = t.String() // Set the data type
} }
// GenericLoadOne loads one object from the database (generic) // GenericLoadOne loads one object from the database (generic)
@ -147,13 +161,13 @@ func (dma *AbstractAccessor) GenericUpdateOne(set DBObject, id string, accessor
if err != nil { if err != nil {
return nil, c, err return nil, c, err
} }
change := set.Serialize() // get the changes change := set.Serialize(set) // get the changes
loaded := r.Serialize() // get the loaded object loaded := r.Serialize(r) // get the loaded object
for k, v := range change { // apply the changes, with a flatten method for k, v := range change { // apply the changes, with a flatten method
loaded[k] = v loaded[k] = v
} }
id, code, err := mongo.MONGOService.UpdateOne(new.Deserialize(loaded), id, accessor.GetType()) id, code, err := mongo.MONGOService.UpdateOne(new.Deserialize(loaded, new), id, accessor.GetType())
if err != nil { if err != nil {
dma.Logger.Error().Msg("Could not update " + id + " to db. Error: " + err.Error()) dma.Logger.Error().Msg("Could not update " + id + " to db. Error: " + err.Error())
return nil, code, err return nil, code, err
@ -161,6 +175,64 @@ func (dma *AbstractAccessor) GenericUpdateOne(set DBObject, id string, accessor
return accessor.LoadOne(id) return accessor.LoadOne(id)
} }
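GenericUpdateOne is a read-merge-write: the stored object and the patch are both flattened to maps, the patch's keys win, and the merged map is decoded back into a fresh object before being written. A self-contained sketch of that merge with encoding/json only (no database):

package main

import (
	"encoding/json"
	"fmt"
)

type doc struct {
	Name  string `json:"name,omitempty"`
	Owner string `json:"owner,omitempty"`
}

// toMap flattens a struct to a map, the same way Serialize does.
func toMap(v any) map[string]interface{} {
	b, _ := json.Marshal(v)
	m := map[string]interface{}{}
	json.Unmarshal(b, &m)
	return m
}

func main() {
	stored := doc{Name: "gpu-node", Owner: "peer-a"}
	patch := doc{Owner: "peer-b"} // partial update: only the owner changes

	merged := toMap(stored)
	for k, v := range toMap(patch) { // patch keys overwrite stored keys
		merged[k] = v
	}

	var out doc
	b, _ := json.Marshal(merged)
	json.Unmarshal(b, &out)
	fmt.Printf("%+v\n", out) // {Name:gpu-node Owner:peer-b}: untouched fields survive the merge
}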
func GenericLoadOne[T DBObject](id string, f func(DBObject) (DBObject, int, error), wfa Accessor) (DBObject, int, error) {
var data T
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
if err != nil {
wfa.GetLogger().Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}
res_mongo.Decode(&data)
if !data.VerifyAuth(wfa.GetPeerID(), wfa.GetGroups()) { // check authorization on the decoded object, not on the zero value
return nil, 403, errors.New("you are not allowed to access this resource")
}
return f(data)
}
func GenericLoadAll[T DBObject](f func(DBObject, []ShallowDBObject) []ShallowDBObject, wfa Accessor) ([]ShallowDBObject, int, error) {
results := []T{}
objs := []ShallowDBObject{}
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
if err != nil {
wfa.GetLogger().Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
if !r.VerifyAuth(wfa.GetPeerID(), wfa.GetGroups()) {
continue
}
objs = f(r, objs)
}
return objs, 200, nil
}
func GenericSearch[T DBObject](filters *dbs.Filters, search string, defaultFilters *dbs.Filters,
f func(DBObject, []ShallowDBObject) []ShallowDBObject, wfa Accessor) ([]ShallowDBObject, int, error) {
results := []T{}
objs := []ShallowDBObject{}
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = defaultFilters
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
if err != nil {
wfa.GetLogger().Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err
}
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
if !r.VerifyAuth(wfa.GetPeerID(), wfa.GetGroups()) {
continue
}
objs = f(r, objs)
}
return objs, 200, nil
}
// GenericLoadOne loads one object from the database (generic) // GenericLoadOne loads one object from the database (generic)
// json expected in entry is a flatted object no need to respect the inheritance hierarchy // json expected in entry is a flatted object no need to respect the inheritance hierarchy
func (dma *AbstractAccessor) GenericRawUpdateOne(set DBObject, id string, accessor Accessor) (DBObject, int, error) { func (dma *AbstractAccessor) GenericRawUpdateOne(set DBObject, id string, accessor Accessor) (DBObject, int, error) {
@ -171,3 +243,13 @@ func (dma *AbstractAccessor) GenericRawUpdateOne(set DBObject, id string, access
} }
return accessor.LoadOne(id) return accessor.LoadOne(id)
} }
func (dma *AbstractAccessor) GetPeerID() string {
return dma.PeerID
}
func (dma *AbstractAccessor) GetGroups() []string {
return dma.Groups
}
func (dma *AbstractAccessor) GetLogger() *zerolog.Logger {
return &dma.Logger
}


@ -3,6 +3,7 @@ package utils
import ( import (
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
"github.com/rs/zerolog"
) )
// ShallowDBObject is an interface that defines the basic methods shallowed version of a DBObject // ShallowDBObject is an interface that defines the basic methods shallowed version of a DBObject
@ -10,8 +11,8 @@ type ShallowDBObject interface {
GenerateID() GenerateID()
GetID() string GetID() string
GetName() string GetName() string
Deserialize(j map[string]interface{}) DBObject Deserialize(j map[string]interface{}, obj DBObject) DBObject
Serialize() map[string]interface{} Serialize(obj DBObject) map[string]interface{}
} }
// DBObject is an interface that defines the basic methods for a DBObject // DBObject is an interface that defines the basic methods for a DBObject
@ -20,15 +21,19 @@ type DBObject interface {
GetID() string GetID() string
GetName() string GetName() string
UpToDate() UpToDate()
Deserialize(j map[string]interface{}) DBObject VerifyAuth(PeerID string, groups []string) bool
Serialize() map[string]interface{} Deserialize(j map[string]interface{}, obj DBObject) DBObject
GetAccessor(caller *tools.HTTPCaller) Accessor Serialize(obj DBObject) map[string]interface{}
GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) Accessor
} }
// Accessor is an interface that defines the basic methods for an Accessor // Accessor is an interface that defines the basic methods for an Accessor
type Accessor interface { type Accessor interface {
Init(t tools.DataType, caller *tools.HTTPCaller) Init(t tools.DataType, peerID string, groups []string, caller *tools.HTTPCaller)
GetType() string GetType() string
GetPeerID() string
GetGroups() []string
GetLogger() *zerolog.Logger
GetCaller() *tools.HTTPCaller GetCaller() *tools.HTTPCaller
Search(filters *dbs.Filters, search string) ([]ShallowDBObject, int, error) Search(filters *dbs.Filters, search string) ([]ShallowDBObject, int, error)
LoadAll() ([]ShallowDBObject, int, error) LoadAll() ([]ShallowDBObject, int, error)
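With these interfaces, a concrete model mostly inherits the contract from the embedded AbstractObject (permissive VerifyAuth, JSON-based Serialize/Deserialize) and only has to supply its own GetAccessor. A hedged sketch of a hypothetical model, assuming the embedding provides the remaining methods as shown above:

import (
	"cloud.o-forge.io/core/oc-lib/models/utils"
	"cloud.o-forge.io/core/oc-lib/tools"
)

// Hypothetical model: the embedded AbstractObject is assumed to supply the
// shared methods (IDs, Serialize/Deserialize, a permissive VerifyAuth),
// leaving only GetAccessor to write.
type Note struct {
	utils.AbstractObject
	Text string `json:"text,omitempty" bson:"text,omitempty"`
}

func (n *Note) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
	return nil // a real model would build its mongo accessor and Init it with its DataType here
}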


@ -1,7 +1,6 @@
package workflow package workflow
import ( import (
"encoding/json"
"errors" "errors"
"cloud.o-forge.io/core/oc-lib/models/peer" "cloud.o-forge.io/core/oc-lib/models/peer"
@ -108,7 +107,7 @@ func (wfa *Workflow) CheckBooking(caller *tools.HTTPCaller) (bool, error) {
if wfa.Graph == nil { // no graph no booking if wfa.Graph == nil { // no graph no booking
return false, nil return false, nil
} }
accessor := (&compute.ComputeResource{}).GetAccessor(nil) accessor := (&compute.ComputeResource{}).GetAccessor("", []string{}, caller)
for _, link := range wfa.Graph.Links { for _, link := range wfa.Graph.Links {
if ok, dc_id := wfa.isDCLink(link); ok { // check if the link is a link between a compute and a resource if ok, dc_id := wfa.isDCLink(link); ok { // check if the link is a link between a compute and a resource
dc, code, _ := accessor.LoadOne(dc_id) dc, code, _ := accessor.LoadOne(dc_id)
@ -129,31 +128,8 @@ func (wfa *Workflow) CheckBooking(caller *tools.HTTPCaller) (bool, error) {
return true, nil return true, nil
} }
func (d *Workflow) GetName() string { func (d *Workflow) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return d.Name data := New(peerID, groups) // Create a new instance of the accessor
} data.Init(tools.WORKFLOW, peerID, groups, caller) // Initialize the accessor with the WORKFLOW model type
func (d *Workflow) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.WORKFLOW, caller) // Initialize the accessor with the WORKFLOW model type
return data return data
} }
func (dma *Workflow) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}
func (dma *Workflow) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}


@ -8,9 +8,9 @@ import (
type WorkflowHistory struct{ Workflow } type WorkflowHistory struct{ Workflow }
func (d *WorkflowHistory) GetAccessor(caller *tools.HTTPCaller) utils.Accessor { func (d *WorkflowHistory) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor data := New(peerID, groups) // Create a new instance of the accessor
data.Init(tools.WORKSPACE_HISTORY, caller) // Initialize the accessor with the WORKSPACE model type data.Init(tools.WORKSPACE_HISTORY, peerID, groups, caller) // Initialize the accessor with the WORKSPACE model type
return data return data
} }
func (r *WorkflowHistory) GenerateID() { func (r *WorkflowHistory) GenerateID() {


@ -22,11 +22,21 @@ import (
type workflowMongoAccessor struct { type workflowMongoAccessor struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller) utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
computeResourceAccessor utils.Accessor
collaborativeAreaAccessor utils.Accessor
executionAccessor utils.Accessor
workspaceAccessor utils.Accessor
} }
// New creates a new instance of the workflowMongoAccessor // New creates a new instance of the workflowMongoAccessor
func New() *workflowMongoAccessor { func New(peerID string, groups []string) *workflowMongoAccessor {
return &workflowMongoAccessor{} return &workflowMongoAccessor{
computeResourceAccessor: (&compute.ComputeResource{}).GetAccessor(peerID, groups, nil),
collaborativeAreaAccessor: (&shallow_collaborative_area.ShallowCollaborativeArea{}).GetAccessor(peerID, groups, nil),
executionAccessor: (&workflow_execution.WorkflowExecution{}).GetAccessor(peerID, groups, nil),
workspaceAccessor: (&workspace.Workspace{}).GetAccessor(peerID, groups, nil),
}
} }
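Injecting the compute, collaborative-area, execution and workspace accessors once in New means the helpers below reuse them with the same peer context instead of rebuilding an accessor on every call. A package-internal sketch of the reuse pattern; the helper name is illustrative:

// Illustrative helper: resolve a compute resource through the injected accessor
// rather than calling (&compute.ComputeResource{}).GetAccessor(nil) inline.
func (wfa *workflowMongoAccessor) loadCompute(id string) (utils.DBObject, bool) {
	dc, code, _ := wfa.computeResourceAccessor.LoadOne(id)
	return dc, code == 200
}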
/* /*
@ -127,7 +137,6 @@ func (wfa *workflowMongoAccessor) book(id string, realData *Workflow, execs []*w
g = realData.Graph g = realData.Graph
} }
if g != nil && g.Links != nil && len(g.Links) > 0 { // if the graph is set and has links then book the workflow (even on ourselves) if g != nil && g.Links != nil && len(g.Links) > 0 { // if the graph is set and has links then book the workflow (even on ourselves)
accessor := (&compute.ComputeResource{}).GetAccessor(nil)
isDCFound := []string{} isDCFound := []string{}
for _, link := range g.Links { for _, link := range g.Links {
if ok, dc_id := realData.isDCLink(link); ok { // check if the link is a link between a compute and a resource booking is only on compute if ok, dc_id := realData.isDCLink(link); ok { // check if the link is a link between a compute and a resource booking is only on compute
@ -135,7 +144,7 @@ func (wfa *workflowMongoAccessor) book(id string, realData *Workflow, execs []*w
continue continue
} // if the compute is already found, skip it } // if the compute is already found, skip it
isDCFound = append(isDCFound, dc_id) isDCFound = append(isDCFound, dc_id)
dc, code, _ := accessor.LoadOne(dc_id) dc, code, _ := wfa.computeResourceAccessor.LoadOne(dc_id)
if code != 200 { if code != 200 {
continue continue
} }
@ -169,8 +178,7 @@ func (wfa *workflowMongoAccessor) share(realData *Workflow, delete bool, caller
return return
} }
for _, sharedID := range realData.Shared { // loop through the shared ids for _, sharedID := range realData.Shared { // loop through the shared ids
access := (&shallow_collaborative_area.ShallowCollaborativeArea{}).GetAccessor(nil) res, code, _ := wfa.collaborativeAreaAccessor.LoadOne(sharedID)
res, code, _ := access.LoadOne(sharedID)
if code != 200 { if code != 200 {
continue continue
} }
@ -186,7 +194,8 @@ func (wfa *workflowMongoAccessor) share(realData *Workflow, delete bool, caller
history.StoreOne(history.MapFromWorkflow(res.(*Workflow))) history.StoreOne(history.MapFromWorkflow(res.(*Workflow)))
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKFLOW, tools.DELETE, map[string]interface{}{}, caller) _, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKFLOW, tools.DELETE, map[string]interface{}{}, caller)
} else { // if the workflow is updated, share the update } else { // if the workflow is updated, share the update
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKFLOW, tools.PUT, res.Serialize(), caller) _, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKFLOW, tools.PUT,
res.Serialize(res), caller)
} }
} }
if err != nil { if err != nil {
@ -211,7 +220,6 @@ func (wfa *workflowMongoAccessor) execution(id string, realData *Workflow, delet
return 409, err return 409, err
} }
accessor := (&workflow_execution.WorkflowExecution{}).GetAccessor(nil)
execs, err := wfa.getExecutions(id, realData) // get the executions of the workflow execs, err := wfa.getExecutions(id, realData) // get the executions of the workflow
if err != nil { if err != nil {
return 422, err return 422, err
@ -226,7 +234,7 @@ func (wfa *workflowMongoAccessor) execution(id string, realData *Workflow, delet
} }
fmt.Println("BOOKING", delete) fmt.Println("BOOKING", delete)
for _, obj := range execs { for _, obj := range execs {
_, code, err := accessor.StoreOne(obj) _, code, err := wfa.executionAccessor.StoreOne(obj)
fmt.Println("EXEC", code, err) fmt.Println("EXEC", code, err)
if code != 200 { if code != 200 {
return code, err return code, err
@ -301,21 +309,20 @@ func (wfa *workflowMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject,
// it stores the workflow resources in a specific workspace to never have a conflict in UI and logic // it stores the workflow resources in a specific workspace to never have a conflict in UI and logic
func (wfa *workflowMongoAccessor) execute(workflow *Workflow, delete bool, active bool) { func (wfa *workflowMongoAccessor) execute(workflow *Workflow, delete bool, active bool) {
accessor := (&workspace.Workspace{}).GetAccessor(nil)
filters := &dbs.Filters{ filters := &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by standard workspace name attached to a workflow Or: map[string][]dbs.Filter{ // filter by standard workspace name attached to a workflow
"abstractobject.name": {{dbs.LIKE.String(), workflow.Name + "_workspace"}}, "abstractobject.name": {{dbs.LIKE.String(), workflow.Name + "_workspace"}},
}, },
} }
resource, _, err := accessor.Search(filters, "") resource, _, err := wfa.workspaceAccessor.Search(filters, "")
if delete { // if delete is set to true, delete the workspace if delete { // if delete is set to true, delete the workspace
for _, r := range resource { for _, r := range resource {
accessor.DeleteOne(r.GetID()) wfa.workspaceAccessor.DeleteOne(r.GetID())
} }
return return
} }
if err == nil && len(resource) > 0 { // if the workspace already exists, update it if err == nil && len(resource) > 0 { // if the workspace already exists, update it
accessor.UpdateOne(&workspace.Workspace{ wfa.workspaceAccessor.UpdateOne(&workspace.Workspace{
Active: active, Active: active,
ResourceSet: resources.ResourceSet{ ResourceSet: resources.ResourceSet{
Datas: workflow.Datas, Datas: workflow.Datas,
@ -326,7 +333,7 @@ func (wfa *workflowMongoAccessor) execute(workflow *Workflow, delete bool, activ
}, },
}, resource[0].GetID()) }, resource[0].GetID())
} else { // if the workspace does not exist, create it } else { // if the workspace does not exist, create it
accessor.StoreOne(&workspace.Workspace{ wfa.workspaceAccessor.StoreOne(&workspace.Workspace{
Active: active, Active: active,
AbstractObject: utils.AbstractObject{Name: workflow.Name + "_workspace"}, AbstractObject: utils.AbstractObject{Name: workflow.Name + "_workspace"},
ResourceSet: resources.ResourceSet{ ResourceSet: resources.ResourceSet{
@ -354,7 +361,6 @@ func (wfa *workflowMongoAccessor) LoadOne(id string) (utils.DBObject, int, error
if (workflow.Schedule.End != nil && now.After(*workflow.Schedule.End)) || (workflow.Schedule.End == nil && workflow.Schedule.Start != nil && now.After(*workflow.Schedule.Start)) { // if the start date is passed, then you can book if (workflow.Schedule.End != nil && now.After(*workflow.Schedule.End)) || (workflow.Schedule.End == nil && workflow.Schedule.Start != nil && now.After(*workflow.Schedule.Start)) { // if the start date is passed, then you can book
workflow.ScheduleActive = false workflow.ScheduleActive = false
wfa.GenericRawUpdateOne(&workflow, id, wfa) wfa.GenericRawUpdateOne(&workflow, id, wfa)
} // if the start date is passed, update the executions } // if the start date is passed, update the executions
} }
wfa.execute(&workflow, false, true) // if no workspace is attached to the workflow, create it wfa.execute(&workflow, false, true) // if no workspace is attached to the workflow, create it
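The execute() helper above keys the workspace lookup on the conventional "<workflow name>_workspace" name. A sketch of that lookup, using the named filter fields introduced for the workspace accessor further down (field names as shown there):

// Inside execute(): find the workspace attached to this workflow by name,
// then update it if present or create it otherwise.
filters := &dbs.Filters{
	Or: map[string][]dbs.Filter{
		"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: workflow.Name + "_workspace"}},
	},
}
resource, _, err := wfa.workspaceAccessor.Search(filters, "")
if err == nil && len(resource) > 0 {
	// already present: UpdateOne with the workflow's resource set
} else {
	// missing: StoreOne a fresh "<name>_workspace"
}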

View File

@ -12,7 +12,7 @@ func TestStoreOneWorkflow(t *testing.T) {
AbstractObject: utils.AbstractObject{Name: "testWorkflow"}, AbstractObject: utils.AbstractObject{Name: "testWorkflow"},
} }
wma := New() wma := New("", nil)
id, _, _ := wma.StoreOne(&w) id, _, _ := wma.StoreOne(&w)
assert.NotEmpty(t, id) assert.NotEmpty(t, id)
@ -23,7 +23,7 @@ func TestLoadOneWorkflow(t *testing.T) {
AbstractObject: utils.AbstractObject{Name: "testWorkflow"}, AbstractObject: utils.AbstractObject{Name: "testWorkflow"},
} }
wma := New() wma := New("", nil)
new_w, _, _ := wma.StoreOne(&w) new_w, _, _ := wma.StoreOne(&w)
assert.Equal(t, w, new_w) assert.Equal(t, w, new_w)
} }
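The updated tests exercise the constructor with an empty peer context; a variant with explicit values only changes the arguments. A sketch (peer ID and group are illustrative):

func TestStoreOneWorkflowWithPeerContext(t *testing.T) {
	w := Workflow{
		AbstractObject: utils.AbstractObject{Name: "testWorkflow"},
	}
	wma := New("peer-123", []string{"group-a"}) // illustrative peer context
	id, _, _ := wma.StoreOne(&w)
	assert.NotEmpty(t, id)
}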

View File

@ -107,10 +107,6 @@ func (wfa *WorkflowExecution) ArgoStatusToState(status string) *WorkflowExecutio
return wfa return wfa
} }
func (ao *WorkflowExecution) GetID() string {
return ao.UUID
}
func (r *WorkflowExecution) GenerateID() { func (r *WorkflowExecution) GenerateID() {
r.UUID = uuid.New().String() r.UUID = uuid.New().String()
} }
@ -119,29 +115,8 @@ func (d *WorkflowExecution) GetName() string {
return d.UUID + "_" + d.ExecDate.String() return d.UUID + "_" + d.ExecDate.String()
} }
func (d *WorkflowExecution) GetAccessor(caller *tools.HTTPCaller) utils.Accessor { func (d *WorkflowExecution) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor data := New() // Create a new instance of the accessor
data.Init(tools.WORKFLOW_EXECUTION, caller) // Initialize the accessor with the WORKFLOW_EXECUTION model type data.Init(tools.WORKFLOW_EXECUTION, peerID, groups, caller) // Initialize the accessor with the WORKFLOW_EXECUTION model type
return data return data
} }
// New creates a new instance of the WorkflowExecution from a map
func (dma *WorkflowExecution) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}
// Serialize returns the WorkflowExecution as a map
func (dma *WorkflowExecution) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}
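GetID, Serialize and Deserialize drop out of WorkflowExecution as well, presumably in favour of the shared implementations on the embedded abstract type; only the accessor wiring changes here. A short package-internal usage sketch under the new signature (peer and group values are illustrative, fmt import elided):

// Nil caller: loading executions makes no remote peer calls.
acc := (&WorkflowExecution{}).GetAccessor("peer-123", []string{"group-a"}, nil)
execs, total, err := acc.LoadAll()
fmt.Println(total, err, len(execs))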

View File

@ -1,12 +1,9 @@
package workspace package workspace
import ( import (
"encoding/json"
"cloud.o-forge.io/core/oc-lib/models/resources" "cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
"github.com/google/uuid"
) )
// Workspace is a struct that represents a workspace // Workspace is a struct that represents a workspace
@ -18,43 +15,8 @@ type Workspace struct {
Shared string `json:"shared,omitempty" bson:"shared,omitempty"` // Shared is the ID of the shared workspace Shared string `json:"shared,omitempty" bson:"shared,omitempty"` // Shared is the ID of the shared workspace
} }
func (ao *Workspace) GetID() string { func (d *Workspace) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return ao.UUID data := New(peerID, groups) // Create a new instance of the accessor
} data.Init(tools.WORKSPACE, peerID, groups, caller) // Initialize the accessor with the WORKSPACE model type
func (r *Workspace) GenerateID() {
if r.UUID == "" {
r.UUID = uuid.New().String()
}
}
func (d *Workspace) GetName() string {
return d.Name
}
func (d *Workspace) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.WORKSPACE, caller) // Initialize the accessor with the WORKSPACE model type
return data return data
} }
// New creates a new instance of the workspaceMongoAccessor from a map
func (dma *Workspace) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}
// Serialize returns the workspaceMongoAccessor as a map
func (dma *Workspace) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}
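As with Workflow, the identity helpers leave the Workspace model, which only holds together if the embedded utils.AbstractObject now provides GetID, GenerateID and GetName. A sketch of what callers can rely on under that assumption (workspace name is illustrative, fmt import elided):

ws := &Workspace{AbstractObject: utils.AbstractObject{Name: "demo_workspace"}}
ws.GenerateID()                       // assumed to come from the embedded AbstractObject
fmt.Println(ws.GetID(), ws.GetName()) // likewise assumed to be inherited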

View File

@ -8,9 +8,9 @@ import (
type WorkspaceHistory struct{ Workspace } type WorkspaceHistory struct{ Workspace }
func (d *WorkspaceHistory) GetAccessor(caller *tools.HTTPCaller) utils.Accessor { func (d *WorkspaceHistory) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor data := New(peerID, groups) // Create a new instance of the accessor
data.Init(tools.WORKSPACE_HISTORY, caller) // Initialize the accessor with the WORKSPACE_HISTORY model type data.Init(tools.WORKSPACE_HISTORY, peerID, groups, caller) // Initialize the accessor with the WORKSPACE_HISTORY model type
return data return data
} }
func (r *WorkspaceHistory) GenerateID() { func (r *WorkspaceHistory) GenerateID() {

View File

@ -8,11 +8,6 @@ import (
"cloud.o-forge.io/core/oc-lib/dbs/mongo" "cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area" "cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
"cloud.o-forge.io/core/oc-lib/models/peer" "cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
"cloud.o-forge.io/core/oc-lib/models/resources/data"
"cloud.o-forge.io/core/oc-lib/models/resources/processing"
"cloud.o-forge.io/core/oc-lib/models/resources/storage"
w "cloud.o-forge.io/core/oc-lib/models/resources/workflow"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
) )
@ -23,7 +18,7 @@ type workspaceMongoAccessor struct {
} }
// New creates a new instance of the workspaceMongoAccessor // New creates a new instance of the workspaceMongoAccessor
func New() *workspaceMongoAccessor { func New(peerID string, groups []string) *workspaceMongoAccessor {
return &workspaceMongoAccessor{} return &workspaceMongoAccessor{}
} }
@ -67,7 +62,7 @@ func (wfa *workspaceMongoAccessor) UpdateOne(set utils.DBObject, id string) (uti
func (wfa *workspaceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) { func (wfa *workspaceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
filters := &dbs.Filters{ filters := &dbs.Filters{
Or: map[string][]dbs.Filter{ Or: map[string][]dbs.Filter{
"abstractobject.name": {{dbs.LIKE.String(), data.GetName() + "_workspace"}}, "abstractobject.name": {{Operator: dbs.LIKE.String(), Value: data.GetName() + "_workspace"}},
}, },
} }
res, _, err := wfa.Search(filters, "") // Search for the workspace res, _, err := wfa.Search(filters, "") // Search for the workspace
@ -89,63 +84,6 @@ func (wfa *workspaceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject,
return wfa.GenericStoreOne(data, wfa) return wfa.GenericStoreOne(data, wfa)
} }
/*
This function is used to fill the workspace with the resources
*/
func (wfa *workspaceMongoAccessor) fill(workflow *Workspace) *Workspace {
// Fill the workspace with the resources
if workflow.Datas != nil && len(workflow.Datas) > 0 {
dataAccessor := (&data.DataResource{}).GetAccessor(nil)
for _, id := range workflow.Datas {
d, _, e := dataAccessor.LoadOne(id)
if e == nil {
workflow.DataResources = append(workflow.DataResources, d.(*data.DataResource))
}
}
}
// Fill the workspace with the computes
if workflow.Computes != nil && len(workflow.Computes) > 0 {
dataAccessor := (&compute.ComputeResource{}).GetAccessor(nil)
for _, id := range workflow.Computes {
d, _, e := dataAccessor.LoadOne(id)
if e == nil {
workflow.ComputeResources = append(workflow.ComputeResources, d.(*compute.ComputeResource))
}
}
}
// Fill the workspace with the storages
if workflow.Storages != nil && len(workflow.Storages) > 0 {
dataAccessor := (&storage.StorageResource{}).GetAccessor(nil)
for _, id := range workflow.Storages {
d, _, e := dataAccessor.LoadOne(id)
if e == nil {
workflow.StorageResources = append(workflow.StorageResources, d.(*storage.StorageResource))
}
}
}
// Fill the workspace with the processings
if workflow.Processings != nil && len(workflow.Processings) > 0 {
dataAccessor := (&processing.ProcessingResource{}).GetAccessor(nil)
for _, id := range workflow.Processings {
d, _, e := dataAccessor.LoadOne(id)
if e == nil {
workflow.ProcessingResources = append(workflow.ProcessingResources, d.(*processing.ProcessingResource))
}
}
}
// Fill the workspace with the workflows
if workflow.Workflows != nil && len(workflow.Workflows) > 0 {
dataAccessor := (&w.WorkflowResource{}).GetAccessor(nil)
for _, id := range workflow.Workflows {
d, _, e := dataAccessor.LoadOne(id)
if e == nil {
workflow.WorkflowResources = append(workflow.WorkflowResources, d.(*w.WorkflowResource))
}
}
}
return workflow
}
// LoadOne loads a workspace from the database, given its ID // LoadOne loads a workspace from the database, given its ID
func (wfa *workspaceMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) { func (wfa *workspaceMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
var workflow Workspace var workflow Workspace
@ -155,8 +93,8 @@ func (wfa *workspaceMongoAccessor) LoadOne(id string) (utils.DBObject, int, erro
return nil, code, err return nil, code, err
} }
res_mongo.Decode(&workflow) res_mongo.Decode(&workflow)
workflow.Fill(wfa.PeerID, wfa.Groups)
return wfa.fill(&workflow), 200, nil return &workflow, 200, nil
} }
// LoadAll loads all the workspaces from the database // LoadAll loads all the workspaces from the database
@ -172,7 +110,8 @@ func (wfa workspaceMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error
return nil, 404, err return nil, 404, err
} }
for _, r := range results { for _, r := range results {
objs = append(objs, wfa.fill(&r)) r.Fill(wfa.PeerID, wfa.Groups)
objs = append(objs, &r)
} }
return objs, 200, nil return objs, 200, nil
} }
@ -197,7 +136,8 @@ func (wfa *workspaceMongoAccessor) Search(filters *dbs.Filters, search string) (
return nil, 404, err return nil, 404, err
} }
for _, r := range results { for _, r := range results {
objs = append(objs, wfa.fill(&r)) r.Fill(wfa.PeerID, wfa.Groups)
objs = append(objs, &r)
} }
return objs, 200, nil return objs, 200, nil
} }
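The accessor-level fill() removed above is replaced by workflow.Fill(wfa.PeerID, wfa.Groups) on the model itself. A plausible shape for that method, assuming it mirrors the removed code while passing the peer context down to each resource accessor (only the data branch is spelled out):

// Plausible sketch of Workspace.Fill; computes, storages, processings and
// workflows would follow the same pattern as the data branch below.
func (w *Workspace) Fill(peerID string, groups []string) {
	if len(w.Datas) > 0 {
		acc := (&data.DataResource{}).GetAccessor(peerID, groups, nil)
		for _, id := range w.Datas {
			if d, _, err := acc.LoadOne(id); err == nil {
				w.DataResources = append(w.DataResources, d.(*data.DataResource))
			}
		}
	}
	// ... same for w.Computes, w.Storages, w.Processings, w.Workflows.
}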
@ -210,7 +150,8 @@ func (wfa *workspaceMongoAccessor) share(realData *Workspace, method tools.METHO
if realData == nil || realData.Shared == "" || caller == nil || caller.Disabled { if realData == nil || realData.Shared == "" || caller == nil || caller.Disabled {
return return
} }
access := (&shallow_collaborative_area.ShallowCollaborativeArea{}).GetAccessor(nil) shallow := &shallow_collaborative_area.ShallowCollaborativeArea{}
access := (shallow).GetAccessor(wfa.PeerID, wfa.Groups, nil)
res, code, _ := access.LoadOne(realData.Shared) res, code, _ := access.LoadOne(realData.Shared)
if code != 200 { if code != 200 {
return return
@ -227,7 +168,7 @@ func (wfa *workspaceMongoAccessor) share(realData *Workspace, method tools.METHO
history.StoreOne(history.MapFromWorkspace(res.(*Workspace))) history.StoreOne(history.MapFromWorkspace(res.(*Workspace)))
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKSPACE, tools.DELETE, map[string]interface{}{}, caller) _, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKSPACE, tools.DELETE, map[string]interface{}{}, caller)
} else { // If the workspace is updated, share the update } else { // If the workspace is updated, share the update
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKSPACE, tools.PUT, res.Serialize(), caller) _, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKSPACE, tools.PUT, res.Serialize(res), caller)
} }
} }
if err != nil { if err != nil {