test of a lightest formula of code

parent 15ca06aba8
commit 2816e3ea35

@@ -16,6 +16,7 @@ type Config struct {
	Port      string
	LokiUrl   string
	LogLevel  string
	Whitelist bool
}

func (c Config) GetUrl() string {
@@ -37,19 +38,11 @@ func GetConfig() *Config {
}

func SetConfig(mongoUrl string, database string, natsUrl string, lokiUrl string, logLevel string) *Config {
	/*once.Do(func() {
		instance = &Config{
			MongoUrl:      mongoUrl,
			MongoDatabase: database,
			NATSUrl:       natsUrl,
			LokiUrl:       lokiUrl,
			LogLevel:      logLevel,
		}
	})*/
	GetConfig().MongoUrl = mongoUrl
	GetConfig().MongoDatabase = database
	GetConfig().NATSUrl = natsUrl
	GetConfig().LokiUrl = lokiUrl
	GetConfig().LogLevel = logLevel
	GetConfig().Whitelist = true
	return GetConfig()
}
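For orientation, a minimal sketch of how a caller might initialise this configuration. It assumes the struct above lives in the oc-lib config package that later hunks import; the URLs and log level are made-up values, not part of the commit:

	// Illustrative sketch only, not part of the commit.
	package main

	import (
		"fmt"

		"cloud.o-forge.io/core/oc-lib/config"
	)

	func main() {
		// SetConfig now writes through the GetConfig() singleton and forces Whitelist to true.
		c := config.SetConfig("mongodb://localhost:27017", "oc", "nats://localhost:4222", "http://localhost:3100", "debug")
		fmt.Println(c.GetUrl(), c.Whitelist) // Whitelist prints true
	}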

@@ -15,10 +15,10 @@ import (
	"cloud.o-forge.io/core/oc-lib/models/collaborative_area"
	"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
	"cloud.o-forge.io/core/oc-lib/models/peer"
	"cloud.o-forge.io/core/oc-lib/models/resource_model"
	"cloud.o-forge.io/core/oc-lib/models/resources/compute"
	"cloud.o-forge.io/core/oc-lib/models/resources/data"
	"cloud.o-forge.io/core/oc-lib/models/resources/processing"
	"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
	"cloud.o-forge.io/core/oc-lib/models/resources/storage"
	w "cloud.o-forge.io/core/oc-lib/models/resources/workflow"
	"cloud.o-forge.io/core/oc-lib/models/utils"
@@ -48,7 +48,7 @@ const (
	WORKSPACE          = tools.WORKSPACE
	WORKFLOW_EXECUTION = tools.WORKFLOW_EXECUTION
	PEER               = tools.PEER
	SHARED_WORKSPACE   = tools.COLLABORATIVE_AREA
	COLLABORATIVE_AREA = tools.COLLABORATIVE_AREA
	RULE               = tools.RULE
	BOOKING            = tools.BOOKING
)
@@ -158,7 +158,7 @@ func SetConfig(mongoUrl string, database string, natsUrl string, lokiUrl string,
	If not we will store it
	Resource model is the model that will define the structure of the resources
*/
	accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
	accessor := (&resource_model.ResourceModel{}).GetAccessor("", []string{}, nil)
	for _, model := range []string{tools.DATA_RESOURCE.String(), tools.PROCESSING_RESOURCE.String(), tools.STORAGE_RESOURCE.String(), tools.COMPUTE_RESOURCE.String(), tools.WORKFLOW_RESOURCE.String()} {
		data, code, _ := accessor.Search(nil, model)
		if code == 404 || len(data) == 0 {
@@ -225,6 +225,17 @@ func GetConfLoader() *onion.Onion {
	return config.GetConfLoader()
}

type Request struct {
	collection LibDataEnum
	peerID     string
	groups     []string
	caller     *tools.HTTPCaller
}

func NewRequest(collection LibDataEnum, peerID string, groups []string, caller *tools.HTTPCaller) *Request {
	return &Request{collection: collection, peerID: peerID, groups: groups, caller: caller}
}
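Request bundles the collection, the requesting peer, its groups and the optional HTTP caller that the old package-level functions took as trailing arguments. A hedged usage sketch of the intended call pattern; the package alias oclib and the IDs are illustrative, while LoadAll, LoadOne and Search are the methods defined below:

	// Illustrative sketch only.
	req := oclib.NewRequest(oclib.WORKFLOW, "peer-uuid-123", []string{"admin"}, nil)
	all := req.LoadAll()                            // was oclib.LoadAll(oclib.WORKFLOW, caller)
	one := req.LoadOne("workflow-uuid-456")         // was oclib.LoadOne(oclib.WORKFLOW, id, caller)
	hits := req.Search(nil, "demo", oclib.WORKFLOW) // Search still receives the collection explicitly
	_, _, _ = all, one, hits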

/*
* Search will search for the data in the database
* @param filters *dbs.Filters
@@ -233,18 +244,14 @@ func GetConfLoader() *onion.Onion {
* @param c ...*tools.HTTPCaller
* @return data LibDataShallow
*/
func Search(filters *dbs.Filters, word string, collection LibDataEnum, c ...*tools.HTTPCaller) (data LibDataShallow) {
func (r *Request) Search(filters *dbs.Filters, word string, collection LibDataEnum) (data LibDataShallow) {
	defer func() { // recover the panic
		if r := recover(); r != nil {
			tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in Search : "+fmt.Sprintf("%v", r)))
			data = LibDataShallow{Data: nil, Code: 500, Err: "Panic recovered in LoadAll : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
		}
	}()
	var caller *tools.HTTPCaller // define the caller
	if len(c) > 0 {
		caller = c[0]
	}
	d, code, err := models.Model(collection.EnumIndex()).GetAccessor(caller).Search(filters, word)
	d, code, err := models.Model(collection.EnumIndex()).GetAccessor(r.peerID, r.groups, r.caller).Search(filters, word)
	if err != nil {
		data = LibDataShallow{Data: d, Code: code, Err: err.Error()}
		return
@@ -259,18 +266,14 @@ func Search(filters *dbs.Filters, word string, collection LibDataEnum, c ...*too
* @param c ...*tools.HTTPCaller
* @return data LibDataShallow
*/
func LoadAll(collection LibDataEnum, c ...*tools.HTTPCaller) (data LibDataShallow) {
func (r *Request) LoadAll() (data LibDataShallow) {
	defer func() { // recover the panic
		if r := recover(); r != nil {
			tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in LoadAll : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
			data = LibDataShallow{Data: nil, Code: 500, Err: "Panic recovered in LoadAll : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
		}
	}()
	var caller *tools.HTTPCaller // define the caller
	if len(c) > 0 {
		caller = c[0]
	}
	d, code, err := models.Model(collection.EnumIndex()).GetAccessor(caller).LoadAll()
	d, code, err := models.Model(r.collection.EnumIndex()).GetAccessor(r.peerID, r.groups, r.caller).LoadAll()
	if err != nil {
		data = LibDataShallow{Data: d, Code: code, Err: err.Error()}
		return
@@ -286,18 +289,14 @@ func LoadAll(collection LibDataEnum, c ...*tools.HTTPCaller) (data LibDataShallo
* @param c ...*tools.HTTPCaller
* @return data LibData
*/
func LoadOne(collection LibDataEnum, id string, c ...*tools.HTTPCaller) (data LibData) {
func (r *Request) LoadOne(id string) (data LibData) {
	defer func() { // recover the panic
		if r := recover(); r != nil {
			tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in LoadOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
			data = LibData{Data: nil, Code: 500, Err: "Panic recovered in LoadOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
		}
	}()
	var caller *tools.HTTPCaller // define the caller
	if len(c) > 0 {
		caller = c[0]
	}
	d, code, err := models.Model(collection.EnumIndex()).GetAccessor(caller).LoadOne(id)
	d, code, err := models.Model(r.collection.EnumIndex()).GetAccessor(r.peerID, r.groups, r.caller).LoadOne(id)
	if err != nil {
		data = LibData{Data: d, Code: code, Err: err.Error()}
		return
@@ -314,19 +313,15 @@ func LoadOne(collection LibDataEnum, id string, c ...*tools.HTTPCaller) (data Li
* @param c ...*tools.HTTPCaller
* @return data LibData
*/
func UpdateOne(collection LibDataEnum, set map[string]interface{}, id string, c ...*tools.HTTPCaller) (data LibData) {
func (r *Request) UpdateOne(set map[string]interface{}, id string) (data LibData) {
	defer func() { // recover the panic
		if r := recover(); r != nil {
			tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in UpdateOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
			data = LibData{Data: nil, Code: 500, Err: "Panic recovered in UpdateOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
		}
	}()
	var caller *tools.HTTPCaller // define the caller
	if len(c) > 0 {
		caller = c[0]
	}
	model := models.Model(collection.EnumIndex())
	d, code, err := model.GetAccessor(caller).UpdateOne(model.Deserialize(set), id)
	model := models.Model(r.collection.EnumIndex())
	d, code, err := model.GetAccessor(r.peerID, r.groups, r.caller).UpdateOne(model.Deserialize(set, model), id)
	if err != nil {
		data = LibData{Data: d, Code: code, Err: err.Error()}
		return
@@ -342,18 +337,14 @@ func UpdateOne(collection LibDataEnum, set map[string]interface{}, id string, c
* @param c ...*tools.HTTPCaller
* @return data LibData
*/
func DeleteOne(collection LibDataEnum, id string, c ...*tools.HTTPCaller) (data LibData) {
func (r *Request) DeleteOne(id string) (data LibData) {
	defer func() { // recover the panic
		if r := recover(); r != nil {
			tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in DeleteOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
			data = LibData{Data: nil, Code: 500, Err: "Panic recovered in DeleteOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
		}
	}()
	var caller *tools.HTTPCaller // define the caller
	if len(c) > 0 {
		caller = c[0]
	}
	d, code, err := models.Model(collection.EnumIndex()).GetAccessor(caller).DeleteOne(id)
	d, code, err := models.Model(r.collection.EnumIndex()).GetAccessor(r.peerID, r.groups, r.caller).DeleteOne(id)
	if err != nil {
		data = LibData{Data: d, Code: code, Err: err.Error()}
		return
@@ -369,19 +360,15 @@ func DeleteOne(collection LibDataEnum, id string, c ...*tools.HTTPCaller) (data
* @param c ...*tools.HTTPCaller
* @return data LibData
*/
func StoreOne(collection LibDataEnum, object map[string]interface{}, c ...*tools.HTTPCaller) (data LibData) {
func (r *Request) StoreOne(object map[string]interface{}) (data LibData) {
	defer func() { // recover the panic
		if r := recover(); r != nil {
			tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in StoreOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
			data = LibData{Data: nil, Code: 500, Err: "Panic recovered in StoreOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
		}
	}()
	var caller *tools.HTTPCaller // define the caller
	if len(c) > 0 {
		caller = c[0]
	}
	model := models.Model(collection.EnumIndex())
	d, code, err := model.GetAccessor(caller).StoreOne(model.Deserialize(object))
	model := models.Model(r.collection.EnumIndex())
	d, code, err := model.GetAccessor(r.peerID, r.groups, r.caller).StoreOne(model.Deserialize(object, model))
	if err != nil {
		data = LibData{Data: d, Code: code, Err: err.Error()}
		return
@@ -397,7 +384,7 @@ func StoreOne(collection LibDataEnum, object map[string]interface{}, c ...*tools
* @param c ...*tools.HTTPCaller
* @return data LibData
*/
func CopyOne(collection LibDataEnum, object map[string]interface{}, c ...*tools.HTTPCaller) (data LibData) {
func CopyOne(collection LibDataEnum, object map[string]interface{}, peerID string, groups []string, c ...*tools.HTTPCaller) (data LibData) {
	defer func() { // recover the panic
		if r := recover(); r != nil {
			tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in CopyOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
@@ -409,7 +396,7 @@ func CopyOne(collection LibDataEnum, object map[string]interface{}, c ...*tools.
		caller = c[0]
	}
	model := models.Model(collection.EnumIndex())
	d, code, err := model.GetAccessor(caller).CopyOne(model.Deserialize(object))
	d, code, err := model.GetAccessor(peerID, groups, caller).CopyOne(model.Deserialize(object, model))
	if err != nil {
		data = LibData{Data: d, Code: code, Err: err.Error()}
		return
@@ -421,72 +408,72 @@ func CopyOne(collection LibDataEnum, object map[string]interface{}, c ...*tools.
// ================ CAST ========================= //

func (l *LibData) ToDataResource() *data.DataResource {
	if l.Data.GetAccessor(nil).GetType() == tools.DATA_RESOURCE.String() {
	if l.Data.GetAccessor("", []string{}, nil).GetType() == tools.DATA_RESOURCE.String() {
		return l.Data.(*data.DataResource)
	}
	return nil
}

func (l *LibData) ToComputeResource() *compute.ComputeResource {
	if l.Data != nil && l.Data.GetAccessor(nil).GetType() == tools.COMPUTE_RESOURCE.String() {
	if l.Data != nil && l.Data.GetAccessor("", []string{}, nil).GetType() == tools.COMPUTE_RESOURCE.String() {
		return l.Data.(*compute.ComputeResource)
	}
	return nil
}
func (l *LibData) ToStorageResource() *storage.StorageResource {
	if l.Data.GetAccessor(nil).GetType() == tools.STORAGE_RESOURCE.String() {
	if l.Data.GetAccessor("", []string{}, nil).GetType() == tools.STORAGE_RESOURCE.String() {
		return l.Data.(*storage.StorageResource)
	}
	return nil
}
func (l *LibData) ToProcessingResource() *processing.ProcessingResource {
	if l.Data.GetAccessor(nil).GetType() == tools.PROCESSING_RESOURCE.String() {
	if l.Data.GetAccessor("", []string{}, nil).GetType() == tools.PROCESSING_RESOURCE.String() {
		return l.Data.(*processing.ProcessingResource)
	}
	return nil
}
func (l *LibData) ToWorkflowResource() *w.WorkflowResource {
	if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW_RESOURCE.String() {
	if l.Data.GetAccessor("", []string{}, nil).GetType() == tools.WORKFLOW_RESOURCE.String() {
		return l.Data.(*w.WorkflowResource)
	}
	return nil
}
func (l *LibData) ToPeer() *peer.Peer {
	if l.Data.GetAccessor(nil).GetType() == tools.PEER.String() {
	if l.Data.GetAccessor("", []string{}, nil).GetType() == tools.PEER.String() {
		return l.Data.(*peer.Peer)
	}
	return nil
}

func (l *LibData) ToWorkflow() *w2.Workflow {
	if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW.String() {
	if l.Data.GetAccessor("", []string{}, nil).GetType() == tools.WORKFLOW.String() {
		return l.Data.(*w2.Workflow)
	}
	return nil
}
func (l *LibData) ToWorkspace() *workspace.Workspace {
	if l.Data.GetAccessor(nil).GetType() == tools.WORKSPACE.String() {
	if l.Data.GetAccessor("", []string{}, nil).GetType() == tools.WORKSPACE.String() {
		return l.Data.(*workspace.Workspace)
	}
	return nil
}

func (l *LibData) ToCollaborativeArea() *collaborative_area.CollaborativeArea {
	if l.Data.GetAccessor(nil).GetType() == tools.COLLABORATIVE_AREA.String() {
	if l.Data.GetAccessor("", []string{}, nil).GetType() == tools.COLLABORATIVE_AREA.String() {
		return l.Data.(*collaborative_area.CollaborativeArea)
	}
	return nil
}

func (l *LibData) ToRule() *rule.Rule {
	if l.Data.GetAccessor(nil).GetType() == tools.COLLABORATIVE_AREA.String() {
	if l.Data.GetAccessor("", []string{}, nil).GetType() == tools.COLLABORATIVE_AREA.String() {
		return l.Data.(*rule.Rule)
	}
	return nil
}

func (l *LibData) ToWorkflowExecution() *workflow_execution.WorkflowExecution {
	if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW_EXECUTION.String() {
	if l.Data.GetAccessor("", []string{}, nil).GetType() == tools.WORKFLOW_EXECUTION.String() {
		return l.Data.(*workflow_execution.WorkflowExecution)
	}
	return nil

@@ -1,14 +1,12 @@
package booking

import (
	"encoding/json"
	"time"

	"cloud.o-forge.io/core/oc-lib/dbs"
	"cloud.o-forge.io/core/oc-lib/models/utils"
	"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
	"cloud.o-forge.io/core/oc-lib/tools"
	"github.com/google/uuid"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

@@ -28,7 +26,7 @@ func (wfa *Booking) CheckBooking(id string, start time.Time, end *time.Time) (bo
		return true, nil
	}
	e := *end
	accessor := wfa.GetAccessor(nil)
	accessor := New()
	res, code, err := accessor.Search(&dbs.Filters{
		And: map[string][]dbs.Filter{ // check if there is a booking on the same compute resource by filtering on the compute_resource_id, the state and the execution date
			"compute_resource_id": {{Operator: dbs.EQUAL.String(), Value: id}},
@@ -51,41 +49,12 @@ func (wfa *Booking) ArgoStatusToState(status string) *Booking {
	return wfa
}

func (ao *Booking) GetID() string {
	return ao.UUID
}

func (r *Booking) GenerateID() {
	if r.UUID == "" {
		r.UUID = uuid.New().String()
	}
}

func (d *Booking) GetName() string {
	return d.UUID + "_" + d.ExecDate.String()
}

func (d *Booking) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
	data := New() // Create a new instance of the accessor
	data.Init(tools.BOOKING, caller) // Initialize the accessor with the BOOKING model type
func (d *Booking) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
	data := New() // Create a new instance of the accessor
	data.Init(tools.BOOKING, peerID, groups, caller) // Initialize the accessor with the BOOKING model type
	return data
}
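The GetAccessor(peerID, groups, caller) signature above is the change that repeats for every model in this commit. A hedged sketch of the new call shape; the peer and booking IDs are made up:

	// Illustrative sketch only.
	acc := (&booking.Booking{}).GetAccessor("peer-uuid-123", []string{"admin"}, nil) // was GetAccessor(nil)
	obj, code, err := acc.LoadOne("booking-uuid-456")
	_, _, _ = obj, code, err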

func (dma *Booking) Deserialize(j map[string]interface{}) utils.DBObject {
	b, err := json.Marshal(j)
	if err != nil {
		return nil
	}
	json.Unmarshal(b, dma)
	return dma
}

func (dma *Booking) Serialize() map[string]interface{} {
	var m map[string]interface{}
	b, err := json.Marshal(dma)
	if err != nil {
		return nil
	}
	json.Unmarshal(b, &m)
	return m
}

@@ -1,7 +1,7 @@
package collaborative_area

import (
	"encoding/json"
	"slices"
	"time"

	"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
@@ -10,7 +10,6 @@ import (
	w "cloud.o-forge.io/core/oc-lib/models/workflow"
	"cloud.o-forge.io/core/oc-lib/models/workspace"
	"cloud.o-forge.io/core/oc-lib/tools"
	"github.com/google/uuid"
)

type CollaborativeAreaRule struct {
@@ -34,7 +33,7 @@ type CollaborativeArea struct {
	Attributes map[string]interface{} `json:"attributes,omitempty" bson:"attributes,omitempty"` // Attributes is the attributes of the workspace (TODO)
	Workspaces []string `json:"workspaces" bson:"workspaces"` // Workspaces is the workspaces of the workspace
	Workflows []string `json:"workflows" bson:"workflows"` // Workflows is the workflows of the workspace
	Peers []string `json:"peers" bson:"peers"` // Peers is the peers of the workspace
	AllowedPeersGroup map[string][]string `json:"allowed_peers_group,omitempty" bson:"allowed_peers_group,omitempty"` // AllowedPeersGroup is the group of allowed peers
	Rules []string `json:"rules" bson:"rules,omitempty"` // Rules is the rules of the workspace

	SharedRules []*rule.Rule `json:"shared_rules,omitempty" bson:"-"` // SharedRules is the shared rules of the workspace
@@ -43,41 +42,31 @@ type CollaborativeArea struct {
	SharedPeers []*peer.Peer `json:"shared_peers,omitempty" bson:"-"` // SharedPeers is the shared peers of the workspace
}

func (ao *CollaborativeArea) GetID() string {
	return ao.UUID
}

func (r *CollaborativeArea) GenerateID() {
	if r.UUID == "" {
		r.UUID = uuid.New().String()
func (ao *CollaborativeArea) VerifyAuth(peerID string, groups []string) bool {
	if ao.AllowedPeersGroup != nil && len(ao.AllowedPeersGroup) > 0 {
		if grps, ok := ao.AllowedPeersGroup[peerID]; ok {
			if slices.Contains(grps, "*") {
				return true
			}
			for _, grp := range grps {
				if slices.Contains(groups, grp) {
					return true
				}
			}
		}
	}
	return false
}
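VerifyAuth is the new gate used by the Mongo accessors further down: a caller passes if its peer is listed in AllowedPeersGroup with the wildcard "*" or with at least one of the caller's groups. A small illustration with made-up peer IDs and groups:

	// Illustrative sketch only.
	ca := &collaborative_area.CollaborativeArea{
		AllowedPeersGroup: map[string][]string{
			"peer-a": {"*"},        // any group coming from peer-a is accepted
			"peer-b": {"analysts"}, // peer-b must present the "analysts" group
		},
	}
	ca.VerifyAuth("peer-a", nil)                  // true: wildcard entry
	ca.VerifyAuth("peer-b", []string{"analysts"}) // true: group overlap
	ca.VerifyAuth("peer-b", []string{"guests"})   // false: no common group
	ca.VerifyAuth("peer-c", []string{"analysts"}) // false: peer not listed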

func (d *CollaborativeArea) GetName() string {
	return d.Name
}

func (d *CollaborativeArea) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
	data := New() // Create a new instance of the accessor
	data.Init(tools.COLLABORATIVE_AREA, caller) // Initialize the accessor with the SHARED_WORKSPACE model type
func (d *CollaborativeArea) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
	data := New(peerID, groups) // Create a new instance of the accessor
	data.Init(tools.COLLABORATIVE_AREA, peerID, groups, caller) // Initialize the accessor with the SHARED_WORKSPACE model type
	return data
}

func (dma *CollaborativeArea) Deserialize(j map[string]interface{}) utils.DBObject {
	b, err := json.Marshal(j)
	if err != nil {
		return nil
func (d *CollaborativeArea) Trim() *CollaborativeArea {
	if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: d.CreatorID}}).IsMySelf(); !ok {
		d.AllowedPeersGroup = map[string][]string{}
	}
	json.Unmarshal(b, dma)
	return dma
}

func (dma *CollaborativeArea) Serialize() map[string]interface{} {
	var m map[string]interface{}
	b, err := json.Marshal(dma)
	if err != nil {
		return nil
	}
	json.Unmarshal(b, &m)
	return m
	return d
}
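Trim takes the place of the removed Deserialize/Serialize pair in the store and update paths below: when the area's CreatorID is not the local peer, the AllowedPeersGroup whitelist is wiped before persisting. A rough illustration; the peer ID is made up, and IsMySelf needs a configured peer store to answer:

	// Illustrative sketch only.
	ca := &collaborative_area.CollaborativeArea{CreatorID: "remote-peer-uuid"}
	ca.AllowedPeersGroup = map[string][]string{"remote-peer-uuid": {"*"}}
	ca = ca.Trim() // AllowedPeersGroup is emptied because the creator is not the local peer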

@@ -6,6 +6,7 @@ import (
	"slices"
	"time"

	"cloud.o-forge.io/core/oc-lib/config"
	"cloud.o-forge.io/core/oc-lib/dbs"
	"cloud.o-forge.io/core/oc-lib/dbs/mongo"
	"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
@@ -19,19 +20,30 @@ import (
// SharedWorkspace is a struct that represents a collaborative area
type collaborativeAreaMongoAccessor struct {
	utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)

	workspaceAccessor utils.Accessor
	workflowAccessor  utils.Accessor
	peerAccessor      utils.Accessor
	ruleAccessor      utils.Accessor
}

// New creates a new instance of the collaborativeAreaMongoAccessor
func New() *collaborativeAreaMongoAccessor {
	return &collaborativeAreaMongoAccessor{}
func New(peerID string, groups []string) *collaborativeAreaMongoAccessor {
	return &collaborativeAreaMongoAccessor{
		workspaceAccessor: (&workspace.Workspace{}).GetAccessor(peerID, groups, nil),
		workflowAccessor:  (&w.Workflow{}).GetAccessor(peerID, groups, nil),
		peerAccessor:      (&peer.Peer{}).GetAccessor(peerID, groups, nil),
		ruleAccessor:      (&rule.Rule{}).GetAccessor(peerID, groups, nil),
	}
}
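The workspace, workflow, peer and rule sub-accessors are now built once, already scoped to the requesting peer and its groups, instead of being re-created with GetAccessor(nil) inside every method. A hedged sketch of the intended construction; the IDs are illustrative:

	// Illustrative sketch only.
	acc := collaborative_area.New("peer-uuid-123", []string{"admin"})
	// sharedWorkspace, sharedWorkflow and enrich below reuse acc's pre-scoped sub-accessors
	// rather than building new ones per call.
	area, code, err := acc.LoadOne("collaborative-area-uuid")
	_, _, _ = area, code, err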

// DeleteOne deletes a collaborative area from the database, given its ID, it automatically share to peers if the workspace is shared
func (wfa *collaborativeAreaMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
	set, code, _ := wfa.LoadOne(id)
	if code == 200 { // always delete on peers than recreate
		wfa.deleteToPeer(set.(*CollaborativeArea))
	set, code, err := wfa.LoadOne(id)
	if code != 200 {
		return nil, code, err
	}
	wfa.deleteToPeer(set.(*CollaborativeArea))
	wfa.sharedWorkflow(&CollaborativeArea{}, id) // create all shared workflows
	wfa.sharedWorkspace(&CollaborativeArea{}, id) // create all collaborative areas
	return wfa.GenericDeleteOne(id, wfa) // then add on yours
@@ -42,20 +54,19 @@ sharedWorkspace is a function that shares the collaborative area to the peers
*/
func (wfa *collaborativeAreaMongoAccessor) sharedWorkspace(shared *CollaborativeArea, id string) {
	eldest, code, _ := wfa.LoadOne(id) // get the eldest
	accessor := (&workspace.Workspace{}).GetAccessor(nil)
	if code == 200 {
		eld := eldest.(*CollaborativeArea)
		if eld.Workspaces != nil { // update all your workspaces in the eldest by replacing shared ref by an empty string
			for _, v := range eld.Workspaces {
				accessor.UpdateOne(&workspace.Workspace{Shared: ""}, v)
				wfa.workspaceAccessor.UpdateOne(&workspace.Workspace{Shared: ""}, v)
				if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKSPACE] == nil {
					continue
				}
				paccess := (&peer.Peer{}) // send to all peers
				for _, p := range shared.Peers { // delete the collaborative area on the peer
					b, err := paccess.LaunchPeerExecution(p, v, tools.WORKSPACE, tools.DELETE, nil, wfa.Caller)
				paccess := (&peer.Peer{}) // send to all peers
				for k := range shared.AllowedPeersGroup { // delete the collaborative area on the peer
					b, err := paccess.LaunchPeerExecution(k, v, tools.WORKSPACE, tools.DELETE, nil, wfa.Caller)
					if err != nil && b == nil {
						wfa.Logger.Error().Msg("Could not send to peer " + p + ". Error: " + err.Error())
						wfa.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
					}
				}
			}
@@ -63,20 +74,20 @@ func (wfa *collaborativeAreaMongoAccessor) sharedWorkspace(shared *Collaborative
	}
	if shared.Workspaces != nil {
		for _, v := range shared.Workspaces { // update all the collaborative areas
			workspace, code, _ := accessor.UpdateOne(&workspace.Workspace{Shared: shared.UUID}, v) // add the shared ref to workspace
			workspace, code, _ := wfa.workspaceAccessor.UpdateOne(&workspace.Workspace{Shared: shared.UUID}, v) // add the shared ref to workspace
			if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKSPACE] == nil {
				continue
			}
			for _, p := range shared.Peers {
			for k := range shared.AllowedPeersGroup {
				if code != 200 {
					continue
				}
				paccess := (&peer.Peer{}) // send to all peers, add the collaborative area on the peer
				s := workspace.Serialize()
				s["name"] = fmt.Sprintf("%v", s["name"]) + "_" + p
				b, err := paccess.LaunchPeerExecution(p, v, tools.WORKSPACE, tools.POST, s, wfa.Caller)
				s := workspace.Serialize(workspace)
				s["name"] = fmt.Sprintf("%v", s["name"]) + "_" + k
				b, err := paccess.LaunchPeerExecution(k, v, tools.WORKSPACE, tools.POST, s, wfa.Caller)
				if err != nil && b == nil {
					wfa.Logger.Error().Msg("Could not send to peer " + p + ". Error: " + err.Error())
					wfa.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
				}
			}
		}
@@ -87,13 +98,12 @@ func (wfa *collaborativeAreaMongoAccessor) sharedWorkspace(shared *Collaborative

// sharedWorkflow is a function that shares the shared workflow to the peers
func (wfa *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeArea, id string) {
	accessor := (&w.Workflow{}).GetAccessor(nil)
	eldest, code, _ := wfa.LoadOne(id) // get the eldest
	if code == 200 {
		eld := eldest.(*CollaborativeArea)
		if eld.Workflows != nil {
			for _, v := range eld.Workflows {
				data, code, _ := accessor.LoadOne(v)
				data, code, _ := wfa.workflowAccessor.LoadOne(v)
				if code == 200 {
					s := data.(*w.Workflow)
					new := []string{}
@@ -104,15 +114,15 @@ func (wfa *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeA
					} // kick the shared reference in your old shared workflow
					n := &w.Workflow{}
					n.Shared = new
					accessor.UpdateOne(n, v)
					wfa.workflowAccessor.UpdateOne(n, v)
					if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKFLOW] == nil {
						continue
					}
					paccess := (&peer.Peer{}) // send to all peers
					for _, p := range shared.Peers { // delete the shared workflow on the peer
						b, err := paccess.LaunchPeerExecution(p, v, tools.WORKFLOW, tools.DELETE, nil, wfa.Caller)
					paccess := (&peer.Peer{}) // send to all peers
					for k := range shared.AllowedPeersGroup { // delete the shared workflow on the peer
						b, err := paccess.LaunchPeerExecution(k, v, tools.WORKFLOW, tools.DELETE, nil, wfa.Caller)
						if err != nil && b == nil {
							wfa.Logger.Error().Msg("Could not send to peer " + p + ". Error: " + err.Error())
							wfa.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
						}
					}
				}
@@ -121,23 +131,23 @@ func (wfa *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeA
	}
	if shared.Workflows != nil { // update all the shared workflows
		for _, v := range shared.Workflows {
			data, code, _ := accessor.LoadOne(v)
			data, code, _ := wfa.workflowAccessor.LoadOne(v)
			if code == 200 {
				s := data.(*w.Workflow)
				if !slices.Contains(s.Shared, id) {
					s.Shared = append(s.Shared, id)
					workflow, code, _ := accessor.UpdateOne(s, v)
					workflow, code, _ := wfa.workflowAccessor.UpdateOne(s, v)
					if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKFLOW] == nil {
						continue
					}
					paccess := (&peer.Peer{})
					for _, p := range shared.Peers { // send to all peers
					for k := range shared.AllowedPeersGroup { // send to all peers
						if code == 200 {
							s := workflow.Serialize() // add the shared workflow on the peer
							s["name"] = fmt.Sprintf("%v", s["name"]) + "_" + p
							b, err := paccess.LaunchPeerExecution(p, shared.UUID, tools.WORKFLOW, tools.POST, s, wfa.Caller)
							s := workflow.Serialize(workflow) // add the shared workflow on the peer
							s["name"] = fmt.Sprintf("%v", s["name"]) + "_" + k
							b, err := paccess.LaunchPeerExecution(k, shared.UUID, tools.WORKFLOW, tools.POST, s, wfa.Caller)
							if err != nil && b == nil {
								wfa.Logger.Error().Msg("Could not send to peer " + p + ". Error: " + err.Error())
								wfa.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
							}
						}
					}
@@ -155,13 +165,13 @@ func (wfa *collaborativeAreaMongoAccessor) deleteToPeer(shared *CollaborativeAre
		return
	}
	paccess := (&peer.Peer{})
	for _, v := range shared.Peers {
		if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: v}}).IsMySelf(); ok {
	for k, _ := range shared.AllowedPeersGroup {
		if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: k}}).IsMySelf(); ok {
			continue
		}
		b, err := paccess.LaunchPeerExecution(v, shared.UUID, tools.COLLABORATIVE_AREA, tools.DELETE, nil, wfa.Caller)
		b, err := paccess.LaunchPeerExecution(k, shared.UUID, tools.COLLABORATIVE_AREA, tools.DELETE, nil, wfa.Caller)
		if err != nil && b == nil {
			wfa.Logger.Error().Msg("Could not send to peer " + v + ". Error: " + err.Error())
			wfa.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
		}
	}
}
@@ -173,22 +183,21 @@ func (wfa *collaborativeAreaMongoAccessor) sendToPeer(shared *CollaborativeArea)
	}

	paccess := (&peer.Peer{})
	for _, v := range shared.Peers {
		if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: v}}).IsMySelf(); ok || shared.IsSent {
	for k := range shared.AllowedPeersGroup {
		if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: k}}).IsMySelf(); ok || shared.IsSent {
			continue
		}
		shared.IsSent = true
		b, err := paccess.LaunchPeerExecution(v, v, tools.COLLABORATIVE_AREA, tools.POST, shared.Serialize(), wfa.Caller)
		b, err := paccess.LaunchPeerExecution(k, k, tools.COLLABORATIVE_AREA, tools.POST, shared.Serialize(shared), wfa.Caller)
		if err != nil && b == nil {
			wfa.Logger.Error().Msg("Could not send to peer " + v + ". Error: " + err.Error())
			wfa.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
		}
	}
}

// UpdateOne updates a collaborative area in the database, given its ID and the new data, it automatically share to peers if the workspace is shared
func (wfa *collaborativeAreaMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
	res, code, err := wfa.GenericUpdateOne(set.(*CollaborativeArea), id, wfa, &CollaborativeArea{})
	fmt.Println("UpdateOne", set, res, code, err)
	res, code, err := wfa.GenericUpdateOne(set.(*CollaborativeArea).Trim(), id, wfa, &CollaborativeArea{})
	// wfa.deleteToPeer(res.(*CollaborativeArea)) // delete the collaborative area on the peer
	wfa.sharedWorkflow(res.(*CollaborativeArea), id) // replace all shared workflows
	wfa.sharedWorkspace(res.(*CollaborativeArea), id) // replace all collaborative areas (not shared worspace obj but workspace one)
@@ -198,9 +207,14 @@ func (wfa *collaborativeAreaMongoAccessor) UpdateOne(set utils.DBObject, id stri

// StoreOne stores a collaborative area in the database, it automatically share to peers if the workspace is shared
func (wfa *collaborativeAreaMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
	_, id := (&peer.Peer{}).IsMySelf() // get the local peer
	data.(*CollaborativeArea).CreatorID = id // set the creator id
	data.(*CollaborativeArea).Peers = append(data.(*CollaborativeArea).Peers, id) // add the creator id to the peers
	_, id := (&peer.Peer{}).IsMySelf() // get the local peer
	data.(*CollaborativeArea).CreatorID = id // set the creator id
	// add the creator id to the peers
	if config.GetConfig().Whitelist {
		data.(*CollaborativeArea).AllowedPeersGroup[id] = []string{"*"}
	} else {
		data.(*CollaborativeArea).AllowedPeersGroup[id] = []string{}
	}
	// then reset the shared fields
	if data.(*CollaborativeArea).Workspaces == nil {
		data.(*CollaborativeArea).Workspaces = []string{}
@@ -219,12 +233,12 @@ func (wfa *collaborativeAreaMongoAccessor) StoreOne(data utils.DBObject) (utils.
	}
	data.(*CollaborativeArea).CollaborativeAreaRule.CreatedAt = time.Now().UTC()
	// retrieve or proper peer
	dd, code, err := (&peer.Peer{}).GetAccessor(nil).Search(nil, "0")
	dd, code, err := wfa.peerAccessor.Search(nil, "0")
	if code != 200 || len(dd) == 0 {
		return nil, code, errors.New("Could not retrieve the peer" + err.Error())
	}
	data.(*CollaborativeArea).CollaborativeAreaRule.Creator = dd[0].GetID()
	d, code, err := wfa.GenericStoreOne(data.(*CollaborativeArea), wfa)
	d, code, err := wfa.GenericStoreOne(data.(*CollaborativeArea).Trim(), wfa)
	if code == 200 {
		wfa.sharedWorkflow(d.(*CollaborativeArea), d.GetID()) // create all shared workflows
		wfa.sharedWorkspace(d.(*CollaborativeArea), d.GetID()) // create all collaborative areas
@@ -240,8 +254,7 @@ func (wfa *collaborativeAreaMongoAccessor) CopyOne(data utils.DBObject) (utils.D

// enrich is a function that enriches the CollaborativeArea with the shared objects
func (wfa *collaborativeAreaMongoAccessor) enrich(sharedWorkspace *CollaborativeArea) *CollaborativeArea {
	access := (&workspace.Workspace{}).GetAccessor(nil)
	res, code, _ := access.Search(&dbs.Filters{
	res, code, _ := wfa.workspaceAccessor.Search(&dbs.Filters{
		Or: map[string][]dbs.Filter{
			"abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Workspaces}},
		},
@@ -251,8 +264,7 @@ func (wfa *collaborativeAreaMongoAccessor) enrich(sharedWorkspace *Collaborative
			sharedWorkspace.SharedWorkspaces = append(sharedWorkspace.SharedWorkspaces, r.(*workspace.Workspace))
		}
	}
	access = (&w.Workflow{}).GetAccessor(nil)
	res, code, _ = access.Search(&dbs.Filters{
	res, code, _ = wfa.workflowAccessor.Search(&dbs.Filters{
		Or: map[string][]dbs.Filter{
			"abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Workflows}},
		},
@@ -262,10 +274,13 @@ func (wfa *collaborativeAreaMongoAccessor) enrich(sharedWorkspace *Collaborative
			sharedWorkspace.SharedWorkflows = append(sharedWorkspace.SharedWorkflows, r.(*w.Workflow))
		}
	}
	access = (&peer.Peer{}).GetAccessor(nil)
	res, code, _ = access.Search(&dbs.Filters{
	peerskey := []string{}
	for k := range sharedWorkspace.AllowedPeersGroup {
		peerskey = append(peerskey, k)
	}
	res, code, _ = wfa.peerAccessor.Search(&dbs.Filters{
		Or: map[string][]dbs.Filter{
			"abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Peers}},
			"abstractobject.id": {{Operator: dbs.IN.String(), Value: peerskey}},
		},
	}, "")
	if code == 200 {
@@ -273,8 +288,7 @@ func (wfa *collaborativeAreaMongoAccessor) enrich(sharedWorkspace *Collaborative
			sharedWorkspace.SharedPeers = append(sharedWorkspace.SharedPeers, r.(*peer.Peer))
		}
	}
	access = (&rule.Rule{}).GetAccessor(nil)
	res, code, _ = access.Search(&dbs.Filters{
	res, code, _ = wfa.ruleAccessor.Search(&dbs.Filters{
		Or: map[string][]dbs.Filter{
			"abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Rules}},
		},
@@ -296,6 +310,9 @@ func (wfa *collaborativeAreaMongoAccessor) LoadOne(id string) (utils.DBObject, i
		return nil, code, err
	}
	res_mongo.Decode(&sharedWorkspace)
	if !sharedWorkspace.VerifyAuth(wfa.PeerID, wfa.Groups) {
		return nil, 403, errors.New("You are not allowed to access this collaborative area")
	}
	return wfa.enrich(&sharedWorkspace), 200, nil // enrich the collaborative area
}

@@ -312,6 +329,9 @@ func (wfa collaborativeAreaMongoAccessor) LoadAll() ([]utils.ShallowDBObject, in
		return nil, 404, err
	}
	for _, r := range results {
		if !r.VerifyAuth(wfa.PeerID, wfa.Groups) {
			continue
		}
		objs = append(objs, wfa.enrich(&r)) // enrich the collaborative area
	}
	return objs, 200, nil
@@ -337,6 +357,9 @@ func (wfa *collaborativeAreaMongoAccessor) Search(filters *dbs.Filters, search s
		return nil, 404, err
	}
	for _, r := range results {
		if !r.VerifyAuth(wfa.PeerID, wfa.Groups) {
			continue
		}
		objs = append(objs, wfa.enrich(&r)) // enrich the collaborative area
	}
	return objs, 200, nil

@@ -1,8 +1,6 @@
package rule

import (
	"encoding/json"

	"cloud.o-forge.io/core/oc-lib/models/utils"
	"cloud.o-forge.io/core/oc-lib/tools"
	"github.com/google/uuid"
@@ -18,39 +16,12 @@ type Rule struct {
	Actions []string `json:"actions,omitempty" bson:"actions,omitempty"` // NOT DEFINITIVE TO SPECIFICATION
}

func (ao *Rule) GetID() string {
	return ao.UUID
}

func (r *Rule) GenerateID() {
	r.UUID = uuid.New().String()
}

func (d *Rule) GetName() string {
	return d.Name
}

func (d *Rule) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
func (d *Rule) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
	data := New()
	data.Init(tools.RULE, caller)
	data.Init(tools.RULE, peerID, groups, caller)
	return data
}

func (dma *Rule) Deserialize(j map[string]interface{}) utils.DBObject {
	b, err := json.Marshal(j)
	if err != nil {
		return nil
	}
	json.Unmarshal(b, dma)
	return dma
}

func (dma *Rule) Serialize() map[string]interface{} {
	var m map[string]interface{}
	b, err := json.Marshal(dma)
	if err != nil {
		return nil
	}
	json.Unmarshal(b, &m)
	return m
}

@@ -1,11 +1,8 @@
package shallow_collaborative_area

import (
	"encoding/json"

	"cloud.o-forge.io/core/oc-lib/models/utils"
	"cloud.o-forge.io/core/oc-lib/tools"
	"github.com/google/uuid"
)

type ShallowCollaborativeArea struct {
@@ -21,41 +18,8 @@ type ShallowCollaborativeArea struct {
	Rules []string `json:"rules,omitempty" bson:"rules,omitempty"`
}

func (ao *ShallowCollaborativeArea) GetID() string {
	return ao.UUID
}

func (r *ShallowCollaborativeArea) GenerateID() {
	if r.UUID == "" {
		r.UUID = uuid.New().String()
	}
}

func (d *ShallowCollaborativeArea) GetName() string {
	return d.Name
}

func (d *ShallowCollaborativeArea) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
func (d *ShallowCollaborativeArea) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
	data := New()
	data.Init(tools.COLLABORATIVE_AREA, caller)
	data.Init(tools.COLLABORATIVE_AREA, peerID, groups, caller)
	return data
}

func (dma *ShallowCollaborativeArea) Deserialize(j map[string]interface{}) utils.DBObject {
	b, err := json.Marshal(j)
	if err != nil {
		return nil
	}
	json.Unmarshal(b, dma)
	return dma
}

func (dma *ShallowCollaborativeArea) Serialize() map[string]interface{} {
	var m map[string]interface{}
	b, err := json.Marshal(dma)
	if err != nil {
		return nil
	}
	json.Unmarshal(b, &m)
	return m
}

@@ -1,12 +1,10 @@
package peer

import (
	"encoding/json"
	"fmt"

	"cloud.o-forge.io/core/oc-lib/models/utils"
	"cloud.o-forge.io/core/oc-lib/tools"
	"github.com/google/uuid"
)

// now write a go enum for the state partner with self, blacklist, partner
@@ -65,7 +63,7 @@ func (ao *Peer) RemoveExecution(exec PeerExecution) {

// IsMySelf checks if the peer is the local peer
func (ao *Peer) IsMySelf() (bool, string) {
	d, code, err := ao.GetAccessor(nil).Search(nil, SELF.String())
	d, code, err := New().Search(nil, SELF.String())
	if code != 200 || err != nil || len(d) == 0 {
		return false, ""
	}
@@ -78,42 +76,8 @@ func (p *Peer) LaunchPeerExecution(peerID string, dataID string, dt tools.DataTy
	p.UUID = peerID
	return cache.LaunchPeerExecution(peerID, dataID, dt, method, body, caller) // Launch the execution on the peer through the cache
}

func (ao *Peer) GetID() string {
	return ao.UUID
}

func (r *Peer) GenerateID() {
	if r.UUID == "" {
		r.UUID = uuid.New().String()
	}
}

func (d *Peer) GetName() string {
	return d.Name
}

func (d *Peer) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
	data := New() // Create a new instance of the accessor
	data.Init(tools.PEER, caller) // Initialize the accessor with the PEER model type
func (d *Peer) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
	data := New() // Create a new instance of the accessor
	data.Init(tools.PEER, peerID, groups, caller) // Initialize the accessor with the PEER model type
	return data
}

func (dma *Peer) Deserialize(j map[string]interface{}) utils.DBObject {
	b, err := json.Marshal(j)
	if err != nil {
		return nil
	}
	json.Unmarshal(b, dma)
	return dma
}

func (dma *Peer) Serialize() map[string]interface{} {
	var m map[string]interface{}
	b, err := json.Marshal(dma)
	if err != nil {
		return nil
	}
	json.Unmarshal(b, &m)
	return m
}

@@ -56,7 +56,7 @@ func (p *PeerCache) urlFormat(url string, dt tools.DataType) string {
// checkPeerStatus checks the status of a peer
func (p *PeerCache) checkPeerStatus(peerID string, appName string, caller *tools.HTTPCaller) (*Peer, bool) {
	api := tools.API{}
	access := (&Peer{}).GetAccessor(nil)
	access := New()
	res, code, _ := access.LoadOne(peerID) // Load the peer from db
	if code != 200 { // no peer no party
		return nil, false
@@ -101,18 +101,18 @@ func (p *PeerCache) LaunchPeerExecution(peerID string, dataID string,
			DataID: dataID,
		}
		mypeer.AddExecution(*pexec)
		mypeer.GetAccessor(nil).UpdateOne(mypeer, peerID) // Update the peer in the db
		New().UpdateOne(mypeer, peerID) // Update the peer in the db
		return nil, errors.New("peer is not reachable")
	} else {
		if mypeer == nil {
			return nil, errors.New("peer not found")
		}
		// If the peer is reachable, launch the execution
		url = p.urlFormat((mypeer.Url)+meth, dt) // Format the URL
		tmp := mypeer.FailedExecution // Get the failed executions list
		mypeer.FailedExecution = []PeerExecution{} // Reset the failed executions list
		mypeer.GetAccessor(nil).UpdateOne(mypeer, peerID) // Update the peer in the db
		for _, v := range tmp { // Retry the failed executions
		url = p.urlFormat((mypeer.Url)+meth, dt) // Format the URL
		tmp := mypeer.FailedExecution // Get the failed executions list
		mypeer.FailedExecution = []PeerExecution{} // Reset the failed executions list
		New().UpdateOne(mypeer, peerID) // Update the peer in the db
		for _, v := range tmp { // Retry the failed executions
			go p.exec(v.Url, tools.ToMethod(v.Method), v.Body, caller)
		}
	}
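The cache behaviour itself is unchanged by the accessor swap: when the target peer is unreachable the call is parked in FailedExecution on the Peer record, and once the peer answers again the parked list is drained and retried. A hedged usage sketch; the peer and data IDs and the httpCaller variable are illustrative:

	// Illustrative sketch only.
	p := &peer.Peer{}
	// Unreachable peers get the request stored in FailedExecution and retried on the next successful contact.
	_, err := p.LaunchPeerExecution("peer-uuid-123", "workspace-uuid-456",
		tools.WORKSPACE, tools.DELETE, nil, httpCaller)
	if err != nil {
		// "peer is not reachable" and "peer not found" are the failure modes shown above
	}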

@@ -1,9 +1,7 @@
package compute

import (
	"encoding/json"

	"cloud.o-forge.io/core/oc-lib/models/resource_model"
	"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
	"cloud.o-forge.io/core/oc-lib/models/utils"
	"cloud.o-forge.io/core/oc-lib/tools"
)
@@ -54,28 +52,9 @@ type ComputeResource struct {
	GPUs []*GPU `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs
}

func (dma *ComputeResource) Deserialize(j map[string]interface{}) utils.DBObject {
	b, err := json.Marshal(j)
	if err != nil {
		return nil
	}
	json.Unmarshal(b, dma)
	return dma
}

func (dma *ComputeResource) Serialize() map[string]interface{} {
	var m map[string]interface{}
	b, err := json.Marshal(dma)
	if err != nil {
		return nil
	}
	json.Unmarshal(b, &m)
	return m
}

func (d *ComputeResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
	data := New()
	data.Init(tools.COMPUTE_RESOURCE, caller)
func (d *ComputeResource) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
	data := New(peerID, groups)
	data.Init(tools.COMPUTE_RESOURCE, peerID, groups, caller)
	return data
}

@@ -2,8 +2,7 @@ package compute

import (
	"cloud.o-forge.io/core/oc-lib/dbs"
	"cloud.o-forge.io/core/oc-lib/dbs/mongo"
	"cloud.o-forge.io/core/oc-lib/models/resource_model"
	"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
	"cloud.o-forge.io/core/oc-lib/models/utils"
)

@@ -12,8 +11,12 @@ type computeMongoAccessor struct {
}

// New creates a new instance of the computeMongoAccessor
func New() *computeMongoAccessor {
	return &computeMongoAccessor{}
func New(peerID string, groups []string) *computeMongoAccessor {
	return &computeMongoAccessor{
		utils.AbstractAccessor{
			ResourceModelAccessor: resource_model.New(),
		},
	}
}

/*
@@ -26,87 +29,48 @@ func (dca *computeMongoAccessor) DeleteOne(id string) (utils.DBObject, int, erro

func (dca *computeMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
	set.(*ComputeResource).ResourceModel = nil
	return dca.GenericUpdateOne(set, id, dca, &ComputeResource{})
	return dca.GenericUpdateOne(set.(*ComputeResource).Trim(), id, dca, &ComputeResource{})
}

func (dca *computeMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
	data.(*ComputeResource).ResourceModel = nil
	return dca.GenericStoreOne(data, dca)
	return dca.GenericStoreOne(data.(*ComputeResource).Trim(), dca)
}

func (dca *computeMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
	return dca.GenericStoreOne(data, dca)
	return dca.StoreOne(data)
}

func (dca *computeMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
	var compute ComputeResource

	res_mongo, code, err := mongo.MONGOService.LoadOne(id, dca.GetType())
	if err != nil {
		dca.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
		return nil, code, err
	}

	res_mongo.Decode(&compute)
	accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
	resources, _, err := accessor.Search(nil, dca.GetType())
	if err == nil && len(resources) > 0 {
		compute.ResourceModel = resources[0].(*resource_model.ResourceModel)
	}
	return &compute, 200, nil
	return utils.GenericLoadOne[*ComputeResource](id, func(d utils.DBObject) (utils.DBObject, int, error) {
		resources, _, err := dca.ResourceModelAccessor.Search(nil, dca.GetType())
		if err == nil && len(resources) > 0 {
			d.(*ComputeResource).ResourceModel = resources[0].(*resource_model.ResourceModel)
		}
		return d, 200, nil
	}, dca)
}
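utils.GenericLoadOne (and GenericLoadAll / GenericSearch below) are not shown in this commit, so their bodies are unknown here; from the call sites they look like type-parameterised helpers that fetch and decode the Mongo document and then hand it to a per-accessor callback for enrichment. A sketch of that shape under those assumptions, written against simplified local interfaces rather than the real utils package:

	// Sketch of the inferred pattern only — NOT the oc-lib implementation.
	type dbObject interface{ GetID() string }

	type decoder interface{ Decode(v any) error }

	type store interface {
		LoadOne(id, collection string) (decoder, int, error)
	}

	func genericLoadOne[T dbObject](db store, collection, id string,
		onLoaded func(dbObject) (dbObject, int, error)) (dbObject, int, error) {
		var obj T
		res, code, err := db.LoadOne(id, collection) // fetch the raw document
		if err != nil {
			return nil, code, err
		}
		if err := res.Decode(&obj); err != nil { // decode into the concrete resource type
			return nil, 500, err
		}
		return onLoaded(obj) // let the accessor enrich it, e.g. attach its ResourceModel
	}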
|
||||
|
||||
func (wfa computeMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||
objs := []utils.ShallowDBObject{}
|
||||
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||
if err != nil {
|
||||
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
var results []ComputeResource
|
||||
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||
for _, r := range results {
|
||||
func (wfa *computeMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||
resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
|
||||
return utils.GenericLoadAll[*ComputeResource](func(d utils.DBObject, a []utils.ShallowDBObject) []utils.ShallowDBObject {
|
||||
if err == nil && len(resources) > 0 {
|
||||
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
d.(*ComputeResource).ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
objs = append(objs, &r) // only get the abstract resource !
|
||||
}
|
||||
return objs, 200, nil
|
||||
a = append(a, d) // only get the abstract resource !
|
||||
return a
|
||||
}, wfa)
|
||||
}
|
||||
|
||||
func (wfa *computeMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||
objs := []utils.ShallowDBObject{}
|
||||
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||
filters = &dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
|
||||
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
},
|
||||
}
|
||||
}
|
||||
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||
if err != nil {
|
||||
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
var results []ComputeResource
|
||||
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||
for _, r := range results {
|
||||
if err == nil && len(resources) > 0 {
|
||||
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
objs = append(objs, &r) // only get the abstract resource !
|
||||
}
|
||||
return objs, 200, nil
|
||||
root := &ComputeResource{}
|
||||
resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
|
||||
return utils.GenericSearch[*ComputeResource](filters, search, root.GetResourceFilter(search),
|
||||
func(d utils.DBObject, a []utils.ShallowDBObject) []utils.ShallowDBObject {
|
||||
if err == nil && len(resources) > 0 {
|
||||
d.(*ComputeResource).ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
a = append(a, d) // only get the abstract resource !
|
||||
return a
|
||||
}, wfa)
|
||||
}
|
||||
|
@ -3,7 +3,7 @@ package compute
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
@ -21,7 +21,7 @@ func TestStoreOneCompute(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
dcma := New()
|
||||
dcma := New("", nil)
|
||||
id, _, _ := dcma.StoreOne(&dc)
|
||||
|
||||
assert.NotEmpty(t, id)
|
||||
@ -39,7 +39,7 @@ func TestLoadOneCompute(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
dcma := New()
|
||||
dcma := New("", nil)
|
||||
new_dc, _, _ := dcma.StoreOne(&dc)
|
||||
|
||||
assert.Equal(t, dc, new_dc)
|
||||
|
@ -1,9 +1,7 @@
|
||||
package data
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
)
|
||||
@ -39,27 +37,8 @@ type DataResource struct {
|
||||
Example string `json:"example,omitempty" bson:"example,omitempty" description:"base64 encoded data"` // Example is an example of the data
|
||||
}
|
||||
|
||||
func (dma *DataResource) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||
b, err := json.Marshal(j)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, dma)
|
||||
return dma
|
||||
}
|
||||
|
||||
func (dma *DataResource) Serialize() map[string]interface{} {
|
||||
var m map[string]interface{}
|
||||
b, err := json.Marshal(dma)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, &m)
|
||||
return m
|
||||
}
|
||||
|
||||
func (d *DataResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New() // Create a new instance of the accessor
|
||||
data.Init(tools.DATA_RESOURCE, caller) // Initialize the accessor with the DATA_RESOURCE model type
|
||||
func (d *DataResource) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New() // Create a new instance of the accessor
|
||||
data.Init(tools.DATA_RESOURCE, peerID, groups, caller) // Initialize the accessor with the DATA_RESOURCE model type
|
||||
return data
|
||||
}
|
||||
|
@ -1,9 +1,11 @@
|
||||
package data
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
mongo "cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
)
|
||||
|
||||
@ -13,7 +15,11 @@ type dataMongoAccessor struct {
|
||||
|
||||
// New creates a new instance of the dataMongoAccessor
|
||||
func New() *dataMongoAccessor {
|
||||
return &dataMongoAccessor{}
|
||||
return &dataMongoAccessor{
|
||||
utils.AbstractAccessor{
|
||||
ResourceModelAccessor: resource_model.New(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -26,16 +32,16 @@ func (dma *dataMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error)
|
||||
|
||||
func (dma *dataMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||
set.(*DataResource).ResourceModel = nil
|
||||
return dma.GenericUpdateOne(set, id, dma, &DataResource{})
|
||||
return dma.GenericUpdateOne(set.(*DataResource).Trim(), id, dma, &DataResource{})
|
||||
}
|
||||
|
||||
func (dma *dataMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
data.(*DataResource).ResourceModel = nil
|
||||
return dma.GenericStoreOne(data, dma)
|
||||
return dma.GenericStoreOne(data.(*DataResource).Trim(), dma)
|
||||
}
|
||||
|
||||
func (dma *dataMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
return dma.GenericStoreOne(data, dma)
|
||||
return dma.StoreOne(data)
|
||||
}
|
||||
|
||||
func (dma *dataMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||
@ -46,8 +52,10 @@ func (dma *dataMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||
return nil, code, err
|
||||
}
|
||||
res_mongo.Decode(&data)
|
||||
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||
resources, _, err := accessor.Search(nil, dma.GetType())
|
||||
if !data.VerifyAuth(dma.PeerID, dma.Groups) {
|
||||
return nil, 403, errors.New("You are not allowed to access this collaborative area")
|
||||
}
|
||||
resources, _, err := dma.ResourceModelAccessor.Search(nil, dma.GetType())
|
||||
if err == nil && len(resources) > 0 {
|
||||
data.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
@ -65,9 +73,11 @@ func (wfa dataMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||
resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
|
||||
for _, r := range results {
|
||||
if !r.VerifyAuth(wfa.PeerID, wfa.Groups) {
|
||||
continue
|
||||
}
|
||||
if err == nil && len(resources) > 0 {
|
||||
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
@ -98,9 +108,11 @@ func (wfa *dataMongoAccessor) Search(filters *dbs.Filters, search string) ([]uti
|
||||
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||
resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
|
||||
for _, r := range results {
|
||||
if !r.VerifyAuth(wfa.PeerID, wfa.Groups) {
|
||||
continue
|
||||
}
|
||||
if err == nil && len(resources) > 0 {
|
||||
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
|
@ -3,7 +3,7 @@ package data
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
@ -1,10 +1,8 @@
|
||||
package processing
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
)
|
||||
@ -29,39 +27,20 @@ type Expose struct {
|
||||
*/
|
||||
type ProcessingResource struct {
|
||||
resource_model.AbstractResource
|
||||
IsService bool `json:"is_service,omitempty" bson:"is_service,omitempty"` // IsService is a flag that indicates if the processing is a service
|
||||
IsService bool `json:"is_service,omitempty" bson:"is_service,omitempty"` // IsService is a flag that indicates if the processing is a service
|
||||
CPUs []*compute.CPU `bson:"cpus,omitempty" json:"cp_us,omitempty"` // CPUs is the list of CPUs
|
||||
GPUs []*compute.GPU `bson:"gpus,omitempty" json:"gp_us,omitempty"` // GPUs is the list of GPUs
|
||||
RAM *compute.RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
|
||||
Storage uint `bson:"storage,omitempty" json:"storage,omitempty"` // Storage is the storage
|
||||
Parallel bool `bson:"parallel,omitempty" json:"parallel,omitempty"` // Parallel is a flag that indicates if the processing is parallel
|
||||
ScalingModel uint `bson:"scaling_model,omitempty" json:"scaling_model,omitempty"` // ScalingModel is the scaling model
|
||||
DiskIO string `bson:"disk_io,omitempty" json:"disk_io,omitempty"` // DiskIO is the disk IO
|
||||
Container *Container `bson:"container,omitempty" json:"container,omitempty"` // Container is the container
|
||||
Expose []Expose `bson:"expose,omitempty" json:"expose,omitempty"` // Expose is the execution
|
||||
Storage uint `bson:"storage,omitempty" json:"storage,omitempty"` // Storage is the storage
|
||||
Parallel bool `bson:"parallel,omitempty" json:"parallel,omitempty"` // Parallel is a flag that indicates if the processing is parallel
|
||||
ScalingModel uint `bson:"scaling_model,omitempty" json:"scaling_model,omitempty"` // ScalingModel is the scaling model
|
||||
DiskIO string `bson:"disk_io,omitempty" json:"disk_io,omitempty"` // DiskIO is the disk IO
|
||||
Container *Container `bson:"container,omitempty" json:"container,omitempty"` // Container is the container
|
||||
Expose []Expose `bson:"expose,omitempty" json:"expose,omitempty"` // Expose is the execution
|
||||
}
|
||||
|
||||
func (dma *ProcessingResource) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||
b, err := json.Marshal(j)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, dma)
|
||||
return dma
|
||||
}
|
||||
|
||||
func (dma *ProcessingResource) Serialize() map[string]interface{} {
|
||||
var m map[string]interface{}
|
||||
b, err := json.Marshal(dma)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, &m)
|
||||
return m
|
||||
}
|
||||
|
||||
func (d *ProcessingResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New() // Create a new instance of the accessor
|
||||
data.Init(tools.PROCESSING_RESOURCE, caller) // Initialize the accessor with the PROCESSING_RESOURCE model type
|
||||
func (d *ProcessingResource) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New() // Create a new instance of the accessor
|
||||
data.Init(tools.PROCESSING_RESOURCE, peerID, groups, caller) // Initialize the accessor with the PROCESSING_RESOURCE model type
|
||||
return data
|
||||
}
|
||||
|
@ -1,9 +1,11 @@
|
||||
package processing
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
)
|
||||
|
||||
@ -13,7 +15,11 @@ type processingMongoAccessor struct {
|
||||
|
||||
// New creates a new instance of the processingMongoAccessor
|
||||
func New() *processingMongoAccessor {
|
||||
return &processingMongoAccessor{}
|
||||
return &processingMongoAccessor{
|
||||
utils.AbstractAccessor{
|
||||
ResourceModelAccessor: resource_model.New(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -26,16 +32,16 @@ func (pma *processingMongoAccessor) DeleteOne(id string) (utils.DBObject, int, e
|
||||
|
||||
func (pma *processingMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||
set.(*ProcessingResource).ResourceModel = nil
|
||||
return pma.GenericUpdateOne(set, id, pma, &ProcessingResource{})
|
||||
return pma.GenericUpdateOne(set.(*ProcessingResource).Trim(), id, pma, &ProcessingResource{})
|
||||
}
|
||||
|
||||
func (pma *processingMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
data.(*ProcessingResource).ResourceModel = nil
|
||||
return pma.GenericStoreOne(data, pma)
|
||||
return pma.GenericStoreOne(data.(*ProcessingResource).Trim(), pma)
|
||||
}
|
||||
|
||||
func (pma *processingMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
return pma.GenericStoreOne(data, pma)
|
||||
return pma.StoreOne(data)
|
||||
}
|
||||
|
||||
func (pma *processingMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||
@ -49,8 +55,10 @@ func (pma *processingMongoAccessor) LoadOne(id string) (utils.DBObject, int, err
|
||||
}
|
||||
|
||||
res_mongo.Decode(&processing)
|
||||
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||
resources, _, err := accessor.Search(nil, pma.GetType())
|
||||
if !processing.VerifyAuth(pma.PeerID, pma.Groups) {
|
||||
return nil, 403, errors.New("You are not allowed to access this collaborative area")
|
||||
}
|
||||
resources, _, err := pma.ResourceModelAccessor.Search(nil, pma.GetType())
|
||||
if err == nil && len(resources) > 0 {
|
||||
processing.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
@ -68,9 +76,11 @@ func (wfa processingMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, erro
|
||||
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||
resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
|
||||
for _, r := range results {
|
||||
if !r.VerifyAuth(wfa.PeerID, wfa.Groups) {
|
||||
continue
|
||||
}
|
||||
if err == nil && len(resources) > 0 {
|
||||
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
@ -102,9 +112,11 @@ func (wfa *processingMongoAccessor) Search(filters *dbs.Filters, search string)
|
||||
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||
resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
|
||||
for _, r := range results {
|
||||
if !r.VerifyAuth(wfa.PeerID, wfa.Groups) {
|
||||
continue
|
||||
}
|
||||
if err == nil && len(resources) > 0 {
|
||||
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
|
@ -1,12 +1,13 @@
|
||||
package resources
|
||||
|
||||
import (
|
||||
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/data"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/data"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/processing"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/storage"
|
||||
w "cloud.o-forge.io/core/oc-lib/models/resources/workflow"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
)
|
||||
|
||||
// AbstractResource is the struct containing all of the attributes common to all resources
@ -18,39 +19,67 @@ type ResourceSet struct {
|
||||
Datas []string `bson:"datas,omitempty" json:"datas,omitempty"`
|
||||
Storages []string `bson:"storages,omitempty" json:"storages,omitempty"`
|
||||
Processings []string `bson:"processings,omitempty" json:"processings,omitempty"`
|
||||
Computes []string `bson:"computes,omitempty" json:"computes,omitempty"`
|
||||
Computes []string `bson:"computes,omitempty" json:"computes,omitempty"`
|
||||
Workflows []string `bson:"workflows,omitempty" json:"workflows,omitempty"`
|
||||
|
||||
DataResources []*data.DataResource `bson:"-" json:"data_resources,omitempty"`
|
||||
StorageResources []*storage.StorageResource `bson:"-" json:"storage_resources,omitempty"`
|
||||
ProcessingResources []*processing.ProcessingResource `bson:"-" json:"processing_resources,omitempty"`
|
||||
ComputeResources []*compute.ComputeResource `bson:"-" json:"compute_resources,omitempty"`
|
||||
ComputeResources []*compute.ComputeResource `bson:"-" json:"compute_resources,omitempty"`
|
||||
WorkflowResources []*w.WorkflowResource `bson:"-" json:"workflow_resources,omitempty"`
|
||||
}
|
||||
|
||||
func (r *ResourceSet) Fill(peerID string, groups []string) {
|
||||
for k, v := range map[utils.DBObject][]string{
|
||||
(&data.DataResource{}): r.Datas,
|
||||
(&compute.ComputeResource{}): r.Computes,
|
||||
(&storage.StorageResource{}): r.Storages,
|
||||
(&processing.ProcessingResource{}): r.Processings,
|
||||
(&w.WorkflowResource{}): r.Workflows,
|
||||
} {
|
||||
for _, id := range v {
|
||||
d, _, e := k.GetAccessor(peerID, groups, nil).LoadOne(id)
|
||||
if e == nil {
|
||||
switch k.(type) {
|
||||
case *data.DataResource:
|
||||
r.DataResources = append(r.DataResources, d.(*data.DataResource))
|
||||
case *compute.ComputeResource:
|
||||
r.ComputeResources = append(r.ComputeResources, d.(*compute.ComputeResource))
|
||||
case *storage.StorageResource:
|
||||
r.StorageResources = append(r.StorageResources, d.(*storage.StorageResource))
|
||||
case *processing.ProcessingResource:
|
||||
r.ProcessingResources = append(r.ProcessingResources, d.(*processing.ProcessingResource))
|
||||
case *w.WorkflowResource:
|
||||
r.WorkflowResources = append(r.WorkflowResources, d.(*w.WorkflowResource))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
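Reviewer note: a minimal sketch of how the Fill method above is expected to be called; the IDs and identity are assumptions for illustration only.

// Sketch only: assumes it lives in the resources package next to ResourceSet.
func exampleFill() {
	rs := &ResourceSet{
		Datas:    []string{"data-id-1"},    // hypothetical IDs
		Computes: []string{"compute-id-1"},
	}
	// Fill resolves each ID through the matching accessor (built with the
	// caller's peer ID and groups) and keeps only the objects that load.
	rs.Fill("peer-id", []string{"group-a"})
}
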
|
||||
type ItemResource struct {
|
||||
Data *data.DataResource `bson:"data,omitempty" json:"data,omitempty"`
|
||||
Processing *processing.ProcessingResource `bson:"processing,omitempty" json:"processing,omitempty"`
|
||||
Storage *storage.StorageResource `bson:"storage,omitempty" json:"storage,omitempty"`
|
||||
Compute *compute.ComputeResource `bson:"compute,omitempty" json:"compute,omitempty"`
|
||||
Compute *compute.ComputeResource `bson:"compute,omitempty" json:"compute,omitempty"`
|
||||
Workflow *w.WorkflowResource `bson:"workflow,omitempty" json:"workflow,omitempty"`
|
||||
}
|
||||
|
||||
func (i *ItemResource) GetAbstractRessource() *resource_model.AbstractResource {
|
||||
|
||||
if(i.Data != nil){
|
||||
if i.Data != nil {
|
||||
return &i.Data.AbstractResource
|
||||
}
|
||||
if(i.Processing != nil){
|
||||
if i.Processing != nil {
|
||||
return &i.Processing.AbstractResource
|
||||
}
|
||||
if(i.Storage != nil){
|
||||
if i.Storage != nil {
|
||||
return &i.Storage.AbstractResource
|
||||
}
|
||||
if(i.Compute != nil){
|
||||
if i.Compute != nil {
|
||||
return &i.Compute.AbstractResource
|
||||
}
|
||||
if(i.Workflow != nil){
|
||||
if i.Workflow != nil {
|
||||
return &i.Workflow.AbstractResource
|
||||
}
|
||||
return nil
|
||||
|
@ -2,7 +2,10 @@ package resource_model
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"slices"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
"github.com/google/uuid"
|
||||
@ -26,9 +29,39 @@ type AbstractResource struct {
|
||||
OwnerLogo string `json:"owner_logo,omitempty" bson:"owner_logo,omitempty"` // OwnerLogo is the owner logo of the resource
|
||||
SourceUrl string `json:"source_url,omitempty" bson:"source_url,omitempty" validate:"required"` // SourceUrl is the source URL of the resource
|
||||
PeerID string `json:"peer_id,omitempty" bson:"peer_id,omitempty" validate:"required"` // PeerID is the ID of the peer getting this resource
|
||||
Price string `json:"price,omitempty" bson:"price,omitempty"` // Price is the price of access to the resource
|
||||
License string `json:"license,omitempty" bson:"license,omitempty"` // License is the license of the resource
|
||||
ResourceModel *ResourceModel `json:"resource_model,omitempty" bson:"resource_model,omitempty"` // ResourceModel is the model of the resource
|
||||
|
||||
AllowedPeersGroup map[string][]string `json:"allowed_peers_group,omitempty" bson:"allowed_peers_group,omitempty"` // AllowedPeersGroup is the group of allowed peers
|
||||
|
||||
Price string `json:"price,omitempty" bson:"price,omitempty"` // Price is the price of access to the resource
|
||||
Currency string `json:"currency,omitempty" bson:"currency,omitempty"` // Currency is the currency of the price
|
||||
}
|
||||
|
||||
func (abs *AbstractResource) VerifyAuth(peerID string, groups []string) bool {
if grps, ok := abs.AllowedPeersGroup[peerID]; ok {
if slices.Contains(grps, "*") {
return true
}
for _, grp := range grps {
if slices.Contains(groups, grp) {
return true
}
}
}
return false
}

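Reviewer note: to make the access rule above concrete, a hedged sketch of the expected behaviour; the peer IDs and group names are invented for the example.

res := &AbstractResource{
	AllowedPeersGroup: map[string][]string{
		"peer-a": {"*"},   // any group of peer-a may access
		"peer-b": {"dev"}, // only peer-b callers in group "dev"
	},
}
res.VerifyAuth("peer-a", nil)             // true: wildcard entry
res.VerifyAuth("peer-b", []string{"dev"}) // true: group match
res.VerifyAuth("peer-b", []string{"ops"}) // false: no shared group
res.VerifyAuth("peer-c", []string{"dev"}) // false: peer not listed
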
func (abs *AbstractResource) GetResourceFilter(search string) *dbs.Filters {
|
||||
return &dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
|
||||
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -68,6 +101,13 @@ func (abs *AbstractResource) GetModelReadOnly(cat string, key string) interface{
|
||||
return abs.ResourceModel.Model[cat][key].ReadOnly
|
||||
}
|
||||
|
||||
func (d *AbstractResource) Trim() *AbstractResource {
if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: d.PeerID}}).IsMySelf(); !ok {
d.AllowedPeersGroup = map[string][]string{}
}
return d
}
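Reviewer note: Trim is called by the resource accessors before StoreOne/UpdateOne. A short sketch of its effect when the resource was declared by another peer (assuming IsMySelf reports false for that peer ID):

res := &AbstractResource{
	PeerID:            "remote-peer-id", // assumed not to be this peer
	AllowedPeersGroup: map[string][]string{"remote-peer-id": {"*"}},
}
res.Trim()
// res.AllowedPeersGroup is now empty: foreign ACLs are not persisted locally.
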
|
||||
type Model struct {
|
||||
Type string `json:"type,omitempty" bson:"type,omitempty"` // Type is the type of the model
|
||||
ReadOnly bool `json:"readonly,omitempty" bson:"readonly,omitempty"` // ReadOnly is the readonly of the model
|
||||
@ -99,24 +139,28 @@ func (d *ResourceModel) GetName() string {
|
||||
return d.UUID
|
||||
}
|
||||
|
||||
func (d *ResourceModel) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||
func (abs *ResourceModel) VerifyAuth(peerID string, groups []string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *ResourceModel) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := &ResourceModelMongoAccessor{}
|
||||
data.Init(tools.RESOURCE_MODEL, caller)
|
||||
data.Init(tools.RESOURCE_MODEL, peerID, groups, caller)
|
||||
return data
|
||||
}
|
||||
|
||||
func (dma *ResourceModel) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||
func (dma *ResourceModel) Deserialize(j map[string]interface{}, obj utils.DBObject) utils.DBObject {
|
||||
b, err := json.Marshal(j)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, dma)
|
||||
return dma
|
||||
json.Unmarshal(b, obj)
|
||||
return obj
|
||||
}
|
||||
|
||||
func (dma *ResourceModel) Serialize() map[string]interface{} {
|
||||
func (dma *ResourceModel) Serialize(obj utils.DBObject) map[string]interface{} {
|
||||
var m map[string]interface{}
|
||||
b, err := json.Marshal(dma)
|
||||
b, err := json.Marshal(obj)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
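Reviewer note: Serialize/Deserialize now take the concrete object explicitly, so the embedded implementation can marshal the outer type instead of its receiver. A hedged sketch of the intended call pattern:

rm := &ResourceModel{}
m := rm.Serialize(rm)    // marshal the concrete object passed in, not the receiver
clone := &ResourceModel{}
rm.Deserialize(m, clone) // decode the flat map back into the object passed in
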
@ -14,6 +14,10 @@ type ResourceModelMongoAccessor struct {
|
||||
* Nothing special here, just the basic CRUD operations
|
||||
*/
|
||||
|
||||
func New() *ResourceModelMongoAccessor {
|
||||
return &ResourceModelMongoAccessor{}
|
||||
}
|
||||
|
||||
func (wfa *ResourceModelMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||
return wfa.GenericDeleteOne(id, wfa)
|
||||
}
|
@ -1,9 +1,7 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
)
|
||||
@ -58,27 +56,8 @@ type StorageResource struct {
|
||||
Throughput string `bson:"throughput,omitempty" json:"throughput,omitempty"` // Throughput is the throughput of the storage
|
||||
}
|
||||
|
||||
func (dma *StorageResource) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||
b, err := json.Marshal(j)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, dma)
|
||||
return dma
|
||||
}
|
||||
|
||||
func (dma *StorageResource) Serialize() map[string]interface{} {
|
||||
var m map[string]interface{}
|
||||
b, err := json.Marshal(dma)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, &m)
|
||||
return m
|
||||
}
|
||||
|
||||
func (d *StorageResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New() // Create a new instance of the accessor
|
||||
data.Init(tools.STORAGE_RESOURCE, caller) // Initialize the accessor with the STORAGE_RESOURCE model type
|
||||
func (d *StorageResource) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New() // Create a new instance of the accessor
|
||||
data.Init(tools.STORAGE_RESOURCE, peerID, groups, caller) // Initialize the accessor with the STORAGE_RESOURCE model type
|
||||
return data
|
||||
}
|
||||
|
@ -1,9 +1,11 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
)
|
||||
|
||||
@ -13,7 +15,11 @@ type storageMongoAccessor struct {
|
||||
|
||||
// New creates a new instance of the storageMongoAccessor
|
||||
func New() *storageMongoAccessor {
|
||||
return &storageMongoAccessor{}
|
||||
return &storageMongoAccessor{
|
||||
utils.AbstractAccessor{
|
||||
ResourceModelAccessor: resource_model.New(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -26,16 +32,16 @@ func (sma *storageMongoAccessor) DeleteOne(id string) (utils.DBObject, int, erro
|
||||
|
||||
func (sma *storageMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||
set.(*StorageResource).ResourceModel = nil
|
||||
return sma.GenericUpdateOne(set, id, sma, &StorageResource{})
|
||||
return sma.GenericUpdateOne(set.(*StorageResource).Trim(), id, sma, &StorageResource{})
|
||||
}
|
||||
|
||||
func (sma *storageMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
data.(*StorageResource).ResourceModel = nil
|
||||
return sma.GenericStoreOne(data, sma)
|
||||
return sma.GenericStoreOne(data.(*StorageResource).Trim(), sma)
|
||||
}
|
||||
|
||||
func (sma *storageMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
return sma.GenericStoreOne(data, sma)
|
||||
return sma.StoreOne(data)
|
||||
}
|
||||
|
||||
func (sma *storageMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||
@ -49,8 +55,10 @@ func (sma *storageMongoAccessor) LoadOne(id string) (utils.DBObject, int, error)
|
||||
}
|
||||
|
||||
res_mongo.Decode(&storage)
|
||||
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||
resources, _, err := accessor.Search(nil, sma.GetType())
|
||||
if !storage.VerifyAuth(sma.PeerID, sma.Groups) {
|
||||
return nil, 403, errors.New("You are not allowed to access this collaborative area")
|
||||
}
|
||||
resources, _, err := sma.ResourceModelAccessor.Search(nil, sma.GetType())
|
||||
if err == nil && len(resources) > 0 {
|
||||
storage.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
@ -68,9 +76,11 @@ func (wfa storageMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error)
|
||||
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||
resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
|
||||
for _, r := range results {
|
||||
if !r.VerifyAuth(wfa.PeerID, wfa.Groups) {
|
||||
continue
|
||||
}
|
||||
if err == nil && len(resources) > 0 {
|
||||
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
@ -102,9 +112,11 @@ func (wfa *storageMongoAccessor) Search(filters *dbs.Filters, search string) ([]
|
||||
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||
resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
|
||||
for _, r := range results {
|
||||
if !r.VerifyAuth(wfa.PeerID, wfa.Groups) {
|
||||
continue
|
||||
}
|
||||
if err == nil && len(resources) > 0 {
|
||||
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
|
@ -3,7 +3,7 @@ package storage
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
@ -1,9 +1,7 @@
|
||||
package oclib
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
)
|
||||
@ -15,27 +13,8 @@ type WorkflowResource struct {
|
||||
WorkflowID string `bson:"workflow_id,omitempty" json:"workflow_id,omitempty"` // WorkflowID is the ID of the native workflow
|
||||
}
|
||||
|
||||
func (d *WorkflowResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New() // Create a new instance of the accessor
|
||||
data.Init(tools.WORKFLOW_RESOURCE, caller) // Initialize the accessor with the WORKFLOW_RESOURCE model type
|
||||
func (d *WorkflowResource) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New() // Create a new instance of the accessor
|
||||
data.Init(tools.WORKFLOW_RESOURCE, peerID, groups, caller) // Initialize the accessor with the WORKFLOW_RESOURCE model type
|
||||
return data
|
||||
}
|
||||
|
||||
func (dma *WorkflowResource) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||
b, err := json.Marshal(j)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, dma)
|
||||
return dma
|
||||
}
|
||||
|
||||
func (dma *WorkflowResource) Serialize() map[string]interface{} {
|
||||
var m map[string]interface{}
|
||||
b, err := json.Marshal(dma)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, &m)
|
||||
return m
|
||||
}
|
||||
|
@ -1,9 +1,11 @@
|
||||
package oclib
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
)
|
||||
|
||||
@ -12,7 +14,11 @@ type workflowResourceMongoAccessor struct {
|
||||
}
|
||||
|
||||
func New() *workflowResourceMongoAccessor {
|
||||
return &workflowResourceMongoAccessor{}
|
||||
return &workflowResourceMongoAccessor{
|
||||
utils.AbstractAccessor{
|
||||
ResourceModelAccessor: resource_model.New(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (wfa *workflowResourceMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||
@ -21,22 +27,22 @@ func (wfa *workflowResourceMongoAccessor) DeleteOne(id string) (utils.DBObject,
|
||||
|
||||
func (wfa *workflowResourceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||
set.(*WorkflowResource).ResourceModel = nil
|
||||
return wfa.GenericUpdateOne(set, id, wfa, &WorkflowResource{})
|
||||
return wfa.GenericUpdateOne(set.(*WorkflowResource).Trim(), id, wfa, &WorkflowResource{})
|
||||
}
|
||||
|
||||
func (wfa *workflowResourceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
data.(*WorkflowResource).ResourceModel = nil
|
||||
return wfa.GenericStoreOne(data, wfa)
|
||||
return wfa.GenericStoreOne(data.(*WorkflowResource).Trim(), wfa)
|
||||
}
|
||||
|
||||
func (wfa *workflowResourceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
res, _, _ := wfa.LoadOne(data.GetID())
|
||||
data.(*WorkflowResource).WorkflowID = data.GetID()
|
||||
if res == nil {
|
||||
return wfa.GenericStoreOne(data, wfa)
|
||||
return wfa.GenericStoreOne(data.(*WorkflowResource).Trim(), wfa)
|
||||
} else {
|
||||
data.(*WorkflowResource).UUID = res.GetID()
|
||||
return wfa.GenericUpdateOne(data, res.GetID(), wfa, &WorkflowResource{})
|
||||
return wfa.GenericUpdateOne(data.(*WorkflowResource).Trim(), res.GetID(), wfa, &WorkflowResource{})
|
||||
}
|
||||
}
|
||||
|
||||
@ -48,8 +54,10 @@ func (wfa *workflowResourceMongoAccessor) LoadOne(id string) (utils.DBObject, in
|
||||
return nil, code, err
|
||||
}
|
||||
res_mongo.Decode(&workflow)
|
||||
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||
if !workflow.VerifyAuth(wfa.PeerID, wfa.Groups) {
|
||||
return nil, 403, errors.New("You are not allowed to access this collaborative area")
|
||||
}
|
||||
resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
|
||||
if err == nil && len(resources) > 0 {
|
||||
workflow.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
@ -67,9 +75,11 @@ func (wfa workflowResourceMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int
|
||||
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||
resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
|
||||
for _, r := range results {
|
||||
if !r.VerifyAuth(wfa.PeerID, wfa.Groups) {
|
||||
continue
|
||||
}
|
||||
if err == nil && len(resources) > 0 {
|
||||
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
@ -101,9 +111,11 @@ func (wfa *workflowResourceMongoAccessor) Search(filters *dbs.Filters, search st
|
||||
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||
resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType())
|
||||
for _, r := range results {
|
||||
if !r.VerifyAuth(wfa.PeerID, wfa.Groups) {
|
||||
continue
|
||||
}
|
||||
if err == nil && len(resources) > 0 {
|
||||
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
|
@ -3,7 +3,7 @@ package oclib
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
@ -45,22 +45,26 @@ func (ao *AbstractObject) UpToDate() {
|
||||
}
|
||||
|
||||
// GetAccessor returns the accessor of the object (abstract)
|
||||
func (dma *AbstractObject) GetAccessor(caller *tools.HTTPCaller) Accessor {
|
||||
func (dma *AbstractObject) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) Accessor {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dma *AbstractObject) Deserialize(j map[string]interface{}) DBObject {
|
||||
func (ao *AbstractObject) VerifyAuth(peerID string, groups []string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (dma *AbstractObject) Deserialize(j map[string]interface{}, obj DBObject) DBObject {
|
||||
b, err := json.Marshal(j)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, dma)
|
||||
return dma
|
||||
json.Unmarshal(b, obj)
|
||||
return obj
|
||||
}
|
||||
|
||||
func (dma *AbstractObject) Serialize() map[string]interface{} {
|
||||
func (dma *AbstractObject) Serialize(obj DBObject) map[string]interface{} {
|
||||
var m map[string]interface{}
|
||||
b, err := json.Marshal(dma)
|
||||
b, err := json.Marshal(obj)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
@ -78,6 +82,14 @@ type AbstractAccessor struct {
|
||||
Logger zerolog.Logger // Logger is the logger of the accessor; a specialized logger for the accessor
Type string // Type is the data type of the accessor
Caller *tools.HTTPCaller // Caller is the HTTP caller of the accessor (optional), only needed for a peer connection
PeerID string // PeerID is the id of the peer
Groups []string // Groups is the list of groups that can access the accessor

ResourceModelAccessor Accessor
}
|
||||
|
||||
func (dma *AbstractAccessor) VerifyAuth() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (dma *AbstractAccessor) GetType() string {
|
||||
@ -89,10 +101,12 @@ func (dma *AbstractAccessor) GetCaller() *tools.HTTPCaller {
|
||||
}
|
||||
|
||||
// Init initializes the accessor with the data type and the http caller
func (dma *AbstractAccessor) Init(t tools.DataType, caller *tools.HTTPCaller) {
func (dma *AbstractAccessor) Init(t tools.DataType, peerID string, groups []string, caller *tools.HTTPCaller) {
dma.Logger = logs.CreateLogger(t.String()) // Create a logger with the data type
dma.Caller = caller // Set the caller
dma.Type = t.String() // Set the data type
dma.Caller = caller
dma.PeerID = peerID
dma.Groups = groups // Set the groups
dma.Type = t.String() // Set the data type
}
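Reviewer note: a hedged sketch of how a model accessor is expected to wire the extended Init, mirroring the resource accessors in this commit; the type and constructor names below are placeholders, not new API.

// Sketch: an accessor for a hypothetical model type, following the pattern
// used by the data/processing/storage accessors above.
type exampleMongoAccessor struct {
	utils.AbstractAccessor
}

func NewExample(peerID string, groups []string, caller *tools.HTTPCaller) *exampleMongoAccessor {
	acc := &exampleMongoAccessor{}
	// Init now records the caller's identity so the generic helpers can
	// enforce VerifyAuth without re-threading peerID/groups through each call.
	acc.Init(tools.DATA_RESOURCE, peerID, groups, caller) // data type reused for the sketch
	return acc
}
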
|
||||
|
||||
// GenericLoadOne loads one object from the database (generic)
|
||||
@ -147,13 +161,13 @@ func (dma *AbstractAccessor) GenericUpdateOne(set DBObject, id string, accessor
|
||||
if err != nil {
|
||||
return nil, c, err
|
||||
}
|
||||
change := set.Serialize() // get the changes
|
||||
loaded := r.Serialize() // get the loaded object
|
||||
change := set.Serialize(set) // get the changes
|
||||
loaded := r.Serialize(r) // get the loaded object
|
||||
|
||||
for k, v := range change { // apply the changes, with a flatten method
|
||||
loaded[k] = v
|
||||
}
|
||||
id, code, err := mongo.MONGOService.UpdateOne(new.Deserialize(loaded), id, accessor.GetType())
|
||||
id, code, err := mongo.MONGOService.UpdateOne(new.Deserialize(loaded, new), id, accessor.GetType())
|
||||
if err != nil {
|
||||
dma.Logger.Error().Msg("Could not update " + id + " to db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
@ -161,6 +175,64 @@ func (dma *AbstractAccessor) GenericUpdateOne(set DBObject, id string, accessor
|
||||
return accessor.LoadOne(id)
|
||||
}
|
||||
|
||||
func GenericLoadOne[T DBObject](id string, f func(DBObject) (DBObject, int, error), wfa Accessor) (DBObject, int, error) {
var data T
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
if err != nil {
wfa.GetLogger().Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}
res_mongo.Decode(&data)
if !data.VerifyAuth(wfa.GetPeerID(), wfa.GetGroups()) { // check auth on the decoded object, not on its zero value
return nil, 403, errors.New("You are not allowed to access this object")
}
return f(data)
}

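Reviewer note: a hedged sketch of how an accessor's LoadOne could be expressed with the helper above, mirroring the GenericSearch usage in the compute accessor; the method name and callback are illustrative, not part of this commit.

func (dma *dataMongoAccessor) LoadOneGeneric(id string) (utils.DBObject, int, error) {
	// Delegate the load, decode and auth check to GenericLoadOne and keep the
	// resource-model enrichment in the callback.
	return utils.GenericLoadOne[*DataResource](id, func(d utils.DBObject) (utils.DBObject, int, error) {
		resources, _, err := dma.ResourceModelAccessor.Search(nil, dma.GetType())
		if err == nil && len(resources) > 0 {
			d.(*DataResource).ResourceModel = resources[0].(*resource_model.ResourceModel)
		}
		return d, 200, nil
	}, dma)
}
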
func GenericLoadAll[T DBObject](f func(DBObject, []ShallowDBObject) []ShallowDBObject, wfa Accessor) ([]ShallowDBObject, int, error) {
results := []T{}
objs := []ShallowDBObject{}
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
if err != nil {
wfa.GetLogger().Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
if !r.VerifyAuth(wfa.GetPeerID(), wfa.GetGroups()) {
continue
}
objs = f(r, objs)
}
return objs, 200, nil
}
|
||||
func GenericSearch[T DBObject](filters *dbs.Filters, search string, defaultFilters *dbs.Filters,
f func(DBObject, []ShallowDBObject) []ShallowDBObject, wfa Accessor) ([]ShallowDBObject, int, error) {
results := []T{}
objs := []ShallowDBObject{}
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = defaultFilters
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
if err != nil {
wfa.GetLogger().Error().Msg("Could not search in db. Error: " + err.Error())
return nil, code, err
}
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
if !r.VerifyAuth(wfa.GetPeerID(), wfa.GetGroups()) {
continue
}
objs = f(r, objs)
}
return objs, 200, nil
}
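Reviewer note: the matching sketch for list/search endpoints; LoadAll and Search of a resource accessor collapse to one callback when built on the helpers above. The method name is illustrative, not part of this commit.

func (dma *dataMongoAccessor) loadAllGeneric() ([]utils.ShallowDBObject, int, error) {
	// Sketch: look the resource model up once, then reuse it for every hit.
	resources, _, err := dma.ResourceModelAccessor.Search(nil, dma.GetType())
	return utils.GenericLoadAll[*DataResource](func(d utils.DBObject, a []utils.ShallowDBObject) []utils.ShallowDBObject {
		if err == nil && len(resources) > 0 {
			d.(*DataResource).ResourceModel = resources[0].(*resource_model.ResourceModel)
		}
		return append(a, d)
	}, dma)
}
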
|
||||
// GenericRawUpdateOne updates one object in the database without the field-merge step of GenericUpdateOne (generic)
// json expected in entry is a flattened object; no need to respect the inheritance hierarchy
func (dma *AbstractAccessor) GenericRawUpdateOne(set DBObject, id string, accessor Accessor) (DBObject, int, error) {
|
||||
@ -171,3 +243,13 @@ func (dma *AbstractAccessor) GenericRawUpdateOne(set DBObject, id string, access
|
||||
}
|
||||
return accessor.LoadOne(id)
|
||||
}
|
||||
|
||||
func (dma *AbstractAccessor) GetPeerID() string {
|
||||
return dma.PeerID
|
||||
}
|
||||
func (dma *AbstractAccessor) GetGroups() []string {
|
||||
return dma.Groups
|
||||
}
|
||||
func (dma *AbstractAccessor) GetLogger() *zerolog.Logger {
|
||||
return &dma.Logger
|
||||
}
|
||||
|
@ -3,6 +3,7 @@ package utils
|
||||
import (
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// ShallowDBObject is an interface that defines the basic methods shallowed version of a DBObject
|
||||
@ -10,8 +11,8 @@ type ShallowDBObject interface {
|
||||
GenerateID()
|
||||
GetID() string
|
||||
GetName() string
|
||||
Deserialize(j map[string]interface{}) DBObject
|
||||
Serialize() map[string]interface{}
|
||||
Deserialize(j map[string]interface{}, obj DBObject) DBObject
|
||||
Serialize(obj DBObject) map[string]interface{}
|
||||
}
|
||||
|
||||
// DBObject is an interface that defines the basic methods for a DBObject
|
||||
@ -20,15 +21,19 @@ type DBObject interface {
|
||||
GetID() string
|
||||
GetName() string
|
||||
UpToDate()
|
||||
Deserialize(j map[string]interface{}) DBObject
|
||||
Serialize() map[string]interface{}
|
||||
GetAccessor(caller *tools.HTTPCaller) Accessor
|
||||
VerifyAuth(PeerID string, groups []string) bool
|
||||
Deserialize(j map[string]interface{}, obj DBObject) DBObject
|
||||
Serialize(obj DBObject) map[string]interface{}
|
||||
GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) Accessor
|
||||
}
|
||||
|
||||
// Accessor is an interface that defines the basic methods for an Accessor
|
||||
type Accessor interface {
|
||||
Init(t tools.DataType, caller *tools.HTTPCaller)
|
||||
Init(t tools.DataType, peerID string, groups []string, caller *tools.HTTPCaller)
|
||||
GetType() string
|
||||
GetPeerID() string
|
||||
GetGroups() []string
|
||||
GetLogger() *zerolog.Logger
|
||||
GetCaller() *tools.HTTPCaller
|
||||
Search(filters *dbs.Filters, search string) ([]ShallowDBObject, int, error)
|
||||
LoadAll() ([]ShallowDBObject, int, error)
|
||||
|
@ -1,7 +1,6 @@
|
||||
package workflow
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
||||
@ -108,7 +107,7 @@ func (wfa *Workflow) CheckBooking(caller *tools.HTTPCaller) (bool, error) {
|
||||
if wfa.Graph == nil { // no graph no booking
|
||||
return false, nil
|
||||
}
|
||||
accessor := (&compute.ComputeResource{}).GetAccessor(nil)
|
||||
accessor := (&compute.ComputeResource{}).GetAccessor("", []string{}, caller)
|
||||
for _, link := range wfa.Graph.Links {
|
||||
if ok, dc_id := wfa.isDCLink(link); ok { // check if the link is a link between a compute and a resource
|
||||
dc, code, _ := accessor.LoadOne(dc_id)
|
||||
@ -129,31 +128,8 @@ func (wfa *Workflow) CheckBooking(caller *tools.HTTPCaller) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (d *Workflow) GetName() string {
|
||||
return d.Name
|
||||
}
|
||||
|
||||
func (d *Workflow) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New() // Create a new instance of the accessor
|
||||
data.Init(tools.WORKFLOW, caller) // Initialize the accessor with the WORKFLOW model type
|
||||
func (d *Workflow) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New(peerID, groups) // Create a new instance of the accessor
|
||||
data.Init(tools.WORKFLOW, peerID, groups, caller) // Initialize the accessor with the WORKFLOW model type
|
||||
return data
|
||||
}
|
||||
|
||||
func (dma *Workflow) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||
b, err := json.Marshal(j)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, dma)
|
||||
return dma
|
||||
}
|
||||
|
||||
func (dma *Workflow) Serialize() map[string]interface{} {
|
||||
var m map[string]interface{}
|
||||
b, err := json.Marshal(dma)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, &m)
|
||||
return m
|
||||
}
|
||||
|
@ -8,9 +8,9 @@ import (
|
||||
|
||||
type WorkflowHistory struct{ Workflow }
|
||||
|
||||
func (d *WorkflowHistory) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New() // Create a new instance of the accessor
|
||||
data.Init(tools.WORKSPACE_HISTORY, caller) // Initialize the accessor with the WORKSPACE model type
|
||||
func (d *WorkflowHistory) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New(peerID, groups) // Create a new instance of the accessor
|
||||
data.Init(tools.WORKSPACE_HISTORY, peerID, groups, caller) // Initialize the accessor with the WORKSPACE model type
|
||||
return data
|
||||
}
|
||||
func (r *WorkflowHistory) GenerateID() {
|
||||
|
@ -22,11 +22,21 @@ import (
|
||||
|
||||
type workflowMongoAccessor struct {
|
||||
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
|
||||
|
||||
computeResourceAccessor utils.Accessor
|
||||
collaborativeAreaAccessor utils.Accessor
|
||||
executionAccessor utils.Accessor
|
||||
workspaceAccessor utils.Accessor
|
||||
}
|
||||
|
||||
// New creates a new instance of the workflowMongoAccessor
|
||||
func New() *workflowMongoAccessor {
|
||||
return &workflowMongoAccessor{}
|
||||
func New(peerID string, groups []string) *workflowMongoAccessor {
|
||||
return &workflowMongoAccessor{
|
||||
computeResourceAccessor: (&compute.ComputeResource{}).GetAccessor(peerID, groups, nil),
|
||||
collaborativeAreaAccessor: (&shallow_collaborative_area.ShallowCollaborativeArea{}).GetAccessor(peerID, groups, nil),
|
||||
executionAccessor: (&workflow_execution.WorkflowExecution{}).GetAccessor(peerID, groups, nil),
|
||||
workspaceAccessor: (&workspace.Workspace{}).GetAccessor(peerID, groups, nil),
|
||||
}
|
||||
}
|
||||
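Reviewer note: with this constructor the dependent accessors are built once with the caller's identity, which is what the later book/share/execution/execute changes rely on. A minimal sketch of the wiring performed by Workflow.GetAccessor (identifiers are placeholders):

wma := New("peer-id", []string{"group-a"})
wma.Init(tools.WORKFLOW, "peer-id", []string{"group-a"}, nil)
// book/share/execution/execute now reuse wma's pre-built sub-accessors,
// all bound to the same peer identity, instead of creating them with nil.
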
|
||||
/*
|
||||
@ -127,7 +137,6 @@ func (wfa *workflowMongoAccessor) book(id string, realData *Workflow, execs []*w
|
||||
g = realData.Graph
|
||||
}
|
||||
if g != nil && g.Links != nil && len(g.Links) > 0 { // if the graph is set and has links then book the workflow (even on ourselves)
|
||||
accessor := (&compute.ComputeResource{}).GetAccessor(nil)
|
||||
isDCFound := []string{}
|
||||
for _, link := range g.Links {
|
||||
if ok, dc_id := realData.isDCLink(link); ok { // check if the link is a link between a compute and a resource booking is only on compute
|
||||
@ -135,7 +144,7 @@ func (wfa *workflowMongoAccessor) book(id string, realData *Workflow, execs []*w
|
||||
continue
|
||||
} // if the compute is already found, skip it
|
||||
isDCFound = append(isDCFound, dc_id)
|
||||
dc, code, _ := accessor.LoadOne(dc_id)
|
||||
dc, code, _ := wfa.computeResourceAccessor.LoadOne(dc_id)
|
||||
if code != 200 {
|
||||
continue
|
||||
}
|
||||
@ -169,8 +178,7 @@ func (wfa *workflowMongoAccessor) share(realData *Workflow, delete bool, caller
|
||||
return
|
||||
}
|
||||
for _, sharedID := range realData.Shared { // loop through the shared ids
|
||||
access := (&shallow_collaborative_area.ShallowCollaborativeArea{}).GetAccessor(nil)
|
||||
res, code, _ := access.LoadOne(sharedID)
|
||||
res, code, _ := wfa.collaborativeAreaAccessor.LoadOne(sharedID)
|
||||
if code != 200 {
|
||||
continue
|
||||
}
|
||||
@ -186,7 +194,8 @@ func (wfa *workflowMongoAccessor) share(realData *Workflow, delete bool, caller
|
||||
history.StoreOne(history.MapFromWorkflow(res.(*Workflow)))
|
||||
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKFLOW, tools.DELETE, map[string]interface{}{}, caller)
|
||||
} else { // if the workflow is updated, share the update
|
||||
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKFLOW, tools.PUT, res.Serialize(), caller)
|
||||
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKFLOW, tools.PUT,
|
||||
res.Serialize(res), caller)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
@ -211,7 +220,6 @@ func (wfa *workflowMongoAccessor) execution(id string, realData *Workflow, delet
|
||||
return 409, err
|
||||
}
|
||||
|
||||
accessor := (&workflow_execution.WorkflowExecution{}).GetAccessor(nil)
|
||||
execs, err := wfa.getExecutions(id, realData) // get the executions of the workflow
|
||||
if err != nil {
|
||||
return 422, err
|
||||
@ -226,7 +234,7 @@ func (wfa *workflowMongoAccessor) execution(id string, realData *Workflow, delet
|
||||
}
|
||||
fmt.Println("BOOKING", delete)
|
||||
for _, obj := range execs {
|
||||
_, code, err := accessor.StoreOne(obj)
|
||||
_, code, err := wfa.executionAccessor.StoreOne(obj)
|
||||
fmt.Println("EXEC", code, err)
|
||||
if code != 200 {
|
||||
return code, err
|
||||
@ -301,21 +309,20 @@ func (wfa *workflowMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject,
|
||||
// it stores the workflow resources in a specific workspace to never have a conflict in UI and logic
|
||||
func (wfa *workflowMongoAccessor) execute(workflow *Workflow, delete bool, active bool) {
|
||||
|
||||
accessor := (&workspace.Workspace{}).GetAccessor(nil)
|
||||
filters := &dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{ // filter by standard workspace name attached to a workflow
|
||||
"abstractobject.name": {{dbs.LIKE.String(), workflow.Name + "_workspace"}},
|
||||
},
|
||||
}
|
||||
resource, _, err := accessor.Search(filters, "")
|
||||
resource, _, err := wfa.workspaceAccessor.Search(filters, "")
|
||||
if delete { // if delete is set to true, delete the workspace
|
||||
for _, r := range resource {
|
||||
accessor.DeleteOne(r.GetID())
|
||||
wfa.workspaceAccessor.DeleteOne(r.GetID())
|
||||
}
|
||||
return
|
||||
}
|
||||
if err == nil && len(resource) > 0 { // if the workspace already exists, update it
|
||||
accessor.UpdateOne(&workspace.Workspace{
|
||||
wfa.workspaceAccessor.UpdateOne(&workspace.Workspace{
|
||||
Active: active,
|
||||
ResourceSet: resources.ResourceSet{
|
||||
Datas: workflow.Datas,
|
||||
@ -326,7 +333,7 @@ func (wfa *workflowMongoAccessor) execute(workflow *Workflow, delete bool, activ
|
||||
},
|
||||
}, resource[0].GetID())
|
||||
} else { // if the workspace does not exist, create it
|
||||
accessor.StoreOne(&workspace.Workspace{
|
||||
wfa.workspaceAccessor.StoreOne(&workspace.Workspace{
|
||||
Active: active,
|
||||
AbstractObject: utils.AbstractObject{Name: workflow.Name + "_workspace"},
|
||||
ResourceSet: resources.ResourceSet{
|
||||
@ -354,7 +361,6 @@ func (wfa *workflowMongoAccessor) LoadOne(id string) (utils.DBObject, int, error
|
||||
if (workflow.Schedule.End != nil && now.After(*workflow.Schedule.End)) || (workflow.Schedule.End == nil && workflow.Schedule.Start != nil && now.After(*workflow.Schedule.Start)) { // if the start date is passed, then you can book
|
||||
workflow.ScheduleActive = false
|
||||
wfa.GenericRawUpdateOne(&workflow, id, wfa)
|
||||
|
||||
} // if the start date is passed, update the executions
|
||||
}
|
||||
wfa.execute(&workflow, false, true) // if no workspace is attached to the workflow, create it
|
||||
|
@ -12,7 +12,7 @@ func TestStoreOneWorkflow(t *testing.T) {
|
||||
AbstractObject: utils.AbstractObject{Name: "testWorkflow"},
|
||||
}
|
||||
|
||||
wma := New()
|
||||
wma := New("", nil)
|
||||
id, _, _ := wma.StoreOne(&w)
|
||||
|
||||
assert.NotEmpty(t, id)
|
||||
@ -23,7 +23,7 @@ func TestLoadOneWorkflow(t *testing.T) {
|
||||
AbstractObject: utils.AbstractObject{Name: "testWorkflow"},
|
||||
}
|
||||
|
||||
wma := New()
|
||||
wma := New("", nil)
|
||||
new_w, _, _ := wma.StoreOne(&w)
|
||||
assert.Equal(t, w, new_w)
|
||||
}
|
||||
|
@ -107,10 +107,6 @@ func (wfa *WorkflowExecution) ArgoStatusToState(status string) *WorkflowExecutio
|
||||
return wfa
|
||||
}
|
||||
|
||||
func (ao *WorkflowExecution) GetID() string {
|
||||
return ao.UUID
|
||||
}
|
||||
|
||||
func (r *WorkflowExecution) GenerateID() {
|
||||
r.UUID = uuid.New().String()
|
||||
}
|
||||
@ -119,29 +115,8 @@ func (d *WorkflowExecution) GetName() string {
|
||||
return d.UUID + "_" + d.ExecDate.String()
|
||||
}
|
||||
|
||||
func (d *WorkflowExecution) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New() // Create a new instance of the accessor
|
||||
data.Init(tools.WORKFLOW_EXECUTION, caller) // Initialize the accessor with the WORKFLOW_EXECUTION model type
|
||||
func (d *WorkflowExecution) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New() // Create a new instance of the accessor
|
||||
data.Init(tools.WORKFLOW_EXECUTION, peerID, groups, caller) // Initialize the accessor with the WORKFLOW_EXECUTION model type
|
||||
return data
|
||||
}
|
||||
|
||||
// New creates a new instance of the WorkflowExecution from a map
|
||||
func (dma *WorkflowExecution) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||
b, err := json.Marshal(j)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, dma)
|
||||
return dma
|
||||
}
|
||||
|
||||
// Serialize returns the WorkflowExecution as a map
|
||||
func (dma *WorkflowExecution) Serialize() map[string]interface{} {
|
||||
var m map[string]interface{}
|
||||
b, err := json.Marshal(dma)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, &m)
|
||||
return m
|
||||
}
|
||||
|
@ -1,12 +1,9 @@
|
||||
package workspace
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// Workspace is a struct that represents a workspace
|
||||
@ -18,43 +15,8 @@ type Workspace struct {
|
||||
Shared string `json:"shared,omitempty" bson:"shared,omitempty"` // Shared is the ID of the shared workspace
|
||||
}
|
||||
|
||||
func (ao *Workspace) GetID() string {
|
||||
return ao.UUID
|
||||
}
|
||||
|
||||
func (r *Workspace) GenerateID() {
|
||||
if r.UUID == "" {
|
||||
r.UUID = uuid.New().String()
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Workspace) GetName() string {
|
||||
return d.Name
|
||||
}
|
||||
|
||||
func (d *Workspace) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New() // Create a new instance of the accessor
|
||||
data.Init(tools.WORKSPACE, caller) // Initialize the accessor with the WORKSPACE model type
|
||||
func (d *Workspace) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New(peerID, groups) // Create a new instance of the accessor
|
||||
data.Init(tools.WORKSPACE, peerID, groups, caller) // Initialize the accessor with the WORKSPACE model type
|
||||
return data
|
||||
}
|
||||
|
||||
// New creates a new instance of the workspaceMongoAccessor from a map
|
||||
func (dma *Workspace) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||
b, err := json.Marshal(j)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, dma)
|
||||
return dma
|
||||
}
|
||||
|
||||
// Serialize returns the workspaceMongoAccessor as a map
|
||||
func (dma *Workspace) Serialize() map[string]interface{} {
|
||||
var m map[string]interface{}
|
||||
b, err := json.Marshal(dma)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, &m)
|
||||
return m
|
||||
}
|
||||
|
@ -8,9 +8,9 @@ import (
|
||||
|
||||
type WorkspaceHistory struct{ Workspace }
|
||||
|
||||
func (d *WorkspaceHistory) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New() // Create a new instance of the accessor
|
||||
data.Init(tools.WORKSPACE_HISTORY, caller) // Initialize the accessor with the WORKSPACE model type
|
||||
func (d *WorkspaceHistory) GetAccessor(peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New(peerID, groups) // Create a new instance of the accessor
|
||||
data.Init(tools.WORKSPACE_HISTORY, peerID, groups, caller) // Initialize the accessor with the WORKSPACE model type
|
||||
return data
|
||||
}
|
||||
func (r *WorkspaceHistory) GenerateID() {
|
||||
|
@ -8,11 +8,6 @@ import (
|
||||
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
|
||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/data"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/processing"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/storage"
|
||||
w "cloud.o-forge.io/core/oc-lib/models/resources/workflow"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
)
|
||||
@ -23,7 +18,7 @@ type workspaceMongoAccessor struct {
|
||||
}
|
||||
|
||||
// New creates a new instance of the workspaceMongoAccessor
|
||||
func New() *workspaceMongoAccessor {
|
||||
func New(peerID string, groups []string) *workspaceMongoAccessor {
|
||||
return &workspaceMongoAccessor{}
|
||||
}
|
||||
|
||||
@ -67,7 +62,7 @@ func (wfa *workspaceMongoAccessor) UpdateOne(set utils.DBObject, id string) (uti
|
||||
func (wfa *workspaceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
filters := &dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{
|
||||
"abstractobject.name": {{dbs.LIKE.String(), data.GetName() + "_workspace"}},
|
||||
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: data.GetName() + "_workspace"}},
|
||||
},
|
||||
}
|
||||
res, _, err := wfa.Search(filters, "") // Search for the workspace
|
||||
@ -89,63 +84,6 @@ func (wfa *workspaceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject,
	return wfa.GenericStoreOne(data, wfa)
}

/*
This function is used to fill the workspace with the resources
*/
func (wfa *workspaceMongoAccessor) fill(workflow *Workspace) *Workspace {
	// Fill the workspace with the resources
	if workflow.Datas != nil && len(workflow.Datas) > 0 {
		dataAccessor := (&data.DataResource{}).GetAccessor(nil)
		for _, id := range workflow.Datas {
			d, _, e := dataAccessor.LoadOne(id)
			if e == nil {
				workflow.DataResources = append(workflow.DataResources, d.(*data.DataResource))
			}
		}
	}
	// Fill the workspace with the computes
	if workflow.Computes != nil && len(workflow.Computes) > 0 {
		dataAccessor := (&compute.ComputeResource{}).GetAccessor(nil)
		for _, id := range workflow.Computes {
			d, _, e := dataAccessor.LoadOne(id)
			if e == nil {
				workflow.ComputeResources = append(workflow.ComputeResources, d.(*compute.ComputeResource))
			}
		}
	}
	// Fill the workspace with the storages
	if workflow.Storages != nil && len(workflow.Storages) > 0 {
		dataAccessor := (&storage.StorageResource{}).GetAccessor(nil)
		for _, id := range workflow.Storages {
			d, _, e := dataAccessor.LoadOne(id)
			if e == nil {
				workflow.StorageResources = append(workflow.StorageResources, d.(*storage.StorageResource))
			}
		}
	}
	// Fill the workspace with the processings
	if workflow.Processings != nil && len(workflow.Processings) > 0 {
		dataAccessor := (&processing.ProcessingResource{}).GetAccessor(nil)
		for _, id := range workflow.Processings {
			d, _, e := dataAccessor.LoadOne(id)
			if e == nil {
				workflow.ProcessingResources = append(workflow.ProcessingResources, d.(*processing.ProcessingResource))
			}
		}
	}
	// Fill the workspace with the workflows
	if workflow.Workflows != nil && len(workflow.Workflows) > 0 {
		dataAccessor := (&w.WorkflowResource{}).GetAccessor(nil)
		for _, id := range workflow.Workflows {
			d, _, e := dataAccessor.LoadOne(id)
			if e == nil {
				workflow.WorkflowResources = append(workflow.WorkflowResources, d.(*w.WorkflowResource))
			}
		}
	}
	return workflow
}

// LoadOne loads a workspace from the database, given its ID
func (wfa *workspaceMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
	var workflow Workspace
@ -155,8 +93,8 @@ func (wfa *workspaceMongoAccessor) LoadOne(id string) (utils.DBObject, int, erro
		return nil, code, err
	}
	res_mongo.Decode(&workflow)

	return wfa.fill(&workflow), 200, nil
	workflow.Fill(wfa.PeerID, wfa.Groups)
	return &workflow, 200, nil
}
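After this change the accessor no longer resolves resources through its own fill helper; the loaded Workspace populates its own resource lists from the requesting peer and groups. A caller-side sketch, assuming the package is imported as workspace and that Init takes the same arguments as the history accessor above (both assumptions, not confirmed by this commit):

// Hypothetical usage; the package alias, tools.WORKSPACE constant, and the
// Init argument order are assumptions based on the history accessor shown earlier.
wfa := workspace.New("peer-1234", []string{"admins"})
wfa.Init(tools.WORKSPACE, "peer-1234", []string{"admins"}, nil)

obj, code, err := wfa.LoadOne("some-workspace-id")
if err == nil && code == 200 {
	ws := obj.(*workspace.Workspace) // resource lists already populated via Fill
	_ = ws
}
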

// LoadAll loads all the workspaces from the database
@ -172,7 +110,8 @@ func (wfa workspaceMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error
		return nil, 404, err
	}
	for _, r := range results {
		objs = append(objs, wfa.fill(&r))
		r.Fill(wfa.PeerID, wfa.Groups)
		objs = append(objs, &r)
	}
	return objs, 200, nil
}
@ -197,7 +136,8 @@ func (wfa *workspaceMongoAccessor) Search(filters *dbs.Filters, search string) (
		return nil, 404, err
	}
	for _, r := range results {
		objs = append(objs, wfa.fill(&r))
		r.Fill(wfa.PeerID, wfa.Groups)
		objs = append(objs, &r)
	}
	return objs, 200, nil
}
@ -210,7 +150,8 @@ func (wfa *workspaceMongoAccessor) share(realData *Workspace, method tools.METHO
	if realData == nil || realData.Shared == "" || caller == nil || caller.Disabled {
		return
	}
	access := (&shallow_collaborative_area.ShallowCollaborativeArea{}).GetAccessor(nil)
	shallow := &shallow_collaborative_area.ShallowCollaborativeArea{}
	access := (shallow).GetAccessor(wfa.PeerID, wfa.Groups, nil)
	res, code, _ := access.LoadOne(realData.Shared)
	if code != 200 {
		return
@ -227,7 +168,7 @@ func (wfa *workspaceMongoAccessor) share(realData *Workspace, method tools.METHO
			history.StoreOne(history.MapFromWorkspace(res.(*Workspace)))
			_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKSPACE, tools.DELETE, map[string]interface{}{}, caller)
		} else { // If the workspace is updated, share the update
			_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKSPACE, tools.PUT, res.Serialize(), caller)
			_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKSPACE, tools.PUT, res.Serialize(res), caller)
		}
	}
	if err != nil {