Compare commits
No commits in common. "master" and "issue#4" have entirely different histories.
@@ -16,7 +16,6 @@ type Config struct {
Port string
LokiUrl string
LogLevel string
Whitelist bool
}

func (c Config) GetUrl() string {
@@ -38,11 +37,19 @@ func GetConfig() *Config {
}

func SetConfig(mongoUrl string, database string, natsUrl string, lokiUrl string, logLevel string) *Config {
/*once.Do(func() {
instance = &Config{
MongoUrl: mongoUrl,
MongoDatabase: database,
NATSUrl: natsUrl,
LokiUrl: lokiUrl,
LogLevel: logLevel,
}
})*/
GetConfig().MongoUrl = mongoUrl
GetConfig().MongoDatabase = database
GetConfig().NATSUrl = natsUrl
GetConfig().LokiUrl = lokiUrl
GetConfig().LogLevel = logLevel
GetConfig().Whitelist = true
return GetConfig()
}
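For context on this hunk: one side keeps the sync.Once initialisation commented out and instead mutates the shared instance returned by GetConfig(). The sketch below is illustrative only; it assumes a package-level singleton guarded by sync.Once and mirrors the field names visible in the diff, not the full Config definition.

package config

import "sync"

type Config struct {
	MongoUrl      string
	MongoDatabase string
	NATSUrl       string
	LokiUrl       string
	LogLevel      string
	Whitelist     bool
}

var (
	instance *Config
	once     sync.Once
)

// GetConfig lazily allocates the shared configuration exactly once.
func GetConfig() *Config {
	once.Do(func() { instance = &Config{} })
	return instance
}

// SetConfig fills the shared instance field by field, as the hunk above does,
// instead of building it inside once.Do.
func SetConfig(mongoUrl, database, natsUrl, lokiUrl, logLevel string) *Config {
	c := GetConfig()
	c.MongoUrl = mongoUrl
	c.MongoDatabase = database
	c.NATSUrl = natsUrl
	c.LokiUrl = lokiUrl
	c.LogLevel = logLevel
	c.Whitelist = true
	return c
}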
@@ -3,7 +3,6 @@ package mongo
import (
"context"
"errors"
"fmt"
"slices"
"time"

@@ -289,7 +288,6 @@ func (m *MongoDB) Search(filters *dbs.Filters, collection_name string) (*mongo.C
}
opts := options.Find()
opts.SetLimit(100)
fmt.Println("Filters: ", CollectionMap, collection_name)
targetDBCollection := CollectionMap[collection_name]
orList := bson.A{}
andList := bson.A{}
entrypoint.go (174 changes)
@@ -1,11 +1,8 @@
package oclib

import (
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"

"runtime/debug"
@@ -18,8 +15,12 @@ import (
"cloud.o-forge.io/core/oc-lib/models/collaborative_area"
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
"cloud.o-forge.io/core/oc-lib/models/resources/data"
"cloud.o-forge.io/core/oc-lib/models/resources/processing"
"cloud.o-forge.io/core/oc-lib/models/resources/storage"
w "cloud.o-forge.io/core/oc-lib/models/resources/workflow"
"cloud.o-forge.io/core/oc-lib/models/utils"
w2 "cloud.o-forge.io/core/oc-lib/models/workflow"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
@@ -47,7 +48,7 @@ const (
WORKSPACE = tools.WORKSPACE
WORKFLOW_EXECUTION = tools.WORKFLOW_EXECUTION
PEER = tools.PEER
COLLABORATIVE_AREA = tools.COLLABORATIVE_AREA
SHARED_WORKSPACE = tools.COLLABORATIVE_AREA
RULE = tools.RULE
BOOKING = tools.BOOKING
)
@@ -117,49 +118,6 @@ func InitDaemon(appName string) {
beego.BConfig.WebConfig.StaticDir["/swagger"] = "swagger"
}

type IDTokenClaims struct {
UserID string `json:"user_id"`
PeerID string `json:"peer_id"`
Groups []string `json:"groups"`
}

// SessionClaims struct
type SessionClaims struct {
AccessToken map[string]interface{} `json:"access_token"`
IDToken IDTokenClaims `json:"id_token"`
}

// Claims struct
type Claims struct {
Session SessionClaims `json:"session"`
}

func ExtractTokenInfo(request http.Request) (string, string, []string) {
reqToken := request.Header.Get("Authorization")
splitToken := strings.Split(reqToken, "Bearer ")
if len(splitToken) < 2 {
reqToken = ""
} else {
reqToken = splitToken[1]
}
if reqToken != "" {
token := strings.Split(reqToken, ".")
if len(token) > 2 {
bytes, err := base64.StdEncoding.DecodeString(token[2])
if err != nil {
return "", "", []string{}
}
var c Claims
err = json.Unmarshal(bytes, &c)
if err != nil {
return "", "", []string{}
}
return c.Session.IDToken.UserID, c.Session.IDToken.PeerID, c.Session.IDToken.Groups
}
}
return "", "", []string{}
}

func Init(appName string) {
InitDaemon(appName)
api := &tools.API{}
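A minimal usage sketch for the ExtractTokenInfo helper shown above, assuming it is exported from the oclib package at the module root; the handler name and import alias are hypothetical.

package main

import (
	"fmt"
	"net/http"

	oclib "cloud.o-forge.io/core/oc-lib"
)

// demoHandler (hypothetical) pulls the caller identity out of the
// "Authorization: Bearer <jwt>" header via the helper shown above.
func demoHandler(w http.ResponseWriter, r *http.Request) {
	userID, peerID, groups := oclib.ExtractTokenInfo(*r)
	if userID == "" {
		http.Error(w, "unauthenticated", http.StatusUnauthorized)
		return
	}
	fmt.Fprintf(w, "user=%s peer=%s groups=%v\n", userID, peerID, groups)
}

func main() {
	http.HandleFunc("/whoami", demoHandler)
	http.ListenAndServe(":8080", nil)
}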
@@ -200,7 +158,7 @@ func SetConfig(mongoUrl string, database string, natsUrl string, lokiUrl string,
If not we will store it
Resource model is the model that will define the structure of the resources
*/
accessor := (&resource_model.ResourceModel{}).GetAccessor("", "", []string{}, nil)
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
for _, model := range []string{tools.DATA_RESOURCE.String(), tools.PROCESSING_RESOURCE.String(), tools.STORAGE_RESOURCE.String(), tools.COMPUTE_RESOURCE.String(), tools.WORKFLOW_RESOURCE.String()} {
data, code, _ := accessor.Search(nil, model)
if code == 404 || len(data) == 0 {
@@ -267,18 +225,6 @@ func GetConfLoader() *onion.Onion {
return config.GetConfLoader()
}

type Request struct {
collection LibDataEnum
user string
peerID string
groups []string
caller *tools.HTTPCaller
}

func NewRequest(collection LibDataEnum, user string, peerID string, groups []string, caller *tools.HTTPCaller) *Request {
return &Request{collection: collection, user: user, peerID: peerID, groups: groups, caller: caller}
}

/*
* Search will search for the data in the database
* @param filters *dbs.Filters
@@ -287,14 +233,18 @@ func NewRequest(collection LibDataEnum, user string, peerID string, groups []str
* @param c ...*tools.HTTPCaller
* @return data LibDataShallow
*/
func (r *Request) Search(filters *dbs.Filters, word string) (data LibDataShallow) {
func Search(filters *dbs.Filters, word string, collection LibDataEnum, c ...*tools.HTTPCaller) (data LibDataShallow) {
defer func() { // recover the panic
if r := recover(); r != nil {
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in Search : "+fmt.Sprintf("%v", r)))
data = LibDataShallow{Data: nil, Code: 500, Err: "Panic recovered in LoadAll : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
}
}()
d, code, err := models.Model(r.collection.EnumIndex()).GetAccessor(r.user, r.peerID, r.groups, r.caller).Search(filters, word)
var caller *tools.HTTPCaller // define the caller
if len(c) > 0 {
caller = c[0]
}
d, code, err := models.Model(collection.EnumIndex()).GetAccessor(caller).Search(filters, word)
if err != nil {
data = LibDataShallow{Data: d, Code: code, Err: err.Error()}
return
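The two signatures above are the heart of this refactor: one branch exposes free functions that take the collection plus an optional HTTPCaller, the other wraps user, peer and group context in a Request. A usage sketch of both shapes as a consumer would write them; only one shape exists on a given branch, and the collection constant and identifiers are placeholders.

package main

import (
	oclib "cloud.o-forge.io/core/oc-lib"
)

func main() {
	// Free-function shape: collection passed explicitly, caller optional.
	shallow := oclib.Search(nil, "demo", oclib.BOOKING)
	_ = shallow

	// Request shape (the other side of the diff), carrying identity context:
	//   req := oclib.NewRequest(oclib.BOOKING, "user-id", "peer-id", []string{"group-a"}, nil)
	//   shallow = req.Search(nil, "demo")
}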
@@ -309,14 +259,18 @@ func (r *Request) Search(filters *dbs.Filters, word string) (data LibDataShallow
* @param c ...*tools.HTTPCaller
* @return data LibDataShallow
*/
func (r *Request) LoadAll() (data LibDataShallow) {
func LoadAll(collection LibDataEnum, c ...*tools.HTTPCaller) (data LibDataShallow) {
defer func() { // recover the panic
if r := recover(); r != nil {
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in LoadAll : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
data = LibDataShallow{Data: nil, Code: 500, Err: "Panic recovered in LoadAll : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
}
}()
d, code, err := models.Model(r.collection.EnumIndex()).GetAccessor(r.user, r.peerID, r.groups, r.caller).LoadAll()
var caller *tools.HTTPCaller // define the caller
if len(c) > 0 {
caller = c[0]
}
d, code, err := models.Model(collection.EnumIndex()).GetAccessor(caller).LoadAll()
if err != nil {
data = LibDataShallow{Data: d, Code: code, Err: err.Error()}
return
@@ -332,14 +286,18 @@ func (r *Request) LoadAll() (data LibDataShallow) {
* @param c ...*tools.HTTPCaller
* @return data LibData
*/
func (r *Request) LoadOne(id string) (data LibData) {
func LoadOne(collection LibDataEnum, id string, c ...*tools.HTTPCaller) (data LibData) {
defer func() { // recover the panic
if r := recover(); r != nil {
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in LoadOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
data = LibData{Data: nil, Code: 500, Err: "Panic recovered in LoadOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
}
}()
d, code, err := models.Model(r.collection.EnumIndex()).GetAccessor(r.user, r.peerID, r.groups, r.caller).LoadOne(id)
var caller *tools.HTTPCaller // define the caller
if len(c) > 0 {
caller = c[0]
}
d, code, err := models.Model(collection.EnumIndex()).GetAccessor(caller).LoadOne(id)
if err != nil {
data = LibData{Data: d, Code: code, Err: err.Error()}
return
@@ -356,15 +314,19 @@ func (r *Request) LoadOne(id string) (data LibData) {
* @param c ...*tools.HTTPCaller
* @return data LibData
*/
func (r *Request) UpdateOne(set map[string]interface{}, id string) (data LibData) {
func UpdateOne(collection LibDataEnum, set map[string]interface{}, id string, c ...*tools.HTTPCaller) (data LibData) {
defer func() { // recover the panic
if r := recover(); r != nil {
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in UpdateOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
data = LibData{Data: nil, Code: 500, Err: "Panic recovered in UpdateOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
}
}()
model := models.Model(r.collection.EnumIndex())
d, code, err := model.GetAccessor(r.user, r.peerID, r.groups, r.caller).UpdateOne(model.Deserialize(set, model), id)
var caller *tools.HTTPCaller // define the caller
if len(c) > 0 {
caller = c[0]
}
model := models.Model(collection.EnumIndex())
d, code, err := model.GetAccessor(caller).UpdateOne(model.Deserialize(set), id)
if err != nil {
data = LibData{Data: d, Code: code, Err: err.Error()}
return
@@ -380,14 +342,18 @@ func (r *Request) UpdateOne(set map[string]interface{}, id string) (data LibData
* @param c ...*tools.HTTPCaller
* @return data LibData
*/
func (r *Request) DeleteOne(id string) (data LibData) {
func DeleteOne(collection LibDataEnum, id string, c ...*tools.HTTPCaller) (data LibData) {
defer func() { // recover the panic
if r := recover(); r != nil {
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in DeleteOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
data = LibData{Data: nil, Code: 500, Err: "Panic recovered in DeleteOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
}
}()
d, code, err := models.Model(r.collection.EnumIndex()).GetAccessor(r.user, r.peerID, r.groups, r.caller).DeleteOne(id)
var caller *tools.HTTPCaller // define the caller
if len(c) > 0 {
caller = c[0]
}
d, code, err := models.Model(collection.EnumIndex()).GetAccessor(caller).DeleteOne(id)
if err != nil {
data = LibData{Data: d, Code: code, Err: err.Error()}
return
@@ -403,15 +369,19 @@ func (r *Request) DeleteOne(id string) (data LibData) {
* @param c ...*tools.HTTPCaller
* @return data LibData
*/
func (r *Request) StoreOne(object map[string]interface{}) (data LibData) {
func StoreOne(collection LibDataEnum, object map[string]interface{}, c ...*tools.HTTPCaller) (data LibData) {
defer func() { // recover the panic
if r := recover(); r != nil {
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in StoreOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
data = LibData{Data: nil, Code: 500, Err: "Panic recovered in StoreOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
}
}()
model := models.Model(r.collection.EnumIndex())
d, code, err := model.GetAccessor(r.user, r.peerID, r.groups, r.caller).StoreOne(model.Deserialize(object, model))
var caller *tools.HTTPCaller // define the caller
if len(c) > 0 {
caller = c[0]
}
model := models.Model(collection.EnumIndex())
d, code, err := model.GetAccessor(caller).StoreOne(model.Deserialize(object))
if err != nil {
data = LibData{Data: d, Code: code, Err: err.Error()}
return
@@ -427,15 +397,19 @@ func (r *Request) StoreOne(object map[string]interface{}) (data LibData) {
* @param c ...*tools.HTTPCaller
* @return data LibData
*/
func (r *Request) CopyOne(object map[string]interface{}) (data LibData) {
func CopyOne(collection LibDataEnum, object map[string]interface{}, c ...*tools.HTTPCaller) (data LibData) {
defer func() { // recover the panic
if r := recover(); r != nil {
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in CopyOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
data = LibData{Data: nil, Code: 500, Err: "Panic recovered in UpdateOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
}
}()
model := models.Model(r.collection.EnumIndex())
d, code, err := model.GetAccessor(r.user, r.peerID, r.groups, r.caller).CopyOne(model.Deserialize(object, model))
var caller *tools.HTTPCaller // define the caller
if len(c) > 0 {
caller = c[0]
}
model := models.Model(collection.EnumIndex())
d, code, err := model.GetAccessor(caller).CopyOne(model.Deserialize(object))
if err != nil {
data = LibData{Data: d, Code: code, Err: err.Error()}
return
@@ -446,73 +420,73 @@ func (r *Request) CopyOne(object map[string]interface{}) (data LibData) {

// ================ CAST ========================= //

func (l *LibData) ToDataResource() *resources.DataResource {
if l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.DATA_RESOURCE {
return l.Data.(*resources.DataResource)
func (l *LibData) ToDataResource() *data.DataResource {
if l.Data.GetAccessor(nil).GetType() == tools.DATA_RESOURCE.String() {
return l.Data.(*data.DataResource)
}
return nil
}

func (l *LibData) ToComputeResource() *resources.ComputeResource {
if l.Data != nil && l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.COMPUTE_RESOURCE {
return l.Data.(*resources.ComputeResource)
func (l *LibData) ToComputeResource() *compute.ComputeResource {
if l.Data != nil && l.Data.GetAccessor(nil).GetType() == tools.COMPUTE_RESOURCE.String() {
return l.Data.(*compute.ComputeResource)
}
return nil
}
func (l *LibData) ToStorageResource() *resources.StorageResource {
if l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.STORAGE_RESOURCE {
return l.Data.(*resources.StorageResource)
func (l *LibData) ToStorageResource() *storage.StorageResource {
if l.Data.GetAccessor(nil).GetType() == tools.STORAGE_RESOURCE.String() {
return l.Data.(*storage.StorageResource)
}
return nil
}
func (l *LibData) ToProcessingResource() *resources.ProcessingResource {
if l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.PROCESSING_RESOURCE {
return l.Data.(*resources.ProcessingResource)
func (l *LibData) ToProcessingResource() *processing.ProcessingResource {
if l.Data.GetAccessor(nil).GetType() == tools.PROCESSING_RESOURCE.String() {
return l.Data.(*processing.ProcessingResource)
}
return nil
}
func (l *LibData) ToWorkflowResource() *resources.WorkflowResource {
if l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.WORKFLOW_RESOURCE {
return l.Data.(*resources.WorkflowResource)
func (l *LibData) ToWorkflowResource() *w.WorkflowResource {
if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW_RESOURCE.String() {
return l.Data.(*w.WorkflowResource)
}
return nil
}
func (l *LibData) ToPeer() *peer.Peer {
if l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.PEER {
if l.Data.GetAccessor(nil).GetType() == tools.PEER.String() {
return l.Data.(*peer.Peer)
}
return nil
}

func (l *LibData) ToWorkflow() *w2.Workflow {
if l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.WORKFLOW {
if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW.String() {
return l.Data.(*w2.Workflow)
}
return nil
}
func (l *LibData) ToWorkspace() *workspace.Workspace {
if l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.WORKSPACE {
if l.Data.GetAccessor(nil).GetType() == tools.WORKSPACE.String() {
return l.Data.(*workspace.Workspace)
}
return nil
}

func (l *LibData) ToCollaborativeArea() *collaborative_area.CollaborativeArea {
if l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.COLLABORATIVE_AREA {
if l.Data.GetAccessor(nil).GetType() == tools.COLLABORATIVE_AREA.String() {
return l.Data.(*collaborative_area.CollaborativeArea)
}
return nil
}

func (l *LibData) ToRule() *rule.Rule {
if l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.COLLABORATIVE_AREA {
if l.Data.GetAccessor(nil).GetType() == tools.COLLABORATIVE_AREA.String() {
return l.Data.(*rule.Rule)
}
return nil
}

func (l *LibData) ToWorkflowExecution() *workflow_execution.WorkflowExecution {
if l.Data.GetAccessor("", "", []string{}, nil).GetType() == tools.WORKFLOW_EXECUTION {
if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW_EXECUTION.String() {
return l.Data.(*workflow_execution.WorkflowExecution)
}
return nil
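A short sketch of how these cast helpers are meant to be combined with a load call; it follows the free-function shape, the ID is a placeholder, and the field names are taken from the structs visible in the hunks.

package main

import (
	"fmt"

	oclib "cloud.o-forge.io/core/oc-lib"
)

func main() {
	// Load a record, then down-cast the generic payload to its concrete model.
	res := oclib.LoadOne(oclib.PEER, "some-peer-id")
	if res.Code != 200 {
		fmt.Println("load failed:", res.Err)
		return
	}
	if p := res.ToPeer(); p != nil {
		fmt.Println("loaded peer", p.UUID) // UUID comes from the embedded AbstractObject
	}
}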
go.mod (1 change)
@@ -44,7 +44,6 @@ require (
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/robfig/cron v1.2.0
github.com/shiena/ansicolor v0.0.0-20200904210342-c7312218db18 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.1.2 // indirect
go.sum (2 changes)
@@ -87,8 +87,6 @@ github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSz
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@@ -1,12 +1,14 @@
package booking

import (
"encoding/json"
"time"

"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
"cloud.o-forge.io/core/oc-lib/tools"
"github.com/google/uuid"
"go.mongodb.org/mongo-driver/bson/primitive"
)

@@ -26,7 +28,7 @@ func (wfa *Booking) CheckBooking(id string, start time.Time, end *time.Time) (bo
return true, nil
}
e := *end
accessor := New(tools.BOOKING, "", "", nil, nil)
accessor := wfa.GetAccessor(nil)
res, code, err := accessor.Search(&dbs.Filters{
And: map[string][]dbs.Filter{ // check if there is a booking on the same compute resource by filtering on the compute_resource_id, the state and the execution date
"compute_resource_id": {{Operator: dbs.EQUAL.String(), Value: id}},
@@ -49,14 +51,41 @@ func (wfa *Booking) ArgoStatusToState(status string) *Booking {
return wfa
}

func (ao *Booking) GetID() string {
return ao.UUID
}

func (r *Booking) GenerateID() {
if r.UUID == "" {
r.UUID = uuid.New().String()
}
}

func (d *Booking) GetName() string {
return d.UUID + "_" + d.ExecDate.String()
}

func (d *Booking) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return New(tools.BOOKING, username, peerID, groups, caller) // Create a new instance of the accessor
func (d *Booking) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.BOOKING, caller) // Initialize the accessor with the BOOKING model type
return data
}

func (d *Booking) VerifyAuth(username string, peerID string, groups []string) bool {
return true
func (dma *Booking) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}

func (dma *Booking) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}
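For readers unfamiliar with the (de)serialisation idiom used by Booking above, here is a standalone sketch of the marshal/unmarshal round-trip; the struct is a simplified stand-in, not the real Booking model.

package main

import (
	"encoding/json"
	"fmt"
)

// demoBooking is a simplified stand-in for the Booking model in the diff.
type demoBooking struct {
	UUID  string `json:"id"`
	State int    `json:"state"`
}

// deserialize converts a generic map into the typed object by going through
// JSON, the same pattern Booking.Deserialize uses above.
func deserialize(j map[string]interface{}) *demoBooking {
	b, err := json.Marshal(j)
	if err != nil {
		return nil
	}
	var d demoBooking
	if err := json.Unmarshal(b, &d); err != nil {
		return nil
	}
	return &d
}

// serialize does the reverse: typed object back to a generic map.
func serialize(d *demoBooking) map[string]interface{} {
	var m map[string]interface{}
	b, err := json.Marshal(d)
	if err != nil {
		return nil
	}
	json.Unmarshal(b, &m)
	return m
}

func main() {
	in := map[string]interface{}{"id": "abc-123", "state": 1}
	obj := deserialize(in)
	fmt.Println(obj.UUID, serialize(obj)["state"])
}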
@@ -4,10 +4,9 @@ import (
"time"

"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
"cloud.o-forge.io/core/oc-lib/tools"
)

type bookingMongoAccessor struct {
@@ -15,62 +14,90 @@ type bookingMongoAccessor struct {
}

// New creates a new instance of the bookingMongoAccessor
func New(t tools.DataType, username string, peerID string, groups []string, caller *tools.HTTPCaller) *bookingMongoAccessor {
return &bookingMongoAccessor{
AbstractAccessor: utils.AbstractAccessor{
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
Caller: caller,
PeerID: peerID,
Groups: groups,
User: username, // Set the caller
Type: t,
},
}
func New() *bookingMongoAccessor {
return &bookingMongoAccessor{}
}

/*
* Nothing special here, just the basic CRUD operations
*/
func (a *bookingMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
return utils.GenericDeleteOne(id, a)
func (wfa *bookingMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
return wfa.GenericDeleteOne(id, wfa)
}

func (a *bookingMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
return utils.GenericUpdateOne(set, id, a, &Booking{})
func (wfa *bookingMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
return wfa.GenericUpdateOne(set, id, wfa, &Booking{})
}

func (a *bookingMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
return utils.GenericStoreOne(data, a)
func (wfa *bookingMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
return wfa.GenericStoreOne(data, wfa)
}

func (a *bookingMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return utils.GenericStoreOne(data, a)
func (wfa *bookingMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return wfa.GenericStoreOne(data, wfa)
}

func (a *bookingMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
return utils.GenericLoadOne[*Booking](id, func(d utils.DBObject) (utils.DBObject, int, error) {
if d.(*Booking).State == workflow_execution.SCHEDULED && time.Now().UTC().After(*d.(*Booking).ExecDate) {
d.(*Booking).State = workflow_execution.FORGOTTEN
utils.GenericRawUpdateOne(d, id, a)
}
return d, 200, nil
}, a)
}

func (a *bookingMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
return utils.GenericLoadAll[*Booking](a.getExec(), a)
}

func (a *bookingMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
return utils.GenericSearch[*Booking](filters, search, (&Booking{}).GetObjectFilters(search), a.getExec(), a)
}

func (a *bookingMongoAccessor) getExec() func(utils.DBObject) utils.ShallowDBObject {
return func(d utils.DBObject) utils.ShallowDBObject {
if d.(*Booking).State == workflow_execution.SCHEDULED && time.Now().UTC().After(*d.(*Booking).ExecDate) {
d.(*Booking).State = workflow_execution.FORGOTTEN
utils.GenericRawUpdateOne(d, d.GetID(), a)
}
return d
func (wfa *bookingMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
var workflow Booking
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}
res_mongo.Decode(&workflow)
if workflow.State == workflow_execution.SCHEDULED && time.Now().UTC().After(*workflow.ExecDate) {
workflow.State = workflow_execution.FORGOTTEN
wfa.GenericRawUpdateOne(&workflow, id, wfa)
}
return &workflow, 200, nil
}

func (wfa *bookingMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
var results []Booking
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
if r.State == workflow_execution.SCHEDULED && time.Now().UTC().After(*r.ExecDate) {
r.State = workflow_execution.FORGOTTEN
wfa.GenericRawUpdateOne(&r, r.UUID, wfa)
}
objs = append(objs, &r.AbstractObject) // Warning only AbstractObject is returned
}
return objs, 200, nil
}

// Search is a function that searches for a booking in the database
func (wfa *bookingMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
},
}
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err
}
var results []Booking
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
if r.State == workflow_execution.SCHEDULED && time.Now().UTC().After(*r.ExecDate) {
r.State = workflow_execution.FORGOTTEN
wfa.GenericRawUpdateOne(&r, r.UUID, wfa)
}
objs = append(objs, &r)
}
return objs, 200, nil
}
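One side of this file replaces the hand-written Mongo queries with generic helpers (GenericLoadOne, GenericLoadAll, GenericSearch) plus a small per-model hook. A simplified, self-contained sketch of that hook pattern follows; the helper signatures here are stand-ins that only approximate the ones in oc-lib/models/utils.

package main

import (
	"fmt"
	"time"
)

// record is a stand-in for Booking: it carries a state and an execution date.
type record struct {
	State    int
	ExecDate time.Time
}

const (
	scheduled = iota
	forgotten
)

// loadOne is a simplified stand-in for a generic loader: it fetches the object
// and then runs a per-model hook, mirroring GenericLoadOne(id, hook, accessor).
func loadOne(fetch func() *record, hook func(*record) *record) *record {
	return hook(fetch())
}

func main() {
	stale := &record{State: scheduled, ExecDate: time.Now().Add(-time.Hour)}
	got := loadOne(
		func() *record { return stale },
		func(r *record) *record {
			// The hook is where Booking flips SCHEDULED bookings whose
			// execution date has passed to FORGOTTEN before returning them.
			if r.State == scheduled && time.Now().After(r.ExecDate) {
				r.State = forgotten
			}
			return r
		},
	)
	fmt.Println("state after load:", got.State) // prints 1 (forgotten)
}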
@@ -1,17 +1,16 @@
package collaborative_area

import (
"fmt"
"slices"
"encoding/json"
"time"

"cloud.o-forge.io/core/oc-lib/config"
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/utils"
w "cloud.o-forge.io/core/oc-lib/models/workflow"
"cloud.o-forge.io/core/oc-lib/models/workspace"
"cloud.o-forge.io/core/oc-lib/tools"
"github.com/google/uuid"
)

type CollaborativeAreaRule struct {
@@ -28,13 +27,14 @@ type CollaborativeAreaRule struct {
type CollaborativeArea struct {
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
IsSent bool `json:"is_sent" bson:"-"` // IsSent is a flag that indicates if the workspace is sent
CreatorID string `json:"peer_id,omitempty" bson:"peer_id,omitempty" validate:"required"` // CreatorID is the ID of the creator
Version string `json:"version,omitempty" bson:"version,omitempty"` // Version is the version of the workspace
Description string `json:"description,omitempty" bson:"description,omitempty" validate:"required"` // Description is the description of the workspace
CollaborativeAreaRule *CollaborativeAreaRule `json:"collaborative_area,omitempty" bson:"collaborative_area,omitempty"` // CollaborativeArea is the collaborative area of the workspace
Attributes map[string]interface{} `json:"attributes,omitempty" bson:"attributes,omitempty"` // Attributes is the attributes of the workspace (TODO)
Workspaces []string `json:"workspaces" bson:"workspaces"` // Workspaces is the workspaces of the workspace
Workflows []string `json:"workflows" bson:"workflows"` // Workflows is the workflows of the workspace
AllowedPeersGroup map[string][]string `json:"allowed_peers_group,omitempty" bson:"allowed_peers_group,omitempty"` // AllowedPeersGroup is the group of allowed peers
Peers []string `json:"peers" bson:"peers"` // Peers is the peers of the workspace
Rules []string `json:"rules" bson:"rules,omitempty"` // Rules is the rules of the workspace

SharedRules []*rule.Rule `json:"shared_rules,omitempty" bson:"-"` // SharedRules is the shared rules of the workspace
@@ -43,56 +43,41 @@ type CollaborativeArea struct {
SharedPeers []*peer.Peer `json:"shared_peers,omitempty" bson:"-"` // SharedPeers is the shared peers of the workspace
}

func (ao *CollaborativeArea) Clear(peerID string) {
ao.CreatorID = peerID
if config.GetConfig().Whitelist {
ao.AllowedPeersGroup[peerID] = []string{"*"}
} else {
ao.AllowedPeersGroup[peerID] = []string{}
}
// then reset the shared fields
if ao.Workspaces == nil {
ao.Workspaces = []string{}
}
if ao.Workflows == nil {
ao.Workflows = []string{}
}
if ao.Rules == nil {
ao.Rules = []string{}
}
if ao.CollaborativeAreaRule == nil {
ao.CollaborativeAreaRule = &CollaborativeAreaRule{
ShareMode: "private",
ExploitedBy: "collaborators only",
}
}
ao.CollaborativeAreaRule.CreatedAt = time.Now().UTC()
func (ao *CollaborativeArea) GetID() string {
return ao.UUID
}

func (ao *CollaborativeArea) VerifyAuth(username string, peerID string, groups []string) bool {
if ao.AllowedPeersGroup != nil || config.GetConfig().Whitelist {
if grps, ok := ao.AllowedPeersGroup[peerID]; ok || config.GetConfig().Whitelist {
fmt.Println("grps", grps, "ok", ok, "config.GetConfig().Whitelist", config.GetConfig().Whitelist)
if slices.Contains(grps, "*") || (!ok && config.GetConfig().Whitelist) {
return true
}
for _, grp := range grps {
if slices.Contains(groups, grp) {
return true
}
}
}
func (r *CollaborativeArea) GenerateID() {
if r.UUID == "" {
r.UUID = uuid.New().String()
}
return ao.AbstractObject.VerifyAuth(username, peerID, groups)
}

func (d *CollaborativeArea) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return New(tools.COLLABORATIVE_AREA, username, peerID, groups, caller) // Create a new instance of the accessor
func (d *CollaborativeArea) GetName() string {
return d.Name
}

func (d *CollaborativeArea) Trim() *CollaborativeArea {
if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: d.CreatorID}}).IsMySelf(); !ok {
d.AllowedPeersGroup = map[string][]string{}
}
return d
func (d *CollaborativeArea) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.COLLABORATIVE_AREA, caller) // Initialize the accessor with the SHARED_WORKSPACE model type
return data
}

func (dma *CollaborativeArea) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}

func (dma *CollaborativeArea) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}
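The VerifyAuth branch above gates access on a per-peer group map plus a global whitelist flag. A self-contained sketch of that decision logic, with simplified types; it mirrors the checks shown in the hunk but is not the library's actual implementation.

package main

import (
	"fmt"
	"slices"
)

// verifyAuth mirrors the checks in the hunk above: a peer is allowed when its
// group entry contains "*", when the global whitelist is on and the peer has no
// explicit entry, or when one of the caller's groups matches an allowed group.
func verifyAuth(allowed map[string][]string, whitelist bool, peerID string, groups []string) bool {
	if allowed == nil && !whitelist {
		return false
	}
	grps, ok := allowed[peerID]
	if !ok && !whitelist {
		return false
	}
	if slices.Contains(grps, "*") || (!ok && whitelist) {
		return true
	}
	for _, grp := range grps {
		if slices.Contains(groups, grp) {
			return true
		}
	}
	return false
}

func main() {
	allowed := map[string][]string{"peer-a": {"dev"}}
	fmt.Println(verifyAuth(allowed, false, "peer-a", []string{"dev"}))  // true
	fmt.Println(verifyAuth(allowed, false, "peer-b", []string{"dev"}))  // false
	fmt.Println(verifyAuth(allowed, true, "peer-b", []string{"other"})) // true
}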
@@ -4,13 +4,13 @@ import (
"errors"
"fmt"
"slices"
"time"

"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/models/workflow"
w "cloud.o-forge.io/core/oc-lib/models/workflow"
"cloud.o-forge.io/core/oc-lib/models/workspace"
"cloud.o-forge.io/core/oc-lib/tools"
@@ -19,144 +19,43 @@ import (
// SharedWorkspace is a struct that represents a collaborative area
type collaborativeAreaMongoAccessor struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)

workspaceAccessor utils.Accessor
workflowAccessor utils.Accessor
peerAccessor utils.Accessor
ruleAccessor utils.Accessor
}

func New(t tools.DataType, username string, peerID string, groups []string, caller *tools.HTTPCaller) *collaborativeAreaMongoAccessor {
return &collaborativeAreaMongoAccessor{
AbstractAccessor: utils.AbstractAccessor{
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
Caller: caller,
PeerID: peerID,
Groups: groups, // Set the caller
Type: t,
},
workspaceAccessor: (&workspace.Workspace{}).GetAccessor(username, peerID, groups, nil),
workflowAccessor: (&w.Workflow{}).GetAccessor(username, peerID, groups, nil),
peerAccessor: (&peer.Peer{}).GetAccessor(username, peerID, groups, nil),
ruleAccessor: (&rule.Rule{}).GetAccessor(username, peerID, groups, nil),
}
// New creates a new instance of the collaborativeAreaMongoAccessor
func New() *collaborativeAreaMongoAccessor {
return &collaborativeAreaMongoAccessor{}
}

// DeleteOne deletes a collaborative area from the database, given its ID, it automatically share to peers if the workspace is shared
func (a *collaborativeAreaMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
set, code, err := a.LoadOne(id)
if code != 200 {
return nil, code, err
func (wfa *collaborativeAreaMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
set, code, _ := wfa.LoadOne(id)
if code == 200 { // always delete on peers than recreate
wfa.deleteToPeer(set.(*CollaborativeArea))
}
a.deleteToPeer(set.(*CollaborativeArea))
a.sharedWorkflow(&CollaborativeArea{}, id) // create all shared workflows
a.sharedWorkspace(&CollaborativeArea{}, id) // create all collaborative areas
return utils.GenericDeleteOne(id, a) // then add on yours
}

// UpdateOne updates a collaborative area in the database, given its ID and the new data, it automatically share to peers if the workspace is shared
func (a *collaborativeAreaMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
res, code, err := utils.GenericUpdateOne(set.(*CollaborativeArea).Trim(), id, a, &CollaborativeArea{})
// a.deleteToPeer(res.(*CollaborativeArea)) // delete the collaborative area on the peer
a.sharedWorkflow(res.(*CollaborativeArea), id) // replace all shared workflows
a.sharedWorkspace(res.(*CollaborativeArea), id) // replace all collaborative areas (not shared worspace obj but workspace one)
// a.sendToPeer(res.(*CollaborativeArea)) // send the collaborative area (collaborative area object) to the peers
return res, code, err
}

// StoreOne stores a collaborative area in the database, it automatically share to peers if the workspace is shared
func (a *collaborativeAreaMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
_, id := (&peer.Peer{}).IsMySelf() // get the local peer
data.(*CollaborativeArea).Clear(id) // set the creator
// retrieve or proper peer
dd, code, err := a.peerAccessor.Search(nil, "0")
if code != 200 || len(dd) == 0 {
return nil, code, errors.New("Could not retrieve the peer" + err.Error())
}
data.(*CollaborativeArea).CollaborativeAreaRule.Creator = dd[0].GetID()
d, code, err := utils.GenericStoreOne(data.(*CollaborativeArea).Trim(), a)
if code == 200 {
a.sharedWorkflow(d.(*CollaborativeArea), d.GetID()) // create all shared workflows
a.sharedWorkspace(d.(*CollaborativeArea), d.GetID()) // create all collaborative areas
a.sendToPeer(d.(*CollaborativeArea)) // send the collaborative area (collaborative area object) to the peers
}
return data, code, err
}

// CopyOne copies a CollaborativeArea in the database
func (a *collaborativeAreaMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return a.StoreOne(data)
}

func filterEnrich[T utils.ShallowDBObject](arr []string, a utils.Accessor) []T {
var new []T
res, code, _ := a.Search(&dbs.Filters{
Or: map[string][]dbs.Filter{
"abstractobject.id": {{Operator: dbs.IN.String(), Value: arr}},
},
}, "")
if code == 200 {
for _, r := range res {
new = append(new, r.(T))
}
}
return new
}

// enrich is a function that enriches the CollaborativeArea with the shared objects
func (a *collaborativeAreaMongoAccessor) enrich(sharedWorkspace *CollaborativeArea) *CollaborativeArea {
sharedWorkspace.SharedWorkspaces = append(sharedWorkspace.SharedWorkspaces,
filterEnrich[*workspace.Workspace](sharedWorkspace.Workspaces, a.workspaceAccessor)...)
sharedWorkspace.SharedWorkflows = append(sharedWorkspace.SharedWorkflows,
filterEnrich[*workflow.Workflow](sharedWorkspace.Workflows, a.workflowAccessor)...)
peerskey := []string{}
for k := range sharedWorkspace.AllowedPeersGroup {
peerskey = append(peerskey, k)
}
sharedWorkspace.SharedPeers = append(sharedWorkspace.SharedPeers,
filterEnrich[*peer.Peer](peerskey, a.peerAccessor)...)
sharedWorkspace.SharedRules = append(sharedWorkspace.SharedRules,
filterEnrich[*rule.Rule](sharedWorkspace.Rules, a.ruleAccessor)...)
return sharedWorkspace
}

func (a *collaborativeAreaMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
return utils.GenericLoadOne[*CollaborativeArea](id, func(d utils.DBObject) (utils.DBObject, int, error) {
return a.enrich(d.(*CollaborativeArea)), 200, nil
}, a)
}

func (a *collaborativeAreaMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
return utils.GenericLoadAll[*CollaborativeArea](func(d utils.DBObject) utils.ShallowDBObject {
return a.enrich(d.(*CollaborativeArea))
}, a)
}

func (a *collaborativeAreaMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
return utils.GenericSearch[*CollaborativeArea](filters, search, (&CollaborativeArea{}).GetObjectFilters(search),
func(d utils.DBObject) utils.ShallowDBObject {
return a.enrich(d.(*CollaborativeArea))
}, a)
wfa.sharedWorkflow(&CollaborativeArea{}, id) // create all shared workflows
wfa.sharedWorkspace(&CollaborativeArea{}, id) // create all collaborative areas
return wfa.GenericDeleteOne(id, wfa) // then add on yours
}

/*
sharedWorkspace is a function that shares the collaborative area to the peers
*/
func (a *collaborativeAreaMongoAccessor) sharedWorkspace(shared *CollaborativeArea, id string) {
eldest, code, _ := a.LoadOne(id) // get the eldest
func (wfa *collaborativeAreaMongoAccessor) sharedWorkspace(shared *CollaborativeArea, id string) {
eldest, code, _ := wfa.LoadOne(id) // get the eldest
accessor := (&workspace.Workspace{}).GetAccessor(nil)
if code == 200 {
eld := eldest.(*CollaborativeArea)
if eld.Workspaces != nil { // update all your workspaces in the eldest by replacing shared ref by an empty string
for _, v := range eld.Workspaces {
a.workspaceAccessor.UpdateOne(&workspace.Workspace{Shared: ""}, v)
if a.Caller != nil || a.Caller.URLS == nil || a.Caller.URLS[tools.WORKSPACE] == nil {
accessor.UpdateOne(&workspace.Workspace{Shared: ""}, v)
if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKSPACE] == nil {
continue
}
paccess := (&peer.Peer{}) // send to all peers
for k := range shared.AllowedPeersGroup { // delete the collaborative area on the peer
b, err := paccess.LaunchPeerExecution(k, v, tools.WORKSPACE, tools.DELETE, nil, a.Caller)
paccess := (&peer.Peer{}) // send to all peers
for _, p := range shared.Peers { // delete the collaborative area on the peer
b, err := paccess.LaunchPeerExecution(p, v, tools.WORKSPACE, tools.DELETE, nil, wfa.Caller)
if err != nil && b == nil {
a.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
wfa.Logger.Error().Msg("Could not send to peer " + p + ". Error: " + err.Error())
}
}
}
@@ -164,20 +63,20 @@ func (a *collaborativeAreaMongoAccessor) sharedWorkspace(shared *CollaborativeAr
}
if shared.Workspaces != nil {
for _, v := range shared.Workspaces { // update all the collaborative areas
workspace, code, _ := a.workspaceAccessor.UpdateOne(&workspace.Workspace{Shared: shared.UUID}, v) // add the shared ref to workspace
if a.Caller != nil || a.Caller.URLS == nil || a.Caller.URLS[tools.WORKSPACE] == nil {
workspace, code, _ := accessor.UpdateOne(&workspace.Workspace{Shared: shared.UUID}, v) // add the shared ref to workspace
if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKSPACE] == nil {
continue
}
for k := range shared.AllowedPeersGroup {
for _, p := range shared.Peers {
if code != 200 {
continue
}
paccess := (&peer.Peer{}) // send to all peers, add the collaborative area on the peer
s := workspace.Serialize(workspace)
s["name"] = fmt.Sprintf("%v", s["name"]) + "_" + k
b, err := paccess.LaunchPeerExecution(k, v, tools.WORKSPACE, tools.POST, s, a.Caller)
s := workspace.Serialize()
s["name"] = fmt.Sprintf("%v", s["name"]) + "_" + p
b, err := paccess.LaunchPeerExecution(p, v, tools.WORKSPACE, tools.POST, s, wfa.Caller)
if err != nil && b == nil {
a.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
wfa.Logger.Error().Msg("Could not send to peer " + p + ". Error: " + err.Error())
}
}
}
@@ -187,13 +86,14 @@ func (a *collaborativeAreaMongoAccessor) sharedWorkspace(shared *CollaborativeAr
}

// sharedWorkflow is a function that shares the shared workflow to the peers
func (a *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeArea, id string) {
eldest, code, _ := a.LoadOne(id) // get the eldest
func (wfa *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeArea, id string) {
accessor := (&w.Workflow{}).GetAccessor(nil)
eldest, code, _ := wfa.LoadOne(id) // get the eldest
if code == 200 {
eld := eldest.(*CollaborativeArea)
if eld.Workflows != nil {
for _, v := range eld.Workflows {
data, code, _ := a.workflowAccessor.LoadOne(v)
data, code, _ := accessor.LoadOne(v)
if code == 200 {
s := data.(*w.Workflow)
new := []string{}
@@ -204,15 +104,15 @@ func (a *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeAre
} // kick the shared reference in your old shared workflow
n := &w.Workflow{}
n.Shared = new
a.workflowAccessor.UpdateOne(n, v)
if a.Caller != nil || a.Caller.URLS == nil || a.Caller.URLS[tools.WORKFLOW] == nil {
accessor.UpdateOne(n, v)
if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKFLOW] == nil {
continue
}
paccess := (&peer.Peer{}) // send to all peers
for k := range shared.AllowedPeersGroup { // delete the shared workflow on the peer
b, err := paccess.LaunchPeerExecution(k, v, tools.WORKFLOW, tools.DELETE, nil, a.Caller)
paccess := (&peer.Peer{}) // send to all peers
for _, p := range shared.Peers { // delete the shared workflow on the peer
b, err := paccess.LaunchPeerExecution(p, v, tools.WORKFLOW, tools.DELETE, nil, wfa.Caller)
if err != nil && b == nil {
a.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
wfa.Logger.Error().Msg("Could not send to peer " + p + ". Error: " + err.Error())
}
}
}
@@ -221,23 +121,23 @@ func (a *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeAre
}
if shared.Workflows != nil { // update all the shared workflows
for _, v := range shared.Workflows {
data, code, _ := a.workflowAccessor.LoadOne(v)
data, code, _ := accessor.LoadOne(v)
if code == 200 {
s := data.(*w.Workflow)
if !slices.Contains(s.Shared, id) {
s.Shared = append(s.Shared, id)
workflow, code, _ := a.workflowAccessor.UpdateOne(s, v)
if a.Caller != nil || a.Caller.URLS == nil || a.Caller.URLS[tools.WORKFLOW] == nil {
workflow, code, _ := accessor.UpdateOne(s, v)
if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKFLOW] == nil {
continue
}
paccess := (&peer.Peer{})
for k := range shared.AllowedPeersGroup { // send to all peers
for _, p := range shared.Peers { // send to all peers
if code == 200 {
s := workflow.Serialize(workflow) // add the shared workflow on the peer
s["name"] = fmt.Sprintf("%v", s["name"]) + "_" + k
b, err := paccess.LaunchPeerExecution(k, shared.UUID, tools.WORKFLOW, tools.POST, s, a.Caller)
s := workflow.Serialize() // add the shared workflow on the peer
s["name"] = fmt.Sprintf("%v", s["name"]) + "_" + p
b, err := paccess.LaunchPeerExecution(p, shared.UUID, tools.WORKFLOW, tools.POST, s, wfa.Caller)
if err != nil && b == nil {
a.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
wfa.Logger.Error().Msg("Could not send to peer " + p + ". Error: " + err.Error())
}
}
}
@ -250,29 +150,194 @@ func (a *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeAre
|
||||
}
|
||||
|
||||
// sharedWorkspace is a function that shares the collaborative area to the peers
|
||||
func (a *collaborativeAreaMongoAccessor) deleteToPeer(shared *CollaborativeArea) {
|
||||
a.contactPeer(shared, tools.POST)
|
||||
func (wfa *collaborativeAreaMongoAccessor) deleteToPeer(shared *CollaborativeArea) {
|
||||
if wfa.Caller == nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.COLLABORATIVE_AREA] == nil || wfa.Caller.Disabled {
|
||||
return
|
||||
}
|
||||
paccess := (&peer.Peer{})
|
||||
for _, v := range shared.Peers {
|
||||
if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: v}}).IsMySelf(); ok {
|
||||
continue
|
||||
}
|
||||
b, err := paccess.LaunchPeerExecution(v, shared.UUID, tools.COLLABORATIVE_AREA, tools.DELETE, nil, wfa.Caller)
|
||||
if err != nil && b == nil {
|
||||
wfa.Logger.Error().Msg("Could not send to peer " + v + ". Error: " + err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sharedWorkspace is a function that shares the collaborative area to the peers
|
||||
func (a *collaborativeAreaMongoAccessor) sendToPeer(shared *CollaborativeArea) {
|
||||
a.contactPeer(shared, tools.POST)
|
||||
}
|
||||
|
||||
func (a *collaborativeAreaMongoAccessor) contactPeer(shared *CollaborativeArea, meth tools.METHOD) {
|
||||
if a.Caller == nil || a.Caller.URLS == nil || a.Caller.URLS[tools.COLLABORATIVE_AREA] == nil || a.Caller.Disabled {
|
||||
func (wfa *collaborativeAreaMongoAccessor) sendToPeer(shared *CollaborativeArea) {
|
||||
if wfa.Caller == nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.COLLABORATIVE_AREA] == nil || wfa.Caller.Disabled {
|
||||
return
|
||||
}
|
||||
|
||||
paccess := (&peer.Peer{})
|
||||
for k := range shared.AllowedPeersGroup {
|
||||
if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: k}}).IsMySelf(); ok || (shared.IsSent && meth == tools.POST) || (!shared.IsSent && meth != tools.POST) {
|
||||
for _, v := range shared.Peers {
|
||||
if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: v}}).IsMySelf(); ok || shared.IsSent {
|
||||
continue
|
||||
}
|
||||
shared.IsSent = meth == tools.POST
|
||||
b, err := paccess.LaunchPeerExecution(k, k, tools.COLLABORATIVE_AREA, meth, shared.Serialize(shared), a.Caller)
|
||||
shared.IsSent = true
|
||||
b, err := paccess.LaunchPeerExecution(v, v, tools.COLLABORATIVE_AREA, tools.POST, shared.Serialize(), wfa.Caller)
|
||||
if err != nil && b == nil {
|
||||
a.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
|
||||
wfa.Logger.Error().Msg("Could not send to peer " + v + ". Error: " + err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateOne updates a collaborative area in the database, given its ID and the new data, it automatically share to peers if the workspace is shared
|
||||
func (wfa *collaborativeAreaMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||
res, code, err := wfa.GenericUpdateOne(set.(*CollaborativeArea), id, wfa, &CollaborativeArea{})
|
||||
fmt.Println("UpdateOne", set, res, code, err)
|
||||
// wfa.deleteToPeer(res.(*CollaborativeArea)) // delete the collaborative area on the peer
|
||||
wfa.sharedWorkflow(res.(*CollaborativeArea), id) // replace all shared workflows
|
||||
wfa.sharedWorkspace(res.(*CollaborativeArea), id) // replace all collaborative areas (not shared worspace obj but workspace one)
|
||||
// wfa.sendToPeer(res.(*CollaborativeArea)) // send the collaborative area (collaborative area object) to the peers
|
||||
return res, code, err
|
||||
}
|
||||
|
||||
// StoreOne stores a collaborative area in the database, it automatically share to peers if the workspace is shared
|
||||
func (wfa *collaborativeAreaMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
_, id := (&peer.Peer{}).IsMySelf() // get the local peer
|
||||
data.(*CollaborativeArea).CreatorID = id // set the creator id
|
||||
data.(*CollaborativeArea).Peers = append(data.(*CollaborativeArea).Peers, id) // add the creator id to the peers
|
||||
// then reset the shared fields
|
||||
if data.(*CollaborativeArea).Workspaces == nil {
|
||||
data.(*CollaborativeArea).Workspaces = []string{}
|
||||
}
|
||||
if data.(*CollaborativeArea).Workflows == nil {
|
||||
data.(*CollaborativeArea).Workflows = []string{}
|
||||
}
|
||||
if data.(*CollaborativeArea).Rules == nil {
|
||||
data.(*CollaborativeArea).Rules = []string{}
|
||||
}
|
||||
if data.(*CollaborativeArea).CollaborativeAreaRule == nil {
|
||||
data.(*CollaborativeArea).CollaborativeAreaRule = &CollaborativeAreaRule{
|
||||
ShareMode: "private",
|
||||
ExploitedBy: "collaborators only",
|
||||
}
|
||||
}
|
||||
data.(*CollaborativeArea).CollaborativeAreaRule.CreatedAt = time.Now().UTC()
|
||||
// retrieve or proper peer
|
||||
dd, code, err := (&peer.Peer{}).GetAccessor(nil).Search(nil, "0")
|
||||
if code != 200 || len(dd) == 0 {
|
||||
return nil, code, errors.New("Could not retrieve the peer" + err.Error())
|
||||
}
|
||||
data.(*CollaborativeArea).CollaborativeAreaRule.Creator = dd[0].GetID()
|
||||
d, code, err := wfa.GenericStoreOne(data.(*CollaborativeArea), wfa)
|
||||
if code == 200 {
|
||||
wfa.sharedWorkflow(d.(*CollaborativeArea), d.GetID()) // create all shared workflows
|
||||
wfa.sharedWorkspace(d.(*CollaborativeArea), d.GetID()) // create all collaborative areas
|
||||
wfa.sendToPeer(d.(*CollaborativeArea)) // send the collaborative area (collaborative area object) to the peers
|
||||
}
|
||||
return data, code, err
|
||||
}
|
||||
|
||||
// CopyOne copies a CollaborativeArea in the database
|
||||
func (wfa *collaborativeAreaMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
return wfa.StoreOne(data)
|
||||
}

// enrich is a function that enriches the CollaborativeArea with the shared objects
func (wfa *collaborativeAreaMongoAccessor) enrich(sharedWorkspace *CollaborativeArea) *CollaborativeArea {
    access := (&workspace.Workspace{}).GetAccessor(nil)
    res, code, _ := access.Search(&dbs.Filters{
        Or: map[string][]dbs.Filter{
            "abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Workspaces}},
        },
    }, "")
    if code == 200 {
        for _, r := range res {
            sharedWorkspace.SharedWorkspaces = append(sharedWorkspace.SharedWorkspaces, r.(*workspace.Workspace))
        }
    }
    access = (&w.Workflow{}).GetAccessor(nil)
    res, code, _ = access.Search(&dbs.Filters{
        Or: map[string][]dbs.Filter{
            "abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Workflows}},
        },
    }, "")
    if code == 200 {
        for _, r := range res {
            sharedWorkspace.SharedWorkflows = append(sharedWorkspace.SharedWorkflows, r.(*w.Workflow))
        }
    }
    access = (&peer.Peer{}).GetAccessor(nil)
    res, code, _ = access.Search(&dbs.Filters{
        Or: map[string][]dbs.Filter{
            "abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Peers}},
        },
    }, "")
    if code == 200 {
        for _, r := range res {
            sharedWorkspace.SharedPeers = append(sharedWorkspace.SharedPeers, r.(*peer.Peer))
        }
    }
    access = (&rule.Rule{}).GetAccessor(nil)
    res, code, _ = access.Search(&dbs.Filters{
        Or: map[string][]dbs.Filter{
            "abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Rules}},
        },
    }, "")
    if code == 200 {
        for _, r := range res {
            sharedWorkspace.SharedRules = append(sharedWorkspace.SharedRules, r.(*rule.Rule))
        }
    }
    return sharedWorkspace
}
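The same filter shape is repeated four times in enrich; pulled out as a standalone sketch it looks like this (the helper name is ours, the field path, operator and accessor calls are taken from the code above):

func loadWorkspacesByID(ids []string) []*workspace.Workspace {
    out := []*workspace.Workspace{}
    access := (&workspace.Workspace{}).GetAccessor(nil)
    res, code, _ := access.Search(&dbs.Filters{
        Or: map[string][]dbs.Filter{
            "abstractobject.id": {{Operator: dbs.IN.String(), Value: ids}},
        },
    }, "")
    if code != 200 {
        return out
    }
    for _, r := range res {
        out = append(out, r.(*workspace.Workspace))
    }
    return out
}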

// LoadOne loads a collaborative area from the database, given its ID, and enriches it
func (wfa *collaborativeAreaMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
    var sharedWorkspace CollaborativeArea
    res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
    if err != nil {
        wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
        return nil, code, err
    }
    res_mongo.Decode(&sharedWorkspace)
    return wfa.enrich(&sharedWorkspace), 200, nil // enrich the collaborative area
}

// LoadAll loads all the collaborative areas from the database and enriches them
func (wfa collaborativeAreaMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
    objs := []utils.ShallowDBObject{}
    res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
    if err != nil {
        wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
        return nil, code, err
    }
    var results []CollaborativeArea
    if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
        return nil, 404, err
    }
    for _, r := range results {
        r := r                              // copy the loop variable so each appended pointer stays distinct
        objs = append(objs, wfa.enrich(&r)) // enrich the collaborative area
    }
    return objs, 200, nil
}

// Search searches for collaborative areas in the database, given some filters OR a search string
func (wfa *collaborativeAreaMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
    objs := []utils.ShallowDBObject{}
    if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
        filters = &dbs.Filters{
            Or: map[string][]dbs.Filter{ // search by name only by default; can be overridden
                "abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
            },
        }
    }
    res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
    if err != nil {
        wfa.Logger.Error().Msg("Could not search in db. Error: " + err.Error())
        return nil, code, err
    }
    var results []CollaborativeArea
    if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
        return nil, 404, err
    }
    for _, r := range results {
        r := r                              // copy the loop variable so each appended pointer stays distinct
        objs = append(objs, wfa.enrich(&r)) // enrich the collaborative area
    }
    return objs, 200, nil
}

@ -1,6 +1,8 @@
|
||||
package rule
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
"github.com/google/uuid"
|
||||
@ -16,14 +18,39 @@ type Rule struct {
|
||||
Actions []string `json:"actions,omitempty" bson:"actions,omitempty"` // NOT DEFINITIVE TO SPECIFICATION
|
||||
}
|
||||
|
||||
func (ao *Rule) GetID() string {
|
||||
return ao.UUID
|
||||
}
|
||||
|
||||
func (r *Rule) GenerateID() {
|
||||
r.UUID = uuid.New().String()
|
||||
}
|
||||
|
||||
func (d *Rule) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
|
||||
return New(tools.RULE, username, peerID, groups, caller)
|
||||
func (d *Rule) GetName() string {
|
||||
return d.Name
|
||||
}
|
||||
|
||||
func (d *Rule) VerifyAuth(username string, peerID string, groups []string) bool {
|
||||
return true
|
||||
func (d *Rule) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New()
|
||||
data.Init(tools.RULE, caller)
|
||||
return data
|
||||
}
|
||||
|
||||
func (dma *Rule) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||
b, err := json.Marshal(j)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, dma)
|
||||
return dma
|
||||
}
|
||||
|
||||
func (dma *Rule) Serialize() map[string]interface{} {
|
||||
var m map[string]interface{}
|
||||
b, err := json.Marshal(dma)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, &m)
|
||||
return m
|
||||
}
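Serialize and Deserialize are symmetric JSON conversions through a map; here is a minimal round-trip sketch, assuming Rule's embedded AbstractObject carries the Name field used by GetName above:

func ruleRoundTrip() *Rule {
    r := &Rule{}
    r.Name = "read-only"              // illustrative value
    m := r.Serialize()                // *Rule -> map[string]interface{} via JSON
    clone := (&Rule{}).Deserialize(m) // map -> utils.DBObject backed by a *Rule
    return clone.(*Rule)
}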
|
||||
|
@ -3,9 +3,7 @@ package rule
|
||||
import (
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||
"cloud.o-forge.io/core/oc-lib/logs"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
)
|
||||
|
||||
type ruleMongoAccessor struct {
|
||||
@ -13,44 +11,35 @@ type ruleMongoAccessor struct {
|
||||
}
|
||||
|
||||
// New creates a new instance of the ruleMongoAccessor
|
||||
func New(t tools.DataType, username string, peerID string, groups []string, caller *tools.HTTPCaller) *ruleMongoAccessor {
|
||||
return &ruleMongoAccessor{
|
||||
AbstractAccessor: utils.AbstractAccessor{
|
||||
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
|
||||
Caller: caller,
|
||||
PeerID: peerID,
|
||||
Groups: groups, // Set the caller
|
||||
User: username,
|
||||
Type: t,
|
||||
},
|
||||
}
|
||||
func New() *ruleMongoAccessor {
|
||||
return &ruleMongoAccessor{}
|
||||
}
|
||||
|
||||
// GetType returns the type of the rule
|
||||
func (a *ruleMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||
return utils.GenericDeleteOne(id, a)
|
||||
func (wfa *ruleMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||
return wfa.GenericDeleteOne(id, wfa)
|
||||
}
|
||||
|
||||
// UpdateOne updates a rule in the database
|
||||
func (a *ruleMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||
return utils.GenericUpdateOne(set.(*Rule), id, a, &Rule{})
|
||||
func (wfa *ruleMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||
return wfa.GenericUpdateOne(set.(*Rule), id, wfa, &Rule{})
|
||||
}
|
||||
|
||||
// StoreOne stores a rule in the database
|
||||
func (a *ruleMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
return utils.GenericStoreOne(data.(*Rule), a)
|
||||
func (wfa *ruleMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
return wfa.GenericStoreOne(data.(*Rule), wfa)
|
||||
}
|
||||
|
||||
func (a *ruleMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
return utils.GenericStoreOne(data, a)
|
||||
func (wfa *ruleMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
return wfa.GenericStoreOne(data, wfa)
|
||||
}
|
||||
|
||||
// LoadOne loads a rule from the database
|
||||
func (a *ruleMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||
func (wfa *ruleMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||
var rule Rule
|
||||
res_mongo, code, err := mongo.MONGOService.LoadOne(id, a.GetType().String())
|
||||
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
|
||||
if err != nil {
|
||||
a.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
res_mongo.Decode(&rule)
|
||||
@ -58,11 +47,11 @@ func (a *ruleMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||
}
|
||||
|
||||
// LoadAll loads all rules from the database
|
||||
func (a ruleMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||
func (wfa ruleMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||
objs := []utils.ShallowDBObject{}
|
||||
res_mongo, code, err := mongo.MONGOService.LoadAll(a.GetType().String())
|
||||
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||
if err != nil {
|
||||
a.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
var results []Rule
|
||||
@ -76,7 +65,7 @@ func (a ruleMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||
}
|
||||
|
||||
// Search searches for rules in the database, given some filters OR a search string
|
||||
func (a *ruleMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||
func (wfa *ruleMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||
objs := []utils.ShallowDBObject{}
|
||||
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||
filters = &dbs.Filters{
|
||||
@ -85,9 +74,9 @@ func (a *ruleMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils
|
||||
},
|
||||
}
|
||||
}
|
||||
res_mongo, code, err := mongo.MONGOService.Search(filters, a.GetType().String())
|
||||
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||
if err != nil {
|
||||
a.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
var results []Rule
|
||||
|
@ -1,13 +1,17 @@
|
||||
package shallow_collaborative_area
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
type ShallowCollaborativeArea struct {
|
||||
utils.AbstractObject
|
||||
IsSent bool `json:"is_sent" bson:"-"`
|
||||
CreatorID string `json:"peer_id,omitempty" bson:"peer_id,omitempty" validate:"required"`
|
||||
Version string `json:"version,omitempty" bson:"version,omitempty"`
|
||||
Description string `json:"description,omitempty" bson:"description,omitempty" validate:"required"`
|
||||
Attributes map[string]interface{} `json:"attributes,omitempty" bson:"attributes,omitempty"`
|
||||
@ -17,6 +21,41 @@ type ShallowCollaborativeArea struct {
|
||||
Rules []string `json:"rules,omitempty" bson:"rules,omitempty"`
|
||||
}
|
||||
|
||||
func (d *ShallowCollaborativeArea) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
|
||||
return New(tools.COLLABORATIVE_AREA, username, peerID, groups, caller)
|
||||
func (ao *ShallowCollaborativeArea) GetID() string {
|
||||
return ao.UUID
|
||||
}
|
||||
|
||||
func (r *ShallowCollaborativeArea) GenerateID() {
|
||||
if r.UUID == "" {
|
||||
r.UUID = uuid.New().String()
|
||||
}
|
||||
}
|
||||
|
||||
func (d *ShallowCollaborativeArea) GetName() string {
|
||||
return d.Name
|
||||
}
|
||||
|
||||
func (d *ShallowCollaborativeArea) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New()
|
||||
data.Init(tools.COLLABORATIVE_AREA, caller)
|
||||
return data
|
||||
}
|
||||
|
||||
func (dma *ShallowCollaborativeArea) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||
b, err := json.Marshal(j)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, dma)
|
||||
return dma
|
||||
}
|
||||
|
||||
func (dma *ShallowCollaborativeArea) Serialize() map[string]interface{} {
|
||||
var m map[string]interface{}
|
||||
b, err := json.Marshal(dma)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, &m)
|
||||
return m
|
||||
}
|
||||
|
@ -2,58 +2,82 @@ package shallow_collaborative_area
|
||||
|
||||
import (
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/logs"
|
||||
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
)
|
||||
|
||||
type shallowSharedWorkspaceMongoAccessor struct {
|
||||
utils.AbstractAccessor
|
||||
}
|
||||
|
||||
func New(t tools.DataType, username string, peerID string, groups []string, caller *tools.HTTPCaller) *shallowSharedWorkspaceMongoAccessor {
|
||||
return &shallowSharedWorkspaceMongoAccessor{
|
||||
AbstractAccessor: utils.AbstractAccessor{
|
||||
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
|
||||
Caller: caller,
|
||||
PeerID: peerID,
|
||||
User: username, // Set the caller
|
||||
Groups: groups, // Set the caller
|
||||
Type: t,
|
||||
},
|
||||
func New() *shallowSharedWorkspaceMongoAccessor {
|
||||
return &shallowSharedWorkspaceMongoAccessor{}
|
||||
}
|
||||
|
||||
func (wfa *shallowSharedWorkspaceMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||
return wfa.GenericDeleteOne(id, wfa)
|
||||
}
|
||||
|
||||
func (wfa *shallowSharedWorkspaceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||
return wfa.GenericUpdateOne(set.(*ShallowCollaborativeArea), id, wfa, &ShallowCollaborativeArea{})
|
||||
}
|
||||
|
||||
func (wfa *shallowSharedWorkspaceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
return wfa.GenericStoreOne(data.(*ShallowCollaborativeArea), wfa)
|
||||
}
|
||||
|
||||
func (wfa *shallowSharedWorkspaceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
return wfa.StoreOne(data)
|
||||
}
|
||||
|
||||
func (wfa *shallowSharedWorkspaceMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||
var sharedWorkspace ShallowCollaborativeArea
|
||||
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
|
||||
if err != nil {
|
||||
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
res_mongo.Decode(&sharedWorkspace)
|
||||
return &sharedWorkspace, 200, nil
|
||||
}
|
||||
|
||||
func (a *shallowSharedWorkspaceMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||
return utils.GenericDeleteOne(id, a)
|
||||
func (wfa shallowSharedWorkspaceMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||
objs := []utils.ShallowDBObject{}
|
||||
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||
if err != nil {
|
||||
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
var results []ShallowCollaborativeArea
|
||||
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
for _, r := range results {
|
||||
objs = append(objs, &r)
|
||||
}
|
||||
return objs, 200, nil
|
||||
}
|
||||
|
||||
func (a *shallowSharedWorkspaceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||
return utils.GenericUpdateOne(set.(*ShallowCollaborativeArea), id, a, &ShallowCollaborativeArea{})
|
||||
}
|
||||
|
||||
func (a *shallowSharedWorkspaceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
return utils.GenericStoreOne(data.(*ShallowCollaborativeArea), a)
|
||||
}
|
||||
|
||||
func (a *shallowSharedWorkspaceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
return a.StoreOne(data)
|
||||
}
|
||||
|
||||
func (a *shallowSharedWorkspaceMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||
return utils.GenericLoadOne[*ShallowCollaborativeArea](id, func(d utils.DBObject) (utils.DBObject, int, error) {
|
||||
return d, 200, nil
|
||||
}, a)
|
||||
}
|
||||
|
||||
func (a *shallowSharedWorkspaceMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||
return utils.GenericLoadAll[*ShallowCollaborativeArea](func(d utils.DBObject) utils.ShallowDBObject {
|
||||
return d
|
||||
}, a)
|
||||
}
|
||||
|
||||
func (a *shallowSharedWorkspaceMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||
return utils.GenericSearch[*ShallowCollaborativeArea](filters, search, (&ShallowCollaborativeArea{}).GetObjectFilters(search), func(d utils.DBObject) utils.ShallowDBObject {
|
||||
return d
|
||||
}, a)
|
||||
func (wfa *shallowSharedWorkspaceMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||
objs := []utils.ShallowDBObject{}
|
||||
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||
filters = &dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{
|
||||
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
},
|
||||
}
|
||||
}
|
||||
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||
if err != nil {
|
||||
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
var results []ShallowCollaborativeArea
|
||||
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
for _, r := range results {
|
||||
objs = append(objs, &r)
|
||||
}
|
||||
return objs, 200, nil
|
||||
}
|
||||
|
@ -8,8 +8,12 @@ import (
|
||||
"cloud.o-forge.io/core/oc-lib/models/collaborative_area"
|
||||
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
|
||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
||||
resource "cloud.o-forge.io/core/oc-lib/models/resources"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||
d "cloud.o-forge.io/core/oc-lib/models/resources/data"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
|
||||
p "cloud.o-forge.io/core/oc-lib/models/resources/processing"
|
||||
s "cloud.o-forge.io/core/oc-lib/models/resources/storage"
|
||||
w "cloud.o-forge.io/core/oc-lib/models/resources/workflow"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
w2 "cloud.o-forge.io/core/oc-lib/models/workflow"
|
||||
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
|
||||
@ -21,11 +25,11 @@ This package contains the models used in the application
|
||||
It's used to create the models dynamically
|
||||
*/
|
||||
var models = map[string]func() utils.DBObject{
|
||||
tools.WORKFLOW_RESOURCE.String(): func() utils.DBObject { return &resource.WorkflowResource{} },
|
||||
tools.DATA_RESOURCE.String(): func() utils.DBObject { return &resource.DataResource{} },
|
||||
tools.COMPUTE_RESOURCE.String(): func() utils.DBObject { return &resource.ComputeResource{} },
|
||||
tools.STORAGE_RESOURCE.String(): func() utils.DBObject { return &resource.StorageResource{} },
|
||||
tools.PROCESSING_RESOURCE.String(): func() utils.DBObject { return &resource.ProcessingResource{} },
|
||||
tools.WORKFLOW_RESOURCE.String(): func() utils.DBObject { return &w.WorkflowResource{} },
|
||||
tools.DATA_RESOURCE.String(): func() utils.DBObject { return &d.DataResource{} },
|
||||
tools.COMPUTE_RESOURCE.String(): func() utils.DBObject { return &compute.ComputeResource{} },
|
||||
tools.STORAGE_RESOURCE.String(): func() utils.DBObject { return &s.StorageResource{} },
|
||||
tools.PROCESSING_RESOURCE.String(): func() utils.DBObject { return &p.ProcessingResource{} },
|
||||
tools.WORKFLOW.String(): func() utils.DBObject { return &w2.Workflow{} },
|
||||
tools.WORKFLOW_EXECUTION.String(): func() utils.DBObject { return &workflow_execution.WorkflowExecution{} },
|
||||
tools.WORKSPACE.String(): func() utils.DBObject { return &w3.Workspace{} },
|
||||
|
@ -1,10 +1,12 @@
|
||||
package peer
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// Peer state enum: self, blacklist, partner
|
||||
@ -63,7 +65,7 @@ func (ao *Peer) RemoveExecution(exec PeerExecution) {
|
||||
|
||||
// IsMySelf checks if the peer is the local peer
|
||||
func (ao *Peer) IsMySelf() (bool, string) {
|
||||
d, code, err := New(tools.PEER, "", "", nil, nil).Search(nil, SELF.String())
|
||||
d, code, err := ao.GetAccessor(nil).Search(nil, SELF.String())
|
||||
if code != 200 || err != nil || len(d) == 0 {
|
||||
return false, ""
|
||||
}
|
||||
@ -76,11 +78,42 @@ func (p *Peer) LaunchPeerExecution(peerID string, dataID string, dt tools.DataTy
|
||||
p.UUID = peerID
|
||||
return cache.LaunchPeerExecution(peerID, dataID, dt, method, body, caller) // Launch the execution on the peer through the cache
|
||||
}
|
||||
func (d *Peer) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New(tools.PEER, username, peerID, groups, caller) // Create a new instance of the accessor
|
||||
|
||||
func (ao *Peer) GetID() string {
|
||||
return ao.UUID
|
||||
}
|
||||
|
||||
func (r *Peer) GenerateID() {
|
||||
if r.UUID == "" {
|
||||
r.UUID = uuid.New().String()
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Peer) GetName() string {
|
||||
return d.Name
|
||||
}
|
||||
|
||||
func (d *Peer) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New() // Create a new instance of the accessor
|
||||
data.Init(tools.PEER, caller) // Initialize the accessor with the PEER model type
|
||||
return data
|
||||
}
|
||||
|
||||
func (d *Peer) VerifyAuth(username string, peerID string, groups []string) bool {
|
||||
return true
|
||||
func (dma *Peer) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||
b, err := json.Marshal(j)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, dma)
|
||||
return dma
|
||||
}
|
||||
|
||||
func (dma *Peer) Serialize() map[string]interface{} {
|
||||
var m map[string]interface{}
|
||||
b, err := json.Marshal(dma)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, &m)
|
||||
return m
|
||||
}
|
||||
|
@ -56,7 +56,7 @@ func (p *PeerCache) urlFormat(url string, dt tools.DataType) string {
|
||||
// checkPeerStatus checks the status of a peer
|
||||
func (p *PeerCache) checkPeerStatus(peerID string, appName string, caller *tools.HTTPCaller) (*Peer, bool) {
|
||||
api := tools.API{}
|
||||
access := NewShallow()
|
||||
access := (&Peer{}).GetAccessor(nil)
|
||||
res, code, _ := access.LoadOne(peerID) // Load the peer from db
|
||||
if code != 200 { // no peer no party
|
||||
return nil, false
|
||||
@ -101,18 +101,18 @@ func (p *PeerCache) LaunchPeerExecution(peerID string, dataID string,
|
||||
DataID: dataID,
|
||||
}
|
||||
mypeer.AddExecution(*pexec)
|
||||
NewShallow().UpdateOne(mypeer, peerID) // Update the peer in the db
|
||||
mypeer.GetAccessor(nil).UpdateOne(mypeer, peerID) // Update the peer in the db
|
||||
return nil, errors.New("peer is not reachable")
|
||||
} else {
|
||||
if mypeer == nil {
|
||||
return nil, errors.New("peer not found")
|
||||
}
|
||||
// If the peer is reachable, launch the execution
|
||||
url = p.urlFormat((mypeer.Url)+meth, dt) // Format the URL
|
||||
tmp := mypeer.FailedExecution // Get the failed executions list
|
||||
mypeer.FailedExecution = []PeerExecution{} // Reset the failed executions list
|
||||
NewShallow().UpdateOne(mypeer, peerID) // Update the peer in the db
|
||||
for _, v := range tmp { // Retry the failed executions
|
||||
url = p.urlFormat((mypeer.Url)+meth, dt) // Format the URL
|
||||
tmp := mypeer.FailedExecution // Get the failed executions list
|
||||
mypeer.FailedExecution = []PeerExecution{} // Reset the failed executions list
|
||||
mypeer.GetAccessor(nil).UpdateOne(mypeer, peerID) // Update the peer in the db
|
||||
for _, v := range tmp { // Retry the failed executions
|
||||
go p.exec(v.Url, tools.ToMethod(v.Method), v.Body, caller)
|
||||
}
|
||||
}
|
||||
|
@ -4,9 +4,8 @@ import (
|
||||
"strconv"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/logs"
|
||||
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
)
|
||||
|
||||
type peerMongoAccessor struct {
|
||||
@ -14,26 +13,8 @@ type peerMongoAccessor struct {
|
||||
}
|
||||
|
||||
// New creates a new instance of the peerMongoAccessor
|
||||
func NewShallow() *peerMongoAccessor {
|
||||
return &peerMongoAccessor{
|
||||
utils.AbstractAccessor{
|
||||
Logger: logs.CreateLogger(tools.PEER.String()), // Create a logger with the data type
|
||||
Type: tools.PEER,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func New(t tools.DataType, username string, peerID string, groups []string, caller *tools.HTTPCaller) *peerMongoAccessor {
|
||||
return &peerMongoAccessor{
|
||||
utils.AbstractAccessor{
|
||||
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
|
||||
Caller: caller,
|
||||
PeerID: peerID,
|
||||
User: username,
|
||||
Groups: groups, // Set the caller
|
||||
Type: t,
|
||||
},
|
||||
}
|
||||
func New() *peerMongoAccessor {
|
||||
return &peerMongoAccessor{}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -41,53 +22,80 @@ func New(t tools.DataType, username string, peerID string, groups []string, call
|
||||
*/
|
||||
|
||||
func (wfa *peerMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||
return utils.GenericDeleteOne(id, wfa)
|
||||
return wfa.GenericDeleteOne(id, wfa)
|
||||
}
|
||||
|
||||
func (wfa *peerMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||
return utils.GenericUpdateOne(set.(*Peer), id, wfa, &Peer{})
|
||||
return wfa.GenericUpdateOne(set.(*Peer), id, wfa, &Peer{})
|
||||
}
|
||||
|
||||
func (wfa *peerMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
return utils.GenericStoreOne(data.(*Peer), wfa)
|
||||
return wfa.GenericStoreOne(data.(*Peer), wfa)
|
||||
}
|
||||
|
||||
func (wfa *peerMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
return utils.GenericStoreOne(data, wfa)
|
||||
return wfa.GenericStoreOne(data, wfa)
|
||||
}
|
||||
|
||||
func (dca *peerMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||
return utils.GenericLoadOne[*Peer](id, func(d utils.DBObject) (utils.DBObject, int, error) {
|
||||
return d, 200, nil
|
||||
}, dca)
|
||||
func (wfa *peerMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||
var peer Peer
|
||||
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
|
||||
if err != nil {
|
||||
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
res_mongo.Decode(&peer)
|
||||
|
||||
return &peer, 200, nil
|
||||
}
|
||||
|
||||
func (wfa *peerMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||
return utils.GenericLoadAll[*Peer](func(d utils.DBObject) utils.ShallowDBObject {
|
||||
return d
|
||||
}, wfa)
|
||||
func (wfa peerMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||
objs := []utils.ShallowDBObject{}
|
||||
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||
if err != nil {
|
||||
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
var results []Peer
|
||||
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
for _, r := range results {
|
||||
objs = append(objs, &r)
|
||||
}
|
||||
return objs, 200, nil
|
||||
}
|
||||
|
||||
func (wfa *peerMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||
return utils.GenericSearch[*Peer](filters, search, wfa.getDefaultFilter(search),
|
||||
func(d utils.DBObject) utils.ShallowDBObject {
|
||||
return d
|
||||
}, wfa)
|
||||
}
|
||||
func (a *peerMongoAccessor) getDefaultFilter(search string) *dbs.Filters {
|
||||
s, err := strconv.Atoi(search)
|
||||
if err == nil {
|
||||
return &dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{ // search by state when the search string is numeric
|
||||
"state": {{Operator: dbs.EQUAL.String(), Value: s}},
|
||||
},
|
||||
}
|
||||
} else {
|
||||
return &dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{ // search by name if no filters are provided
|
||||
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"url": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
},
|
||||
objs := []utils.ShallowDBObject{}
|
||||
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||
s, err := strconv.Atoi(search)
|
||||
if err == nil {
|
||||
filters = &dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{ // search by state when the search string is numeric
|
||||
"state": {{Operator: dbs.EQUAL.String(), Value: s}},
|
||||
},
|
||||
}
|
||||
} else {
|
||||
filters = &dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{ // search by name if no filters are provided
|
||||
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"url": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||
if err != nil {
|
||||
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
var results []Peer
|
||||
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
for _, r := range results {
|
||||
objs = append(objs, &r)
|
||||
}
|
||||
return objs, 200, nil
|
||||
}
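A short sketch of the two branches of the default search filter above; the search values are illustrative and the accessor is assumed to be already initialised:

func searchPeersExamples(wfa *peerMongoAccessor) {
    byState, _, _ := wfa.Search(nil, "1")      // numeric string: filters on "state" with EQUAL
    byName, _, _ := wfa.Search(nil, "my-peer") // anything else: LIKE on name or url
    _, _ = byState, byName
}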
|
||||
|
@ -2,10 +2,7 @@ package resource_model
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"slices"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/config"
|
||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
"github.com/google/uuid"
|
||||
@ -29,35 +26,9 @@ type AbstractResource struct {
|
||||
OwnerLogo string `json:"owner_logo,omitempty" bson:"owner_logo,omitempty"` // OwnerLogo is the owner logo of the resource
|
||||
SourceUrl string `json:"source_url,omitempty" bson:"source_url,omitempty" validate:"required"` // SourceUrl is the source URL of the resource
|
||||
PeerID string `json:"peer_id,omitempty" bson:"peer_id,omitempty" validate:"required"` // PeerID is the ID of the peer getting this resource
|
||||
Price string `json:"price,omitempty" bson:"price,omitempty"` // Price is the price of access to the resource
|
||||
License string `json:"license,omitempty" bson:"license,omitempty"` // License is the license of the resource
|
||||
ResourceModel *ResourceModel `json:"resource_model,omitempty" bson:"resource_model,omitempty"` // ResourceModel is the model of the resource
|
||||
|
||||
AllowedPeersGroup map[string][]string `json:"allowed_peers_group,omitempty" bson:"allowed_peers_group,omitempty"` // AllowedPeersGroup is the group of allowed peers
|
||||
|
||||
Price string `json:"price,omitempty" bson:"price,omitempty"` // Price is the price of access to the resource
|
||||
Currency string `json:"currency,omitempty" bson:"currency,omitempty"` // Currency is the currency of the price
|
||||
}
|
||||
|
||||
func (ao *AbstractResource) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (abs *AbstractResource) SetResourceModel(model *ResourceModel) {
|
||||
abs.ResourceModel = model
|
||||
}
|
||||
|
||||
func (abs *AbstractResource) VerifyAuth(username string, peerID string, groups []string) bool {
|
||||
if grps, ok := abs.AllowedPeersGroup[peerID]; ok || config.GetConfig().Whitelist {
|
||||
if (ok && slices.Contains(grps, "*")) || (!ok && config.GetConfig().Whitelist) {
|
||||
return true
|
||||
}
|
||||
for _, grp := range grps {
|
||||
if slices.Contains(groups, grp) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return abs.AbstractObject.VerifyAuth(username, peerID, groups)
|
||||
}
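For the AllowedPeersGroup check on the master side of this hunk, a small sketch of the intended semantics (peer IDs, user and group names are invented):

func verifyAuthExample() bool {
    res := &AbstractResource{
        AllowedPeersGroup: map[string][]string{
            "peer-42": {"datascience"}, // callers from peer-42 need the datascience group
            "peer-43": {"*"},           // wildcard: any caller coming from peer-43 is accepted
        },
    }
    return res.VerifyAuth("alice", "peer-42", []string{"datascience"}) // true via the group match
}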
|
||||
|
||||
/*
|
||||
@ -97,13 +68,6 @@ func (abs *AbstractResource) GetModelReadOnly(cat string, key string) interface{
|
||||
return abs.ResourceModel.Model[cat][key].ReadOnly
|
||||
}
|
||||
|
||||
func (d *AbstractResource) Trim() *AbstractResource {
|
||||
if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: d.PeerID}}).IsMySelf(); !ok {
|
||||
d.AllowedPeersGroup = map[string][]string{}
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
type Model struct {
|
||||
Type string `json:"type,omitempty" bson:"type,omitempty"` // Type is the type of the model
|
||||
ReadOnly bool `json:"readonly,omitempty" bson:"readonly,omitempty"` // ReadOnly is the readonly of the model
|
||||
@ -125,7 +89,7 @@ func (ao *ResourceModel) GetID() string {
|
||||
return ao.UUID
|
||||
}
|
||||
|
||||
func (ao *ResourceModel) UpToDate(user string, create bool) {}
|
||||
func (ao *ResourceModel) UpToDate() {}
|
||||
|
||||
func (r *ResourceModel) GenerateID() {
|
||||
r.UUID = uuid.New().String()
|
||||
@ -135,34 +99,24 @@ func (d *ResourceModel) GetName() string {
|
||||
return d.UUID
|
||||
}
|
||||
|
||||
func (abs *ResourceModel) VerifyAuth(username string, peerID string, groups []string) bool {
|
||||
return true
|
||||
func (d *ResourceModel) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := &ResourceModelMongoAccessor{}
|
||||
data.Init(tools.RESOURCE_MODEL, caller)
|
||||
return data
|
||||
}
|
||||
|
||||
func (d *ResourceModel) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
|
||||
return &ResourceModelMongoAccessor{
|
||||
utils.AbstractAccessor{
|
||||
Type: tools.RESOURCE_MODEL,
|
||||
PeerID: peerID,
|
||||
Groups: groups,
|
||||
User: username,
|
||||
Caller: caller,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (dma *ResourceModel) Deserialize(j map[string]interface{}, obj utils.DBObject) utils.DBObject {
|
||||
func (dma *ResourceModel) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||
b, err := json.Marshal(j)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, obj)
|
||||
return obj
|
||||
json.Unmarshal(b, dma)
|
||||
return dma
|
||||
}
|
||||
|
||||
func (dma *ResourceModel) Serialize(obj utils.DBObject) map[string]interface{} {
|
||||
func (dma *ResourceModel) Serialize() map[string]interface{} {
|
||||
var m map[string]interface{}
|
||||
b, err := json.Marshal(obj)
|
||||
b, err := json.Marshal(dma)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
83
models/resource_model/resource_model_mongo_accessor.go
Normal file
@ -0,0 +1,83 @@
|
||||
package resource_model
|
||||
|
||||
import (
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
)
|
||||
|
||||
type ResourceModelMongoAccessor struct {
|
||||
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
|
||||
}
|
||||
|
||||
/*
|
||||
* Nothing special here, just the basic CRUD operations
|
||||
*/
|
||||
|
||||
func (wfa *ResourceModelMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||
return wfa.GenericDeleteOne(id, wfa)
|
||||
}
|
||||
|
||||
func (wfa *ResourceModelMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||
return wfa.GenericUpdateOne(set, id, wfa, &ResourceModel{})
|
||||
}
|
||||
|
||||
func (wfa *ResourceModelMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
return wfa.GenericStoreOne(data, wfa)
|
||||
}
|
||||
|
||||
func (wfa *ResourceModelMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
return wfa.GenericStoreOne(data, wfa)
|
||||
}
|
||||
|
||||
func (wfa *ResourceModelMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||
var workflow ResourceModel
|
||||
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
|
||||
if err != nil {
|
||||
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
res_mongo.Decode(&workflow)
|
||||
return &workflow, 200, nil
|
||||
}
|
||||
|
||||
func (wfa ResourceModelMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||
objs := []utils.ShallowDBObject{}
|
||||
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||
if err != nil {
|
||||
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
var results []ResourceModel
|
||||
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
for _, r := range results {
|
||||
objs = append(objs, &r)
|
||||
}
|
||||
return objs, 200, nil
|
||||
}
|
||||
|
||||
func (wfa *ResourceModelMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||
objs := []utils.ShallowDBObject{}
|
||||
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||
filters = &dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{
|
||||
"resource_type": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
},
|
||||
}
|
||||
}
|
||||
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||
if err != nil {
|
||||
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
var results []ResourceModel
|
||||
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
for _, r := range results {
|
||||
objs = append(objs, &r)
|
||||
}
|
||||
return objs, 200, nil
|
||||
}
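The compute and data accessors below all attach the shared ResourceModel the same way after loading a resource; written from a consumer package's point of view, the pattern is roughly this (the helper name is ours):

func attachResourceModel(abs *resource_model.AbstractResource, resourceType string) {
    accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
    resources, _, err := accessor.Search(nil, resourceType)
    if err == nil && len(resources) > 0 {
        abs.SetResourceModel(resources[0].(*resource_model.ResourceModel))
    }
}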
|
@ -1,53 +1,13 @@
|
||||
package resources
|
||||
package compute
|
||||
|
||||
import (
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
|
||||
"encoding/json"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
)
|
||||
|
||||
/*
|
||||
* ComputeResource is a struct that represents a compute resource
|
||||
* it defines the resource compute
|
||||
*/
|
||||
type ComputeResource struct {
|
||||
resource_model.AbstractResource
|
||||
Technology TechnologyEnum `json:"technology" bson:"technology" default:"0"` // Technology is the technology
|
||||
Architecture string `json:"architecture,omitempty" bson:"architecture,omitempty"` // Architecture is the architecture
|
||||
Access AccessEnum `json:"access" bson:"access" default:"0"` // Access is the access
|
||||
|
||||
Localisation string `json:"localisation,omitempty" bson:"localisation,omitempty"` // Localisation is the localisation
|
||||
|
||||
CPUs []*CPU `bson:"cpus,omitempty" json:"cpus,omitempty"` // CPUs is the list of CPUs
|
||||
RAM *RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
|
||||
GPUs []*GPU `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs
|
||||
}
|
||||
|
||||
func (d *ComputeResource) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
|
||||
return New[*ComputeResource](tools.COMPUTE_RESOURCE, username, peerID, groups, caller, func() utils.DBObject { return &ComputeResource{} })
|
||||
}
|
||||
|
||||
// CPU is a struct that represents a CPU
|
||||
type CPU struct {
|
||||
Cores uint `bson:"cores,omitempty" json:"cores,omitempty"` //TODO: validate
|
||||
Architecture string `bson:"architecture,omitempty" json:"architecture,omitempty"` //TOOD: enum
|
||||
Shared bool `bson:"shared,omitempty" json:"shared,omitempty"`
|
||||
MinimumMemory uint `bson:"minimum_memory,omitempty" json:"minimum_memory,omitempty"`
|
||||
Platform string `bson:"platform,omitempty" json:"platform,omitempty"`
|
||||
}
|
||||
|
||||
type RAM struct {
|
||||
Size uint `bson:"size,omitempty" json:"size,omitempty" description:"Units in MB"`
|
||||
Ecc bool `bson:"ecc,omitempty" json:"ecc,omitempty"`
|
||||
}
|
||||
|
||||
type GPU struct {
|
||||
CudaCores uint `bson:"cuda_cores,omitempty" json:"cuda_cores,omitempty"`
|
||||
Model string `bson:"model,omitempty" json:"model,omitempty"`
|
||||
Memory uint `bson:"memory,omitempty" json:"memory,omitempty" description:"Units in MB"`
|
||||
TensorCores uint `bson:"tensor_cores,omitempty" json:"tensor_cores,omitempty"`
|
||||
}
|
||||
|
||||
type TechnologyEnum int
|
||||
|
||||
const (
|
||||
@ -76,3 +36,66 @@ const (
|
||||
func (a AccessEnum) String() string {
|
||||
return [...]string{"SSH", "SSH_KUBE_API", "SSH_SLURM", "SSH_DOCKER", "OPENCLOUD", "VPN"}[a]
|
||||
}
|
||||
|
||||
/*
|
||||
* ComputeResource is a struct that represents a compute resource
|
||||
* it defines the resource compute
|
||||
*/
|
||||
type ComputeResource struct {
|
||||
resource_model.AbstractResource
|
||||
Technology TechnologyEnum `json:"technology" bson:"technology" default:"0"` // Technology is the technology
|
||||
Architecture string `json:"architecture,omitempty" bson:"architecture,omitempty"` // Architecture is the architecture
|
||||
Access AccessEnum `json:"access" bson:"access" default:"0"` // Access is the access
|
||||
|
||||
Localisation string `json:"localisation,omitempty" bson:"localisation,omitempty"` // Localisation is the localisation
|
||||
|
||||
CPUs []*CPU `bson:"cpus,omitempty" json:"cpus,omitempty"` // CPUs is the list of CPUs
|
||||
RAM *RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
|
||||
GPUs []*GPU `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs
|
||||
}
|
||||
|
||||
func (dma *ComputeResource) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||
b, err := json.Marshal(j)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, dma)
|
||||
return dma
|
||||
}
|
||||
|
||||
func (dma *ComputeResource) Serialize() map[string]interface{} {
|
||||
var m map[string]interface{}
|
||||
b, err := json.Marshal(dma)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, &m)
|
||||
return m
|
||||
}
|
||||
|
||||
func (d *ComputeResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New()
|
||||
data.Init(tools.COMPUTE_RESOURCE, caller)
|
||||
return data
|
||||
}
|
||||
|
||||
// CPU is a struct that represents a CPU
|
||||
type CPU struct {
|
||||
Cores uint `bson:"cores,omitempty" json:"cores,omitempty"` //TODO: validate
|
||||
Architecture string `bson:"architecture,omitempty" json:"architecture,omitempty"` //TODO: enum
|
||||
Shared bool `bson:"shared,omitempty" json:"shared,omitempty"`
|
||||
MinimumMemory uint `bson:"minimum_memory,omitempty" json:"minimum_memory,omitempty"`
|
||||
Platform string `bson:"platform,omitempty" json:"platform,omitempty"`
|
||||
}
|
||||
|
||||
type RAM struct {
|
||||
Size uint `bson:"size,omitempty" json:"size,omitempty" description:"Units in MB"`
|
||||
Ecc bool `bson:"ecc,omitempty" json:"ecc,omitempty"`
|
||||
}
|
||||
|
||||
type GPU struct {
|
||||
CudaCores uint `bson:"cuda_cores,omitempty" json:"cuda_cores,omitempty"`
|
||||
Model string `bson:"model,omitempty" json:"model,omitempty"`
|
||||
Memory uint `bson:"memory,omitempty" json:"memory,omitempty" description:"Units in MB"`
|
||||
TensorCores uint `bson:"tensor_cores,omitempty" json:"tensor_cores,omitempty"`
|
||||
}
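A minimal sketch of a ComputeResource hardware description using the structs above; all values are invented, and the Technology/Access enums are left at their defaults since their constants are not shown in this hunk:

func exampleComputeDescription() *ComputeResource {
    return &ComputeResource{
        Architecture: "x86_64",
        CPUs:         []*CPU{{Cores: 16, Architecture: "x86_64", Shared: true, MinimumMemory: 2048}},
        RAM:          &RAM{Size: 32768, Ecc: true}, // size in MB
        GPUs:         []*GPU{{Model: "A100", Memory: 40960, CudaCores: 6912, TensorCores: 432}},
    }
}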
|
112
models/resources/compute/compute_mongo_accessor.go
Normal file
@ -0,0 +1,112 @@
|
||||
package compute
|
||||
|
||||
import (
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
)
|
||||
|
||||
type computeMongoAccessor struct {
|
||||
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
|
||||
}
|
||||
|
||||
// New creates a new instance of the computeMongoAccessor
|
||||
func New() *computeMongoAccessor {
|
||||
return &computeMongoAccessor{}
|
||||
}
|
||||
|
||||
/*
|
||||
* Nothing special here, just the basic CRUD operations
|
||||
*/
|
||||
|
||||
func (dca *computeMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||
return dca.GenericDeleteOne(id, dca)
|
||||
}
|
||||
|
||||
func (dca *computeMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||
set.(*ComputeResource).ResourceModel = nil
|
||||
return dca.GenericUpdateOne(set, id, dca, &ComputeResource{})
|
||||
}
|
||||
|
||||
func (dca *computeMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
data.(*ComputeResource).ResourceModel = nil
|
||||
return dca.GenericStoreOne(data, dca)
|
||||
}
|
||||
|
||||
func (dca *computeMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
return dca.GenericStoreOne(data, dca)
|
||||
}
|
||||
|
||||
func (dca *computeMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||
var compute ComputeResource
|
||||
|
||||
res_mongo, code, err := mongo.MONGOService.LoadOne(id, dca.GetType())
|
||||
if err != nil {
|
||||
dca.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
|
||||
res_mongo.Decode(&compute)
|
||||
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||
resources, _, err := accessor.Search(nil, dca.GetType())
|
||||
if err == nil && len(resources) > 0 {
|
||||
compute.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
return &compute, 200, nil
|
||||
}
|
||||
|
||||
func (wfa computeMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||
objs := []utils.ShallowDBObject{}
|
||||
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||
if err != nil {
|
||||
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
var results []ComputeResource
|
||||
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||
for _, r := range results {
|
||||
if err == nil && len(resources) > 0 {
|
||||
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
objs = append(objs, &r) // only get the abstract resource !
|
||||
}
|
||||
return objs, 200, nil
|
||||
}
|
||||
|
||||
func (wfa *computeMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||
objs := []utils.ShallowDBObject{}
|
||||
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||
filters = &dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
|
||||
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
},
|
||||
}
|
||||
}
|
||||
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||
if err != nil {
|
||||
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
var results []ComputeResource
|
||||
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||
for _, r := range results {
|
||||
if err == nil && len(resources) > 0 {
|
||||
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
objs = append(objs, &r) // only get the abstract resource !
|
||||
}
|
||||
return objs, 200, nil
|
||||
}
|
46
models/resources/compute/compute_test.go
Normal file
@ -0,0 +1,46 @@
|
||||
package compute
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestStoreOneCompute(t *testing.T) {
|
||||
dc := ComputeResource{
|
||||
AbstractResource: resource_model.AbstractResource{
|
||||
AbstractObject: utils.AbstractObject{Name: "testCompute"},
|
||||
Description: "Lorem Ipsum",
|
||||
Logo: "azerty.com",
|
||||
Owner: "toto",
|
||||
OwnerLogo: "totoLogo",
|
||||
SourceUrl: "azerty.fr",
|
||||
},
|
||||
}
|
||||
|
||||
dcma := New()
|
||||
id, _, _ := dcma.StoreOne(&dc)
|
||||
|
||||
assert.NotEmpty(t, id)
|
||||
}
|
||||
|
||||
func TestLoadOneCompute(t *testing.T) {
|
||||
dc := ComputeResource{
|
||||
AbstractResource: resource_model.AbstractResource{
|
||||
AbstractObject: utils.AbstractObject{Name: "testCompute"},
|
||||
Description: "Lorem Ipsum",
|
||||
Logo: "azerty.com",
|
||||
Owner: "toto",
|
||||
OwnerLogo: "totoLogo",
|
||||
SourceUrl: "azerty.fr",
|
||||
},
|
||||
}
|
||||
|
||||
dcma := New()
|
||||
new_dc, _, _ := dcma.StoreOne(&dc)
|
||||
|
||||
assert.Equal(t, dc, new_dc)
|
||||
}
|
@ -1,7 +1,9 @@
|
||||
package resources
|
||||
package data
|
||||
|
||||
import (
|
||||
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
|
||||
"encoding/json"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
)
|
||||
@ -37,6 +39,27 @@ type DataResource struct {
|
||||
Example string `json:"example,omitempty" bson:"example,omitempty" description:"base64 encoded data"` // Example is an example of the data
|
||||
}
|
||||
|
||||
func (d *DataResource) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
|
||||
return New[*DataResource](tools.DATA_RESOURCE, username, peerID, groups, caller, func() utils.DBObject { return &DataResource{} }) // Create a new instance of the accessor
|
||||
func (dma *DataResource) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||
b, err := json.Marshal(j)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, dma)
|
||||
return dma
|
||||
}
|
||||
|
||||
func (dma *DataResource) Serialize() map[string]interface{} {
|
||||
var m map[string]interface{}
|
||||
b, err := json.Marshal(dma)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
json.Unmarshal(b, &m)
|
||||
return m
|
||||
}
|
||||
|
||||
func (d *DataResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||
data := New() // Create a new instance of the accessor
|
||||
data.Init(tools.DATA_RESOURCE, caller) // Initialize the accessor with the DATA_RESOURCE model type
|
||||
return data
|
||||
}
|
110
models/resources/data/data_mongo_accessor.go
Normal file
@ -0,0 +1,110 @@
|
||||
package data
|
||||
|
||||
import (
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
mongo "cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
)
|
||||
|
||||
type dataMongoAccessor struct {
|
||||
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
|
||||
}
|
||||
|
||||
// New creates a new instance of the dataMongoAccessor
|
||||
func New() *dataMongoAccessor {
|
||||
return &dataMongoAccessor{}
|
||||
}
|
||||
|
||||
/*
|
||||
* Nothing special here, just the basic CRUD operations
|
||||
*/
|
||||
|
||||
func (dma *dataMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||
return dma.GenericDeleteOne(id, dma)
|
||||
}
|
||||
|
||||
func (dma *dataMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||
set.(*DataResource).ResourceModel = nil
|
||||
return dma.GenericUpdateOne(set, id, dma, &DataResource{})
|
||||
}
|
||||
|
||||
func (dma *dataMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
data.(*DataResource).ResourceModel = nil
|
||||
return dma.GenericStoreOne(data, dma)
|
||||
}
|
||||
|
||||
func (dma *dataMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||
return dma.GenericStoreOne(data, dma)
|
||||
}
|
||||
|
||||
func (dma *dataMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||
var data DataResource
|
||||
res_mongo, code, err := mongo.MONGOService.LoadOne(id, dma.GetType())
|
||||
if err != nil {
|
||||
dma.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
res_mongo.Decode(&data)
|
||||
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||
resources, _, err := accessor.Search(nil, dma.GetType())
|
||||
if err == nil && len(resources) > 0 {
|
||||
data.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
return &data, 200, nil
|
||||
}
|
||||
|
||||
func (wfa dataMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||
objs := []utils.ShallowDBObject{}
|
||||
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||
if err != nil {
|
||||
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
var results []DataResource
|
||||
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||
for _, r := range results {
|
||||
if err == nil && len(resources) > 0 {
|
||||
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
objs = append(objs, &r) // only get the abstract resource !
|
||||
}
|
||||
return objs, 200, nil
|
||||
}
|
||||
|
||||
func (wfa *dataMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||
objs := []utils.ShallowDBObject{}
|
||||
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||
filters = &dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
|
||||
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||
},
|
||||
}
|
||||
}
|
||||
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||
if err != nil {
|
||||
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
var results []DataResource
|
||||
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||
for _, r := range results {
|
||||
if err == nil && len(resources) > 0 {
|
||||
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||
}
|
||||
objs = append(objs, &r) // only get the abstract resource !
|
||||
}
|
||||
return objs, 200, nil
|
||||
}
|
53
models/resources/data/data_test.go
Normal file
@ -0,0 +1,53 @@
package data

import (
"testing"

"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"

"github.com/stretchr/testify/assert"
)

func TestStoreOneData(t *testing.T) {
d := DataResource{
WebResource: resource_model.WebResource{
Protocol: "http", Path: "azerty.fr",
},
Example: "123456",
AbstractResource: resource_model.AbstractResource{
AbstractObject: utils.AbstractObject{Name: "testData"},
Description: "Lorem Ipsum",
Logo: "azerty.com",
Owner: "toto",
OwnerLogo: "totoLogo",
SourceUrl: "azerty.fr",
},
}

dma := New()
id, _, _ := dma.StoreOne(&d)

assert.NotEmpty(t, id)
}

func TestLoadOneDate(t *testing.T) {
d := DataResource{
WebResource: resource_model.WebResource{
Protocol: "http", Path: "azerty.fr",
},
Example: "123456",
AbstractResource: resource_model.AbstractResource{
AbstractObject: utils.AbstractObject{Name: "testData"},
Description: "Lorem Ipsum",
Logo: "azerty.com",
Owner: "toto",
OwnerLogo: "totoLogo",
SourceUrl: "azerty.fr",
},
}

dma := New()
new_d, _, _ := dma.StoreOne(&d)
assert.Equal(t, d, new_d)
}
@ -1,43 +0,0 @@
package resources

import (
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)

type Container struct {
Image string `json:"image,omitempty" bson:"image,omitempty"` // Image is the container image
Command string `json:"command,omitempty" bson:"command,omitempty"` // Command is the container command
Args string `json:"args,omitempty" bson:"args,omitempty"` // Args is the container arguments
Env map[string]string `json:"env,omitempty" bson:"env,omitempty"` // Env is the container environment variables
Volumes map[string]string `json:"volumes,omitempty" bson:"volumes,omitempty"` // Volumes is the container volumes
}

type Expose struct {
Port int `json:"port,omitempty" bson:"port,omitempty"` // Port is the port
Reverse string `json:"reverse,omitempty" bson:"reverse,omitempty"` // Reverse is the reverse
PAT int `json:"pat,omitempty" bson:"pat,omitempty"` // PAT is the PAT
}

/*
* ProcessingResource is a struct that represents a processing resource
* it defines the resource processing
*/
type ProcessingResource struct {
resource_model.AbstractResource
IsService bool `json:"is_service,omitempty" bson:"is_service,omitempty"` // IsService is a flag that indicates if the processing is a service
CPUs []*CPU `bson:"cpus,omitempty" json:"cp_us,omitempty"` // CPUs is the list of CPUs
GPUs []*GPU `bson:"gpus,omitempty" json:"gp_us,omitempty"` // GPUs is the list of GPUs
RAM *RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
Storage uint `bson:"storage,omitempty" json:"storage,omitempty"` // Storage is the storage
Parallel bool `bson:"parallel,omitempty" json:"parallel,omitempty"` // Parallel is a flag that indicates if the processing is parallel
ScalingModel uint `bson:"scaling_model,omitempty" json:"scaling_model,omitempty"` // ScalingModel is the scaling model
DiskIO string `bson:"disk_io,omitempty" json:"disk_io,omitempty"` // DiskIO is the disk IO
Container *Container `bson:"container,omitempty" json:"container,omitempty"` // Container is the container
Expose []Expose `bson:"expose,omitempty" json:"expose,omitempty"` // Expose is the execution
}

func (d *ProcessingResource) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return New[*ProcessingResource](tools.PROCESSING_RESOURCE, username, peerID, groups, caller, func() utils.DBObject { return &ProcessingResource{} }) // Create a new instance of the accessor
}
67
models/resources/processing/processing.go
Normal file
@ -0,0 +1,67 @@
package processing

import (
"encoding/json"

"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)

type Container struct {
Image string `json:"image,omitempty" bson:"image,omitempty"` // Image is the container image
Command string `json:"command,omitempty" bson:"command,omitempty"` // Command is the container command
Args string `json:"args,omitempty" bson:"args,omitempty"` // Args is the container arguments
Env map[string]string `json:"env,omitempty" bson:"env,omitempty"` // Env is the container environment variables
Volumes map[string]string `json:"volumes,omitempty" bson:"volumes,omitempty"` // Volumes is the container volumes
}

type Expose struct {
Port int `json:"port,omitempty" bson:"port,omitempty"` // Port is the port
Reverse string `json:"reverse,omitempty" bson:"reverse,omitempty"` // Reverse is the reverse
PAT int `json:"pat,omitempty" bson:"pat,omitempty"` // PAT is the PAT
}

/*
* ProcessingResource is a struct that represents a processing resource
* it defines the resource processing
*/
type ProcessingResource struct {
resource_model.AbstractResource
IsService bool `json:"is_service,omitempty" bson:"is_service,omitempty"` // IsService is a flag that indicates if the processing is a service
CPUs []*compute.CPU `bson:"cpus,omitempty" json:"cp_us,omitempty"` // CPUs is the list of CPUs
GPUs []*compute.GPU `bson:"gpus,omitempty" json:"gp_us,omitempty"` // GPUs is the list of GPUs
RAM *compute.RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
Storage uint `bson:"storage,omitempty" json:"storage,omitempty"` // Storage is the storage
Parallel bool `bson:"parallel,omitempty" json:"parallel,omitempty"` // Parallel is a flag that indicates if the processing is parallel
ScalingModel uint `bson:"scaling_model,omitempty" json:"scaling_model,omitempty"` // ScalingModel is the scaling model
DiskIO string `bson:"disk_io,omitempty" json:"disk_io,omitempty"` // DiskIO is the disk IO
Container *Container `bson:"container,omitempty" json:"container,omitempty"` // Container is the container
Expose []Expose `bson:"expose,omitempty" json:"expose,omitempty"` // Expose is the execution
}

func (dma *ProcessingResource) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}

func (dma *ProcessingResource) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}

func (d *ProcessingResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.PROCESSING_RESOURCE, caller) // Initialize the accessor with the PROCESSING_RESOURCE model type
return data
}
114
models/resources/processing/processing_mongo_accessor.go
Normal file
@ -0,0 +1,114 @@
package processing

import (
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
)

type processingMongoAccessor struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
}

// New creates a new instance of the processingMongoAccessor
func New() *processingMongoAccessor {
return &processingMongoAccessor{}
}

/*
* Nothing special here, just the basic CRUD operations
*/

func (pma *processingMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
return pma.GenericDeleteOne(id, pma)
}

func (pma *processingMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
set.(*ProcessingResource).ResourceModel = nil
return pma.GenericUpdateOne(set, id, pma, &ProcessingResource{})
}

func (pma *processingMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
data.(*ProcessingResource).ResourceModel = nil
return pma.GenericStoreOne(data, pma)
}

func (pma *processingMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return pma.GenericStoreOne(data, pma)
}

func (pma *processingMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {

var processing ProcessingResource

res_mongo, code, err := mongo.MONGOService.LoadOne(id, pma.GetType())
if err != nil {
pma.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}

res_mongo.Decode(&processing)
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, pma.GetType())
if err == nil && len(resources) > 0 {
processing.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
return &processing, 200, nil
}

func (wfa processingMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
var results []ProcessingResource
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results {
if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
objs = append(objs, &r) // only get the abstract resource !
}
return objs, 200, nil
}

// Search searches for processing resources in the database, given some filters OR a search string
func (wfa *processingMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
},
}
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err
}
var results []ProcessingResource
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results {
if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
objs = append(objs, &r) // only get the abstract resource !
}
return objs, 200, nil
}
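A short illustrative sketch of how callers reach the accessor above through ProcessingResource.GetAccessor (hypothetical helper, not part of the commit; assumes a reachable Mongo instance):

package processing

import "cloud.o-forge.io/core/oc-lib/models/utils"

// loadProcessing is a hypothetical helper: GetAccessor performs the New() +
// Init(tools.PROCESSING_RESOURCE, caller) sequence defined in processing.go above.
func loadProcessing(id string) (utils.DBObject, int, error) {
	accessor := (&ProcessingResource{}).GetAccessor(nil) // nil caller: no peer call involved
	return accessor.LoadOne(id)
}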
38
models/resources/processing/processing_test.go
Normal file
@ -0,0 +1,38 @@
package processing

/*
func TestStoreOneProcessing(t *testing.T) {
p := ProcessingResource{Container: "totoCont",
AbstractResource: resources.AbstractResource{
AbstractObject: utils.AbstractObject{Name: "testData"},
Description: "Lorem Ipsum",
Logo: "azerty.com",
Owner: "toto",
OwnerLogo: "totoLogo",
SourceUrl: "azerty.fr",
},
}

sma := ProcessingMongoAccessor{}
id, _, _ := sma.StoreOne(&p)

assert.NotEmpty(t, id)
}

func TestLoadOneProcessing(t *testing.T) {
p := ProcessingResource{Container: "totoCont",
AbstractResource: resources.AbstractResource{
AbstractObject: utils.AbstractObject{Name: "testData"},
Description: "Lorem Ipsum",
Logo: "azerty.com",
Owner: "toto",
OwnerLogo: "totoLogo",
SourceUrl: "azerty.fr",
},
}

sma := ProcessingMongoAccessor{}
new_s, _, _ := sma.StoreOne(&p)
assert.Equal(t, p, new_s)
}
*/
@ -1,8 +1,12 @@
package resources

import (
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/resources/data"
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
"cloud.o-forge.io/core/oc-lib/models/resources/processing"
"cloud.o-forge.io/core/oc-lib/models/resources/storage"
w "cloud.o-forge.io/core/oc-lib/models/resources/workflow"
)

// AbstractResource is the struct containing all of the attributes commons to all ressources
@ -10,86 +14,44 @@ import (
// Resource is the interface to be implemented by all classes inheriting from Resource to have the same behavior

// http://www.inanzzz.com/index.php/post/wqbs/a-basic-usage-of-int-and-string-enum-types-in-golang
type ResourceInterface interface {
utils.DBObject
Trim() *resource_model.AbstractResource
SetResourceModel(model *resource_model.ResourceModel)
}

type ResourceSet struct {
Datas []string `bson:"datas,omitempty" json:"datas,omitempty"`
Storages []string `bson:"storages,omitempty" json:"storages,omitempty"`
Processings []string `bson:"processings,omitempty" json:"processings,omitempty"`
Computes []string `bson:"computes,omitempty" json:"computes,omitempty"`
Computes []string `bson:"computes,omitempty" json:"computes,omitempty"`
Workflows []string `bson:"workflows,omitempty" json:"workflows,omitempty"`

DataResources []*DataResource `bson:"-" json:"data_resources,omitempty"`
StorageResources []*StorageResource `bson:"-" json:"storage_resources,omitempty"`
ProcessingResources []*ProcessingResource `bson:"-" json:"processing_resources,omitempty"`
ComputeResources []*ComputeResource `bson:"-" json:"compute_resources,omitempty"`
WorkflowResources []*WorkflowResource `bson:"-" json:"workflow_resources,omitempty"`
}

func (r *ResourceSet) Clear() {
r.DataResources = nil
r.StorageResources = nil
r.ProcessingResources = nil
r.ComputeResources = nil
r.WorkflowResources = nil
}

func (r *ResourceSet) Fill(username string, peerID string, groups []string) {
for k, v := range map[utils.DBObject][]string{
(&DataResource{}): r.Datas,
(&ComputeResource{}): r.Computes,
(&StorageResource{}): r.Storages,
(&ProcessingResource{}): r.Processings,
(&WorkflowResource{}): r.Workflows,
} {
for _, id := range v {
d, _, e := k.GetAccessor(username, peerID, groups, nil).LoadOne(id)
if e == nil {
switch k.(type) {
case *DataResource:
r.DataResources = append(r.DataResources, d.(*DataResource))
case *ComputeResource:
r.ComputeResources = append(r.ComputeResources, d.(*ComputeResource))
case *StorageResource:
r.StorageResources = append(r.StorageResources, d.(*StorageResource))
case *ProcessingResource:
r.ProcessingResources = append(r.ProcessingResources, d.(*ProcessingResource))
case *WorkflowResource:
r.WorkflowResources = append(r.WorkflowResources, d.(*WorkflowResource))
}
}
}
}
DataResources []*data.DataResource `bson:"-" json:"data_resources,omitempty"`
StorageResources []*storage.StorageResource `bson:"-" json:"storage_resources,omitempty"`
ProcessingResources []*processing.ProcessingResource `bson:"-" json:"processing_resources,omitempty"`
ComputeResources []*compute.ComputeResource `bson:"-" json:"compute_resources,omitempty"`
WorkflowResources []*w.WorkflowResource `bson:"-" json:"workflow_resources,omitempty"`
}

type ItemResource struct {
Data *DataResource `bson:"data,omitempty" json:"data,omitempty"`
Processing *ProcessingResource `bson:"processing,omitempty" json:"processing,omitempty"`
Storage *StorageResource `bson:"storage,omitempty" json:"storage,omitempty"`
Compute *ComputeResource `bson:"compute,omitempty" json:"compute,omitempty"`
Workflow *WorkflowResource `bson:"workflow,omitempty" json:"workflow,omitempty"`
Data *data.DataResource `bson:"data,omitempty" json:"data,omitempty"`
Processing *processing.ProcessingResource `bson:"processing,omitempty" json:"processing,omitempty"`
Storage *storage.StorageResource `bson:"storage,omitempty" json:"storage,omitempty"`
Compute *compute.ComputeResource `bson:"compute,omitempty" json:"compute,omitempty"`
Workflow *w.WorkflowResource `bson:"workflow,omitempty" json:"workflow,omitempty"`
}

func (i *ItemResource) GetAbstractRessource() *resource_model.AbstractResource {

if i.Data != nil {

if(i.Data != nil){
return &i.Data.AbstractResource
}
if i.Processing != nil {
if(i.Processing != nil){
return &i.Processing.AbstractResource
}
if i.Storage != nil {
if(i.Storage != nil){
return &i.Storage.AbstractResource
}
if i.Compute != nil {
if(i.Compute != nil){
return &i.Compute.AbstractResource
}
if i.Workflow != nil {
if(i.Workflow != nil){
return &i.Workflow.AbstractResource
}
return nil
}
}
@ -1,95 +0,0 @@
package resources

import (
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)

type resourceMongoAccessor[T ResourceInterface] struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
generateData func() utils.DBObject
}

// New creates a new instance of the computeMongoAccessor
func New[T ResourceInterface](t tools.DataType, username string, peerID string, groups []string, caller *tools.HTTPCaller, g func() utils.DBObject) *resourceMongoAccessor[T] {
return &resourceMongoAccessor[T]{
AbstractAccessor: utils.AbstractAccessor{
ResourceModelAccessor: resource_model.New(),
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
Caller: caller,
PeerID: peerID,
User: username, // Set the caller
Groups: groups, // Set the caller
Type: t,
},
generateData: g,
}
}

/*
* Nothing special here, just the basic CRUD operations
*/

func (dca *resourceMongoAccessor[T]) DeleteOne(id string) (utils.DBObject, int, error) {
return utils.GenericDeleteOne(id, dca)
}

func (dca *resourceMongoAccessor[T]) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
set.(T).SetResourceModel(nil)
return utils.GenericUpdateOne(set.(T).Trim(), id, dca, dca.generateData()) // TODO CHANGE
}

func (dca *resourceMongoAccessor[T]) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
data.(T).SetResourceModel(nil)
return utils.GenericStoreOne(data.(T).Trim(), dca)
}

func (dca *resourceMongoAccessor[T]) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return dca.StoreOne(data)
}

func (dca *resourceMongoAccessor[T]) LoadOne(id string) (utils.DBObject, int, error) {
return utils.GenericLoadOne[T](id, func(d utils.DBObject) (utils.DBObject, int, error) {
resources, _, err := dca.ResourceModelAccessor.Search(nil, dca.GetType().String())
if err == nil && len(resources) > 0 {
d.(T).SetResourceModel(resources[0].(*resource_model.ResourceModel))
}
return d, 200, nil
}, dca)
}

func (wfa *resourceMongoAccessor[T]) LoadAll() ([]utils.ShallowDBObject, int, error) {
resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType().String())
return utils.GenericLoadAll[T](func(d utils.DBObject) utils.ShallowDBObject {
if err == nil && len(resources) > 0 {
d.(T).SetResourceModel(resources[0].(*resource_model.ResourceModel))
}
return d
}, wfa)
}

func (wfa *resourceMongoAccessor[T]) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType().String())
return utils.GenericSearch[T](filters, search, wfa.getResourceFilter(search),
func(d utils.DBObject) utils.ShallowDBObject {
if err == nil && len(resources) > 0 {
d.(T).SetResourceModel(resources[0].(*resource_model.ResourceModel))
}
return d
}, wfa)
}

func (abs *resourceMongoAccessor[T]) getResourceFilter(search string) *dbs.Filters {
return &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
},
}
}
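On master, the per-type accessors are collapsed into this single generic accessor. Its instantiation pattern, mirroring the GetAccessor bodies of the deleted per-type resource files in this diff, looks roughly like this (illustrative helper, not part of the commit):

package resources

import (
	"cloud.o-forge.io/core/oc-lib/models/utils"
	"cloud.o-forge.io/core/oc-lib/tools"
)

// listWorkflowResources is an illustrative helper: the New[T] call below mirrors
// the deleted WorkflowResource.GetAccessor body further down in this diff.
func listWorkflowResources(username string, peerID string, groups []string) ([]utils.ShallowDBObject, int, error) {
	acc := New[*WorkflowResource](tools.WORKFLOW_RESOURCE, username, peerID, groups, nil,
		func() utils.DBObject { return &WorkflowResource{} })
	return acc.LoadAll()
}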
@ -1,62 +0,0 @@
package resource_model

import (
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)

type ResourceModelMongoAccessor struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
}

/*
* Nothing special here, just the basic CRUD operations
*/

func New() *ResourceModelMongoAccessor {
return &ResourceModelMongoAccessor{
utils.AbstractAccessor{
Type: tools.RESOURCE_MODEL,
Logger: logs.CreateLogger(tools.RESOURCE_MODEL.String()),
},
}
}

func (wfa *ResourceModelMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
return utils.GenericDeleteOne(id, wfa)
}

func (wfa *ResourceModelMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
return utils.GenericUpdateOne(set, id, wfa, &ResourceModel{})
}

func (wfa *ResourceModelMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
return utils.GenericStoreOne(data, wfa)
}

func (wfa *ResourceModelMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return utils.GenericStoreOne(data, wfa)
}

func (a *ResourceModelMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
return utils.GenericLoadOne[*ResourceModel](id, func(d utils.DBObject) (utils.DBObject, int, error) {
return d, 200, nil
}, a)
}

func (a *ResourceModelMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
return utils.GenericLoadAll[*ResourceModel](func(d utils.DBObject) utils.ShallowDBObject {
return d
}, a)
}

func (a *ResourceModelMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
return utils.GenericSearch[*ResourceModel](filters, search,
&dbs.Filters{
Or: map[string][]dbs.Filter{
"resource_type": {{Operator: dbs.LIKE.String(), Value: search}},
},
}, func(d utils.DBObject) utils.ShallowDBObject { return d }, a)
}
@ -1,7 +1,9 @@
package resources
package storage

import (
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"encoding/json"

"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)
@ -56,6 +58,27 @@ type StorageResource struct {
Throughput string `bson:"throughput,omitempty" json:"throughput,omitempty"` // Throughput is the throughput of the storage
}

func (d *StorageResource) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return New[*StorageResource](tools.STORAGE_RESOURCE, username, peerID, groups, caller, func() utils.DBObject { return &StorageResource{} }) // Create a new instance of the accessor
func (dma *StorageResource) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}

func (dma *StorageResource) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}

func (d *StorageResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.STORAGE_RESOURCE, caller) // Initialize the accessor with the STORAGE_RESOURCE model type
return data
}
114
models/resources/storage/storage_mongo_accessor.go
Normal file
@ -0,0 +1,114 @@
package storage

import (
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
)

type storageMongoAccessor struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
}

// New creates a new instance of the storageMongoAccessor
func New() *storageMongoAccessor {
return &storageMongoAccessor{}
}

/*
* Nothing special here, just the basic CRUD operations
*/

func (sma *storageMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
return sma.GenericDeleteOne(id, sma)
}

func (sma *storageMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
set.(*StorageResource).ResourceModel = nil
return sma.GenericUpdateOne(set, id, sma, &StorageResource{})
}

func (sma *storageMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
data.(*StorageResource).ResourceModel = nil
return sma.GenericStoreOne(data, sma)
}

func (sma *storageMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return sma.GenericStoreOne(data, sma)
}

func (sma *storageMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {

var storage StorageResource

res_mongo, code, err := mongo.MONGOService.LoadOne(id, sma.GetType())
if err != nil {
sma.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}

res_mongo.Decode(&storage)
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, sma.GetType())
if err == nil && len(resources) > 0 {
storage.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
return &storage, 200, nil
}

func (wfa storageMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
var results []StorageResource
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results {
if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
objs = append(objs, &r) // only get the abstract resource !
}
return objs, 200, nil
}

// Search searches for storage resources in the database, given some filters OR a search string
func (wfa *storageMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
},
}
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err
}
var results []StorageResource
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results {
if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
objs = append(objs, &r) // only get the abstract resource !
}
return objs, 200, nil
}
46
models/resources/storage/storage_test.go
Normal file
@ -0,0 +1,46 @@
package storage

import (
"testing"

"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"

"github.com/stretchr/testify/assert"
)

func TestStoreOneStorage(t *testing.T) {
s := StorageResource{Size: 123, WebResource: resource_model.WebResource{Protocol: "http", Path: "azerty.fr"},
AbstractResource: resource_model.AbstractResource{
AbstractObject: utils.AbstractObject{Name: "testData"},
Description: "Lorem Ipsum",
Logo: "azerty.com",
Owner: "toto",
OwnerLogo: "totoLogo",
SourceUrl: "azerty.fr",
},
}

sma := New()
id, _, _ := sma.StoreOne(&s)

assert.NotEmpty(t, id)
}

func TestLoadOneStorage(t *testing.T) {
s := StorageResource{Size: 123, WebResource: resource_model.WebResource{Protocol: "http", Path: "azerty.fr"},
AbstractResource: resource_model.AbstractResource{
AbstractObject: utils.AbstractObject{Name: "testData"},
Description: "Lorem Ipsum",
Logo: "azerty.com",
Owner: "toto",
OwnerLogo: "totoLogo",
SourceUrl: "azerty.fr",
},
}

sma := New()
new_s, _, _ := sma.StoreOne(&s)

assert.Equal(t, s, new_s)
}
@ -1,18 +0,0 @@
package resources

import (
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)

// WorkflowResource is a struct that represents a workflow resource
// it defines the resource workflow
type WorkflowResource struct {
resource_model.AbstractResource
WorkflowID string `bson:"workflow_id,omitempty" json:"workflow_id,omitempty"` // WorkflowID is the ID of the native workflow
}

func (d *WorkflowResource) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return New[*WorkflowResource](tools.WORKFLOW_RESOURCE, username, peerID, groups, caller, func() utils.DBObject { return &WorkflowResource{} }) // Create a new instance of the accessor
}
41
models/resources/workflow/workflow.go
Normal file
@ -0,0 +1,41 @@
package oclib

import (
"encoding/json"

"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)

// WorkflowResource is a struct that represents a workflow resource
// it defines the resource workflow
type WorkflowResource struct {
resource_model.AbstractResource
WorkflowID string `bson:"workflow_id,omitempty" json:"workflow_id,omitempty"` // WorkflowID is the ID of the native workflow
}

func (d *WorkflowResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.WORKFLOW_RESOURCE, caller) // Initialize the accessor with the WORKFLOW_RESOURCE model type
return data
}

func (dma *WorkflowResource) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}

func (dma *WorkflowResource) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}
113
models/resources/workflow/workflow_mongo_accessor.go
Normal file
@ -0,0 +1,113 @@
package oclib

import (
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
)

type workflowResourceMongoAccessor struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
}

func New() *workflowResourceMongoAccessor {
return &workflowResourceMongoAccessor{}
}

func (wfa *workflowResourceMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
return wfa.GenericDeleteOne(id, wfa)
}

func (wfa *workflowResourceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
set.(*WorkflowResource).ResourceModel = nil
return wfa.GenericUpdateOne(set, id, wfa, &WorkflowResource{})
}

func (wfa *workflowResourceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
data.(*WorkflowResource).ResourceModel = nil
return wfa.GenericStoreOne(data, wfa)
}

func (wfa *workflowResourceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
res, _, _ := wfa.LoadOne(data.GetID())
data.(*WorkflowResource).WorkflowID = data.GetID()
if res == nil {
return wfa.GenericStoreOne(data, wfa)
} else {
data.(*WorkflowResource).UUID = res.GetID()
return wfa.GenericUpdateOne(data, res.GetID(), wfa, &WorkflowResource{})
}
}

func (wfa *workflowResourceMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
var workflow WorkflowResource
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}
res_mongo.Decode(&workflow)
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, wfa.GetType())
if err == nil && len(resources) > 0 {
workflow.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
return &workflow, 200, nil
}

func (wfa workflowResourceMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
var results []WorkflowResource
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results {
if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
objs = append(objs, &r)
}
return objs, 200, nil
}

// Search searches for workflow resources in the database, given some filters OR a search string
func (wfa *workflowResourceMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
},
}
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err
}
var results []WorkflowResource
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
resources, _, err := accessor.Search(nil, wfa.GetType())
for _, r := range results {
if err == nil && len(resources) > 0 {
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
}
objs = append(objs, &r)
}
return objs, 200, nil
}
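CopyOne above stores the resource on first use and otherwise turns the call into an update keyed on the already-stored UUID. An illustrative caller (hypothetical helper, not part of the commit):

package oclib

import (
	"cloud.o-forge.io/core/oc-lib/models/utils"
	"cloud.o-forge.io/core/oc-lib/tools"
)

// copyFromWorkflow is a hypothetical helper: it snapshots a native workflow as a
// WorkflowResource; CopyOne stores on first call and updates in place afterwards.
func copyFromWorkflow(wf *WorkflowResource) (utils.DBObject, int, error) {
	wfa := New()
	wfa.Init(tools.WORKFLOW_RESOURCE, nil) // same initialization GetAccessor performs above
	return wfa.CopyOne(wf)
}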
43
models/resources/workflow/workflow_test.go
Normal file
@ -0,0 +1,43 @@
package oclib

import (
"testing"

"cloud.o-forge.io/core/oc-lib/models/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"

"github.com/stretchr/testify/assert"
)

func TestStoreOneWorkflow(t *testing.T) {
w := WorkflowResource{AbstractResource: resource_model.AbstractResource{
AbstractObject: utils.AbstractObject{Name: "testWorkflow"},
Description: "Lorem Ipsum",
Logo: "azerty.com",
Owner: "toto",
OwnerLogo: "totoLogo",
SourceUrl: "azerty.fr",
},
}

wma := New()
id, _, _ := wma.StoreOne(&w)

assert.NotEmpty(t, id)
}

func TestLoadOneWorkflow(t *testing.T) {
w := WorkflowResource{AbstractResource: resource_model.AbstractResource{
AbstractObject: utils.AbstractObject{Name: "testWorkflow"},
Description: "Lorem Ipsum",
Logo: "azerty.com",
Owner: "toto",
OwnerLogo: "totoLogo",
SourceUrl: "azerty.fr",
},
}

wma := New()
new_w, _, _ := wma.StoreOne(&w)
assert.Equal(t, w, new_w)
}
@ -3,40 +3,69 @@ package utils
import (
"encoding/json"
"errors"
"fmt"
"time"

"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/tools"
"github.com/go-playground/validator/v10"
"github.com/google/uuid"
"github.com/rs/zerolog"
mgb "go.mongodb.org/mongo-driver/mongo"
)

// single instance of the validator used in every model Struct to validate the fields
var validate = validator.New(validator.WithRequiredStructEnabled())

type AccessMode int

const (
Private AccessMode = iota
Public
)

/*
* AbstractObject is a struct that represents the basic fields of an object
* it defines the object id and name
* every data in base root model should inherit from this struct (only exception is the ResourceModel)
*/
type AbstractObject struct {
UUID string `json:"id,omitempty" bson:"id,omitempty" validate:"required"`
Name string `json:"name,omitempty" bson:"name,omitempty" validate:"required"`
UpdateDate time.Time `json:"update_date" bson:"update_date"`
LastPeerWriter string `json:"last_peer_writer" bson:"last_peer_writer"`
CreatorID string `json:"creator_id" bson:"creator_id" default:"unknown"`
AccessMode AccessMode `json:"access_mode" bson:"access_mode" default:"0"`
UUID string `json:"id,omitempty" bson:"id,omitempty" validate:"required"`
Name string `json:"name,omitempty" bson:"name,omitempty" validate:"required"`
UpdateDate time.Time `json:"update_date" bson:"update_date"`
LastPeerWriter string `json:"last_peer_writer" bson:"last_peer_writer"`
}

// GetID returns the id of the object (abstract)
func (ao *AbstractObject) GetID() string {
return ao.UUID
}

// GetName returns the name of the object (abstract)
func (ao *AbstractObject) GetName() string {
return ao.Name
}

func (ao *AbstractObject) UpToDate() {
ao.UpdateDate = time.Now()
// ao.LastPeerWriter, _ = static.GetMyLocalJsonPeer()
}

// GetAccessor returns the accessor of the object (abstract)
func (dma *AbstractObject) GetAccessor(caller *tools.HTTPCaller) Accessor {
return nil
}

func (dma *AbstractObject) Deserialize(j map[string]interface{}) DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}

func (dma *AbstractObject) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}

func (r *AbstractObject) GenerateID() {
@ -45,83 +74,13 @@ func (r *AbstractObject) GenerateID() {
}
}

// GetID implements ShallowDBObject.
func (ao AbstractObject) GetID() string {
return ao.UUID
}

// GetName implements ShallowDBObject.
func (ao AbstractObject) GetName() string {
return ao.Name
}

func (ao *AbstractObject) UpToDate(user string, create bool) {
ao.UpdateDate = time.Now()
ao.LastPeerWriter = user
if create {
ao.CreatorID = user
}
}

func (ao *AbstractObject) VerifyAuth(username string, peerID string, groups []string) bool {
return ao.AccessMode == Public || ao.CreatorID == username
}

func (ao *AbstractObject) GetObjectFilters(search string) *dbs.Filters {
return &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
}}
}

func (dma *AbstractObject) Deserialize(j map[string]interface{}, obj DBObject) DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, obj)
return obj
}

func (dma *AbstractObject) Serialize(obj DBObject) map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(obj)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}

type AbstractAccessor struct {
Logger zerolog.Logger // Logger is the logger of the accessor, it's a specilized logger for the accessor
Type tools.DataType // Type is the data type of the accessor
Caller *tools.HTTPCaller // Caller is the http caller of the accessor (optionnal) only need in a peer connection
PeerID string // PeerID is the id of the peer
Groups []string // Groups is the list of groups that can access the accessor
User string // User is the user that is using the accessor
ResourceModelAccessor Accessor
Logger zerolog.Logger // Logger is the logger of the accessor, it's a specilized logger for the accessor
Type string // Type is the data type of the accessor
Caller *tools.HTTPCaller // Caller is the http caller of the accessor (optionnal) only need in a peer connection
}

func (dma *AbstractAccessor) GetUser() string {
return dma.User
}

func (dma *AbstractAccessor) GetPeerID() string {
return dma.PeerID
}
func (dma *AbstractAccessor) GetGroups() []string {
return dma.Groups
}
func (dma *AbstractAccessor) GetLogger() *zerolog.Logger {
return &dma.Logger
}

func (dma *AbstractAccessor) VerifyAuth() string {
return ""
}

func (dma *AbstractAccessor) GetType() tools.DataType {
func (dma *AbstractAccessor) GetType() string {
return dma.Type
}

@ -129,10 +88,16 @@ func (dma *AbstractAccessor) GetCaller() *tools.HTTPCaller {
return dma.Caller
}

// Init initializes the accessor with the data type and the http caller
func (dma *AbstractAccessor) Init(t tools.DataType, caller *tools.HTTPCaller) {
dma.Logger = logs.CreateLogger(t.String()) // Create a logger with the data type
dma.Caller = caller // Set the caller
dma.Type = t.String() // Set the data type
}

// GenericLoadOne loads one object from the database (generic)
func GenericStoreOne(data DBObject, a Accessor) (DBObject, int, error) {
func (wfa *AbstractAccessor) GenericStoreOne(data DBObject, accessor Accessor) (DBObject, int, error) {
data.GenerateID()
data.UpToDate(a.GetUser(), true)
f := dbs.Filters{
Or: map[string][]dbs.Filter{
"abstractresource.abstractobject.name": {{
@ -145,37 +110,31 @@ func GenericStoreOne(data DBObject, a Accessor) (DBObject, int, error) {
}},
},
}
if !data.VerifyAuth(a.GetUser(), a.GetPeerID(), a.GetGroups()) {
return nil, 403, errors.New("You are not allowed to access this collaborative area")
}
if cursor, _, _ := a.Search(&f, ""); len(cursor) > 0 {
return nil, 409, errors.New(a.GetType().String() + " with name " + data.GetName() + " already exists")
if cursor, _, _ := accessor.Search(&f, ""); len(cursor) > 0 {
return nil, 409, errors.New(accessor.GetType() + " with name " + data.GetName() + " already exists")
}
err := validate.Struct(data)
if err != nil {
return nil, 422, err
}
id, code, err := mongo.MONGOService.StoreOne(data, data.GetID(), a.GetType().String())
id, code, err := mongo.MONGOService.StoreOne(data, data.GetID(), wfa.GetType())
if err != nil {
a.GetLogger().Error().Msg("Could not store " + data.GetName() + " to db. Error: " + err.Error())
wfa.Logger.Error().Msg("Could not store " + data.GetName() + " to db. Error: " + err.Error())
return nil, code, err
}
return a.LoadOne(id)
return accessor.LoadOne(id)
}

// GenericLoadOne loads one object from the database (generic)
func GenericDeleteOne(id string, a Accessor) (DBObject, int, error) {
res, code, err := a.LoadOne(id)
func (dma *AbstractAccessor) GenericDeleteOne(id string, accessor Accessor) (DBObject, int, error) {
res, code, err := accessor.LoadOne(id)
if err != nil {
a.GetLogger().Error().Msg("Could not retrieve " + id + " to db. Error: " + err.Error())
dma.Logger.Error().Msg("Could not retrieve " + id + " to db. Error: " + err.Error())
return nil, code, err
}
if !res.VerifyAuth(a.GetUser(), a.GetPeerID(), a.GetGroups()) {
return nil, 403, errors.New("You are not allowed to access this collaborative area")
}
_, code, err = mongo.MONGOService.DeleteOne(id, a.GetType().String())
_, code, err = mongo.MONGOService.DeleteOne(id, accessor.GetType())
if err != nil {
a.GetLogger().Error().Msg("Could not delete " + id + " to db. Error: " + err.Error())
dma.Logger.Error().Msg("Could not delete " + id + " to db. Error: " + err.Error())
return nil, code, err
}
return res, 200, nil
@ -183,84 +142,32 @@ func GenericDeleteOne(id string, a Accessor) (DBObject, int, error) {
|
||||
|
||||
// GenericLoadOne loads one object from the database (generic)
|
||||
// json expected in entry is a flatted object no need to respect the inheritance hierarchy
|
||||
func GenericUpdateOne(set DBObject, id string, a Accessor, new DBObject) (DBObject, int, error) {
|
||||
r, c, err := a.LoadOne(id)
|
||||
func (dma *AbstractAccessor) GenericUpdateOne(set DBObject, id string, accessor Accessor, new DBObject) (DBObject, int, error) {
|
||||
r, c, err := accessor.LoadOne(id)
|
||||
if err != nil {
|
||||
return nil, c, err
|
||||
}
|
||||
r.UpToDate(a.GetUser(), false)
|
||||
if !r.VerifyAuth(a.GetUser(), a.GetPeerID(), a.GetGroups()) {
|
||||
return nil, 403, errors.New("You are not allowed to access this collaborative area")
|
||||
}
|
||||
change := set.Serialize(set) // get the changes
|
||||
loaded := r.Serialize(r) // get the loaded object
|
||||
change := set.Serialize() // get the changes
|
||||
loaded := r.Serialize() // get the loaded object
|
||||
|
||||
for k, v := range change { // apply the changes, with a flatten method
|
||||
loaded[k] = v
|
||||
}
|
||||
id, code, err := mongo.MONGOService.UpdateOne(new.Deserialize(loaded, new), id, a.GetType().String())
|
||||
id, code, err := mongo.MONGOService.UpdateOne(new.Deserialize(loaded), id, accessor.GetType())
|
||||
if err != nil {
|
||||
a.GetLogger().Error().Msg("Could not update " + id + " to db. Error: " + err.Error())
|
||||
dma.Logger.Error().Msg("Could not update " + id + " to db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
return a.LoadOne(id)
|
||||
}
|
||||
|
||||
func GenericLoadOne[T DBObject](id string, f func(DBObject) (DBObject, int, error), a Accessor) (DBObject, int, error) {
|
||||
var data T
|
||||
res_mongo, code, err := mongo.MONGOService.LoadOne(id, a.GetType().String())
|
||||
if err != nil {
|
||||
a.GetLogger().Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
res_mongo.Decode(&data)
|
||||
if !data.VerifyAuth(a.GetUser(), a.GetPeerID(), a.GetGroups()) {
|
||||
return nil, 403, errors.New("You are not allowed to access this collaborative area")
|
||||
}
|
||||
return f(data)
|
||||
}
|
||||
|
||||
func genericLoadAll[T DBObject](res *mgb.Cursor, code int, err error, f func(DBObject) ShallowDBObject, a Accessor) ([]ShallowDBObject, int, error) {
|
||||
objs := []ShallowDBObject{}
|
||||
var results []T
|
||||
if err != nil {
|
||||
a.GetLogger().Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||
return nil, code, err
|
||||
}
|
||||
if err = res.All(mongo.MngoCtx, &results); err != nil {
|
||||
return nil, 404, err
|
||||
}
|
||||
for _, r := range results {
|
||||
if !r.VerifyAuth(a.GetUser(), a.GetPeerID(), a.GetGroups()) {
|
||||
continue
|
||||
}
|
||||
objs = append(objs, f(r))
|
||||
}
|
||||
return objs, 200, nil
|
||||
}
|
||||
|
||||
func GenericLoadAll[T DBObject](f func(DBObject) ShallowDBObject, wfa Accessor) ([]ShallowDBObject, int, error) {
|
||||
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType().String())
|
||||
fmt.Println("res_mongo", res_mongo)
|
||||
return genericLoadAll[T](res_mongo, code, err, f, wfa)
|
||||
}
|
||||
|
||||
func GenericSearch[T DBObject](filters *dbs.Filters, search string, defaultFilters *dbs.Filters,
|
||||
f func(DBObject) ShallowDBObject, wfa Accessor) ([]ShallowDBObject, int, error) {
|
||||
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||
filters = defaultFilters
|
||||
}
|
||||
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType().String())
|
||||
return genericLoadAll[T](res_mongo, code, err, f, wfa)
|
||||
return accessor.LoadOne(id)
|
||||
}
|
||||
|
||||
// GenericRawUpdateOne updates one object in the database (generic)
// the JSON expected as input is a flattened object; there is no need to respect the inheritance hierarchy
func GenericRawUpdateOne(set DBObject, id string, a Accessor) (DBObject, int, error) {
id, code, err := mongo.MONGOService.UpdateOne(set, id, a.GetType().String())
func (dma *AbstractAccessor) GenericRawUpdateOne(set DBObject, id string, accessor Accessor) (DBObject, int, error) {
id, code, err := mongo.MONGOService.UpdateOne(set, id, accessor.GetType())
if err != nil {
a.GetLogger().Error().Msg("Could not update " + id + " to db. Error: " + err.Error())
dma.Logger.Error().Msg("Could not update " + id + " to db. Error: " + err.Error())
return nil, code, err
}
return a.LoadOne(id)
return accessor.LoadOne(id)
}

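The helpers above (GenericLoadOne, genericLoadAll, GenericLoadAll, GenericSearch, GenericRawUpdateOne) concentrate the Mongo round-trip and the VerifyAuth check in one place, so a typed accessor only has to supply a small callback. The sketch below reproduces that shape with an in-memory map instead of MongoDB; all of its types and names are hypothetical stand-ins, not oc-lib's actual API.

package main

import (
	"errors"
	"fmt"
)

// Minimal stand-ins for the interfaces used by the generic helpers above.
type DBObject interface {
	GetID() string
	VerifyAuth(user string) bool
}

var store = map[string]DBObject{} // stand-in for mongo.MONGOService

// GenericLoadOne mirrors the pattern above: load, check auth, then hand the
// typed object to a per-type callback.
func GenericLoadOne[T DBObject](id, user string, f func(T) (DBObject, error)) (DBObject, error) {
	raw, ok := store[id]
	if !ok {
		return nil, errors.New("not found")
	}
	obj, ok := raw.(T)
	if !ok || !obj.VerifyAuth(user) {
		return nil, errors.New("forbidden")
	}
	return f(obj)
}

type Workflow struct{ ID, Owner string }

func (w *Workflow) GetID() string            { return w.ID }
func (w *Workflow) VerifyAuth(u string) bool { return u == w.Owner }

func main() {
	store["wf-1"] = &Workflow{ID: "wf-1", Owner: "alice"}
	obj, err := GenericLoadOne("wf-1", "alice", func(w *Workflow) (DBObject, error) {
		// per-type hook, e.g. deactivate an expired schedule before returning
		return w, nil
	})
	if err != nil {
		fmt.Println("load failed:", err)
		return
	}
	fmt.Println("loaded", obj.GetID())
}
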
@ -3,7 +3,6 @@ package utils
import (
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/tools"
"github.com/rs/zerolog"
)

// ShallowDBObject is an interface that defines the basic methods of a shallowed version of a DBObject
@ -11,8 +10,8 @@ type ShallowDBObject interface {
GenerateID()
GetID() string
GetName() string
Deserialize(j map[string]interface{}, obj DBObject) DBObject
Serialize(obj DBObject) map[string]interface{}
Deserialize(j map[string]interface{}) DBObject
Serialize() map[string]interface{}
}

// DBObject is an interface that defines the basic methods for a DBObject
@ -20,20 +19,16 @@ type DBObject interface {
GenerateID()
GetID() string
GetName() string
UpToDate(user string, create bool)
VerifyAuth(username string, PeerID string, groups []string) bool
Deserialize(j map[string]interface{}, obj DBObject) DBObject
Serialize(obj DBObject) map[string]interface{}
GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) Accessor
UpToDate()
Deserialize(j map[string]interface{}) DBObject
Serialize() map[string]interface{}
GetAccessor(caller *tools.HTTPCaller) Accessor
}

// Accessor is an interface that defines the basic methods for an Accessor
type Accessor interface {
GetType() tools.DataType
GetUser() string
GetPeerID() string
GetGroups() []string
GetLogger() *zerolog.Logger
Init(t tools.DataType, caller *tools.HTTPCaller)
GetType() string
GetCaller() *tools.HTTPCaller
Search(filters *dbs.Filters, search string) ([]ShallowDBObject, int, error)
LoadAll() ([]ShallowDBObject, int, error)

@ -1,13 +1,15 @@
package workflow

import (
"encoding/json"
"errors"

"cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
"cloud.o-forge.io/core/oc-lib/models/resources/storage"
"cloud.o-forge.io/core/oc-lib/models/resources/workflow/graph"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/models/workflow/graph"
"cloud.o-forge.io/core/oc-lib/tools"
)

@ -34,8 +36,8 @@ func (w *AbstractWorkflow) GetWorkflows() (list_computings []graph.GraphItem) {
return
}

func (w *AbstractWorkflow) GetComputeByRelatedProcessing(processingID string) []*resources.ComputeResource {
storages := []*resources.ComputeResource{}
func (w *AbstractWorkflow) GetComputeByRelatedProcessing(processingID string) []*compute.ComputeResource {
storages := []*compute.ComputeResource{}
for _, link := range w.Graph.Links {
nodeID := link.Destination.ID // we consider that the processing is the destination
node := w.Graph.Items[link.Source.ID].Compute // we are looking for the compute as source
@ -50,8 +52,8 @@ func (w *AbstractWorkflow) GetComputeByRelatedProcessing(processingID string) []
return storages
}

func (w *AbstractWorkflow) GetStoragesByRelatedProcessing(processingID string) []*resources.StorageResource {
storages := []*resources.StorageResource{}
func (w *AbstractWorkflow) GetStoragesByRelatedProcessing(processingID string) []*storage.StorageResource {
storages := []*storage.StorageResource{}
for _, link := range w.Graph.Links {
nodeID := link.Destination.ID // we consider that the processing is the destination
node := w.Graph.Items[link.Source.ID].Storage // we are looking for the storage as source
@ -98,20 +100,6 @@ type Workflow struct {
AbstractWorkflow // AbstractWorkflow contains the basic fields of a workflow
}

func (ao *Workflow) VerifyAuth(username string, peerID string, groups []string) bool {
isAuthorized := false
if len(ao.Shared) > 0 {
for _, shared := range ao.Shared {
shared, code, _ := shallow_collaborative_area.New(tools.COLLABORATIVE_AREA, username, peerID, groups, nil).LoadOne(shared)
if code != 200 || shared == nil {
isAuthorized = false
}
isAuthorized = shared.VerifyAuth(username, peerID, groups)
}
}
return ao.AbstractObject.VerifyAuth(username, peerID, groups) || isAuthorized
}

/*
* CheckBooking is a function that checks the booking of the workflow on peers (even ourselves)
*/
@ -120,7 +108,7 @@ func (wfa *Workflow) CheckBooking(caller *tools.HTTPCaller) (bool, error) {
if wfa.Graph == nil { // no graph no booking
return false, nil
}
accessor := (&resources.ComputeResource{}).GetAccessor("", "", []string{}, caller)
accessor := (&compute.ComputeResource{}).GetAccessor(nil)
for _, link := range wfa.Graph.Links {
if ok, dc_id := wfa.isDCLink(link); ok { // check if the link is a link between a compute and a resource
dc, code, _ := accessor.LoadOne(dc_id)
@ -128,7 +116,7 @@ func (wfa *Workflow) CheckBooking(caller *tools.HTTPCaller) (bool, error) {
continue
}
// CHECK BOOKING ON PEER, compute could be a remote one
peerID := dc.(*resources.ComputeResource).PeerID
peerID := dc.(*compute.ComputeResource).PeerID
if peerID == "" {
return false, errors.New("no peer id")
} // no peer id no booking, we need to know where to book
@ -141,6 +129,31 @@ func (wfa *Workflow) CheckBooking(caller *tools.HTTPCaller) (bool, error) {
return true, nil
}

func (d *Workflow) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return New(tools.WORKFLOW, username, peerID, groups, caller) // Create a new instance of the accessor
func (d *Workflow) GetName() string {
return d.Name
}

func (d *Workflow) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.WORKFLOW, caller) // Initialize the accessor with the WORKFLOW model type
return data
}

func (dma *Workflow) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}

func (dma *Workflow) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}

@ -8,8 +8,10 @@ import (

type WorkflowHistory struct{ Workflow }

func (d *WorkflowHistory) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return New(tools.WORKSPACE_HISTORY, username, peerID, groups, caller) // Create a new instance of the accessor
func (d *WorkflowHistory) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.WORKSPACE_HISTORY, caller) // Initialize the accessor with the WORKSPACE model type
return data
}
func (r *WorkflowHistory) GenerateID() {
r.UUID = uuid.New().String()

@ -9,42 +9,24 @@ import (

"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
"cloud.o-forge.io/core/oc-lib/models/workspace"
"cloud.o-forge.io/core/oc-lib/tools"
cron "github.com/robfig/cron"
cron "github.com/robfig/cron/v3"
)

type workflowMongoAccessor struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)

computeResourceAccessor utils.Accessor
collaborativeAreaAccessor utils.Accessor
executionAccessor utils.Accessor
workspaceAccessor utils.Accessor
}

// New creates a new instance of the workflowMongoAccessor
func New(t tools.DataType, username string, peerID string, groups []string, caller *tools.HTTPCaller) *workflowMongoAccessor {
return &workflowMongoAccessor{
computeResourceAccessor: (&resources.ComputeResource{}).GetAccessor(username, peerID, groups, nil),
collaborativeAreaAccessor: (&shallow_collaborative_area.ShallowCollaborativeArea{}).GetAccessor(username, peerID, groups, nil),
executionAccessor: (&workflow_execution.WorkflowExecution{}).GetAccessor(username, peerID, groups, nil),
workspaceAccessor: (&workspace.Workspace{}).GetAccessor(username, peerID, groups, nil),
AbstractAccessor: utils.AbstractAccessor{
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
Caller: caller,
PeerID: peerID,
User: username,
Groups: groups, // Set the caller
Type: t,
},
}
func New() *workflowMongoAccessor {
return &workflowMongoAccessor{}
}

/*
@ -55,7 +37,7 @@ func New(t tools.DataType, username string, peerID string, groups []string, call
* getExecutions is a function that returns the executions of a workflow
* it returns an array of workflow_execution.WorkflowExecution
*/
func (a *workflowMongoAccessor) getExecutions(id string, data *Workflow) ([]*workflow_execution.WorkflowExecution, error) {
func (wfa *workflowMongoAccessor) getExecutions(id string, data *Workflow) ([]*workflow_execution.WorkflowExecution, error) {
workflows_execution := []*workflow_execution.WorkflowExecution{}
if data.Schedule != nil { // only set execution on a scheduled workflow
if data.Schedule.Start == nil { // if no start date, return an error
@ -110,14 +92,14 @@ func (a *workflowMongoAccessor) getExecutions(id string, data *Workflow) ([]*wor
}

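getExecutions (only its head is visible in the hunk above) is what turns a workflow's Schedule into individual WorkflowExecution entries, and the import change from robfig/cron to robfig/cron/v3 belongs to that logic. As a hedged illustration only — the real field names and generation code are not part of this hunk — the snippet below shows how cron/v3 can expand a cron expression into concrete dates between a start and an end:

package main

import (
	"fmt"
	"time"

	cron "github.com/robfig/cron/v3"
)

// expandSchedule is a hypothetical helper: it lists every trigger time of a
// standard cron expression that falls between start and end.
func expandSchedule(expr string, start, end time.Time) ([]time.Time, error) {
	sched, err := cron.ParseStandard(expr)
	if err != nil {
		return nil, err
	}
	dates := []time.Time{}
	for t := sched.Next(start); !t.IsZero() && t.Before(end); t = sched.Next(t) {
		dates = append(dates, t)
	}
	return dates, nil
}

func main() {
	start := time.Now().UTC()
	dates, err := expandSchedule("*/30 * * * *", start, start.Add(2*time.Hour)) // every 30 minutes
	if err != nil {
		panic(err)
	}
	for _, d := range dates {
		fmt.Println("execution at", d)
	}
}
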
// DeleteOne deletes a workflow from the database, deleting the depending executions and bookings
func (a *workflowMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
a.execution(id, &Workflow{
func (wfa *workflowMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
wfa.execution(id, &Workflow{
AbstractWorkflow: AbstractWorkflow{ScheduleActive: false},
}, true) // delete the executions
res, code, err := utils.GenericDeleteOne(id, a)
res, code, err := wfa.GenericDeleteOne(id, wfa)
if res != nil && code == 200 {
a.execute(res.(*Workflow), true, false) // bring the workspace for the workflow up to date
a.share(res.(*Workflow), true, a.Caller)
wfa.execute(res.(*Workflow), true, false) // bring the workspace for the workflow up to date
wfa.share(res.(*Workflow), true, wfa.Caller)
}
return res, code, err
}
@ -127,15 +109,15 @@ func (a *workflowMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error
* it takes the workflow id, the real data and the executions
* it returns an error if the booking fails
*/
func (a *workflowMongoAccessor) book(id string, realData *Workflow, execs []*workflow_execution.WorkflowExecution) error {
if a.Caller == nil || a.Caller.URLS == nil || a.Caller.URLS[tools.BOOKING] == nil {
func (wfa *workflowMongoAccessor) book(id string, realData *Workflow, execs []*workflow_execution.WorkflowExecution) error {
if wfa.Caller == nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.BOOKING] == nil {
return errors.New("no caller defined")
}
methods := a.Caller.URLS[tools.BOOKING]
methods := wfa.Caller.URLS[tools.BOOKING]
if _, ok := methods[tools.POST]; !ok {
return errors.New("no path found")
}
res, code, _ := a.LoadOne(id)
res, code, _ := wfa.LoadOne(id)
if code != 200 {
return errors.New("could not load workflow")
}
@ -145,6 +127,7 @@ func (a *workflowMongoAccessor) book(id string, realData *Workflow, execs []*wor
g = realData.Graph
}
if g != nil && g.Links != nil && len(g.Links) > 0 { // if the graph is set and has links then book the workflow (even on ourselves)
accessor := (&compute.ComputeResource{}).GetAccessor(nil)
isDCFound := []string{}
for _, link := range g.Links {
if ok, dc_id := realData.isDCLink(link); ok { // check if the link is a link between a compute and a resource booking is only on compute
@ -152,12 +135,12 @@ func (a *workflowMongoAccessor) book(id string, realData *Workflow, execs []*wor
continue
} // if the compute is already found, skip it
isDCFound = append(isDCFound, dc_id)
dc, code, _ := a.computeResourceAccessor.LoadOne(dc_id)
dc, code, _ := accessor.LoadOne(dc_id)
if code != 200 {
continue
}
// CHECK BOOKING
peerID := dc.(*resources.ComputeResource).PeerID
peerID := dc.(*compute.ComputeResource).PeerID
if peerID == "" { // no peer id no booking
continue
}
@ -167,7 +150,7 @@ func (a *workflowMongoAccessor) book(id string, realData *Workflow, execs []*wor
WorkflowID: id, // set the workflow id "WHO"
ResourceID: dc_id, // set the compute id "WHERE"
Executions: execs, // set the executions to book "WHAT"
}).Serialize(), a.Caller)
}).Serialize(), wfa.Caller)
if err != nil {
fmt.Println("BOOKING", err)
return err
@ -181,12 +164,13 @@ func (a *workflowMongoAccessor) book(id string, realData *Workflow, execs []*wor
/*
* share is a function that shares a workflow to the peers if the workflow is shared
*/
func (a *workflowMongoAccessor) share(realData *Workflow, delete bool, caller *tools.HTTPCaller) {
func (wfa *workflowMongoAccessor) share(realData *Workflow, delete bool, caller *tools.HTTPCaller) {
if realData == nil || realData.Shared == nil || len(realData.Shared) == 0 || caller == nil || caller.Disabled { // no shared no sharing
return
}
for _, sharedID := range realData.Shared { // loop through the shared ids
res, code, _ := a.collaborativeAreaAccessor.LoadOne(sharedID)
access := (&shallow_collaborative_area.ShallowCollaborativeArea{}).GetAccessor(nil)
res, code, _ := access.LoadOne(sharedID)
if code != 200 {
continue
}
@ -202,12 +186,11 @@ func (a *workflowMongoAccessor) share(realData *Workflow, delete bool, caller *t
history.StoreOne(history.MapFromWorkflow(res.(*Workflow)))
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKFLOW, tools.DELETE, map[string]interface{}{}, caller)
} else { // if the workflow is updated, share the update
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKFLOW, tools.PUT,
res.Serialize(res), caller)
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKFLOW, tools.PUT, res.Serialize(), caller)
}
}
if err != nil {
a.Logger.Error().Msg(err.Error())
wfa.Logger.Error().Msg(err.Error())
}
}
}
@ -215,34 +198,35 @@ func (a *workflowMongoAccessor) share(realData *Workflow, delete bool, caller *t
/*
* execution is a create or delete function for the workflow executions depending on the schedule of the workflow
*/
func (a *workflowMongoAccessor) execution(id string, realData *Workflow, delete bool) (int, error) {
func (wfa *workflowMongoAccessor) execution(id string, realData *Workflow, delete bool) (int, error) {
nats := tools.NewNATSCaller() // create a new nats caller because executions are sent to the nats for daemons
mongo.MONGOService.DeleteMultiple(map[string]interface{}{
"state": 1, // only delete the scheduled executions; executions that are in progress or ended should not be deleted, for registration
"workflow_id": id,
}, tools.WORKFLOW_EXECUTION.String())
err := a.book(id, realData, []*workflow_execution.WorkflowExecution{}) // delete the booking of the workflow on the peers
err := wfa.book(id, realData, []*workflow_execution.WorkflowExecution{}) // delete the booking of the workflow on the peers
fmt.Println("DELETE BOOKING", err)
nats.SetNATSPub(tools.WORKFLOW.String(), tools.REMOVE, realData) // send the deletion to the nats
if err != nil {
return 409, err
}

execs, err := a.getExecutions(id, realData) // get the executions of the workflow
accessor := (&workflow_execution.WorkflowExecution{}).GetAccessor(nil)
execs, err := wfa.getExecutions(id, realData) // get the executions of the workflow
if err != nil {
return 422, err
}
if !realData.ScheduleActive || delete { // if the schedule is not active, delete the executions
execs = []*workflow_execution.WorkflowExecution{}
}
err = a.book(id, realData, execs) // book the workflow on the peers
err = wfa.book(id, realData, execs) // book the workflow on the peers
fmt.Println("BOOKING", err)
if err != nil {
return 409, err // if the booking fails, return an error for integrity between peers
}
fmt.Println("BOOKING", delete)
for _, obj := range execs {
_, code, err := a.executionAccessor.StoreOne(obj)
_, code, err := accessor.StoreOne(obj)
fmt.Println("EXEC", code, err)
if code != 200 {
return code, err
@ -253,21 +237,21 @@ func (a *workflowMongoAccessor) execution(id string, realData *Workflow, delete
}

// UpdateOne updates a workflow in the database
func (a *workflowMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
res, code, err := a.LoadOne(id)
func (wfa *workflowMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
res, code, err := wfa.LoadOne(id)
if code != 200 {
return nil, 409, err
}

// avoid the update if the schedule is the same
avoid := set.(*Workflow).Schedule == nil || (res.(*Workflow).Schedule != nil && res.(*Workflow).ScheduleActive == set.(*Workflow).ScheduleActive && res.(*Workflow).Schedule.Start == set.(*Workflow).Schedule.Start && res.(*Workflow).Schedule.End == set.(*Workflow).Schedule.End && res.(*Workflow).Schedule.Cron == set.(*Workflow).Schedule.Cron)
res, code, err = utils.GenericUpdateOne(set, id, a, &Workflow{})
res, code, err = wfa.GenericUpdateOne(set, id, wfa, &Workflow{})
if code != 200 {
return nil, code, err
}
workflow := res.(*Workflow)
if !avoid { // if the schedule is not avoided, update the executions
if code, err := a.execution(id, workflow, false); code != 200 {
if code, err := wfa.execution(id, workflow, false); code != 200 {
return nil, code, errors.New("could not update the executions : " + err.Error())
}
}
@ -276,16 +260,16 @@ func (a *workflowMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.
now := time.Now().UTC()
if (workflow.Schedule.End != nil && now.After(*workflow.Schedule.End)) || (workflow.Schedule.End == nil && workflow.Schedule.Start != nil && now.After(*workflow.Schedule.Start)) { // if the start date is passed, then you can book
workflow.ScheduleActive = false
utils.GenericRawUpdateOne(workflow, id, a)
wfa.GenericRawUpdateOne(workflow, id, wfa)
} // if the start date is passed, update the executions
}
a.execute(workflow, false, false) // update the workspace for the workflow
a.share(workflow, false, a.Caller) // share the update to the peers
wfa.execute(workflow, false, false) // update the workspace for the workflow
wfa.share(workflow, false, wfa.Caller) // share the update to the peers
return res, code, nil
}

// StoreOne stores a workflow in the database
func (a *workflowMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
func (wfa *workflowMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
d := data.(*Workflow)
if d.ScheduleActive && d.Schedule != nil { // if the workflow is scheduled, update the executions
now := time.Now().UTC()
@ -293,44 +277,45 @@ func (a *workflowMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, i
d.ScheduleActive = false
} // if the start date is passed, update the executions
}
res, code, err := utils.GenericStoreOne(d, a)
res, code, err := wfa.GenericStoreOne(d, wfa)
if err != nil || code != 200 {
return nil, code, err
}
workflow := res.(*Workflow)

a.share(workflow, false, a.Caller) // share the creation to the peers
wfa.share(workflow, false, wfa.Caller) // share the creation to the peers
//store the executions
if code, err := a.execution(res.GetID(), workflow, false); err != nil {
if code, err := wfa.execution(res.GetID(), workflow, false); err != nil {
return nil, code, err
}
a.execute(workflow, false, false) // store the workspace for the workflow
wfa.execute(workflow, false, false) // store the workspace for the workflow
return res, code, nil
}

// CopyOne copies a workflow in the database
func (a *workflowMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return utils.GenericStoreOne(data, a)
func (wfa *workflowMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return wfa.GenericStoreOne(data, wfa)
}

// execute is a function that executes a workflow
// it stores the workflow resources in a specific workspace to never have a conflict in UI and logic
func (a *workflowMongoAccessor) execute(workflow *Workflow, delete bool, active bool) {
func (wfa *workflowMongoAccessor) execute(workflow *Workflow, delete bool, active bool) {

accessor := (&workspace.Workspace{}).GetAccessor(nil)
filters := &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by standard workspace name attached to a workflow
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: workflow.Name + "_workspace"}},
"abstractobject.name": {{dbs.LIKE.String(), workflow.Name + "_workspace"}},
},
}
resource, _, err := a.workspaceAccessor.Search(filters, "")
resource, _, err := accessor.Search(filters, "")
if delete { // if delete is set to true, delete the workspace
for _, r := range resource {
a.workspaceAccessor.DeleteOne(r.GetID())
accessor.DeleteOne(r.GetID())
}
return
}
if err == nil && len(resource) > 0 { // if the workspace already exists, update it
a.workspaceAccessor.UpdateOne(&workspace.Workspace{
accessor.UpdateOne(&workspace.Workspace{
Active: active,
ResourceSet: resources.ResourceSet{
Datas: workflow.Datas,
@ -341,7 +326,7 @@ func (a *workflowMongoAccessor) execute(workflow *Workflow, delete bool, active
},
}, resource[0].GetID())
} else { // if the workspace does not exist, create it
a.workspaceAccessor.StoreOne(&workspace.Workspace{
accessor.StoreOne(&workspace.Workspace{
Active: active,
AbstractObject: utils.AbstractObject{Name: workflow.Name + "_workspace"},
ResourceSet: resources.ResourceSet{
@ -355,25 +340,65 @@
}
}

func (a *workflowMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
return utils.GenericLoadOne[*Workflow](id, func(d utils.DBObject) (utils.DBObject, int, error) {
w := d.(*Workflow)
if w.ScheduleActive && w.Schedule != nil { // if the workflow is scheduled, update the executions
now := time.Now().UTC()
if (w.Schedule.End != nil && now.After(*w.Schedule.End)) || (w.Schedule.End == nil && w.Schedule.Start != nil && now.After(*w.Schedule.Start)) { // if the start date is passed, then you can book
w.ScheduleActive = false
utils.GenericRawUpdateOne(d, id, a)
} // if the start date is passed, update the executions
// LoadOne loads a workflow from the database
func (wfa *workflowMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
var workflow Workflow
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}
res_mongo.Decode(&workflow)
if workflow.ScheduleActive && workflow.Schedule != nil { // if the workflow is scheduled, update the executions
now := time.Now().UTC()
if (workflow.Schedule.End != nil && now.After(*workflow.Schedule.End)) || (workflow.Schedule.End == nil && workflow.Schedule.Start != nil && now.After(*workflow.Schedule.Start)) { // if the start date is passed, then you can book
workflow.ScheduleActive = false
wfa.GenericRawUpdateOne(&workflow, id, wfa)

} // if the start date is passed, update the executions
}
wfa.execute(&workflow, false, true) // if no workspace is attached to the workflow, create it
return &workflow, 200, nil
}

// LoadAll loads all the workflows from the database
func (wfa workflowMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
var results []Workflow
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
objs = append(objs, &r.AbstractObject) // only AbstractObject fields !
}
return objs, 200, nil
}

func (wfa *workflowMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
},
}
a.execute(w, false, true) // if no workspace is attached to the workflow, create it
return d, 200, nil
}, a)
}

func (a *workflowMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
return utils.GenericLoadAll[*Workflow](func(d utils.DBObject) utils.ShallowDBObject { return &d.(*Workflow).AbstractObject }, a)
}

func (a *workflowMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
return utils.GenericSearch[*Workflow](filters, search, (&Workflow{}).GetObjectFilters(search), func(d utils.DBObject) utils.ShallowDBObject { return d }, a)
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err
}
var results []Workflow
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
objs = append(objs, &r)
}
return objs, 200, nil
}

@ -4,7 +4,6 @@ import (
"testing"

"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
"github.com/stretchr/testify/assert"
)

@ -13,7 +12,7 @@ func TestStoreOneWorkflow(t *testing.T) {
AbstractObject: utils.AbstractObject{Name: "testWorkflow"},
}

wma := New(tools.WORKFLOW, "", "", nil, nil)
wma := New()
id, _, _ := wma.StoreOne(&w)

assert.NotEmpty(t, id)
@ -24,7 +23,7 @@ func TestLoadOneWorkflow(t *testing.T) {
AbstractObject: utils.AbstractObject{Name: "testWorkflow"},
}

wma := New(tools.WORKFLOW, "", "", nil, nil)
wma := New()
new_w, _, _ := wma.StoreOne(&w)
assert.Equal(t, w, new_w)
}

@ -107,6 +107,10 @@ func (wfa *WorkflowExecution) ArgoStatusToState(status string) *WorkflowExecutio
return wfa
}

func (ao *WorkflowExecution) GetID() string {
return ao.UUID
}

func (r *WorkflowExecution) GenerateID() {
r.UUID = uuid.New().String()
}
@ -115,10 +119,29 @@ func (d *WorkflowExecution) GetName() string {
return d.UUID + "_" + d.ExecDate.String()
}

func (d *WorkflowExecution) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return New(tools.WORKFLOW_EXECUTION, username, peerID, groups, caller) // Create a new instance of the accessor
func (d *WorkflowExecution) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.WORKFLOW_EXECUTION, caller) // Initialize the accessor with the WORKFLOW_EXECUTION model type
return data
}

func (d *WorkflowExecution) VerifyAuth(username string, peerID string, groups []string) bool {
return true
// New creates a new instance of the WorkflowExecution from a map
func (dma *WorkflowExecution) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}

// Serialize returns the WorkflowExecution as a map
func (dma *WorkflowExecution) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}

@ -4,68 +4,95 @@ import (
"time"

"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)

type workflowExecutionMongoAccessor struct {
utils.AbstractAccessor
}

func New(t tools.DataType, username string, peerID string, groups []string, caller *tools.HTTPCaller) *workflowExecutionMongoAccessor {
return &workflowExecutionMongoAccessor{
utils.AbstractAccessor{
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
Caller: caller,
PeerID: peerID,
User: username, // Set the caller
Groups: groups, // Set the caller
Type: t,
},
}
func New() *workflowExecutionMongoAccessor {
return &workflowExecutionMongoAccessor{}
}

func (wfa *workflowExecutionMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
return utils.GenericDeleteOne(id, wfa)
return wfa.GenericDeleteOne(id, wfa)
}

func (wfa *workflowExecutionMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
return utils.GenericUpdateOne(set, id, wfa, &WorkflowExecution{})
return wfa.GenericUpdateOne(set, id, wfa, &WorkflowExecution{})
}

func (wfa *workflowExecutionMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
return utils.GenericStoreOne(data, wfa)
return wfa.GenericStoreOne(data, wfa)
}

func (wfa *workflowExecutionMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return utils.GenericStoreOne(data, wfa)
return wfa.GenericStoreOne(data, wfa)
}

func (a *workflowExecutionMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
return utils.GenericLoadOne[*WorkflowExecution](id, func(d utils.DBObject) (utils.DBObject, int, error) {
if d.(*WorkflowExecution).State == SCHEDULED && time.Now().UTC().After(*d.(*WorkflowExecution).ExecDate) {
d.(*WorkflowExecution).State = FORGOTTEN
utils.GenericRawUpdateOne(d, id, a)
}
return d, 200, nil
}, a)
}

func (a *workflowExecutionMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
return utils.GenericLoadAll[*WorkflowExecution](a.getExec(), a)
}

func (a *workflowExecutionMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
return utils.GenericSearch[*WorkflowExecution](filters, search, (&WorkflowExecution{}).GetObjectFilters(search), a.getExec(), a)
}

func (a *workflowExecutionMongoAccessor) getExec() func(utils.DBObject) utils.ShallowDBObject {
return func(d utils.DBObject) utils.ShallowDBObject {
if d.(*WorkflowExecution).State == SCHEDULED && time.Now().UTC().After(*d.(*WorkflowExecution).ExecDate) {
d.(*WorkflowExecution).State = FORGOTTEN
utils.GenericRawUpdateOne(d, d.GetID(), a)
}
return d
func (wfa *workflowExecutionMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
var workflow WorkflowExecution
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}
res_mongo.Decode(&workflow)
if workflow.State == SCHEDULED && time.Now().UTC().After(*workflow.ExecDate) {
workflow.State = FORGOTTEN
wfa.GenericRawUpdateOne(&workflow, id, wfa)
}
return &workflow, 200, nil
}

func (wfa *workflowExecutionMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
var results []WorkflowExecution
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
if r.State == SCHEDULED && time.Now().UTC().After(*r.ExecDate) {
r.State = FORGOTTEN
wfa.GenericRawUpdateOne(&r, r.UUID, wfa)
}
objs = append(objs, &r.AbstractObject)
}
return objs, 200, nil
}

// Search searches for workflow executions in the database, given some filters OR a search string
func (wfa *workflowExecutionMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
},
}
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err
}
var results []WorkflowExecution
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
if r.State == SCHEDULED && time.Now().UTC().After(*r.ExecDate) {
r.State = FORGOTTEN
wfa.GenericRawUpdateOne(&r, r.UUID, wfa)
}
objs = append(objs, &r)
}
return objs, 200, nil
}

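One behavior both versions of this accessor share: when an execution is read back (LoadOne, LoadAll, Search) and is still SCHEDULED although its ExecDate has already passed, it is flipped to FORGOTTEN and persisted. A self-contained sketch of just that rule follows; the State and ExecDate names mirror the code above, but the types here are illustrative stand-ins, not oc-lib's.

package main

import (
	"fmt"
	"time"
)

type State int

const (
	SCHEDULED State = iota
	FORGOTTEN
)

type WorkflowExecution struct {
	State    State
	ExecDate *time.Time
}

// forgetIfStale applies the rule used above: a SCHEDULED execution whose
// date is already in the past becomes FORGOTTEN. It reports whether the
// caller should persist the change.
func forgetIfStale(e *WorkflowExecution, now time.Time) bool {
	if e.State == SCHEDULED && e.ExecDate != nil && now.After(*e.ExecDate) {
		e.State = FORGOTTEN
		return true
	}
	return false
}

func main() {
	past := time.Now().UTC().Add(-time.Hour)
	e := &WorkflowExecution{State: SCHEDULED, ExecDate: &past}
	changed := forgetIfStale(e, time.Now().UTC())
	fmt.Println(changed, e.State == FORGOTTEN) // true true
}
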
@ -1,10 +1,12 @@
package workspace

import (
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
"encoding/json"

"cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
"github.com/google/uuid"
)

// Workspace is a struct that represents a workspace
@ -16,17 +18,43 @@ type Workspace struct {
Shared string `json:"shared,omitempty" bson:"shared,omitempty"` // Shared is the ID of the shared workspace
}

func (d *Workspace) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return New(tools.WORKSPACE, username, peerID, groups, caller) // Create a new instance of the accessor
func (ao *Workspace) GetID() string {
return ao.UUID
}

func (ao *Workspace) VerifyAuth(username string, peerID string, groups []string) bool {
if ao.Shared != "" {
shared, code, _ := shallow_collaborative_area.New(tools.COLLABORATIVE_AREA, username, peerID, groups, nil).LoadOne(ao.Shared)
if code != 200 || shared == nil {
return false
}
return shared.VerifyAuth(username, peerID, groups)
func (r *Workspace) GenerateID() {
if r.UUID == "" {
r.UUID = uuid.New().String()
}
return ao.AbstractObject.VerifyAuth(username, peerID, groups)
}

func (d *Workspace) GetName() string {
return d.Name
}

func (d *Workspace) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.WORKSPACE, caller) // Initialize the accessor with the WORKSPACE model type
return data
}

// New creates a new instance of the workspaceMongoAccessor from a map
func (dma *Workspace) Deserialize(j map[string]interface{}) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, dma)
return dma
}

// Serialize returns the workspaceMongoAccessor as a map
func (dma *Workspace) Serialize() map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(dma)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}

@ -8,8 +8,10 @@ import (

type WorkspaceHistory struct{ Workspace }

func (d *WorkspaceHistory) GetAccessor(username string, peerID string, groups []string, caller *tools.HTTPCaller) utils.Accessor {
return New(tools.WORKFLOW_HISTORY, username, peerID, groups, caller) // Create a new instance of the accessor
func (d *WorkspaceHistory) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
data := New() // Create a new instance of the accessor
data.Init(tools.WORKSPACE_HISTORY, caller) // Initialize the accessor with the WORKSPACE model type
return data
}
func (r *WorkspaceHistory) GenerateID() {
r.UUID = uuid.New().String()

@ -5,9 +5,14 @@ import (
"fmt"

"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
"cloud.o-forge.io/core/oc-lib/models/resources/data"
"cloud.o-forge.io/core/oc-lib/models/resources/processing"
"cloud.o-forge.io/core/oc-lib/models/resources/storage"
w "cloud.o-forge.io/core/oc-lib/models/resources/workflow"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)
@ -18,104 +23,194 @@ type workspaceMongoAccessor struct {
}

// New creates a new instance of the workspaceMongoAccessor
func New(t tools.DataType, username string, peerID string, groups []string, caller *tools.HTTPCaller) *workspaceMongoAccessor {
return &workspaceMongoAccessor{
utils.AbstractAccessor{
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
Caller: caller,
PeerID: peerID,
User: username,
Groups: groups, // Set the caller
Type: t,
},
}
func New() *workspaceMongoAccessor {
return &workspaceMongoAccessor{}
}

// DeleteOne deletes a workspace from the database, given its ID; it automatically shares the deletion to peers if the workspace is shared
// it checks if a workspace with the same name already exists
func (a *workspaceMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
res, code, err := utils.GenericDeleteOne(id, a)
func (wfa *workspaceMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
res, code, err := wfa.GenericDeleteOne(id, wfa)
if code == 200 && res != nil {
a.share(res.(*Workspace), tools.DELETE, a.Caller) // Share the deletion to the peers
wfa.share(res.(*Workspace), tools.DELETE, wfa.Caller) // Share the deletion to the peers
}
return res, code, err
}

// UpdateOne updates a workspace in the database, given its ID; it automatically shares the update to peers if the workspace is shared
func (a *workspaceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
func (wfa *workspaceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
d := set.(*Workspace) // Get the workspace from the set
d.Clear()
d.DataResources = nil // Reset the resources
d.ComputeResources = nil
d.StorageResources = nil
d.ProcessingResources = nil
d.WorkflowResources = nil
if d.Active { // If the workspace is active, deactivate all the other workspaces
res, _, err := a.LoadAll()
res, _, err := wfa.LoadAll()
if err == nil {
for _, r := range res {
if r.GetID() != id {
r.(*Workspace).Active = false
a.UpdateOne(r.(*Workspace), r.GetID())
wfa.UpdateOne(r.(*Workspace), r.GetID())
}
}
}
}
res, code, err := utils.GenericUpdateOne(set, id, a, &Workspace{})
res, code, err := wfa.GenericUpdateOne(set, id, wfa, &Workspace{})
if code == 200 && res != nil {
a.share(res.(*Workspace), tools.PUT, a.Caller)
wfa.share(res.(*Workspace), tools.PUT, wfa.Caller)
}
return res, code, err
}

// StoreOne stores a workspace in the database, it checks if a workspace with the same name already exists
func (a *workspaceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
func (wfa *workspaceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
filters := &dbs.Filters{
Or: map[string][]dbs.Filter{
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: data.GetName() + "_workspace"}},
"abstractobject.name": {{dbs.LIKE.String(), data.GetName() + "_workspace"}},
},
}
res, _, err := a.Search(filters, "") // Search for the workspace
if err == nil && len(res) > 0 { // If the workspace already exists, return an error
res, _, err := wfa.Search(filters, "") // Search for the workspace
if err == nil && len(res) > 0 { // If the workspace already exists, return an error
return nil, 409, errors.New("A workspace with the same name already exists")
}
// reset the resources
d := data.(*Workspace)
d.Clear()
return utils.GenericStoreOne(d, a)
d.DataResources = nil
d.ComputeResources = nil
d.StorageResources = nil
d.ProcessingResources = nil
d.WorkflowResources = nil
return wfa.GenericStoreOne(d, wfa)
}

// CopyOne copies a workspace in the database
func (a *workspaceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return utils.GenericStoreOne(data, a)
func (wfa *workspaceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return wfa.GenericStoreOne(data, wfa)
}

func (a *workspaceMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
return utils.GenericLoadOne[*Workspace](id, func(d utils.DBObject) (utils.DBObject, int, error) {
d.(*Workspace).Fill(a.GetUser(), a.PeerID, a.Groups)
return d, 200, nil
}, a)
/*
This function is used to fill the workspace with the resources
*/
func (wfa *workspaceMongoAccessor) fill(workflow *Workspace) *Workspace {
// Fill the workspace with the resources
if workflow.Datas != nil && len(workflow.Datas) > 0 {
dataAccessor := (&data.DataResource{}).GetAccessor(nil)
for _, id := range workflow.Datas {
d, _, e := dataAccessor.LoadOne(id)
if e == nil {
workflow.DataResources = append(workflow.DataResources, d.(*data.DataResource))
}
}
}
// Fill the workspace with the computes
if workflow.Computes != nil && len(workflow.Computes) > 0 {
dataAccessor := (&compute.ComputeResource{}).GetAccessor(nil)
for _, id := range workflow.Computes {
d, _, e := dataAccessor.LoadOne(id)
if e == nil {
workflow.ComputeResources = append(workflow.ComputeResources, d.(*compute.ComputeResource))
}
}
}
// Fill the workspace with the storages
if workflow.Storages != nil && len(workflow.Storages) > 0 {
dataAccessor := (&storage.StorageResource{}).GetAccessor(nil)
for _, id := range workflow.Storages {
d, _, e := dataAccessor.LoadOne(id)
if e == nil {
workflow.StorageResources = append(workflow.StorageResources, d.(*storage.StorageResource))
}
}
}
// Fill the workspace with the processings
if workflow.Processings != nil && len(workflow.Processings) > 0 {
dataAccessor := (&processing.ProcessingResource{}).GetAccessor(nil)
for _, id := range workflow.Processings {
d, _, e := dataAccessor.LoadOne(id)
if e == nil {
workflow.ProcessingResources = append(workflow.ProcessingResources, d.(*processing.ProcessingResource))
}
}
}
// Fill the workspace with the workflows
if workflow.Workflows != nil && len(workflow.Workflows) > 0 {
dataAccessor := (&w.WorkflowResource{}).GetAccessor(nil)
for _, id := range workflow.Workflows {
d, _, e := dataAccessor.LoadOne(id)
if e == nil {
workflow.WorkflowResources = append(workflow.WorkflowResources, d.(*w.WorkflowResource))
}
}
}
return workflow
}

func (a *workspaceMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
return utils.GenericLoadAll[*Workspace](func(d utils.DBObject) utils.ShallowDBObject {
d.(*Workspace).Fill(a.GetUser(), a.PeerID, a.Groups)
return d
}, a)
// LoadOne loads a workspace from the database, given its ID
func (wfa *workspaceMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
var workflow Workspace
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}
res_mongo.Decode(&workflow)

return wfa.fill(&workflow), 200, nil
}

func (a *workspaceMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
return utils.GenericSearch[*Workspace](filters, search, (&Workspace{}).GetObjectFilters(search), func(d utils.DBObject) utils.ShallowDBObject {
d.(*Workspace).Fill(a.GetUser(), a.PeerID, a.Groups)
return d
}, a)
// LoadAll loads all the workspaces from the database
func (wfa workspaceMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
var results []Workspace
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
objs = append(objs, wfa.fill(&r))
}
return objs, 200, nil
}

// Search searches for workspaces in the database, given some filters OR a search string
func (wfa *workspaceMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
objs := []utils.ShallowDBObject{}
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
filters = &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
},
}
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
if err != nil {
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
return nil, code, err
}
var results []Workspace
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
objs = append(objs, wfa.fill(&r))
}
return objs, 200, nil
}

/*
This function is used to share the workspace with the peers
*/
func (a *workspaceMongoAccessor) share(realData *Workspace, method tools.METHOD, caller *tools.HTTPCaller) {
func (wfa *workspaceMongoAccessor) share(realData *Workspace, method tools.METHOD, caller *tools.HTTPCaller) {
fmt.Println("Sharing workspace", realData, caller)
if realData == nil || realData.Shared == "" || caller == nil || caller.Disabled {
return
}
shallow := &shallow_collaborative_area.ShallowCollaborativeArea{}
access := (shallow).GetAccessor(a.GetUser(), a.PeerID, a.Groups, nil)
access := (&shallow_collaborative_area.ShallowCollaborativeArea{}).GetAccessor(nil)
res, code, _ := access.LoadOne(realData.Shared)
if code != 200 {
return
@ -132,10 +227,10 @@ func (a *workspaceMongoAccessor) share(realData *Workspace, method tools.METHOD,
history.StoreOne(history.MapFromWorkspace(res.(*Workspace)))
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKSPACE, tools.DELETE, map[string]interface{}{}, caller)
} else { // If the workspace is updated, share the update
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKSPACE, tools.PUT, res.Serialize(res), caller)
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKSPACE, tools.PUT, res.Serialize(), caller)
}
}
if err != nil {
a.Logger.Error().Msg(err.Error())
wfa.Logger.Error().Msg(err.Error())
}
}