Compare commits
No commits in common. "main" and "issue#4" have entirely different histories.
@ -16,7 +16,6 @@ type Config struct {
|
|||||||
Port string
|
Port string
|
||||||
LokiUrl string
|
LokiUrl string
|
||||||
LogLevel string
|
LogLevel string
|
||||||
Whitelist bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Config) GetUrl() string {
|
func (c Config) GetUrl() string {
|
||||||
@ -38,11 +37,19 @@ func GetConfig() *Config {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func SetConfig(mongoUrl string, database string, natsUrl string, lokiUrl string, logLevel string) *Config {
|
func SetConfig(mongoUrl string, database string, natsUrl string, lokiUrl string, logLevel string) *Config {
|
||||||
|
/*once.Do(func() {
|
||||||
|
instance = &Config{
|
||||||
|
MongoUrl: mongoUrl,
|
||||||
|
MongoDatabase: database,
|
||||||
|
NATSUrl: natsUrl,
|
||||||
|
LokiUrl: lokiUrl,
|
||||||
|
LogLevel: logLevel,
|
||||||
|
}
|
||||||
|
})*/
|
||||||
GetConfig().MongoUrl = mongoUrl
|
GetConfig().MongoUrl = mongoUrl
|
||||||
GetConfig().MongoDatabase = database
|
GetConfig().MongoDatabase = database
|
||||||
GetConfig().NATSUrl = natsUrl
|
GetConfig().NATSUrl = natsUrl
|
||||||
GetConfig().LokiUrl = lokiUrl
|
GetConfig().LokiUrl = lokiUrl
|
||||||
GetConfig().LogLevel = logLevel
|
GetConfig().LogLevel = logLevel
|
||||||
GetConfig().Whitelist = true
|
|
||||||
return GetConfig()
|
return GetConfig()
|
||||||
}
|
}
|
||||||
|
@ -48,7 +48,7 @@ func (m *MongoDB) Init(collections []string, config MongoConf) {
|
|||||||
mngoCollections = collections
|
mngoCollections = collections
|
||||||
mngoConfig = config
|
mngoConfig = config
|
||||||
if err := m.createClient(config.GetUrl(), false); err != nil {
|
if err := m.createClient(config.GetUrl(), false); err != nil {
|
||||||
// m.Logger.Error().Msg(err.Error())
|
m.Logger.Error().Msg(err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -170,12 +170,12 @@ func (m *MongoDB) DeleteOne(id string, collection_name string) (int64, int, erro
|
|||||||
filter := bson.M{"_id": id}
|
filter := bson.M{"_id": id}
|
||||||
targetDBCollection := CollectionMap[collection_name]
|
targetDBCollection := CollectionMap[collection_name]
|
||||||
opts := options.Delete().SetHint(bson.D{{Key: "_id", Value: 1}})
|
opts := options.Delete().SetHint(bson.D{{Key: "_id", Value: 1}})
|
||||||
MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
|
MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
//defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
result, err := targetDBCollection.DeleteOne(MngoCtx, filter, opts)
|
result, err := targetDBCollection.DeleteOne(MngoCtx, filter, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// m.Logger.Error().Msg("Couldn't insert resource: " + err.Error())
|
m.Logger.Error().Msg("Couldn't insert resource: " + err.Error())
|
||||||
return 0, 404, err
|
return 0, 404, err
|
||||||
}
|
}
|
||||||
return result.DeletedCount, 200, nil
|
return result.DeletedCount, 200, nil
|
||||||
@ -191,12 +191,12 @@ func (m *MongoDB) DeleteMultiple(f map[string]interface{}, collection_name strin
|
|||||||
}
|
}
|
||||||
targetDBCollection := CollectionMap[collection_name]
|
targetDBCollection := CollectionMap[collection_name]
|
||||||
opts := options.Delete().SetHint(bson.D{{Key: "_id", Value: 1}})
|
opts := options.Delete().SetHint(bson.D{{Key: "_id", Value: 1}})
|
||||||
MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
|
MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
//defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
result, err := targetDBCollection.DeleteMany(MngoCtx, filter, opts)
|
result, err := targetDBCollection.DeleteMany(MngoCtx, filter, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// m.Logger.Error().Msg("Couldn't insert resource: " + err.Error())
|
m.Logger.Error().Msg("Couldn't insert resource: " + err.Error())
|
||||||
return 0, 404, err
|
return 0, 404, err
|
||||||
}
|
}
|
||||||
return result.DeletedCount, 200, nil
|
return result.DeletedCount, 200, nil
|
||||||
@ -214,11 +214,11 @@ func (m *MongoDB) UpdateMultiple(set interface{}, filter map[string]interface{},
|
|||||||
f = append(f, bson.E{Key: k, Value: v})
|
f = append(f, bson.E{Key: k, Value: v})
|
||||||
}
|
}
|
||||||
targetDBCollection := CollectionMap[collection_name]
|
targetDBCollection := CollectionMap[collection_name]
|
||||||
MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
|
MngoCtx, cancel = context.WithTimeout(context.Background(), 50*time.Second)
|
||||||
//defer cancel()
|
defer cancel()
|
||||||
res, err := targetDBCollection.UpdateMany(MngoCtx, f, dbs.InputToBson(doc, true))
|
res, err := targetDBCollection.UpdateMany(MngoCtx, f, dbs.InputToBson(doc, true))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// m.Logger.Error().Msg("Couldn't update resource: " + err.Error())
|
m.Logger.Error().Msg("Couldn't update resource: " + err.Error())
|
||||||
return 0, 404, err
|
return 0, 404, err
|
||||||
}
|
}
|
||||||
return res.UpsertedCount, 200, nil
|
return res.UpsertedCount, 200, nil
|
||||||
@ -233,11 +233,11 @@ func (m *MongoDB) UpdateOne(set interface{}, id string, collection_name string)
|
|||||||
bson.Unmarshal(b, &doc)
|
bson.Unmarshal(b, &doc)
|
||||||
filter := bson.M{"_id": id}
|
filter := bson.M{"_id": id}
|
||||||
targetDBCollection := CollectionMap[collection_name]
|
targetDBCollection := CollectionMap[collection_name]
|
||||||
MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
|
MngoCtx, cancel = context.WithTimeout(context.Background(), 50*time.Second)
|
||||||
//defer cancel()
|
defer cancel()
|
||||||
_, err := targetDBCollection.UpdateOne(MngoCtx, filter, dbs.InputToBson(doc, true))
|
_, err := targetDBCollection.UpdateOne(MngoCtx, filter, dbs.InputToBson(doc, true))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// m.Logger.Error().Msg("Couldn't update resource: " + err.Error())
|
m.Logger.Error().Msg("Couldn't update resource: " + err.Error())
|
||||||
return "", 404, err
|
return "", 404, err
|
||||||
}
|
}
|
||||||
return id, 200, nil
|
return id, 200, nil
|
||||||
@ -252,12 +252,12 @@ func (m *MongoDB) StoreOne(obj interface{}, id string, collection_name string) (
|
|||||||
bson.Unmarshal(b, &doc)
|
bson.Unmarshal(b, &doc)
|
||||||
doc["_id"] = id
|
doc["_id"] = id
|
||||||
targetDBCollection := CollectionMap[collection_name]
|
targetDBCollection := CollectionMap[collection_name]
|
||||||
MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
|
MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
//defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
_, err := targetDBCollection.InsertOne(MngoCtx, doc)
|
_, err := targetDBCollection.InsertOne(MngoCtx, doc)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// m.Logger.Error().Msg("Couldn't insert resource: " + err.Error())
|
m.Logger.Error().Msg("Couldn't insert resource: " + err.Error())
|
||||||
return "", 409, err
|
return "", 409, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -270,12 +270,12 @@ func (m *MongoDB) LoadOne(id string, collection_name string) (*mongo.SingleResul
|
|||||||
}
|
}
|
||||||
filter := bson.M{"_id": id}
|
filter := bson.M{"_id": id}
|
||||||
targetDBCollection := CollectionMap[collection_name]
|
targetDBCollection := CollectionMap[collection_name]
|
||||||
MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
|
MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
//defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
res := targetDBCollection.FindOne(MngoCtx, filter)
|
res := targetDBCollection.FindOne(MngoCtx, filter)
|
||||||
if res.Err() != nil {
|
if res.Err() != nil {
|
||||||
// m.Logger.Error().Msg("Couldn't find resource " + id + ". Error : " + res.Err().Error())
|
m.Logger.Error().Msg("Couldn't find resource " + id + ". Error : " + res.Err().Error())
|
||||||
err := res.Err()
|
err := res.Err()
|
||||||
return nil, 404, err
|
return nil, 404, err
|
||||||
}
|
}
|
||||||
@ -313,8 +313,8 @@ func (m *MongoDB) Search(filters *dbs.Filters, collection_name string) (*mongo.C
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
|
MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
// defer cancel()
|
defer cancel()
|
||||||
if cursor, err := targetDBCollection.Find(
|
if cursor, err := targetDBCollection.Find(
|
||||||
MngoCtx,
|
MngoCtx,
|
||||||
f,
|
f,
|
||||||
@ -336,12 +336,12 @@ func (m *MongoDB) LoadFilter(filter map[string]interface{}, collection_name stri
|
|||||||
}
|
}
|
||||||
targetDBCollection := CollectionMap[collection_name]
|
targetDBCollection := CollectionMap[collection_name]
|
||||||
|
|
||||||
MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
|
MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
//defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
res, err := targetDBCollection.Find(MngoCtx, f)
|
res, err := targetDBCollection.Find(MngoCtx, f)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// m.Logger.Error().Msg("Couldn't find any resources. Error : " + err.Error())
|
m.Logger.Error().Msg("Couldn't find any resources. Error : " + err.Error())
|
||||||
return nil, 404, err
|
return nil, 404, err
|
||||||
}
|
}
|
||||||
return res, 200, nil
|
return res, 200, nil
|
||||||
@ -353,12 +353,12 @@ func (m *MongoDB) LoadAll(collection_name string) (*mongo.Cursor, int, error) {
|
|||||||
}
|
}
|
||||||
targetDBCollection := CollectionMap[collection_name]
|
targetDBCollection := CollectionMap[collection_name]
|
||||||
|
|
||||||
MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
|
MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
//defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
res, err := targetDBCollection.Find(MngoCtx, bson.D{})
|
res, err := targetDBCollection.Find(MngoCtx, bson.D{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// m.Logger.Error().Msg("Couldn't find any resources. Error : " + err.Error())
|
m.Logger.Error().Msg("Couldn't find any resources. Error : " + err.Error())
|
||||||
return nil, 404, err
|
return nil, 404, err
|
||||||
}
|
}
|
||||||
return res, 200, nil
|
return res, 200, nil
|
||||||
|
@ -7,7 +7,7 @@ abstract Resource{
|
|||||||
+icon: string
|
+icon: string
|
||||||
+description: string
|
+description: string
|
||||||
+graphic: GraphicElement
|
+graphic: GraphicElement
|
||||||
+element: DataResource/ProcessingResource/StorageResource/Workflow/ComputeResource
|
+element: DataResource/ProcessingResource/StorageResource/Workflow/DatacenterResource
|
||||||
}
|
}
|
||||||
|
|
||||||
class DataResource {
|
class DataResource {
|
||||||
@ -31,7 +31,7 @@ class StorageResource {
|
|||||||
+capacity: int
|
+capacity: int
|
||||||
}
|
}
|
||||||
|
|
||||||
class ComputeResource {
|
class DatacenterResource {
|
||||||
+UUID: int
|
+UUID: int
|
||||||
+name: string
|
+name: string
|
||||||
|
|
||||||
@ -96,7 +96,7 @@ class UserWorkflows {
|
|||||||
|
|
||||||
class DatacenterWorkflows {
|
class DatacenterWorkflows {
|
||||||
+UUID: int
|
+UUID: int
|
||||||
+compute: ComputeResource
|
+datacenter: DatacenterResource
|
||||||
+workflows: Workflow[]
|
+workflows: Workflow[]
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -159,7 +159,7 @@ DatacenterWorkflows "1" o-- "0..*" Workflow
|
|||||||
Resource<|-- DataResource
|
Resource<|-- DataResource
|
||||||
Resource<|-- ProcessingResource
|
Resource<|-- ProcessingResource
|
||||||
Resource<|-- StorageResource
|
Resource<|-- StorageResource
|
||||||
Resource<|-- ComputeResource
|
Resource<|-- DatacenterResource
|
||||||
Resource<|-- Workflow
|
Resource<|-- Workflow
|
||||||
|
|
||||||
ResourceSet "1" o-- "0..*" Ressource
|
ResourceSet "1" o-- "0..*" Ressource
|
||||||
|
@ -1,325 +0,0 @@
|
|||||||
@startuml
|
|
||||||
|
|
||||||
class AbstractObject {
|
|
||||||
ID string
|
|
||||||
Name string
|
|
||||||
IsDraft bool // is consider as a draft
|
|
||||||
UpdateDate date
|
|
||||||
LastPeerWriter string
|
|
||||||
CreatorID string
|
|
||||||
AccessMode int // public or private
|
|
||||||
}
|
|
||||||
|
|
||||||
AbstractObject ^-- AbstractResource
|
|
||||||
AbstractObject ^-- Order
|
|
||||||
AbstractObject ^-- Booking
|
|
||||||
AbstractObject ^-- BuyingStatus
|
|
||||||
AbstractObject ^-- WorkflowExecution
|
|
||||||
AbstractObject ^-- Workflow
|
|
||||||
|
|
||||||
class AbstractResource {
|
|
||||||
Logo string
|
|
||||||
Description string
|
|
||||||
ShortDescription string
|
|
||||||
Owners []string
|
|
||||||
UsageRestrictions string
|
|
||||||
|
|
||||||
VerifyAuth(request) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
AbstractResource "1 " --* "many " ResourceInstanceITF
|
|
||||||
AbstractCustomizedResource "1 " --* "1 " ResourceInstanceITF
|
|
||||||
|
|
||||||
AbstractResource ^-- ComputeResource
|
|
||||||
AbstractResource ^-- DataResource
|
|
||||||
AbstractResource ^-- ProcessingResource
|
|
||||||
AbstractResource ^-- StorageResource
|
|
||||||
AbstractResource ^-- WorkflowResource
|
|
||||||
class ComputeResource {
|
|
||||||
Architecture string
|
|
||||||
Infrastructure string
|
|
||||||
}
|
|
||||||
class DataResource {
|
|
||||||
Type string
|
|
||||||
Quality string
|
|
||||||
OpenData bool
|
|
||||||
Static bool
|
|
||||||
UpdatePeriod date
|
|
||||||
PersonalData bool
|
|
||||||
AnonymizedPersonalData bool
|
|
||||||
SizeGB float64
|
|
||||||
Licence string
|
|
||||||
Example string
|
|
||||||
}
|
|
||||||
ProcessingResource "1 " *-- "1 " ProcessingUsage
|
|
||||||
class ProcessingUsage {
|
|
||||||
CPUs map[string]CPU
|
|
||||||
GPUs map[string]GPU
|
|
||||||
RAM RAM
|
|
||||||
StorageGB float64
|
|
||||||
Hypothesis string
|
|
||||||
ScalingModel string
|
|
||||||
}
|
|
||||||
|
|
||||||
class ProcessingResource {
|
|
||||||
Infrastructure string
|
|
||||||
Service bool
|
|
||||||
Usage ProcessingUsage
|
|
||||||
OpenSource bool
|
|
||||||
License string
|
|
||||||
Maturity string
|
|
||||||
}
|
|
||||||
class StorageResource {
|
|
||||||
Type string
|
|
||||||
Accronym string
|
|
||||||
}
|
|
||||||
WorkflowResource "1 " --* "many " ComputeResource
|
|
||||||
WorkflowResource "1 " --* "many " DataResource
|
|
||||||
WorkflowResource "1 " --* "many " ProcessingResource
|
|
||||||
WorkflowResource "1 " --* "many " StorageResource
|
|
||||||
class WorkflowResource {
|
|
||||||
WorkflowID string
|
|
||||||
}
|
|
||||||
|
|
||||||
class ExploitResourceSet {}
|
|
||||||
|
|
||||||
AbstractCustomizedResource --^ AbstractResource
|
|
||||||
AbstractCustomizedResource --* ExploitResourceSet
|
|
||||||
ExploitResourceSet ^-- CustomizedComputeResource
|
|
||||||
ExploitResourceSet ^-- CustomizedDataResource
|
|
||||||
ExploitResourceSet ^-- CustomizedProcessingResource
|
|
||||||
ExploitResourceSet ^-- CustomizedStorageResource
|
|
||||||
ExploitResourceSet ^-- CustomizedWorkflowResource
|
|
||||||
class AbstractCustomizedResource {
|
|
||||||
// A customized resource is an
|
|
||||||
// extended abstract resource not use in catalog
|
|
||||||
ExplicitBookingDurationS float64
|
|
||||||
UsageStart date
|
|
||||||
UsageEnd date
|
|
||||||
SelectedPricing string
|
|
||||||
}
|
|
||||||
class CustomizedComputeResource {
|
|
||||||
CPUsLocated map[string]float64
|
|
||||||
GPUsLocated map[string]float64
|
|
||||||
RAMLocated float64
|
|
||||||
}
|
|
||||||
class CustomizedDataResource {
|
|
||||||
StorageGB float64
|
|
||||||
}
|
|
||||||
class CustomizedProcessingResource {
|
|
||||||
Container Container
|
|
||||||
}
|
|
||||||
class CustomizedStorageResource {
|
|
||||||
StorageGB bool
|
|
||||||
}
|
|
||||||
class CustomizedWorkflowResource {}
|
|
||||||
|
|
||||||
interface ResourceInstanceITF {
|
|
||||||
GetID() string
|
|
||||||
VerifyPartnership() bool // eval if there is one partnership per peer groups in every instance
|
|
||||||
GetPeerGroups() []ResourcePartnerITF, []map[string][]string
|
|
||||||
ClearPeerGroups()
|
|
||||||
}
|
|
||||||
|
|
||||||
ResourceInstanceITF -- ResourceInstance
|
|
||||||
ResourceInstance ^-- ComputeResourceInstance
|
|
||||||
ResourceInstance ^-- StorageResourceInstance
|
|
||||||
ResourceInstance "many " --* "1 " ResourcePartnerITF
|
|
||||||
class ResourceInstance {
|
|
||||||
ID string
|
|
||||||
Location Geopoint
|
|
||||||
Country CountryCode
|
|
||||||
AccessProtocol string
|
|
||||||
}
|
|
||||||
class ComputeResourceInstance {
|
|
||||||
SecurityLevel string
|
|
||||||
PowerSource string
|
|
||||||
CPUs map[string]CPU
|
|
||||||
GPUs map[string]GPU
|
|
||||||
RAM RAM
|
|
||||||
}
|
|
||||||
class StorageResourceInstance {
|
|
||||||
Local bool
|
|
||||||
SecurityLevel string
|
|
||||||
SizeType string
|
|
||||||
SizeGB int
|
|
||||||
Encryption bool
|
|
||||||
Redundancy string
|
|
||||||
Throughput string
|
|
||||||
}
|
|
||||||
|
|
||||||
ResourcePartnerITF -- ResourcePartnership
|
|
||||||
ResourcePartnership ^-- ComputeResourcePartnership
|
|
||||||
ResourcePartnership ^-- DataResourcePartnership
|
|
||||||
ResourcePartnership ^-- StorageResourcePartnership
|
|
||||||
|
|
||||||
interface ResourcePartnerITF {
|
|
||||||
GetPricing(id string) PricingProfileITF
|
|
||||||
GetPeerGroups() []ResourcePartnerITF, []map[string][]string
|
|
||||||
ClearPeerGroups()
|
|
||||||
}
|
|
||||||
|
|
||||||
ResourcePartnership "many " --* "1 " PricingProfileITF
|
|
||||||
class ResourcePartnership{
|
|
||||||
Namespace string
|
|
||||||
PeerGroups map[string][]string
|
|
||||||
}
|
|
||||||
class ComputeResourcePartnership {
|
|
||||||
MaxAllowedCPUsCores map[string]int
|
|
||||||
MaxAllowedGPUsMemoryGB map[string]float64
|
|
||||||
RAMSizeGB float64
|
|
||||||
}
|
|
||||||
class DataResourcePartnership {
|
|
||||||
MaxDownloadableGBAllowed float64
|
|
||||||
PersonalDataAllowed bool
|
|
||||||
AnonymizedPersonalDataAllowed bool
|
|
||||||
}
|
|
||||||
class StorageResourcePartnership {
|
|
||||||
MaxSizeGBAllowed float64
|
|
||||||
OnlyEncryptedAllowed bool
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
RefundType -- AccessPricingProfile
|
|
||||||
enum RefundType {
|
|
||||||
REFUND_DEAD_END
|
|
||||||
REFUND_ON_ERROR
|
|
||||||
REFUND_ON_EARLY_END
|
|
||||||
}
|
|
||||||
PricingProfileITF -- AccessPricingProfile
|
|
||||||
PricingProfileITF -- ExploitPricingProfile
|
|
||||||
PricingProfileITF -- WorkflowResourcePricingProfile
|
|
||||||
AccessPricingProfile ^-- DataResourcePricingProfile
|
|
||||||
AccessPricingProfile ^-- ProcessingResourcePricingProfile
|
|
||||||
ExploitPricingProfile ^-- ComputeResourcePricingProfile
|
|
||||||
ExploitPricingProfile ^-- StorageResourcePricingProfile
|
|
||||||
interface PricingProfileITF {
|
|
||||||
GetPrice(quantity float64, val float64, start date, end date, request) float64
|
|
||||||
IsPurchased() bool
|
|
||||||
}
|
|
||||||
class AccessPricingProfile {
|
|
||||||
ID string
|
|
||||||
Pricing PricingStrategy
|
|
||||||
DefaultRefundType RefundType
|
|
||||||
RefundRatio int // percentage of refund on price
|
|
||||||
}
|
|
||||||
class DataResourcePricingProfile {}
|
|
||||||
class ProcessingResourcePricingProfile {}
|
|
||||||
|
|
||||||
ExploitPrivilegeStrategy -- ExploitPricingProfile
|
|
||||||
enum ExploitPrivilegeStrategy {
|
|
||||||
BASIC
|
|
||||||
GARANTED_ON_DELAY
|
|
||||||
GARANTED
|
|
||||||
}
|
|
||||||
|
|
||||||
AccessPricingProfile --* PricingStrategy
|
|
||||||
AccessPricingProfile ^-- ExploitPricingProfile
|
|
||||||
class ExploitPricingProfile {
|
|
||||||
AdditionnalRefundTypes RefundTypeint
|
|
||||||
PrivilegeStrategy ExploitPrivilegeStrategy
|
|
||||||
GarantedDelaySecond int
|
|
||||||
Exceeding bool
|
|
||||||
ExceedingRatio int // percentage of Exceeding based on price
|
|
||||||
}
|
|
||||||
class ComputeResourcePricingProfile {
|
|
||||||
OverrideCPUsPrices map[string]float64
|
|
||||||
OverrideGPUsPrices map[string]float64
|
|
||||||
OverrideRAMPrice float64
|
|
||||||
}
|
|
||||||
class StorageResourcePricingProfile {}
|
|
||||||
WorkflowResourcePricingProfile "1 " --* "many " ExploitResourceSet
|
|
||||||
|
|
||||||
class WorkflowResourcePricingProfile {
|
|
||||||
ID string
|
|
||||||
}
|
|
||||||
|
|
||||||
BuyingStrategy -- PricingStrategy
|
|
||||||
enum BuyingStrategy {
|
|
||||||
UNLIMITED
|
|
||||||
SUBSCRIPTION
|
|
||||||
PAY_PER_USE
|
|
||||||
}
|
|
||||||
Strategy -- TimePricingStrategy
|
|
||||||
Strategy "0-1 " *-- " " PricingStrategy
|
|
||||||
interface Strategy {
|
|
||||||
GetStrategy () string
|
|
||||||
GetStrategyValue() int
|
|
||||||
}
|
|
||||||
enum TimePricingStrategy {
|
|
||||||
ONCE
|
|
||||||
PER_SECOND
|
|
||||||
PER_MINUTE
|
|
||||||
PER_HOUR
|
|
||||||
PER_DAY
|
|
||||||
PER_WEEK
|
|
||||||
PER_MONTH
|
|
||||||
}
|
|
||||||
|
|
||||||
class PricingStrategy {
|
|
||||||
Price float64
|
|
||||||
BuyingStrategy
|
|
||||||
TimePricingStrategy TimePricingStrategy
|
|
||||||
OverrideStrategy Strategy
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
PeerOrder "many " *-- "1 " Order
|
|
||||||
PeerItemOrder "many " *-- "1 " PeerOrder
|
|
||||||
PricedItemITF "many " *-- "1 " PeerItemOrder
|
|
||||||
|
|
||||||
PricedItemITF -- AbstractCustomizedResource
|
|
||||||
|
|
||||||
class Order {
|
|
||||||
OrderBy string
|
|
||||||
WorkflowExecutionIDs []string
|
|
||||||
Status string
|
|
||||||
Total float64
|
|
||||||
}
|
|
||||||
class PeerOrder {
|
|
||||||
PeerID string
|
|
||||||
Error string
|
|
||||||
Status string
|
|
||||||
BillingAddress string
|
|
||||||
Total float64
|
|
||||||
}
|
|
||||||
class PeerItemOrder {
|
|
||||||
Quantity int
|
|
||||||
BuyingStatus string
|
|
||||||
}
|
|
||||||
|
|
||||||
class BuyingStatus {}
|
|
||||||
|
|
||||||
WorkflowExecution "many " --* "1 " Workflow
|
|
||||||
Workflow "1 " --* "many " WorkflowScheduler
|
|
||||||
WorkflowScheduler "1 " --* "many " WorkflowExecution
|
|
||||||
|
|
||||||
class WorkflowExecution {
|
|
||||||
ExecDate date
|
|
||||||
EndDate date
|
|
||||||
State string
|
|
||||||
WorkflowID string
|
|
||||||
|
|
||||||
ToBookings() []Booking
|
|
||||||
}
|
|
||||||
class WorkflowScheduler* {
|
|
||||||
Message string
|
|
||||||
Warning string
|
|
||||||
Start date
|
|
||||||
End date
|
|
||||||
DurationS float64
|
|
||||||
Cron string
|
|
||||||
|
|
||||||
Schedules(workflowID string, request) []WorkflowExecution
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
Workflow "1 " --* "many " ExploitResourceSet
|
|
||||||
|
|
||||||
class Workflow {}
|
|
||||||
|
|
||||||
interface PricedItemITF {
|
|
||||||
getPrice(request) float64, error
|
|
||||||
}
|
|
||||||
|
|
||||||
@enduml
|
|
@ -1,29 +0,0 @@
|
|||||||
@startuml
|
|
||||||
user -> client : schedule
|
|
||||||
client -> OrderAPIP1 : check book
|
|
||||||
OrderAPIP1 -> datacenterAPIP2 : check book
|
|
||||||
datacenterAPIP2 -> OrderAPIP1 : send ok
|
|
||||||
OrderAPIP1 -> datacenterAPIP2 : generate draft book
|
|
||||||
OrderAPIP1 -> client : send ok
|
|
||||||
client -> OrderAPIP1 : send scheduler
|
|
||||||
OrderAPIP1 -> OrderAPIP1 : draft executions
|
|
||||||
OrderAPIP1 -> OrderAPIP1 : draft order
|
|
||||||
OrderAPIP1 -> client : send drafted order
|
|
||||||
client -> user :
|
|
||||||
user -> client : select pricing profile
|
|
||||||
client -> OrderAPIP1 : update order
|
|
||||||
OrderAPIP1 -> datacenterAPIP2 : check book
|
|
||||||
datacenterAPIP2 -> OrderAPIP1 : send ok
|
|
||||||
OrderAPIP1 -> datacenterAPIP2 : generate draft book
|
|
||||||
OrderAPIP1 -> client : send order
|
|
||||||
user -> client : order
|
|
||||||
client -> OrderAPIP1 : order
|
|
||||||
OrderAPIP1 -> PaymentAPIBCP1 : send payment
|
|
||||||
PaymentAPIBCP1 -> OrderAPIP1 : send ok
|
|
||||||
OrderAPIP1 -> datacenterAPIP2 : undraft booking
|
|
||||||
OrderAPIP1 -> OrderAPIP1 : undraft execution
|
|
||||||
OrderAPIP1 -> OrderAPIP1 : undraft order
|
|
||||||
OrderAPIP1 -> client : send ok
|
|
||||||
client -> client : redirect
|
|
||||||
@enduml
|
|
||||||
|
|
317
entrypoint.go
317
entrypoint.go
@ -1,11 +1,8 @@
|
|||||||
package oclib
|
package oclib
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/base64"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"runtime/debug"
|
"runtime/debug"
|
||||||
@ -17,9 +14,13 @@ import (
|
|||||||
"cloud.o-forge.io/core/oc-lib/models"
|
"cloud.o-forge.io/core/oc-lib/models"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/collaborative_area"
|
"cloud.o-forge.io/core/oc-lib/models/collaborative_area"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
|
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/order"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
"cloud.o-forge.io/core/oc-lib/models/peer"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/resources"
|
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resources/data"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resources/processing"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resources/storage"
|
||||||
|
w "cloud.o-forge.io/core/oc-lib/models/resources/workflow"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
w2 "cloud.o-forge.io/core/oc-lib/models/workflow"
|
w2 "cloud.o-forge.io/core/oc-lib/models/workflow"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
|
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
|
||||||
@ -47,10 +48,9 @@ const (
|
|||||||
WORKSPACE = tools.WORKSPACE
|
WORKSPACE = tools.WORKSPACE
|
||||||
WORKFLOW_EXECUTION = tools.WORKFLOW_EXECUTION
|
WORKFLOW_EXECUTION = tools.WORKFLOW_EXECUTION
|
||||||
PEER = tools.PEER
|
PEER = tools.PEER
|
||||||
COLLABORATIVE_AREA = tools.COLLABORATIVE_AREA
|
SHARED_WORKSPACE = tools.COLLABORATIVE_AREA
|
||||||
RULE = tools.RULE
|
RULE = tools.RULE
|
||||||
BOOKING = tools.BOOKING
|
BOOKING = tools.BOOKING
|
||||||
ORDER = tools.ORDER
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// will turn into standards api hostnames
|
// will turn into standards api hostnames
|
||||||
@ -118,49 +118,6 @@ func InitDaemon(appName string) {
|
|||||||
beego.BConfig.WebConfig.StaticDir["/swagger"] = "swagger"
|
beego.BConfig.WebConfig.StaticDir["/swagger"] = "swagger"
|
||||||
}
|
}
|
||||||
|
|
||||||
type IDTokenClaims struct {
|
|
||||||
UserID string `json:"user_id"`
|
|
||||||
PeerID string `json:"peer_id"`
|
|
||||||
Groups []string `json:"groups"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// SessionClaims struct
|
|
||||||
type SessionClaims struct {
|
|
||||||
AccessToken map[string]interface{} `json:"access_token"`
|
|
||||||
IDToken IDTokenClaims `json:"id_token"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Claims struct
|
|
||||||
type Claims struct {
|
|
||||||
Session SessionClaims `json:"session"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExtractTokenInfo(request http.Request) (string, string, []string) {
|
|
||||||
reqToken := request.Header.Get("Authorization")
|
|
||||||
splitToken := strings.Split(reqToken, "Bearer ")
|
|
||||||
if len(splitToken) < 2 {
|
|
||||||
reqToken = ""
|
|
||||||
} else {
|
|
||||||
reqToken = splitToken[1]
|
|
||||||
}
|
|
||||||
if reqToken != "" {
|
|
||||||
token := strings.Split(reqToken, ".")
|
|
||||||
if len(token) > 2 {
|
|
||||||
bytes, err := base64.StdEncoding.DecodeString(token[2])
|
|
||||||
if err != nil {
|
|
||||||
return "", "", []string{}
|
|
||||||
}
|
|
||||||
var c Claims
|
|
||||||
err = json.Unmarshal(bytes, &c)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", []string{}
|
|
||||||
}
|
|
||||||
return c.Session.IDToken.UserID, c.Session.IDToken.PeerID, c.Session.IDToken.Groups
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", "", []string{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func Init(appName string) {
|
func Init(appName string) {
|
||||||
InitDaemon(appName)
|
InitDaemon(appName)
|
||||||
api := &tools.API{}
|
api := &tools.API{}
|
||||||
@ -196,6 +153,49 @@ func SetConfig(mongoUrl string, database string, natsUrl string, lokiUrl string,
|
|||||||
}()
|
}()
|
||||||
logs.CreateLogger("main")
|
logs.CreateLogger("main")
|
||||||
mongo.MONGOService.Init(models.GetModelsNames(), config.GetConfig()) // init the mongo service
|
mongo.MONGOService.Init(models.GetModelsNames(), config.GetConfig()) // init the mongo service
|
||||||
|
/*
|
||||||
|
Here we will check if the resource model is already stored in the database
|
||||||
|
If not we will store it
|
||||||
|
Resource model is the model that will define the structure of the resources
|
||||||
|
*/
|
||||||
|
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||||
|
for _, model := range []string{tools.DATA_RESOURCE.String(), tools.PROCESSING_RESOURCE.String(), tools.STORAGE_RESOURCE.String(), tools.COMPUTE_RESOURCE.String(), tools.WORKFLOW_RESOURCE.String()} {
|
||||||
|
data, code, _ := accessor.Search(nil, model)
|
||||||
|
if code == 404 || len(data) == 0 {
|
||||||
|
refs := map[string]string{}
|
||||||
|
m := map[string]resource_model.Model{}
|
||||||
|
// TODO Specify the model for each resource
|
||||||
|
// for now only processing is specified here (not an elegant way)
|
||||||
|
if model == tools.DATA_RESOURCE.String() || model == tools.STORAGE_RESOURCE.String() {
|
||||||
|
refs["path"] = "string"
|
||||||
|
}
|
||||||
|
if model == tools.PROCESSING_RESOURCE.String() {
|
||||||
|
m["command"] = resource_model.Model{
|
||||||
|
Type: "string",
|
||||||
|
ReadOnly: false,
|
||||||
|
}
|
||||||
|
m["args"] = resource_model.Model{
|
||||||
|
Type: "string",
|
||||||
|
ReadOnly: false,
|
||||||
|
}
|
||||||
|
m["env"] = resource_model.Model{
|
||||||
|
Type: "string",
|
||||||
|
ReadOnly: false,
|
||||||
|
}
|
||||||
|
m["volumes"] = resource_model.Model{
|
||||||
|
Type: "map[string]string",
|
||||||
|
ReadOnly: false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
accessor.StoreOne(&resource_model.ResourceModel{
|
||||||
|
ResourceType: model,
|
||||||
|
VarRefs: refs,
|
||||||
|
Model: map[string]map[string]resource_model.Model{
|
||||||
|
"container": m,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
return cfg
|
return cfg
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -225,77 +225,6 @@ func GetConfLoader() *onion.Onion {
|
|||||||
return config.GetConfLoader()
|
return config.GetConfLoader()
|
||||||
}
|
}
|
||||||
|
|
||||||
type Request struct {
|
|
||||||
collection LibDataEnum
|
|
||||||
user string
|
|
||||||
peerID string
|
|
||||||
groups []string
|
|
||||||
caller *tools.HTTPCaller
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewRequest(collection LibDataEnum, user string, peerID string, groups []string, caller *tools.HTTPCaller) *Request {
|
|
||||||
return &Request{collection: collection, user: user, peerID: peerID, groups: groups, caller: caller}
|
|
||||||
}
|
|
||||||
|
|
||||||
func ToScheduler(m interface{}) (n *workflow_execution.WorkflowSchedule) {
|
|
||||||
defer func() {
|
|
||||||
if r := recover(); r != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
return m.(*workflow_execution.WorkflowSchedule)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Request) Schedule(wfID string, scheduler *workflow_execution.WorkflowSchedule) (*workflow_execution.WorkflowSchedule, error) {
|
|
||||||
ws, _, _, err := scheduler.Schedules(wfID, &tools.APIRequest{
|
|
||||||
Caller: r.caller,
|
|
||||||
Username: r.user,
|
|
||||||
PeerID: r.peerID,
|
|
||||||
Groups: r.groups,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
fmt.Println("BAM", ws)
|
|
||||||
return ws, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Request) CheckBooking(wfID string, start string, end string, durationInS float64, cron string) bool {
|
|
||||||
ok, _, _, _, err := workflow_execution.NewScheduler(start, end, durationInS, cron).CheckBooking(wfID, &tools.APIRequest{
|
|
||||||
Caller: r.caller,
|
|
||||||
Username: r.user,
|
|
||||||
PeerID: r.peerID,
|
|
||||||
Groups: r.groups,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Request) DraftOrder(scheduler *workflow_execution.WorkflowSchedule) (*order.Order, error) {
|
|
||||||
o := &order.Order{}
|
|
||||||
if err := o.DraftOrder(scheduler, &tools.APIRequest{
|
|
||||||
Caller: r.caller,
|
|
||||||
Username: r.user,
|
|
||||||
PeerID: r.peerID,
|
|
||||||
Groups: r.groups,
|
|
||||||
}); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return o, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Request) PaymentTunnel(o *order.Order, scheduler *workflow_execution.WorkflowSchedule) error {
|
|
||||||
return o.Pay(scheduler, &tools.APIRequest{
|
|
||||||
Caller: r.caller,
|
|
||||||
Username: r.user,
|
|
||||||
PeerID: r.peerID,
|
|
||||||
Groups: r.groups,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Search will search for the data in the database
|
* Search will search for the data in the database
|
||||||
* @param filters *dbs.Filters
|
* @param filters *dbs.Filters
|
||||||
@ -304,19 +233,18 @@ func (r *Request) PaymentTunnel(o *order.Order, scheduler *workflow_execution.Wo
|
|||||||
* @param c ...*tools.HTTPCaller
|
* @param c ...*tools.HTTPCaller
|
||||||
* @return data LibDataShallow
|
* @return data LibDataShallow
|
||||||
*/
|
*/
|
||||||
func (r *Request) Search(filters *dbs.Filters, word string, isDraft bool) (data LibDataShallow) {
|
func Search(filters *dbs.Filters, word string, collection LibDataEnum, c ...*tools.HTTPCaller) (data LibDataShallow) {
|
||||||
defer func() { // recover the panic
|
defer func() { // recover the panic
|
||||||
if r := recover(); r != nil {
|
if r := recover(); r != nil {
|
||||||
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in Search : "+fmt.Sprintf("%v", r)))
|
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in Search : "+fmt.Sprintf("%v", r)))
|
||||||
data = LibDataShallow{Data: nil, Code: 500, Err: "Panic recovered in LoadAll : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
|
data = LibDataShallow{Data: nil, Code: 500, Err: "Panic recovered in LoadAll : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
d, code, err := models.Model(r.collection.EnumIndex()).GetAccessor(&tools.APIRequest{
|
var caller *tools.HTTPCaller // define the caller
|
||||||
Caller: r.caller,
|
if len(c) > 0 {
|
||||||
Username: r.user,
|
caller = c[0]
|
||||||
PeerID: r.peerID,
|
}
|
||||||
Groups: r.groups,
|
d, code, err := models.Model(collection.EnumIndex()).GetAccessor(caller).Search(filters, word)
|
||||||
}).Search(filters, word, isDraft)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
data = LibDataShallow{Data: d, Code: code, Err: err.Error()}
|
data = LibDataShallow{Data: d, Code: code, Err: err.Error()}
|
||||||
return
|
return
|
||||||
@ -331,19 +259,18 @@ func (r *Request) Search(filters *dbs.Filters, word string, isDraft bool) (data
|
|||||||
* @param c ...*tools.HTTPCaller
|
* @param c ...*tools.HTTPCaller
|
||||||
* @return data LibDataShallow
|
* @return data LibDataShallow
|
||||||
*/
|
*/
|
||||||
func (r *Request) LoadAll(isDraft bool) (data LibDataShallow) {
|
func LoadAll(collection LibDataEnum, c ...*tools.HTTPCaller) (data LibDataShallow) {
|
||||||
defer func() { // recover the panic
|
defer func() { // recover the panic
|
||||||
if r := recover(); r != nil {
|
if r := recover(); r != nil {
|
||||||
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in LoadAll : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
|
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in LoadAll : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
|
||||||
data = LibDataShallow{Data: nil, Code: 500, Err: "Panic recovered in LoadAll : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
|
data = LibDataShallow{Data: nil, Code: 500, Err: "Panic recovered in LoadAll : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
d, code, err := models.Model(r.collection.EnumIndex()).GetAccessor(&tools.APIRequest{
|
var caller *tools.HTTPCaller // define the caller
|
||||||
Caller: r.caller,
|
if len(c) > 0 {
|
||||||
Username: r.user,
|
caller = c[0]
|
||||||
PeerID: r.peerID,
|
}
|
||||||
Groups: r.groups,
|
d, code, err := models.Model(collection.EnumIndex()).GetAccessor(caller).LoadAll()
|
||||||
}).LoadAll(isDraft)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
data = LibDataShallow{Data: d, Code: code, Err: err.Error()}
|
data = LibDataShallow{Data: d, Code: code, Err: err.Error()}
|
||||||
return
|
return
|
||||||
@ -359,19 +286,18 @@ func (r *Request) LoadAll(isDraft bool) (data LibDataShallow) {
|
|||||||
* @param c ...*tools.HTTPCaller
|
* @param c ...*tools.HTTPCaller
|
||||||
* @return data LibData
|
* @return data LibData
|
||||||
*/
|
*/
|
||||||
func (r *Request) LoadOne(id string) (data LibData) {
|
func LoadOne(collection LibDataEnum, id string, c ...*tools.HTTPCaller) (data LibData) {
|
||||||
defer func() { // recover the panic
|
defer func() { // recover the panic
|
||||||
if r := recover(); r != nil {
|
if r := recover(); r != nil {
|
||||||
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in LoadOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
|
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in LoadOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
|
||||||
data = LibData{Data: nil, Code: 500, Err: "Panic recovered in LoadOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
|
data = LibData{Data: nil, Code: 500, Err: "Panic recovered in LoadOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
d, code, err := models.Model(r.collection.EnumIndex()).GetAccessor(&tools.APIRequest{
|
var caller *tools.HTTPCaller // define the caller
|
||||||
Caller: r.caller,
|
if len(c) > 0 {
|
||||||
Username: r.user,
|
caller = c[0]
|
||||||
PeerID: r.peerID,
|
}
|
||||||
Groups: r.groups,
|
d, code, err := models.Model(collection.EnumIndex()).GetAccessor(caller).LoadOne(id)
|
||||||
}).LoadOne(id)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
data = LibData{Data: d, Code: code, Err: err.Error()}
|
data = LibData{Data: d, Code: code, Err: err.Error()}
|
||||||
return
|
return
|
||||||
@ -388,20 +314,19 @@ func (r *Request) LoadOne(id string) (data LibData) {
|
|||||||
* @param c ...*tools.HTTPCaller
|
* @param c ...*tools.HTTPCaller
|
||||||
* @return data LibData
|
* @return data LibData
|
||||||
*/
|
*/
|
||||||
func (r *Request) UpdateOne(set map[string]interface{}, id string) (data LibData) {
|
func UpdateOne(collection LibDataEnum, set map[string]interface{}, id string, c ...*tools.HTTPCaller) (data LibData) {
|
||||||
defer func() { // recover the panic
|
defer func() { // recover the panic
|
||||||
if r := recover(); r != nil {
|
if r := recover(); r != nil {
|
||||||
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in UpdateOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
|
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in UpdateOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
|
||||||
data = LibData{Data: nil, Code: 500, Err: "Panic recovered in UpdateOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
|
data = LibData{Data: nil, Code: 500, Err: "Panic recovered in UpdateOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
model := models.Model(r.collection.EnumIndex())
|
var caller *tools.HTTPCaller // define the caller
|
||||||
d, code, err := model.GetAccessor(&tools.APIRequest{
|
if len(c) > 0 {
|
||||||
Caller: r.caller,
|
caller = c[0]
|
||||||
Username: r.user,
|
}
|
||||||
PeerID: r.peerID,
|
model := models.Model(collection.EnumIndex())
|
||||||
Groups: r.groups,
|
d, code, err := model.GetAccessor(caller).UpdateOne(model.Deserialize(set), id)
|
||||||
}).UpdateOne(model.Deserialize(set, model), id)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
data = LibData{Data: d, Code: code, Err: err.Error()}
|
data = LibData{Data: d, Code: code, Err: err.Error()}
|
||||||
return
|
return
|
||||||
@ -417,19 +342,18 @@ func (r *Request) UpdateOne(set map[string]interface{}, id string) (data LibData
|
|||||||
* @param c ...*tools.HTTPCaller
|
* @param c ...*tools.HTTPCaller
|
||||||
* @return data LibData
|
* @return data LibData
|
||||||
*/
|
*/
|
||||||
func (r *Request) DeleteOne(id string) (data LibData) {
|
func DeleteOne(collection LibDataEnum, id string, c ...*tools.HTTPCaller) (data LibData) {
|
||||||
defer func() { // recover the panic
|
defer func() { // recover the panic
|
||||||
if r := recover(); r != nil {
|
if r := recover(); r != nil {
|
||||||
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in DeleteOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
|
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in DeleteOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
|
||||||
data = LibData{Data: nil, Code: 500, Err: "Panic recovered in DeleteOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
|
data = LibData{Data: nil, Code: 500, Err: "Panic recovered in DeleteOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
d, code, err := models.Model(r.collection.EnumIndex()).GetAccessor(&tools.APIRequest{
|
var caller *tools.HTTPCaller // define the caller
|
||||||
Caller: r.caller,
|
if len(c) > 0 {
|
||||||
Username: r.user,
|
caller = c[0]
|
||||||
PeerID: r.peerID,
|
}
|
||||||
Groups: r.groups,
|
d, code, err := models.Model(collection.EnumIndex()).GetAccessor(caller).DeleteOne(id)
|
||||||
}).DeleteOne(id)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
data = LibData{Data: d, Code: code, Err: err.Error()}
|
data = LibData{Data: d, Code: code, Err: err.Error()}
|
||||||
return
|
return
|
||||||
@ -445,20 +369,19 @@ func (r *Request) DeleteOne(id string) (data LibData) {
|
|||||||
* @param c ...*tools.HTTPCaller
|
* @param c ...*tools.HTTPCaller
|
||||||
* @return data LibData
|
* @return data LibData
|
||||||
*/
|
*/
|
||||||
func (r *Request) StoreOne(object map[string]interface{}) (data LibData) {
|
func StoreOne(collection LibDataEnum, object map[string]interface{}, c ...*tools.HTTPCaller) (data LibData) {
|
||||||
defer func() { // recover the panic
|
defer func() { // recover the panic
|
||||||
if r := recover(); r != nil {
|
if r := recover(); r != nil {
|
||||||
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in StoreOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
|
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in StoreOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
|
||||||
data = LibData{Data: nil, Code: 500, Err: "Panic recovered in StoreOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
|
data = LibData{Data: nil, Code: 500, Err: "Panic recovered in StoreOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
model := models.Model(r.collection.EnumIndex())
|
var caller *tools.HTTPCaller // define the caller
|
||||||
d, code, err := model.GetAccessor(&tools.APIRequest{
|
if len(c) > 0 {
|
||||||
Caller: r.caller,
|
caller = c[0]
|
||||||
Username: r.user,
|
}
|
||||||
PeerID: r.peerID,
|
model := models.Model(collection.EnumIndex())
|
||||||
Groups: r.groups,
|
d, code, err := model.GetAccessor(caller).StoreOne(model.Deserialize(object))
|
||||||
}).StoreOne(model.Deserialize(object, model))
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
data = LibData{Data: d, Code: code, Err: err.Error()}
|
data = LibData{Data: d, Code: code, Err: err.Error()}
|
||||||
return
|
return
|
||||||
@ -474,20 +397,19 @@ func (r *Request) StoreOne(object map[string]interface{}) (data LibData) {
|
|||||||
* @param c ...*tools.HTTPCaller
|
* @param c ...*tools.HTTPCaller
|
||||||
* @return data LibData
|
* @return data LibData
|
||||||
*/
|
*/
|
||||||
func (r *Request) CopyOne(object map[string]interface{}) (data LibData) {
|
func CopyOne(collection LibDataEnum, object map[string]interface{}, c ...*tools.HTTPCaller) (data LibData) {
|
||||||
defer func() { // recover the panic
|
defer func() { // recover the panic
|
||||||
if r := recover(); r != nil {
|
if r := recover(); r != nil {
|
||||||
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in CopyOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
|
tools.UncatchedError = append(tools.UncatchedError, errors.New("Panic recovered in CopyOne : "+fmt.Sprintf("%v", r)+" - "+string(debug.Stack())))
|
||||||
data = LibData{Data: nil, Code: 500, Err: "Panic recovered in UpdateOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
|
data = LibData{Data: nil, Code: 500, Err: "Panic recovered in UpdateOne : " + fmt.Sprintf("%v", r) + " - " + string(debug.Stack())}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
model := models.Model(r.collection.EnumIndex())
|
var caller *tools.HTTPCaller // define the caller
|
||||||
d, code, err := model.GetAccessor(&tools.APIRequest{
|
if len(c) > 0 {
|
||||||
Caller: r.caller,
|
caller = c[0]
|
||||||
Username: r.user,
|
}
|
||||||
PeerID: r.peerID,
|
model := models.Model(collection.EnumIndex())
|
||||||
Groups: r.groups,
|
d, code, err := model.GetAccessor(caller).CopyOne(model.Deserialize(object))
|
||||||
}).CopyOne(model.Deserialize(object, model))
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
data = LibData{Data: d, Code: code, Err: err.Error()}
|
data = LibData{Data: d, Code: code, Err: err.Error()}
|
||||||
return
|
return
|
||||||
@ -498,81 +420,74 @@ func (r *Request) CopyOne(object map[string]interface{}) (data LibData) {
|
|||||||
|
|
||||||
// ================ CAST ========================= //
|
// ================ CAST ========================= //
|
||||||
|
|
||||||
func (l *LibData) ToDataResource() *resources.DataResource {
|
func (l *LibData) ToDataResource() *data.DataResource {
|
||||||
if l.Data.GetAccessor(nil).GetType() == tools.DATA_RESOURCE {
|
if l.Data.GetAccessor(nil).GetType() == tools.DATA_RESOURCE.String() {
|
||||||
return l.Data.(*resources.DataResource)
|
return l.Data.(*data.DataResource)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *LibData) ToComputeResource() *resources.ComputeResource {
|
func (l *LibData) ToComputeResource() *compute.ComputeResource {
|
||||||
if l.Data != nil && l.Data.GetAccessor(nil).GetType() == tools.COMPUTE_RESOURCE {
|
if l.Data != nil && l.Data.GetAccessor(nil).GetType() == tools.COMPUTE_RESOURCE.String() {
|
||||||
return l.Data.(*resources.ComputeResource)
|
return l.Data.(*compute.ComputeResource)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func (l *LibData) ToStorageResource() *resources.StorageResource {
|
func (l *LibData) ToStorageResource() *storage.StorageResource {
|
||||||
if l.Data.GetAccessor(nil).GetType() == tools.STORAGE_RESOURCE {
|
if l.Data.GetAccessor(nil).GetType() == tools.STORAGE_RESOURCE.String() {
|
||||||
return l.Data.(*resources.StorageResource)
|
return l.Data.(*storage.StorageResource)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func (l *LibData) ToProcessingResource() *resources.ProcessingResource {
|
func (l *LibData) ToProcessingResource() *processing.ProcessingResource {
|
||||||
if l.Data.GetAccessor(nil).GetType() == tools.PROCESSING_RESOURCE {
|
if l.Data.GetAccessor(nil).GetType() == tools.PROCESSING_RESOURCE.String() {
|
||||||
return l.Data.(*resources.ProcessingResource)
|
return l.Data.(*processing.ProcessingResource)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func (l *LibData) ToWorkflowResource() *resources.WorkflowResource {
|
func (l *LibData) ToWorkflowResource() *w.WorkflowResource {
|
||||||
if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW_RESOURCE {
|
if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW_RESOURCE.String() {
|
||||||
return l.Data.(*resources.WorkflowResource)
|
return l.Data.(*w.WorkflowResource)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func (l *LibData) ToPeer() *peer.Peer {
|
func (l *LibData) ToPeer() *peer.Peer {
|
||||||
if l.Data.GetAccessor(nil).GetType() == tools.PEER {
|
if l.Data.GetAccessor(nil).GetType() == tools.PEER.String() {
|
||||||
return l.Data.(*peer.Peer)
|
return l.Data.(*peer.Peer)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *LibData) ToWorkflow() *w2.Workflow {
|
func (l *LibData) ToWorkflow() *w2.Workflow {
|
||||||
if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW {
|
if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW.String() {
|
||||||
return l.Data.(*w2.Workflow)
|
return l.Data.(*w2.Workflow)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func (l *LibData) ToWorkspace() *workspace.Workspace {
|
func (l *LibData) ToWorkspace() *workspace.Workspace {
|
||||||
if l.Data.GetAccessor(nil).GetType() == tools.WORKSPACE {
|
if l.Data.GetAccessor(nil).GetType() == tools.WORKSPACE.String() {
|
||||||
return l.Data.(*workspace.Workspace)
|
return l.Data.(*workspace.Workspace)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *LibData) ToCollaborativeArea() *collaborative_area.CollaborativeArea {
|
func (l *LibData) ToCollaborativeArea() *collaborative_area.CollaborativeArea {
|
||||||
if l.Data.GetAccessor(nil).GetType() == tools.COLLABORATIVE_AREA {
|
if l.Data.GetAccessor(nil).GetType() == tools.COLLABORATIVE_AREA.String() {
|
||||||
return l.Data.(*collaborative_area.CollaborativeArea)
|
return l.Data.(*collaborative_area.CollaborativeArea)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *LibData) ToRule() *rule.Rule {
|
func (l *LibData) ToRule() *rule.Rule {
|
||||||
if l.Data.GetAccessor(nil).GetType() == tools.COLLABORATIVE_AREA {
|
if l.Data.GetAccessor(nil).GetType() == tools.COLLABORATIVE_AREA.String() {
|
||||||
return l.Data.(*rule.Rule)
|
return l.Data.(*rule.Rule)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *LibData) ToWorkflowExecution() *workflow_execution.WorkflowExecution {
|
func (l *LibData) ToWorkflowExecution() *workflow_execution.WorkflowExecution {
|
||||||
if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW_EXECUTION {
|
if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW_EXECUTION.String() {
|
||||||
return l.Data.(*workflow_execution.WorkflowExecution)
|
return l.Data.(*workflow_execution.WorkflowExecution)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *LibData) ToOrder() *order.Order {
|
|
||||||
if l.Data.GetAccessor(nil).GetType() == tools.ORDER {
|
|
||||||
return l.Data.(*order.Order)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
2
go.mod
2
go.mod
@ -27,7 +27,6 @@ require (
|
|||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/beorn7/perks v1.0.1 // indirect
|
github.com/beorn7/perks v1.0.1 // indirect
|
||||||
github.com/biter777/countries v1.7.5
|
|
||||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||||
github.com/gabriel-vasile/mimetype v1.4.4 // indirect
|
github.com/gabriel-vasile/mimetype v1.4.4 // indirect
|
||||||
@ -45,7 +44,6 @@ require (
|
|||||||
github.com/prometheus/client_model v0.5.0 // indirect
|
github.com/prometheus/client_model v0.5.0 // indirect
|
||||||
github.com/prometheus/common v0.48.0 // indirect
|
github.com/prometheus/common v0.48.0 // indirect
|
||||||
github.com/prometheus/procfs v0.12.0 // indirect
|
github.com/prometheus/procfs v0.12.0 // indirect
|
||||||
github.com/robfig/cron v1.2.0
|
|
||||||
github.com/shiena/ansicolor v0.0.0-20200904210342-c7312218db18 // indirect
|
github.com/shiena/ansicolor v0.0.0-20200904210342-c7312218db18 // indirect
|
||||||
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
|
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
|
||||||
github.com/xdg-go/scram v1.1.2 // indirect
|
github.com/xdg-go/scram v1.1.2 // indirect
|
||||||
|
4
go.sum
4
go.sum
@ -3,8 +3,6 @@ github.com/beego/beego/v2 v2.3.1 h1:7MUKMpJYzOXtCUsTEoXOxsDV/UcHw6CPbaWMlthVNsc=
|
|||||||
github.com/beego/beego/v2 v2.3.1/go.mod h1:5cqHsOHJIxkq44tBpRvtDe59GuVRVv/9/tyVDxd5ce4=
|
github.com/beego/beego/v2 v2.3.1/go.mod h1:5cqHsOHJIxkq44tBpRvtDe59GuVRVv/9/tyVDxd5ce4=
|
||||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||||
github.com/biter777/countries v1.7.5 h1:MJ+n3+rSxWQdqVJU8eBy9RqcdH6ePPn4PJHocVWUa+Q=
|
|
||||||
github.com/biter777/countries v1.7.5/go.mod h1:1HSpZ526mYqKJcpT5Ti1kcGQ0L0SrXWIaptUWjFfv2E=
|
|
||||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||||
@ -89,8 +87,6 @@ github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSz
|
|||||||
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
|
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
|
||||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||||
github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
|
|
||||||
github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
|
|
||||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||||
|
@ -1,12 +1,14 @@
|
|||||||
package booking
|
package booking
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
|
"github.com/google/uuid"
|
||||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -14,97 +16,76 @@ import (
|
|||||||
* Booking is a struct that represents a booking
|
* Booking is a struct that represents a booking
|
||||||
*/
|
*/
|
||||||
type Booking struct {
|
type Booking struct {
|
||||||
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
|
workflow_execution.WorkflowExecution // WorkflowExecution contains the workflow execution data
|
||||||
ExecutionsID string `json:"executions_id,omitempty" bson:"executions_id,omitempty" validate:"required"` // ExecutionsID is the ID of the executions
|
ComputeResourceID string `json:"compute_resource_id,omitempty" bson:"compute_resource_id,omitempty" validate:"required"` // ComputeResourceID is the ID of the compute resource specified in the booking
|
||||||
DestPeerID string `json:"dest_peer_id,omitempty"` // DestPeerID is the ID of the destination peer
|
|
||||||
WorkflowID string `json:"workflow_id,omitempty" bson:"workflow_id,omitempty"` // WorkflowID is the ID of the workflow
|
|
||||||
ExecutionID string `json:"execution_id,omitempty" bson:"execution_id,omitempty" validate:"required"`
|
|
||||||
State enum.BookingStatus `json:"state,omitempty" bson:"state,omitempty" validate:"required"` // State is the state of the booking
|
|
||||||
ExpectedStartDate time.Time `json:"expected_start_date,omitempty" bson:"expected_start_date,omitempty" validate:"required"` // ExpectedStartDate is the expected start date of the booking
|
|
||||||
ExpectedEndDate *time.Time `json:"expected_end_date,omitempty" bson:"expected_end_date,omitempty" validate:"required"` // ExpectedEndDate is the expected end date of the booking
|
|
||||||
|
|
||||||
RealStartDate *time.Time `json:"real_start_date,omitempty" bson:"real_start_date,omitempty"` // RealStartDate is the real start date of the booking
|
|
||||||
RealEndDate *time.Time `json:"real_end_date,omitempty" bson:"real_end_date,omitempty"` // RealEndDate is the real end date of the booking
|
|
||||||
|
|
||||||
ResourceType tools.DataType `json:"resource_type,omitempty" bson:"resource_type,omitempty" validate:"required"` // ResourceType is the type of the resource
|
|
||||||
ResourceID string `json:"resource_id,omitempty" bson:"resource_id,omitempty" validate:"required"` // could be a Compute or a Storage
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// CheckBooking checks if a booking is possible on a specific compute resource
|
// CheckBooking checks if a booking is possible on a specific compute resource
|
||||||
func (wfa *Booking) Check(id string, start time.Time, end *time.Time, parrallelAllowed int) (bool, error) {
|
func (wfa *Booking) CheckBooking(id string, start time.Time, end *time.Time) (bool, error) {
|
||||||
// check if
|
// check if
|
||||||
if end == nil {
|
if end == nil {
|
||||||
// if no end... then Book like a savage
|
// if no end... then Book like a savage
|
||||||
e := start.Add(time.Hour)
|
return true, nil
|
||||||
end = &e
|
|
||||||
}
|
}
|
||||||
accessor := NewAccessor(nil)
|
e := *end
|
||||||
|
accessor := wfa.GetAccessor(nil)
|
||||||
res, code, err := accessor.Search(&dbs.Filters{
|
res, code, err := accessor.Search(&dbs.Filters{
|
||||||
And: map[string][]dbs.Filter{ // check if there is a booking on the same compute resource by filtering on the compute_resource_id, the state and the execution date
|
And: map[string][]dbs.Filter{ // check if there is a booking on the same compute resource by filtering on the compute_resource_id, the state and the execution date
|
||||||
"resource_id": {{Operator: dbs.EQUAL.String(), Value: id}},
|
"compute_resource_id": {{Operator: dbs.EQUAL.String(), Value: id}},
|
||||||
"state": {{Operator: dbs.EQUAL.String(), Value: enum.DRAFT.EnumIndex()}},
|
"workflowexecution.state": {{Operator: dbs.EQUAL.String(), Value: workflow_execution.SCHEDULED.EnumIndex()}},
|
||||||
"expected_start_date": {
|
"workflowexecution.execution_date": {
|
||||||
{Operator: dbs.LTE.String(), Value: primitive.NewDateTimeFromTime(*end)},
|
{Operator: dbs.LTE.String(), Value: primitive.NewDateTimeFromTime(e)},
|
||||||
{Operator: dbs.GTE.String(), Value: primitive.NewDateTimeFromTime(start)},
|
{Operator: dbs.GTE.String(), Value: primitive.NewDateTimeFromTime(start)},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, "", wfa.IsDraft)
|
}, "")
|
||||||
if code != 200 {
|
if code != 200 {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
return len(res) <= parrallelAllowed, nil
|
return len(res) == 0, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Booking) GetDelayForLaunch() time.Duration {
|
// tool to convert the argo status to a state
|
||||||
return d.RealStartDate.Sub(d.ExpectedStartDate)
|
func (wfa *Booking) ArgoStatusToState(status string) *Booking {
|
||||||
|
wfa.WorkflowExecution.ArgoStatusToState(status)
|
||||||
|
return wfa
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Booking) GetDelayForFinishing() time.Duration {
|
func (ao *Booking) GetID() string {
|
||||||
if d.ExpectedEndDate == nil {
|
return ao.UUID
|
||||||
return time.Duration(0)
|
}
|
||||||
|
|
||||||
|
func (r *Booking) GenerateID() {
|
||||||
|
if r.UUID == "" {
|
||||||
|
r.UUID = uuid.New().String()
|
||||||
}
|
}
|
||||||
return d.RealEndDate.Sub(d.ExpectedStartDate)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Booking) GetUsualDuration() time.Duration {
|
func (d *Booking) GetName() string {
|
||||||
return d.ExpectedEndDate.Sub(d.ExpectedStartDate)
|
return d.UUID + "_" + d.ExecDate.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Booking) GetRealDuration() time.Duration {
|
func (d *Booking) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||||
if d.RealEndDate == nil || d.RealStartDate == nil {
|
data := New() // Create a new instance of the accessor
|
||||||
return time.Duration(0)
|
data.Init(tools.BOOKING, caller) // Initialize the accessor with the BOOKING model type
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *Booking) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||||
|
b, err := json.Marshal(j)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
return d.RealEndDate.Sub(*d.RealStartDate)
|
json.Unmarshal(b, dma)
|
||||||
|
return dma
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Booking) GetDelayOnDuration() time.Duration {
|
func (dma *Booking) Serialize() map[string]interface{} {
|
||||||
return d.GetRealDuration() - d.GetUsualDuration()
|
var m map[string]interface{}
|
||||||
}
|
b, err := json.Marshal(dma)
|
||||||
|
if err != nil {
|
||||||
func (d *Booking) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
return nil
|
||||||
return NewAccessor(request) // Create a new instance of the accessor
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Booking) VerifyAuth(request *tools.APIRequest) bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Booking) StoreDraftDefault() {
|
|
||||||
r.IsDraft = false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Booking) CanUpdate(set utils.DBObject) (bool, utils.DBObject) {
|
|
||||||
if !r.IsDraft && r.State != set.(*Booking).State || r.RealStartDate != set.(*Booking).RealStartDate || r.RealEndDate != set.(*Booking).RealEndDate {
|
|
||||||
return true, &Booking{
|
|
||||||
State: set.(*Booking).State,
|
|
||||||
RealStartDate: set.(*Booking).RealStartDate,
|
|
||||||
RealEndDate: set.(*Booking).RealEndDate,
|
|
||||||
} // only state can be updated
|
|
||||||
}
|
}
|
||||||
// TODO : HERE WE CAN HANDLE THE CASE WHERE THE BOOKING IS DELAYED OR EXCEEDING OR ending sooner
|
json.Unmarshal(b, &m)
|
||||||
return r.IsDraft, set
|
return m
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Booking) CanDelete() bool {
|
|
||||||
return r.IsDraft // only draft bookings can be deleted
|
|
||||||
}
|
}
|
||||||
|
@ -1,14 +1,12 @@
|
|||||||
package booking
|
package booking
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||||
"cloud.o-forge.io/core/oc-lib/logs"
|
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
|
||||||
)
|
)
|
||||||
|
|
||||||
type bookingMongoAccessor struct {
|
type bookingMongoAccessor struct {
|
||||||
@ -16,77 +14,90 @@ type bookingMongoAccessor struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new instance of the bookingMongoAccessor
|
// New creates a new instance of the bookingMongoAccessor
|
||||||
func NewAccessor(request *tools.APIRequest) *bookingMongoAccessor {
|
func New() *bookingMongoAccessor {
|
||||||
return &bookingMongoAccessor{
|
return &bookingMongoAccessor{}
|
||||||
AbstractAccessor: utils.AbstractAccessor{
|
|
||||||
Logger: logs.CreateLogger(tools.BOOKING.String()), // Create a logger with the data type
|
|
||||||
Request: request,
|
|
||||||
Type: tools.BOOKING,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Nothing special here, just the basic CRUD operations
|
* Nothing special here, just the basic CRUD operations
|
||||||
*/
|
*/
|
||||||
func (a *bookingMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
func (wfa *bookingMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||||
return utils.GenericDeleteOne(id, a)
|
return wfa.GenericDeleteOne(id, wfa)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *bookingMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
func (wfa *bookingMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||||
if set.(*Booking).State == 0 {
|
return wfa.GenericUpdateOne(set, id, wfa, &Booking{})
|
||||||
return nil, 400, errors.New("state is required")
|
}
|
||||||
|
|
||||||
|
func (wfa *bookingMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
|
return wfa.GenericStoreOne(data, wfa)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wfa *bookingMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
|
return wfa.GenericStoreOne(data, wfa)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wfa *bookingMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||||
|
var workflow Booking
|
||||||
|
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
}
|
}
|
||||||
realSet := &Booking{State: set.(*Booking).State}
|
res_mongo.Decode(&workflow)
|
||||||
return utils.GenericUpdateOne(realSet, id, a, &Booking{})
|
if workflow.State == workflow_execution.SCHEDULED && time.Now().UTC().After(*workflow.ExecDate) {
|
||||||
}
|
workflow.State = workflow_execution.FORGOTTEN
|
||||||
|
wfa.GenericRawUpdateOne(&workflow, id, wfa)
|
||||||
func (a *bookingMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
|
||||||
return utils.GenericStoreOne(data, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *bookingMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
|
||||||
return utils.GenericStoreOne(data, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *bookingMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
|
||||||
return utils.GenericLoadOne[*Booking](id, func(d utils.DBObject) (utils.DBObject, int, error) {
|
|
||||||
now := time.Now()
|
|
||||||
now = now.Add(time.Second * -60)
|
|
||||||
if d.(*Booking).State == enum.DRAFT && now.UTC().After(d.(*Booking).ExpectedStartDate) {
|
|
||||||
return utils.GenericDeleteOne(d.GetID(), a)
|
|
||||||
}
|
}
|
||||||
if (d.(*Booking).ExpectedEndDate) == nil {
|
return &workflow, 200, nil
|
||||||
d.(*Booking).State = enum.FORGOTTEN
|
|
||||||
utils.GenericRawUpdateOne(d, id, a)
|
|
||||||
} else if d.(*Booking).State == enum.SCHEDULED && now.UTC().After(d.(*Booking).ExpectedStartDate) {
|
|
||||||
d.(*Booking).State = enum.DELAYED
|
|
||||||
utils.GenericRawUpdateOne(d, id, a)
|
|
||||||
}
|
|
||||||
return d, 200, nil
|
|
||||||
}, a)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *bookingMongoAccessor) LoadAll(isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
func (wfa *bookingMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||||
return utils.GenericLoadAll[*Booking](a.getExec(), isDraft, a)
|
objs := []utils.ShallowDBObject{}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []Booking
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
for _, r := range results {
|
||||||
|
if r.State == workflow_execution.SCHEDULED && time.Now().UTC().After(*r.ExecDate) {
|
||||||
|
r.State = workflow_execution.FORGOTTEN
|
||||||
|
wfa.GenericRawUpdateOne(&r, r.UUID, wfa)
|
||||||
|
}
|
||||||
|
objs = append(objs, &r.AbstractObject) // Warning only AbstractObject is returned
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *bookingMongoAccessor) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
// Search is a function that searches for a booking in the database
|
||||||
return utils.GenericSearch[*Booking](filters, search, (&Booking{}).GetObjectFilters(search), a.getExec(), isDraft, a)
|
func (wfa *bookingMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||||
}
|
objs := []utils.ShallowDBObject{}
|
||||||
|
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||||
func (a *bookingMongoAccessor) getExec() func(utils.DBObject) utils.ShallowDBObject {
|
filters = &dbs.Filters{
|
||||||
return func(d utils.DBObject) utils.ShallowDBObject {
|
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
|
||||||
now := time.Now()
|
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
now = now.Add(time.Second * -60)
|
},
|
||||||
if d.(*Booking).State == enum.DRAFT && now.UTC().After(d.(*Booking).ExpectedStartDate) {
|
}
|
||||||
utils.GenericDeleteOne(d.GetID(), a)
|
}
|
||||||
return nil
|
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||||
}
|
if err != nil {
|
||||||
if d.(*Booking).State == enum.SCHEDULED && now.UTC().After(d.(*Booking).ExpectedStartDate) {
|
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||||
d.(*Booking).State = enum.DELAYED
|
return nil, code, err
|
||||||
utils.GenericRawUpdateOne(d, d.GetID(), a)
|
}
|
||||||
}
|
var results []Booking
|
||||||
return d
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
}
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
for _, r := range results {
|
||||||
|
if r.State == workflow_execution.SCHEDULED && time.Now().UTC().After(*r.ExecDate) {
|
||||||
|
r.State = workflow_execution.FORGOTTEN
|
||||||
|
wfa.GenericRawUpdateOne(&r, r.UUID, wfa)
|
||||||
|
}
|
||||||
|
objs = append(objs, &r)
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
}
|
}
|
||||||
|
@ -1,16 +1,16 @@
|
|||||||
package collaborative_area
|
package collaborative_area
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"slices"
|
"encoding/json"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/config"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
|
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
"cloud.o-forge.io/core/oc-lib/models/peer"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
w "cloud.o-forge.io/core/oc-lib/models/workflow"
|
w "cloud.o-forge.io/core/oc-lib/models/workflow"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workspace"
|
"cloud.o-forge.io/core/oc-lib/models/workspace"
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
|
"github.com/google/uuid"
|
||||||
)
|
)
|
||||||
|
|
||||||
type CollaborativeAreaRule struct {
|
type CollaborativeAreaRule struct {
|
||||||
@ -27,13 +27,14 @@ type CollaborativeAreaRule struct {
|
|||||||
type CollaborativeArea struct {
|
type CollaborativeArea struct {
|
||||||
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
|
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
|
||||||
IsSent bool `json:"is_sent" bson:"-"` // IsSent is a flag that indicates if the workspace is sent
|
IsSent bool `json:"is_sent" bson:"-"` // IsSent is a flag that indicates if the workspace is sent
|
||||||
|
CreatorID string `json:"peer_id,omitempty" bson:"peer_id,omitempty" validate:"required"` // CreatorID is the ID of the creator
|
||||||
Version string `json:"version,omitempty" bson:"version,omitempty"` // Version is the version of the workspace
|
Version string `json:"version,omitempty" bson:"version,omitempty"` // Version is the version of the workspace
|
||||||
Description string `json:"description,omitempty" bson:"description,omitempty" validate:"required"` // Description is the description of the workspace
|
Description string `json:"description,omitempty" bson:"description,omitempty" validate:"required"` // Description is the description of the workspace
|
||||||
CollaborativeAreaRule *CollaborativeAreaRule `json:"collaborative_area,omitempty" bson:"collaborative_area,omitempty"` // CollaborativeArea is the collaborative area of the workspace
|
CollaborativeAreaRule *CollaborativeAreaRule `json:"collaborative_area,omitempty" bson:"collaborative_area,omitempty"` // CollaborativeArea is the collaborative area of the workspace
|
||||||
Attributes map[string]interface{} `json:"attributes,omitempty" bson:"attributes,omitempty"` // Attributes is the attributes of the workspace (TODO)
|
Attributes map[string]interface{} `json:"attributes,omitempty" bson:"attributes,omitempty"` // Attributes is the attributes of the workspace (TODO)
|
||||||
Workspaces []string `json:"workspaces" bson:"workspaces"` // Workspaces is the workspaces of the workspace
|
Workspaces []string `json:"workspaces" bson:"workspaces"` // Workspaces is the workspaces of the workspace
|
||||||
Workflows []string `json:"workflows" bson:"workflows"` // Workflows is the workflows of the workspace
|
Workflows []string `json:"workflows" bson:"workflows"` // Workflows is the workflows of the workspace
|
||||||
AllowedPeersGroup map[string][]string `json:"allowed_peers_group" bson:"allowed_peers_group"` // AllowedPeersGroup is the group of allowed peers
|
Peers []string `json:"peers" bson:"peers"` // Peers is the peers of the workspace
|
||||||
Rules []string `json:"rules" bson:"rules,omitempty"` // Rules is the rules of the workspace
|
Rules []string `json:"rules" bson:"rules,omitempty"` // Rules is the rules of the workspace
|
||||||
|
|
||||||
SharedRules []*rule.Rule `json:"shared_rules,omitempty" bson:"-"` // SharedRules is the shared rules of the workspace
|
SharedRules []*rule.Rule `json:"shared_rules,omitempty" bson:"-"` // SharedRules is the shared rules of the workspace
|
||||||
@ -42,62 +43,41 @@ type CollaborativeArea struct {
|
|||||||
SharedPeers []*peer.Peer `json:"shared_peers,omitempty" bson:"-"` // SharedPeers is the shared peers of the workspace
|
SharedPeers []*peer.Peer `json:"shared_peers,omitempty" bson:"-"` // SharedPeers is the shared peers of the workspace
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ao *CollaborativeArea) Clear(peerID string) {
|
func (ao *CollaborativeArea) GetID() string {
|
||||||
if ao.AllowedPeersGroup == nil {
|
return ao.UUID
|
||||||
ao.AllowedPeersGroup = map[string][]string{}
|
|
||||||
}
|
|
||||||
ao.CreatorID = peerID
|
|
||||||
if config.GetConfig().Whitelist {
|
|
||||||
ao.AllowedPeersGroup[peerID] = []string{"*"}
|
|
||||||
} else {
|
|
||||||
ao.AllowedPeersGroup[peerID] = []string{}
|
|
||||||
}
|
|
||||||
// then reset the shared fields
|
|
||||||
if ao.Workspaces == nil {
|
|
||||||
ao.Workspaces = []string{}
|
|
||||||
}
|
|
||||||
if ao.Workflows == nil {
|
|
||||||
ao.Workflows = []string{}
|
|
||||||
}
|
|
||||||
if ao.Rules == nil {
|
|
||||||
ao.Rules = []string{}
|
|
||||||
}
|
|
||||||
if ao.CollaborativeAreaRule == nil {
|
|
||||||
ao.CollaborativeAreaRule = &CollaborativeAreaRule{
|
|
||||||
ShareMode: "private",
|
|
||||||
ExploitedBy: "collaborators only",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ao.CollaborativeAreaRule.CreatedAt = time.Now().UTC()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ao *CollaborativeArea) VerifyAuth(request *tools.APIRequest) bool {
|
func (r *CollaborativeArea) GenerateID() {
|
||||||
if (ao.AllowedPeersGroup != nil || config.GetConfig().Whitelist) && request != nil {
|
if r.UUID == "" {
|
||||||
if grps, ok := ao.AllowedPeersGroup[request.PeerID]; ok || config.GetConfig().Whitelist {
|
r.UUID = uuid.New().String()
|
||||||
if slices.Contains(grps, "*") || (!ok && config.GetConfig().Whitelist) {
|
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
for _, grp := range grps {
|
|
||||||
if slices.Contains(request.Groups, grp) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ao.AbstractObject.VerifyAuth(request)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *CollaborativeArea) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
func (d *CollaborativeArea) GetName() string {
|
||||||
return NewAccessor(request) // Create a new instance of the accessor
|
return d.Name
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *CollaborativeArea) Trim() *CollaborativeArea {
|
func (d *CollaborativeArea) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||||
return d
|
data := New() // Create a new instance of the accessor
|
||||||
|
data.Init(tools.COLLABORATIVE_AREA, caller) // Initialize the accessor with the SHARED_WORKSPACE model type
|
||||||
|
return data
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *CollaborativeArea) StoreDraftDefault() {
|
func (dma *CollaborativeArea) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||||
d.AllowedPeersGroup = map[string][]string{
|
b, err := json.Marshal(j)
|
||||||
d.CreatorID: []string{"*"},
|
if err != nil {
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
d.IsDraft = false
|
json.Unmarshal(b, dma)
|
||||||
|
return dma
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *CollaborativeArea) Serialize() map[string]interface{} {
|
||||||
|
var m map[string]interface{}
|
||||||
|
b, err := json.Marshal(dma)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, &m)
|
||||||
|
return m
|
||||||
}
|
}
|
||||||
|
@ -1,15 +1,16 @@
|
|||||||
package collaborative_area
|
package collaborative_area
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"slices"
|
"slices"
|
||||||
|
"time"
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||||
"cloud.o-forge.io/core/oc-lib/logs"
|
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
|
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
"cloud.o-forge.io/core/oc-lib/models/peer"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workflow"
|
|
||||||
w "cloud.o-forge.io/core/oc-lib/models/workflow"
|
w "cloud.o-forge.io/core/oc-lib/models/workflow"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workspace"
|
"cloud.o-forge.io/core/oc-lib/models/workspace"
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
@ -18,150 +19,43 @@ import (
|
|||||||
// SharedWorkspace is a struct that represents a collaborative area
|
// SharedWorkspace is a struct that represents a collaborative area
|
||||||
type collaborativeAreaMongoAccessor struct {
|
type collaborativeAreaMongoAccessor struct {
|
||||||
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
|
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
|
||||||
|
|
||||||
workspaceAccessor utils.Accessor
|
|
||||||
workflowAccessor utils.Accessor
|
|
||||||
peerAccessor utils.Accessor
|
|
||||||
ruleAccessor utils.Accessor
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewAccessor(request *tools.APIRequest) *collaborativeAreaMongoAccessor {
|
// New creates a new instance of the collaborativeAreaMongoAccessor
|
||||||
return &collaborativeAreaMongoAccessor{
|
func New() *collaborativeAreaMongoAccessor {
|
||||||
AbstractAccessor: utils.AbstractAccessor{
|
return &collaborativeAreaMongoAccessor{}
|
||||||
Logger: logs.CreateLogger(tools.COLLABORATIVE_AREA.String()), // Create a logger with the data type
|
|
||||||
Request: request,
|
|
||||||
Type: tools.COLLABORATIVE_AREA,
|
|
||||||
},
|
|
||||||
workspaceAccessor: (&workspace.Workspace{}).GetAccessor(request),
|
|
||||||
workflowAccessor: (&w.Workflow{}).GetAccessor(request),
|
|
||||||
peerAccessor: (&peer.Peer{}).GetAccessor(request),
|
|
||||||
ruleAccessor: (&rule.Rule{}).GetAccessor(request),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteOne deletes a collaborative area from the database, given its ID, it automatically share to peers if the workspace is shared
|
// DeleteOne deletes a collaborative area from the database, given its ID, it automatically share to peers if the workspace is shared
|
||||||
func (a *collaborativeAreaMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
func (wfa *collaborativeAreaMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||||
set, code, err := a.LoadOne(id)
|
set, code, _ := wfa.LoadOne(id)
|
||||||
if code != 200 {
|
if code == 200 { // always delete on peers than recreate
|
||||||
return nil, code, err
|
wfa.deleteToPeer(set.(*CollaborativeArea))
|
||||||
}
|
}
|
||||||
a.deleteToPeer(set.(*CollaborativeArea))
|
wfa.sharedWorkflow(&CollaborativeArea{}, id) // create all shared workflows
|
||||||
a.sharedWorkflow(&CollaborativeArea{}, id) // create all shared workflows
|
wfa.sharedWorkspace(&CollaborativeArea{}, id) // create all collaborative areas
|
||||||
a.sharedWorkspace(&CollaborativeArea{}, id) // create all collaborative areas
|
return wfa.GenericDeleteOne(id, wfa) // then add on yours
|
||||||
return utils.GenericDeleteOne(id, a) // then add on yours
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateOne updates a collaborative area in the database, given its ID and the new data, it automatically share to peers if the workspace is shared
|
|
||||||
func (a *collaborativeAreaMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
|
||||||
res, code, err := utils.GenericUpdateOne(set.(*CollaborativeArea).Trim(), id, a, &CollaborativeArea{})
|
|
||||||
// a.deleteToPeer(res.(*CollaborativeArea)) // delete the collaborative area on the peer
|
|
||||||
a.sharedWorkflow(res.(*CollaborativeArea), id) // replace all shared workflows
|
|
||||||
a.sharedWorkspace(res.(*CollaborativeArea), id) // replace all collaborative areas (not shared worspace obj but workspace one)
|
|
||||||
// a.sendToPeer(res.(*CollaborativeArea)) // send the collaborative area (collaborative area object) to the peers
|
|
||||||
return res, code, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// StoreOne stores a collaborative area in the database, it automatically share to peers if the workspace is shared
|
|
||||||
func (a *collaborativeAreaMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
|
||||||
_, id := (&peer.Peer{}).IsMySelf() // get the local peer
|
|
||||||
data.(*CollaborativeArea).Clear(id) // set the creator
|
|
||||||
// retrieve or proper peer
|
|
||||||
if data.(*CollaborativeArea).CollaborativeAreaRule != nil {
|
|
||||||
data.(*CollaborativeArea).CollaborativeAreaRule = &CollaborativeAreaRule{}
|
|
||||||
}
|
|
||||||
data.(*CollaborativeArea).CollaborativeAreaRule.Creator = id
|
|
||||||
d, code, err := utils.GenericStoreOne(data.(*CollaborativeArea).Trim(), a)
|
|
||||||
if code == 200 {
|
|
||||||
a.sharedWorkflow(d.(*CollaborativeArea), d.GetID()) // create all shared workflows
|
|
||||||
a.sharedWorkspace(d.(*CollaborativeArea), d.GetID()) // create all collaborative areas
|
|
||||||
a.sendToPeer(d.(*CollaborativeArea)) // send the collaborative area (collaborative area object) to the peers
|
|
||||||
}
|
|
||||||
return data, code, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyOne copies a CollaborativeArea in the database
|
|
||||||
func (a *collaborativeAreaMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
|
||||||
return a.StoreOne(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func filterEnrich[T utils.ShallowDBObject](arr []string, isDrafted bool, a utils.Accessor) []T {
|
|
||||||
var new []T
|
|
||||||
res, code, _ := a.Search(&dbs.Filters{
|
|
||||||
Or: map[string][]dbs.Filter{
|
|
||||||
"abstractobject.id": {{Operator: dbs.IN.String(), Value: arr}},
|
|
||||||
},
|
|
||||||
}, "", isDrafted)
|
|
||||||
fmt.Println(res, arr, isDrafted, a)
|
|
||||||
if code == 200 {
|
|
||||||
for _, r := range res {
|
|
||||||
new = append(new, r.(T))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return new
|
|
||||||
}
|
|
||||||
|
|
||||||
// enrich is a function that enriches the CollaborativeArea with the shared objects
|
|
||||||
func (a *collaborativeAreaMongoAccessor) enrich(sharedWorkspace *CollaborativeArea, isDrafted bool, request *tools.APIRequest) *CollaborativeArea {
|
|
||||||
sharedWorkspace.SharedWorkspaces = filterEnrich[*workspace.Workspace](sharedWorkspace.Workspaces, isDrafted, a.workspaceAccessor)
|
|
||||||
sharedWorkspace.SharedWorkflows = filterEnrich[*workflow.Workflow](sharedWorkspace.Workflows, isDrafted, a.workflowAccessor)
|
|
||||||
peerskey := []string{}
|
|
||||||
fmt.Println("PEERS 1", sharedWorkspace.AllowedPeersGroup)
|
|
||||||
for k, v := range sharedWorkspace.AllowedPeersGroup {
|
|
||||||
canFound := false
|
|
||||||
for _, t := range request.Groups {
|
|
||||||
if slices.Contains(v, t) {
|
|
||||||
canFound = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fmt.Println("PEERS 2", canFound, v)
|
|
||||||
if slices.Contains(v, "*") || canFound {
|
|
||||||
peerskey = append(peerskey, k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fmt.Println("PEERS", peerskey)
|
|
||||||
sharedWorkspace.SharedPeers = filterEnrich[*peer.Peer](peerskey, isDrafted, a.peerAccessor)
|
|
||||||
sharedWorkspace.SharedRules = filterEnrich[*rule.Rule](sharedWorkspace.Rules, isDrafted, a.ruleAccessor)
|
|
||||||
return sharedWorkspace
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *collaborativeAreaMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
|
||||||
return utils.GenericLoadOne[*CollaborativeArea](id, func(d utils.DBObject) (utils.DBObject, int, error) {
|
|
||||||
return a.enrich(d.(*CollaborativeArea), false, a.Request), 200, nil
|
|
||||||
}, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *collaborativeAreaMongoAccessor) LoadAll(isDrafted bool) ([]utils.ShallowDBObject, int, error) {
|
|
||||||
return utils.GenericLoadAll[*CollaborativeArea](func(d utils.DBObject) utils.ShallowDBObject {
|
|
||||||
return a.enrich(d.(*CollaborativeArea), isDrafted, a.Request)
|
|
||||||
}, isDrafted, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *collaborativeAreaMongoAccessor) Search(filters *dbs.Filters, search string, isDrafted bool) ([]utils.ShallowDBObject, int, error) {
|
|
||||||
return utils.GenericSearch[*CollaborativeArea](filters, search, (&CollaborativeArea{}).GetObjectFilters(search),
|
|
||||||
func(d utils.DBObject) utils.ShallowDBObject {
|
|
||||||
return a.enrich(d.(*CollaborativeArea), isDrafted, a.Request)
|
|
||||||
}, isDrafted, a)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
sharedWorkspace is a function that shares the collaborative area to the peers
|
sharedWorkspace is a function that shares the collaborative area to the peers
|
||||||
*/
|
*/
|
||||||
func (a *collaborativeAreaMongoAccessor) sharedWorkspace(shared *CollaborativeArea, id string) {
|
func (wfa *collaborativeAreaMongoAccessor) sharedWorkspace(shared *CollaborativeArea, id string) {
|
||||||
eldest, code, _ := a.LoadOne(id) // get the eldest
|
eldest, code, _ := wfa.LoadOne(id) // get the eldest
|
||||||
|
accessor := (&workspace.Workspace{}).GetAccessor(nil)
|
||||||
if code == 200 {
|
if code == 200 {
|
||||||
eld := eldest.(*CollaborativeArea)
|
eld := eldest.(*CollaborativeArea)
|
||||||
if eld.Workspaces != nil { // update all your workspaces in the eldest by replacing shared ref by an empty string
|
if eld.Workspaces != nil { // update all your workspaces in the eldest by replacing shared ref by an empty string
|
||||||
for _, v := range eld.Workspaces {
|
for _, v := range eld.Workspaces {
|
||||||
a.workspaceAccessor.UpdateOne(&workspace.Workspace{Shared: ""}, v)
|
accessor.UpdateOne(&workspace.Workspace{Shared: ""}, v)
|
||||||
if a.GetCaller() != nil || a.GetCaller().URLS == nil || a.GetCaller().URLS[tools.WORKSPACE] == nil {
|
if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKSPACE] == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
paccess := (&peer.Peer{}) // send to all peers
|
paccess := (&peer.Peer{}) // send to all peers
|
||||||
for k := range shared.AllowedPeersGroup { // delete the collaborative area on the peer
|
for _, p := range shared.Peers { // delete the collaborative area on the peer
|
||||||
b, err := paccess.LaunchPeerExecution(k, v, tools.WORKSPACE, tools.DELETE, nil, a.GetCaller())
|
b, err := paccess.LaunchPeerExecution(p, v, tools.WORKSPACE, tools.DELETE, nil, wfa.Caller)
|
||||||
if err != nil && b == nil {
|
if err != nil && b == nil {
|
||||||
a.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
|
wfa.Logger.Error().Msg("Could not send to peer " + p + ". Error: " + err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -169,20 +63,20 @@ func (a *collaborativeAreaMongoAccessor) sharedWorkspace(shared *CollaborativeAr
|
|||||||
}
|
}
|
||||||
if shared.Workspaces != nil {
|
if shared.Workspaces != nil {
|
||||||
for _, v := range shared.Workspaces { // update all the collaborative areas
|
for _, v := range shared.Workspaces { // update all the collaborative areas
|
||||||
workspace, code, _ := a.workspaceAccessor.UpdateOne(&workspace.Workspace{Shared: shared.UUID}, v) // add the shared ref to workspace
|
workspace, code, _ := accessor.UpdateOne(&workspace.Workspace{Shared: shared.UUID}, v) // add the shared ref to workspace
|
||||||
if a.GetCaller() != nil || a.GetCaller().URLS == nil || a.GetCaller().URLS[tools.WORKSPACE] == nil {
|
if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKSPACE] == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
for k := range shared.AllowedPeersGroup {
|
for _, p := range shared.Peers {
|
||||||
if code != 200 {
|
if code != 200 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
paccess := (&peer.Peer{}) // send to all peers, add the collaborative area on the peer
|
paccess := (&peer.Peer{}) // send to all peers, add the collaborative area on the peer
|
||||||
s := workspace.Serialize(workspace)
|
s := workspace.Serialize()
|
||||||
s["name"] = fmt.Sprintf("%v", s["name"]) + "_" + k
|
s["name"] = fmt.Sprintf("%v", s["name"]) + "_" + p
|
||||||
b, err := paccess.LaunchPeerExecution(k, v, tools.WORKSPACE, tools.POST, s, a.GetCaller())
|
b, err := paccess.LaunchPeerExecution(p, v, tools.WORKSPACE, tools.POST, s, wfa.Caller)
|
||||||
if err != nil && b == nil {
|
if err != nil && b == nil {
|
||||||
a.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
|
wfa.Logger.Error().Msg("Could not send to peer " + p + ". Error: " + err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -192,13 +86,14 @@ func (a *collaborativeAreaMongoAccessor) sharedWorkspace(shared *CollaborativeAr
|
|||||||
}
|
}
|
||||||
|
|
||||||
// sharedWorkflow is a function that shares the shared workflow to the peers
|
// sharedWorkflow is a function that shares the shared workflow to the peers
|
||||||
func (a *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeArea, id string) {
|
func (wfa *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeArea, id string) {
|
||||||
eldest, code, _ := a.LoadOne(id) // get the eldest
|
accessor := (&w.Workflow{}).GetAccessor(nil)
|
||||||
|
eldest, code, _ := wfa.LoadOne(id) // get the eldest
|
||||||
if code == 200 {
|
if code == 200 {
|
||||||
eld := eldest.(*CollaborativeArea)
|
eld := eldest.(*CollaborativeArea)
|
||||||
if eld.Workflows != nil {
|
if eld.Workflows != nil {
|
||||||
for _, v := range eld.Workflows {
|
for _, v := range eld.Workflows {
|
||||||
data, code, _ := a.workflowAccessor.LoadOne(v)
|
data, code, _ := accessor.LoadOne(v)
|
||||||
if code == 200 {
|
if code == 200 {
|
||||||
s := data.(*w.Workflow)
|
s := data.(*w.Workflow)
|
||||||
new := []string{}
|
new := []string{}
|
||||||
@ -209,15 +104,15 @@ func (a *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeAre
|
|||||||
} // kick the shared reference in your old shared workflow
|
} // kick the shared reference in your old shared workflow
|
||||||
n := &w.Workflow{}
|
n := &w.Workflow{}
|
||||||
n.Shared = new
|
n.Shared = new
|
||||||
a.workflowAccessor.UpdateOne(n, v)
|
accessor.UpdateOne(n, v)
|
||||||
if a.GetCaller() != nil || a.GetCaller().URLS == nil || a.GetCaller().URLS[tools.WORKFLOW] == nil {
|
if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKFLOW] == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
paccess := (&peer.Peer{}) // send to all peers
|
paccess := (&peer.Peer{}) // send to all peers
|
||||||
for k := range shared.AllowedPeersGroup { // delete the shared workflow on the peer
|
for _, p := range shared.Peers { // delete the shared workflow on the peer
|
||||||
b, err := paccess.LaunchPeerExecution(k, v, tools.WORKFLOW, tools.DELETE, nil, a.GetCaller())
|
b, err := paccess.LaunchPeerExecution(p, v, tools.WORKFLOW, tools.DELETE, nil, wfa.Caller)
|
||||||
if err != nil && b == nil {
|
if err != nil && b == nil {
|
||||||
a.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
|
wfa.Logger.Error().Msg("Could not send to peer " + p + ". Error: " + err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -226,23 +121,23 @@ func (a *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeAre
|
|||||||
}
|
}
|
||||||
if shared.Workflows != nil { // update all the shared workflows
|
if shared.Workflows != nil { // update all the shared workflows
|
||||||
for _, v := range shared.Workflows {
|
for _, v := range shared.Workflows {
|
||||||
data, code, _ := a.workflowAccessor.LoadOne(v)
|
data, code, _ := accessor.LoadOne(v)
|
||||||
if code == 200 {
|
if code == 200 {
|
||||||
s := data.(*w.Workflow)
|
s := data.(*w.Workflow)
|
||||||
if !slices.Contains(s.Shared, id) {
|
if !slices.Contains(s.Shared, id) {
|
||||||
s.Shared = append(s.Shared, id)
|
s.Shared = append(s.Shared, id)
|
||||||
workflow, code, _ := a.workflowAccessor.UpdateOne(s, v)
|
workflow, code, _ := accessor.UpdateOne(s, v)
|
||||||
if a.GetCaller() != nil || a.GetCaller().URLS == nil || a.GetCaller().URLS[tools.WORKFLOW] == nil {
|
if wfa.Caller != nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.WORKFLOW] == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
paccess := (&peer.Peer{})
|
paccess := (&peer.Peer{})
|
||||||
for k := range shared.AllowedPeersGroup { // send to all peers
|
for _, p := range shared.Peers { // send to all peers
|
||||||
if code == 200 {
|
if code == 200 {
|
||||||
s := workflow.Serialize(workflow) // add the shared workflow on the peer
|
s := workflow.Serialize() // add the shared workflow on the peer
|
||||||
s["name"] = fmt.Sprintf("%v", s["name"]) + "_" + k
|
s["name"] = fmt.Sprintf("%v", s["name"]) + "_" + p
|
||||||
b, err := paccess.LaunchPeerExecution(k, shared.UUID, tools.WORKFLOW, tools.POST, s, a.GetCaller())
|
b, err := paccess.LaunchPeerExecution(p, shared.UUID, tools.WORKFLOW, tools.POST, s, wfa.Caller)
|
||||||
if err != nil && b == nil {
|
if err != nil && b == nil {
|
||||||
a.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
|
wfa.Logger.Error().Msg("Could not send to peer " + p + ". Error: " + err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -255,29 +150,194 @@ func (a *collaborativeAreaMongoAccessor) sharedWorkflow(shared *CollaborativeAre
|
|||||||
}
|
}
|
||||||
|
|
||||||
// sharedWorkspace is a function that shares the collaborative area to the peers
|
// sharedWorkspace is a function that shares the collaborative area to the peers
|
||||||
func (a *collaborativeAreaMongoAccessor) deleteToPeer(shared *CollaborativeArea) {
|
func (wfa *collaborativeAreaMongoAccessor) deleteToPeer(shared *CollaborativeArea) {
|
||||||
a.contactPeer(shared, tools.POST)
|
if wfa.Caller == nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.COLLABORATIVE_AREA] == nil || wfa.Caller.Disabled {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
paccess := (&peer.Peer{})
|
||||||
|
for _, v := range shared.Peers {
|
||||||
|
if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: v}}).IsMySelf(); ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
b, err := paccess.LaunchPeerExecution(v, shared.UUID, tools.COLLABORATIVE_AREA, tools.DELETE, nil, wfa.Caller)
|
||||||
|
if err != nil && b == nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not send to peer " + v + ". Error: " + err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// sharedWorkspace is a function that shares the collaborative area to the peers
|
// sharedWorkspace is a function that shares the collaborative area to the peers
|
||||||
func (a *collaborativeAreaMongoAccessor) sendToPeer(shared *CollaborativeArea) {
|
func (wfa *collaborativeAreaMongoAccessor) sendToPeer(shared *CollaborativeArea) {
|
||||||
a.contactPeer(shared, tools.POST)
|
if wfa.Caller == nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.COLLABORATIVE_AREA] == nil || wfa.Caller.Disabled {
|
||||||
}
|
|
||||||
|
|
||||||
func (a *collaborativeAreaMongoAccessor) contactPeer(shared *CollaborativeArea, meth tools.METHOD) {
|
|
||||||
if a.GetCaller() == nil || a.GetCaller().URLS == nil || a.GetCaller().URLS[tools.COLLABORATIVE_AREA] == nil || a.GetCaller().Disabled {
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
paccess := (&peer.Peer{})
|
paccess := (&peer.Peer{})
|
||||||
for k := range shared.AllowedPeersGroup {
|
for _, v := range shared.Peers {
|
||||||
if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: k}}).IsMySelf(); ok || (shared.IsSent && meth == tools.POST) || (!shared.IsSent && meth != tools.POST) {
|
if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: v}}).IsMySelf(); ok || shared.IsSent {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
shared.IsSent = meth == tools.POST
|
shared.IsSent = true
|
||||||
b, err := paccess.LaunchPeerExecution(k, k, tools.COLLABORATIVE_AREA, meth, shared.Serialize(shared), a.GetCaller())
|
b, err := paccess.LaunchPeerExecution(v, v, tools.COLLABORATIVE_AREA, tools.POST, shared.Serialize(), wfa.Caller)
|
||||||
if err != nil && b == nil {
|
if err != nil && b == nil {
|
||||||
a.Logger.Error().Msg("Could not send to peer " + k + ". Error: " + err.Error())
|
wfa.Logger.Error().Msg("Could not send to peer " + v + ". Error: " + err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UpdateOne updates a collaborative area in the database, given its ID and the new data, it automatically share to peers if the workspace is shared
|
||||||
|
func (wfa *collaborativeAreaMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||||
|
res, code, err := wfa.GenericUpdateOne(set.(*CollaborativeArea), id, wfa, &CollaborativeArea{})
|
||||||
|
fmt.Println("UpdateOne", set, res, code, err)
|
||||||
|
// wfa.deleteToPeer(res.(*CollaborativeArea)) // delete the collaborative area on the peer
|
||||||
|
wfa.sharedWorkflow(res.(*CollaborativeArea), id) // replace all shared workflows
|
||||||
|
wfa.sharedWorkspace(res.(*CollaborativeArea), id) // replace all collaborative areas (not shared worspace obj but workspace one)
|
||||||
|
// wfa.sendToPeer(res.(*CollaborativeArea)) // send the collaborative area (collaborative area object) to the peers
|
||||||
|
return res, code, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// StoreOne stores a collaborative area in the database, it automatically share to peers if the workspace is shared
|
||||||
|
func (wfa *collaborativeAreaMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
|
_, id := (&peer.Peer{}).IsMySelf() // get the local peer
|
||||||
|
data.(*CollaborativeArea).CreatorID = id // set the creator id
|
||||||
|
data.(*CollaborativeArea).Peers = append(data.(*CollaborativeArea).Peers, id) // add the creator id to the peers
|
||||||
|
// then reset the shared fields
|
||||||
|
if data.(*CollaborativeArea).Workspaces == nil {
|
||||||
|
data.(*CollaborativeArea).Workspaces = []string{}
|
||||||
|
}
|
||||||
|
if data.(*CollaborativeArea).Workflows == nil {
|
||||||
|
data.(*CollaborativeArea).Workflows = []string{}
|
||||||
|
}
|
||||||
|
if data.(*CollaborativeArea).Rules == nil {
|
||||||
|
data.(*CollaborativeArea).Rules = []string{}
|
||||||
|
}
|
||||||
|
if data.(*CollaborativeArea).CollaborativeAreaRule == nil {
|
||||||
|
data.(*CollaborativeArea).CollaborativeAreaRule = &CollaborativeAreaRule{
|
||||||
|
ShareMode: "private",
|
||||||
|
ExploitedBy: "collaborators only",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
data.(*CollaborativeArea).CollaborativeAreaRule.CreatedAt = time.Now().UTC()
|
||||||
|
// retrieve or proper peer
|
||||||
|
dd, code, err := (&peer.Peer{}).GetAccessor(nil).Search(nil, "0")
|
||||||
|
if code != 200 || len(dd) == 0 {
|
||||||
|
return nil, code, errors.New("Could not retrieve the peer" + err.Error())
|
||||||
|
}
|
||||||
|
data.(*CollaborativeArea).CollaborativeAreaRule.Creator = dd[0].GetID()
|
||||||
|
d, code, err := wfa.GenericStoreOne(data.(*CollaborativeArea), wfa)
|
||||||
|
if code == 200 {
|
||||||
|
wfa.sharedWorkflow(d.(*CollaborativeArea), d.GetID()) // create all shared workflows
|
||||||
|
wfa.sharedWorkspace(d.(*CollaborativeArea), d.GetID()) // create all collaborative areas
|
||||||
|
wfa.sendToPeer(d.(*CollaborativeArea)) // send the collaborative area (collaborative area object) to the peers
|
||||||
|
}
|
||||||
|
return data, code, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyOne copies a CollaborativeArea in the database
|
||||||
|
func (wfa *collaborativeAreaMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
|
return wfa.StoreOne(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// enrich is a function that enriches the CollaborativeArea with the shared objects
|
||||||
|
func (wfa *collaborativeAreaMongoAccessor) enrich(sharedWorkspace *CollaborativeArea) *CollaborativeArea {
|
||||||
|
access := (&workspace.Workspace{}).GetAccessor(nil)
|
||||||
|
res, code, _ := access.Search(&dbs.Filters{
|
||||||
|
Or: map[string][]dbs.Filter{
|
||||||
|
"abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Workspaces}},
|
||||||
|
},
|
||||||
|
}, "")
|
||||||
|
if code == 200 {
|
||||||
|
for _, r := range res {
|
||||||
|
sharedWorkspace.SharedWorkspaces = append(sharedWorkspace.SharedWorkspaces, r.(*workspace.Workspace))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
access = (&w.Workflow{}).GetAccessor(nil)
|
||||||
|
res, code, _ = access.Search(&dbs.Filters{
|
||||||
|
Or: map[string][]dbs.Filter{
|
||||||
|
"abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Workflows}},
|
||||||
|
},
|
||||||
|
}, "")
|
||||||
|
if code == 200 {
|
||||||
|
for _, r := range res {
|
||||||
|
sharedWorkspace.SharedWorkflows = append(sharedWorkspace.SharedWorkflows, r.(*w.Workflow))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
access = (&peer.Peer{}).GetAccessor(nil)
|
||||||
|
res, code, _ = access.Search(&dbs.Filters{
|
||||||
|
Or: map[string][]dbs.Filter{
|
||||||
|
"abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Peers}},
|
||||||
|
},
|
||||||
|
}, "")
|
||||||
|
if code == 200 {
|
||||||
|
for _, r := range res {
|
||||||
|
sharedWorkspace.SharedPeers = append(sharedWorkspace.SharedPeers, r.(*peer.Peer))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
access = (&rule.Rule{}).GetAccessor(nil)
|
||||||
|
res, code, _ = access.Search(&dbs.Filters{
|
||||||
|
Or: map[string][]dbs.Filter{
|
||||||
|
"abstractobject.id": {{Operator: dbs.IN.String(), Value: sharedWorkspace.Rules}},
|
||||||
|
},
|
||||||
|
}, "")
|
||||||
|
if code == 200 {
|
||||||
|
for _, r := range res {
|
||||||
|
sharedWorkspace.SharedRules = append(sharedWorkspace.SharedRules, r.(*rule.Rule))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return sharedWorkspace
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadOne loads a collaborative area from the database, given its ID and enrich it
|
||||||
|
func (wfa *collaborativeAreaMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||||
|
var sharedWorkspace CollaborativeArea
|
||||||
|
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
res_mongo.Decode(&sharedWorkspace)
|
||||||
|
return wfa.enrich(&sharedWorkspace), 200, nil // enrich the collaborative area
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadAll loads all the collaborative areas from the database and enrich them
|
||||||
|
func (wfa collaborativeAreaMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||||
|
objs := []utils.ShallowDBObject{}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []CollaborativeArea
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
for _, r := range results {
|
||||||
|
objs = append(objs, wfa.enrich(&r)) // enrich the collaborative area
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search searches for collaborative areas in the database, given some filters OR a search string
|
||||||
|
func (wfa *collaborativeAreaMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||||
|
objs := []utils.ShallowDBObject{}
|
||||||
|
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||||
|
filters = &dbs.Filters{
|
||||||
|
Or: map[string][]dbs.Filter{ // search by name only by default can be override
|
||||||
|
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []CollaborativeArea
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
for _, r := range results {
|
||||||
|
objs = append(objs, wfa.enrich(&r)) // enrich the collaborative area
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
|
}
|
||||||
|
@ -1,6 +1,8 @@
|
|||||||
package rule
|
package rule
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
@ -16,14 +18,39 @@ type Rule struct {
|
|||||||
Actions []string `json:"actions,omitempty" bson:"actions,omitempty"` // NOT DEFINITIVE TO SPECIFICATION
|
Actions []string `json:"actions,omitempty" bson:"actions,omitempty"` // NOT DEFINITIVE TO SPECIFICATION
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ao *Rule) GetID() string {
|
||||||
|
return ao.UUID
|
||||||
|
}
|
||||||
|
|
||||||
func (r *Rule) GenerateID() {
|
func (r *Rule) GenerateID() {
|
||||||
r.UUID = uuid.New().String()
|
r.UUID = uuid.New().String()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Rule) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
func (d *Rule) GetName() string {
|
||||||
return NewAccessor(request)
|
return d.Name
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Rule) VerifyAuth(request *tools.APIRequest) bool {
|
func (d *Rule) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||||
return true
|
data := New()
|
||||||
|
data.Init(tools.RULE, caller)
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *Rule) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||||
|
b, err := json.Marshal(j)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, dma)
|
||||||
|
return dma
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *Rule) Serialize() map[string]interface{} {
|
||||||
|
var m map[string]interface{}
|
||||||
|
b, err := json.Marshal(dma)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, &m)
|
||||||
|
return m
|
||||||
}
|
}
|
||||||
|
@ -2,9 +2,8 @@ package rule
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||||
"cloud.o-forge.io/core/oc-lib/logs"
|
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type ruleMongoAccessor struct {
|
type ruleMongoAccessor struct {
|
||||||
@ -12,51 +11,80 @@ type ruleMongoAccessor struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new instance of the ruleMongoAccessor
|
// New creates a new instance of the ruleMongoAccessor
|
||||||
func NewAccessor(request *tools.APIRequest) *ruleMongoAccessor {
|
func New() *ruleMongoAccessor {
|
||||||
return &ruleMongoAccessor{
|
return &ruleMongoAccessor{}
|
||||||
AbstractAccessor: utils.AbstractAccessor{
|
}
|
||||||
Logger: logs.CreateLogger(tools.RULE.String()), // Create a logger with the data type
|
|
||||||
Request: request,
|
// GetType returns the type of the rule
|
||||||
Type: tools.RULE,
|
func (wfa *ruleMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||||
|
return wfa.GenericDeleteOne(id, wfa)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateOne updates a rule in the database
|
||||||
|
func (wfa *ruleMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||||
|
return wfa.GenericUpdateOne(set.(*Rule), id, wfa, &Rule{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// StoreOne stores a rule in the database
|
||||||
|
func (wfa *ruleMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
|
return wfa.GenericStoreOne(data.(*Rule), wfa)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wfa *ruleMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
|
return wfa.GenericStoreOne(data, wfa)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadOne loads a rule from the database
|
||||||
|
func (wfa *ruleMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||||
|
var rule Rule
|
||||||
|
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
res_mongo.Decode(&rule)
|
||||||
|
return &rule, 200, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadAll loads all rules from the database
|
||||||
|
func (wfa ruleMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||||
|
objs := []utils.ShallowDBObject{}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []Rule
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
for _, r := range results {
|
||||||
|
objs = append(objs, &r)
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search searches for rules in the database, given some filters OR a search string
|
||||||
|
func (wfa *ruleMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||||
|
objs := []utils.ShallowDBObject{}
|
||||||
|
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||||
|
filters = &dbs.Filters{
|
||||||
|
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
|
||||||
|
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Nothing special here, just the basic CRUD operations
|
|
||||||
*/
|
|
||||||
func (a *ruleMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
|
||||||
return utils.GenericDeleteOne(id, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ruleMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
|
||||||
return utils.GenericUpdateOne(set, id, a, &Rule{})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ruleMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
|
||||||
return utils.GenericStoreOne(data, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ruleMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
|
||||||
return utils.GenericStoreOne(data, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ruleMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
|
||||||
return utils.GenericLoadOne[*Rule](id, func(d utils.DBObject) (utils.DBObject, int, error) {
|
|
||||||
return d, 200, nil
|
|
||||||
}, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ruleMongoAccessor) LoadAll(isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
|
||||||
return utils.GenericLoadAll[*Rule](a.getExec(), isDraft, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ruleMongoAccessor) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
|
||||||
return utils.GenericSearch[*Rule](filters, search, (&Rule{}).GetObjectFilters(search), a.getExec(), isDraft, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ruleMongoAccessor) getExec() func(utils.DBObject) utils.ShallowDBObject {
|
|
||||||
return func(d utils.DBObject) utils.ShallowDBObject {
|
|
||||||
return d
|
|
||||||
}
|
}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []Rule
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
for _, r := range results {
|
||||||
|
objs = append(objs, &r)
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
}
|
}
|
||||||
|
@ -1,13 +1,17 @@
|
|||||||
package shallow_collaborative_area
|
package shallow_collaborative_area
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
|
"github.com/google/uuid"
|
||||||
)
|
)
|
||||||
|
|
||||||
type ShallowCollaborativeArea struct {
|
type ShallowCollaborativeArea struct {
|
||||||
utils.AbstractObject
|
utils.AbstractObject
|
||||||
IsSent bool `json:"is_sent" bson:"-"`
|
IsSent bool `json:"is_sent" bson:"-"`
|
||||||
|
CreatorID string `json:"peer_id,omitempty" bson:"peer_id,omitempty" validate:"required"`
|
||||||
Version string `json:"version,omitempty" bson:"version,omitempty"`
|
Version string `json:"version,omitempty" bson:"version,omitempty"`
|
||||||
Description string `json:"description,omitempty" bson:"description,omitempty" validate:"required"`
|
Description string `json:"description,omitempty" bson:"description,omitempty" validate:"required"`
|
||||||
Attributes map[string]interface{} `json:"attributes,omitempty" bson:"attributes,omitempty"`
|
Attributes map[string]interface{} `json:"attributes,omitempty" bson:"attributes,omitempty"`
|
||||||
@ -17,6 +21,41 @@ type ShallowCollaborativeArea struct {
|
|||||||
Rules []string `json:"rules,omitempty" bson:"rules,omitempty"`
|
Rules []string `json:"rules,omitempty" bson:"rules,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *ShallowCollaborativeArea) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
func (ao *ShallowCollaborativeArea) GetID() string {
|
||||||
return NewAccessor(request)
|
return ao.UUID
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ShallowCollaborativeArea) GenerateID() {
|
||||||
|
if r.UUID == "" {
|
||||||
|
r.UUID = uuid.New().String()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *ShallowCollaborativeArea) GetName() string {
|
||||||
|
return d.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *ShallowCollaborativeArea) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||||
|
data := New()
|
||||||
|
data.Init(tools.COLLABORATIVE_AREA, caller)
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *ShallowCollaborativeArea) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||||
|
b, err := json.Marshal(j)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, dma)
|
||||||
|
return dma
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *ShallowCollaborativeArea) Serialize() map[string]interface{} {
|
||||||
|
var m map[string]interface{}
|
||||||
|
b, err := json.Marshal(dma)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, &m)
|
||||||
|
return m
|
||||||
}
|
}
|
||||||
|
@ -2,55 +2,82 @@ package shallow_collaborative_area
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||||
"cloud.o-forge.io/core/oc-lib/logs"
|
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type shallowSharedWorkspaceMongoAccessor struct {
|
type shallowSharedWorkspaceMongoAccessor struct {
|
||||||
utils.AbstractAccessor
|
utils.AbstractAccessor
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewAccessor(request *tools.APIRequest) *shallowSharedWorkspaceMongoAccessor {
|
func New() *shallowSharedWorkspaceMongoAccessor {
|
||||||
return &shallowSharedWorkspaceMongoAccessor{
|
return &shallowSharedWorkspaceMongoAccessor{}
|
||||||
AbstractAccessor: utils.AbstractAccessor{
|
}
|
||||||
Logger: logs.CreateLogger(tools.COLLABORATIVE_AREA.String()), // Create a logger with the data type
|
|
||||||
Request: request, // Set the caller
|
func (wfa *shallowSharedWorkspaceMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||||
Type: tools.COLLABORATIVE_AREA,
|
return wfa.GenericDeleteOne(id, wfa)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wfa *shallowSharedWorkspaceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||||
|
return wfa.GenericUpdateOne(set.(*ShallowCollaborativeArea), id, wfa, &ShallowCollaborativeArea{})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wfa *shallowSharedWorkspaceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
|
return wfa.GenericStoreOne(data.(*ShallowCollaborativeArea), wfa)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wfa *shallowSharedWorkspaceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
|
return wfa.StoreOne(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wfa *shallowSharedWorkspaceMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||||
|
var sharedWorkspace ShallowCollaborativeArea
|
||||||
|
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
res_mongo.Decode(&sharedWorkspace)
|
||||||
|
return &sharedWorkspace, 200, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wfa shallowSharedWorkspaceMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||||
|
objs := []utils.ShallowDBObject{}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []ShallowCollaborativeArea
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
for _, r := range results {
|
||||||
|
objs = append(objs, &r)
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wfa *shallowSharedWorkspaceMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||||
|
objs := []utils.ShallowDBObject{}
|
||||||
|
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||||
|
filters = &dbs.Filters{
|
||||||
|
Or: map[string][]dbs.Filter{
|
||||||
|
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||||
func (a *shallowSharedWorkspaceMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
if err != nil {
|
||||||
return utils.GenericDeleteOne(id, a)
|
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||||
}
|
return nil, code, err
|
||||||
|
}
|
||||||
func (a *shallowSharedWorkspaceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
var results []ShallowCollaborativeArea
|
||||||
return utils.GenericUpdateOne(set.(*ShallowCollaborativeArea), id, a, &ShallowCollaborativeArea{})
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
}
|
return nil, 404, err
|
||||||
|
}
|
||||||
func (a *shallowSharedWorkspaceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
for _, r := range results {
|
||||||
return utils.GenericStoreOne(data.(*ShallowCollaborativeArea), a)
|
objs = append(objs, &r)
|
||||||
}
|
}
|
||||||
|
return objs, 200, nil
|
||||||
func (a *shallowSharedWorkspaceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
|
||||||
return a.StoreOne(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *shallowSharedWorkspaceMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
|
||||||
return utils.GenericLoadOne[*ShallowCollaborativeArea](id, func(d utils.DBObject) (utils.DBObject, int, error) {
|
|
||||||
return d, 200, nil
|
|
||||||
}, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *shallowSharedWorkspaceMongoAccessor) LoadAll(isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
|
||||||
return utils.GenericLoadAll[*ShallowCollaborativeArea](func(d utils.DBObject) utils.ShallowDBObject {
|
|
||||||
return d
|
|
||||||
}, isDraft, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *shallowSharedWorkspaceMongoAccessor) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
|
||||||
return utils.GenericSearch[*ShallowCollaborativeArea](filters, search, (&ShallowCollaborativeArea{}).GetObjectFilters(search), func(d utils.DBObject) utils.ShallowDBObject {
|
|
||||||
return d
|
|
||||||
}, isDraft, a)
|
|
||||||
}
|
}
|
||||||
|
@ -1,20 +0,0 @@
|
|||||||
package enum
|
|
||||||
|
|
||||||
type InfrastructureType int
|
|
||||||
|
|
||||||
const (
|
|
||||||
DOCKER InfrastructureType = iota
|
|
||||||
KUBERNETES
|
|
||||||
SLURM
|
|
||||||
HW
|
|
||||||
CONDOR
|
|
||||||
)
|
|
||||||
|
|
||||||
func (t InfrastructureType) String() string {
|
|
||||||
return [...]string{"DOCKER", "KUBERNETES", "SLURM", "HW", "CONDOR"}[t]
|
|
||||||
}
|
|
||||||
|
|
||||||
// get list of all infrastructure types
|
|
||||||
func InfrastructureList() []InfrastructureType {
|
|
||||||
return []InfrastructureType{DOCKER, KUBERNETES, SLURM, HW, CONDOR}
|
|
||||||
}
|
|
@ -1,56 +0,0 @@
|
|||||||
package enum
|
|
||||||
|
|
||||||
type StorageSize int
|
|
||||||
|
|
||||||
// StorageType - Enum that defines the type of storage
|
|
||||||
const (
|
|
||||||
GB StorageSize = iota
|
|
||||||
MB
|
|
||||||
KB
|
|
||||||
TB
|
|
||||||
)
|
|
||||||
|
|
||||||
var argoType = [...]string{
|
|
||||||
"Gi",
|
|
||||||
"Mi",
|
|
||||||
"Ki",
|
|
||||||
"Ti",
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size to string
|
|
||||||
func (t StorageSize) String() string {
|
|
||||||
return [...]string{"GB", "MB", "KB", "TB"}[t]
|
|
||||||
}
|
|
||||||
|
|
||||||
func SizeList() []StorageSize {
|
|
||||||
return []StorageSize{GB, MB, KB, TB}
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new instance of the StorageResource struct
|
|
||||||
func (dma StorageSize) ToArgo() string {
|
|
||||||
return argoType[dma]
|
|
||||||
}
|
|
||||||
|
|
||||||
// enum of a data type
|
|
||||||
type StorageType int
|
|
||||||
|
|
||||||
const (
|
|
||||||
FILE = iota
|
|
||||||
STREAM
|
|
||||||
API
|
|
||||||
DATABASE
|
|
||||||
S3
|
|
||||||
MEMORY
|
|
||||||
HARDWARE
|
|
||||||
AZURE
|
|
||||||
GCS
|
|
||||||
)
|
|
||||||
|
|
||||||
// String() - Returns the string representation of the storage type
|
|
||||||
func (t StorageType) String() string {
|
|
||||||
return [...]string{"FILE", "STREAM", "API", "DATABASE", "S3", "MEMORY", "HARDWARE", "AZURE", "GCS"}[t]
|
|
||||||
}
|
|
||||||
|
|
||||||
func TypeList() []StorageType {
|
|
||||||
return []StorageType{FILE, STREAM, API, DATABASE, S3, MEMORY, HARDWARE, AZURE, GCS}
|
|
||||||
}
|
|
@ -1,64 +0,0 @@
|
|||||||
package enum
|
|
||||||
|
|
||||||
type CompletionStatus int
|
|
||||||
|
|
||||||
const (
|
|
||||||
DRAFTED CompletionStatus = iota
|
|
||||||
PENDING
|
|
||||||
CANCEL
|
|
||||||
PARTIAL
|
|
||||||
PAID
|
|
||||||
DISPUTED
|
|
||||||
OVERDUE
|
|
||||||
REFUND
|
|
||||||
)
|
|
||||||
|
|
||||||
func (d CompletionStatus) String() string {
|
|
||||||
return [...]string{"drafted", "pending", "cancel", "partial", "paid", "disputed", "overdue", "refund"}[d]
|
|
||||||
}
|
|
||||||
|
|
||||||
func CompletionStatusList() []CompletionStatus {
|
|
||||||
return []CompletionStatus{DRAFTED, PENDING, CANCEL, PARTIAL, PAID, DISPUTED, OVERDUE, REFUND}
|
|
||||||
}
|
|
||||||
|
|
||||||
type BookingStatus int
|
|
||||||
|
|
||||||
const (
|
|
||||||
DRAFT BookingStatus = iota
|
|
||||||
SCHEDULED
|
|
||||||
STARTED
|
|
||||||
FAILURE
|
|
||||||
SUCCESS
|
|
||||||
FORGOTTEN
|
|
||||||
DELAYED
|
|
||||||
CANCELLED
|
|
||||||
)
|
|
||||||
|
|
||||||
var str = [...]string{
|
|
||||||
"draft",
|
|
||||||
"scheduled",
|
|
||||||
"started",
|
|
||||||
"failure",
|
|
||||||
"success",
|
|
||||||
"forgotten",
|
|
||||||
"delayed",
|
|
||||||
"cancelled",
|
|
||||||
}
|
|
||||||
|
|
||||||
func FromInt(i int) string {
|
|
||||||
return str[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d BookingStatus) String() string {
|
|
||||||
return str[d]
|
|
||||||
}
|
|
||||||
|
|
||||||
// EnumIndex - Creating common behavior - give the type a EnumIndex functio
|
|
||||||
func (d BookingStatus) EnumIndex() int {
|
|
||||||
return int(d)
|
|
||||||
}
|
|
||||||
|
|
||||||
// List
|
|
||||||
func StatusList() []BookingStatus {
|
|
||||||
return []BookingStatus{DRAFT, SCHEDULED, STARTED, FAILURE, SUCCESS, FORGOTTEN, DELAYED, CANCELLED}
|
|
||||||
}
|
|
@ -1,17 +0,0 @@
|
|||||||
package models
|
|
||||||
|
|
||||||
type Container struct {
|
|
||||||
Image string `json:"image,omitempty" bson:"image,omitempty"` // Image is the container image TEMPO
|
|
||||||
Command string `json:"command,omitempty" bson:"command,omitempty"` // Command is the container command
|
|
||||||
Args string `json:"args,omitempty" bson:"args,omitempty"` // Args is the container arguments
|
|
||||||
Env map[string]string `json:"env,omitempty" bson:"env,omitempty"` // Env is the container environment variables
|
|
||||||
Volumes map[string]string `json:"volumes,omitempty" bson:"volumes,omitempty"` // Volumes is the container volumes
|
|
||||||
|
|
||||||
Exposes []Expose `bson:"exposes,omitempty" json:"exposes,omitempty"` // Expose is the execution
|
|
||||||
}
|
|
||||||
|
|
||||||
type Expose struct {
|
|
||||||
Port int `json:"port,omitempty" bson:"port,omitempty"` // Port is the port
|
|
||||||
Reverse string `json:"reverse,omitempty" bson:"reverse,omitempty"` // Reverse is the reverse
|
|
||||||
PAT int `json:"pat,omitempty" bson:"pat,omitempty"` // PAT is the PAT
|
|
||||||
}
|
|
@ -1,20 +0,0 @@
|
|||||||
package models
|
|
||||||
|
|
||||||
// CPU is a struct that represents a CPU
|
|
||||||
type CPU struct {
|
|
||||||
Model string `bson:"model,omitempty" json:"model,omitempty"`
|
|
||||||
FrequencyGhz float64 `bson:"frequency,omitempty" json:"frequency,omitempty"`
|
|
||||||
Cores int `bson:"cores,omitempty" json:"cores,omitempty"`
|
|
||||||
Architecture string `bson:"architecture,omitempty" json:"architecture,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type RAM struct {
|
|
||||||
SizeGb float64 `bson:"size,omitempty" json:"size,omitempty" description:"Units in MB"`
|
|
||||||
Ecc bool `bson:"ecc" json:"ecc" default:"true"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type GPU struct {
|
|
||||||
Model string `bson:"model,omitempty" json:"model,omitempty"`
|
|
||||||
MemoryGb float64 `bson:"memory,omitempty" json:"memory,omitempty" description:"Units in MB"`
|
|
||||||
Cores map[string]int `bson:"cores,omitempty" json:"cores,omitempty"`
|
|
||||||
}
|
|
@ -1,21 +0,0 @@
|
|||||||
package models
|
|
||||||
|
|
||||||
type Artifact struct {
|
|
||||||
AttrPath string `json:"attr_path,omitempty" bson:"attr_path,omitempty" validate:"required"`
|
|
||||||
AttrFrom string `json:"from_path,omitempty" bson:"from_path,omitempty"`
|
|
||||||
Readonly bool `json:"readonly" bson:"readonly" default:"true"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Param struct {
|
|
||||||
Name string `json:"name" bson:"name" validate:"required"`
|
|
||||||
Attr string `json:"attr,omitempty" bson:"attr,omitempty"`
|
|
||||||
Value string `json:"value,omitempty" bson:"value,omitempty"`
|
|
||||||
Origin string `json:"origin,omitempty" bson:"origin,omitempty"`
|
|
||||||
Readonly bool `json:"readonly" bson:"readonly" default:"true"`
|
|
||||||
Optionnal bool `json:"optionnal" bson:"optionnal" default:"true"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type InOutputs struct {
|
|
||||||
Params []Param `json:"parameters" bson:"parameters"`
|
|
||||||
Artifacts []Artifact `json:"artifacts" bson:"artifacts"`
|
|
||||||
}
|
|
@ -1,42 +0,0 @@
|
|||||||
package common
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
func GetPlannerNearestStart(start time.Time, planned map[tools.DataType]map[string]pricing.PricedItemITF, request *tools.APIRequest) float64 {
|
|
||||||
near := float64(10000000000) // set a high value
|
|
||||||
for _, items := range planned { // loop through the planned items
|
|
||||||
for _, priced := range items { // loop through the priced items
|
|
||||||
if priced.GetLocationStart() == nil { // if the start is nil,
|
|
||||||
continue // skip the iteration
|
|
||||||
}
|
|
||||||
newS := priced.GetLocationStart() // get the start
|
|
||||||
if newS.Sub(start).Seconds() < near { // if the difference between the start and the new start is less than the nearest start
|
|
||||||
near = newS.Sub(start).Seconds()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return near
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetPlannerLongestTime(end *time.Time, planned map[tools.DataType]map[string]pricing.PricedItemITF, request *tools.APIRequest) float64 {
|
|
||||||
if end == nil {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
longestTime := float64(0)
|
|
||||||
for _, priced := range planned[tools.PROCESSING_RESOURCE] {
|
|
||||||
if priced.GetLocationEnd() == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
newS := priced.GetLocationEnd()
|
|
||||||
if end == nil && longestTime < newS.Sub(*end).Seconds() {
|
|
||||||
longestTime = newS.Sub(*end).Seconds()
|
|
||||||
}
|
|
||||||
// get the nearest start from start var
|
|
||||||
}
|
|
||||||
return longestTime
|
|
||||||
}
|
|
@ -1,20 +0,0 @@
|
|||||||
package pricing
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
type PricedItemITF interface {
|
|
||||||
GetID() string
|
|
||||||
GetType() tools.DataType
|
|
||||||
IsPurchased() bool
|
|
||||||
GetCreatorID() string
|
|
||||||
GetLocationStart() *time.Time
|
|
||||||
SetLocationStart(start time.Time)
|
|
||||||
SetLocationEnd(end time.Time)
|
|
||||||
GetLocationEnd() *time.Time
|
|
||||||
GetExplicitDurationInS() float64
|
|
||||||
GetPrice() (float64, error)
|
|
||||||
}
|
|
@ -1,64 +0,0 @@
|
|||||||
package pricing
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type PricingProfileITF interface {
|
|
||||||
GetPrice(quantity float64, val float64, start time.Time, end time.Time, params ...string) (float64, error)
|
|
||||||
IsPurchased() bool
|
|
||||||
GetOverrideStrategyValue() int
|
|
||||||
}
|
|
||||||
|
|
||||||
type RefundType int
|
|
||||||
|
|
||||||
const (
|
|
||||||
REFUND_DEAD_END RefundType = iota
|
|
||||||
REFUND_ON_ERROR
|
|
||||||
REFUND_ON_EARLY_END
|
|
||||||
)
|
|
||||||
|
|
||||||
func (t RefundType) String() string {
|
|
||||||
return [...]string{"REFUND ON DEAD END", "REFUND ON ERROR", "REFUND ON EARLY END"}[t]
|
|
||||||
}
|
|
||||||
|
|
||||||
func RefundTypeList() []RefundType {
|
|
||||||
return []RefundType{REFUND_DEAD_END, REFUND_ON_ERROR, REFUND_ON_EARLY_END}
|
|
||||||
}
|
|
||||||
|
|
||||||
type AccessPricingProfile[T Strategy] struct { // only use for acces such as : DATA && PROCESSING
|
|
||||||
Pricing PricingStrategy[T] `json:"pricing,omitempty" bson:"pricing,omitempty"` // Price is the price of the resource
|
|
||||||
DefaultRefund RefundType `json:"default_refund" bson:"default_refund"` // DefaultRefund is the default refund type of the pricing
|
|
||||||
RefundRatio int32 `json:"refund_ratio" bson:"refund_ratio" default:"0"` // RefundRatio is the refund ratio if missing
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *AccessPricingProfile[T]) GetOverrideStrategyValue() int {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
type ExploitPrivilegeStrategy int
|
|
||||||
|
|
||||||
const (
|
|
||||||
BASIC ExploitPrivilegeStrategy = iota
|
|
||||||
GARANTED_ON_DELAY
|
|
||||||
GARANTED
|
|
||||||
)
|
|
||||||
|
|
||||||
func ExploitPrivilegeStrategyList() []ExploitPrivilegeStrategy {
|
|
||||||
return []ExploitPrivilegeStrategy{BASIC, GARANTED_ON_DELAY, GARANTED}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t ExploitPrivilegeStrategy) String() string {
|
|
||||||
return [...]string{"NO GARANTY", "GARANTED ON SPECIFIC DELAY", "GARANTED"}[t]
|
|
||||||
}
|
|
||||||
|
|
||||||
type ExploitPricingProfile[T Strategy] struct { // only use for exploit such as : STORAGE, COMPUTE, WORKFLOW
|
|
||||||
AccessPricingProfile[T]
|
|
||||||
AdditionnalRefundTypes []RefundType `json:"refund_types" bson:"refund_types"` // RefundTypes is the refund types of the pricing
|
|
||||||
|
|
||||||
PrivilegeStrategy ExploitPrivilegeStrategy `json:"privilege_strategy,omitempty" bson:"privilege_strategy,omitempty"` // Strategy is the strategy of the pricing
|
|
||||||
GarantedDelaySecond uint `json:"garanted_delay_second,omitempty" bson:"garanted_delay_second,omitempty"` // GarantedDelaySecond is the garanted delay of the pricing
|
|
||||||
|
|
||||||
Exceeding bool `json:"exceeding" bson:"exceeding"` // Exceeding is the exceeding of the bill
|
|
||||||
ExceedingRatio int32 `json:"exceeding_ratio" bson:"exceeding_ratio" default:"0"` // ExceedingRatio is the exceeding ratio of the bill
|
|
||||||
}
|
|
@ -1,131 +0,0 @@
|
|||||||
package pricing
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type BuyingStrategy int
|
|
||||||
|
|
||||||
const (
|
|
||||||
UNLIMITED BuyingStrategy = iota
|
|
||||||
SUBSCRIPTION
|
|
||||||
PAY_PER_USE
|
|
||||||
)
|
|
||||||
|
|
||||||
func (t BuyingStrategy) String() string {
|
|
||||||
return [...]string{"UNLIMITED", "SUBSCRIPTION", "PAY PER USE"}[t]
|
|
||||||
}
|
|
||||||
|
|
||||||
func BuyingStrategyList() []BuyingStrategy {
|
|
||||||
return []BuyingStrategy{UNLIMITED, SUBSCRIPTION, PAY_PER_USE}
|
|
||||||
}
|
|
||||||
|
|
||||||
type Strategy interface {
|
|
||||||
GetStrategy() string
|
|
||||||
GetStrategyValue() int
|
|
||||||
}
|
|
||||||
|
|
||||||
type TimePricingStrategy int
|
|
||||||
|
|
||||||
const (
|
|
||||||
ONCE TimePricingStrategy = iota
|
|
||||||
PER_SECOND
|
|
||||||
PER_MINUTE
|
|
||||||
PER_HOUR
|
|
||||||
PER_DAY
|
|
||||||
PER_WEEK
|
|
||||||
PER_MONTH
|
|
||||||
)
|
|
||||||
|
|
||||||
func (t TimePricingStrategy) String() string {
|
|
||||||
return [...]string{"ONCE", "PER SECOND", "PER MINUTE", "PER HOUR", "PER DAY", "PER WEEK", "PER MONTH"}[t]
|
|
||||||
}
|
|
||||||
|
|
||||||
func TimePricingStrategyList() []TimePricingStrategy {
|
|
||||||
return []TimePricingStrategy{ONCE, PER_SECOND, PER_MINUTE, PER_HOUR, PER_DAY, PER_WEEK, PER_MONTH}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t TimePricingStrategy) GetStrategy() string {
|
|
||||||
return [...]string{"ONCE", "PER_SECOND", "PER_MINUTE", "PER_HOUR", "PER_DAY", "PER_WEEK", "PER_MONTH"}[t]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t TimePricingStrategy) GetStrategyValue() int {
|
|
||||||
return int(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getAverageTimeInSecond(averageTimeInSecond float64, start time.Time, end *time.Time) float64 {
|
|
||||||
now := time.Now()
|
|
||||||
after := now.Add(time.Duration(averageTimeInSecond) * time.Second)
|
|
||||||
|
|
||||||
fromAverageDuration := after.Sub(now).Seconds()
|
|
||||||
var tEnd time.Time
|
|
||||||
if end == nil {
|
|
||||||
tEnd = start.Add(1 * time.Hour)
|
|
||||||
} else {
|
|
||||||
tEnd = *end
|
|
||||||
}
|
|
||||||
fromDateDuration := tEnd.Sub(start).Seconds()
|
|
||||||
|
|
||||||
if fromAverageDuration > fromDateDuration {
|
|
||||||
return fromAverageDuration
|
|
||||||
}
|
|
||||||
return fromDateDuration
|
|
||||||
}
|
|
||||||
|
|
||||||
func BookingEstimation(t TimePricingStrategy, price float64, locationDurationInSecond float64, start time.Time, end *time.Time) (float64, error) {
|
|
||||||
locationDurationInSecond = getAverageTimeInSecond(locationDurationInSecond, start, end)
|
|
||||||
priceStr := fmt.Sprintf("%v", price)
|
|
||||||
p, err := strconv.ParseFloat(priceStr, 64)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
switch t {
|
|
||||||
case ONCE:
|
|
||||||
return p, nil
|
|
||||||
case PER_HOUR:
|
|
||||||
return p * float64(locationDurationInSecond/3600), nil
|
|
||||||
case PER_MINUTE:
|
|
||||||
return p * float64(locationDurationInSecond/60), nil
|
|
||||||
case PER_SECOND:
|
|
||||||
return p * locationDurationInSecond, nil
|
|
||||||
case PER_DAY:
|
|
||||||
return p * float64(locationDurationInSecond/86400), nil
|
|
||||||
case PER_WEEK:
|
|
||||||
return p * float64(locationDurationInSecond/604800), nil
|
|
||||||
case PER_MONTH:
|
|
||||||
return p * float64(locationDurationInSecond/2592000), nil
|
|
||||||
}
|
|
||||||
return 0, errors.New("pricing strategy not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
type PricingStrategy[T Strategy] struct {
|
|
||||||
Price float64 `json:"price" bson:"price" default:"0"` // Price is the Price of the pricing
|
|
||||||
Currency string `json:"currency" bson:"currency" default:"USD"` // Currency is the currency of the pricing
|
|
||||||
BuyingStrategy BuyingStrategy `json:"buying_strategy" bson:"buying_strategy" default:"0"` // BuyingStrategy is the buying strategy of the pricing
|
|
||||||
TimePricingStrategy TimePricingStrategy `json:"time_pricing_strategy" bson:"time_pricing_strategy" default:"0"` // TimePricingStrategy is the time pricing strategy of the pricing
|
|
||||||
OverrideStrategy T `json:"override_strategy" bson:"override_strategy" default:"-1"` // Modulation is the modulation of the pricing
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p PricingStrategy[T]) GetPrice(amountOfData float64, bookingTimeDuration float64, start time.Time, end *time.Time) (float64, error) {
|
|
||||||
if p.BuyingStrategy == SUBSCRIPTION {
|
|
||||||
return BookingEstimation(p.GetTimePricingStrategy(), p.Price*float64(amountOfData), bookingTimeDuration, start, end)
|
|
||||||
} else if p.BuyingStrategy == UNLIMITED {
|
|
||||||
return p.Price, nil
|
|
||||||
}
|
|
||||||
return p.Price * float64(amountOfData), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p PricingStrategy[T]) GetBuyingStrategy() BuyingStrategy {
|
|
||||||
return p.BuyingStrategy
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p PricingStrategy[T]) GetTimePricingStrategy() TimePricingStrategy {
|
|
||||||
return p.TimePricingStrategy
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p PricingStrategy[T]) GetOverrideStrategy() T {
|
|
||||||
return p.OverrideStrategy
|
|
||||||
}
|
|
@ -2,15 +2,18 @@ package models
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"cloud.o-forge.io/core/oc-lib/logs"
|
"cloud.o-forge.io/core/oc-lib/logs"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/order"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/booking"
|
"cloud.o-forge.io/core/oc-lib/models/booking"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/collaborative_area"
|
"cloud.o-forge.io/core/oc-lib/models/collaborative_area"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
|
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
"cloud.o-forge.io/core/oc-lib/models/peer"
|
||||||
resource "cloud.o-forge.io/core/oc-lib/models/resources"
|
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||||
|
d "cloud.o-forge.io/core/oc-lib/models/resources/data"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
|
||||||
|
p "cloud.o-forge.io/core/oc-lib/models/resources/processing"
|
||||||
|
s "cloud.o-forge.io/core/oc-lib/models/resources/storage"
|
||||||
|
w "cloud.o-forge.io/core/oc-lib/models/resources/workflow"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
w2 "cloud.o-forge.io/core/oc-lib/models/workflow"
|
w2 "cloud.o-forge.io/core/oc-lib/models/workflow"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
|
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
|
||||||
@ -22,22 +25,21 @@ This package contains the models used in the application
|
|||||||
It's used to create the models dynamically
|
It's used to create the models dynamically
|
||||||
*/
|
*/
|
||||||
var models = map[string]func() utils.DBObject{
|
var models = map[string]func() utils.DBObject{
|
||||||
tools.WORKFLOW_RESOURCE.String(): func() utils.DBObject { return &resource.WorkflowResource{} },
|
tools.WORKFLOW_RESOURCE.String(): func() utils.DBObject { return &w.WorkflowResource{} },
|
||||||
tools.DATA_RESOURCE.String(): func() utils.DBObject { return &resource.DataResource{} },
|
tools.DATA_RESOURCE.String(): func() utils.DBObject { return &d.DataResource{} },
|
||||||
tools.COMPUTE_RESOURCE.String(): func() utils.DBObject { return &resource.ComputeResource{} },
|
tools.COMPUTE_RESOURCE.String(): func() utils.DBObject { return &compute.ComputeResource{} },
|
||||||
tools.STORAGE_RESOURCE.String(): func() utils.DBObject { return &resource.StorageResource{} },
|
tools.STORAGE_RESOURCE.String(): func() utils.DBObject { return &s.StorageResource{} },
|
||||||
tools.PROCESSING_RESOURCE.String(): func() utils.DBObject { return &resource.ProcessingResource{} },
|
tools.PROCESSING_RESOURCE.String(): func() utils.DBObject { return &p.ProcessingResource{} },
|
||||||
tools.WORKFLOW.String(): func() utils.DBObject { return &w2.Workflow{} },
|
tools.WORKFLOW.String(): func() utils.DBObject { return &w2.Workflow{} },
|
||||||
tools.WORKFLOW_EXECUTION.String(): func() utils.DBObject { return &workflow_execution.WorkflowExecution{} },
|
tools.WORKFLOW_EXECUTION.String(): func() utils.DBObject { return &workflow_execution.WorkflowExecution{} },
|
||||||
tools.WORKSPACE.String(): func() utils.DBObject { return &w3.Workspace{} },
|
tools.WORKSPACE.String(): func() utils.DBObject { return &w3.Workspace{} },
|
||||||
|
tools.RESOURCE_MODEL.String(): func() utils.DBObject { return &resource_model.ResourceModel{} },
|
||||||
tools.PEER.String(): func() utils.DBObject { return &peer.Peer{} },
|
tools.PEER.String(): func() utils.DBObject { return &peer.Peer{} },
|
||||||
tools.COLLABORATIVE_AREA.String(): func() utils.DBObject { return &collaborative_area.CollaborativeArea{} },
|
tools.COLLABORATIVE_AREA.String(): func() utils.DBObject { return &collaborative_area.CollaborativeArea{} },
|
||||||
tools.RULE.String(): func() utils.DBObject { return &rule.Rule{} },
|
tools.RULE.String(): func() utils.DBObject { return &rule.Rule{} },
|
||||||
tools.BOOKING.String(): func() utils.DBObject { return &booking.Booking{} },
|
tools.BOOKING.String(): func() utils.DBObject { return &booking.Booking{} },
|
||||||
tools.WORKFLOW_HISTORY.String(): func() utils.DBObject { return &w2.WorkflowHistory{} },
|
tools.WORKFLOW_HISTORY.String(): func() utils.DBObject { return &w2.WorkflowHistory{} },
|
||||||
tools.WORKSPACE_HISTORY.String(): func() utils.DBObject { return &w3.WorkspaceHistory{} },
|
tools.WORKSPACE_HISTORY.String(): func() utils.DBObject { return &w3.WorkspaceHistory{} },
|
||||||
tools.ORDER.String(): func() utils.DBObject { return &order.Order{} },
|
|
||||||
tools.PURCHASE_RESOURCE.String(): func() utils.DBObject { return &purchase_resource.PurchaseResource{} },
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Model returns the model object based on the model type
|
// Model returns the model object based on the model type
|
||||||
|
@ -1,332 +0,0 @@
|
|||||||
package order
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/booking"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Booking is a struct that represents a booking
|
|
||||||
*/
|
|
||||||
|
|
||||||
type Order struct {
|
|
||||||
utils.AbstractObject
|
|
||||||
OrderBy string `json:"order_by" bson:"order_by" validate:"required"`
|
|
||||||
WorkflowID string `json:"workflow_id" bson:"workflow_id" validate:"required"`
|
|
||||||
WorkflowExecutionIDs []string `json:"workflow_execution_ids" bson:"workflow_execution_ids" validate:"required"`
|
|
||||||
Status enum.CompletionStatus `json:"status" bson:"status" default:"0"`
|
|
||||||
SubOrders map[string]*PeerOrder `json:"sub_orders" bson:"sub_orders"`
|
|
||||||
Total float64 `json:"total" bson:"total" validate:"required"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Order) StoreDraftDefault() {
|
|
||||||
r.IsDraft = true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Order) CanUpdate(set utils.DBObject) (bool, utils.DBObject) {
|
|
||||||
if !r.IsDraft && r.Status != set.(*Order).Status {
|
|
||||||
return true, &Order{Status: set.(*Order).Status} // only state can be updated
|
|
||||||
}
|
|
||||||
return r.IsDraft, set
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Order) CanDelete() bool {
|
|
||||||
return r.IsDraft // only draft order can be deleted
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Order) DraftOrder(scheduler *workflow_execution.WorkflowSchedule, request *tools.APIRequest) error {
|
|
||||||
// set the draft order from the model
|
|
||||||
if err := o.draftStoreFromModel(scheduler, request); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Order) Pay(scheduler *workflow_execution.WorkflowSchedule, request *tools.APIRequest) error {
|
|
||||||
if _, err := o.draftBookOrder(scheduler, request); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
o.Status = enum.PENDING
|
|
||||||
_, code, err := o.GetAccessor(request).UpdateOne(o, o.GetID())
|
|
||||||
if code != 200 || err != nil {
|
|
||||||
return errors.New("could not update the order" + fmt.Sprintf("%v", err))
|
|
||||||
}
|
|
||||||
if err := o.pay(request); err != nil { // pay the order
|
|
||||||
return err
|
|
||||||
} else {
|
|
||||||
o.IsDraft = false
|
|
||||||
}
|
|
||||||
for _, exec := range scheduler.WorkflowExecution {
|
|
||||||
exec.IsDraft = false
|
|
||||||
_, code, err := utils.GenericUpdateOne(exec, exec.GetID(),
|
|
||||||
workflow_execution.NewAccessor(request), &workflow_execution.WorkflowExecution{})
|
|
||||||
if code != 200 || err != nil {
|
|
||||||
return errors.New("could not update the workflow execution" + fmt.Sprintf("%v", err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_, code, err = o.GetAccessor(request).UpdateOne(o, o.GetID())
|
|
||||||
if code != 200 || err != nil {
|
|
||||||
return errors.New("could not update the order" + fmt.Sprintf("%v", err))
|
|
||||||
}
|
|
||||||
/*
|
|
||||||
TODO : TEMPORARY SET BOOKINGS TO UNDRAFT TO AVOID DELETION
|
|
||||||
BUT NEXT ONLY WHO IS PAYED WILL BE ALLOWED TO CHANGE IT
|
|
||||||
*/
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Order) draftStoreFromModel(scheduler *workflow_execution.WorkflowSchedule, request *tools.APIRequest) error {
|
|
||||||
if request == nil {
|
|
||||||
return errors.New("no request found")
|
|
||||||
}
|
|
||||||
fmt.Println("Drafting order", scheduler.Workflow)
|
|
||||||
if scheduler.Workflow == nil || scheduler.Workflow.Graph == nil { // if the workflow has no graph, return an error
|
|
||||||
return errors.New("no graph found")
|
|
||||||
}
|
|
||||||
o.SetName()
|
|
||||||
o.WorkflowID = scheduler.Workflow.GetID()
|
|
||||||
o.IsDraft = true
|
|
||||||
o.OrderBy = request.PeerID
|
|
||||||
o.WorkflowExecutionIDs = []string{} // create an array of ids
|
|
||||||
for _, exec := range scheduler.WorkflowExecution {
|
|
||||||
o.WorkflowExecutionIDs = append(o.WorkflowExecutionIDs, exec.GetID())
|
|
||||||
}
|
|
||||||
// set the name of the order
|
|
||||||
resourcesByPeer := map[string][]pricing.PricedItemITF{} // create a map of resources by peer
|
|
||||||
|
|
||||||
processings := scheduler.Workflow.GetPricedItem(scheduler.Workflow.Graph.IsProcessing, request) // get the processing items
|
|
||||||
datas := scheduler.Workflow.GetPricedItem(scheduler.Workflow.Graph.IsData, request) // get the data items
|
|
||||||
storages := scheduler.Workflow.GetPricedItem(scheduler.Workflow.Graph.IsStorage, request) // get the storage items
|
|
||||||
workflows := scheduler.Workflow.GetPricedItem(scheduler.Workflow.Graph.IsWorkflow, request) // get the workflow items
|
|
||||||
for _, items := range []map[string]pricing.PricedItemITF{processings, datas, storages, workflows} {
|
|
||||||
for _, item := range items {
|
|
||||||
if _, ok := resourcesByPeer[item.GetCreatorID()]; !ok {
|
|
||||||
resourcesByPeer[item.GetCreatorID()] = []pricing.PricedItemITF{}
|
|
||||||
}
|
|
||||||
resourcesByPeer[item.GetCreatorID()] = append(resourcesByPeer[item.GetCreatorID()], item)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for peerID, resources := range resourcesByPeer {
|
|
||||||
peerOrder := &PeerOrder{
|
|
||||||
Status: enum.DRAFTED,
|
|
||||||
PeerID: peerID,
|
|
||||||
}
|
|
||||||
peerOrder.GenerateID()
|
|
||||||
for _, resource := range resources {
|
|
||||||
peerOrder.AddItem(resource, len(resources)) // TODO SPECIALS REF ADDITIONALS NOTES
|
|
||||||
}
|
|
||||||
if o.SubOrders == nil {
|
|
||||||
o.SubOrders = map[string]*PeerOrder{}
|
|
||||||
}
|
|
||||||
o.SubOrders[peerOrder.GetID()] = peerOrder
|
|
||||||
}
|
|
||||||
// search an order with same user name and same session id
|
|
||||||
err := o.sumUpBill(request)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// should store the order
|
|
||||||
res, code, err := o.GetAccessor(request).Search(&dbs.Filters{
|
|
||||||
And: map[string][]dbs.Filter{
|
|
||||||
"workflow_id": {{Operator: dbs.EQUAL.String(), Value: o.WorkflowID}},
|
|
||||||
"order_by": {{Operator: dbs.EQUAL.String(), Value: request.PeerID}},
|
|
||||||
},
|
|
||||||
}, "", o.IsDraft)
|
|
||||||
if code != 200 || err != nil {
|
|
||||||
return errors.New("could not search the order" + fmt.Sprintf("%v", err))
|
|
||||||
}
|
|
||||||
if len(res) > 0 {
|
|
||||||
_, code, err := utils.GenericUpdateOne(o, res[0].GetID(), o.GetAccessor(request), o)
|
|
||||||
if code != 200 || err != nil {
|
|
||||||
return errors.New("could not update the order" + fmt.Sprintf("%v", err))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
_, code, err := utils.GenericStoreOne(o, o.GetAccessor(request))
|
|
||||||
if code != 200 || err != nil {
|
|
||||||
return errors.New("could not store the order" + fmt.Sprintf("%v", err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Order) draftBookOrder(scheduler *workflow_execution.WorkflowSchedule, request *tools.APIRequest) ([]*booking.Booking, error) {
|
|
||||||
draftedBookings := []*booking.Booking{}
|
|
||||||
if request == nil {
|
|
||||||
return draftedBookings, errors.New("no request found")
|
|
||||||
}
|
|
||||||
for _, exec := range scheduler.WorkflowExecution {
|
|
||||||
_, priceds, _, err := scheduler.Workflow.Planify(exec.ExecDate, exec.EndDate, request)
|
|
||||||
if err != nil {
|
|
||||||
return draftedBookings, errors.New("could not planify the workflow" + fmt.Sprintf("%v", err))
|
|
||||||
}
|
|
||||||
bookings := exec.Book(scheduler.UUID, scheduler.Workflow.UUID, priceds)
|
|
||||||
for _, booking := range bookings {
|
|
||||||
_, err := (&peer.Peer{}).LaunchPeerExecution(booking.DestPeerID, "",
|
|
||||||
tools.BOOKING, tools.POST, booking.Serialize(booking), request.Caller)
|
|
||||||
if err != nil {
|
|
||||||
return draftedBookings, errors.New("could not launch the peer execution : " + fmt.Sprintf("%v", err))
|
|
||||||
}
|
|
||||||
draftedBookings = append(draftedBookings, booking)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return draftedBookings, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Order) Quantity() int {
|
|
||||||
return len(o.WorkflowExecutionIDs)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Order) SetName() {
|
|
||||||
d.Name = d.UUID + "_order_" + "_" + time.Now().UTC().Format("2006-01-02T15:04:05")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Order) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
|
||||||
return NewAccessor(request) // Create a new instance of the accessor
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Order) sumUpBill(request *tools.APIRequest) error {
|
|
||||||
for _, b := range d.SubOrders {
|
|
||||||
err := b.SumUpBill(request)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
d.Total += b.Total
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TO FINISH
|
|
||||||
func (d *Order) pay(request *tools.APIRequest) error {
|
|
||||||
responses := make(chan *PeerOrder, len(d.SubOrders))
|
|
||||||
var wg *sync.WaitGroup
|
|
||||||
wg.Add(len(d.SubOrders))
|
|
||||||
for _, b := range d.SubOrders {
|
|
||||||
go b.Pay(request, responses, wg)
|
|
||||||
}
|
|
||||||
wg.Wait()
|
|
||||||
errs := ""
|
|
||||||
gotAnUnpaid := false
|
|
||||||
count := 0
|
|
||||||
for range responses {
|
|
||||||
res := <-responses
|
|
||||||
count++
|
|
||||||
if res != nil {
|
|
||||||
if res.Error != "" {
|
|
||||||
errs += res.Error
|
|
||||||
}
|
|
||||||
if res.Status != enum.PAID {
|
|
||||||
gotAnUnpaid = true
|
|
||||||
}
|
|
||||||
d.Status = enum.PARTIAL
|
|
||||||
d.SubOrders[res.GetID()] = res
|
|
||||||
if count == len(d.SubOrders) && !gotAnUnpaid {
|
|
||||||
d.Status = enum.PAID
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if errs != "" {
|
|
||||||
return errors.New(errs)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type PeerOrder struct {
|
|
||||||
utils.AbstractObject
|
|
||||||
Error string `json:"error,omitempty" bson:"error,omitempty"`
|
|
||||||
PeerID string `json:"peer_id,omitempty" bson:"peer_id,omitempty"`
|
|
||||||
Status enum.CompletionStatus `json:"status" bson:"status" default:"0"`
|
|
||||||
BillingAddress string `json:"billing_address,omitempty" bson:"billing_address,omitempty"`
|
|
||||||
Items []*PeerItemOrder `json:"items,omitempty" bson:"items,omitempty"`
|
|
||||||
Total float64 `json:"total,omitempty" bson:"total,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *PeerOrder) Pay(request *tools.APIRequest, response chan *PeerOrder, wg *sync.WaitGroup) {
|
|
||||||
d.Status = enum.PENDING
|
|
||||||
go func() {
|
|
||||||
// DO SOMETHING TO PAY ON BLOCKCHAIN OR WHATEVER ON RETURN UPDATE STATUS
|
|
||||||
d.Status = enum.PAID // TO REMOVE LATER IT'S A MOCK
|
|
||||||
if d.Status == enum.PAID {
|
|
||||||
for _, b := range d.Items {
|
|
||||||
if !b.Item.IsPurchased() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
accessor := purchase_resource.NewAccessor(request)
|
|
||||||
accessor.StoreOne(&purchase_resource.PurchaseResource{
|
|
||||||
ResourceID: b.Item.GetID(),
|
|
||||||
ResourceType: b.Item.GetType(),
|
|
||||||
EndDate: b.Item.GetLocationEnd(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.Status != enum.PENDING {
|
|
||||||
response <- d
|
|
||||||
}
|
|
||||||
wg.Done()
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *PeerOrder) SumUpBill(request *tools.APIRequest) error {
|
|
||||||
for _, b := range d.Items {
|
|
||||||
tot, err := b.GetPrice(request) // missing something
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
d.Total += tot
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *PeerOrder) AddItem(item pricing.PricedItemITF, quantity int) {
|
|
||||||
d.Items = append(d.Items, &PeerItemOrder{
|
|
||||||
Quantity: quantity,
|
|
||||||
Item: item,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *PeerOrder) SetName() {
|
|
||||||
d.Name = d.UUID + "_order_" + d.PeerID + "_" + time.Now().UTC().Format("2006-01-02T15:04:05")
|
|
||||||
}
|
|
||||||
|
|
||||||
type PeerItemOrder struct {
|
|
||||||
Quantity int `json:"quantity,omitempty" bson:"quantity,omitempty"`
|
|
||||||
Purchase purchase_resource.PurchaseResource `json:"purchase,omitempty" bson:"purchase,omitempty"`
|
|
||||||
Item pricing.PricedItemITF `json:"item,omitempty" bson:"item,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *PeerItemOrder) GetPrice(request *tools.APIRequest) (float64, error) {
|
|
||||||
accessor := purchase_resource.NewAccessor(request)
|
|
||||||
search, code, _ := accessor.Search(&dbs.Filters{
|
|
||||||
And: map[string][]dbs.Filter{
|
|
||||||
"resource_id": {{Operator: dbs.EQUAL.String(), Value: d.Item.GetID()}},
|
|
||||||
},
|
|
||||||
}, "", d.Purchase.IsDraft)
|
|
||||||
if code == 200 && len(search) > 0 {
|
|
||||||
for _, s := range search {
|
|
||||||
if s.(*purchase_resource.PurchaseResource).EndDate == nil || time.Now().UTC().After(*s.(*purchase_resource.PurchaseResource).EndDate) {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
p, err := d.Item.GetPrice()
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return p * float64(d.Quantity), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WTF HOW TO SELECT THE RIGHT PRICE ???
|
|
||||||
// SHOULD SET A BUYING STATUS WHEN PAYMENT IS VALIDATED
|
|
@ -1,64 +0,0 @@
|
|||||||
package order
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/logs"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
type orderMongoAccessor struct {
|
|
||||||
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new instance of the orderMongoAccessor
|
|
||||||
func NewAccessor(request *tools.APIRequest) *orderMongoAccessor {
|
|
||||||
return &orderMongoAccessor{
|
|
||||||
AbstractAccessor: utils.AbstractAccessor{
|
|
||||||
Logger: logs.CreateLogger(tools.ORDER.String()), // Create a logger with the data type
|
|
||||||
Request: request,
|
|
||||||
Type: tools.ORDER,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Nothing special here, just the basic CRUD operations
|
|
||||||
*/
|
|
||||||
func (a *orderMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
|
||||||
return utils.GenericDeleteOne(id, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *orderMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
|
||||||
return utils.GenericUpdateOne(set, id, a, &Order{})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *orderMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
|
||||||
return nil, 404, errors.New("Not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *orderMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
|
||||||
return nil, 404, errors.New("Not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *orderMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
|
||||||
return utils.GenericLoadOne[*Order](id, func(d utils.DBObject) (utils.DBObject, int, error) {
|
|
||||||
return d, 200, nil
|
|
||||||
}, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *orderMongoAccessor) LoadAll(isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
|
||||||
return utils.GenericLoadAll[*Order](a.getExec(), isDraft, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *orderMongoAccessor) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
|
||||||
return utils.GenericSearch[*Order](filters, search, (&Order{}).GetObjectFilters(search), a.getExec(), isDraft, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *orderMongoAccessor) getExec() func(utils.DBObject) utils.ShallowDBObject {
|
|
||||||
return func(d utils.DBObject) utils.ShallowDBObject {
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,10 +1,12 @@
|
|||||||
package peer
|
package peer
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
|
"github.com/google/uuid"
|
||||||
)
|
)
|
||||||
|
|
||||||
// now write a go enum for the state partner with self, blacklist, partner
|
// now write a go enum for the state partner with self, blacklist, partner
|
||||||
@ -29,18 +31,13 @@ func (m PeerState) EnumIndex() int {
|
|||||||
// Peer is a struct that represents a peer
|
// Peer is a struct that represents a peer
|
||||||
type Peer struct {
|
type Peer struct {
|
||||||
utils.AbstractObject
|
utils.AbstractObject
|
||||||
Url string `json:"url" bson:"url" validate:"required"` // Url is the URL of the peer (base64url)
|
Url string `json:"url,omitempty" bson:"url,omitempty" validate:"required"` // Url is the URL of the peer (base64url)
|
||||||
WalletAddress string `json:"wallet_address" bson:"wallet_address" validate:"required"` // WalletAddress is the wallet address of the peer
|
PublicKey string `json:"public_key,omitempty" bson:"public_key,omitempty"` // PublicKey is the public key of the peer
|
||||||
PublicKey string `json:"public_key" bson:"public_key" validate:"required"` // PublicKey is the public key of the peer
|
Services map[string]int `json:"services,omitempty" bson:"services,omitempty"`
|
||||||
State PeerState `json:"state" bson:"state" default:"0"`
|
State PeerState `json:"state" bson:"state" default:"0"`
|
||||||
ServicesState map[string]int `json:"services_state,omitempty" bson:"services_state,omitempty"`
|
|
||||||
FailedExecution []PeerExecution `json:"failed_execution" bson:"failed_execution"` // FailedExecution is the list of failed executions, to be retried
|
FailedExecution []PeerExecution `json:"failed_execution" bson:"failed_execution"` // FailedExecution is the list of failed executions, to be retried
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ao *Peer) VerifyAuth(request *tools.APIRequest) bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddExecution adds an execution to the list of failed executions
|
// AddExecution adds an execution to the list of failed executions
|
||||||
func (ao *Peer) AddExecution(exec PeerExecution) {
|
func (ao *Peer) AddExecution(exec PeerExecution) {
|
||||||
found := false
|
found := false
|
||||||
@ -67,25 +64,56 @@ func (ao *Peer) RemoveExecution(exec PeerExecution) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// IsMySelf checks if the peer is the local peer
|
// IsMySelf checks if the peer is the local peer
|
||||||
func (p *Peer) IsMySelf() (bool, string) {
|
func (ao *Peer) IsMySelf() (bool, string) {
|
||||||
d, code, err := NewAccessor(nil).Search(nil, SELF.String(), p.IsDraft)
|
d, code, err := ao.GetAccessor(nil).Search(nil, SELF.String())
|
||||||
if code != 200 || err != nil || len(d) == 0 {
|
if code != 200 || err != nil || len(d) == 0 {
|
||||||
return false, ""
|
return false, ""
|
||||||
}
|
}
|
||||||
id := d[0].GetID()
|
id := d[0].GetID()
|
||||||
return p.UUID == id, id
|
return ao.UUID == id, id
|
||||||
}
|
}
|
||||||
|
|
||||||
// LaunchPeerExecution launches an execution on a peer
|
// LaunchPeerExecution launches an execution on a peer
|
||||||
func (p *Peer) LaunchPeerExecution(peerID string, dataID string, dt tools.DataType, method tools.METHOD, body interface{}, caller *tools.HTTPCaller) (*PeerExecution, error) {
|
func (p *Peer) LaunchPeerExecution(peerID string, dataID string, dt tools.DataType, method tools.METHOD, body map[string]interface{}, caller *tools.HTTPCaller) (*PeerExecution, error) {
|
||||||
p.UUID = peerID
|
p.UUID = peerID
|
||||||
return cache.LaunchPeerExecution(peerID, dataID, dt, method, body, caller) // Launch the execution on the peer through the cache
|
return cache.LaunchPeerExecution(peerID, dataID, dt, method, body, caller) // Launch the execution on the peer through the cache
|
||||||
}
|
}
|
||||||
func (d *Peer) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
|
||||||
data := NewAccessor(request) // Create a new instance of the accessor
|
func (ao *Peer) GetID() string {
|
||||||
|
return ao.UUID
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Peer) GenerateID() {
|
||||||
|
if r.UUID == "" {
|
||||||
|
r.UUID = uuid.New().String()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Peer) GetName() string {
|
||||||
|
return d.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Peer) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||||
|
data := New() // Create a new instance of the accessor
|
||||||
|
data.Init(tools.PEER, caller) // Initialize the accessor with the PEER model type
|
||||||
return data
|
return data
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Peer) CanDelete() bool {
|
func (dma *Peer) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||||
return false // only draft order can be deleted
|
b, err := json.Marshal(j)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, dma)
|
||||||
|
return dma
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *Peer) Serialize() map[string]interface{} {
|
||||||
|
var m map[string]interface{}
|
||||||
|
b, err := json.Marshal(dma)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, &m)
|
||||||
|
return m
|
||||||
}
|
}
|
||||||
|
@ -4,6 +4,7 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
@ -16,7 +17,7 @@ import (
|
|||||||
type PeerExecution struct {
|
type PeerExecution struct {
|
||||||
Method string `json:"method" bson:"method"`
|
Method string `json:"method" bson:"method"`
|
||||||
Url string `json:"url" bson:"url"`
|
Url string `json:"url" bson:"url"`
|
||||||
Body interface{} `json:"body" bson:"body"`
|
Body map[string]interface{} `json:"body" bson:"body"`
|
||||||
DataType int `json:"data_type" bson:"data_type"`
|
DataType int `json:"data_type" bson:"data_type"`
|
||||||
DataID string `json:"data_id" bson:"data_id"`
|
DataID string `json:"data_id" bson:"data_id"`
|
||||||
}
|
}
|
||||||
@ -28,90 +29,99 @@ type PeerCache struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// urlFormat formats the URL of the peer with the data type API function
|
// urlFormat formats the URL of the peer with the data type API function
|
||||||
func (p *PeerCache) urlFormat(hostUrl string, dt tools.DataType) string {
|
func (p *PeerCache) urlFormat(url string, dt tools.DataType) string {
|
||||||
// localhost is replaced by the local peer URL
|
// localhost is replaced by the local peer URL
|
||||||
// because localhost must collide on a web request security protocol
|
// because localhost must collide on a web request security protocol
|
||||||
/*localhost := ""
|
localhost := ""
|
||||||
if strings.Contains(hostUrl, "localhost") {
|
if strings.Contains(url, "localhost") {
|
||||||
localhost = "localhost"
|
localhost = "localhost"
|
||||||
}
|
}
|
||||||
if strings.Contains(hostUrl, "127.0.0.1") {
|
if strings.Contains(url, "127.0.0.1") {
|
||||||
localhost = "127.0.0.1"
|
localhost = "127.0.0.1"
|
||||||
}
|
}
|
||||||
if localhost != "" {
|
if localhost != "" {
|
||||||
r := regexp.MustCompile("(" + localhost + ":[0-9]+)")
|
r := regexp.MustCompile("(" + localhost + ":[0-9]+)")
|
||||||
t := r.FindString(hostUrl)
|
t := r.FindString(url)
|
||||||
if t != "" {
|
if t != "" {
|
||||||
hostUrl = strings.Replace(hostUrl, t, dt.API()+":8080/oc", -1)
|
url = strings.Replace(url, t, dt.API()+":8080/oc", -1)
|
||||||
} else {
|
} else {
|
||||||
hostUrl = strings.ReplaceAll(hostUrl, localhost, dt.API()+":8080/oc")
|
url = strings.ReplaceAll(url, localhost, dt.API()+":8080/oc")
|
||||||
}
|
}
|
||||||
} else {*/
|
} else {
|
||||||
hostUrl = hostUrl + "/" + strings.ReplaceAll(dt.API(), "oc-", "")
|
url = url + "/" + dt.API()
|
||||||
//}
|
}
|
||||||
fmt.Println("Contacting", hostUrl)
|
return url
|
||||||
return hostUrl
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// checkPeerStatus checks the status of a peer
|
// checkPeerStatus checks the status of a peer
|
||||||
func (p *PeerCache) checkPeerStatus(peerID string, appName string) (*Peer, bool) {
|
func (p *PeerCache) checkPeerStatus(peerID string, appName string, caller *tools.HTTPCaller) (*Peer, bool) {
|
||||||
api := tools.API{}
|
api := tools.API{}
|
||||||
access := NewShallowAccessor()
|
access := (&Peer{}).GetAccessor(nil)
|
||||||
res, code, _ := access.LoadOne(peerID) // Load the peer from db
|
res, code, _ := access.LoadOne(peerID) // Load the peer from db
|
||||||
if code != 200 { // no peer no party
|
if code != 200 { // no peer no party
|
||||||
return nil, false
|
return nil, false
|
||||||
}
|
}
|
||||||
url := p.urlFormat(res.(*Peer).Url, tools.PEER) + "/status" // Format the URL
|
methods := caller.URLS[tools.PEER] // Get the methods url of the peer
|
||||||
|
if methods == nil {
|
||||||
|
return res.(*Peer), false
|
||||||
|
}
|
||||||
|
meth := methods[tools.POST] // Get the POST method to check status
|
||||||
|
if meth == "" {
|
||||||
|
return res.(*Peer), false
|
||||||
|
}
|
||||||
|
url := p.urlFormat(res.(*Peer).Url, tools.PEER) + meth // Format the URL
|
||||||
|
fmt.Println("Checking peer status on", url, "...")
|
||||||
state, services := api.CheckRemotePeer(url)
|
state, services := api.CheckRemotePeer(url)
|
||||||
res.(*Peer).ServicesState = services // Update the services states of the peer
|
fmt.Println("Checking peer status on", url, state, services) // Check the status of the peer
|
||||||
|
res.(*Peer).Services = services // Update the services states of the peer
|
||||||
access.UpdateOne(res, peerID) // Update the peer in the db
|
access.UpdateOne(res, peerID) // Update the peer in the db
|
||||||
return res.(*Peer), state != tools.DEAD && services[appName] == 0 // Return the peer and its status
|
return res.(*Peer), state != tools.DEAD && services[appName] == 0 // Return the peer and its status
|
||||||
}
|
}
|
||||||
|
|
||||||
// LaunchPeerExecution launches an execution on a peer
|
// LaunchPeerExecution launches an execution on a peer
|
||||||
// The method contacts the path described by : peer.Url + datatype path (from enums) + replacement of id by dataID
|
|
||||||
func (p *PeerCache) LaunchPeerExecution(peerID string, dataID string,
|
func (p *PeerCache) LaunchPeerExecution(peerID string, dataID string,
|
||||||
dt tools.DataType, method tools.METHOD, body interface{}, caller *tools.HTTPCaller) (*PeerExecution, error) {
|
dt tools.DataType, method tools.METHOD, body map[string]interface{}, caller *tools.HTTPCaller) (*PeerExecution, error) {
|
||||||
fmt.Println("Launching peer execution on", caller.URLS, dt, method)
|
fmt.Println("Launching peer execution on", caller.URLS, dt, method)
|
||||||
methods := caller.URLS[dt] // Get the methods url of the data type
|
methods := caller.URLS[dt] // Get the methods url of the data type
|
||||||
if m, ok := methods[method]; !ok || m == "" {
|
if m, ok := methods[method]; !ok || m == "" {
|
||||||
return nil, errors.New("Requested method " + method.String() + " not declared in HTTPCaller")
|
return nil, errors.New("no path found")
|
||||||
}
|
}
|
||||||
path := methods[method] // Get the path corresponding to the action we want to execute
|
meth := methods[method] // Get the method url to execute
|
||||||
path = strings.ReplaceAll(path, ":id", dataID) // Replace the id in the path in case of a DELETE / UPDATE method (it's a standard naming in OC)
|
meth = strings.ReplaceAll(meth, ":id", dataID) // Replace the id in the url in case of a DELETE / UPDATE method (it's a standard naming in OC)
|
||||||
url := ""
|
url := ""
|
||||||
|
|
||||||
// Check the status of the peer
|
// Check the status of the peer
|
||||||
if mypeer, ok := p.checkPeerStatus(peerID, dt.API()); !ok && mypeer != nil {
|
if mypeer, ok := p.checkPeerStatus(peerID, dt.API(), caller); !ok && mypeer != nil {
|
||||||
// If the peer is not reachable, add the execution to the failed executions list
|
// If the peer is not reachable, add the execution to the failed executions list
|
||||||
pexec := &PeerExecution{
|
pexec := &PeerExecution{
|
||||||
Method: method.String(),
|
Method: method.String(),
|
||||||
Url: p.urlFormat((mypeer.Url), dt) + path, // the url is constitued of : host URL + resource path + action path (ex : mypeer.com/datacenter/resourcetype/path/to/action)
|
Url: p.urlFormat((mypeer.Url)+meth, dt),
|
||||||
Body: body,
|
Body: body,
|
||||||
DataType: dt.EnumIndex(),
|
DataType: dt.EnumIndex(),
|
||||||
DataID: dataID,
|
DataID: dataID,
|
||||||
}
|
}
|
||||||
mypeer.AddExecution(*pexec)
|
mypeer.AddExecution(*pexec)
|
||||||
NewShallowAccessor().UpdateOne(mypeer, peerID) // Update the peer in the db
|
mypeer.GetAccessor(nil).UpdateOne(mypeer, peerID) // Update the peer in the db
|
||||||
return nil, errors.New("peer is not reachable")
|
return nil, errors.New("peer is not reachable")
|
||||||
} else {
|
} else {
|
||||||
if mypeer == nil {
|
if mypeer == nil {
|
||||||
return nil, errors.New("peer not found")
|
return nil, errors.New("peer not found")
|
||||||
}
|
}
|
||||||
// If the peer is reachable, launch the execution
|
// If the peer is reachable, launch the execution
|
||||||
url = p.urlFormat((mypeer.Url), dt) + path // Format the URL
|
url = p.urlFormat((mypeer.Url)+meth, dt) // Format the URL
|
||||||
tmp := mypeer.FailedExecution // Get the failed executions list
|
tmp := mypeer.FailedExecution // Get the failed executions list
|
||||||
mypeer.FailedExecution = []PeerExecution{} // Reset the failed executions list
|
mypeer.FailedExecution = []PeerExecution{} // Reset the failed executions list
|
||||||
NewShallowAccessor().UpdateOne(mypeer, peerID) // Update the peer in the db
|
mypeer.GetAccessor(nil).UpdateOne(mypeer, peerID) // Update the peer in the db
|
||||||
for _, v := range tmp { // Retry the failed executions
|
for _, v := range tmp { // Retry the failed executions
|
||||||
go p.exec(v.Url, tools.ToMethod(v.Method), v.Body, caller)
|
go p.exec(v.Url, tools.ToMethod(v.Method), v.Body, caller)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
fmt.Println("URL exec", url)
|
||||||
return nil, p.exec(url, method, body, caller) // Execute the method
|
return nil, p.exec(url, method, body, caller) // Execute the method
|
||||||
}
|
}
|
||||||
|
|
||||||
// exec executes the method on the peer
|
// exec executes the method on the peer
|
||||||
func (p *PeerCache) exec(url string, method tools.METHOD, body interface{}, caller *tools.HTTPCaller) error {
|
func (p *PeerCache) exec(url string, method tools.METHOD, body map[string]interface{}, caller *tools.HTTPCaller) error {
|
||||||
var b []byte
|
var b []byte
|
||||||
var err error
|
var err error
|
||||||
if method == tools.POST { // Execute the POST method if it's a POST method
|
if method == tools.POST { // Execute the POST method if it's a POST method
|
||||||
@ -123,11 +133,8 @@ func (p *PeerCache) exec(url string, method tools.METHOD, body interface{}, call
|
|||||||
if method == tools.DELETE { // Execute the DELETE method if it's a DELETE method
|
if method == tools.DELETE { // Execute the DELETE method if it's a DELETE method
|
||||||
b, err = caller.CallDelete(url, "")
|
b, err = caller.CallDelete(url, "")
|
||||||
}
|
}
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
var m map[string]interface{}
|
var m map[string]interface{}
|
||||||
err = json.Unmarshal(b, &m)
|
json.Unmarshal(b, &m)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -4,40 +4,17 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||||
"cloud.o-forge.io/core/oc-lib/logs"
|
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type peerMongoAccessor struct {
|
type peerMongoAccessor struct {
|
||||||
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
|
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
|
||||||
overrideAuth bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new instance of the peerMongoAccessor
|
// New creates a new instance of the peerMongoAccessor
|
||||||
func NewShallowAccessor() *peerMongoAccessor {
|
func New() *peerMongoAccessor {
|
||||||
return &peerMongoAccessor{
|
return &peerMongoAccessor{}
|
||||||
overrideAuth: true,
|
|
||||||
AbstractAccessor: utils.AbstractAccessor{
|
|
||||||
Logger: logs.CreateLogger(tools.PEER.String()), // Create a logger with the data type
|
|
||||||
Type: tools.PEER,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewAccessor(request *tools.APIRequest) *peerMongoAccessor {
|
|
||||||
return &peerMongoAccessor{
|
|
||||||
overrideAuth: false,
|
|
||||||
AbstractAccessor: utils.AbstractAccessor{
|
|
||||||
Logger: logs.CreateLogger(tools.PEER.String()), // Create a logger with the data type
|
|
||||||
Request: request,
|
|
||||||
Type: tools.PEER,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (wfa *peerMongoAccessor) ShouldVerifyAuth() bool {
|
|
||||||
return !wfa.overrideAuth
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -45,55 +22,80 @@ func (wfa *peerMongoAccessor) ShouldVerifyAuth() bool {
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
func (wfa *peerMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
func (wfa *peerMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||||
return utils.GenericDeleteOne(id, wfa)
|
return wfa.GenericDeleteOne(id, wfa)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (wfa *peerMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
func (wfa *peerMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||||
return utils.GenericUpdateOne(set.(*Peer), id, wfa, &Peer{})
|
return wfa.GenericUpdateOne(set.(*Peer), id, wfa, &Peer{})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (wfa *peerMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
func (wfa *peerMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
return utils.GenericStoreOne(data.(*Peer), wfa)
|
return wfa.GenericStoreOne(data.(*Peer), wfa)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (wfa *peerMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
func (wfa *peerMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
return utils.GenericStoreOne(data, wfa)
|
return wfa.GenericStoreOne(data, wfa)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dca *peerMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
func (wfa *peerMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||||
return utils.GenericLoadOne[*Peer](id, func(d utils.DBObject) (utils.DBObject, int, error) {
|
var peer Peer
|
||||||
return d, 200, nil
|
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
|
||||||
}, dca)
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
res_mongo.Decode(&peer)
|
||||||
|
|
||||||
|
return &peer, 200, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (wfa *peerMongoAccessor) LoadAll(isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
func (wfa peerMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||||
return utils.GenericLoadAll[*Peer](func(d utils.DBObject) utils.ShallowDBObject {
|
objs := []utils.ShallowDBObject{}
|
||||||
return d
|
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||||
}, isDraft, wfa)
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []Peer
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
for _, r := range results {
|
||||||
|
objs = append(objs, &r)
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (wfa *peerMongoAccessor) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
func (wfa *peerMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||||
return utils.GenericSearch[*Peer](filters, search, wfa.getDefaultFilter(search),
|
objs := []utils.ShallowDBObject{}
|
||||||
func(d utils.DBObject) utils.ShallowDBObject {
|
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||||
return d
|
s, err := strconv.Atoi(search)
|
||||||
}, isDraft, wfa)
|
if err == nil {
|
||||||
}
|
filters = &dbs.Filters{
|
||||||
func (a *peerMongoAccessor) getDefaultFilter(search string) *dbs.Filters {
|
|
||||||
if i, err := strconv.Atoi(search); err == nil {
|
|
||||||
return &dbs.Filters{
|
|
||||||
Or: map[string][]dbs.Filter{ // search by name if no filters are provided
|
Or: map[string][]dbs.Filter{ // search by name if no filters are provided
|
||||||
"state": {{Operator: dbs.EQUAL.String(), Value: i}},
|
"state": {{Operator: dbs.EQUAL.String(), Value: s}},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if search == "*" {
|
filters = &dbs.Filters{
|
||||||
search = ""
|
|
||||||
}
|
|
||||||
return &dbs.Filters{
|
|
||||||
Or: map[string][]dbs.Filter{ // search by name if no filters are provided
|
Or: map[string][]dbs.Filter{ // search by name if no filters are provided
|
||||||
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
"url": {{Operator: dbs.LIKE.String(), Value: search}},
|
"url": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []Peer
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
for _, r := range results {
|
||||||
|
objs = append(objs, &r)
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
}
|
}
|
||||||
|
125
models/resource_model/resource_model.go
Normal file
125
models/resource_model/resource_model.go
Normal file
@ -0,0 +1,125 @@
|
|||||||
|
package resource_model
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
|
"github.com/google/uuid"
|
||||||
|
)
|
||||||
|
|
||||||
|
type WebResource struct {
|
||||||
|
Protocol string `bson:"protocol,omitempty" json:"protocol,omitempty"` // Protocol is the protocol of the URL
|
||||||
|
Path string `bson:"path,omitempty" json:"path,omitempty"` // Path is the path of the URL
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* AbstractResource is a struct that represents a resource
|
||||||
|
* it defines the resource data
|
||||||
|
*/
|
||||||
|
type AbstractResource struct {
|
||||||
|
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
|
||||||
|
ShortDescription string `json:"short_description,omitempty" bson:"short_description,omitempty" validate:"required"` // ShortDescription is the short description of the resource
|
||||||
|
Description string `json:"description,omitempty" bson:"description,omitempty"` // Description is the description of the resource
|
||||||
|
Logo string `json:"logo,omitempty" bson:"logo,omitempty" validate:"required"` // Logo is the logo of the resource
|
||||||
|
Owner string `json:"owner,omitempty" bson:"owner,omitempty" validate:"required"` // Owner is the owner of the resource
|
||||||
|
OwnerLogo string `json:"owner_logo,omitempty" bson:"owner_logo,omitempty"` // OwnerLogo is the owner logo of the resource
|
||||||
|
SourceUrl string `json:"source_url,omitempty" bson:"source_url,omitempty" validate:"required"` // SourceUrl is the source URL of the resource
|
||||||
|
PeerID string `json:"peer_id,omitempty" bson:"peer_id,omitempty"` // PeerID is the ID of the peer getting this resource
|
||||||
|
Price string `json:"price,omitempty" bson:"price,omitempty"` // Price is the price of access to the resource
|
||||||
|
License string `json:"license,omitempty" bson:"license,omitempty"` // License is the license of the resource
|
||||||
|
ResourceModel *ResourceModel `json:"resource_model,omitempty" bson:"resource_model,omitempty"` // ResourceModel is the model of the resource
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* GetModelType returns the type of the model key
|
||||||
|
*/
|
||||||
|
func (abs *AbstractResource) GetModelType(cat string, key string) interface{} {
|
||||||
|
if abs.ResourceModel == nil || abs.ResourceModel.Model == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if _, ok := abs.ResourceModel.Model[key]; !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return abs.ResourceModel.Model[cat][key].Type
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* GetModelKeys returns the keys of the model
|
||||||
|
*/
|
||||||
|
func (abs *AbstractResource) GetModelKeys() []string {
|
||||||
|
keys := make([]string, 0)
|
||||||
|
for k := range abs.ResourceModel.Model {
|
||||||
|
keys = append(keys, k)
|
||||||
|
}
|
||||||
|
return keys
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* GetModelReadOnly returns the readonly of the model key
|
||||||
|
*/
|
||||||
|
func (abs *AbstractResource) GetModelReadOnly(cat string, key string) interface{} {
|
||||||
|
if abs.ResourceModel == nil || abs.ResourceModel.Model == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if _, ok := abs.ResourceModel.Model[key]; !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return abs.ResourceModel.Model[cat][key].ReadOnly
|
||||||
|
}
|
||||||
|
|
||||||
|
type Model struct {
|
||||||
|
Type string `json:"type,omitempty" bson:"type,omitempty"` // Type is the type of the model
|
||||||
|
ReadOnly bool `json:"readonly,omitempty" bson:"readonly,omitempty"` // ReadOnly is the readonly of the model
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* ResourceModel is a struct that represents a resource model
|
||||||
|
* it defines the resource metadata and specificity
|
||||||
|
* Warning: This struct is not user available, it is only used by the system
|
||||||
|
*/
|
||||||
|
type ResourceModel struct {
|
||||||
|
UUID string `json:"id,omitempty" bson:"id,omitempty" validate:"required"`
|
||||||
|
ResourceType string `json:"resource_type,omitempty" bson:"resource_type,omitempty" validate:"required"`
|
||||||
|
VarRefs map[string]string `json:"var_refs,omitempty" bson:"var_refs,omitempty"` // VarRefs is the variable references of the model
|
||||||
|
Model map[string]map[string]Model `json:"model,omitempty" bson:"model,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ao *ResourceModel) GetID() string {
|
||||||
|
return ao.UUID
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ao *ResourceModel) UpToDate() {}
|
||||||
|
|
||||||
|
func (r *ResourceModel) GenerateID() {
|
||||||
|
r.UUID = uuid.New().String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *ResourceModel) GetName() string {
|
||||||
|
return d.UUID
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *ResourceModel) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||||
|
data := &ResourceModelMongoAccessor{}
|
||||||
|
data.Init(tools.RESOURCE_MODEL, caller)
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *ResourceModel) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||||
|
b, err := json.Marshal(j)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, dma)
|
||||||
|
return dma
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *ResourceModel) Serialize() map[string]interface{} {
|
||||||
|
var m map[string]interface{}
|
||||||
|
b, err := json.Marshal(dma)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, &m)
|
||||||
|
return m
|
||||||
|
}
|
83
models/resource_model/resource_model_mongo_accessor.go
Normal file
83
models/resource_model/resource_model_mongo_accessor.go
Normal file
@ -0,0 +1,83 @@
|
|||||||
|
package resource_model
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ResourceModelMongoAccessor struct {
|
||||||
|
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Nothing special here, just the basic CRUD operations
|
||||||
|
*/
|
||||||
|
|
||||||
|
func (wfa *ResourceModelMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||||
|
return wfa.GenericDeleteOne(id, wfa)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wfa *ResourceModelMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||||
|
return wfa.GenericUpdateOne(set, id, wfa, &ResourceModel{})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wfa *ResourceModelMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
|
return wfa.GenericStoreOne(data, wfa)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wfa *ResourceModelMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
|
return wfa.GenericStoreOne(data, wfa)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wfa *ResourceModelMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||||
|
var workflow ResourceModel
|
||||||
|
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
res_mongo.Decode(&workflow)
|
||||||
|
return &workflow, 200, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wfa ResourceModelMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||||
|
objs := []utils.ShallowDBObject{}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []ResourceModel
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
for _, r := range results {
|
||||||
|
objs = append(objs, &r)
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wfa *ResourceModelMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||||
|
objs := []utils.ShallowDBObject{}
|
||||||
|
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||||
|
filters = &dbs.Filters{
|
||||||
|
Or: map[string][]dbs.Filter{
|
||||||
|
"resource_type": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []ResourceModel
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
for _, r := range results {
|
||||||
|
objs = append(objs, &r)
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
|
}
|
@ -1,192 +0,0 @@
|
|||||||
package resources
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/models"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* ComputeResource is a struct that represents a compute resource
|
|
||||||
* it defines the resource compute
|
|
||||||
*/
|
|
||||||
type ComputeResource struct {
|
|
||||||
AbstractInstanciatedResource[*ComputeResourceInstance]
|
|
||||||
Architecture string `json:"architecture,omitempty" bson:"architecture,omitempty"` // Architecture is the architecture
|
|
||||||
Infrastructure enum.InfrastructureType `json:"infrastructure" bson:"infrastructure" default:"-1"` // Infrastructure is the infrastructure
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ComputeResource) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
|
||||||
return NewAccessor[*ComputeResource](tools.COMPUTE_RESOURCE, request, func() utils.DBObject { return &ComputeResource{} })
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *ComputeResource) GetType() string {
|
|
||||||
return tools.COMPUTE_RESOURCE.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (abs *ComputeResource) ConvertToPricedResource(
|
|
||||||
t tools.DataType, request *tools.APIRequest) pricing.PricedItemITF {
|
|
||||||
if t != tools.COMPUTE_RESOURCE {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
p := abs.AbstractInstanciatedResource.ConvertToPricedResource(t, request)
|
|
||||||
priced := p.(*PricedResource)
|
|
||||||
return &PricedComputeResource{
|
|
||||||
PricedResource: *priced,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type ComputeNode struct {
|
|
||||||
Name string `json:"name,omitempty" bson:"name,omitempty"`
|
|
||||||
Quantity int64 `json:"quantity" bson:"quantity" default:"1"`
|
|
||||||
RAM *models.RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
|
|
||||||
CPUs map[string]int64 `bson:"cpus,omitempty" json:"cpus,omitempty"` // CPUs is the list of CPUs key is model
|
|
||||||
GPUs map[string]int64 `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs key is model
|
|
||||||
}
|
|
||||||
|
|
||||||
type ComputeResourceInstance struct {
|
|
||||||
ResourceInstance[*ComputeResourcePartnership]
|
|
||||||
Source string `json:"source,omitempty" bson:"source,omitempty"` // Source is the source of the resource
|
|
||||||
SecurityLevel string `json:"security_level,omitempty" bson:"security_level,omitempty"`
|
|
||||||
PowerSources []string `json:"power_sources,omitempty" bson:"power_sources,omitempty"`
|
|
||||||
AnnualCO2Emissions float64 `json:"annual_co2_emissions,omitempty" bson:"co2_emissions,omitempty"`
|
|
||||||
CPUs map[string]*models.CPU `bson:"cpus,omitempty" json:"cpus,omitempty"` // CPUs is the list of CPUs key is model
|
|
||||||
GPUs map[string]*models.GPU `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs key is model
|
|
||||||
Nodes []*ComputeNode `json:"nodes,omitempty" bson:"nodes,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ComputeResourcePartnership struct {
|
|
||||||
ResourcePartnerShip[*ComputeResourcePricingProfile]
|
|
||||||
MaxAllowedCPUsCores map[string]int `json:"allowed_cpus,omitempty" bson:"allowed_cpus,omitempty"`
|
|
||||||
MaxAllowedGPUsMemoryGB map[string]float64 `json:"allowed_gpus,omitempty" bson:"allowed_gpus,omitempty"`
|
|
||||||
MaxAllowedRAMSize float64 `json:"allowed_ram,omitempty" bson:"allowed_ram,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ComputeResourcePricingProfile struct {
|
|
||||||
pricing.ExploitPricingProfile[pricing.TimePricingStrategy]
|
|
||||||
// ExploitPricingProfile is the pricing profile of a compute it means that we exploit the resource for an amount of continuous time
|
|
||||||
CPUsPrices map[string]float64 `json:"cpus_prices,omitempty" bson:"cpus_prices,omitempty"` // CPUsPrices is the prices of the CPUs
|
|
||||||
GPUsPrices map[string]float64 `json:"gpus_prices,omitempty" bson:"gpus_prices,omitempty"` // GPUsPrices is the prices of the GPUs
|
|
||||||
RAMPrice float64 `json:"ram_price" bson:"ram_price" default:"-1"` // RAMPrice is the price of the RAM
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *ComputeResourcePricingProfile) IsPurchased() bool {
|
|
||||||
return p.Pricing.BuyingStrategy != pricing.PAY_PER_USE
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *ComputeResourcePricingProfile) GetOverrideStrategyValue() int {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
// NOT A PROPER QUANTITY
|
|
||||||
// amountOfData is the number of CPUs, GPUs or RAM dependings on the params
|
|
||||||
func (p *ComputeResourcePricingProfile) GetPrice(amountOfData float64, explicitDuration float64, start time.Time, end time.Time, params ...string) (float64, error) {
|
|
||||||
if len(params) < 1 {
|
|
||||||
return 0, errors.New("params must be set")
|
|
||||||
}
|
|
||||||
pp := float64(0)
|
|
||||||
model := params[1]
|
|
||||||
if strings.Contains(params[0], "cpus") && len(params) > 1 {
|
|
||||||
if _, ok := p.CPUsPrices[model]; ok {
|
|
||||||
p.Pricing.Price = p.CPUsPrices[model]
|
|
||||||
}
|
|
||||||
r, err := p.Pricing.GetPrice(amountOfData, explicitDuration, start, &end)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
pp += r
|
|
||||||
|
|
||||||
}
|
|
||||||
if strings.Contains(params[0], "gpus") && len(params) > 1 {
|
|
||||||
if _, ok := p.GPUsPrices[model]; ok {
|
|
||||||
p.Pricing.Price = p.GPUsPrices[model]
|
|
||||||
}
|
|
||||||
r, err := p.Pricing.GetPrice(amountOfData, explicitDuration, start, &end)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
pp += r
|
|
||||||
}
|
|
||||||
if strings.Contains(params[0], "ram") {
|
|
||||||
if p.RAMPrice >= 0 {
|
|
||||||
p.Pricing.Price = p.RAMPrice
|
|
||||||
}
|
|
||||||
r, err := p.Pricing.GetPrice(float64(amountOfData), explicitDuration, start, &end)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
pp += r
|
|
||||||
}
|
|
||||||
return pp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type PricedComputeResource struct {
|
|
||||||
PricedResource
|
|
||||||
|
|
||||||
CPUsLocated map[string]float64 `json:"cpus_in_use" bson:"cpus_in_use"` // CPUsInUse is the list of CPUs in use
|
|
||||||
GPUsLocated map[string]float64 `json:"gpus_in_use" bson:"gpus_in_use"` // GPUsInUse is the list of GPUs in use
|
|
||||||
RAMLocated float64 `json:"ram_in_use" bson:"ram_in_use"` // RAMInUse is the RAM in use
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *PricedComputeResource) GetType() tools.DataType {
|
|
||||||
return tools.COMPUTE_RESOURCE
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *PricedComputeResource) GetPrice() (float64, error) {
|
|
||||||
now := time.Now()
|
|
||||||
if r.UsageStart == nil {
|
|
||||||
r.UsageStart = &now
|
|
||||||
}
|
|
||||||
if r.UsageEnd == nil {
|
|
||||||
add := r.UsageStart.Add(time.Duration(1 * time.Hour))
|
|
||||||
r.UsageEnd = &add
|
|
||||||
}
|
|
||||||
if r.SelectedPricing == nil {
|
|
||||||
if len(r.PricingProfiles) == 0 {
|
|
||||||
return 0, errors.New("pricing profile must be set on Priced Compute" + r.ResourceID)
|
|
||||||
}
|
|
||||||
r.SelectedPricing = &r.PricingProfiles[0]
|
|
||||||
}
|
|
||||||
pricing := *r.SelectedPricing
|
|
||||||
price := float64(0)
|
|
||||||
for _, l := range []map[string]float64{r.CPUsLocated, r.GPUsLocated} {
|
|
||||||
for model, amountOfData := range l {
|
|
||||||
cpus, err := pricing.GetPrice(float64(amountOfData), r.ExplicitBookingDurationS, *r.UsageStart, *r.UsageEnd, "cpus", model)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
price += cpus
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ram, err := pricing.GetPrice(r.RAMLocated, r.ExplicitBookingDurationS, *r.UsageStart, *r.UsageEnd, "ram")
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
price += ram
|
|
||||||
return price, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* FillWithDefaultProcessingUsage fills the order item with the default processing usage
|
|
||||||
* it depends on the processing usage only if nothing is set, during order
|
|
||||||
*/
|
|
||||||
func (i *PricedComputeResource) FillWithDefaultProcessingUsage(usage *ProcessingUsage) {
|
|
||||||
for _, cpu := range usage.CPUs {
|
|
||||||
if _, ok := i.CPUsLocated[cpu.Model]; !ok {
|
|
||||||
i.CPUsLocated[cpu.Model] = 0
|
|
||||||
}
|
|
||||||
if i.CPUsLocated[cpu.Model] < float64(cpu.Cores) {
|
|
||||||
i.CPUsLocated[cpu.Model] = float64(cpu.Cores)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, cpu := range usage.GPUs {
|
|
||||||
i.GPUsLocated[cpu.Model] = 1
|
|
||||||
}
|
|
||||||
i.RAMLocated = usage.RAM.SizeGb
|
|
||||||
}
|
|
101
models/resources/compute/compute.go
Normal file
101
models/resources/compute/compute.go
Normal file
@ -0,0 +1,101 @@
|
|||||||
|
package compute
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
|
)
|
||||||
|
|
||||||
|
type TechnologyEnum int
|
||||||
|
|
||||||
|
const (
|
||||||
|
DOCKER TechnologyEnum = iota
|
||||||
|
KUBERNETES
|
||||||
|
SLURM
|
||||||
|
HW
|
||||||
|
CONDOR
|
||||||
|
)
|
||||||
|
|
||||||
|
func (t TechnologyEnum) String() string {
|
||||||
|
return [...]string{"DOCKER", "KUBERNETES", "SLURM", "HW", "CONDOR"}[t]
|
||||||
|
}
|
||||||
|
|
||||||
|
type AccessEnum int
|
||||||
|
|
||||||
|
const (
|
||||||
|
SSH AccessEnum = iota
|
||||||
|
SSH_KUBE_API
|
||||||
|
SSH_SLURM
|
||||||
|
SSH_DOCKER
|
||||||
|
OPENCLOUD
|
||||||
|
VPN
|
||||||
|
)
|
||||||
|
|
||||||
|
func (a AccessEnum) String() string {
|
||||||
|
return [...]string{"SSH", "SSH_KUBE_API", "SSH_SLURM", "SSH_DOCKER", "OPENCLOUD", "VPN"}[a]
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* ComputeResource is a struct that represents a compute resource
|
||||||
|
* it defines the resource compute
|
||||||
|
*/
|
||||||
|
type ComputeResource struct {
|
||||||
|
resource_model.AbstractResource
|
||||||
|
Technology TechnologyEnum `json:"technology" bson:"technology" default:"0"` // Technology is the technology
|
||||||
|
Architecture string `json:"architecture,omitempty" bson:"architecture,omitempty"` // Architecture is the architecture
|
||||||
|
Access AccessEnum `json:"access" bson:"access default:"0"` // Access is the access
|
||||||
|
|
||||||
|
Localisation string `json:"localisation,omitempty" bson:"localisation,omitempty"` // Localisation is the localisation
|
||||||
|
|
||||||
|
CPUs []*CPU `bson:"cpus,omitempty" json:"cpus,omitempty"` // CPUs is the list of CPUs
|
||||||
|
RAM *RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
|
||||||
|
GPUs []*GPU `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *ComputeResource) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||||
|
b, err := json.Marshal(j)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, dma)
|
||||||
|
return dma
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *ComputeResource) Serialize() map[string]interface{} {
|
||||||
|
var m map[string]interface{}
|
||||||
|
b, err := json.Marshal(dma)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, &m)
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *ComputeResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||||
|
data := New()
|
||||||
|
data.Init(tools.COMPUTE_RESOURCE, caller)
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
// CPU is a struct that represents a CPU
|
||||||
|
type CPU struct {
|
||||||
|
Cores uint `bson:"cores,omitempty" json:"cores,omitempty"` //TODO: validate
|
||||||
|
Architecture string `bson:"architecture,omitempty" json:"architecture,omitempty"` //TOOD: enum
|
||||||
|
Shared bool `bson:"shared,omitempty" json:"shared,omitempty"`
|
||||||
|
MinimumMemory uint `bson:"minimum_memory,omitempty" json:"minimum_memory,omitempty"`
|
||||||
|
Platform string `bson:"platform,omitempty" json:"platform,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type RAM struct {
|
||||||
|
Size uint `bson:"size,omitempty" json:"size,omitempty" description:"Units in MB"`
|
||||||
|
Ecc bool `bson:"ecc,omitempty" json:"ecc,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type GPU struct {
|
||||||
|
CudaCores uint `bson:"cuda_cores,omitempty" json:"cuda_cores,omitempty"`
|
||||||
|
Model string `bson:"model,omitempty" json:"model,omitempty"`
|
||||||
|
Memory uint `bson:"memory,omitempty" json:"memory,omitempty" description:"Units in MB"`
|
||||||
|
TensorCores uint `bson:"tensor_cores,omitempty" json:"tensor_cores,omitempty"`
|
||||||
|
}
|
112
models/resources/compute/compute_mongo_accessor.go
Normal file
112
models/resources/compute/compute_mongo_accessor.go
Normal file
@ -0,0 +1,112 @@
|
|||||||
|
package compute
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
type computeMongoAccessor struct {
|
||||||
|
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new instance of the computeMongoAccessor
|
||||||
|
func New() *computeMongoAccessor {
|
||||||
|
return &computeMongoAccessor{}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Nothing special here, just the basic CRUD operations
|
||||||
|
*/
|
||||||
|
|
||||||
|
func (dca *computeMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||||
|
return dca.GenericDeleteOne(id, dca)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dca *computeMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||||
|
set.(*ComputeResource).ResourceModel = nil
|
||||||
|
return dca.GenericUpdateOne(set, id, dca, &ComputeResource{})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dca *computeMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
|
data.(*ComputeResource).ResourceModel = nil
|
||||||
|
return dca.GenericStoreOne(data, dca)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dca *computeMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
|
return dca.GenericStoreOne(data, dca)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dca *computeMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||||
|
var compute ComputeResource
|
||||||
|
|
||||||
|
res_mongo, code, err := mongo.MONGOService.LoadOne(id, dca.GetType())
|
||||||
|
if err != nil {
|
||||||
|
dca.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
|
||||||
|
res_mongo.Decode(&compute)
|
||||||
|
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||||
|
resources, _, err := accessor.Search(nil, dca.GetType())
|
||||||
|
if err == nil && len(resources) > 0 {
|
||||||
|
compute.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||||
|
}
|
||||||
|
return &compute, 200, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wfa computeMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||||
|
objs := []utils.ShallowDBObject{}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []ComputeResource
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||||
|
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||||
|
for _, r := range results {
|
||||||
|
if err == nil && len(resources) > 0 {
|
||||||
|
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||||
|
}
|
||||||
|
objs = append(objs, &r) // only get the abstract resource !
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wfa *computeMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||||
|
objs := []utils.ShallowDBObject{}
|
||||||
|
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||||
|
filters = &dbs.Filters{
|
||||||
|
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
|
||||||
|
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []ComputeResource
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||||
|
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||||
|
for _, r := range results {
|
||||||
|
if err == nil && len(resources) > 0 {
|
||||||
|
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||||
|
}
|
||||||
|
objs = append(objs, &r) // only get the abstract resource !
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
|
}
|
46
models/resources/compute/compute_test.go
Normal file
46
models/resources/compute/compute_test.go
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
package compute
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestStoreOneCompute(t *testing.T) {
|
||||||
|
dc := ComputeResource{
|
||||||
|
AbstractResource: resource_model.AbstractResource{
|
||||||
|
AbstractObject: utils.AbstractObject{Name: "testCompute"},
|
||||||
|
Description: "Lorem Ipsum",
|
||||||
|
Logo: "azerty.com",
|
||||||
|
Owner: "toto",
|
||||||
|
OwnerLogo: "totoLogo",
|
||||||
|
SourceUrl: "azerty.fr",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
dcma := New()
|
||||||
|
id, _, _ := dcma.StoreOne(&dc)
|
||||||
|
|
||||||
|
assert.NotEmpty(t, id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLoadOneCompute(t *testing.T) {
|
||||||
|
dc := ComputeResource{
|
||||||
|
AbstractResource: resource_model.AbstractResource{
|
||||||
|
AbstractObject: utils.AbstractObject{Name: "testCompute"},
|
||||||
|
Description: "Lorem Ipsum",
|
||||||
|
Logo: "azerty.com",
|
||||||
|
Owner: "toto",
|
||||||
|
OwnerLogo: "totoLogo",
|
||||||
|
SourceUrl: "azerty.fr",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
dcma := New()
|
||||||
|
new_dc, _, _ := dcma.StoreOne(&dc)
|
||||||
|
|
||||||
|
assert.Equal(t, dc, new_dc)
|
||||||
|
}
|
@ -1,179 +0,0 @@
|
|||||||
package resources
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/models"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* DataResource is a struct that represents a data resource
|
|
||||||
* it defines the resource data
|
|
||||||
*/
|
|
||||||
type DataResource struct {
|
|
||||||
AbstractInstanciatedResource[*DataInstance]
|
|
||||||
Type string `bson:"type,omitempty" json:"type,omitempty"`
|
|
||||||
Quality string `bson:"quality,omitempty" json:"quality,omitempty"`
|
|
||||||
OpenData bool `bson:"open_data" json:"open_data" default:"false"` // Type is the type of the storage
|
|
||||||
Static bool `bson:"static" json:"static" default:"false"`
|
|
||||||
UpdatePeriod *time.Time `bson:"update_period,omitempty" json:"update_period,omitempty"`
|
|
||||||
PersonalData bool `bson:"personal_data,omitempty" json:"personal_data,omitempty"`
|
|
||||||
AnonymizedPersonalData bool `bson:"anonymized_personal_data,omitempty" json:"anonymized_personal_data,omitempty"`
|
|
||||||
SizeGB float64 `json:"size,omitempty" bson:"size,omitempty"` // SizeGB is the size of the data License DataLicense `json:"license" bson:"license" description:"license of the data" default:"0"` // License is the license of the data
|
|
||||||
// ? Interest DataLicense `json:"interest" bson:"interest" description:"interest of the data" default:"0"` // Interest is the interest of the data
|
|
||||||
Example string `json:"example,omitempty" bson:"example,omitempty" description:"base64 encoded data"` // Example is an example of the data
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DataResource) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
|
||||||
return NewAccessor[*DataResource](tools.DATA_RESOURCE, request, func() utils.DBObject { return &DataResource{} }) // Create a new instance of the accessor
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *DataResource) GetType() string {
|
|
||||||
return tools.DATA_RESOURCE.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (abs *DataResource) ConvertToPricedResource(
|
|
||||||
t tools.DataType, request *tools.APIRequest) pricing.PricedItemITF {
|
|
||||||
if t != tools.DATA_RESOURCE {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
p := abs.AbstractInstanciatedResource.ConvertToPricedResource(t, request)
|
|
||||||
priced := p.(*PricedResource)
|
|
||||||
return &PricedDataResource{
|
|
||||||
PricedResource: *priced,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type DataInstance struct {
|
|
||||||
ResourceInstance[*DataResourcePartnership]
|
|
||||||
Source string `json:"source,omitempty" bson:"source,omitempty"` // Source is the source of the data
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ri *DataInstance) StoreDraftDefault() {
|
|
||||||
found := false
|
|
||||||
for _, p := range ri.ResourceInstance.Env {
|
|
||||||
if p.Attr == "source" {
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
ri.ResourceInstance.Env = append(ri.ResourceInstance.Env, models.Param{
|
|
||||||
Attr: "source",
|
|
||||||
Value: ri.Source,
|
|
||||||
Readonly: true,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
ri.ResourceInstance.StoreDraftDefault()
|
|
||||||
}
|
|
||||||
|
|
||||||
type DataResourcePartnership struct {
|
|
||||||
ResourcePartnerShip[*DataResourcePricingProfile]
|
|
||||||
MaxDownloadableGbAllowed float64 `json:"allowed_gb,omitempty" bson:"allowed_gb,omitempty"`
|
|
||||||
PersonalDataAllowed bool `json:"personal_data_allowed,omitempty" bson:"personal_data_allowed,omitempty"`
|
|
||||||
AnonymizedPersonalDataAllowed bool `json:"anonymized_personal_data_allowed,omitempty" bson:"anonymized_personal_data_allowed,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type DataResourcePricingStrategy int
|
|
||||||
|
|
||||||
const (
|
|
||||||
PER_DOWNLOAD DataResourcePricingStrategy = iota
|
|
||||||
PER_TB_DOWNLOADED
|
|
||||||
PER_GB_DOWNLOADED
|
|
||||||
PER_MB_DOWNLOADED
|
|
||||||
PER_KB_DOWNLOADED
|
|
||||||
)
|
|
||||||
|
|
||||||
func (t DataResourcePricingStrategy) String() string {
|
|
||||||
return [...]string{"PER DOWNLOAD", "PER TB DOWNLOADED", "PER GB DOWNLOADED", "PER MB DOWNLOADED", "PER KB DOWNLOADED"}[t]
|
|
||||||
}
|
|
||||||
|
|
||||||
func DataResourcePricingStrategyList() []DataResourcePricingStrategy {
|
|
||||||
return []DataResourcePricingStrategy{PER_DOWNLOAD, PER_TB_DOWNLOADED, PER_GB_DOWNLOADED, PER_MB_DOWNLOADED, PER_KB_DOWNLOADED}
|
|
||||||
}
|
|
||||||
|
|
||||||
func ToDataResourcePricingStrategy(i int) DataResourcePricingStrategy {
|
|
||||||
return DataResourcePricingStrategy(i)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t DataResourcePricingStrategy) GetStrategy() string {
|
|
||||||
return [...]string{"PER_DOWNLOAD", "PER_GB", "PER_MB", "PER_KB"}[t]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t DataResourcePricingStrategy) GetStrategyValue() int {
|
|
||||||
return int(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t DataResourcePricingStrategy) GetQuantity(amountOfDataGB float64) (float64, error) {
|
|
||||||
switch t {
|
|
||||||
case PER_DOWNLOAD:
|
|
||||||
return 1, nil
|
|
||||||
case PER_TB_DOWNLOADED:
|
|
||||||
return amountOfDataGB * 1000, nil
|
|
||||||
case PER_GB_DOWNLOADED:
|
|
||||||
return amountOfDataGB, nil
|
|
||||||
case PER_MB_DOWNLOADED:
|
|
||||||
return amountOfDataGB / 1000, nil
|
|
||||||
case PER_KB_DOWNLOADED:
|
|
||||||
return amountOfDataGB / 1000000, nil
|
|
||||||
}
|
|
||||||
return 0, errors.New("pricing strategy not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
type DataResourcePricingProfile struct {
|
|
||||||
pricing.AccessPricingProfile[DataResourcePricingStrategy] // AccessPricingProfile is the pricing profile of a data it means that we can access the data for an amount of time
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *DataResourcePricingProfile) GetOverrideStrategyValue() int {
|
|
||||||
return p.Pricing.OverrideStrategy.GetStrategyValue()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *DataResourcePricingProfile) GetPrice(amountOfData float64, explicitDuration float64, start time.Time, end time.Time, params ...string) (float64, error) {
|
|
||||||
return p.Pricing.GetPrice(amountOfData, explicitDuration, start, &end)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *DataResourcePricingProfile) IsPurchased() bool {
|
|
||||||
return p.Pricing.BuyingStrategy != pricing.PAY_PER_USE
|
|
||||||
}
|
|
||||||
|
|
||||||
type PricedDataResource struct {
|
|
||||||
PricedResource
|
|
||||||
UsageStorageGB float64 `json:"storage_gb,omitempty" bson:"storage_gb,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *PricedDataResource) GetType() tools.DataType {
|
|
||||||
return tools.DATA_RESOURCE
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *PricedDataResource) GetPrice() (float64, error) {
|
|
||||||
fmt.Println("GetPrice", r.UsageStart, r.UsageEnd)
|
|
||||||
now := time.Now()
|
|
||||||
if r.UsageStart == nil {
|
|
||||||
r.UsageStart = &now
|
|
||||||
}
|
|
||||||
if r.UsageEnd == nil {
|
|
||||||
add := r.UsageStart.Add(time.Duration(1 * time.Hour))
|
|
||||||
r.UsageEnd = &add
|
|
||||||
}
|
|
||||||
if r.SelectedPricing == nil {
|
|
||||||
if len(r.PricingProfiles) == 0 {
|
|
||||||
return 0, errors.New("pricing profile must be set on Priced Data" + r.ResourceID)
|
|
||||||
}
|
|
||||||
r.SelectedPricing = &r.PricingProfiles[0]
|
|
||||||
}
|
|
||||||
pricing := *r.SelectedPricing
|
|
||||||
var err error
|
|
||||||
amountOfData := float64(1)
|
|
||||||
if pricing.GetOverrideStrategyValue() >= 0 {
|
|
||||||
amountOfData, err = ToDataResourcePricingStrategy(pricing.GetOverrideStrategyValue()).GetQuantity(r.UsageStorageGB)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return pricing.GetPrice(amountOfData, r.ExplicitBookingDurationS, *r.UsageStart, *r.UsageEnd)
|
|
||||||
}
|
|
65
models/resources/data/data.go
Normal file
65
models/resources/data/data.go
Normal file
@ -0,0 +1,65 @@
|
|||||||
|
package data
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
|
)
|
||||||
|
|
||||||
|
// enum of public private or licenced data
|
||||||
|
type DataLicense int
|
||||||
|
|
||||||
|
const (
|
||||||
|
PUBLIC DataLicense = iota
|
||||||
|
PRIVATE
|
||||||
|
LICENCED
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Struct of Usage Conditions
|
||||||
|
*/
|
||||||
|
type UsageConditions struct {
|
||||||
|
Usage string `json:"usage,omitempty" bson:"usage,omitempty" description:"usage of the data"` // Usage is the usage of the data
|
||||||
|
Actors []string `json:"actors,omitempty" bson:"actors,omitempty" description:"actors of the data"` // Actors is the actors of the data
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* DataResource is a struct that represents a data resource
|
||||||
|
* it defines the resource data
|
||||||
|
*/
|
||||||
|
type DataResource struct {
|
||||||
|
resource_model.AbstractResource // AbstractResource contains the basic fields of an object (id, name)
|
||||||
|
resource_model.WebResource
|
||||||
|
Type string `bson:"type,omitempty" json:"type,omitempty"` // Type is the type of the storage
|
||||||
|
UsageConditions UsageConditions `json:"usage_conditions,omitempty" bson:"usage_conditions,omitempty" description:"usage conditions of the data"` // UsageConditions is the usage conditions of the data
|
||||||
|
License DataLicense `json:"license" bson:"license" description:"license of the data" default:"0"` // License is the license of the data
|
||||||
|
Interest DataLicense `json:"interest" bson:"interest" description:"interest of the data" default:"0"` // Interest is the interest of the data
|
||||||
|
Example string `json:"example,omitempty" bson:"example,omitempty" description:"base64 encoded data"` // Example is an example of the data
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *DataResource) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||||
|
b, err := json.Marshal(j)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, dma)
|
||||||
|
return dma
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *DataResource) Serialize() map[string]interface{} {
|
||||||
|
var m map[string]interface{}
|
||||||
|
b, err := json.Marshal(dma)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, &m)
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *DataResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||||
|
data := New() // Create a new instance of the accessor
|
||||||
|
data.Init(tools.DATA_RESOURCE, caller) // Initialize the accessor with the DATA_RESOURCE model type
|
||||||
|
return data
|
||||||
|
}
|
110
models/resources/data/data_mongo_accessor.go
Normal file
110
models/resources/data/data_mongo_accessor.go
Normal file
@ -0,0 +1,110 @@
|
|||||||
|
package data
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||||
|
mongo "cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
type dataMongoAccessor struct {
|
||||||
|
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new instance of the dataMongoAccessor
|
||||||
|
func New() *dataMongoAccessor {
|
||||||
|
return &dataMongoAccessor{}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Nothing special here, just the basic CRUD operations
|
||||||
|
*/
|
||||||
|
|
||||||
|
func (dma *dataMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||||
|
return dma.GenericDeleteOne(id, dma)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *dataMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||||
|
set.(*DataResource).ResourceModel = nil
|
||||||
|
return dma.GenericUpdateOne(set, id, dma, &DataResource{})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *dataMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
|
data.(*DataResource).ResourceModel = nil
|
||||||
|
return dma.GenericStoreOne(data, dma)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *dataMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
|
return dma.GenericStoreOne(data, dma)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *dataMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||||
|
var data DataResource
|
||||||
|
res_mongo, code, err := mongo.MONGOService.LoadOne(id, dma.GetType())
|
||||||
|
if err != nil {
|
||||||
|
dma.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
res_mongo.Decode(&data)
|
||||||
|
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||||
|
resources, _, err := accessor.Search(nil, dma.GetType())
|
||||||
|
if err == nil && len(resources) > 0 {
|
||||||
|
data.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||||
|
}
|
||||||
|
return &data, 200, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wfa dataMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||||
|
objs := []utils.ShallowDBObject{}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []DataResource
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||||
|
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||||
|
for _, r := range results {
|
||||||
|
if err == nil && len(resources) > 0 {
|
||||||
|
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||||
|
}
|
||||||
|
objs = append(objs, &r) // only get the abstract resource !
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wfa *dataMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||||
|
objs := []utils.ShallowDBObject{}
|
||||||
|
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||||
|
filters = &dbs.Filters{
|
||||||
|
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
|
||||||
|
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []DataResource
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||||
|
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||||
|
for _, r := range results {
|
||||||
|
if err == nil && len(resources) > 0 {
|
||||||
|
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||||
|
}
|
||||||
|
objs = append(objs, &r) // only get the abstract resource !
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
|
}
|
53
models/resources/data/data_test.go
Normal file
53
models/resources/data/data_test.go
Normal file
@ -0,0 +1,53 @@
|
|||||||
|
package data
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestStoreOneData(t *testing.T) {
|
||||||
|
d := DataResource{
|
||||||
|
WebResource: resource_model.WebResource{
|
||||||
|
Protocol: "http", Path: "azerty.fr",
|
||||||
|
},
|
||||||
|
Example: "123456",
|
||||||
|
AbstractResource: resource_model.AbstractResource{
|
||||||
|
AbstractObject: utils.AbstractObject{Name: "testData"},
|
||||||
|
Description: "Lorem Ipsum",
|
||||||
|
Logo: "azerty.com",
|
||||||
|
Owner: "toto",
|
||||||
|
OwnerLogo: "totoLogo",
|
||||||
|
SourceUrl: "azerty.fr",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
dma := New()
|
||||||
|
id, _, _ := dma.StoreOne(&d)
|
||||||
|
|
||||||
|
assert.NotEmpty(t, id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLoadOneDate(t *testing.T) {
|
||||||
|
d := DataResource{
|
||||||
|
WebResource: resource_model.WebResource{
|
||||||
|
Protocol: "http", Path: "azerty.fr",
|
||||||
|
},
|
||||||
|
Example: "123456",
|
||||||
|
AbstractResource: resource_model.AbstractResource{
|
||||||
|
AbstractObject: utils.AbstractObject{Name: "testData"},
|
||||||
|
Description: "Lorem Ipsum",
|
||||||
|
Logo: "azerty.com",
|
||||||
|
Owner: "toto",
|
||||||
|
OwnerLogo: "totoLogo",
|
||||||
|
SourceUrl: "azerty.fr",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
dma := New()
|
||||||
|
new_d, _, _ := dma.StoreOne(&d)
|
||||||
|
assert.Equal(t, d, new_d)
|
||||||
|
}
|
@ -1,34 +0,0 @@
|
|||||||
package resources
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ResourceInterface interface {
|
|
||||||
utils.DBObject
|
|
||||||
Trim()
|
|
||||||
ConvertToPricedResource(t tools.DataType, request *tools.APIRequest) pricing.PricedItemITF
|
|
||||||
GetType() string
|
|
||||||
GetSelectedInstance() utils.DBObject
|
|
||||||
ClearEnv() utils.DBObject
|
|
||||||
SetAllowedInstances(request *tools.APIRequest)
|
|
||||||
}
|
|
||||||
|
|
||||||
type ResourceInstanceITF interface {
|
|
||||||
utils.DBObject
|
|
||||||
GetID() string
|
|
||||||
GetName() string
|
|
||||||
StoreDraftDefault()
|
|
||||||
ClearEnv()
|
|
||||||
GetPricingsProfiles(peerID string, groups []string) []pricing.PricingProfileITF
|
|
||||||
GetPeerGroups() ([]ResourcePartnerITF, []map[string][]string)
|
|
||||||
ClearPeerGroups()
|
|
||||||
}
|
|
||||||
|
|
||||||
type ResourcePartnerITF interface {
|
|
||||||
GetPricingsProfiles(peerID string, groups []string) []pricing.PricingProfileITF
|
|
||||||
GetPeerGroups() map[string][]string
|
|
||||||
ClearPeerGroups()
|
|
||||||
}
|
|
@ -1,65 +0,0 @@
|
|||||||
package resources
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ResourceSet struct {
|
|
||||||
Datas []string `bson:"datas,omitempty" json:"datas,omitempty"`
|
|
||||||
Storages []string `bson:"storages,omitempty" json:"storages,omitempty"`
|
|
||||||
Processings []string `bson:"processings,omitempty" json:"processings,omitempty"`
|
|
||||||
Computes []string `bson:"computes,omitempty" json:"computes,omitempty"`
|
|
||||||
Workflows []string `bson:"workflows,omitempty" json:"workflows,omitempty"`
|
|
||||||
|
|
||||||
DataResources []*DataResource `bson:"-" json:"data_resources,omitempty"`
|
|
||||||
StorageResources []*StorageResource `bson:"-" json:"storage_resources,omitempty"`
|
|
||||||
ProcessingResources []*ProcessingResource `bson:"-" json:"processing_resources,omitempty"`
|
|
||||||
ComputeResources []*ComputeResource `bson:"-" json:"compute_resources,omitempty"`
|
|
||||||
WorkflowResources []*WorkflowResource `bson:"-" json:"workflow_resources,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *ResourceSet) Clear() {
|
|
||||||
r.DataResources = nil
|
|
||||||
r.StorageResources = nil
|
|
||||||
r.ProcessingResources = nil
|
|
||||||
r.ComputeResources = nil
|
|
||||||
r.WorkflowResources = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *ResourceSet) Fill(request *tools.APIRequest) {
|
|
||||||
r.Clear()
|
|
||||||
for k, v := range map[utils.DBObject][]string{
|
|
||||||
(&DataResource{}): r.Datas,
|
|
||||||
(&ComputeResource{}): r.Computes,
|
|
||||||
(&StorageResource{}): r.Storages,
|
|
||||||
(&ProcessingResource{}): r.Processings,
|
|
||||||
(&WorkflowResource{}): r.Workflows,
|
|
||||||
} {
|
|
||||||
for _, id := range v {
|
|
||||||
d, _, e := k.GetAccessor(request).LoadOne(id)
|
|
||||||
if e == nil {
|
|
||||||
switch k.(type) {
|
|
||||||
case *DataResource:
|
|
||||||
r.DataResources = append(r.DataResources, d.(*DataResource))
|
|
||||||
case *ComputeResource:
|
|
||||||
r.ComputeResources = append(r.ComputeResources, d.(*ComputeResource))
|
|
||||||
case *StorageResource:
|
|
||||||
r.StorageResources = append(r.StorageResources, d.(*StorageResource))
|
|
||||||
case *ProcessingResource:
|
|
||||||
r.ProcessingResources = append(r.ProcessingResources, d.(*ProcessingResource))
|
|
||||||
case *WorkflowResource:
|
|
||||||
r.WorkflowResources = append(r.WorkflowResources, d.(*WorkflowResource))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type ItemResource struct {
|
|
||||||
Data *DataResource `bson:"data,omitempty" json:"data,omitempty"`
|
|
||||||
Processing *ProcessingResource `bson:"processing,omitempty" json:"processing,omitempty"`
|
|
||||||
Storage *StorageResource `bson:"storage,omitempty" json:"storage,omitempty"`
|
|
||||||
Compute *ComputeResource `bson:"compute,omitempty" json:"compute,omitempty"`
|
|
||||||
Workflow *WorkflowResource `bson:"workflow,omitempty" json:"workflow,omitempty"`
|
|
||||||
}
|
|
@ -1,93 +0,0 @@
|
|||||||
package resources
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
type PricedResource struct {
|
|
||||||
Name string `json:"name,omitempty" bson:"name,omitempty"`
|
|
||||||
Logo string `json:"logo,omitempty" bson:"logo,omitempty"`
|
|
||||||
InstancesRefs map[string]string `json:"instances_refs,omitempty" bson:"instances_refs,omitempty"`
|
|
||||||
PricingProfiles []pricing.PricingProfileITF `json:"pricing_profiles,omitempty" bson:"pricing_profiles,omitempty"`
|
|
||||||
SelectedPricing *pricing.PricingProfileITF `json:"selected_pricing,omitempty" bson:"selected_pricing,omitempty"`
|
|
||||||
ExplicitBookingDurationS float64 `json:"explicit_location_duration_s,omitempty" bson:"explicit_location_duration_s,omitempty"`
|
|
||||||
UsageStart *time.Time `json:"start,omitempty" bson:"start,omitempty"`
|
|
||||||
UsageEnd *time.Time `json:"end,omitempty" bson:"end,omitempty"`
|
|
||||||
CreatorID string `json:"peer_id,omitempty" bson:"peer_id,omitempty"`
|
|
||||||
ResourceID string `json:"resource_id,omitempty" bson:"resource_id,omitempty"`
|
|
||||||
ResourceType tools.DataType `json:"resource_type,omitempty" bson:"resource_type,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (abs *PricedResource) GetID() string {
|
|
||||||
return abs.ResourceID
|
|
||||||
}
|
|
||||||
|
|
||||||
func (abs *PricedResource) GetType() tools.DataType {
|
|
||||||
return abs.ResourceType
|
|
||||||
}
|
|
||||||
|
|
||||||
func (abs *PricedResource) GetCreatorID() string {
|
|
||||||
return abs.CreatorID
|
|
||||||
}
|
|
||||||
|
|
||||||
func (abs *PricedResource) IsPurchased() bool {
|
|
||||||
if abs.SelectedPricing == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return (*abs.SelectedPricing).IsPurchased()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (abs *PricedResource) GetLocationEnd() *time.Time {
|
|
||||||
return abs.UsageEnd
|
|
||||||
}
|
|
||||||
|
|
||||||
func (abs *PricedResource) GetLocationStart() *time.Time {
|
|
||||||
return abs.UsageStart
|
|
||||||
}
|
|
||||||
|
|
||||||
func (abs *PricedResource) SetLocationStart(start time.Time) {
|
|
||||||
abs.UsageStart = &start
|
|
||||||
}
|
|
||||||
|
|
||||||
func (abs *PricedResource) SetLocationEnd(end time.Time) {
|
|
||||||
abs.UsageEnd = &end
|
|
||||||
}
|
|
||||||
|
|
||||||
func (abs *PricedResource) GetExplicitDurationInS() float64 {
|
|
||||||
if abs.ExplicitBookingDurationS == 0 {
|
|
||||||
if abs.UsageEnd == nil && abs.UsageStart == nil {
|
|
||||||
return time.Duration(1 * time.Hour).Seconds()
|
|
||||||
}
|
|
||||||
if abs.UsageEnd == nil {
|
|
||||||
add := abs.UsageStart.Add(time.Duration(1 * time.Hour))
|
|
||||||
abs.UsageEnd = &add
|
|
||||||
}
|
|
||||||
return abs.UsageEnd.Sub(*abs.UsageStart).Seconds()
|
|
||||||
}
|
|
||||||
return abs.ExplicitBookingDurationS
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *PricedResource) GetPrice() (float64, error) {
|
|
||||||
fmt.Println("GetPrice", r.UsageStart, r.UsageEnd)
|
|
||||||
now := time.Now()
|
|
||||||
if r.UsageStart == nil {
|
|
||||||
r.UsageStart = &now
|
|
||||||
}
|
|
||||||
if r.UsageEnd == nil {
|
|
||||||
add := r.UsageStart.Add(time.Duration(1 * time.Hour))
|
|
||||||
r.UsageEnd = &add
|
|
||||||
}
|
|
||||||
if r.SelectedPricing == nil {
|
|
||||||
if len(r.PricingProfiles) == 0 {
|
|
||||||
return 0, errors.New("pricing profile must be set on Priced Resource " + r.ResourceID)
|
|
||||||
}
|
|
||||||
r.SelectedPricing = &r.PricingProfiles[0]
|
|
||||||
}
|
|
||||||
pricing := *r.SelectedPricing
|
|
||||||
return pricing.GetPrice(1, 0, *r.UsageStart, *r.UsageEnd)
|
|
||||||
}
|
|
@ -1,86 +0,0 @@
|
|||||||
package resources
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/models"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ProcessingUsage struct {
|
|
||||||
CPUs map[string]*models.CPU `bson:"cpus,omitempty" json:"cpus,omitempty"` // CPUs is the list of CPUs key is model
|
|
||||||
GPUs map[string]*models.GPU `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs key is model
|
|
||||||
RAM *models.RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
|
|
||||||
|
|
||||||
StorageGb float64 `bson:"storage,omitempty" json:"storage,omitempty"` // Storage is the storage
|
|
||||||
Hypothesis string `bson:"hypothesis,omitempty" json:"hypothesis,omitempty"`
|
|
||||||
ScalingModel string `bson:"scaling_model,omitempty" json:"scaling_model,omitempty"` // ScalingModel is the scaling model
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* ProcessingResource is a struct that represents a processing resource
|
|
||||||
* it defines the resource processing
|
|
||||||
*/
|
|
||||||
type ProcessingResource struct {
|
|
||||||
AbstractInstanciatedResource[*ProcessingInstance]
|
|
||||||
Infrastructure enum.InfrastructureType `json:"infrastructure" bson:"infrastructure" default:"-1"` // Infrastructure is the infrastructure
|
|
||||||
IsService bool `json:"is_service,omitempty" bson:"is_service,omitempty"` // IsService is a flag that indicates if the processing is a service
|
|
||||||
Usage *ProcessingUsage `bson:"usage,omitempty" json:"usage,omitempty"` // Usage is the usage of the processing
|
|
||||||
OpenSource bool `json:"open_source" bson:"open_source" default:"false"`
|
|
||||||
License string `json:"license,omitempty" bson:"license,omitempty"`
|
|
||||||
Maturity string `json:"maturity,omitempty" bson:"maturity,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *ProcessingResource) GetType() string {
|
|
||||||
return tools.PROCESSING_RESOURCE.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
type ProcessingResourceAccess struct {
|
|
||||||
Container *models.Container `json:"container,omitempty" bson:"container,omitempty"` // Container is the container
|
|
||||||
}
|
|
||||||
|
|
||||||
type ProcessingInstance struct {
|
|
||||||
ResourceInstance[*ResourcePartnerShip[*ProcessingResourcePricingProfile]]
|
|
||||||
Access *ProcessingResourceAccess `json:"access,omitempty" bson:"access,omitempty"` // Access is the access
|
|
||||||
}
|
|
||||||
|
|
||||||
type PricedProcessingResource struct {
|
|
||||||
PricedResource
|
|
||||||
IsService bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *PricedProcessingResource) GetType() tools.DataType {
|
|
||||||
return tools.PROCESSING_RESOURCE
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *PricedProcessingResource) GetExplicitDurationInS() float64 {
|
|
||||||
if a.ExplicitBookingDurationS == 0 {
|
|
||||||
if a.IsService || a.UsageStart == nil {
|
|
||||||
if a.IsService {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
return time.Duration(1 * time.Hour).Seconds()
|
|
||||||
}
|
|
||||||
return a.UsageEnd.Sub(*a.UsageStart).Seconds()
|
|
||||||
}
|
|
||||||
return a.ExplicitBookingDurationS
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ProcessingResource) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
|
||||||
return NewAccessor[*ProcessingResource](tools.PROCESSING_RESOURCE, request, func() utils.DBObject { return &ProcessingResource{} }) // Create a new instance of the accessor
|
|
||||||
}
|
|
||||||
|
|
||||||
type ProcessingResourcePricingProfile struct {
|
|
||||||
pricing.AccessPricingProfile[pricing.TimePricingStrategy] // AccessPricingProfile is the pricing profile of a data it means that we can access the data for an amount of time
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *ProcessingResourcePricingProfile) IsPurchased() bool {
|
|
||||||
return p.Pricing.BuyingStrategy != pricing.PAY_PER_USE
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *ProcessingResourcePricingProfile) GetPrice(amountOfData float64, val float64, start time.Time, end time.Time, params ...string) (float64, error) {
|
|
||||||
return p.Pricing.GetPrice(amountOfData, val, start, &end)
|
|
||||||
}
|
|
67
models/resources/processing/processing.go
Normal file
67
models/resources/processing/processing.go
Normal file
@ -0,0 +1,67 @@
|
|||||||
|
package processing
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Container struct {
|
||||||
|
Image string `json:"image,omitempty" bson:"image,omitempty"` // Image is the container image
|
||||||
|
Command string `json:"command,omitempty" bson:"command,omitempty"` // Command is the container command
|
||||||
|
Args string `json:"args,omitempty" bson:"args,omitempty"` // Args is the container arguments
|
||||||
|
Env map[string]string `json:"env,omitempty" bson:"env,omitempty"` // Env is the container environment variables
|
||||||
|
Volumes map[string]string `json:"volumes,omitempty" bson:"volumes,omitempty"` // Volumes is the container volumes
|
||||||
|
}
|
||||||
|
|
||||||
|
type Expose struct {
|
||||||
|
Port int `json:"port,omitempty" bson:"port,omitempty"` // Port is the port
|
||||||
|
Reverse string `json:"reverse,omitempty" bson:"reverse,omitempty"` // Reverse is the reverse
|
||||||
|
PAT int `json:"pat,omitempty" bson:"pat,omitempty"` // PAT is the PAT
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* ProcessingResource is a struct that represents a processing resource
|
||||||
|
* it defines the resource processing
|
||||||
|
*/
|
||||||
|
type ProcessingResource struct {
|
||||||
|
resource_model.AbstractResource
|
||||||
|
IsService bool `json:"is_service,omitempty" bson:"is_service,omitempty"` // IsService is a flag that indicates if the processing is a service
|
||||||
|
CPUs []*compute.CPU `bson:"cpus,omitempty" json:"cp_us,omitempty"` // CPUs is the list of CPUs
|
||||||
|
GPUs []*compute.GPU `bson:"gpus,omitempty" json:"gp_us,omitempty"` // GPUs is the list of GPUs
|
||||||
|
RAM *compute.RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
|
||||||
|
Storage uint `bson:"storage,omitempty" json:"storage,omitempty"` // Storage is the storage
|
||||||
|
Parallel bool `bson:"parallel,omitempty" json:"parallel,omitempty"` // Parallel is a flag that indicates if the processing is parallel
|
||||||
|
ScalingModel uint `bson:"scaling_model,omitempty" json:"scaling_model,omitempty"` // ScalingModel is the scaling model
|
||||||
|
DiskIO string `bson:"disk_io,omitempty" json:"disk_io,omitempty"` // DiskIO is the disk IO
|
||||||
|
Container *Container `bson:"container,omitempty" json:"container,omitempty"` // Container is the container
|
||||||
|
Expose []Expose `bson:"expose,omitempty" json:"expose,omitempty"` // Expose is the execution
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *ProcessingResource) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||||
|
b, err := json.Marshal(j)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, dma)
|
||||||
|
return dma
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *ProcessingResource) Serialize() map[string]interface{} {
|
||||||
|
var m map[string]interface{}
|
||||||
|
b, err := json.Marshal(dma)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, &m)
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *ProcessingResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||||
|
data := New() // Create a new instance of the accessor
|
||||||
|
data.Init(tools.PROCESSING_RESOURCE, caller) // Initialize the accessor with the PROCESSING_RESOURCE model type
|
||||||
|
return data
|
||||||
|
}
|
114
models/resources/processing/processing_mongo_accessor.go
Normal file
114
models/resources/processing/processing_mongo_accessor.go
Normal file
@ -0,0 +1,114 @@
|
|||||||
|
package processing
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
type processingMongoAccessor struct {
|
||||||
|
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new instance of the processingMongoAccessor
|
||||||
|
func New() *processingMongoAccessor {
|
||||||
|
return &processingMongoAccessor{}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Nothing special here, just the basic CRUD operations
|
||||||
|
*/
|
||||||
|
|
||||||
|
func (pma *processingMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||||
|
return pma.GenericDeleteOne(id, pma)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pma *processingMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||||
|
set.(*ProcessingResource).ResourceModel = nil
|
||||||
|
return pma.GenericUpdateOne(set, id, pma, &ProcessingResource{})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pma *processingMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
|
data.(*ProcessingResource).ResourceModel = nil
|
||||||
|
return pma.GenericStoreOne(data, pma)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pma *processingMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
|
return pma.GenericStoreOne(data, pma)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pma *processingMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||||
|
|
||||||
|
var processing ProcessingResource
|
||||||
|
|
||||||
|
res_mongo, code, err := mongo.MONGOService.LoadOne(id, pma.GetType())
|
||||||
|
if err != nil {
|
||||||
|
pma.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
|
||||||
|
res_mongo.Decode(&processing)
|
||||||
|
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||||
|
resources, _, err := accessor.Search(nil, pma.GetType())
|
||||||
|
if err == nil && len(resources) > 0 {
|
||||||
|
processing.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||||
|
}
|
||||||
|
return &processing, 200, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wfa processingMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||||
|
objs := []utils.ShallowDBObject{}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []ProcessingResource
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||||
|
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||||
|
for _, r := range results {
|
||||||
|
if err == nil && len(resources) > 0 {
|
||||||
|
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||||
|
}
|
||||||
|
objs = append(objs, &r) // only get the abstract resource !
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search searches for processing resources in the database, given some filters OR a search string
|
||||||
|
func (wfa *processingMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||||
|
objs := []utils.ShallowDBObject{}
|
||||||
|
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||||
|
filters = &dbs.Filters{
|
||||||
|
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
|
||||||
|
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []ProcessingResource
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||||
|
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||||
|
for _, r := range results {
|
||||||
|
if err == nil && len(resources) > 0 {
|
||||||
|
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||||
|
}
|
||||||
|
objs = append(objs, &r) // only get the abstract resource !
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
|
}
|
38
models/resources/processing/processing_test.go
Normal file
38
models/resources/processing/processing_test.go
Normal file
@ -0,0 +1,38 @@
|
|||||||
|
package processing
|
||||||
|
|
||||||
|
/*
|
||||||
|
func TestStoreOneProcessing(t *testing.T) {
|
||||||
|
p := ProcessingResource{Container: "totoCont",
|
||||||
|
AbstractResource: resources.AbstractResource{
|
||||||
|
AbstractObject: utils.AbstractObject{Name: "testData"},
|
||||||
|
Description: "Lorem Ipsum",
|
||||||
|
Logo: "azerty.com",
|
||||||
|
Owner: "toto",
|
||||||
|
OwnerLogo: "totoLogo",
|
||||||
|
SourceUrl: "azerty.fr",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
sma := ProcessingMongoAccessor{}
|
||||||
|
id, _, _ := sma.StoreOne(&p)
|
||||||
|
|
||||||
|
assert.NotEmpty(t, id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLoadOneProcessing(t *testing.T) {
|
||||||
|
p := ProcessingResource{Container: "totoCont",
|
||||||
|
AbstractResource: resources.AbstractResource{
|
||||||
|
AbstractObject: utils.AbstractObject{Name: "testData"},
|
||||||
|
Description: "Lorem Ipsum",
|
||||||
|
Logo: "azerty.com",
|
||||||
|
Owner: "toto",
|
||||||
|
OwnerLogo: "totoLogo",
|
||||||
|
SourceUrl: "azerty.fr",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
sma := ProcessingMongoAccessor{}
|
||||||
|
new_s, _, _ := sma.StoreOne(&p)
|
||||||
|
assert.Equal(t, p, new_s)
|
||||||
|
}
|
||||||
|
*/
|
@ -1,30 +0,0 @@
|
|||||||
package purchase_resource
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
type PurchaseResource struct {
|
|
||||||
utils.AbstractObject
|
|
||||||
EndDate *time.Time `json:"end_buying_date,omitempty" bson:"end_buying_date,omitempty"`
|
|
||||||
ResourceID string `json:"resource_id" bson:"resource_id" validate:"required"`
|
|
||||||
ResourceType tools.DataType `json:"resource_type" bson:"resource_type" validate:"required"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *PurchaseResource) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
|
||||||
return NewAccessor(request) // Create a new instance of the accessor
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *PurchaseResource) CanUpdate(set utils.DBObject) (bool, utils.DBObject) {
|
|
||||||
return r.IsDraft, set // only draft buying can be updated
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *PurchaseResource) CanDelete() bool { // ENDBuyingDate is passed
|
|
||||||
if r.EndDate != nil {
|
|
||||||
return time.Now().UTC().After(*r.EndDate)
|
|
||||||
}
|
|
||||||
return false // only draft bookings can be deleted
|
|
||||||
}
|
|
@ -1,72 +0,0 @@
|
|||||||
package purchase_resource
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/logs"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
type purchaseResourceMongoAccessor struct {
|
|
||||||
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new instance of the bookingMongoAccessor
|
|
||||||
func NewAccessor(request *tools.APIRequest) *purchaseResourceMongoAccessor {
|
|
||||||
return &purchaseResourceMongoAccessor{
|
|
||||||
AbstractAccessor: utils.AbstractAccessor{
|
|
||||||
Logger: logs.CreateLogger(tools.PURCHASE_RESOURCE.String()), // Create a logger with the data type
|
|
||||||
Request: request,
|
|
||||||
Type: tools.PURCHASE_RESOURCE,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Nothing special here, just the basic CRUD operations
|
|
||||||
*/
|
|
||||||
func (a *purchaseResourceMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
|
||||||
return utils.GenericDeleteOne(id, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *purchaseResourceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
|
||||||
return utils.GenericUpdateOne(set, id, a, &PurchaseResource{})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *purchaseResourceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
|
||||||
return utils.GenericStoreOne(data, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *purchaseResourceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
|
||||||
return utils.GenericStoreOne(data, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *purchaseResourceMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
|
||||||
return utils.GenericLoadOne[*PurchaseResource](id, func(d utils.DBObject) (utils.DBObject, int, error) {
|
|
||||||
if d.(*PurchaseResource).EndDate != nil && time.Now().UTC().After(*d.(*PurchaseResource).EndDate) {
|
|
||||||
utils.GenericDeleteOne(id, a)
|
|
||||||
return nil, 404, nil
|
|
||||||
}
|
|
||||||
return d, 200, nil
|
|
||||||
}, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *purchaseResourceMongoAccessor) LoadAll(isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
|
||||||
return utils.GenericLoadAll[*PurchaseResource](a.getExec(), isDraft, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *purchaseResourceMongoAccessor) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
|
||||||
return utils.GenericSearch[*PurchaseResource](filters, search, (&PurchaseResource{}).GetObjectFilters(search), a.getExec(), isDraft, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *purchaseResourceMongoAccessor) getExec() func(utils.DBObject) utils.ShallowDBObject {
|
|
||||||
return func(d utils.DBObject) utils.ShallowDBObject {
|
|
||||||
if d.(*PurchaseResource).EndDate != nil && time.Now().UTC().After(*d.(*PurchaseResource).EndDate) {
|
|
||||||
utils.GenericDeleteOne(d.GetID(), a)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,222 +1,57 @@
|
|||||||
package resources
|
package resources
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"slices"
|
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resources/data"
|
||||||
"cloud.o-forge.io/core/oc-lib/config"
|
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/models"
|
"cloud.o-forge.io/core/oc-lib/models/resources/processing"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
|
"cloud.o-forge.io/core/oc-lib/models/resources/storage"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
w "cloud.o-forge.io/core/oc-lib/models/resources/workflow"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
"github.com/biter777/countries"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// AbstractResource is the struct containing all of the attributes commons to all ressources
|
// AbstractResource is the struct containing all of the attributes commons to all ressources
|
||||||
type AbstractResource struct {
|
|
||||||
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
|
// Resource is the interface to be implemented by all classes inheriting from Resource to have the same behavior
|
||||||
Type string `json:"type,omitempty" bson:"type,omitempty"` // Type is the type of the resource
|
|
||||||
Logo string `json:"logo,omitempty" bson:"logo,omitempty" validate:"required"` // Logo is the logo of the resource
|
// http://www.inanzzz.com/index.php/post/wqbs/a-basic-usage-of-int-and-string-enum-types-in-golang
|
||||||
Description string `json:"description,omitempty" bson:"description,omitempty"` // Description is the description of the resource
|
type ResourceSet struct {
|
||||||
ShortDescription string `json:"short_description,omitempty" bson:"short_description,omitempty" validate:"required"` // ShortDescription is the short description of the resource
|
Datas []string `bson:"datas,omitempty" json:"datas,omitempty"`
|
||||||
Owners []utils.Owner `json:"owners,omitempty" bson:"owners,omitempty"` // Owners is the list of owners of the resource
|
Storages []string `bson:"storages,omitempty" json:"storages,omitempty"`
|
||||||
UsageRestrictions string `bson:"usage_restrictions,omitempty" json:"usage_restrictions,omitempty"`
|
Processings []string `bson:"processings,omitempty" json:"processings,omitempty"`
|
||||||
SelectedInstanceIndex *int `json:"selected_instance_index,omitempty" bson:"selected_instance_index,omitempty"` // SelectedInstance is the selected instance
|
Computes []string `bson:"computes,omitempty" json:"computes,omitempty"`
|
||||||
|
Workflows []string `bson:"workflows,omitempty" json:"workflows,omitempty"`
|
||||||
|
|
||||||
|
DataResources []*data.DataResource `bson:"-" json:"data_resources,omitempty"`
|
||||||
|
StorageResources []*storage.StorageResource `bson:"-" json:"storage_resources,omitempty"`
|
||||||
|
ProcessingResources []*processing.ProcessingResource `bson:"-" json:"processing_resources,omitempty"`
|
||||||
|
ComputeResources []*compute.ComputeResource `bson:"-" json:"compute_resources,omitempty"`
|
||||||
|
WorkflowResources []*w.WorkflowResource `bson:"-" json:"workflow_resources,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *AbstractResource) GetSelectedInstance() utils.DBObject {
|
type ItemResource struct {
|
||||||
return nil
|
Data *data.DataResource `bson:"data,omitempty" json:"data,omitempty"`
|
||||||
|
Processing *processing.ProcessingResource `bson:"processing,omitempty" json:"processing,omitempty"`
|
||||||
|
Storage *storage.StorageResource `bson:"storage,omitempty" json:"storage,omitempty"`
|
||||||
|
Compute *compute.ComputeResource `bson:"compute,omitempty" json:"compute,omitempty"`
|
||||||
|
Workflow *w.WorkflowResource `bson:"workflow,omitempty" json:"workflow,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *AbstractResource) GetType() string {
|
func (i *ItemResource) GetAbstractRessource() *resource_model.AbstractResource {
|
||||||
return tools.INVALID.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *AbstractResource) StoreDraftDefault() {
|
if(i.Data != nil){
|
||||||
r.IsDraft = true
|
return &i.Data.AbstractResource
|
||||||
}
|
|
||||||
|
|
||||||
func (r *AbstractResource) CanUpdate(set utils.DBObject) (bool, utils.DBObject) {
|
|
||||||
if r.IsDraft != set.IsDrafted() && set.IsDrafted() {
|
|
||||||
return true, set // only state can be updated
|
|
||||||
}
|
}
|
||||||
return r.IsDraft != set.IsDrafted() && set.IsDrafted(), set
|
if(i.Processing != nil){
|
||||||
}
|
return &i.Processing.AbstractResource
|
||||||
|
|
||||||
func (r *AbstractResource) CanDelete() bool {
|
|
||||||
return r.IsDraft // only draft bookings can be deleted
|
|
||||||
}
|
|
||||||
|
|
||||||
type AbstractInstanciatedResource[T ResourceInstanceITF] struct {
|
|
||||||
AbstractResource // AbstractResource contains the basic fields of an object (id, name)
|
|
||||||
Instances []T `json:"instances,omitempty" bson:"instances,omitempty"` // Bill is the bill of the resource // Bill is the bill of the resource
|
|
||||||
}
|
|
||||||
|
|
||||||
func (abs *AbstractInstanciatedResource[T]) ConvertToPricedResource(
|
|
||||||
t tools.DataType, request *tools.APIRequest) pricing.PricedItemITF {
|
|
||||||
instances := map[string]string{}
|
|
||||||
profiles := []pricing.PricingProfileITF{}
|
|
||||||
for _, instance := range abs.Instances {
|
|
||||||
instances[instance.GetID()] = instance.GetName()
|
|
||||||
profiles = instance.GetPricingsProfiles(request.PeerID, request.Groups)
|
|
||||||
}
|
}
|
||||||
return &PricedResource{
|
if(i.Storage != nil){
|
||||||
Name: abs.Name,
|
return &i.Storage.AbstractResource
|
||||||
Logo: abs.Logo,
|
|
||||||
ResourceID: abs.UUID,
|
|
||||||
ResourceType: t,
|
|
||||||
InstancesRefs: instances,
|
|
||||||
PricingProfiles: profiles,
|
|
||||||
CreatorID: abs.CreatorID,
|
|
||||||
}
|
}
|
||||||
}
|
if(i.Compute != nil){
|
||||||
|
return &i.Compute.AbstractResource
|
||||||
func (abs *AbstractInstanciatedResource[T]) ClearEnv() utils.DBObject {
|
|
||||||
for _, instance := range abs.Instances {
|
|
||||||
instance.ClearEnv()
|
|
||||||
}
|
}
|
||||||
return abs
|
if(i.Workflow != nil){
|
||||||
}
|
return &i.Workflow.AbstractResource
|
||||||
|
|
||||||
func (r *AbstractInstanciatedResource[T]) GetSelectedInstance() utils.DBObject {
|
|
||||||
if r.SelectedInstanceIndex != nil && len(r.Instances) > *r.SelectedInstanceIndex {
|
|
||||||
return r.Instances[*r.SelectedInstanceIndex]
|
|
||||||
}
|
|
||||||
if len(r.Instances) > 0 {
|
|
||||||
return r.Instances[0]
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (abs *AbstractInstanciatedResource[T]) SetAllowedInstances(request *tools.APIRequest) {
|
|
||||||
if request != nil && request.PeerID == abs.CreatorID && request.PeerID != "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
abs.Instances = verifyAuthAction[T](abs.Instances, request)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *AbstractInstanciatedResource[T]) Trim() {
|
|
||||||
d.Type = d.GetType()
|
|
||||||
if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: d.CreatorID}}).IsMySelf(); !ok {
|
|
||||||
for _, instance := range d.Instances {
|
|
||||||
instance.ClearPeerGroups()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (abs *AbstractInstanciatedResource[T]) VerifyAuth(request *tools.APIRequest) bool {
|
|
||||||
return len(verifyAuthAction[T](abs.Instances, request)) > 0 || abs.AbstractObject.VerifyAuth(request)
|
|
||||||
}
|
|
||||||
|
|
||||||
func verifyAuthAction[T ResourceInstanceITF](baseInstance []T, request *tools.APIRequest) []T {
|
|
||||||
instances := []T{}
|
|
||||||
for _, instance := range baseInstance {
|
|
||||||
_, peerGroups := instance.GetPeerGroups()
|
|
||||||
for _, peers := range peerGroups {
|
|
||||||
if request == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if grps, ok := peers[request.PeerID]; ok || config.GetConfig().Whitelist {
|
|
||||||
if (ok && slices.Contains(grps, "*")) || (!ok && config.GetConfig().Whitelist) {
|
|
||||||
instances = append(instances, instance)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for _, grp := range grps {
|
|
||||||
if slices.Contains(request.Groups, grp) {
|
|
||||||
instances = append(instances, instance)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return instances
|
|
||||||
}
|
|
||||||
|
|
||||||
type GeoPoint struct {
|
|
||||||
Latitude float64 `json:"latitude,omitempty" bson:"latitude,omitempty"`
|
|
||||||
Longitude float64 `json:"longitude,omitempty" bson:"longitude,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Credentials struct {
|
|
||||||
Login string `json:"login,omitempty" bson:"login,omitempty"`
|
|
||||||
Pass string `json:"password,omitempty" bson:"password,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ResourceInstance[T ResourcePartnerITF] struct {
|
|
||||||
utils.AbstractObject
|
|
||||||
Location GeoPoint `json:"location,omitempty" bson:"location,omitempty"`
|
|
||||||
Country countries.CountryCode `json:"country,omitempty" bson:"country,omitempty"`
|
|
||||||
AccessProtocol string `json:"access_protocol,omitempty" bson:"access_protocol,omitempty"`
|
|
||||||
Env []models.Param `json:"env,omitempty" bson:"env,omitempty"`
|
|
||||||
Inputs []models.Param `json:"inputs,omitempty" bson:"inputs,omitempty"`
|
|
||||||
Outputs []models.Param `json:"outputs,omitempty" bson:"outputs,omitempty"`
|
|
||||||
Partnerships []T `json:"partnerships,omitempty" bson:"partnerships,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ri *ResourceInstance[T]) ClearEnv() {
|
|
||||||
ri.Env = []models.Param{}
|
|
||||||
ri.Inputs = []models.Param{}
|
|
||||||
ri.Outputs = []models.Param{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ri *ResourceInstance[T]) GetPricingsProfiles(peerID string, groups []string) []pricing.PricingProfileITF {
|
|
||||||
pricings := []pricing.PricingProfileITF{}
|
|
||||||
for _, p := range ri.Partnerships {
|
|
||||||
pricings = append(pricings, p.GetPricingsProfiles(peerID, groups)...)
|
|
||||||
}
|
|
||||||
return pricings
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ri *ResourceInstance[T]) GetPeerGroups() ([]ResourcePartnerITF, []map[string][]string) {
|
|
||||||
groups := []map[string][]string{}
|
|
||||||
partners := []ResourcePartnerITF{}
|
|
||||||
for _, p := range ri.Partnerships {
|
|
||||||
partners = append(partners, p)
|
|
||||||
groups = append(groups, p.GetPeerGroups())
|
|
||||||
}
|
|
||||||
return partners, groups
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ri *ResourceInstance[T]) ClearPeerGroups() {
|
|
||||||
for _, p := range ri.Partnerships {
|
|
||||||
p.ClearPeerGroups()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type ResourcePartnerShip[T pricing.PricingProfileITF] struct {
|
|
||||||
Namespace string `json:"namespace" bson:"namespace" default:"default-namespace"`
|
|
||||||
PeerGroups map[string][]string `json:"peer_groups,omitempty" bson:"peer_groups,omitempty"`
|
|
||||||
PricingProfiles []T `json:"pricing_profiles,omitempty" bson:"pricing_profiles,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ri *ResourcePartnerShip[T]) GetPricingsProfiles(peerID string, groups []string) []pricing.PricingProfileITF {
|
|
||||||
profiles := []pricing.PricingProfileITF{}
|
|
||||||
if ri.PeerGroups[peerID] != nil {
|
|
||||||
for _, ri := range ri.PricingProfiles {
|
|
||||||
profiles = append(profiles, ri)
|
|
||||||
}
|
|
||||||
if slices.Contains(groups, "*") {
|
|
||||||
for _, ri := range ri.PricingProfiles {
|
|
||||||
profiles = append(profiles, ri)
|
|
||||||
}
|
|
||||||
return profiles
|
|
||||||
}
|
|
||||||
for _, p := range ri.PeerGroups[peerID] {
|
|
||||||
if slices.Contains(groups, p) {
|
|
||||||
for _, ri := range ri.PricingProfiles {
|
|
||||||
profiles = append(profiles, ri)
|
|
||||||
}
|
|
||||||
return profiles
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return profiles
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rp *ResourcePartnerShip[T]) GetPeerGroups() map[string][]string {
|
|
||||||
return rp.PeerGroups
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rp *ResourcePartnerShip[T]) ClearPeerGroups() {
|
|
||||||
rp.PeerGroups = map[string][]string{}
|
|
||||||
}
|
|
||||||
|
@ -1,92 +0,0 @@
|
|||||||
package resources
|
|
||||||
|
|
||||||
import (
|
|
||||||
"slices"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/logs"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
type resourceMongoAccessor[T ResourceInterface] struct {
|
|
||||||
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
|
|
||||||
generateData func() utils.DBObject
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new instance of the computeMongoAccessor
|
|
||||||
func NewAccessor[T ResourceInterface](t tools.DataType, request *tools.APIRequest, g func() utils.DBObject) *resourceMongoAccessor[T] {
|
|
||||||
if !slices.Contains([]tools.DataType{tools.COMPUTE_RESOURCE, tools.STORAGE_RESOURCE, tools.PROCESSING_RESOURCE, tools.WORKFLOW_RESOURCE, tools.DATA_RESOURCE}, t) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return &resourceMongoAccessor[T]{
|
|
||||||
AbstractAccessor: utils.AbstractAccessor{
|
|
||||||
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
|
|
||||||
Request: request,
|
|
||||||
Type: t,
|
|
||||||
},
|
|
||||||
generateData: g,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Nothing special here, just the basic CRUD operations
|
|
||||||
*/
|
|
||||||
func (dca *resourceMongoAccessor[T]) DeleteOne(id string) (utils.DBObject, int, error) {
|
|
||||||
return utils.GenericDeleteOne(id, dca)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dca *resourceMongoAccessor[T]) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
|
||||||
set.(T).Trim()
|
|
||||||
return utils.GenericUpdateOne(set, id, dca, dca.generateData())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dca *resourceMongoAccessor[T]) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
|
||||||
data.(T).Trim()
|
|
||||||
return utils.GenericStoreOne(data, dca)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dca *resourceMongoAccessor[T]) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
|
||||||
return dca.StoreOne(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dca *resourceMongoAccessor[T]) LoadOne(id string) (utils.DBObject, int, error) {
|
|
||||||
return utils.GenericLoadOne[T](id, func(d utils.DBObject) (utils.DBObject, int, error) {
|
|
||||||
d.(T).SetAllowedInstances(dca.Request)
|
|
||||||
return d, 200, nil
|
|
||||||
}, dca)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (wfa *resourceMongoAccessor[T]) LoadAll(isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
|
||||||
return utils.GenericLoadAll[T](func(d utils.DBObject) utils.ShallowDBObject {
|
|
||||||
d.(T).SetAllowedInstances(wfa.Request)
|
|
||||||
return d
|
|
||||||
}, isDraft, wfa)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (wfa *resourceMongoAccessor[T]) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
|
||||||
if filters == nil && search == "*" {
|
|
||||||
return utils.GenericLoadAll[T](func(d utils.DBObject) utils.ShallowDBObject {
|
|
||||||
d.(T).SetAllowedInstances(wfa.Request)
|
|
||||||
return d
|
|
||||||
}, isDraft, wfa)
|
|
||||||
}
|
|
||||||
return utils.GenericSearch[T](filters, search, wfa.getResourceFilter(search),
|
|
||||||
func(d utils.DBObject) utils.ShallowDBObject {
|
|
||||||
d.(T).SetAllowedInstances(wfa.Request)
|
|
||||||
return d
|
|
||||||
}, isDraft, wfa)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (abs *resourceMongoAccessor[T]) getResourceFilter(search string) *dbs.Filters {
|
|
||||||
return &dbs.Filters{
|
|
||||||
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
|
|
||||||
"abstractintanciatedresource.abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
|
||||||
"abstractintanciatedresource.abstractresource.type": {{Operator: dbs.LIKE.String(), Value: search}},
|
|
||||||
"abstractintanciatedresource.abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
|
|
||||||
"abstractintanciatedresource.abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
|
|
||||||
"abstractintanciatedresource.abstractresource.owners.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
|
||||||
"abstractintanciatedresource.abstractresource.abstractobject.creator_id": {{Operator: dbs.EQUAL.String(), Value: search}},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,198 +0,0 @@
|
|||||||
package resources
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/models"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* StorageResource is a struct that represents a storage resource
|
|
||||||
* it defines the resource storage
|
|
||||||
*/
|
|
||||||
type StorageResource struct {
|
|
||||||
AbstractInstanciatedResource[*StorageResourceInstance] // AbstractResource contains the basic fields of an object (id, name)
|
|
||||||
StorageType enum.StorageType `bson:"storage_type" json:"storage_type" default:"-1"` // Type is the type of the storage
|
|
||||||
Acronym string `bson:"acronym,omitempty" json:"acronym,omitempty"` // Acronym is the acronym of the storage
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *StorageResource) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
|
||||||
return NewAccessor[*StorageResource](tools.STORAGE_RESOURCE, request, func() utils.DBObject { return &StorageResource{} }) // Create a new instance of the accessor
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *StorageResource) GetType() string {
|
|
||||||
return tools.STORAGE_RESOURCE.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (abs *StorageResource) ConvertToPricedResource(
|
|
||||||
t tools.DataType, request *tools.APIRequest) pricing.PricedItemITF {
|
|
||||||
if t != tools.STORAGE_RESOURCE {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
p := abs.AbstractInstanciatedResource.ConvertToPricedResource(t, request)
|
|
||||||
priced := p.(*PricedResource)
|
|
||||||
return &PricedStorageResource{
|
|
||||||
PricedResource: *priced,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type StorageResourceInstance struct {
|
|
||||||
ResourceInstance[*StorageResourcePartnership]
|
|
||||||
Credentials *Credentials `json:"credentials,omitempty" bson:"credentials,omitempty"`
|
|
||||||
Source string `bson:"source,omitempty" json:"source,omitempty"` // Source is the source of the storage
|
|
||||||
Local bool `bson:"local" json:"local"`
|
|
||||||
SecurityLevel string `bson:"security_level,omitempty" json:"security_level,omitempty"`
|
|
||||||
SizeType enum.StorageSize `bson:"size_type" json:"size_type" default:"0"` // SizeType is the type of the storage size
|
|
||||||
SizeGB int64 `bson:"size,omitempty" json:"size,omitempty"` // Size is the size of the storage
|
|
||||||
Encryption bool `bson:"encryption,omitempty" json:"encryption,omitempty"` // Encryption is a flag that indicates if the storage is encrypted
|
|
||||||
Redundancy string `bson:"redundancy,omitempty" json:"redundancy,omitempty"` // Redundancy is the redundancy of the storage
|
|
||||||
Throughput string `bson:"throughput,omitempty" json:"throughput,omitempty"` // Throughput is the throughput of the storage
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ri *StorageResourceInstance) ClearEnv() {
|
|
||||||
ri.Credentials = nil
|
|
||||||
ri.Env = []models.Param{}
|
|
||||||
ri.Inputs = []models.Param{}
|
|
||||||
ri.Outputs = []models.Param{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ri *StorageResourceInstance) StoreDraftDefault() {
|
|
||||||
found := false
|
|
||||||
for _, p := range ri.ResourceInstance.Env {
|
|
||||||
if p.Attr == "source" {
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
ri.ResourceInstance.Env = append(ri.ResourceInstance.Env, models.Param{
|
|
||||||
Attr: "source",
|
|
||||||
Value: ri.Source,
|
|
||||||
Readonly: true,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
ri.ResourceInstance.StoreDraftDefault()
|
|
||||||
}
|
|
||||||
|
|
||||||
type StorageResourcePartnership struct {
|
|
||||||
ResourcePartnerShip[*StorageResourcePricingProfile]
|
|
||||||
MaxSizeGBAllowed float64 `json:"allowed_gb,omitempty" bson:"allowed_gb,omitempty"`
|
|
||||||
OnlyEncryptedAllowed bool `json:"personal_data_allowed,omitempty" bson:"personal_data_allowed,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type PrivilegeStoragePricingStrategy int
|
|
||||||
|
|
||||||
const (
|
|
||||||
BASIC_STORAGE PrivilegeStoragePricingStrategy = iota
|
|
||||||
GARANTED_ON_DELAY_STORAGE
|
|
||||||
GARANTED_STORAGE
|
|
||||||
)
|
|
||||||
|
|
||||||
func PrivilegeStoragePricingStrategyList() []PrivilegeStoragePricingStrategy {
|
|
||||||
return []PrivilegeStoragePricingStrategy{BASIC_STORAGE, GARANTED_ON_DELAY_STORAGE, GARANTED_STORAGE}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t PrivilegeStoragePricingStrategy) String() string {
|
|
||||||
return [...]string{"NO MEMORY HOLDING", "KEEPED ON MEMORY GARANTED DURING DELAY", "KEEPED ON MEMORY GARANTED"}[t]
|
|
||||||
}
|
|
||||||
|
|
||||||
type StorageResourcePricingStrategy int
|
|
||||||
|
|
||||||
const (
|
|
||||||
PER_DATA_STORED StorageResourcePricingStrategy = iota
|
|
||||||
PER_TB_STORED
|
|
||||||
PER_GB_STORED
|
|
||||||
PER_MB_STORED
|
|
||||||
PER_KB_STORED
|
|
||||||
)
|
|
||||||
|
|
||||||
func StorageResourcePricingStrategyList() []StorageResourcePricingStrategy {
|
|
||||||
return []StorageResourcePricingStrategy{PER_DATA_STORED, PER_TB_STORED, PER_GB_STORED, PER_MB_STORED, PER_KB_STORED}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t StorageResourcePricingStrategy) String() string {
|
|
||||||
return [...]string{"PER DATA STORED", "PER TB STORED", "PER GB STORED", "PER MB STORED", "PER KB STORED"}[t]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t StorageResourcePricingStrategy) GetStrategy() string {
|
|
||||||
return [...]string{"PER_DATA_STORED", "PER_GB_STORED", "PER_MB_STORED", "PER_KB_STORED"}[t]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t StorageResourcePricingStrategy) GetStrategyValue() int {
|
|
||||||
return int(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func ToStorageResourcePricingStrategy(i int) StorageResourcePricingStrategy {
|
|
||||||
return StorageResourcePricingStrategy(i)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t StorageResourcePricingStrategy) GetQuantity(amountOfDataGB float64) (float64, error) {
|
|
||||||
switch t {
|
|
||||||
case PER_DATA_STORED:
|
|
||||||
return amountOfDataGB, nil
|
|
||||||
case PER_TB_STORED:
|
|
||||||
return amountOfDataGB * 1000, nil
|
|
||||||
case PER_GB_STORED:
|
|
||||||
return amountOfDataGB, nil
|
|
||||||
case PER_MB_STORED:
|
|
||||||
return (amountOfDataGB * 1000), nil
|
|
||||||
case PER_KB_STORED:
|
|
||||||
return amountOfDataGB * 1000000, nil
|
|
||||||
}
|
|
||||||
return 0, errors.New("pricing strategy not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
// StorageResourcePricingProfile is the pricing profile of a storage resource.
type StorageResourcePricingProfile struct {
	pricing.ExploitPricingProfile[StorageResourcePricingStrategy] // ExploitPricingProfile is the pricing profile of a storage it means that we exploit the resource for an amount of continuous time
}

// IsPurchased reports whether the storage is bought outright, i.e. any
// buying strategy other than pay-per-use.
func (p *StorageResourcePricingProfile) IsPurchased() bool {
	return p.Pricing.BuyingStrategy != pricing.PAY_PER_USE
}

// GetPrice delegates the price computation to the underlying pricing model.
// NOTE(review): the params variadic is accepted but never used — confirm
// callers do not rely on it.
func (p *StorageResourcePricingProfile) GetPrice(amountOfData float64, val float64, start time.Time, end time.Time, params ...string) (float64, error) {
	return p.Pricing.GetPrice(amountOfData, val, start, &end)
}

// PricedStorageResource couples a generic priced resource with the
// amount of storage (in GB) actually used.
type PricedStorageResource struct {
	PricedResource
	UsageStorageGB float64 `json:"storage_gb,omitempty" bson:"storage_gb,omitempty"` // storage actually consumed, in GB
}

// GetType identifies this priced item as a storage resource.
func (r *PricedStorageResource) GetType() tools.DataType {
	return tools.STORAGE_RESOURCE
}
|
|
||||||
|
|
||||||
func (r *PricedStorageResource) GetPrice() (float64, error) {
|
|
||||||
fmt.Println("GetPrice", r.UsageStart, r.UsageEnd)
|
|
||||||
now := time.Now()
|
|
||||||
if r.UsageStart == nil {
|
|
||||||
r.UsageStart = &now
|
|
||||||
}
|
|
||||||
if r.UsageEnd == nil {
|
|
||||||
add := r.UsageStart.Add(time.Duration(1 * time.Hour))
|
|
||||||
r.UsageEnd = &add
|
|
||||||
}
|
|
||||||
if r.SelectedPricing == nil {
|
|
||||||
if len(r.PricingProfiles) == 0 {
|
|
||||||
return 0, errors.New("pricing profile must be set on Priced Storage" + r.ResourceID)
|
|
||||||
}
|
|
||||||
r.SelectedPricing = &r.PricingProfiles[0]
|
|
||||||
}
|
|
||||||
pricing := *r.SelectedPricing
|
|
||||||
var err error
|
|
||||||
amountOfData := float64(1)
|
|
||||||
if pricing.GetOverrideStrategyValue() >= 0 {
|
|
||||||
amountOfData, err = ToStorageResourcePricingStrategy(pricing.GetOverrideStrategyValue()).GetQuantity(r.UsageStorageGB)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return pricing.GetPrice(amountOfData, r.ExplicitBookingDurationS, *r.UsageStart, *r.UsageEnd)
|
|
||||||
}
|
|
84
models/resources/storage/storage.go
Normal file
84
models/resources/storage/storage.go
Normal file
@ -0,0 +1,84 @@
|
|||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StorageSize is the unit in which a storage capacity is expressed.
type StorageSize int

// StorageSize - Enum that defines the unit of the storage size.
const (
	GB StorageSize = iota
	MB
	KB
)

// argoType maps each StorageSize to its Argo/Kubernetes quantity suffix.
// Index order must match the StorageSize constants above.
var argoType = [...]string{
	"Gi",
	"Mi",
	"Ki",
}

// ToArgo returns the Argo/Kubernetes quantity suffix for this size unit.
func (dma StorageSize) ToArgo() string {
	return argoType[dma]
}

// StorageType - enum of the kind of backend a storage resource exposes.
type StorageType int

const (
	FILE = iota
	STREAM
	API
	DATABASE
	S3
	MEMORY
	HARDWARE
)

/*
* StorageResource is a struct that represents a storage resource
* it defines the resource storage
 */
type StorageResource struct {
	resource_model.AbstractResource // AbstractResource contains the basic fields of an object (id, name)
	resource_model.WebResource
	Type       StorageType `bson:"type,omitempty" json:"type,omitempty"`             // Type is the type of the storage
	Acronym    string      `bson:"acronym,omitempty" json:"acronym,omitempty"`       // Acronym is the acronym of the storage
	SizeType   StorageSize `bson:"size_type" json:"size_type" default:"0"`           // SizeType is the type of the storage size
	Size       uint        `bson:"size,omitempty" json:"size,omitempty"`             // Size is the size of the storage
	Local      bool        `bson:"local" json:"local"`                               // Local is a flag that indicates if the storage is local
	Encryption bool        `bson:"encryption,omitempty" json:"encryption,omitempty"` // Encryption is a flag that indicates if the storage is encrypted
	Redundancy string      `bson:"redundancy,omitempty" json:"redundancy,omitempty"` // Redundancy is the redundancy of the storage
	Throughput string      `bson:"throughput,omitempty" json:"throughput,omitempty"` // Throughput is the throughput of the storage
}
|
||||||
|
|
||||||
|
func (dma *StorageResource) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||||
|
b, err := json.Marshal(j)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, dma)
|
||||||
|
return dma
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *StorageResource) Serialize() map[string]interface{} {
|
||||||
|
var m map[string]interface{}
|
||||||
|
b, err := json.Marshal(dma)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, &m)
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAccessor builds and initializes the mongo accessor bound to the
// STORAGE_RESOURCE data type.
func (d *StorageResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
	data := New()                             // Create a new instance of the accessor
	data.Init(tools.STORAGE_RESOURCE, caller) // Initialize the accessor with the STORAGE_RESOURCE model type
	return data
}
|
114
models/resources/storage/storage_mongo_accessor.go
Normal file
114
models/resources/storage/storage_mongo_accessor.go
Normal file
@ -0,0 +1,114 @@
|
|||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
// storageMongoAccessor implements the CRUD accessor for StorageResource
// documents.
type storageMongoAccessor struct {
	utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
}

// New creates a new instance of the storageMongoAccessor
func New() *storageMongoAccessor {
	return &storageMongoAccessor{}
}

/*
* Nothing special here, just the basic CRUD operations
 */

// DeleteOne removes the storage resource with the given id.
func (sma *storageMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
	return sma.GenericDeleteOne(id, sma)
}

// UpdateOne applies the given patch to the stored document; the embedded
// ResourceModel is cleared first so it is never persisted with the resource.
func (sma *storageMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
	set.(*StorageResource).ResourceModel = nil
	return sma.GenericUpdateOne(set, id, sma, &StorageResource{})
}

// StoreOne persists a new storage resource (ResourceModel stripped, as above).
func (sma *storageMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
	data.(*StorageResource).ResourceModel = nil
	return sma.GenericStoreOne(data, sma)
}

// CopyOne stores the given object as a new document.
// NOTE(review): unlike StoreOne it does not clear ResourceModel — confirm
// this asymmetry is intended.
func (sma *storageMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
	return sma.GenericStoreOne(data, sma)
}

// LoadOne fetches a single storage resource by id and re-attaches the
// first ResourceModel found for this type. Always returns 200 on success.
func (sma *storageMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {

	var storage StorageResource

	res_mongo, code, err := mongo.MONGOService.LoadOne(id, sma.GetType())
	if err != nil {
		sma.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
		return nil, code, err
	}

	res_mongo.Decode(&storage) // decode error is deliberately ignored (house style)
	accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
	resources, _, err := accessor.Search(nil, sma.GetType())
	if err == nil && len(resources) > 0 {
		storage.ResourceModel = resources[0].(*resource_model.ResourceModel)
	}
	return &storage, 200, nil
}
|
||||||
|
|
||||||
|
func (wfa storageMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||||
|
objs := []utils.ShallowDBObject{}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []StorageResource
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||||
|
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||||
|
for _, r := range results {
|
||||||
|
if err == nil && len(resources) > 0 {
|
||||||
|
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||||
|
}
|
||||||
|
objs = append(objs, &r) // only get the abstract resource !
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search searches for storage resources in the database, given some filters OR a search string
|
||||||
|
func (wfa *storageMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||||
|
objs := []utils.ShallowDBObject{}
|
||||||
|
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||||
|
filters = &dbs.Filters{
|
||||||
|
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
|
||||||
|
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []StorageResource
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||||
|
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||||
|
for _, r := range results {
|
||||||
|
if err == nil && len(resources) > 0 {
|
||||||
|
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||||
|
}
|
||||||
|
objs = append(objs, &r) // only get the abstract resource !
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
|
}
|
46
models/resources/storage/storage_test.go
Normal file
46
models/resources/storage/storage_test.go
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestStoreOneStorage stores a populated StorageResource and checks that
// a non-empty result comes back.
// NOTE(review): requires a reachable MongoDB — this is an integration
// test, not a unit test.
func TestStoreOneStorage(t *testing.T) {
	s := StorageResource{Size: 123, WebResource: resource_model.WebResource{Protocol: "http", Path: "azerty.fr"},
		AbstractResource: resource_model.AbstractResource{
			AbstractObject: utils.AbstractObject{Name: "testData"},
			Description:    "Lorem Ipsum",
			Logo:           "azerty.com",
			Owner:          "toto",
			OwnerLogo:      "totoLogo",
			SourceUrl:      "azerty.fr",
		},
	}

	sma := New()
	id, _, _ := sma.StoreOne(&s)

	assert.NotEmpty(t, id)
}

// TestLoadOneStorage stores a resource and compares the input to the
// returned object.
// NOTE(review): despite its name this test never calls LoadOne, and it
// compares a value (s) to the interface returned by StoreOne (a pointer)
// — confirm the intended assertion.
func TestLoadOneStorage(t *testing.T) {
	s := StorageResource{Size: 123, WebResource: resource_model.WebResource{Protocol: "http", Path: "azerty.fr"},
		AbstractResource: resource_model.AbstractResource{
			AbstractObject: utils.AbstractObject{Name: "testData"},
			Description:    "Lorem Ipsum",
			Logo:           "azerty.com",
			Owner:          "toto",
			OwnerLogo:      "totoLogo",
			SourceUrl:      "azerty.fr",
		},
	}

	sma := New()
	new_s, _, _ := sma.StoreOne(&s)

	assert.Equal(t, s, new_s)
}
|
@ -1,46 +0,0 @@
|
|||||||
package resources
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
// WorkflowResourcePricingProfile is a placeholder pricing profile for
// workflow resources (no pricing fields yet).
type WorkflowResourcePricingProfile struct{}

// WorkflowResource is a struct that represents a workflow resource
// it defines the resource workflow
type WorkflowResource struct {
	AbstractResource
	WorkflowID string `bson:"workflow_id,omitempty" json:"workflow_id,omitempty"` // WorkflowID is the ID of the native workflow
}

// GetAccessor returns the generic accessor bound to WORKFLOW_RESOURCE.
// NOTE(review): the type parameter is *ComputeResource while the factory
// returns a *WorkflowResource — this looks copy-pasted from the compute
// accessor; confirm whether it should be NewAccessor[*WorkflowResource].
func (d *WorkflowResource) GetAccessor(request *tools.APIRequest) utils.Accessor {
	return NewAccessor[*ComputeResource](tools.WORKFLOW_RESOURCE, request, func() utils.DBObject { return &WorkflowResource{} })
}

// GetType returns the data-type name of this resource.
func (r *WorkflowResource) GetType() string {
	return tools.WORKFLOW_RESOURCE.String()
}

// ClearEnv is a no-op for workflow resources; it returns the receiver unchanged.
func (d *WorkflowResource) ClearEnv() utils.DBObject {
	return d
}

// Trim is a no-op for workflow resources.
func (d *WorkflowResource) Trim() {
	/* EMPTY */
}

// SetAllowedInstances is a no-op for workflow resources.
func (w *WorkflowResource) SetAllowedInstances(request *tools.APIRequest) {
	/* EMPTY */
}

// ConvertToPricedResource wraps the resource's identity fields into a
// PricedResource of the given data type (no pricing profiles attached).
func (w *WorkflowResource) ConvertToPricedResource(
	t tools.DataType, request *tools.APIRequest) pricing.PricedItemITF {
	return &PricedResource{
		Name:         w.Name,
		Logo:         w.Logo,
		ResourceID:   w.UUID,
		ResourceType: t,
		CreatorID:    w.CreatorID,
	}
}
|
|
70
models/resources/workflow/graph/graph.go
Normal file
70
models/resources/workflow/graph/graph.go
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
package graph
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resources"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Graph is a struct that represents a graph of a workflow: the items
// (nodes) keyed by id and the links (edges) between them.
type Graph struct {
	Zoom  float64              `bson:"zoom" json:"zoom" default:"1"`                        // Zoom is the graphical zoom of the graph
	Items map[string]GraphItem `bson:"items" json:"items" default:"{}" validate:"required"` // Items is the list of elements in the graph
	Links []GraphLink          `bson:"links" json:"links" default:"{}" validate:"required"` // Links is the list of links between elements in the graph
}
|
||||||
|
|
||||||
|
func (g *Graph) GetResource(id string) (string, utils.DBObject) {
|
||||||
|
if item, ok := g.Items[id]; ok {
|
||||||
|
if item.Data != nil {
|
||||||
|
return tools.DATA_RESOURCE.String(), item.Data
|
||||||
|
} else if item.Compute != nil {
|
||||||
|
return tools.COMPUTE_RESOURCE.String(), item.Compute
|
||||||
|
} else if item.Workflow != nil {
|
||||||
|
return tools.WORKFLOW_RESOURCE.String(), item.Workflow
|
||||||
|
} else if item.Processing != nil {
|
||||||
|
return tools.PROCESSING_RESOURCE.String(), item.Processing
|
||||||
|
} else if item.Storage != nil {
|
||||||
|
return tools.STORAGE_RESOURCE.String(), item.Storage
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GraphItem is a struct that represents an item (node) in a graph.
type GraphItem struct {
	ID                      string   `bson:"id" json:"id" validate:"required"`             // ID is the unique identifier of the item
	Width                   float64  `bson:"width" json:"width" validate:"required"`       // Width is the graphical width of the item
	Height                  float64  `bson:"height" json:"height" validate:"required"`     // Height is the graphical height of the item
	Position                Position `bson:"position" json:"position" validate:"required"` // Position is the graphical position of the item
	*resources.ItemResource          // ItemResource is the resource of the item affected to the item
}

// GraphLink is a struct that represents a link between two items in a graph
type GraphLink struct {
	Source      Position        `bson:"source" json:"source" validate:"required"`           // Source is the source graphical position of the link
	Destination Position        `bson:"destination" json:"destination" validate:"required"` // Destination is the destination graphical position of the link
	Style       *GraphLinkStyle `bson:"style,omitempty" json:"style,omitempty"`             // Style is the graphical style of the link
}

// GraphLinkStyle is a struct that represents the style of a link in a graph
type GraphLinkStyle struct {
	Color           int64    `bson:"color" json:"color"`                         // Color is the graphical color of the link (int description of a color, can be transpose as hex)
	Stroke          float64  `bson:"stroke" json:"stroke"`                       // Stroke is the graphical stroke of the link
	Tension         float64  `bson:"tension" json:"tension"`                     // Tension is the graphical tension of the link
	HeadRadius      float64  `bson:"head_radius" json:"head_radius"`             // graphical pin radius
	DashWidth       float64  `bson:"dash_width" json:"dash_width"`               // DashWidth is the graphical dash width of the link
	DashSpace       float64  `bson:"dash_space" json:"dash_space"`               // DashSpace is the graphical dash space of the link
	EndArrow        Position `bson:"end_arrow" json:"end_arrow"`                 // EndArrow is the graphical end arrow of the link
	StartArrow      Position `bson:"start_arrow" json:"start_arrow"`             // StartArrow is the graphical start arrow of the link
	ArrowStyle      int64    `bson:"arrow_style" json:"arrow_style"`             // ArrowStyle is the graphical arrow style of the link (enum foundable in UI)
	ArrowDirection  int64    `bson:"arrow_direction" json:"arrow_direction"`     // ArrowDirection is the graphical arrow direction of the link (enum foundable in UI)
	StartArrowWidth float64  `bson:"start_arrow_width" json:"start_arrow_width"` // StartArrowWidth is the graphical start arrow width of the link
	EndArrowWidth   float64  `bson:"end_arrow_width" json:"end_arrow_width"`     // EndArrowWidth is the graphical end arrow width of the link
}

// Position is a struct that represents a graphical position
type Position struct {
	ID string  `json:"id" bson:"id"`                    // ID reprents ItemID (optionnal), TODO: rename to ItemID
	X  float64 `json:"x" bson:"x" validate:"required"`  // X is the graphical x position
	Y  float64 `json:"y" bson:"y" validate:"required"`  // Y is the graphical y position
}
|
41
models/resources/workflow/workflow.go
Normal file
41
models/resources/workflow/workflow.go
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
package oclib
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WorkflowResource is a struct that represents a workflow resource
// it defines the resource workflow
type WorkflowResource struct {
	resource_model.AbstractResource
	WorkflowID string `bson:"workflow_id,omitempty" json:"workflow_id,omitempty"` // WorkflowID is the ID of the native workflow
}

// GetAccessor builds and initializes the mongo accessor bound to the
// WORKFLOW_RESOURCE data type.
func (d *WorkflowResource) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
	data := New()                              // Create a new instance of the accessor
	data.Init(tools.WORKFLOW_RESOURCE, caller) // Initialize the accessor with the WORKFLOW_RESOURCE model type
	return data
}
|
||||||
|
|
||||||
|
func (dma *WorkflowResource) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||||
|
b, err := json.Marshal(j)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, dma)
|
||||||
|
return dma
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *WorkflowResource) Serialize() map[string]interface{} {
|
||||||
|
var m map[string]interface{}
|
||||||
|
b, err := json.Marshal(dma)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, &m)
|
||||||
|
return m
|
||||||
|
}
|
113
models/resources/workflow/workflow_mongo_accessor.go
Normal file
113
models/resources/workflow/workflow_mongo_accessor.go
Normal file
@ -0,0 +1,113 @@
|
|||||||
|
package oclib
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
// workflowResourceMongoAccessor implements the CRUD accessor for
// WorkflowResource documents.
type workflowResourceMongoAccessor struct {
	utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
}

// New creates a new instance of the workflowResourceMongoAccessor.
func New() *workflowResourceMongoAccessor {
	return &workflowResourceMongoAccessor{}
}

// DeleteOne removes the workflow resource with the given id.
func (wfa *workflowResourceMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
	return wfa.GenericDeleteOne(id, wfa)
}

// UpdateOne applies the given patch to the stored document; the embedded
// ResourceModel is cleared first so it is never persisted with the resource.
func (wfa *workflowResourceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
	set.(*WorkflowResource).ResourceModel = nil
	return wfa.GenericUpdateOne(set, id, wfa, &WorkflowResource{})
}

// StoreOne persists a new workflow resource (ResourceModel stripped, as above).
func (wfa *workflowResourceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
	data.(*WorkflowResource).ResourceModel = nil
	return wfa.GenericStoreOne(data, wfa)
}

// CopyOne clones a workflow resource: the incoming object's id becomes the
// WorkflowID of the copy; if a document with that id already exists the
// copy is applied as an update, otherwise it is stored as a new document.
func (wfa *workflowResourceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
	res, _, _ := wfa.LoadOne(data.GetID()) // lookup error deliberately ignored: absence means "store as new"
	data.(*WorkflowResource).WorkflowID = data.GetID()
	if res == nil {
		return wfa.GenericStoreOne(data, wfa)
	} else {
		data.(*WorkflowResource).UUID = res.GetID()
		return wfa.GenericUpdateOne(data, res.GetID(), wfa, &WorkflowResource{})
	}
}

// LoadOne fetches a single workflow resource by id and re-attaches the
// first ResourceModel found for this type. Always returns 200 on success.
func (wfa *workflowResourceMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
	var workflow WorkflowResource
	res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
	if err != nil {
		wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
		return nil, code, err
	}
	res_mongo.Decode(&workflow) // decode error is deliberately ignored (house style)
	accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
	resources, _, err := accessor.Search(nil, wfa.GetType())
	if err == nil && len(resources) > 0 {
		workflow.ResourceModel = resources[0].(*resource_model.ResourceModel)
	}
	return &workflow, 200, nil
}
|
||||||
|
|
||||||
|
func (wfa workflowResourceMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||||
|
objs := []utils.ShallowDBObject{}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []WorkflowResource
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||||
|
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||||
|
for _, r := range results {
|
||||||
|
if err == nil && len(resources) > 0 {
|
||||||
|
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||||
|
}
|
||||||
|
objs = append(objs, &r)
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search searches for workflow resources in the database, given some filters OR a search string
|
||||||
|
func (wfa *workflowResourceMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||||
|
objs := []utils.ShallowDBObject{}
|
||||||
|
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||||
|
filters = &dbs.Filters{
|
||||||
|
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
|
||||||
|
"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []WorkflowResource
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
|
||||||
|
resources, _, err := accessor.Search(nil, wfa.GetType())
|
||||||
|
for _, r := range results {
|
||||||
|
if err == nil && len(resources) > 0 {
|
||||||
|
r.ResourceModel = resources[0].(*resource_model.ResourceModel)
|
||||||
|
}
|
||||||
|
objs = append(objs, &r)
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
|
}
|
43
models/resources/workflow/workflow_test.go
Normal file
43
models/resources/workflow/workflow_test.go
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
package oclib
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resource_model"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestStoreOneWorkflow stores a populated WorkflowResource and checks
// that a non-empty result comes back.
// NOTE(review): requires a reachable MongoDB — this is an integration
// test, not a unit test.
func TestStoreOneWorkflow(t *testing.T) {
	w := WorkflowResource{AbstractResource: resource_model.AbstractResource{
		AbstractObject: utils.AbstractObject{Name: "testWorkflow"},
		Description:    "Lorem Ipsum",
		Logo:           "azerty.com",
		Owner:          "toto",
		OwnerLogo:      "totoLogo",
		SourceUrl:      "azerty.fr",
	},
	}

	wma := New()
	id, _, _ := wma.StoreOne(&w)

	assert.NotEmpty(t, id)
}

// TestLoadOneWorkflow stores a resource and compares the input to the
// returned object.
// NOTE(review): despite its name this test never calls LoadOne, and it
// compares a value (w) to the interface returned by StoreOne (a pointer)
// — confirm the intended assertion.
func TestLoadOneWorkflow(t *testing.T) {
	w := WorkflowResource{AbstractResource: resource_model.AbstractResource{
		AbstractObject: utils.AbstractObject{Name: "testWorkflow"},
		Description:    "Lorem Ipsum",
		Logo:           "azerty.com",
		Owner:          "toto",
		OwnerLogo:      "totoLogo",
		SourceUrl:      "azerty.fr",
	},
	}

	wma := New()
	new_w, _, _ := wma.StoreOne(&w)
	assert.Equal(t, w, new_w)
}
|
@ -2,9 +2,12 @@ package utils
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/logs"
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
"github.com/go-playground/validator/v10"
|
"github.com/go-playground/validator/v10"
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
@ -14,13 +17,6 @@ import (
|
|||||||
// single instance of the validator used in every model Struct to validate the fields
|
// single instance of the validator used in every model Struct to validate the fields
|
||||||
var validate = validator.New(validator.WithRequiredStructEnabled())
|
var validate = validator.New(validator.WithRequiredStructEnabled())
|
||||||
|
|
||||||
type AccessMode int
|
|
||||||
|
|
||||||
const (
|
|
||||||
Private AccessMode = iota
|
|
||||||
Public
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* AbstractObject is a struct that represents the basic fields of an object
|
* AbstractObject is a struct that represents the basic fields of an object
|
||||||
* it defines the object id and name
|
* it defines the object id and name
|
||||||
@ -29,144 +25,149 @@ const (
|
|||||||
type AbstractObject struct {
|
type AbstractObject struct {
|
||||||
UUID string `json:"id,omitempty" bson:"id,omitempty" validate:"required"`
|
UUID string `json:"id,omitempty" bson:"id,omitempty" validate:"required"`
|
||||||
Name string `json:"name,omitempty" bson:"name,omitempty" validate:"required"`
|
Name string `json:"name,omitempty" bson:"name,omitempty" validate:"required"`
|
||||||
IsDraft bool `json:"is_draft" bson:"is_draft" default:"false"`
|
UpdateDate time.Time `json:"update_date" bson:"update_date"`
|
||||||
CreatorID string `json:"creator_id,omitempty" bson:"creator_id,omitempty"`
|
LastPeerWriter string `json:"last_peer_writer" bson:"last_peer_writer"`
|
||||||
UserCreatorID string `json:"user_creator_id,omitempty" bson:"user_creator_id,omitempty"`
|
|
||||||
CreationDate time.Time `json:"creation_date,omitempty" bson:"creation_date,omitempty"`
|
|
||||||
UpdateDate time.Time `json:"update_date,omitempty" bson:"update_date,omitempty"`
|
|
||||||
UpdaterID string `json:"updater_id,omitempty" bson:"updater_id,omitempty"`
|
|
||||||
UserUpdaterID string `json:"user_updater_id,omitempty" bson:"user_updater_id,omitempty"`
|
|
||||||
AccessMode AccessMode `json:"access_mode" bson:"access_mode" default:"0"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ri *AbstractObject) GetAccessor(request *tools.APIRequest) Accessor {
|
// GetID returns the id of the object (abstract)
|
||||||
|
func (ao *AbstractObject) GetID() string {
|
||||||
|
return ao.UUID
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetName returns the name of the object (abstract)
|
||||||
|
func (ao *AbstractObject) GetName() string {
|
||||||
|
return ao.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ao *AbstractObject) UpToDate() {
|
||||||
|
ao.UpdateDate = time.Now()
|
||||||
|
// ao.LastPeerWriter, _ = static.GetMyLocalJsonPeer()
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAccessor returns the accessor of the object (abstract)
|
||||||
|
func (dma *AbstractObject) GetAccessor(caller *tools.HTTPCaller) Accessor {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (dma *AbstractObject) Deserialize(j map[string]interface{}) DBObject {
|
||||||
|
b, err := json.Marshal(j)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, dma)
|
||||||
|
return dma
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dma *AbstractObject) Serialize() map[string]interface{} {
|
||||||
|
var m map[string]interface{}
|
||||||
|
b, err := json.Marshal(dma)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, &m)
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
func (r *AbstractObject) GenerateID() {
|
func (r *AbstractObject) GenerateID() {
|
||||||
if r.UUID == "" {
|
if r.UUID == "" {
|
||||||
r.UUID = uuid.New().String()
|
r.UUID = uuid.New().String()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *AbstractObject) StoreDraftDefault() {
|
|
||||||
r.IsDraft = false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *AbstractObject) CanUpdate(set DBObject) (bool, DBObject) {
|
|
||||||
return true, set
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *AbstractObject) CanDelete() bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *AbstractObject) IsDrafted() bool {
|
|
||||||
return r.IsDraft
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetID implements ShallowDBObject.
|
|
||||||
func (ao AbstractObject) GetID() string {
|
|
||||||
return ao.UUID
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetName implements ShallowDBObject.
|
|
||||||
func (ao AbstractObject) GetName() string {
|
|
||||||
return ao.Name
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ao *AbstractObject) GetCreatorID() string {
|
|
||||||
return ao.CreatorID
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ao *AbstractObject) UpToDate(user string, peer string, create bool) {
|
|
||||||
ao.UpdateDate = time.Now()
|
|
||||||
ao.UpdaterID = peer
|
|
||||||
ao.UserUpdaterID = user
|
|
||||||
if create {
|
|
||||||
ao.CreationDate = time.Now()
|
|
||||||
ao.CreatorID = peer
|
|
||||||
ao.UserCreatorID = user
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ao *AbstractObject) VerifyAuth(request *tools.APIRequest) bool {
|
|
||||||
return ao.AccessMode == Public || (request != nil && ao.CreatorID == request.PeerID && request.PeerID != "")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ao *AbstractObject) GetObjectFilters(search string) *dbs.Filters {
|
|
||||||
if search == "*" {
|
|
||||||
search = ""
|
|
||||||
}
|
|
||||||
return &dbs.Filters{
|
|
||||||
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
|
|
||||||
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
|
||||||
}}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dma *AbstractObject) Deserialize(j map[string]interface{}, obj DBObject) DBObject {
|
|
||||||
b, err := json.Marshal(j)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
json.Unmarshal(b, obj)
|
|
||||||
return obj
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dma *AbstractObject) Serialize(obj DBObject) map[string]interface{} {
|
|
||||||
var m map[string]interface{}
|
|
||||||
b, err := json.Marshal(obj)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
json.Unmarshal(b, &m)
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
type AbstractAccessor struct {
|
type AbstractAccessor struct {
|
||||||
Logger zerolog.Logger // Logger is the logger of the accessor, it's a specilized logger for the accessor
|
Logger zerolog.Logger // Logger is the logger of the accessor, it's a specilized logger for the accessor
|
||||||
Type tools.DataType // Type is the data type of the accessor
|
Type string // Type is the data type of the accessor
|
||||||
Request *tools.APIRequest // Caller is the http caller of the accessor (optionnal) only need in a peer connection
|
Caller *tools.HTTPCaller // Caller is the http caller of the accessor (optionnal) only need in a peer connection
|
||||||
ResourceModelAccessor Accessor
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *AbstractAccessor) ShouldVerifyAuth() bool {
|
func (dma *AbstractAccessor) GetType() string {
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *AbstractAccessor) GetRequest() *tools.APIRequest {
|
|
||||||
return r.Request
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dma *AbstractAccessor) GetUser() string {
|
|
||||||
if dma.Request == nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return dma.Request.Username
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dma *AbstractAccessor) GetPeerID() string {
|
|
||||||
if dma.Request == nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return dma.Request.PeerID
|
|
||||||
}
|
|
||||||
func (dma *AbstractAccessor) GetGroups() []string {
|
|
||||||
if dma.Request == nil {
|
|
||||||
return []string{}
|
|
||||||
}
|
|
||||||
return dma.Request.Groups
|
|
||||||
}
|
|
||||||
func (dma *AbstractAccessor) GetLogger() *zerolog.Logger {
|
|
||||||
return &dma.Logger
|
|
||||||
}
|
|
||||||
func (dma *AbstractAccessor) GetType() tools.DataType {
|
|
||||||
return dma.Type
|
return dma.Type
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dma *AbstractAccessor) GetCaller() *tools.HTTPCaller {
|
func (dma *AbstractAccessor) GetCaller() *tools.HTTPCaller {
|
||||||
if dma.Request == nil {
|
return dma.Caller
|
||||||
return nil
|
}
|
||||||
}
|
|
||||||
return dma.Request.Caller
|
// Init initializes the accessor with the data type and the http caller
|
||||||
|
func (dma *AbstractAccessor) Init(t tools.DataType, caller *tools.HTTPCaller) {
|
||||||
|
dma.Logger = logs.CreateLogger(t.String()) // Create a logger with the data type
|
||||||
|
dma.Caller = caller // Set the caller
|
||||||
|
dma.Type = t.String() // Set the data type
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenericLoadOne loads one object from the database (generic)
|
||||||
|
func (wfa *AbstractAccessor) GenericStoreOne(data DBObject, accessor Accessor) (DBObject, int, error) {
|
||||||
|
data.GenerateID()
|
||||||
|
f := dbs.Filters{
|
||||||
|
Or: map[string][]dbs.Filter{
|
||||||
|
"abstractresource.abstractobject.name": {{
|
||||||
|
Operator: dbs.LIKE.String(),
|
||||||
|
Value: data.GetName(),
|
||||||
|
}},
|
||||||
|
"abstractobject.name": {{
|
||||||
|
Operator: dbs.LIKE.String(),
|
||||||
|
Value: data.GetName(),
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if cursor, _, _ := accessor.Search(&f, ""); len(cursor) > 0 {
|
||||||
|
return nil, 409, errors.New(accessor.GetType() + " with name " + data.GetName() + " already exists")
|
||||||
|
}
|
||||||
|
err := validate.Struct(data)
|
||||||
|
if err != nil {
|
||||||
|
return nil, 422, err
|
||||||
|
}
|
||||||
|
id, code, err := mongo.MONGOService.StoreOne(data, data.GetID(), wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not store " + data.GetName() + " to db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
return accessor.LoadOne(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenericLoadOne loads one object from the database (generic)
|
||||||
|
func (dma *AbstractAccessor) GenericDeleteOne(id string, accessor Accessor) (DBObject, int, error) {
|
||||||
|
res, code, err := accessor.LoadOne(id)
|
||||||
|
if err != nil {
|
||||||
|
dma.Logger.Error().Msg("Could not retrieve " + id + " to db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
_, code, err = mongo.MONGOService.DeleteOne(id, accessor.GetType())
|
||||||
|
if err != nil {
|
||||||
|
dma.Logger.Error().Msg("Could not delete " + id + " to db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
return res, 200, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenericLoadOne loads one object from the database (generic)
|
||||||
|
// json expected in entry is a flatted object no need to respect the inheritance hierarchy
|
||||||
|
func (dma *AbstractAccessor) GenericUpdateOne(set DBObject, id string, accessor Accessor, new DBObject) (DBObject, int, error) {
|
||||||
|
r, c, err := accessor.LoadOne(id)
|
||||||
|
if err != nil {
|
||||||
|
return nil, c, err
|
||||||
|
}
|
||||||
|
change := set.Serialize() // get the changes
|
||||||
|
loaded := r.Serialize() // get the loaded object
|
||||||
|
|
||||||
|
for k, v := range change { // apply the changes, with a flatten method
|
||||||
|
loaded[k] = v
|
||||||
|
}
|
||||||
|
id, code, err := mongo.MONGOService.UpdateOne(new.Deserialize(loaded), id, accessor.GetType())
|
||||||
|
if err != nil {
|
||||||
|
dma.Logger.Error().Msg("Could not update " + id + " to db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
return accessor.LoadOne(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenericLoadOne loads one object from the database (generic)
|
||||||
|
// json expected in entry is a flatted object no need to respect the inheritance hierarchy
|
||||||
|
func (dma *AbstractAccessor) GenericRawUpdateOne(set DBObject, id string, accessor Accessor) (DBObject, int, error) {
|
||||||
|
id, code, err := mongo.MONGOService.UpdateOne(set, id, accessor.GetType())
|
||||||
|
if err != nil {
|
||||||
|
dma.Logger.Error().Msg("Could not update " + id + " to db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
return accessor.LoadOne(id)
|
||||||
}
|
}
|
||||||
|
@ -1,166 +1,8 @@
|
|||||||
package utils
|
package utils
|
||||||
|
|
||||||
import (
|
/*
|
||||||
"errors"
|
type Price struct {
|
||||||
|
Price float64 `json:"price,omitempty" bson:"price,omitempty"`
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
Currency string `json:"currency,omitempty" bson:"currency,omitempty"`
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
|
||||||
mgb "go.mongodb.org/mongo-driver/mongo"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Owner struct {
|
|
||||||
Name string `json:"name,omitempty" bson:"name,omitempty"`
|
|
||||||
Logo string `json:"logo,omitempty" bson:"logo,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func VerifyAccess(a Accessor, id string) error {
|
|
||||||
data, _, err := a.LoadOne(id)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if a.ShouldVerifyAuth() && !data.VerifyAuth(a.GetRequest()) {
|
|
||||||
return errors.New("you are not allowed to access :" + a.GetType().String())
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GenericLoadOne loads one object from the database (generic)
|
|
||||||
func GenericStoreOne(data DBObject, a Accessor) (DBObject, int, error) {
|
|
||||||
data.GenerateID()
|
|
||||||
data.StoreDraftDefault()
|
|
||||||
data.UpToDate(a.GetUser(), a.GetPeerID(), true)
|
|
||||||
f := dbs.Filters{
|
|
||||||
Or: map[string][]dbs.Filter{
|
|
||||||
"abstractresource.abstractobject.name": {{
|
|
||||||
Operator: dbs.LIKE.String(),
|
|
||||||
Value: data.GetName(),
|
|
||||||
}},
|
|
||||||
"abstractobject.name": {{
|
|
||||||
Operator: dbs.LIKE.String(),
|
|
||||||
Value: data.GetName(),
|
|
||||||
}},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
if a.ShouldVerifyAuth() && !data.VerifyAuth(a.GetRequest()) {
|
|
||||||
return nil, 403, errors.New("you are not allowed to access : " + a.GetType().String())
|
|
||||||
}
|
|
||||||
if cursor, _, _ := a.Search(&f, "", data.IsDrafted()); len(cursor) > 0 {
|
|
||||||
return nil, 409, errors.New(a.GetType().String() + " with name " + data.GetName() + " already exists")
|
|
||||||
}
|
|
||||||
err := validate.Struct(data)
|
|
||||||
if err != nil {
|
|
||||||
return nil, 422, err
|
|
||||||
}
|
|
||||||
id, code, err := mongo.MONGOService.StoreOne(data, data.GetID(), a.GetType().String())
|
|
||||||
if err != nil {
|
|
||||||
a.GetLogger().Error().Msg("Could not store " + data.GetName() + " to db. Error: " + err.Error())
|
|
||||||
return nil, code, err
|
|
||||||
}
|
|
||||||
return a.LoadOne(id)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GenericLoadOne loads one object from the database (generic)
|
|
||||||
func GenericDeleteOne(id string, a Accessor) (DBObject, int, error) {
|
|
||||||
res, code, err := a.LoadOne(id)
|
|
||||||
if !res.CanDelete() {
|
|
||||||
return nil, 403, errors.New("you are not allowed to delete :" + a.GetType().String())
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, code, err
|
|
||||||
}
|
|
||||||
if a.ShouldVerifyAuth() && !res.VerifyAuth(a.GetRequest()) {
|
|
||||||
return nil, 403, errors.New("you are not allowed to access " + a.GetType().String())
|
|
||||||
}
|
|
||||||
_, code, err = mongo.MONGOService.DeleteOne(id, a.GetType().String())
|
|
||||||
if err != nil {
|
|
||||||
a.GetLogger().Error().Msg("Could not delete " + id + " to db. Error: " + err.Error())
|
|
||||||
return nil, code, err
|
|
||||||
}
|
|
||||||
return res, 200, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GenericLoadOne loads one object from the database (generic)
|
|
||||||
// json expected in entry is a flatted object no need to respect the inheritance hierarchy
|
|
||||||
func GenericUpdateOne(set DBObject, id string, a Accessor, new DBObject) (DBObject, int, error) {
|
|
||||||
r, c, err := a.LoadOne(id)
|
|
||||||
if err != nil {
|
|
||||||
return nil, c, err
|
|
||||||
}
|
|
||||||
ok, newSet := r.CanUpdate(set)
|
|
||||||
if !ok {
|
|
||||||
return nil, 403, errors.New("you are not allowed to delete :" + a.GetType().String())
|
|
||||||
}
|
|
||||||
set = newSet
|
|
||||||
r.UpToDate(a.GetUser(), a.GetPeerID(), false)
|
|
||||||
if a.ShouldVerifyAuth() && !r.VerifyAuth(a.GetRequest()) {
|
|
||||||
return nil, 403, errors.New("you are not allowed to access :" + a.GetType().String())
|
|
||||||
}
|
|
||||||
change := set.Serialize(set) // get the changes
|
|
||||||
loaded := r.Serialize(r) // get the loaded object
|
|
||||||
|
|
||||||
for k, v := range change { // apply the changes, with a flatten method
|
|
||||||
loaded[k] = v
|
|
||||||
}
|
|
||||||
id, code, err := mongo.MONGOService.UpdateOne(new.Deserialize(loaded, new), id, a.GetType().String())
|
|
||||||
if err != nil {
|
|
||||||
a.GetLogger().Error().Msg("Could not update " + id + " to db. Error: " + err.Error())
|
|
||||||
return nil, code, err
|
|
||||||
}
|
|
||||||
return a.LoadOne(id)
|
|
||||||
}
|
|
||||||
|
|
||||||
func GenericLoadOne[T DBObject](id string, f func(DBObject) (DBObject, int, error), a Accessor) (DBObject, int, error) {
|
|
||||||
var data T
|
|
||||||
res_mongo, code, err := mongo.MONGOService.LoadOne(id, a.GetType().String())
|
|
||||||
if err != nil {
|
|
||||||
return nil, code, err
|
|
||||||
}
|
|
||||||
res_mongo.Decode(&data)
|
|
||||||
if a.ShouldVerifyAuth() && !data.VerifyAuth(a.GetRequest()) {
|
|
||||||
return nil, 403, errors.New("you are not allowed to access :" + a.GetType().String())
|
|
||||||
}
|
|
||||||
return f(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func genericLoadAll[T DBObject](res *mgb.Cursor, code int, err error, onlyDraft bool, f func(DBObject) ShallowDBObject, a Accessor) ([]ShallowDBObject, int, error) {
|
|
||||||
objs := []ShallowDBObject{}
|
|
||||||
var results []T
|
|
||||||
if err != nil {
|
|
||||||
return nil, code, err
|
|
||||||
}
|
|
||||||
if err = res.All(mongo.MngoCtx, &results); err != nil {
|
|
||||||
return nil, 404, err
|
|
||||||
}
|
|
||||||
for _, r := range results {
|
|
||||||
if (a.ShouldVerifyAuth() && !r.VerifyAuth(a.GetRequest())) || f(r) == nil || (onlyDraft && !r.IsDrafted()) || (!onlyDraft && r.IsDrafted()) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
objs = append(objs, f(r))
|
|
||||||
}
|
|
||||||
return objs, 200, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func GenericLoadAll[T DBObject](f func(DBObject) ShallowDBObject, onlyDraft bool, wfa Accessor) ([]ShallowDBObject, int, error) {
|
|
||||||
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType().String())
|
|
||||||
return genericLoadAll[T](res_mongo, code, err, onlyDraft, f, wfa)
|
|
||||||
}
|
|
||||||
|
|
||||||
func GenericSearch[T DBObject](filters *dbs.Filters, search string, defaultFilters *dbs.Filters,
|
|
||||||
f func(DBObject) ShallowDBObject, onlyDraft bool, wfa Accessor) ([]ShallowDBObject, int, error) {
|
|
||||||
if filters == nil && search != "" {
|
|
||||||
filters = defaultFilters
|
|
||||||
}
|
|
||||||
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType().String())
|
|
||||||
return genericLoadAll[T](res_mongo, code, err, onlyDraft, f, wfa)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GenericLoadOne loads one object from the database (generic)
|
|
||||||
// json expected in entry is a flatted object no need to respect the inheritance hierarchy
|
|
||||||
func GenericRawUpdateOne(set DBObject, id string, a Accessor) (DBObject, int, error) {
|
|
||||||
id, code, err := mongo.MONGOService.UpdateOne(set, id, a.GetType().String())
|
|
||||||
if err != nil {
|
|
||||||
a.GetLogger().Error().Msg("Could not update " + id + " to db. Error: " + err.Error())
|
|
||||||
return nil, code, err
|
|
||||||
}
|
|
||||||
return a.LoadOne(id)
|
|
||||||
}
|
}
|
||||||
|
*/
|
||||||
|
@ -3,7 +3,6 @@ package utils
|
|||||||
import (
|
import (
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
"github.com/rs/zerolog"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// ShallowDBObject is an interface that defines the basic methods shallowed version of a DBObject
|
// ShallowDBObject is an interface that defines the basic methods shallowed version of a DBObject
|
||||||
@ -11,8 +10,8 @@ type ShallowDBObject interface {
|
|||||||
GenerateID()
|
GenerateID()
|
||||||
GetID() string
|
GetID() string
|
||||||
GetName() string
|
GetName() string
|
||||||
Serialize(obj DBObject) map[string]interface{}
|
Deserialize(j map[string]interface{}) DBObject
|
||||||
Deserialize(j map[string]interface{}, obj DBObject) DBObject
|
Serialize() map[string]interface{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// DBObject is an interface that defines the basic methods for a DBObject
|
// DBObject is an interface that defines the basic methods for a DBObject
|
||||||
@ -20,33 +19,22 @@ type DBObject interface {
|
|||||||
GenerateID()
|
GenerateID()
|
||||||
GetID() string
|
GetID() string
|
||||||
GetName() string
|
GetName() string
|
||||||
IsDrafted() bool
|
UpToDate()
|
||||||
CanDelete() bool
|
Deserialize(j map[string]interface{}) DBObject
|
||||||
StoreDraftDefault()
|
Serialize() map[string]interface{}
|
||||||
GetCreatorID() string
|
GetAccessor(caller *tools.HTTPCaller) Accessor
|
||||||
UpToDate(user string, peer string, create bool)
|
|
||||||
CanUpdate(set DBObject) (bool, DBObject)
|
|
||||||
VerifyAuth(request *tools.APIRequest) bool
|
|
||||||
Serialize(obj DBObject) map[string]interface{}
|
|
||||||
GetAccessor(request *tools.APIRequest) Accessor
|
|
||||||
Deserialize(j map[string]interface{}, obj DBObject) DBObject
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Accessor is an interface that defines the basic methods for an Accessor
|
// Accessor is an interface that defines the basic methods for an Accessor
|
||||||
type Accessor interface {
|
type Accessor interface {
|
||||||
GetUser() string
|
Init(t tools.DataType, caller *tools.HTTPCaller)
|
||||||
GetPeerID() string
|
GetType() string
|
||||||
GetGroups() []string
|
|
||||||
ShouldVerifyAuth() bool
|
|
||||||
GetType() tools.DataType
|
|
||||||
GetLogger() *zerolog.Logger
|
|
||||||
GetCaller() *tools.HTTPCaller
|
GetCaller() *tools.HTTPCaller
|
||||||
GetRequest() *tools.APIRequest
|
Search(filters *dbs.Filters, search string) ([]ShallowDBObject, int, error)
|
||||||
|
LoadAll() ([]ShallowDBObject, int, error)
|
||||||
LoadOne(id string) (DBObject, int, error)
|
LoadOne(id string) (DBObject, int, error)
|
||||||
DeleteOne(id string) (DBObject, int, error)
|
DeleteOne(id string) (DBObject, int, error)
|
||||||
CopyOne(data DBObject) (DBObject, int, error)
|
CopyOne(data DBObject) (DBObject, int, error)
|
||||||
StoreOne(data DBObject) (DBObject, int, error)
|
StoreOne(data DBObject) (DBObject, int, error)
|
||||||
LoadAll(isDraft bool) ([]ShallowDBObject, int, error)
|
|
||||||
UpdateOne(set DBObject, id string) (DBObject, int, error)
|
UpdateOne(set DBObject, id string) (DBObject, int, error)
|
||||||
Search(filters *dbs.Filters, search string, isDraft bool) ([]ShallowDBObject, int, error)
|
|
||||||
}
|
}
|
||||||
|
@ -1,147 +0,0 @@
|
|||||||
package graph
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/resources"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Graph is a struct that represents a graph
|
|
||||||
type Graph struct {
|
|
||||||
Partial bool `json:"partial" default:"false"` // Partial is a flag that indicates if the graph is partial
|
|
||||||
Zoom float64 `bson:"zoom" json:"zoom" default:"1"` // Zoom is the graphical zoom of the graph
|
|
||||||
Items map[string]GraphItem `bson:"items" json:"items" default:"{}" validate:"required"` // Items is the list of elements in the graph
|
|
||||||
Links []GraphLink `bson:"links" json:"links" default:"{}" validate:"required"` // Links is the list of links between elements in the graph
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Graph) Clear(id string) {
|
|
||||||
realItems := map[string]GraphItem{}
|
|
||||||
for k, it := range g.Items {
|
|
||||||
if k == id {
|
|
||||||
realinks := []GraphLink{}
|
|
||||||
for _, link := range g.Links {
|
|
||||||
if link.Source.ID != id && link.Destination.ID != id {
|
|
||||||
realinks = append(realinks, link)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
g.Links = realinks
|
|
||||||
g.Partial = true
|
|
||||||
} else {
|
|
||||||
realItems[k] = it
|
|
||||||
}
|
|
||||||
}
|
|
||||||
g.Items = realItems
|
|
||||||
}
|
|
||||||
|
|
||||||
func (wf *Graph) IsProcessing(item GraphItem) bool {
|
|
||||||
return item.Processing != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (wf *Graph) IsCompute(item GraphItem) bool {
|
|
||||||
return item.Compute != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (wf *Graph) IsData(item GraphItem) bool {
|
|
||||||
return item.Data != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (wf *Graph) IsStorage(item GraphItem) bool {
|
|
||||||
return item.Storage != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (wf *Graph) IsWorkflow(item GraphItem) bool {
|
|
||||||
return item.Workflow != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Graph) GetAverageTimeRelatedToProcessingActivity(start time.Time, processings []*resources.ProcessingResource, resource resources.ResourceInterface,
|
|
||||||
f func(GraphItem) resources.ResourceInterface, request *tools.APIRequest) (float64, float64) {
|
|
||||||
nearestStart := float64(10000000000)
|
|
||||||
oneIsInfinite := false
|
|
||||||
longestDuration := float64(0)
|
|
||||||
for _, link := range g.Links {
|
|
||||||
for _, processing := range processings {
|
|
||||||
var source string // source is the source of the link
|
|
||||||
if link.Destination.ID == processing.GetID() && f(g.Items[link.Source.ID]) != nil && f(g.Items[link.Source.ID]).GetID() == resource.GetID() { // if the destination is the processing and the source is not a compute
|
|
||||||
source = link.Source.ID
|
|
||||||
} else if link.Source.ID == processing.GetID() && f(g.Items[link.Source.ID]) != nil && f(g.Items[link.Source.ID]).GetID() == resource.GetID() { // if the source is the processing and the destination is not a compute
|
|
||||||
source = link.Destination.ID
|
|
||||||
}
|
|
||||||
priced := processing.ConvertToPricedResource(tools.PROCESSING_RESOURCE, request)
|
|
||||||
if source != "" {
|
|
||||||
if priced.GetLocationStart() != nil {
|
|
||||||
near := float64(priced.GetLocationStart().Sub(start).Seconds())
|
|
||||||
if near < nearestStart {
|
|
||||||
nearestStart = near
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
if priced.GetLocationEnd() != nil {
|
|
||||||
duration := float64(priced.GetLocationEnd().Sub(*priced.GetLocationStart()).Seconds())
|
|
||||||
if longestDuration < duration {
|
|
||||||
longestDuration = duration
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
oneIsInfinite = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if oneIsInfinite {
|
|
||||||
return nearestStart, -1
|
|
||||||
}
|
|
||||||
return nearestStart, longestDuration
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* GetAverageTimeBeforeStart is a function that returns the average time before the start of a processing
|
|
||||||
*/
|
|
||||||
func (g *Graph) GetAverageTimeProcessingBeforeStart(average float64, processingID string, request *tools.APIRequest) float64 {
|
|
||||||
currents := []float64{} // list of current time
|
|
||||||
for _, link := range g.Links { // for each link
|
|
||||||
var source string // source is the source of the link
|
|
||||||
if link.Destination.ID == processingID && g.Items[link.Source.ID].Processing == nil { // if the destination is the processing and the source is not a compute
|
|
||||||
source = link.Source.ID
|
|
||||||
} else if link.Source.ID == processingID && g.Items[link.Source.ID].Processing == nil { // if the source is the processing and the destination is not a compute
|
|
||||||
source = link.Destination.ID
|
|
||||||
}
|
|
||||||
if source == "" { // if source is empty, continue
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
dt, r := g.GetResource(source) // get the resource of the source
|
|
||||||
if r == nil { // if item is nil, continue
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
priced := r.ConvertToPricedResource(dt, request)
|
|
||||||
current := priced.GetExplicitDurationInS() // get the explicit duration of the item
|
|
||||||
if current < 0 { // if current is negative, its means that duration of a before could be infinite continue
|
|
||||||
return current
|
|
||||||
}
|
|
||||||
current += g.GetAverageTimeProcessingBeforeStart(current, source, request) // get the average time before start of the source
|
|
||||||
currents = append(currents, current) // append the current to the currents
|
|
||||||
}
|
|
||||||
var max float64 // get the max time to wait dependancies to finish
|
|
||||||
for _, current := range currents {
|
|
||||||
if current > max {
|
|
||||||
max = current
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return max
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Graph) GetResource(id string) (tools.DataType, resources.ResourceInterface) {
|
|
||||||
if item, ok := g.Items[id]; ok {
|
|
||||||
if item.Data != nil {
|
|
||||||
return tools.DATA_RESOURCE, item.Data
|
|
||||||
} else if item.Compute != nil {
|
|
||||||
return tools.COMPUTE_RESOURCE, item.Compute
|
|
||||||
} else if item.Workflow != nil {
|
|
||||||
return tools.WORKFLOW_RESOURCE, item.Workflow
|
|
||||||
} else if item.Processing != nil {
|
|
||||||
return tools.PROCESSING_RESOURCE, item.Processing
|
|
||||||
} else if item.Storage != nil {
|
|
||||||
return tools.STORAGE_RESOURCE, item.Storage
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return tools.INVALID, nil
|
|
||||||
}
|
|
@ -1,38 +0,0 @@
|
|||||||
package graph
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/resources"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
|
||||||
|
|
||||||
// GraphItem is a struct that represents an item in a graph
|
|
||||||
type GraphItem struct {
|
|
||||||
ID string `bson:"id" json:"id" validate:"required"` // ID is the unique identifier of the item
|
|
||||||
Width float64 `bson:"width" json:"width" validate:"required"` // Width is the graphical width of the item
|
|
||||||
Height float64 `bson:"height" json:"height" validate:"required"` // Height is the graphical height of the item
|
|
||||||
Position Position `bson:"position" json:"position" validate:"required"` // Position is the graphical position of the item
|
|
||||||
*resources.ItemResource // ItemResource is the resource of the item affected to the item
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *GraphItem) GetResource() (tools.DataType, resources.ResourceInterface) {
|
|
||||||
if g.Data != nil {
|
|
||||||
return tools.DATA_RESOURCE, g.Data
|
|
||||||
} else if g.Compute != nil {
|
|
||||||
return tools.COMPUTE_RESOURCE, g.Compute
|
|
||||||
} else if g.Workflow != nil {
|
|
||||||
return tools.WORKFLOW_RESOURCE, g.Workflow
|
|
||||||
} else if g.Processing != nil {
|
|
||||||
return tools.PROCESSING_RESOURCE, g.Processing
|
|
||||||
} else if g.Storage != nil {
|
|
||||||
return tools.STORAGE_RESOURCE, g.Storage
|
|
||||||
}
|
|
||||||
return tools.INVALID, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *GraphItem) Clear() {
|
|
||||||
g.Data = nil
|
|
||||||
g.Compute = nil
|
|
||||||
g.Workflow = nil
|
|
||||||
g.Processing = nil
|
|
||||||
g.Storage = nil
|
|
||||||
}
|
|
@ -1,56 +0,0 @@
|
|||||||
package graph
|
|
||||||
|
|
||||||
import "cloud.o-forge.io/core/oc-lib/models/common/models"
|
|
||||||
|
|
||||||
type StorageProcessingGraphLink struct {
|
|
||||||
Write bool `json:"write" bson:"write"`
|
|
||||||
Source string `json:"source" bson:"source"`
|
|
||||||
Destination string `json:"destination" bson:"destination"`
|
|
||||||
FileName string `json:"filename" bson:"filename"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GraphLink is a struct that represents a link between two items in a graph
|
|
||||||
type GraphLink struct {
|
|
||||||
Source Position `bson:"source" json:"source" validate:"required"` // Source is the source graphical position of the link
|
|
||||||
Destination Position `bson:"destination" json:"destination" validate:"required"` // Destination is the destination graphical position of the link
|
|
||||||
Style *GraphLinkStyle `bson:"style,omitempty" json:"style,omitempty"` // Style is the graphical style of the link
|
|
||||||
StorageLinkInfos []StorageProcessingGraphLink `bson:"storage_link_infos,omitempty" json:"storage_link_infos,omitempty"` // StorageLinkInfo is the storage link info
|
|
||||||
Env []models.Param `json:"env" bson:"env"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// tool function to check if a link is a link between a compute and a resource
|
|
||||||
func (l *GraphLink) IsComputeLink(g Graph) (bool, string) {
|
|
||||||
if g.Items == nil {
|
|
||||||
return false, ""
|
|
||||||
}
|
|
||||||
if d, ok := g.Items[l.Source.ID]; ok && d.Compute != nil {
|
|
||||||
return true, d.Compute.UUID
|
|
||||||
}
|
|
||||||
if d, ok := g.Items[l.Destination.ID]; ok && d.Compute != nil {
|
|
||||||
return true, d.Compute.UUID
|
|
||||||
}
|
|
||||||
return false, ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// GraphLinkStyle is a struct that represents the style of a link in a graph
|
|
||||||
type GraphLinkStyle struct {
|
|
||||||
Color int64 `bson:"color" json:"color"` // Color is the graphical color of the link (int description of a color, can be transpose as hex)
|
|
||||||
Stroke float64 `bson:"stroke" json:"stroke"` // Stroke is the graphical stroke of the link
|
|
||||||
Tension float64 `bson:"tension" json:"tension"` // Tension is the graphical tension of the link
|
|
||||||
HeadRadius float64 `bson:"head_radius" json:"head_radius"` // graphical pin radius
|
|
||||||
DashWidth float64 `bson:"dash_width" json:"dash_width"` // DashWidth is the graphical dash width of the link
|
|
||||||
DashSpace float64 `bson:"dash_space" json:"dash_space"` // DashSpace is the graphical dash space of the link
|
|
||||||
EndArrow Position `bson:"end_arrow" json:"end_arrow"` // EndArrow is the graphical end arrow of the link
|
|
||||||
StartArrow Position `bson:"start_arrow" json:"start_arrow"` // StartArrow is the graphical start arrow of the link
|
|
||||||
ArrowStyle int64 `bson:"arrow_style" json:"arrow_style"` // ArrowStyle is the graphical arrow style of the link (enum foundable in UI)
|
|
||||||
ArrowDirection int64 `bson:"arrow_direction" json:"arrow_direction"` // ArrowDirection is the graphical arrow direction of the link (enum foundable in UI)
|
|
||||||
StartArrowWidth float64 `bson:"start_arrow_width" json:"start_arrow_width"` // StartArrowWidth is the graphical start arrow width of the link
|
|
||||||
EndArrowWidth float64 `bson:"end_arrow_width" json:"end_arrow_width"` // EndArrowWidth is the graphical end arrow width of the link
|
|
||||||
}
|
|
||||||
|
|
||||||
// Position is a struct that represents a graphical position
|
|
||||||
type Position struct {
|
|
||||||
ID string `json:"id" bson:"id"` // ID reprents ItemID (optionnal)
|
|
||||||
X float64 `json:"x" bson:"x" validate:"required"` // X is the graphical x position
|
|
||||||
Y float64 `json:"y" bson:"y" validate:"required"` // Y is the graphical y position
|
|
||||||
}
|
|
@ -1,102 +1,103 @@
|
|||||||
package workflow
|
package workflow
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"time"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
"cloud.o-forge.io/core/oc-lib/models/peer"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/resources"
|
"cloud.o-forge.io/core/oc-lib/models/resources"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resources/storage"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resources/workflow/graph"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workflow/graph"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* AbstractWorkflow is a struct that represents a workflow for resource or native workflow
|
||||||
|
* Warning: there is 2 types of workflows, the resource workflow and the native workflow
|
||||||
|
* native workflow is the one that you create to schedule an execution
|
||||||
|
* resource workflow is the one that is created to set our native workflow in catalog
|
||||||
|
*/
|
||||||
|
type AbstractWorkflow struct {
|
||||||
|
resources.ResourceSet
|
||||||
|
Graph *graph.Graph `bson:"graph,omitempty" json:"graph,omitempty"` // Graph UI & logic representation of the workflow
|
||||||
|
ScheduleActive bool `json:"schedule_active" bson:"schedule_active"` // ScheduleActive is a flag that indicates if the schedule is active, if not the workflow is not scheduled and no execution or booking will be set
|
||||||
|
Schedule *WorkflowSchedule `bson:"schedule,omitempty" json:"schedule,omitempty"` // Schedule is the schedule of the workflow
|
||||||
|
Shared []string `json:"shared,omitempty" bson:"shared,omitempty"` // Shared is the ID of the shared workflow
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *AbstractWorkflow) GetWorkflows() (list_computings []graph.GraphItem) {
|
||||||
|
for _, item := range w.Graph.Items {
|
||||||
|
if item.Workflow != nil {
|
||||||
|
list_computings = append(list_computings, item)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *AbstractWorkflow) GetComputeByRelatedProcessing(processingID string) []*compute.ComputeResource {
|
||||||
|
storages := []*compute.ComputeResource{}
|
||||||
|
for _, link := range w.Graph.Links {
|
||||||
|
nodeID := link.Destination.ID // we considers that the processing is the destination
|
||||||
|
node := w.Graph.Items[link.Source.ID].Compute // we are looking for the storage as source
|
||||||
|
if node == nil { // if the source is not a storage, we consider that the destination is the storage
|
||||||
|
nodeID = link.Source.ID // and the processing is the source
|
||||||
|
node = w.Graph.Items[link.Destination.ID].Compute // we are looking for the storage as destination
|
||||||
|
}
|
||||||
|
if processingID == nodeID && node != nil { // if the storage is linked to the processing
|
||||||
|
storages = append(storages, node)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return storages
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *AbstractWorkflow) GetStoragesByRelatedProcessing(processingID string) []*storage.StorageResource {
|
||||||
|
storages := []*storage.StorageResource{}
|
||||||
|
for _, link := range w.Graph.Links {
|
||||||
|
nodeID := link.Destination.ID // we considers that the processing is the destination
|
||||||
|
node := w.Graph.Items[link.Source.ID].Storage // we are looking for the storage as source
|
||||||
|
if node == nil { // if the source is not a storage, we consider that the destination is the storage
|
||||||
|
nodeID = link.Source.ID // and the processing is the source
|
||||||
|
node = w.Graph.Items[link.Destination.ID].Storage // we are looking for the storage as destination
|
||||||
|
}
|
||||||
|
if processingID == nodeID && node != nil { // if the storage is linked to the processing
|
||||||
|
storages = append(storages, node)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return storages
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *AbstractWorkflow) GetProcessings() (list_computings []graph.GraphItem) {
|
||||||
|
for _, item := range w.Graph.Items {
|
||||||
|
if item.Processing != nil {
|
||||||
|
list_computings = append(list_computings, item)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// tool function to check if a link is a link between a compute and a resource
|
||||||
|
func (w *AbstractWorkflow) isDCLink(link graph.GraphLink) (bool, string) {
|
||||||
|
if w.Graph == nil || w.Graph.Items == nil {
|
||||||
|
return false, ""
|
||||||
|
}
|
||||||
|
if d, ok := w.Graph.Items[link.Source.ID]; ok && d.Compute != nil {
|
||||||
|
return true, d.Compute.UUID
|
||||||
|
}
|
||||||
|
if d, ok := w.Graph.Items[link.Destination.ID]; ok && d.Compute != nil {
|
||||||
|
return true, d.Compute.UUID
|
||||||
|
}
|
||||||
|
return false, ""
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Workflow is a struct that represents a workflow
|
* Workflow is a struct that represents a workflow
|
||||||
* it defines the native workflow
|
* it defines the native workflow
|
||||||
*/
|
*/
|
||||||
type Workflow struct {
|
type Workflow struct {
|
||||||
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
|
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
|
||||||
resources.ResourceSet
|
AbstractWorkflow // AbstractWorkflow contains the basic fields of a workflow
|
||||||
Graph *graph.Graph `bson:"graph,omitempty" json:"graph,omitempty"` // Graph UI & logic representation of the workflow
|
|
||||||
ScheduleActive bool `json:"schedule_active" bson:"schedule_active"` // ScheduleActive is a flag that indicates if the schedule is active, if not the workflow is not scheduled and no execution or booking will be set
|
|
||||||
// Schedule *WorkflowSchedule `bson:"schedule,omitempty" json:"schedule,omitempty"` // Schedule is the schedule of the workflow
|
|
||||||
Shared []string `json:"shared,omitempty" bson:"shared,omitempty"` // Shared is the ID of the shared workflow // AbstractWorkflow contains the basic fields of a workflow
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Workflow) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
|
||||||
return NewAccessor(request) // Create a new instance of the accessor
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Workflow) GetGraphItems(f func(item graph.GraphItem) bool) (list_datas []graph.GraphItem) {
|
|
||||||
for _, item := range w.Graph.Items {
|
|
||||||
if f(item) {
|
|
||||||
list_datas = append(list_datas, item)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Workflow) GetPricedItem(f func(item graph.GraphItem) bool, request *tools.APIRequest) map[string]pricing.PricedItemITF {
|
|
||||||
list_datas := map[string]pricing.PricedItemITF{}
|
|
||||||
for _, item := range w.Graph.Items {
|
|
||||||
if f(item) {
|
|
||||||
dt, res := item.GetResource()
|
|
||||||
ord := res.ConvertToPricedResource(dt, request)
|
|
||||||
list_datas[res.GetID()] = ord
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return list_datas
|
|
||||||
}
|
|
||||||
|
|
||||||
type Related struct {
|
|
||||||
Node resources.ResourceInterface
|
|
||||||
Links []graph.GraphLink
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Workflow) GetByRelatedProcessing(processingID string, g func(item graph.GraphItem) bool) map[string]Related {
|
|
||||||
related := map[string]Related{}
|
|
||||||
for _, link := range w.Graph.Links {
|
|
||||||
nodeID := link.Destination.ID
|
|
||||||
var node resources.ResourceInterface
|
|
||||||
if g(w.Graph.Items[link.Source.ID]) {
|
|
||||||
item := w.Graph.Items[link.Source.ID]
|
|
||||||
_, node = item.GetResource()
|
|
||||||
}
|
|
||||||
if node == nil && g(w.Graph.Items[link.Destination.ID]) { // if the source is not a storage, we consider that the destination is the storage
|
|
||||||
nodeID = link.Source.ID
|
|
||||||
item := w.Graph.Items[link.Destination.ID] // and the processing is the source
|
|
||||||
_, node = item.GetResource() // we are looking for the storage as destination
|
|
||||||
}
|
|
||||||
if processingID == nodeID && node != nil { // if the storage is linked to the processing
|
|
||||||
if _, ok := related[processingID]; !ok {
|
|
||||||
related[processingID] = Related{}
|
|
||||||
}
|
|
||||||
rel := related[node.GetID()]
|
|
||||||
rel.Node = node
|
|
||||||
rel.Links = append(rel.Links, link)
|
|
||||||
related[processingID] = rel
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return related
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ao *Workflow) VerifyAuth(request *tools.APIRequest) bool {
|
|
||||||
isAuthorized := false
|
|
||||||
if len(ao.Shared) > 0 {
|
|
||||||
for _, shared := range ao.Shared {
|
|
||||||
shared, code, _ := shallow_collaborative_area.NewAccessor(request).LoadOne(shared)
|
|
||||||
if code != 200 || shared == nil {
|
|
||||||
isAuthorized = false
|
|
||||||
} else {
|
|
||||||
isAuthorized = shared.VerifyAuth(request)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ao.AbstractObject.VerifyAuth(request) || isAuthorized
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -107,19 +108,19 @@ func (wfa *Workflow) CheckBooking(caller *tools.HTTPCaller) (bool, error) {
|
|||||||
if wfa.Graph == nil { // no graph no booking
|
if wfa.Graph == nil { // no graph no booking
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
accessor := (&resources.ComputeResource{}).GetAccessor(&tools.APIRequest{Caller: caller})
|
accessor := (&compute.ComputeResource{}).GetAccessor(nil)
|
||||||
for _, link := range wfa.Graph.Links {
|
for _, link := range wfa.Graph.Links {
|
||||||
if ok, compute_id := link.IsComputeLink(*wfa.Graph); ok { // check if the link is a link between a compute and a resource
|
if ok, dc_id := wfa.isDCLink(link); ok { // check if the link is a link between a compute and a resource
|
||||||
compute, code, _ := accessor.LoadOne(compute_id)
|
dc, code, _ := accessor.LoadOne(dc_id)
|
||||||
if code != 200 {
|
if code != 200 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// CHECK BOOKING ON PEER, compute could be a remote one
|
// CHECK BOOKING ON PEER, compute could be a remote one
|
||||||
peerID := compute.(*resources.ComputeResource).CreatorID
|
peerID := dc.(*compute.ComputeResource).PeerID
|
||||||
if peerID == "" {
|
if peerID == "" {
|
||||||
return false, errors.New("no peer id")
|
return false, errors.New("no peer id")
|
||||||
} // no peer id no booking, we need to know where to book
|
} // no peer id no booking, we need to know where to book
|
||||||
_, err := (&peer.Peer{}).LaunchPeerExecution(peerID, compute_id, tools.BOOKING, tools.GET, nil, caller)
|
_, err := (&peer.Peer{}).LaunchPeerExecution(peerID, dc_id, tools.BOOKING, tools.GET, nil, caller)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
@ -128,91 +129,31 @@ func (wfa *Workflow) CheckBooking(caller *tools.HTTPCaller) (bool, error) {
|
|||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (wf *Workflow) Planify(start time.Time, end *time.Time, request *tools.APIRequest) (float64, map[tools.DataType]map[string]pricing.PricedItemITF, *Workflow, error) {
|
func (d *Workflow) GetName() string {
|
||||||
priceds := map[tools.DataType]map[string]pricing.PricedItemITF{}
|
return d.Name
|
||||||
ps, priceds, err := plan[*resources.ProcessingResource](tools.PROCESSING_RESOURCE, wf, priceds, request, wf.Graph.IsProcessing,
|
|
||||||
func(res resources.ResourceInterface, priced pricing.PricedItemITF) (time.Time, float64) {
|
|
||||||
return start.Add(time.Duration(wf.Graph.GetAverageTimeProcessingBeforeStart(0, res.GetID(), request)) * time.Second), priced.GetExplicitDurationInS()
|
|
||||||
}, func(started time.Time, duration float64) *time.Time {
|
|
||||||
s := started.Add(time.Duration(duration))
|
|
||||||
return &s
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return 0, priceds, nil, err
|
|
||||||
}
|
|
||||||
if _, priceds, err = plan[resources.ResourceInterface](tools.DATA_RESOURCE, wf, priceds, request, wf.Graph.IsData,
|
|
||||||
func(res resources.ResourceInterface, priced pricing.PricedItemITF) (time.Time, float64) {
|
|
||||||
return start, 0
|
|
||||||
}, func(started time.Time, duration float64) *time.Time {
|
|
||||||
return end
|
|
||||||
}); err != nil {
|
|
||||||
return 0, priceds, nil, err
|
|
||||||
}
|
|
||||||
for k, f := range map[tools.DataType]func(graph.GraphItem) bool{tools.STORAGE_RESOURCE: wf.Graph.IsStorage, tools.COMPUTE_RESOURCE: wf.Graph.IsCompute} {
|
|
||||||
if _, priceds, err = plan[resources.ResourceInterface](k, wf, priceds, request, f,
|
|
||||||
func(res resources.ResourceInterface, priced pricing.PricedItemITF) (time.Time, float64) {
|
|
||||||
nearestStart, longestDuration := wf.Graph.GetAverageTimeRelatedToProcessingActivity(start, ps, res, func(i graph.GraphItem) (r resources.ResourceInterface) {
|
|
||||||
if f(i) {
|
|
||||||
_, r = i.GetResource()
|
|
||||||
}
|
|
||||||
return r
|
|
||||||
}, request)
|
|
||||||
return start.Add(time.Duration(nearestStart) * time.Second), longestDuration
|
|
||||||
}, func(started time.Time, duration float64) *time.Time {
|
|
||||||
s := started.Add(time.Duration(duration))
|
|
||||||
return &s
|
|
||||||
}); err != nil {
|
|
||||||
return 0, priceds, nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
longest := common.GetPlannerLongestTime(end, priceds, request)
|
|
||||||
if _, priceds, err = plan[resources.ResourceInterface](tools.WORKFLOW_RESOURCE, wf, priceds, request, wf.Graph.IsWorkflow,
|
|
||||||
func(res resources.ResourceInterface, priced pricing.PricedItemITF) (time.Time, float64) {
|
|
||||||
start := start.Add(time.Duration(common.GetPlannerNearestStart(start, priceds, request)) * time.Second)
|
|
||||||
longest := float64(-1)
|
|
||||||
r, code, err := res.GetAccessor(request).LoadOne(res.GetID())
|
|
||||||
if code != 200 || err != nil {
|
|
||||||
return start, longest
|
|
||||||
}
|
|
||||||
if neoLongest, _, _, err := r.(*Workflow).Planify(start, end, request); err != nil {
|
|
||||||
return start, longest
|
|
||||||
} else if neoLongest > longest {
|
|
||||||
longest = neoLongest
|
|
||||||
}
|
|
||||||
return start.Add(time.Duration(common.GetPlannerNearestStart(start, priceds, request)) * time.Second), longest
|
|
||||||
}, func(start time.Time, longest float64) *time.Time {
|
|
||||||
s := start.Add(time.Duration(longest) * time.Second)
|
|
||||||
return &s
|
|
||||||
}); err != nil {
|
|
||||||
return 0, priceds, nil, err
|
|
||||||
}
|
|
||||||
return longest, priceds, wf, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func plan[T resources.ResourceInterface](dt tools.DataType, wf *Workflow, priceds map[tools.DataType]map[string]pricing.PricedItemITF, request *tools.APIRequest,
|
func (d *Workflow) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||||
f func(graph.GraphItem) bool, start func(resources.ResourceInterface, pricing.PricedItemITF) (time.Time, float64), end func(time.Time, float64) *time.Time) ([]T, map[tools.DataType]map[string]pricing.PricedItemITF, error) {
|
data := New() // Create a new instance of the accessor
|
||||||
resources := []T{}
|
data.Init(tools.WORKFLOW, caller) // Initialize the accessor with the WORKFLOW model type
|
||||||
for _, item := range wf.GetGraphItems(f) {
|
return data
|
||||||
if priceds[dt] == nil {
|
}
|
||||||
priceds[dt] = map[string]pricing.PricedItemITF{}
|
|
||||||
}
|
func (dma *Workflow) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||||
dt, realItem := item.GetResource()
|
b, err := json.Marshal(j)
|
||||||
if realItem == nil {
|
if err != nil {
|
||||||
return resources, priceds, errors.New("could not load the processing resource")
|
return nil
|
||||||
}
|
}
|
||||||
priced := realItem.ConvertToPricedResource(dt, request)
|
json.Unmarshal(b, dma)
|
||||||
started, duration := start(realItem, priced)
|
return dma
|
||||||
priced.SetLocationStart(started)
|
}
|
||||||
if duration >= 0 {
|
|
||||||
if e := end(started, duration); e != nil {
|
func (dma *Workflow) Serialize() map[string]interface{} {
|
||||||
priced.SetLocationEnd(*e)
|
var m map[string]interface{}
|
||||||
}
|
b, err := json.Marshal(dma)
|
||||||
}
|
if err != nil {
|
||||||
if e := end(started, priced.GetExplicitDurationInS()); e != nil {
|
return nil
|
||||||
priced.SetLocationEnd(*e)
|
}
|
||||||
}
|
json.Unmarshal(b, &m)
|
||||||
resources = append(resources, realItem.(T))
|
return m
|
||||||
priceds[dt][item.ID] = priced
|
|
||||||
}
|
|
||||||
return resources, priceds, nil
|
|
||||||
}
|
}
|
||||||
|
@ -8,8 +8,10 @@ import (
|
|||||||
|
|
||||||
type WorkflowHistory struct{ Workflow }
|
type WorkflowHistory struct{ Workflow }
|
||||||
|
|
||||||
func (d *WorkflowHistory) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
func (d *WorkflowHistory) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||||
return NewAccessorHistory(request) // Create a new instance of the accessor
|
data := New() // Create a new instance of the accessor
|
||||||
|
data.Init(tools.WORKSPACE_HISTORY, caller) // Initialize the accessor with the WORKSPACE model type
|
||||||
|
return data
|
||||||
}
|
}
|
||||||
func (r *WorkflowHistory) GenerateID() {
|
func (r *WorkflowHistory) GenerateID() {
|
||||||
r.UUID = uuid.New().String()
|
r.UUID = uuid.New().String()
|
||||||
|
@ -2,65 +2,175 @@ package workflow
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"slices"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||||
"cloud.o-forge.io/core/oc-lib/logs"
|
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
|
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
"cloud.o-forge.io/core/oc-lib/models/peer"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/resources"
|
"cloud.o-forge.io/core/oc-lib/models/resources"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workspace"
|
"cloud.o-forge.io/core/oc-lib/models/workspace"
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
|
cron "github.com/robfig/cron/v3"
|
||||||
)
|
)
|
||||||
|
|
||||||
type workflowMongoAccessor struct {
|
type workflowMongoAccessor struct {
|
||||||
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
|
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
|
||||||
computeResourceAccessor utils.Accessor
|
|
||||||
collaborativeAreaAccessor utils.Accessor
|
|
||||||
workspaceAccessor utils.Accessor
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewAccessorHistory(request *tools.APIRequest) *workflowMongoAccessor {
|
|
||||||
return new(tools.WORKFLOW_HISTORY, request)
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewAccessor(request *tools.APIRequest) *workflowMongoAccessor {
|
|
||||||
return new(tools.WORKFLOW, request)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new instance of the workflowMongoAccessor
|
// New creates a new instance of the workflowMongoAccessor
|
||||||
func new(t tools.DataType, request *tools.APIRequest) *workflowMongoAccessor {
|
func New() *workflowMongoAccessor {
|
||||||
return &workflowMongoAccessor{
|
return &workflowMongoAccessor{}
|
||||||
computeResourceAccessor: (&resources.ComputeResource{}).GetAccessor(request),
|
}
|
||||||
collaborativeAreaAccessor: (&shallow_collaborative_area.ShallowCollaborativeArea{}).GetAccessor(request),
|
|
||||||
workspaceAccessor: (&workspace.Workspace{}).GetAccessor(request),
|
/*
|
||||||
AbstractAccessor: utils.AbstractAccessor{
|
* THERE IS A LOT IN THIS FILE SHOULD BE AWARE OF THE COMMENTS
|
||||||
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
|
*/
|
||||||
Request: request,
|
|
||||||
Type: t,
|
/*
|
||||||
},
|
* getExecutions is a function that returns the executions of a workflow
|
||||||
|
* it returns an array of workflow_execution.WorkflowExecution
|
||||||
|
*/
|
||||||
|
func (wfa *workflowMongoAccessor) getExecutions(id string, data *Workflow) ([]*workflow_execution.WorkflowExecution, error) {
|
||||||
|
workflows_execution := []*workflow_execution.WorkflowExecution{}
|
||||||
|
if data.Schedule != nil { // only set execution on a scheduled workflow
|
||||||
|
if data.Schedule.Start == nil { // if no start date, return an error
|
||||||
|
return workflows_execution, errors.New("should get a start date on the scheduler.")
|
||||||
}
|
}
|
||||||
|
if data.Schedule.End != nil && data.Schedule.End.IsZero() { // if end date is zero, set it to nil
|
||||||
|
data.Schedule.End = nil
|
||||||
|
}
|
||||||
|
if len(data.Schedule.Cron) > 0 { // if cron is set then end date should be set
|
||||||
|
if data.Schedule.End == nil {
|
||||||
|
return workflows_execution, errors.New("a cron task should have an end date.")
|
||||||
|
}
|
||||||
|
cronStr := strings.Split(data.Schedule.Cron, " ") // split the cron string to treat it
|
||||||
|
if len(cronStr) < 6 { // if the cron string is less than 6 fields, return an error because format is : ss mm hh dd MM dw (6 fields)
|
||||||
|
return nil, errors.New("Bad cron message: " + data.Schedule.Cron + ". Should be at least ss mm hh dd MM dw")
|
||||||
|
}
|
||||||
|
subCron := strings.Join(cronStr[:6], " ")
|
||||||
|
// cron should be parsed as ss mm hh dd MM dw t (min 6 fields)
|
||||||
|
specParser := cron.NewParser(cron.Second | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow) // create a new cron parser
|
||||||
|
sched, err := specParser.Parse(subCron) // parse the cron string
|
||||||
|
if err != nil {
|
||||||
|
return workflows_execution, errors.New("Bad cron message: " + err.Error())
|
||||||
|
}
|
||||||
|
// loop through the cron schedule to set the executions
|
||||||
|
for s := sched.Next(*data.Schedule.Start); !s.IsZero() && s.Before(*data.Schedule.End); s = sched.Next(s) {
|
||||||
|
obj := &workflow_execution.WorkflowExecution{
|
||||||
|
AbstractObject: utils.AbstractObject{
|
||||||
|
Name: data.Schedule.Name, // set the name of the execution
|
||||||
|
},
|
||||||
|
ExecDate: &s, // set the execution date
|
||||||
|
EndDate: data.Schedule.End, // set the end date
|
||||||
|
State: 1, // set the state to 1 (scheduled)
|
||||||
|
WorkflowID: id, // set the workflow id dependancy of the execution
|
||||||
|
}
|
||||||
|
workflows_execution = append(workflows_execution, obj) // append the execution to the array
|
||||||
|
}
|
||||||
|
|
||||||
|
} else { // if no cron, set the execution to the start date
|
||||||
|
obj := &workflow_execution.WorkflowExecution{ // create a new execution
|
||||||
|
AbstractObject: utils.AbstractObject{
|
||||||
|
Name: data.Schedule.Name,
|
||||||
|
},
|
||||||
|
ExecDate: data.Schedule.Start,
|
||||||
|
EndDate: data.Schedule.End,
|
||||||
|
State: 1,
|
||||||
|
WorkflowID: id,
|
||||||
|
}
|
||||||
|
workflows_execution = append(workflows_execution, obj) // append the execution to the array
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return workflows_execution, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteOne deletes a workflow from the database, delete depending executions and bookings
|
// DeleteOne deletes a workflow from the database, delete depending executions and bookings
|
||||||
func (a *workflowMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
func (wfa *workflowMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||||
res, code, err := utils.GenericDeleteOne(id, a)
|
wfa.execution(id, &Workflow{
|
||||||
|
AbstractWorkflow: AbstractWorkflow{ScheduleActive: false},
|
||||||
|
}, true) // delete the executions
|
||||||
|
res, code, err := wfa.GenericDeleteOne(id, wfa)
|
||||||
if res != nil && code == 200 {
|
if res != nil && code == 200 {
|
||||||
a.execute(res.(*Workflow), true, false) // up to date the workspace for the workflow
|
wfa.execute(res.(*Workflow), true, false) // up to date the workspace for the workflow
|
||||||
a.share(res.(*Workflow), true, a.GetCaller())
|
wfa.share(res.(*Workflow), true, wfa.Caller)
|
||||||
}
|
}
|
||||||
return a.verifyResource(res), code, err
|
return res, code, err
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* book is a function that books a workflow on the peers
|
||||||
|
* it takes the workflow id, the real data and the executions
|
||||||
|
* it returns an error if the booking fails
|
||||||
|
*/
|
||||||
|
func (wfa *workflowMongoAccessor) book(id string, realData *Workflow, execs []*workflow_execution.WorkflowExecution) error {
|
||||||
|
if wfa.Caller == nil || wfa.Caller.URLS == nil || wfa.Caller.URLS[tools.BOOKING] == nil {
|
||||||
|
return errors.New("no caller defined")
|
||||||
|
}
|
||||||
|
methods := wfa.Caller.URLS[tools.BOOKING]
|
||||||
|
if _, ok := methods[tools.POST]; !ok {
|
||||||
|
return errors.New("no path found")
|
||||||
|
}
|
||||||
|
res, code, _ := wfa.LoadOne(id)
|
||||||
|
if code != 200 {
|
||||||
|
return errors.New("could not load workflow")
|
||||||
|
}
|
||||||
|
r := res.(*Workflow)
|
||||||
|
g := r.Graph
|
||||||
|
if realData.Graph != nil { // if the graph is set, set it to the real data
|
||||||
|
g = realData.Graph
|
||||||
|
}
|
||||||
|
if g != nil && g.Links != nil && len(g.Links) > 0 { // if the graph is set and has links then book the workflow (even on ourselves)
|
||||||
|
accessor := (&compute.ComputeResource{}).GetAccessor(nil)
|
||||||
|
isDCFound := []string{}
|
||||||
|
for _, link := range g.Links {
|
||||||
|
if ok, dc_id := realData.isDCLink(link); ok { // check if the link is a link between a compute and a resource booking is only on compute
|
||||||
|
if slices.Contains(isDCFound, dc_id) {
|
||||||
|
continue
|
||||||
|
} // if the compute is already found, skip it
|
||||||
|
isDCFound = append(isDCFound, dc_id)
|
||||||
|
dc, code, _ := accessor.LoadOne(dc_id)
|
||||||
|
if code != 200 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// CHECK BOOKING
|
||||||
|
peerID := dc.(*compute.ComputeResource).PeerID
|
||||||
|
if peerID == "" { // no peer id no booking
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// BOOKING ON PEER
|
||||||
|
_, err := (&peer.Peer{}).LaunchPeerExecution(peerID, "", tools.BOOKING, tools.POST,
|
||||||
|
(&workflow_execution.WorkflowExecutions{ // it's the standard model for booking see OC-PEER
|
||||||
|
WorkflowID: id, // set the workflow id "WHO"
|
||||||
|
ResourceID: dc_id, // set the compute id "WHERE"
|
||||||
|
Executions: execs, // set the executions to book "WHAT"
|
||||||
|
}).Serialize(), wfa.Caller)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("BOOKING", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* share is a function that shares a workflow to the peers if the workflow is shared
|
* share is a function that shares a workflow to the peers if the workflow is shared
|
||||||
*/
|
*/
|
||||||
func (a *workflowMongoAccessor) share(realData *Workflow, delete bool, caller *tools.HTTPCaller) {
|
func (wfa *workflowMongoAccessor) share(realData *Workflow, delete bool, caller *tools.HTTPCaller) {
|
||||||
if realData == nil || realData.Shared == nil || len(realData.Shared) == 0 || caller == nil || caller.Disabled { // no shared no sharing
|
if realData == nil || realData.Shared == nil || len(realData.Shared) == 0 || caller == nil || caller.Disabled { // no shared no sharing
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
for _, sharedID := range realData.Shared { // loop through the shared ids
|
for _, sharedID := range realData.Shared { // loop through the shared ids
|
||||||
res, code, _ := a.collaborativeAreaAccessor.LoadOne(sharedID)
|
access := (&shallow_collaborative_area.ShallowCollaborativeArea{}).GetAccessor(nil)
|
||||||
|
res, code, _ := access.LoadOne(sharedID)
|
||||||
if code != 200 {
|
if code != 200 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -71,87 +181,141 @@ func (a *workflowMongoAccessor) share(realData *Workflow, delete bool, caller *t
|
|||||||
if ok, _ := paccess.IsMySelf(); ok { // if the peer is the current peer, never share because it will create a loop
|
if ok, _ := paccess.IsMySelf(); ok { // if the peer is the current peer, never share because it will create a loop
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if delete { // if the workflow is deleted, share the deletion orderResourceAccessor utils.Accessor
|
if delete { // if the workflow is deleted, share the deletion
|
||||||
|
|
||||||
history := NewHistory()
|
history := NewHistory()
|
||||||
history.StoreOne(history.MapFromWorkflow(res.(*Workflow)))
|
history.StoreOne(history.MapFromWorkflow(res.(*Workflow)))
|
||||||
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKFLOW, tools.DELETE,
|
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKFLOW, tools.DELETE, map[string]interface{}{}, caller)
|
||||||
map[string]interface{}{}, caller)
|
|
||||||
} else { // if the workflow is updated, share the update
|
} else { // if the workflow is updated, share the update
|
||||||
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKFLOW, tools.PUT,
|
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKFLOW, tools.PUT, res.Serialize(), caller)
|
||||||
res.Serialize(res), caller)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
a.Logger.Error().Msg(err.Error())
|
wfa.Logger.Error().Msg(err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateOne updates a workflow in the database
|
/*
|
||||||
func (a *workflowMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
* execution is a create or delete function for the workflow executions depending on the schedule of the workflow
|
||||||
// avoid the update if the schedule is the same
|
*/
|
||||||
set = a.verifyResource(set)
|
func (wfa *workflowMongoAccessor) execution(id string, realData *Workflow, delete bool) (int, error) {
|
||||||
if set.(*Workflow).Graph != nil && set.(*Workflow).Graph.Partial {
|
nats := tools.NewNATSCaller() // create a new nats caller because executions are sent to the nats for daemons
|
||||||
return nil, 403, errors.New("you are not allowed to update a partial workflow")
|
mongo.MONGOService.DeleteMultiple(map[string]interface{}{
|
||||||
|
"state": 1, // only delete the scheduled executions only scheduled if executions are in progress or ended, they should not be deleted for registration
|
||||||
|
"workflow_id": id,
|
||||||
|
}, tools.WORKFLOW_EXECUTION.String())
|
||||||
|
err := wfa.book(id, realData, []*workflow_execution.WorkflowExecution{}) // delete the booking of the workflow on the peers
|
||||||
|
fmt.Println("DELETE BOOKING", err)
|
||||||
|
nats.SetNATSPub(tools.WORKFLOW.String(), tools.REMOVE, realData) // send the deletion to the nats
|
||||||
|
if err != nil {
|
||||||
|
return 409, err
|
||||||
}
|
}
|
||||||
res, code, err := utils.GenericUpdateOne(set, id, a, &Workflow{})
|
|
||||||
|
accessor := (&workflow_execution.WorkflowExecution{}).GetAccessor(nil)
|
||||||
|
execs, err := wfa.getExecutions(id, realData) // get the executions of the workflow
|
||||||
|
if err != nil {
|
||||||
|
return 422, err
|
||||||
|
}
|
||||||
|
if !realData.ScheduleActive || delete { // if the schedule is not active, delete the executions
|
||||||
|
execs = []*workflow_execution.WorkflowExecution{}
|
||||||
|
}
|
||||||
|
err = wfa.book(id, realData, execs) // book the workflow on the peers
|
||||||
|
fmt.Println("BOOKING", err)
|
||||||
|
if err != nil {
|
||||||
|
return 409, err // if the booking fails, return an error for integrity between peers
|
||||||
|
}
|
||||||
|
fmt.Println("BOOKING", delete)
|
||||||
|
for _, obj := range execs {
|
||||||
|
_, code, err := accessor.StoreOne(obj)
|
||||||
|
fmt.Println("EXEC", code, err)
|
||||||
|
if code != 200 {
|
||||||
|
return code, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
nats.SetNATSPub(tools.WORKFLOW.String(), tools.CREATE, realData) // send the creation to the nats
|
||||||
|
return 200, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateOne updates a workflow in the database
|
||||||
|
func (wfa *workflowMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||||
|
res, code, err := wfa.LoadOne(id)
|
||||||
|
if code != 200 {
|
||||||
|
return nil, 409, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// avoid the update if the schedule is the same
|
||||||
|
avoid := set.(*Workflow).Schedule == nil || (res.(*Workflow).Schedule != nil && res.(*Workflow).ScheduleActive == set.(*Workflow).ScheduleActive && res.(*Workflow).Schedule.Start == set.(*Workflow).Schedule.Start && res.(*Workflow).Schedule.End == set.(*Workflow).Schedule.End && res.(*Workflow).Schedule.Cron == set.(*Workflow).Schedule.Cron)
|
||||||
|
res, code, err = wfa.GenericUpdateOne(set, id, wfa, &Workflow{})
|
||||||
if code != 200 {
|
if code != 200 {
|
||||||
return nil, code, err
|
return nil, code, err
|
||||||
}
|
}
|
||||||
workflow := res.(*Workflow)
|
workflow := res.(*Workflow)
|
||||||
a.execute(workflow, false, true) // update the workspace for the workflow
|
if !avoid { // if the schedule is not avoided, update the executions
|
||||||
a.share(workflow, false, a.GetCaller()) // share the update to the peers
|
if code, err := wfa.execution(id, workflow, false); code != 200 {
|
||||||
|
return nil, code, errors.New("could not update the executions : " + err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Println("UPDATE", workflow.ScheduleActive, workflow.Schedule)
|
||||||
|
if workflow.ScheduleActive && workflow.Schedule != nil { // if the workflow is scheduled, update the executions
|
||||||
|
now := time.Now().UTC()
|
||||||
|
if (workflow.Schedule.End != nil && now.After(*workflow.Schedule.End)) || (workflow.Schedule.End == nil && workflow.Schedule.Start != nil && now.After(*workflow.Schedule.Start)) { // if the start date is passed, then you can book
|
||||||
|
workflow.ScheduleActive = false
|
||||||
|
wfa.GenericRawUpdateOne(workflow, id, wfa)
|
||||||
|
} // if the start date is passed, update the executions
|
||||||
|
}
|
||||||
|
wfa.execute(workflow, false, false) // update the workspace for the workflow
|
||||||
|
wfa.share(workflow, false, wfa.Caller) // share the update to the peers
|
||||||
return res, code, nil
|
return res, code, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// StoreOne stores a workflow in the database
|
// StoreOne stores a workflow in the database
|
||||||
func (a *workflowMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
func (wfa *workflowMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
data = a.verifyResource(data)
|
|
||||||
d := data.(*Workflow)
|
d := data.(*Workflow)
|
||||||
if d.Graph != nil && d.Graph.Partial {
|
if d.ScheduleActive && d.Schedule != nil { // if the workflow is scheduled, update the executions
|
||||||
return nil, 403, errors.New("you are not allowed to update a partial workflow")
|
now := time.Now().UTC()
|
||||||
|
if (d.Schedule.End != nil && now.After(*d.Schedule.End)) || (d.Schedule.End == nil && d.Schedule.Start != nil && now.After(*d.Schedule.Start)) { // if the start date is passed, then you can book
|
||||||
|
d.ScheduleActive = false
|
||||||
|
} // if the start date is passed, update the executions
|
||||||
}
|
}
|
||||||
res, code, err := utils.GenericStoreOne(d, a)
|
res, code, err := wfa.GenericStoreOne(d, wfa)
|
||||||
if err != nil || code != 200 {
|
if err != nil || code != 200 {
|
||||||
return nil, code, err
|
return nil, code, err
|
||||||
}
|
}
|
||||||
workflow := res.(*Workflow)
|
workflow := res.(*Workflow)
|
||||||
|
|
||||||
a.share(workflow, false, a.GetCaller()) // share the creation to the peers
|
wfa.share(workflow, false, wfa.Caller) // share the creation to the peers
|
||||||
a.execute(workflow, false, true) // store the workspace for the workflow
|
//store the executions
|
||||||
|
if code, err := wfa.execution(res.GetID(), workflow, false); err != nil {
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
wfa.execute(workflow, false, false) // store the workspace for the workflow
|
||||||
return res, code, nil
|
return res, code, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// CopyOne copies a workflow in the database
|
// CopyOne copies a workflow in the database
|
||||||
func (a *workflowMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
func (wfa *workflowMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
wf := data.(*Workflow)
|
return wfa.GenericStoreOne(data, wfa)
|
||||||
for _, item := range wf.Graph.Items {
|
|
||||||
_, obj := item.GetResource()
|
|
||||||
if obj != nil {
|
|
||||||
obj.ClearEnv()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return utils.GenericStoreOne(data, a)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// execute is a function that executes a workflow
|
// execute is a function that executes a workflow
|
||||||
// it stores the workflow resources in a specific workspace to never have a conflict in UI and logic
|
// it stores the workflow resources in a specific workspace to never have a conflict in UI and logic
|
||||||
func (a *workflowMongoAccessor) execute(workflow *Workflow, delete bool, active bool) {
|
func (wfa *workflowMongoAccessor) execute(workflow *Workflow, delete bool, active bool) {
|
||||||
|
|
||||||
|
accessor := (&workspace.Workspace{}).GetAccessor(nil)
|
||||||
filters := &dbs.Filters{
|
filters := &dbs.Filters{
|
||||||
Or: map[string][]dbs.Filter{ // filter by standard workspace name attached to a workflow
|
Or: map[string][]dbs.Filter{ // filter by standard workspace name attached to a workflow
|
||||||
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: workflow.Name + "_workspace"}},
|
"abstractobject.name": {{dbs.LIKE.String(), workflow.Name + "_workspace"}},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
resource, _, err := a.workspaceAccessor.Search(filters, "", workflow.IsDraft)
|
resource, _, err := accessor.Search(filters, "")
|
||||||
if delete { // if delete is set to true, delete the workspace
|
if delete { // if delete is set to true, delete the workspace
|
||||||
for _, r := range resource {
|
for _, r := range resource {
|
||||||
a.workspaceAccessor.DeleteOne(r.GetID())
|
accessor.DeleteOne(r.GetID())
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err == nil && len(resource) > 0 { // if the workspace already exists, update it
|
if err == nil && len(resource) > 0 { // if the workspace already exists, update it
|
||||||
a.workspaceAccessor.UpdateOne(&workspace.Workspace{
|
accessor.UpdateOne(&workspace.Workspace{
|
||||||
Active: active,
|
Active: active,
|
||||||
ResourceSet: resources.ResourceSet{
|
ResourceSet: resources.ResourceSet{
|
||||||
Datas: workflow.Datas,
|
Datas: workflow.Datas,
|
||||||
@ -162,7 +326,7 @@ func (a *workflowMongoAccessor) execute(workflow *Workflow, delete bool, active
|
|||||||
},
|
},
|
||||||
}, resource[0].GetID())
|
}, resource[0].GetID())
|
||||||
} else { // if the workspace does not exist, create it
|
} else { // if the workspace does not exist, create it
|
||||||
a.workspaceAccessor.StoreOne(&workspace.Workspace{
|
accessor.StoreOne(&workspace.Workspace{
|
||||||
Active: active,
|
Active: active,
|
||||||
AbstractObject: utils.AbstractObject{Name: workflow.Name + "_workspace"},
|
AbstractObject: utils.AbstractObject{Name: workflow.Name + "_workspace"},
|
||||||
ResourceSet: resources.ResourceSet{
|
ResourceSet: resources.ResourceSet{
|
||||||
@ -176,49 +340,65 @@ func (a *workflowMongoAccessor) execute(workflow *Workflow, delete bool, active
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *workflowMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
// LoadOne loads a workflow from the database
|
||||||
return utils.GenericLoadOne[*Workflow](id, func(d utils.DBObject) (utils.DBObject, int, error) {
|
func (wfa *workflowMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||||
w := d.(*Workflow)
|
var workflow Workflow
|
||||||
a.execute(w, false, true) // if no workspace is attached to the workflow, create it
|
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
|
||||||
return d, 200, nil
|
if err != nil {
|
||||||
}, a)
|
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
res_mongo.Decode(&workflow)
|
||||||
|
if workflow.ScheduleActive && workflow.Schedule != nil { // if the workflow is scheduled, update the executions
|
||||||
|
now := time.Now().UTC()
|
||||||
|
if (workflow.Schedule.End != nil && now.After(*workflow.Schedule.End)) || (workflow.Schedule.End == nil && workflow.Schedule.Start != nil && now.After(*workflow.Schedule.Start)) { // if the start date is passed, then you can book
|
||||||
|
workflow.ScheduleActive = false
|
||||||
|
wfa.GenericRawUpdateOne(&workflow, id, wfa)
|
||||||
|
|
||||||
|
} // if the start date is passed, update the executions
|
||||||
|
}
|
||||||
|
wfa.execute(&workflow, false, true) // if no workspace is attached to the workflow, create it
|
||||||
|
return &workflow, 200, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *workflowMongoAccessor) LoadAll(isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
// LoadAll loads all the workflows from the database
|
||||||
return utils.GenericLoadAll[*Workflow](func(d utils.DBObject) utils.ShallowDBObject { return &d.(*Workflow).AbstractObject }, isDraft, a)
|
func (wfa workflowMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||||
|
objs := []utils.ShallowDBObject{}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []Workflow
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
for _, r := range results {
|
||||||
|
objs = append(objs, &r.AbstractObject) // only AbstractObject fields !
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *workflowMongoAccessor) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
func (wfa *workflowMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||||
return utils.GenericSearch[*Workflow](filters, search, (&Workflow{}).GetObjectFilters(search), func(d utils.DBObject) utils.ShallowDBObject { return a.verifyResource(d) }, isDraft, a)
|
objs := []utils.ShallowDBObject{}
|
||||||
}
|
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||||
|
filters = &dbs.Filters{
|
||||||
func (a *workflowMongoAccessor) verifyResource(obj utils.DBObject) utils.DBObject {
|
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
|
||||||
wf := obj.(*Workflow)
|
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
if wf.Graph == nil {
|
},
|
||||||
return wf
|
}
|
||||||
}
|
}
|
||||||
for _, item := range wf.Graph.Items {
|
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||||
t, resource := item.GetResource()
|
if err != nil {
|
||||||
if resource == nil {
|
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||||
continue
|
return nil, code, err
|
||||||
}
|
}
|
||||||
var access utils.Accessor
|
var results []Workflow
|
||||||
if t == tools.COMPUTE_RESOURCE {
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
access = resources.NewAccessor[*resources.ComputeResource](t, a.GetRequest(), func() utils.DBObject { return &resources.ComputeResource{} })
|
return nil, 404, err
|
||||||
} else if t == tools.PROCESSING_RESOURCE {
|
}
|
||||||
access = resources.NewAccessor[*resources.ProcessingResource](t, a.GetRequest(), func() utils.DBObject { return &resources.ProcessingResource{} })
|
for _, r := range results {
|
||||||
} else if t == tools.STORAGE_RESOURCE {
|
objs = append(objs, &r)
|
||||||
access = resources.NewAccessor[*resources.StorageResource](t, a.GetRequest(), func() utils.DBObject { return &resources.StorageResource{} })
|
}
|
||||||
} else if t == tools.WORKFLOW_RESOURCE {
|
return objs, 200, nil
|
||||||
access = resources.NewAccessor[*resources.WorkflowResource](t, a.GetRequest(), func() utils.DBObject { return &resources.WorkflowResource{} })
|
|
||||||
} else if t == tools.DATA_RESOURCE {
|
|
||||||
access = resources.NewAccessor[*resources.DataResource](t, a.GetRequest(), func() utils.DBObject { return &resources.DataResource{} })
|
|
||||||
} else {
|
|
||||||
wf.Graph.Clear(resource.GetID())
|
|
||||||
}
|
|
||||||
if error := utils.VerifyAccess(access, resource.GetID()); error != nil {
|
|
||||||
wf.Graph.Clear(resource.GetID())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return wf
|
|
||||||
}
|
}
|
||||||
|
23
models/workflow/workflow_schedule.go
Normal file
23
models/workflow/workflow_schedule.go
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
package workflow
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
// WorkflowSchedule is a struct that contains the scheduling information of a workflow
|
||||||
|
type ScheduleMode int
|
||||||
|
|
||||||
|
const (
|
||||||
|
TASK ScheduleMode = iota
|
||||||
|
SERVICE
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* WorkflowSchedule is a struct that contains the scheduling information of a workflow
|
||||||
|
* It contains the mode of the schedule (Task or Service), the name of the schedule, the start and end time of the schedule and the cron expression
|
||||||
|
*/
|
||||||
|
type WorkflowSchedule struct {
|
||||||
|
Mode int64 `json:"mode" bson:"mode" validate:"required"` // Mode is the mode of the schedule (Task or Service)
|
||||||
|
Name string `json:"name" bson:"name" validate:"required"` // Name is the name of the schedule
|
||||||
|
Start *time.Time `json:"start" bson:"start" validate:"required,ltfield=End"` // Start is the start time of the schedule, is required and must be less than the End time
|
||||||
|
End *time.Time `json:"end,omitempty" bson:"end,omitempty"` // End is the end time of the schedule
|
||||||
|
Cron string `json:"cron,omitempty" bson:"cron,omitempty"` // here the cron format : ss mm hh dd MM dw task
|
||||||
|
}
|
@ -12,7 +12,7 @@ func TestStoreOneWorkflow(t *testing.T) {
|
|||||||
AbstractObject: utils.AbstractObject{Name: "testWorkflow"},
|
AbstractObject: utils.AbstractObject{Name: "testWorkflow"},
|
||||||
}
|
}
|
||||||
|
|
||||||
wma := NewAccessor(nil)
|
wma := New()
|
||||||
id, _, _ := wma.StoreOne(&w)
|
id, _, _ := wma.StoreOne(&w)
|
||||||
|
|
||||||
assert.NotEmpty(t, id)
|
assert.NotEmpty(t, id)
|
||||||
@ -23,7 +23,7 @@ func TestLoadOneWorkflow(t *testing.T) {
|
|||||||
AbstractObject: utils.AbstractObject{Name: "testWorkflow"},
|
AbstractObject: utils.AbstractObject{Name: "testWorkflow"},
|
||||||
}
|
}
|
||||||
|
|
||||||
wma := NewAccessor(nil)
|
wma := New()
|
||||||
new_w, _, _ := wma.StoreOne(&w)
|
new_w, _, _ := wma.StoreOne(&w)
|
||||||
assert.Equal(t, w, new_w)
|
assert.Equal(t, w, new_w)
|
||||||
}
|
}
|
||||||
|
@ -1,78 +1,94 @@
|
|||||||
package workflow_execution
|
package workflow_execution
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/booking"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// ScheduledType - Enum for the different states of a workflow execution
|
||||||
|
type ScheduledType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
SCHEDULED ScheduledType = iota + 1
|
||||||
|
STARTED
|
||||||
|
FAILURE
|
||||||
|
SUCCESS
|
||||||
|
FORGOTTEN
|
||||||
|
)
|
||||||
|
|
||||||
|
var str = [...]string{
|
||||||
|
"scheduled",
|
||||||
|
"started",
|
||||||
|
"failure",
|
||||||
|
"success",
|
||||||
|
"forgotten",
|
||||||
|
}
|
||||||
|
|
||||||
|
func FromInt(i int) string {
|
||||||
|
return str[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d ScheduledType) String() string {
|
||||||
|
return str[d]
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnumIndex - Creating common behavior - give the type a EnumIndex functio
|
||||||
|
func (d ScheduledType) EnumIndex() int {
|
||||||
|
return int(d)
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* WorkflowExecution is a struct that represents a list of workflow executions
|
* WorkflowExecutions is a struct that represents a list of workflow executions
|
||||||
|
* Warning: No user can write (del, post, put) a workflow execution, it is only used by the system
|
||||||
|
* workflows generate their own executions
|
||||||
|
*/
|
||||||
|
type WorkflowExecutions struct {
|
||||||
|
WorkflowID string `json:"workflow_id" bson:"workflow_id"`
|
||||||
|
ResourceID string `json:"resource_id" bson:"resource_id"`
|
||||||
|
Executions []*WorkflowExecution `json:"executions" bson:"executions"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// New - Creates a new instance of the WorkflowExecutions from a map
|
||||||
|
func (dma *WorkflowExecutions) Deserialize(j map[string]interface{}) *WorkflowExecutions {
|
||||||
|
b, err := json.Marshal(j)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, dma)
|
||||||
|
return dma
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serialize - Returns the WorkflowExecutions as a map
|
||||||
|
func (dma *WorkflowExecutions) Serialize() map[string]interface{} {
|
||||||
|
var m map[string]interface{}
|
||||||
|
b, err := json.Marshal(dma)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, &m)
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* WorkflowExecution is a struct that represents a workflow execution
|
||||||
* Warning: No user can write (del, post, put) a workflow execution, it is only used by the system
|
* Warning: No user can write (del, post, put) a workflow execution, it is only used by the system
|
||||||
* workflows generate their own executions
|
* workflows generate their own executions
|
||||||
*/
|
*/
|
||||||
type WorkflowExecution struct {
|
type WorkflowExecution struct {
|
||||||
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
|
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
|
||||||
PeerBookByGraph map[string]map[string][]string `json:"peer_book_by_graph,omitempty" bson:"peer_book_by_graph,omitempty"` // BookByResource is a map of the resource id and the list of the booking id
|
ExecDate *time.Time `json:"execution_date,omitempty" bson:"execution_date,omitempty" validate:"required"` // ExecDate is the execution date of the workflow, is required
|
||||||
ExecutionsID string `json:"executions_id,omitempty" bson:"executions_id,omitempty"`
|
|
||||||
ExecDate time.Time `json:"execution_date,omitempty" bson:"execution_date,omitempty" validate:"required"` // ExecDate is the execution date of the workflow, is required
|
|
||||||
EndDate *time.Time `json:"end_date,omitempty" bson:"end_date,omitempty"` // EndDate is the end date of the workflow
|
EndDate *time.Time `json:"end_date,omitempty" bson:"end_date,omitempty"` // EndDate is the end date of the workflow
|
||||||
State enum.BookingStatus `json:"state" bson:"state" default:"0"` // TEMPORARY TODO DEFAULT 1 -> 0 State is the state of the workflow
|
State ScheduledType `json:"state" bson:"state" default:"0"` // State is the state of the workflow
|
||||||
WorkflowID string `json:"workflow_id" bson:"workflow_id,omitempty"` // WorkflowID is the ID of the workflow
|
WorkflowID string `json:"workflow_id" bson:"workflow_id,omitempty"` // WorkflowID is the ID of the workflow
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *WorkflowExecution) StoreDraftDefault() {
|
|
||||||
r.IsDraft = false // TODO: TEMPORARY
|
|
||||||
r.State = enum.SCHEDULED
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *WorkflowExecution) CanUpdate(set utils.DBObject) (bool, utils.DBObject) {
|
|
||||||
if r.State != set.(*WorkflowExecution).State {
|
|
||||||
return true, &WorkflowExecution{State: set.(*WorkflowExecution).State} // only state can be updated
|
|
||||||
}
|
|
||||||
return !r.IsDraft, set // only draft buying can be updated
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *WorkflowExecution) CanDelete() bool {
|
|
||||||
return r.IsDraft // only draft bookings can be deleted
|
|
||||||
}
|
|
||||||
|
|
||||||
func (wfa *WorkflowExecution) Equals(we *WorkflowExecution) bool {
|
func (wfa *WorkflowExecution) Equals(we *WorkflowExecution) bool {
|
||||||
return wfa.ExecDate.Equal(we.ExecDate) && wfa.WorkflowID == we.WorkflowID
|
return wfa.ExecDate.Equal(*we.ExecDate) && wfa.WorkflowID == we.WorkflowID
|
||||||
}
|
|
||||||
|
|
||||||
func (ws *WorkflowExecution) PurgeDraft(request *tools.APIRequest) error {
|
|
||||||
if ws.EndDate == nil {
|
|
||||||
// if no end... then Book like a savage
|
|
||||||
e := ws.ExecDate.Add(time.Hour)
|
|
||||||
ws.EndDate = &e
|
|
||||||
}
|
|
||||||
accessor := ws.GetAccessor(request)
|
|
||||||
res, code, err := accessor.Search(&dbs.Filters{
|
|
||||||
And: map[string][]dbs.Filter{ // check if there is a booking on the same compute resource by filtering on the compute_resource_id, the state and the execution date
|
|
||||||
"state": {{Operator: dbs.EQUAL.String(), Value: enum.DRAFT.EnumIndex()}},
|
|
||||||
"workflow_id": {{Operator: dbs.EQUAL.String(), Value: ws.WorkflowID}},
|
|
||||||
"execution_date": {
|
|
||||||
{Operator: dbs.LTE.String(), Value: primitive.NewDateTimeFromTime(*ws.EndDate)},
|
|
||||||
{Operator: dbs.GTE.String(), Value: primitive.NewDateTimeFromTime(ws.ExecDate)},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}, "", ws.IsDraft)
|
|
||||||
if code != 200 || err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, r := range res {
|
|
||||||
accessor.DeleteOne(r.GetID())
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// tool to transform the argo status to a state
|
// tool to transform the argo status to a state
|
||||||
@ -80,76 +96,52 @@ func (wfa *WorkflowExecution) ArgoStatusToState(status string) *WorkflowExecutio
|
|||||||
status = strings.ToLower(status)
|
status = strings.ToLower(status)
|
||||||
switch status {
|
switch status {
|
||||||
case "succeeded": // Succeeded
|
case "succeeded": // Succeeded
|
||||||
wfa.State = enum.SUCCESS
|
wfa.State = SUCCESS
|
||||||
case "pending": // Pending
|
case "pending": // Pending
|
||||||
wfa.State = enum.SCHEDULED
|
wfa.State = SCHEDULED
|
||||||
case "running": // Running
|
case "running": // Running
|
||||||
wfa.State = enum.STARTED
|
wfa.State = STARTED
|
||||||
default: // Failed
|
default: // Failed
|
||||||
wfa.State = enum.FAILURE
|
wfa.State = FAILURE
|
||||||
}
|
}
|
||||||
return wfa
|
return wfa
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ao *WorkflowExecution) GetID() string {
|
||||||
|
return ao.UUID
|
||||||
|
}
|
||||||
|
|
||||||
func (r *WorkflowExecution) GenerateID() {
|
func (r *WorkflowExecution) GenerateID() {
|
||||||
if r.UUID == "" {
|
|
||||||
r.UUID = uuid.New().String()
|
r.UUID = uuid.New().String()
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *WorkflowExecution) GetName() string {
|
func (d *WorkflowExecution) GetName() string {
|
||||||
return d.UUID + "_" + d.ExecDate.String()
|
return d.UUID + "_" + d.ExecDate.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *WorkflowExecution) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
func (d *WorkflowExecution) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||||
return NewAccessor(request) // Create a new instance of the accessor
|
data := New() // Create a new instance of the accessor
|
||||||
|
data.Init(tools.WORKFLOW_EXECUTION, caller) // Initialize the accessor with the WORKFLOW_EXECUTION model type
|
||||||
|
return data
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *WorkflowExecution) VerifyAuth(request *tools.APIRequest) bool {
|
// New creates a new instance of the WorkflowExecution from a map
|
||||||
return true
|
func (dma *WorkflowExecution) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||||
|
b, err := json.Marshal(j)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, dma)
|
||||||
|
return dma
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *WorkflowExecution) Book(executionsID string, wfID string, priceds map[tools.DataType]map[string]pricing.PricedItemITF) []*booking.Booking {
|
// Serialize returns the WorkflowExecution as a map
|
||||||
booking := d.bookEach(executionsID, wfID, tools.STORAGE_RESOURCE, priceds[tools.STORAGE_RESOURCE])
|
func (dma *WorkflowExecution) Serialize() map[string]interface{} {
|
||||||
booking = append(booking, d.bookEach(executionsID, wfID, tools.PROCESSING_RESOURCE, priceds[tools.PROCESSING_RESOURCE])...)
|
var m map[string]interface{}
|
||||||
return booking
|
b, err := json.Marshal(dma)
|
||||||
}
|
if err != nil {
|
||||||
|
return nil
|
||||||
func (d *WorkflowExecution) bookEach(executionsID string, wfID string, dt tools.DataType, priceds map[string]pricing.PricedItemITF) []*booking.Booking {
|
}
|
||||||
items := []*booking.Booking{}
|
json.Unmarshal(b, &m)
|
||||||
for itemID, priced := range priceds {
|
return m
|
||||||
if d.PeerBookByGraph == nil {
|
|
||||||
d.PeerBookByGraph = map[string]map[string][]string{}
|
|
||||||
}
|
|
||||||
if d.PeerBookByGraph[priced.GetCreatorID()] == nil {
|
|
||||||
d.PeerBookByGraph[priced.GetCreatorID()] = map[string][]string{}
|
|
||||||
}
|
|
||||||
if d.PeerBookByGraph[priced.GetCreatorID()][itemID] == nil {
|
|
||||||
d.PeerBookByGraph[priced.GetCreatorID()][itemID] = []string{}
|
|
||||||
}
|
|
||||||
start := d.ExecDate
|
|
||||||
if s := priced.GetLocationStart(); s != nil {
|
|
||||||
start = *s
|
|
||||||
}
|
|
||||||
end := start.Add(time.Duration(priced.GetExplicitDurationInS()) * time.Second)
|
|
||||||
bookingItem := &booking.Booking{
|
|
||||||
AbstractObject: utils.AbstractObject{
|
|
||||||
UUID: uuid.New().String(),
|
|
||||||
Name: d.GetName() + "_" + executionsID + "_" + wfID,
|
|
||||||
},
|
|
||||||
ExecutionsID: executionsID,
|
|
||||||
State: enum.SCHEDULED,
|
|
||||||
ResourceID: priced.GetID(),
|
|
||||||
ResourceType: dt,
|
|
||||||
DestPeerID: priced.GetCreatorID(),
|
|
||||||
WorkflowID: wfID,
|
|
||||||
ExecutionID: d.GetID(),
|
|
||||||
ExpectedStartDate: start,
|
|
||||||
ExpectedEndDate: &end,
|
|
||||||
}
|
|
||||||
items = append(items, bookingItem)
|
|
||||||
d.PeerBookByGraph[priced.GetCreatorID()][itemID] = append(
|
|
||||||
d.PeerBookByGraph[priced.GetCreatorID()][itemID], bookingItem.GetID())
|
|
||||||
}
|
|
||||||
return items
|
|
||||||
}
|
}
|
||||||
|
@ -1,107 +1,98 @@
|
|||||||
package workflow_execution
|
package workflow_execution
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||||
"cloud.o-forge.io/core/oc-lib/logs"
|
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type workflowExecutionMongoAccessor struct {
|
type workflowExecutionMongoAccessor struct {
|
||||||
utils.AbstractAccessor
|
utils.AbstractAccessor
|
||||||
shallow bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func newShallowAccessor(request *tools.APIRequest) *workflowExecutionMongoAccessor {
|
func New() *workflowExecutionMongoAccessor {
|
||||||
return &workflowExecutionMongoAccessor{
|
return &workflowExecutionMongoAccessor{}
|
||||||
shallow: true,
|
|
||||||
AbstractAccessor: utils.AbstractAccessor{
|
|
||||||
Logger: logs.CreateLogger(tools.WORKFLOW_EXECUTION.String()), // Create a logger with the data type
|
|
||||||
Request: request,
|
|
||||||
Type: tools.WORKFLOW_EXECUTION,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewAccessor(request *tools.APIRequest) *workflowExecutionMongoAccessor {
|
|
||||||
return &workflowExecutionMongoAccessor{
|
|
||||||
shallow: false,
|
|
||||||
AbstractAccessor: utils.AbstractAccessor{
|
|
||||||
Logger: logs.CreateLogger(tools.WORKFLOW_EXECUTION.String()), // Create a logger with the data type
|
|
||||||
Request: request,
|
|
||||||
Type: tools.WORKFLOW_EXECUTION,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (wfa *workflowExecutionMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
func (wfa *workflowExecutionMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||||
return nil, 404, errors.New("not implemented")
|
return wfa.GenericDeleteOne(id, wfa)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (wfa *workflowExecutionMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
func (wfa *workflowExecutionMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||||
if set.(*WorkflowExecution).State == 0 {
|
return wfa.GenericUpdateOne(set, id, wfa, &WorkflowExecution{})
|
||||||
return nil, 400, errors.New("state is required")
|
|
||||||
}
|
|
||||||
realSet := WorkflowExecution{State: set.(*WorkflowExecution).State}
|
|
||||||
return utils.GenericUpdateOne(&realSet, id, wfa, &WorkflowExecution{})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (wfa *workflowExecutionMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
func (wfa *workflowExecutionMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
return nil, 404, errors.New("not implemented")
|
return wfa.GenericStoreOne(data, wfa)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (wfa *workflowExecutionMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
func (wfa *workflowExecutionMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
return nil, 404, errors.New("not implemented")
|
return wfa.GenericStoreOne(data, wfa)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *workflowExecutionMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
func (wfa *workflowExecutionMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||||
return utils.GenericLoadOne[*WorkflowExecution](id, func(d utils.DBObject) (utils.DBObject, int, error) {
|
var workflow WorkflowExecution
|
||||||
now := time.Now()
|
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
|
||||||
now = now.Add(time.Second * -60)
|
if err != nil {
|
||||||
if d.(*WorkflowExecution).State == enum.DRAFT && !a.shallow && now.UTC().After(d.(*WorkflowExecution).ExecDate) {
|
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||||
utils.GenericDeleteOne(d.GetID(), newShallowAccessor(a.Request))
|
return nil, code, err
|
||||||
return nil, 404, errors.New("not found")
|
|
||||||
}
|
}
|
||||||
if d.(*WorkflowExecution).State == enum.SCHEDULED && !a.shallow && now.UTC().After(d.(*WorkflowExecution).ExecDate) {
|
res_mongo.Decode(&workflow)
|
||||||
d.(*WorkflowExecution).State = enum.FORGOTTEN
|
if workflow.State == SCHEDULED && time.Now().UTC().After(*workflow.ExecDate) {
|
||||||
utils.GenericRawUpdateOne(d, id, newShallowAccessor(a.Request))
|
workflow.State = FORGOTTEN
|
||||||
|
wfa.GenericRawUpdateOne(&workflow, id, wfa)
|
||||||
}
|
}
|
||||||
return d, 200, nil
|
return &workflow, 200, nil
|
||||||
}, a)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *workflowExecutionMongoAccessor) LoadAll(isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
func (wfa *workflowExecutionMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||||
return utils.GenericLoadAll[*WorkflowExecution](a.getExec(), isDraft, a)
|
objs := []utils.ShallowDBObject{}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []WorkflowExecution
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
for _, r := range results {
|
||||||
|
if r.State == SCHEDULED && time.Now().UTC().After(*r.ExecDate) {
|
||||||
|
r.State = FORGOTTEN
|
||||||
|
wfa.GenericRawUpdateOne(&r, r.UUID, wfa)
|
||||||
|
}
|
||||||
|
objs = append(objs, &r.AbstractObject)
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *workflowExecutionMongoAccessor) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
// Search searches for workflow executions in the database, given some filters OR a search string
|
||||||
return utils.GenericSearch[*WorkflowExecution](filters, search, a.GetExecFilters(search), a.getExec(), isDraft, a)
|
func (wfa *workflowExecutionMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||||
}
|
objs := []utils.ShallowDBObject{}
|
||||||
|
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||||
func (a *workflowExecutionMongoAccessor) getExec() func(utils.DBObject) utils.ShallowDBObject {
|
filters = &dbs.Filters{
|
||||||
return func(d utils.DBObject) utils.ShallowDBObject {
|
|
||||||
now := time.Now()
|
|
||||||
now = now.Add(time.Second * -60)
|
|
||||||
if d.(*WorkflowExecution).State == enum.DRAFT && now.UTC().After(d.(*WorkflowExecution).ExecDate) {
|
|
||||||
utils.GenericDeleteOne(d.GetID(), newShallowAccessor(a.Request))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if d.(*WorkflowExecution).State == enum.SCHEDULED && now.UTC().After(d.(*WorkflowExecution).ExecDate) {
|
|
||||||
d.(*WorkflowExecution).State = enum.FORGOTTEN
|
|
||||||
utils.GenericRawUpdateOne(d, d.GetID(), newShallowAccessor(a.Request))
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *workflowExecutionMongoAccessor) GetExecFilters(search string) *dbs.Filters {
|
|
||||||
return &dbs.Filters{
|
|
||||||
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
|
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
|
||||||
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search + "_execution"}},
|
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
}}
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []WorkflowExecution
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
for _, r := range results {
|
||||||
|
if r.State == SCHEDULED && time.Now().UTC().After(*r.ExecDate) {
|
||||||
|
r.State = FORGOTTEN
|
||||||
|
wfa.GenericRawUpdateOne(&r, r.UUID, wfa)
|
||||||
|
}
|
||||||
|
objs = append(objs, &r)
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
}
|
}
|
||||||
|
@ -1,214 +0,0 @@
|
|||||||
package workflow_execution
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/booking"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/workflow"
|
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
|
||||||
"github.com/google/uuid"
|
|
||||||
"github.com/robfig/cron"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* WorkflowSchedule is a struct that contains the scheduling information of a workflow
|
|
||||||
* It contains the mode of the schedule (Task or Service), the name of the schedule, the start and end time of the schedule and the cron expression
|
|
||||||
*/
|
|
||||||
// it's a flying object only use in a session time. It's not stored in the database
|
|
||||||
type WorkflowSchedule struct {
|
|
||||||
UUID string `json:"id" validate:"required"` // ExecutionsID is the list of the executions id of the workflow
|
|
||||||
Workflow *workflow.Workflow `json:"workflow,omitempty"` // Workflow is the workflow dependancy of the schedule
|
|
||||||
WorkflowExecution []*WorkflowExecution `json:"workflow_executions,omitempty"` // WorkflowExecution is the list of executions of the workflow
|
|
||||||
Message string `json:"message,omitempty"` // Message is the message of the schedule
|
|
||||||
Warning string `json:"warning,omitempty"` // Warning is the warning message of the schedule
|
|
||||||
Start time.Time `json:"start" validate:"required,ltfield=End"` // Start is the start time of the schedule, is required and must be less than the End time
|
|
||||||
End *time.Time `json:"end,omitempty"` // End is the end time of the schedule, is required and must be greater than the Start time
|
|
||||||
DurationS float64 `json:"duration_s" default:"-1"` // End is the end time of the schedule
|
|
||||||
Cron string `json:"cron,omitempty"` // here the cron format : ss mm hh dd MM dw task
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewScheduler(start string, end string, durationInS float64, cron string) *WorkflowSchedule {
|
|
||||||
s, err := time.Parse("2006-01-02T15:04:05", start)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
ws := &WorkflowSchedule{
|
|
||||||
UUID: uuid.New().String(),
|
|
||||||
Start: s,
|
|
||||||
DurationS: durationInS,
|
|
||||||
Cron: cron,
|
|
||||||
}
|
|
||||||
e, err := time.Parse("2006-01-02T15:04:05", end)
|
|
||||||
if err == nil {
|
|
||||||
ws.End = &e
|
|
||||||
}
|
|
||||||
return ws
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ws *WorkflowSchedule) CheckBooking(wfID string, request *tools.APIRequest) (bool, *workflow.Workflow, []*WorkflowExecution, []*booking.Booking, error) {
|
|
||||||
if request.Caller == nil && request.Caller.URLS == nil && request.Caller.URLS[tools.BOOKING] == nil || request.Caller.URLS[tools.BOOKING][tools.GET] == "" {
|
|
||||||
return false, nil, []*WorkflowExecution{}, []*booking.Booking{}, errors.New("no caller defined")
|
|
||||||
}
|
|
||||||
access := workflow.NewAccessor(request)
|
|
||||||
res, code, err := access.LoadOne(wfID)
|
|
||||||
if code != 200 {
|
|
||||||
return false, nil, []*WorkflowExecution{}, []*booking.Booking{}, errors.New("could not load the workflow with id: " + err.Error())
|
|
||||||
}
|
|
||||||
wf := res.(*workflow.Workflow)
|
|
||||||
longest, priceds, wf, err := wf.Planify(ws.Start, ws.End, request)
|
|
||||||
if err != nil {
|
|
||||||
return false, wf, []*WorkflowExecution{}, []*booking.Booking{}, err
|
|
||||||
}
|
|
||||||
ws.DurationS = longest
|
|
||||||
ws.Message = "We estimate that the workflow will start at " + ws.Start.String() + " and last " + fmt.Sprintf("%v", ws.DurationS) + " seconds."
|
|
||||||
if ws.End != nil && ws.Start.Add(time.Duration(longest)*time.Second).After(*ws.End) {
|
|
||||||
ws.Warning = "The workflow may be too long to be executed in the given time frame, we will try to book it anyway\n"
|
|
||||||
}
|
|
||||||
execs, err := ws.getExecutions(wf)
|
|
||||||
if err != nil {
|
|
||||||
return false, wf, []*WorkflowExecution{}, []*booking.Booking{}, err
|
|
||||||
}
|
|
||||||
bookings := []*booking.Booking{}
|
|
||||||
for _, exec := range execs {
|
|
||||||
bookings = append(bookings, exec.Book(ws.UUID, wfID, priceds)...)
|
|
||||||
for _, b := range bookings {
|
|
||||||
meth := request.Caller.URLS[tools.BOOKING][tools.GET]
|
|
||||||
meth = strings.ReplaceAll(meth, ":id", b.ResourceID)
|
|
||||||
meth = strings.ReplaceAll(meth, ":start_date", b.ExpectedStartDate.Format("2006-01-02T15:04:05"))
|
|
||||||
meth = strings.ReplaceAll(meth, ":end_date", b.ExpectedEndDate.Format("2006-01-02T15:04:05"))
|
|
||||||
request.Caller.URLS[tools.BOOKING][tools.GET] = meth
|
|
||||||
_, err := (&peer.Peer{}).LaunchPeerExecution(b.DestPeerID, b.ResourceID, tools.BOOKING, tools.GET, nil, request.Caller)
|
|
||||||
if err != nil {
|
|
||||||
return false, wf, execs, bookings, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
return true, wf, execs, bookings, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ws *WorkflowSchedule) Schedules(wfID string, request *tools.APIRequest) (*WorkflowSchedule, *workflow.Workflow, []*WorkflowExecution, error) {
|
|
||||||
if request == nil {
|
|
||||||
return ws, nil, []*WorkflowExecution{}, errors.New("no request found")
|
|
||||||
}
|
|
||||||
c := request.Caller
|
|
||||||
if c == nil || c.URLS == nil || c.URLS[tools.BOOKING] == nil {
|
|
||||||
return ws, nil, []*WorkflowExecution{}, errors.New("no caller defined")
|
|
||||||
}
|
|
||||||
methods := c.URLS[tools.BOOKING]
|
|
||||||
if _, ok := methods[tools.GET]; !ok {
|
|
||||||
return ws, nil, []*WorkflowExecution{}, errors.New("no path found")
|
|
||||||
}
|
|
||||||
ok, wf, executions, bookings, err := ws.CheckBooking(wfID, request)
|
|
||||||
ws.WorkflowExecution = executions
|
|
||||||
if !ok || err != nil {
|
|
||||||
return ws, nil, executions, errors.New("could not book the workflow : " + fmt.Sprintf("%v", err))
|
|
||||||
}
|
|
||||||
ws.Workflow = wf
|
|
||||||
for _, booking := range bookings {
|
|
||||||
_, err := (&peer.Peer{}).LaunchPeerExecution(booking.DestPeerID, "",
|
|
||||||
tools.BOOKING, tools.POST, booking.Serialize(booking), request.Caller)
|
|
||||||
if err != nil {
|
|
||||||
return ws, wf, executions, errors.New("could not launch the peer execution : " + fmt.Sprintf("%v", err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fmt.Println("Schedules")
|
|
||||||
for _, exec := range executions {
|
|
||||||
err := exec.PurgeDraft(request)
|
|
||||||
if err != nil {
|
|
||||||
return ws, nil, []*WorkflowExecution{}, errors.New("purge draft" + fmt.Sprintf("%v", err))
|
|
||||||
}
|
|
||||||
exec.StoreDraftDefault()
|
|
||||||
utils.GenericStoreOne(exec, NewAccessor(request))
|
|
||||||
}
|
|
||||||
fmt.Println("Schedules")
|
|
||||||
return ws, wf, executions, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
BOOKING IMPLIED TIME, not of subscription but of execution
|
|
||||||
so is processing time execution time applied on computes
|
|
||||||
data can improve the processing time
|
|
||||||
time should implied a security time border (10sec) if not from the same executions
|
|
||||||
VERIFY THAT WE HANDLE DIFFERENCE BETWEEN LOCATION TIME && BOOKING
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* getExecutions is a function that returns the executions of a workflow
|
|
||||||
* it returns an array of workflow_execution.WorkflowExecution
|
|
||||||
*/
|
|
||||||
func (ws *WorkflowSchedule) getExecutions(workflow *workflow.Workflow) ([]*WorkflowExecution, error) {
|
|
||||||
workflows_executions := []*WorkflowExecution{}
|
|
||||||
dates, err := ws.getDates()
|
|
||||||
if err != nil {
|
|
||||||
return workflows_executions, err
|
|
||||||
}
|
|
||||||
for _, date := range dates {
|
|
||||||
obj := &WorkflowExecution{
|
|
||||||
AbstractObject: utils.AbstractObject{
|
|
||||||
UUID: uuid.New().String(), // set the uuid of the execution
|
|
||||||
Name: workflow.Name + "_execution_" + date.Start.String(), // set the name of the execution
|
|
||||||
},
|
|
||||||
ExecutionsID: ws.UUID,
|
|
||||||
ExecDate: date.Start, // set the execution date
|
|
||||||
EndDate: date.End, // set the end date
|
|
||||||
State: enum.DRAFT, // set the state to 1 (scheduled)
|
|
||||||
WorkflowID: workflow.GetID(), // set the workflow id dependancy of the execution
|
|
||||||
}
|
|
||||||
workflows_executions = append(workflows_executions, obj)
|
|
||||||
}
|
|
||||||
return workflows_executions, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ws *WorkflowSchedule) getDates() ([]Schedule, error) {
|
|
||||||
schedule := []Schedule{}
|
|
||||||
if len(ws.Cron) > 0 { // if cron is set then end date should be set
|
|
||||||
if ws.End == nil {
|
|
||||||
return schedule, errors.New("a cron task should have an end date")
|
|
||||||
}
|
|
||||||
if ws.DurationS <= 0 {
|
|
||||||
ws.DurationS = ws.End.Sub(ws.Start).Seconds()
|
|
||||||
}
|
|
||||||
cronStr := strings.Split(ws.Cron, " ") // split the cron string to treat it
|
|
||||||
if len(cronStr) < 6 { // if the cron string is less than 6 fields, return an error because format is : ss mm hh dd MM dw (6 fields)
|
|
||||||
return schedule, errors.New("Bad cron message: (" + ws.Cron + "). Should be at least ss mm hh dd MM dw")
|
|
||||||
}
|
|
||||||
subCron := strings.Join(cronStr[:6], " ")
|
|
||||||
// cron should be parsed as ss mm hh dd MM dw t (min 6 fields)
|
|
||||||
specParser := cron.NewParser(cron.Second | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow) // create a new cron parser
|
|
||||||
sched, err := specParser.Parse(subCron) // parse the cron string
|
|
||||||
if err != nil {
|
|
||||||
return schedule, errors.New("Bad cron message: " + err.Error())
|
|
||||||
}
|
|
||||||
// loop through the cron schedule to set the executions
|
|
||||||
for s := sched.Next(ws.Start); !s.IsZero() && s.Before(*ws.End); s = sched.Next(s) {
|
|
||||||
e := s.Add(time.Duration(ws.DurationS) * time.Second)
|
|
||||||
schedule = append(schedule, Schedule{
|
|
||||||
Start: s,
|
|
||||||
End: &e,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
} else { // if no cron, set the execution to the start date
|
|
||||||
schedule = append(schedule, Schedule{
|
|
||||||
Start: ws.Start,
|
|
||||||
End: ws.End,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return schedule, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type Schedule struct {
|
|
||||||
Start time.Time
|
|
||||||
End *time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* TODO : LARGEST GRAIN PLANIFYING THE WORKFLOW WHEN OPTION IS SET
|
|
||||||
* SET PROTECTION BORDER TIME
|
|
||||||
*/
|
|
@ -1,10 +1,12 @@
|
|||||||
package workspace
|
package workspace
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
|
"encoding/json"
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/models/resources"
|
"cloud.o-forge.io/core/oc-lib/models/resources"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
|
"github.com/google/uuid"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Workspace is a struct that represents a workspace
|
// Workspace is a struct that represents a workspace
|
||||||
@ -16,17 +18,43 @@ type Workspace struct {
|
|||||||
Shared string `json:"shared,omitempty" bson:"shared,omitempty"` // Shared is the ID of the shared workspace
|
Shared string `json:"shared,omitempty" bson:"shared,omitempty"` // Shared is the ID of the shared workspace
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Workspace) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
func (ao *Workspace) GetID() string {
|
||||||
return NewAccessor(request) // Create a new instance of the accessor
|
return ao.UUID
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ao *Workspace) VerifyAuth(request *tools.APIRequest) bool {
|
func (r *Workspace) GenerateID() {
|
||||||
if ao.Shared != "" {
|
if r.UUID == "" {
|
||||||
shared, code, _ := shallow_collaborative_area.NewAccessor(request).LoadOne(ao.Shared)
|
r.UUID = uuid.New().String()
|
||||||
if code != 200 || shared == nil {
|
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
return shared.VerifyAuth(request)
|
}
|
||||||
}
|
|
||||||
return ao.AbstractObject.VerifyAuth(request)
|
func (d *Workspace) GetName() string {
|
||||||
|
return d.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Workspace) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||||
|
data := New() // Create a new instance of the accessor
|
||||||
|
data.Init(tools.WORKSPACE, caller) // Initialize the accessor with the WORKSPACE model type
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new instance of the workspaceMongoAccessor from a map
|
||||||
|
func (dma *Workspace) Deserialize(j map[string]interface{}) utils.DBObject {
|
||||||
|
b, err := json.Marshal(j)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, dma)
|
||||||
|
return dma
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serialize returns the workspaceMongoAccessor as a map
|
||||||
|
func (dma *Workspace) Serialize() map[string]interface{} {
|
||||||
|
var m map[string]interface{}
|
||||||
|
b, err := json.Marshal(dma)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
json.Unmarshal(b, &m)
|
||||||
|
return m
|
||||||
}
|
}
|
||||||
|
@ -8,8 +8,10 @@ import (
|
|||||||
|
|
||||||
type WorkspaceHistory struct{ Workspace }
|
type WorkspaceHistory struct{ Workspace }
|
||||||
|
|
||||||
func (d *WorkspaceHistory) GetAccessor(request *tools.APIRequest) utils.Accessor {
|
func (d *WorkspaceHistory) GetAccessor(caller *tools.HTTPCaller) utils.Accessor {
|
||||||
return NewAccessorHistory(request) // Create a new instance of the accessor
|
data := New() // Create a new instance of the accessor
|
||||||
|
data.Init(tools.WORKSPACE_HISTORY, caller) // Initialize the accessor with the WORKSPACE model type
|
||||||
|
return data
|
||||||
}
|
}
|
||||||
func (r *WorkspaceHistory) GenerateID() {
|
func (r *WorkspaceHistory) GenerateID() {
|
||||||
r.UUID = uuid.New().String()
|
r.UUID = uuid.New().String()
|
||||||
|
@ -2,11 +2,17 @@ package workspace
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||||
"cloud.o-forge.io/core/oc-lib/logs"
|
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
|
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/peer"
|
"cloud.o-forge.io/core/oc-lib/models/peer"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resources/compute"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resources/data"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resources/processing"
|
||||||
|
"cloud.o-forge.io/core/oc-lib/models/resources/storage"
|
||||||
|
w "cloud.o-forge.io/core/oc-lib/models/resources/workflow"
|
||||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||||
"cloud.o-forge.io/core/oc-lib/tools"
|
"cloud.o-forge.io/core/oc-lib/tools"
|
||||||
)
|
)
|
||||||
@ -17,111 +23,194 @@ type workspaceMongoAccessor struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new instance of the workspaceMongoAccessor
|
// New creates a new instance of the workspaceMongoAccessor
|
||||||
func NewAccessorHistory(request *tools.APIRequest) *workspaceMongoAccessor {
|
func New() *workspaceMongoAccessor {
|
||||||
return new(tools.WORKSPACE_HISTORY, request)
|
return &workspaceMongoAccessor{}
|
||||||
}
|
|
||||||
|
|
||||||
func NewAccessor(request *tools.APIRequest) *workspaceMongoAccessor {
|
|
||||||
return new(tools.WORKSPACE, request)
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new instance of the workspaceMongoAccessor
|
|
||||||
func new(t tools.DataType, request *tools.APIRequest) *workspaceMongoAccessor {
|
|
||||||
return &workspaceMongoAccessor{
|
|
||||||
utils.AbstractAccessor{
|
|
||||||
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
|
|
||||||
Request: request,
|
|
||||||
Type: t,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteOne deletes a workspace from the database, given its ID, it automatically share to peers if the workspace is shared
|
// DeleteOne deletes a workspace from the database, given its ID, it automatically share to peers if the workspace is shared
|
||||||
// it checks if a workspace with the same name already exists
|
// it checks if a workspace with the same name already exists
|
||||||
func (a *workspaceMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
func (wfa *workspaceMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
|
||||||
res, code, err := utils.GenericDeleteOne(id, a)
|
res, code, err := wfa.GenericDeleteOne(id, wfa)
|
||||||
if code == 200 && res != nil {
|
if code == 200 && res != nil {
|
||||||
a.share(res.(*Workspace), tools.DELETE, a.GetCaller()) // Share the deletion to the peers
|
wfa.share(res.(*Workspace), tools.DELETE, wfa.Caller) // Share the deletion to the peers
|
||||||
}
|
}
|
||||||
return res, code, err
|
return res, code, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateOne updates a workspace in the database, given its ID, it automatically share to peers if the workspace is shared
|
// UpdateOne updates a workspace in the database, given its ID, it automatically share to peers if the workspace is shared
|
||||||
func (a *workspaceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
func (wfa *workspaceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
|
||||||
d := set.(*Workspace) // Get the workspace from the set
|
d := set.(*Workspace) // Get the workspace from the set
|
||||||
d.Clear()
|
d.DataResources = nil // Reset the resources
|
||||||
|
d.ComputeResources = nil
|
||||||
|
d.StorageResources = nil
|
||||||
|
d.ProcessingResources = nil
|
||||||
|
d.WorkflowResources = nil
|
||||||
if d.Active { // If the workspace is active, deactivate all the other workspaces
|
if d.Active { // If the workspace is active, deactivate all the other workspaces
|
||||||
res, _, err := a.LoadAll(true)
|
res, _, err := wfa.LoadAll()
|
||||||
if err == nil {
|
if err == nil {
|
||||||
for _, r := range res {
|
for _, r := range res {
|
||||||
if r.GetID() != id {
|
if r.GetID() != id {
|
||||||
r.(*Workspace).Active = false
|
r.(*Workspace).Active = false
|
||||||
a.UpdateOne(r.(*Workspace), r.GetID())
|
wfa.UpdateOne(r.(*Workspace), r.GetID())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
res, code, err := utils.GenericUpdateOne(set, id, a, &Workspace{})
|
res, code, err := wfa.GenericUpdateOne(set, id, wfa, &Workspace{})
|
||||||
if code == 200 && res != nil {
|
if code == 200 && res != nil {
|
||||||
a.share(res.(*Workspace), tools.PUT, a.GetCaller())
|
wfa.share(res.(*Workspace), tools.PUT, wfa.Caller)
|
||||||
}
|
}
|
||||||
return res, code, err
|
return res, code, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// StoreOne stores a workspace in the database, it checks if a workspace with the same name already exists
|
// StoreOne stores a workspace in the database, it checks if a workspace with the same name already exists
|
||||||
func (a *workspaceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
func (wfa *workspaceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
filters := &dbs.Filters{
|
filters := &dbs.Filters{
|
||||||
Or: map[string][]dbs.Filter{
|
Or: map[string][]dbs.Filter{
|
||||||
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: data.GetName() + "_workspace"}},
|
"abstractobject.name": {{dbs.LIKE.String(), data.GetName() + "_workspace"}},
|
||||||
"abstractobject.creator_id": {{Operator: dbs.EQUAL.String(), Value: a.GetPeerID()}},
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
// filters *dbs.Filters, word string, isDraft bool
|
res, _, err := wfa.Search(filters, "") // Search for the workspace
|
||||||
res, _, err := a.Search(filters, "", true) // Search for the workspace
|
|
||||||
if err == nil && len(res) > 0 { // If the workspace already exists, return an error
|
if err == nil && len(res) > 0 { // If the workspace already exists, return an error
|
||||||
return nil, 409, errors.New("a workspace with the same name already exists")
|
return nil, 409, errors.New("A workspace with the same name already exists")
|
||||||
}
|
}
|
||||||
// reset the resources
|
// reset the resources
|
||||||
d := data.(*Workspace)
|
d := data.(*Workspace)
|
||||||
d.Clear()
|
d.DataResources = nil
|
||||||
return utils.GenericStoreOne(d, a)
|
d.ComputeResources = nil
|
||||||
|
d.StorageResources = nil
|
||||||
|
d.ProcessingResources = nil
|
||||||
|
d.WorkflowResources = nil
|
||||||
|
return wfa.GenericStoreOne(d, wfa)
|
||||||
}
|
}
|
||||||
|
|
||||||
// CopyOne copies a workspace in the database
|
// CopyOne copies a workspace in the database
|
||||||
func (a *workspaceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
func (wfa *workspaceMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
|
||||||
return utils.GenericStoreOne(data, a)
|
return wfa.GenericStoreOne(data, wfa)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *workspaceMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
/*
|
||||||
return utils.GenericLoadOne[*Workspace](id, func(d utils.DBObject) (utils.DBObject, int, error) {
|
This function is used to fill the workspace with the resources
|
||||||
d.(*Workspace).Fill(a.GetRequest())
|
*/
|
||||||
return d, 200, nil
|
func (wfa *workspaceMongoAccessor) fill(workflow *Workspace) *Workspace {
|
||||||
}, a)
|
// Fill the workspace with the resources
|
||||||
|
if workflow.Datas != nil && len(workflow.Datas) > 0 {
|
||||||
|
dataAccessor := (&data.DataResource{}).GetAccessor(nil)
|
||||||
|
for _, id := range workflow.Datas {
|
||||||
|
d, _, e := dataAccessor.LoadOne(id)
|
||||||
|
if e == nil {
|
||||||
|
workflow.DataResources = append(workflow.DataResources, d.(*data.DataResource))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Fill the workspace with the computes
|
||||||
|
if workflow.Computes != nil && len(workflow.Computes) > 0 {
|
||||||
|
dataAccessor := (&compute.ComputeResource{}).GetAccessor(nil)
|
||||||
|
for _, id := range workflow.Computes {
|
||||||
|
d, _, e := dataAccessor.LoadOne(id)
|
||||||
|
if e == nil {
|
||||||
|
workflow.ComputeResources = append(workflow.ComputeResources, d.(*compute.ComputeResource))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Fill the workspace with the storages
|
||||||
|
if workflow.Storages != nil && len(workflow.Storages) > 0 {
|
||||||
|
dataAccessor := (&storage.StorageResource{}).GetAccessor(nil)
|
||||||
|
for _, id := range workflow.Storages {
|
||||||
|
d, _, e := dataAccessor.LoadOne(id)
|
||||||
|
if e == nil {
|
||||||
|
workflow.StorageResources = append(workflow.StorageResources, d.(*storage.StorageResource))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Fill the workspace with the processings
|
||||||
|
if workflow.Processings != nil && len(workflow.Processings) > 0 {
|
||||||
|
dataAccessor := (&processing.ProcessingResource{}).GetAccessor(nil)
|
||||||
|
for _, id := range workflow.Processings {
|
||||||
|
d, _, e := dataAccessor.LoadOne(id)
|
||||||
|
if e == nil {
|
||||||
|
workflow.ProcessingResources = append(workflow.ProcessingResources, d.(*processing.ProcessingResource))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Fill the workspace with the workflows
|
||||||
|
if workflow.Workflows != nil && len(workflow.Workflows) > 0 {
|
||||||
|
dataAccessor := (&w.WorkflowResource{}).GetAccessor(nil)
|
||||||
|
for _, id := range workflow.Workflows {
|
||||||
|
d, _, e := dataAccessor.LoadOne(id)
|
||||||
|
if e == nil {
|
||||||
|
workflow.WorkflowResources = append(workflow.WorkflowResources, d.(*w.WorkflowResource))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return workflow
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *workspaceMongoAccessor) LoadAll(isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
// LoadOne loads a workspace from the database, given its ID
|
||||||
return utils.GenericLoadAll[*Workspace](func(d utils.DBObject) utils.ShallowDBObject {
|
func (wfa *workspaceMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
|
||||||
d.(*Workspace).Fill(a.GetRequest())
|
var workflow Workspace
|
||||||
return d
|
res_mongo, code, err := mongo.MONGOService.LoadOne(id, wfa.GetType())
|
||||||
}, isDraft, a)
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
res_mongo.Decode(&workflow)
|
||||||
|
|
||||||
|
return wfa.fill(&workflow), 200, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *workspaceMongoAccessor) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) {
|
// LoadAll loads all the workspaces from the database
|
||||||
return utils.GenericSearch[*Workspace](filters, search, (&Workspace{}).GetObjectFilters(search), func(d utils.DBObject) utils.ShallowDBObject {
|
func (wfa workspaceMongoAccessor) LoadAll() ([]utils.ShallowDBObject, int, error) {
|
||||||
d.(*Workspace).Fill(a.GetRequest())
|
objs := []utils.ShallowDBObject{}
|
||||||
return d
|
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType())
|
||||||
}, isDraft, a)
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not retrieve any from db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []Workspace
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
for _, r := range results {
|
||||||
|
objs = append(objs, wfa.fill(&r))
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search searches for workspaces in the database, given some filters OR a search string
|
||||||
|
func (wfa *workspaceMongoAccessor) Search(filters *dbs.Filters, search string) ([]utils.ShallowDBObject, int, error) {
|
||||||
|
objs := []utils.ShallowDBObject{}
|
||||||
|
if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
|
||||||
|
filters = &dbs.Filters{
|
||||||
|
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
|
||||||
|
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType())
|
||||||
|
if err != nil {
|
||||||
|
wfa.Logger.Error().Msg("Could not store to db. Error: " + err.Error())
|
||||||
|
return nil, code, err
|
||||||
|
}
|
||||||
|
var results []Workspace
|
||||||
|
if err = res_mongo.All(mongo.MngoCtx, &results); err != nil {
|
||||||
|
return nil, 404, err
|
||||||
|
}
|
||||||
|
for _, r := range results {
|
||||||
|
objs = append(objs, wfa.fill(&r))
|
||||||
|
}
|
||||||
|
return objs, 200, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
This function is used to share the workspace with the peers
|
This function is used to share the workspace with the peers
|
||||||
*/
|
*/
|
||||||
func (a *workspaceMongoAccessor) share(realData *Workspace, method tools.METHOD, caller *tools.HTTPCaller) {
|
func (wfa *workspaceMongoAccessor) share(realData *Workspace, method tools.METHOD, caller *tools.HTTPCaller) {
|
||||||
|
fmt.Println("Sharing workspace", realData, caller)
|
||||||
if realData == nil || realData.Shared == "" || caller == nil || caller.Disabled {
|
if realData == nil || realData.Shared == "" || caller == nil || caller.Disabled {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
shallow := &shallow_collaborative_area.ShallowCollaborativeArea{}
|
access := (&shallow_collaborative_area.ShallowCollaborativeArea{}).GetAccessor(nil)
|
||||||
access := (shallow).GetAccessor(a.GetRequest())
|
|
||||||
res, code, _ := access.LoadOne(realData.Shared)
|
res, code, _ := access.LoadOne(realData.Shared)
|
||||||
if code != 200 {
|
if code != 200 {
|
||||||
return
|
return
|
||||||
@ -138,10 +227,10 @@ func (a *workspaceMongoAccessor) share(realData *Workspace, method tools.METHOD,
|
|||||||
history.StoreOne(history.MapFromWorkspace(res.(*Workspace)))
|
history.StoreOne(history.MapFromWorkspace(res.(*Workspace)))
|
||||||
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKSPACE, tools.DELETE, map[string]interface{}{}, caller)
|
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKSPACE, tools.DELETE, map[string]interface{}{}, caller)
|
||||||
} else { // If the workspace is updated, share the update
|
} else { // If the workspace is updated, share the update
|
||||||
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKSPACE, tools.PUT, res.Serialize(res), caller)
|
_, err = paccess.LaunchPeerExecution(p, res.GetID(), tools.WORKSPACE, tools.PUT, res.Serialize(), caller)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
a.Logger.Error().Msg(err.Error())
|
wfa.Logger.Error().Msg(err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
11
tools/api.go
11
tools/api.go
@ -3,6 +3,7 @@ package tools
|
|||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"cloud.o-forge.io/core/oc-lib/config"
|
"cloud.o-forge.io/core/oc-lib/config"
|
||||||
@ -10,13 +11,6 @@ import (
|
|||||||
beego "github.com/beego/beego/v2/server/web"
|
beego "github.com/beego/beego/v2/server/web"
|
||||||
)
|
)
|
||||||
|
|
||||||
type APIRequest struct {
|
|
||||||
Username string
|
|
||||||
PeerID string
|
|
||||||
Groups []string
|
|
||||||
Caller *HTTPCaller
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* API is the Health Check API
|
* API is the Health Check API
|
||||||
* it defines the health check methods
|
* it defines the health check methods
|
||||||
@ -115,8 +109,8 @@ func (a *API) SubscribeRouter(infos []*beego.ControllerInfo) {
|
|||||||
// CheckRemotePeer checks the state of a remote peer
|
// CheckRemotePeer checks the state of a remote peer
|
||||||
func (a *API) CheckRemotePeer(url string) (State, map[string]int) {
|
func (a *API) CheckRemotePeer(url string) (State, map[string]int) {
|
||||||
// Check if the database is up
|
// Check if the database is up
|
||||||
var resp APIStatusResponse
|
|
||||||
caller := NewHTTPCaller(map[DataType]map[METHOD]string{}) // Create a new http caller
|
caller := NewHTTPCaller(map[DataType]map[METHOD]string{}) // Create a new http caller
|
||||||
|
var resp APIStatusResponse
|
||||||
b, err := caller.CallPost(url, "", map[string]interface{}{}) // Call the status endpoint of the peer
|
b, err := caller.CallPost(url, "", map[string]interface{}{}) // Call the status endpoint of the peer
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return DEAD, map[string]int{} // If the peer is not reachable, return dead
|
return DEAD, map[string]int{} // If the peer is not reachable, return dead
|
||||||
@ -149,6 +143,7 @@ func (a *API) CheckRemoteAPIs(apis []DataType) (State, map[string]string, error)
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
json.Unmarshal(b, &resp)
|
json.Unmarshal(b, &resp)
|
||||||
|
fmt.Println(string(b))
|
||||||
if resp.Data == nil { //
|
if resp.Data == nil { //
|
||||||
state = REDUCED_SERVICE // If the response is empty, return reduced service
|
state = REDUCED_SERVICE // If the response is empty, return reduced service
|
||||||
continue
|
continue
|
||||||
|
@ -13,19 +13,13 @@ const (
|
|||||||
WORKFLOW
|
WORKFLOW
|
||||||
WORKFLOW_EXECUTION
|
WORKFLOW_EXECUTION
|
||||||
WORKSPACE
|
WORKSPACE
|
||||||
|
RESOURCE_MODEL
|
||||||
PEER
|
PEER
|
||||||
COLLABORATIVE_AREA
|
COLLABORATIVE_AREA
|
||||||
RULE
|
RULE
|
||||||
BOOKING
|
BOOKING
|
||||||
WORKFLOW_HISTORY
|
WORKFLOW_HISTORY
|
||||||
WORKSPACE_HISTORY
|
WORKSPACE_HISTORY
|
||||||
ORDER
|
|
||||||
PURCHASE_RESOURCE
|
|
||||||
ADMIRALTY_SOURCE
|
|
||||||
ADMIRALTY_TARGET
|
|
||||||
ADMIRALTY_SECRET
|
|
||||||
ADMIRALTY_KUBECONFIG
|
|
||||||
ADMIRALTY_NODES
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var NOAPI = ""
|
var NOAPI = ""
|
||||||
@ -35,11 +29,6 @@ var WORKFLOWAPI = "oc-workflow"
|
|||||||
var WORKSPACEAPI = "oc-workspace"
|
var WORKSPACEAPI = "oc-workspace"
|
||||||
var PEERSAPI = "oc-peer"
|
var PEERSAPI = "oc-peer"
|
||||||
var DATACENTERAPI = "oc-datacenter"
|
var DATACENTERAPI = "oc-datacenter"
|
||||||
var ADMIRALTY_SOURCEAPI = DATACENTERAPI+"/admiralty/source"
|
|
||||||
var ADMIRALTY_TARGETAPI = DATACENTERAPI+"/admiralty/target"
|
|
||||||
var ADMIRALTY_SECRETAPI = DATACENTERAPI+"/admiralty/secret"
|
|
||||||
var ADMIRALTY_KUBECONFIGAPI = DATACENTERAPI+"/admiralty/kubeconfig"
|
|
||||||
var ADMIRALTY_NODESAPI = DATACENTERAPI+"/admiralty/node"
|
|
||||||
|
|
||||||
// Bind the standard API name to the data type
|
// Bind the standard API name to the data type
|
||||||
var DefaultAPI = [...]string{
|
var DefaultAPI = [...]string{
|
||||||
@ -52,19 +41,13 @@ var DefaultAPI = [...]string{
|
|||||||
WORKFLOWAPI,
|
WORKFLOWAPI,
|
||||||
NOAPI,
|
NOAPI,
|
||||||
WORKSPACEAPI,
|
WORKSPACEAPI,
|
||||||
|
CATALOGAPI,
|
||||||
PEERSAPI,
|
PEERSAPI,
|
||||||
SHAREDAPI,
|
SHAREDAPI,
|
||||||
SHAREDAPI,
|
SHAREDAPI,
|
||||||
DATACENTERAPI,
|
DATACENTERAPI,
|
||||||
NOAPI,
|
NOAPI,
|
||||||
NOAPI,
|
NOAPI,
|
||||||
NOAPI,
|
|
||||||
NOAPI,
|
|
||||||
ADMIRALTY_SOURCEAPI,
|
|
||||||
ADMIRALTY_TARGETAPI,
|
|
||||||
ADMIRALTY_SECRETAPI,
|
|
||||||
ADMIRALTY_KUBECONFIGAPI,
|
|
||||||
ADMIRALTY_NODESAPI,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Bind the standard data name to the data type
|
// Bind the standard data name to the data type
|
||||||
@ -78,19 +61,13 @@ var Str = [...]string{
|
|||||||
"workflow",
|
"workflow",
|
||||||
"workflow_execution",
|
"workflow_execution",
|
||||||
"workspace",
|
"workspace",
|
||||||
|
"resource_model",
|
||||||
"peer",
|
"peer",
|
||||||
"collaborative_area",
|
"collaborative_area",
|
||||||
"rule",
|
"rule",
|
||||||
"booking",
|
"booking",
|
||||||
"workflow_history",
|
"workflow_history",
|
||||||
"workspace_history",
|
"workspace_history",
|
||||||
"order",
|
|
||||||
"purchase_resource",
|
|
||||||
"admiralty_source",
|
|
||||||
"admiralty_target",
|
|
||||||
"admiralty_secret",
|
|
||||||
"admiralty_kubeconfig",
|
|
||||||
"admiralty_node",
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func FromInt(i int) string {
|
func FromInt(i int) string {
|
||||||
@ -109,7 +86,3 @@ func (d DataType) String() string { // String - Returns the string name of the d
|
|||||||
func (d DataType) EnumIndex() int {
|
func (d DataType) EnumIndex() int {
|
||||||
return int(d)
|
return int(d)
|
||||||
}
|
}
|
||||||
|
|
||||||
func DataTypeList() []DataType {
|
|
||||||
return []DataType{DATA_RESOURCE, PROCESSING_RESOURCE, STORAGE_RESOURCE, COMPUTE_RESOURCE, WORKFLOW_RESOURCE, WORKFLOW, WORKFLOW_EXECUTION, WORKSPACE, PEER, COLLABORATIVE_AREA, RULE, BOOKING, WORKFLOW_HISTORY, WORKSPACE_HISTORY, ORDER, PURCHASE_RESOURCE,ADMIRALTY_SOURCE,ADMIRALTY_TARGET,ADMIRALTY_SECRET,ADMIRALTY_KUBECONFIG,ADMIRALTY_NODES}
|
|
||||||
}
|
|
||||||
|
@ -3,7 +3,6 @@ package tools
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
@ -17,7 +16,6 @@ const (
|
|||||||
GET METHOD = iota
|
GET METHOD = iota
|
||||||
PUT
|
PUT
|
||||||
POST
|
POST
|
||||||
POSTCHECK
|
|
||||||
DELETE
|
DELETE
|
||||||
|
|
||||||
STRICT_INTERNAL_GET
|
STRICT_INTERNAL_GET
|
||||||
@ -28,7 +26,7 @@ const (
|
|||||||
|
|
||||||
// String returns the string of the enum
|
// String returns the string of the enum
|
||||||
func (m METHOD) String() string {
|
func (m METHOD) String() string {
|
||||||
return [...]string{"GET", "PUT", "POST", "POST", "DELETE", "INTERNALGET", "INTERNALPUT", "INTERNALPOST", "INTERNALDELETE"}[m]
|
return [...]string{"GET", "PUT", "POST", "DELETE", "INTERNALGET", "INTERNALPUT", "INTERNALPOST", "INTERNALDELETE"}[m]
|
||||||
}
|
}
|
||||||
|
|
||||||
// EnumIndex returns the index of the enum
|
// EnumIndex returns the index of the enum
|
||||||
@ -38,7 +36,7 @@ func (m METHOD) EnumIndex() int {
|
|||||||
|
|
||||||
// ToMethod returns the method from a string
|
// ToMethod returns the method from a string
|
||||||
func ToMethod(str string) METHOD {
|
func ToMethod(str string) METHOD {
|
||||||
for _, s := range []METHOD{GET, PUT, POST, POSTCHECK, DELETE,
|
for _, s := range []METHOD{GET, PUT, POST, DELETE,
|
||||||
STRICT_INTERNAL_GET, STRICT_INTERNAL_PUT, STRICT_INTERNAL_POST, STRICT_INTERNAL_DELETE} {
|
STRICT_INTERNAL_GET, STRICT_INTERNAL_PUT, STRICT_INTERNAL_POST, STRICT_INTERNAL_DELETE} {
|
||||||
if s.String() == str {
|
if s.String() == str {
|
||||||
return s
|
return s
|
||||||
@ -52,7 +50,6 @@ var HTTPCallerInstance = &HTTPCaller{} // Singleton instance of the HTTPCaller
|
|||||||
type HTTPCaller struct {
|
type HTTPCaller struct {
|
||||||
URLS map[DataType]map[METHOD]string // Map of the different methods and their urls
|
URLS map[DataType]map[METHOD]string // Map of the different methods and their urls
|
||||||
Disabled bool // Disabled flag
|
Disabled bool // Disabled flag
|
||||||
LastResults map[string]interface{} // Used to store information regarding the last execution of a given method on a given data type
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewHTTPCaller creates a new instance of the HTTP Caller
|
// NewHTTPCaller creates a new instance of the HTTP Caller
|
||||||
@ -78,41 +75,22 @@ func (caller *HTTPCaller) CallGet(url string, subpath string, types ...string) (
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
defer resp.Body.Close()
|
||||||
err = caller.StoreResp(resp)
|
return io.ReadAll(resp.Body)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return caller.LastResults["body"].([]byte), nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// CallPut calls the DELETE method on the HTTP server
|
// CallPut calls the DELETE method on the HTTP server
|
||||||
func (caller *HTTPCaller) CallDelete(url string, subpath string) ([]byte, error) {
|
func (caller *HTTPCaller) CallDelete(url string, subpath string) ([]byte, error) {
|
||||||
req, err := http.NewRequest("DELETE", url+subpath, nil)
|
resp, err := http.NewRequest("DELETE", url+subpath, nil)
|
||||||
if err != nil {
|
if err != nil || resp == nil || resp.Body == nil {
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
client := &http.Client{}
|
|
||||||
resp, err := client.Do(req)
|
|
||||||
if err != nil || req == nil || req.Body == nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
defer resp.Body.Close()
|
||||||
|
return io.ReadAll(resp.Body)
|
||||||
err = caller.StoreResp(resp)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return caller.LastResults["body"].([]byte), nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// CallPost calls the POST method on the HTTP server
|
// CallPost calls the POST method on the HTTP server
|
||||||
func (caller *HTTPCaller) CallPost(url string, subpath string, body interface{}, types ...string) ([]byte, error) {
|
func (caller *HTTPCaller) CallPost(url string, subpath string, body map[string]interface{}, types ...string) ([]byte, error) {
|
||||||
postBody, err := json.Marshal(body)
|
postBody, _ := json.Marshal(body)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
responseBody := bytes.NewBuffer(postBody)
|
responseBody := bytes.NewBuffer(postBody)
|
||||||
contentType := "application/json"
|
contentType := "application/json"
|
||||||
if len(types) > 0 {
|
if len(types) > 0 {
|
||||||
@ -123,12 +101,7 @@ func (caller *HTTPCaller) CallPost(url string, subpath string, body interface{},
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
defer resp.Body.Close()
|
||||||
err = caller.StoreResp(resp)
|
return io.ReadAll(resp.Body)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return caller.LastResults["body"].([]byte), nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// CallPost calls the POST method on the HTTP server
|
// CallPost calls the POST method on the HTTP server
|
||||||
@ -146,12 +119,7 @@ func (caller *HTTPCaller) CallPut(url string, subpath string, body map[string]in
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
defer resp.Body.Close()
|
||||||
err = caller.StoreResp(resp)
|
return io.ReadAll(resp.Body)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return caller.LastResults["body"].([]byte), nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// CallRaw calls the Raw method on the HTTP server
|
// CallRaw calls the Raw method on the HTTP server
|
||||||
@ -171,12 +139,7 @@ func (caller *HTTPCaller) CallRaw(method string, url string, subpath string,
|
|||||||
req.AddCookie(c)
|
req.AddCookie(c)
|
||||||
}
|
}
|
||||||
client := &http.Client{}
|
client := &http.Client{}
|
||||||
resp, err := client.Do(req)
|
return client.Do(req)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return resp, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// CallRaw calls the Raw method on the HTTP server
|
// CallRaw calls the Raw method on the HTTP server
|
||||||
@ -196,17 +159,3 @@ func (caller *HTTPCaller) CallForm(method string, url string, subpath string,
|
|||||||
client := &http.Client{}
|
client := &http.Client{}
|
||||||
return client.Do(req)
|
return client.Do(req)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (caller *HTTPCaller) StoreResp(resp *http.Response) error {
|
|
||||||
caller.LastResults = make(map[string]interface{})
|
|
||||||
caller.LastResults["header"] = resp.Header
|
|
||||||
caller.LastResults["code"] = resp.StatusCode
|
|
||||||
data, err := io.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println("Error reading the body of the last request")
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
caller.LastResults["body"] = data
|
|
||||||
return nil
|
|
||||||
}
|
|
Loading…
Reference in New Issue
Block a user