Merge branch 'main' of https://cloud.o-forge.io/core/oc-lib
Merged Main OCLIB
.gitattributes (vendored, new file, +3)
@@ -0,0 +1,3 @@
+# Force Go as the main language
+*.go linguist-detectable=true
+* linguist-language=Go
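For context: these Linguist override attributes pin the repository's reported language. The "* linguist-language=Go" rule classifies every tracked file as Go, and "*.go linguist-detectable=true" keeps Go files counted in the language statistics (Go files are detectable by default, so that line is mostly belt-and-braces), so the forge's language bar reports the repository as Go regardless of vendored or generated content.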
@@ -597,3 +597,65 @@ func (l *LibData) ToPurchasedResource() *purchase_resource.PurchaseResource {
 	}
 	return nil
 }
+
+
+// ============== ADMIRALTY ==============
+// Returns a concatenation of the peerId and namespace in order for
+// kubernetes ressources to have a unique name, under 63 characters
+// and yet identify which peer they are created for
+func GetConcatenatedName(peerId string, namespace string) string {
+	s := strings.Split(namespace, "-")[:2]
+	n := s[0] + "-" + s[1]
+
+	return peerId + "-" + n
+}
+
+// ------------- Loading resources ----------
+
+func LoadOneStorage(storageId string, user string, peerID string, groups []string) (*resources.StorageResource, error) {
+
+	res := NewRequest(LibDataEnum(STORAGE_RESOURCE), user, peerID, groups,nil).LoadOne(storageId)
+	if res.Code != 200 {
+		l := GetLogger()
+		l.Error().Msg("Error while loading storage ressource " + storageId)
+		return nil,fmt.Errorf(res.Err)
+	}
+
+	return res.ToStorageResource(), nil
+}
+
+func LoadOneComputing(computingId string, user string, peerID string, groups []string) (*resources.ComputeResource, error) {
+
+	res := NewRequest(LibDataEnum(COMPUTE_RESOURCE), user, peerID, groups,nil).LoadOne(computingId)
+	if res.Code != 200 {
+		l := GetLogger()
+		l.Error().Msg("Error while loading computing ressource " + computingId)
+		return nil,fmt.Errorf(res.Err)
+	}
+
+	return res.ToComputeResource(), nil
+}
+
+func LoadOneProcessing(processingId string, user string, peerID string, groups []string) (*resources.ProcessingResource, error) {
+
+	res := NewRequest(LibDataEnum(PROCESSING_RESOURCE), user, peerID, groups,nil).LoadOne(processingId)
+	if res.Code != 200 {
+		l := GetLogger()
+		l.Error().Msg("Error while loading processing ressource " + processingId)
+		return nil,fmt.Errorf(res.Err)
+	}
+
+	return res.ToProcessingResource(), nil
+}
+
+func LoadOneData(dataId string, user string, peerID string, groups []string) (*resources.DataResource, error) {
+
+	res := NewRequest(LibDataEnum(DATA_RESOURCE), user, peerID, groups,nil).LoadOne(dataId)
+	if res.Code != 200 {
+		l := GetLogger()
+		l.Error().Msg("Error while loading data ressource " + dataId)
+		return nil,fmt.Errorf(res.Err)
+	}
+	return res.ToDataResource(), nil
+
+}
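A minimal usage sketch for the new Admiralty helper and loaders; the package alias oclib, the identifiers, and the group values are illustrative assumptions, not part of this commit. Note that GetConcatenatedName keeps only the first two "-"-separated segments of the namespace, so a namespace with fewer than two segments would make the [:2] slice panic.

	// Hypothetical caller; assumes the library is imported as oclib and that
	// the peer/resource identifiers below exist.
	name := oclib.GetConcatenatedName("peer-42", "alpha-project-ns") // "peer-42-alpha-project"
	storage, err := oclib.LoadOneStorage("storage-uuid", "some-user", "peer-42", []string{"group-a"})
	if err != nil {
		return err // LoadOneStorage has already logged the failure via GetLogger()
	}
	fmt.Println(name, storage)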
@@ -2,6 +2,7 @@ package bill
 
 import (
 	"encoding/json"
+	"fmt"
 	"sync"
 	"time"
 
@@ -184,9 +185,18 @@ type PeerItemOrder struct {
 }
 
 func (d *PeerItemOrder) GetPrice(request *tools.APIRequest) (float64, error) {
+	/////////// Temporary in order to allow GenerateOrder to complete while billing is still WIP
+	if d.Purchase == nil {
+		return 0, nil
+	}
+	///////////
 	var priced *resources.PricedResource
 	b, _ := json.Marshal(d.Item)
-	json.Unmarshal(b, priced)
+	err := json.Unmarshal(b, priced)
+	if err != nil {
+		fmt.Println(err)
+		return 0, err
+	}
 	accessor := purchase_resource.NewAccessor(request)
 	search, code, _ := accessor.Search(&dbs.Filters{
 		And: map[string][]dbs.Filter{
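A side note on the unmarshal in GetPrice (not part of the commit): priced is declared as a nil *resources.PricedResource, and encoding/json returns an InvalidUnmarshalError when handed a nil pointer, so once d.Purchase is non-nil the new error branch will always fire instead of populating priced. A self-contained sketch of that behaviour, using a placeholder type:

	package main

	import (
		"encoding/json"
		"fmt"
	)

	// PricedResource stands in for resources.PricedResource (placeholder only).
	type PricedResource struct {
		Name string `json:"name"`
	}

	func main() {
		b := []byte(`{"name":"minio"}`)

		var nilPtr *PricedResource
		fmt.Println(json.Unmarshal(b, nilPtr)) // json: Unmarshal(nil *main.PricedResource)

		priced := &PricedResource{}
		if err := json.Unmarshal(b, priced); err != nil { // non-nil pointer: fields get populated
			fmt.Println(err)
			return
		}
		fmt.Println(priced.Name) // minio
	}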
@@ -22,7 +22,7 @@ type Booking struct {
 	ExecutionMetrics map[string][]models.MetricsSnapshot `json:"metrics,omitempty" bson:"metrics,omitempty"`
 
 	ExecutionsID string `json:"executions_id,omitempty" bson:"executions_id,omitempty" validate:"required"` // ExecutionsID is the ID of the executions
-	DestPeerID string `json:"dest_peer_id,omitempty"` // DestPeerID is the ID of the destination peer
+	DestPeerID string `json:"dest_peer_id,omitempty" bson:"dest_peer_id,omitempty"` // DestPeerID is the ID of the destination peer
 	WorkflowID string `json:"workflow_id,omitempty" bson:"workflow_id,omitempty"` // WorkflowID is the ID of the workflow
 	ExecutionID string `json:"execution_id,omitempty" bson:"execution_id,omitempty" validate:"required"`
 	State enum.BookingStatus `json:"state,omitempty" bson:"state,omitempty" validate:"required"` // State is the state of the booking
@@ -9,7 +9,7 @@ import (
 
 type PurchaseResource struct {
 	utils.AbstractObject
-	DestPeerID string
+	DestPeerID string `json:"dest_peer_id" bson:"dest_peer_id"`
 	PricedItem map[string]interface{} `json:"priced_item,omitempty" bson:"priced_item,omitempty" validate:"required"`
 	ExecutionsID string `json:"executions_id,omitempty" bson:"executions_id,omitempty" validate:"required"` // ExecutionsID is the ID of the executions
 	EndDate *time.Time `json:"end_buying_date,omitempty" bson:"end_buying_date,omitempty"`
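For context on the two bson tag additions (Booking.DestPeerID above and PurchaseResource.DestPeerID here): with the official MongoDB Go driver, an untagged struct field is persisted under its lowercased Go name, so DestPeerID would be stored as "destpeerid"; the explicit tag pins it to "dest_peer_id". A small illustration with placeholder structs:

	package main

	import (
		"fmt"

		"go.mongodb.org/mongo-driver/bson"
	)

	// Placeholder structs, not the library's actual types.
	type untagged struct {
		DestPeerID string
	}

	type tagged struct {
		DestPeerID string `bson:"dest_peer_id"`
	}

	func main() {
		a, _ := bson.Marshal(untagged{DestPeerID: "peer-42"})
		b, _ := bson.Marshal(tagged{DestPeerID: "peer-42"})
		fmt.Println(bson.Raw(a)) // {"destpeerid": "peer-42"}
		fmt.Println(bson.Raw(b)) // {"dest_peer_id": "peer-42"}
	}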
@@ -367,13 +367,11 @@ func (w *Workflow) GetByRelatedProcessing(processingID string, g func(item graph
 			_, node = item.GetResource() // we are looking for the storage as destination
 		}
 		if processingID == nodeID && node != nil { // if the storage is linked to the processing
-			if _, ok := related[processingID]; !ok {
-				related[processingID] = Related{}
-			}
-			rel := related[node.GetID()]
+			relID := node.GetID()
+			rel := Related{}
 			rel.Node = node
 			rel.Links = append(rel.Links, link)
-			related[processingID] = rel
+			related[relID] = rel
 		}
 	}
 	return related
@@ -484,6 +482,33 @@ func (wf *Workflow) Planify(start time.Time, end *time.Time, request *tools.APIR
 	return longest, priceds, wf, nil
 }
 
+// Returns a map of DataType (processing,computing,data,storage,worfklow) where each resource (identified by its UUID)
+// is mapped to the list of its items (different appearance) in the graph
+// ex: if the same Minio storage is represented by several nodes in the graph, in [tools.STORAGE_RESSOURCE] its UUID will be mapped to
+// the list of GraphItem ID that correspond to the ID of each node
+func (w *Workflow) GetItemsByResources() (map[tools.DataType]map[string][]string) {
+	res := make(map[tools.DataType]map[string][]string)
+	dtMethodMap := map[tools.DataType]func() []graph.GraphItem{
+		tools.STORAGE_RESOURCE: func() []graph.GraphItem { return w.GetGraphItems(w.Graph.IsStorage) },
+		tools.DATA_RESOURCE: func() []graph.GraphItem { return w.GetGraphItems(w.Graph.IsData) },
+		tools.COMPUTE_RESOURCE: func() []graph.GraphItem { return w.GetGraphItems(w.Graph.IsCompute) },
+		tools.PROCESSING_RESOURCE: func() []graph.GraphItem { return w.GetGraphItems(w.Graph.IsProcessing) },
+		tools.WORKFLOW_RESOURCE: func() []graph.GraphItem { return w.GetGraphItems(w.Graph.IsWorkflow) },
+	}
+
+	for dt, meth := range dtMethodMap {
+		res[dt] = make(map[string][]string)
+		items := meth()
+		for _, i := range items {
+			_, r := i.GetResource()
+			rId := r.GetID()
+			res[dt][rId] = append(res[dt][rId],i.ID)
+		}
+	}
+
+	return res
+}
+
 func plan[T resources.ResourceInterface](
 	dt tools.DataType, wf *Workflow, priceds map[tools.DataType]map[string]pricing.PricedItemITF, request *tools.APIRequest,
 	f func(graph.GraphItem) bool, start func(resources.ResourceInterface, pricing.PricedItemITF) (time.Time, float64), end func(time.Time, float64) *time.Time) ([]T, map[tools.DataType]map[string]pricing.PricedItemITF, error) {
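A rough usage sketch for GetItemsByResources; the workflow value wf and the way the returned IDs are consumed are assumptions for illustration, not part of the commit:

	// Hypothetical caller: list every graph node that represents each resource.
	itemsByType := wf.GetItemsByResources()
	for dt, byResource := range itemsByType {
		for resourceID, graphItemIDs := range byResource {
			fmt.Println(dt, "resource", resourceID, "appears as graph items", graphItemIDs)
		}
	}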
@@ -142,7 +142,7 @@ func getBooking(b *booking.Booking, request *tools.APIRequest, errCh chan error,
 	_, err = (&peer.Peer{}).LaunchPeerExecution(b.DestPeerID, b.ResourceID, tools.BOOKING, tools.GET, nil, &c)
 
 	if err != nil {
-		errCh <- err
+		errCh <- fmt.Errorf("error on " + b.DestPeerID + err.Error())
 		return
 	}
 
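The new message drops the original error from the chain because it is built by string concatenation; if callers ever need errors.Is or errors.As on the peer error, the usual Go idiom would be to wrap it, roughly as below (an alternative sketch, not what the commit does):

	// Wrapping keeps err inspectable with errors.Is / errors.As, and avoids
	// passing a dynamic string as a format string.
	errCh <- fmt.Errorf("error on %s: %w", b.DestPeerID, err)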
@@ -30,6 +30,7 @@ const (
 	LIVE_STORAGE
 	BILL
 	MINIO_SVCACC
+	MINIO_SVCACC_SECRET
 )
 
 var NOAPI = ""
@@ -75,6 +76,7 @@ var DefaultAPI = [...]string{
 	DATACENTERAPI,
 	NOAPI,
 	MINIO,
+	MINIO,
 }
 
 // Bind the standard data name to the data type
@@ -105,6 +107,7 @@ var Str = [...]string{
 	"live_storage",
 	"bill",
 	"service_account",
+	"secret",
 }
 
 func FromInt(i int) string {
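The last three hunks (the MINIO_SVCACC_SECRET constant, the extra MINIO entry in DefaultAPI, and the "secret" entry in Str) have to stay index-aligned, since the constant is apparently used to index the parallel arrays (see the FromInt context line). A generic sketch of the pattern with hypothetical, trimmed-down declarations:

	package main

	import "fmt"

	// Hypothetical parallel lists mirroring the pattern in the diff.
	const (
		MINIO_SVCACC = iota
		MINIO_SVCACC_SECRET
	)

	var Str = [...]string{
		"service_account",
		"secret",
	}

	func FromInt(i int) string { return Str[i] }

	func main() {
		fmt.Println(FromInt(MINIO_SVCACC_SECRET)) // secret
	}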