test
Parent: 83d118fb05
Commit: 93f3806b86
env.env (new file, 4 lines)
@@ -0,0 +1,4 @@
KUBERNETES_SERVICE_HOST=192.168.1.169
KUBE_CA="LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTWpNeE1USXdNell3SGhjTk1qUXdPREE0TVRBeE16VTJXaGNOTXpRd09EQTJNVEF4TXpVMgpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTWpNeE1USXdNell3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFTVlk3ZHZhNEdYTVdkMy9jMlhLN3JLYjlnWXgyNSthaEE0NmkyNVBkSFAKRktQL2UxSVMyWVF0dzNYZW1TTUQxaStZdzJSaVppNUQrSVZUamNtNHdhcnFvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVWtlUVJpNFJiODduME5yRnZaWjZHClc2SU55NnN3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUlnRXA5ck04WmdNclRZSHYxZjNzOW5DZXZZeWVVa3lZUk4KWjUzazdoaytJS1FDSVFDbk05TnVGKzlTakIzNDFacGZ5ays2NEpWdkpSM3BhcmVaejdMd2lhNm9kdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"
KUBE_CERT="LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrVENDQVRlZ0F3SUJBZ0lJWUxWNkFPQkdrU1F3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOekl6TVRFeU1ETTJNQjRYRFRJME1EZ3dPREV3TVRNMU5sb1hEVEkxTURndwpPREV3TVRNMU5sb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJGQ2Q1MFdPeWdlQ2syQzcKV2FrOWY4MVAvSkJieVRIajRWOXBsTEo0ck5HeHFtSjJOb2xROFYxdUx5RjBtOTQ2Nkc0RmRDQ2dqaXFVSk92Swp3NVRPNnd5alNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCVFJkOFI5cXVWK2pjeUVmL0ovT1hQSzMyS09XekFLQmdncWhrak9QUVFEQWdOSUFEQkYKQWlFQTArbThqTDBJVldvUTZ0dnB4cFo4NVlMalF1SmpwdXM0aDdnSXRxS3NmUVVDSUI2M2ZNdzFBMm5OVWU1TgpIUGZOcEQwSEtwcVN0Wnk4djIyVzliYlJUNklZCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlRENDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUzTWpNeE1USXdNell3SGhjTk1qUXdPREE0TVRBeE16VTJXaGNOTXpRd09EQTJNVEF4TXpVMgpXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUzTWpNeE1USXdNell3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFRc3hXWk9pbnIrcVp4TmFEQjVGMGsvTDF5cE01VHAxOFRaeU92ektJazQKRTFsZWVqUm9STW0zNmhPeVljbnN3d3JoNnhSUnBpMW5RdGhyMzg0S0Z6MlBvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVTBYZkVmYXJsZm8zTWhIL3lmemx6Cnl0OWlqbHN3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQUxJL2dNYnNMT3MvUUpJa3U2WHVpRVMwTEE2cEJHMXgKcnBlTnpGdlZOekZsQWlFQW1wdjBubjZqN3M0MVI0QzFNMEpSL0djNE53MHdldlFmZWdEVGF1R2p3cFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"
KUBE_DATA="LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSU5ZS1BFb1dhd1NKUzJlRW5oWmlYMk5VZlY1ZlhKV2krSVNnV09TNFE5VTlvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFVUozblJZN0tCNEtUWUx0WnFUMS96VS84a0Z2Sk1lUGhYMm1Vc25pczBiR3FZblkyaVZEeApYVzR2SVhTYjNqcm9iZ1YwSUtDT0twUWs2OHJEbE03ckRBPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo="
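For reference, these four variables are what a Kubernetes client typically needs: the API server host plus base64-encoded CA, client certificate, and private key. Below is a minimal sketch, assuming client-go and that the values above are exported into the process environment; the variable names match env.env, but the wiring itself is an illustration and not part of this commit (the API server port is also an assumption).

package main

import (
	"encoding/base64"
	"fmt"
	"log"
	"os"
	"strings"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// decode strips the quotes used in env.env and base64-decodes the PEM blob.
func decode(name string) []byte {
	raw := strings.Trim(os.Getenv(name), `"`)
	data, err := base64.StdEncoding.DecodeString(raw)
	if err != nil {
		log.Fatalf("decoding %s: %v", name, err)
	}
	return data
}

func main() {
	cfg := &rest.Config{
		Host: "https://" + os.Getenv("KUBERNETES_SERVICE_HOST") + ":6443", // port 6443 is an assumption
		TLSClientConfig: rest.TLSClientConfig{
			CAData:   decode("KUBE_CA"),
			CertData: decode("KUBE_CERT"),
			KeyData:  decode("KUBE_DATA"),
		},
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("client ready:", client != nil)
}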
main.go (30 lines changed)
@@ -88,7 +88,7 @@ func main() {
 		logger.Error().Msg("Could not retrieve workflow " + conf.GetConfig().WorkflowID + " from oc-catalog API")
 	}

-	builder, argo_file_path, stepMax, err := new_wf.ExportToArgo(exec.ExecutionsID, conf.GetConfig().Timeout)
+	_, argo_file_path, stepMax, err := new_wf.ExportToArgo(exec.ExecutionsID, conf.GetConfig().Timeout)
 	if err != nil {
 		logger.Error().Msg("Could not create the Argo file for " + conf.GetConfig().WorkflowID)
 		logger.Error().Msg(err.Error())
@@ -100,20 +100,22 @@ func main() {
 	wf_logger = logger.With().Str("argo_name", workflowName).Str("workflow_id", conf.GetConfig().WorkflowID).Str("workflow_execution_id", conf.GetConfig().ExecutionID).Logger()
 	wf_logger.Debug().Msg("Testing argo name")

-	err = builder.CompleteBuild(exec.ExecutionsID)
-	if err != nil {
-		logger.Error().Msg(err.Error())
-	}
+	/*
+		THIS FAILED FOR LOCAL ARGO ACTIONS! Commented out on purpose to keep the main demo available.
+		err = builder.CompleteBuild(exec.ExecutionsID)
+		if err != nil {
+			logger.Error().Msg(err.Error())
+		}*/
 	_ = stepMax
-	// if conf.GetConfig().KubeHost == "" {
-	// 	// Not in a k8s environment, get conf from parameters
-	// 	fmt.Println("Executes outside of k8s")
-	// 	executeOutside(argo_file_path, stepMax)
-	// } else {
-	// 	// Executed in a k8s environment
-	// 	fmt.Println("Executes inside a k8s")
-	// 	executeInside(exec.GetID(), "argo", argo_file_path, stepMax)
-	// }
+	if conf.GetConfig().KubeHost == "" {
+		// Not in a k8s environment, get conf from parameters
+		fmt.Println("Executes outside of k8s")
+		executeOutside(argo_file_path, stepMax)
+	} else {
+		// Executed in a k8s environment
+		fmt.Println("Executes inside a k8s")
+		executeInside(exec.GetID(), "argo", argo_file_path, stepMax)
+	}
 }

 // So far we only log the output from
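The branch above only decides where the generated Argo file runs. As a purely speculative sketch (not this repository's implementation of executeOutside), running outside a cluster could shell out to the argo CLI with the generated file:

package main

import (
	"log"
	"os"
	"os/exec"
)

// Speculative sketch: submit the generated Argo YAML with the argo CLI and
// stream its output. stepMax is kept only to mirror the signature used above.
func executeOutside(argoFilePath string, stepMax int) {
	_ = stepMax // the real code presumably uses this to track workflow steps
	cmd := exec.Command("argo", "submit", "--watch", argoFilePath)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		log.Println("argo submit failed:", err)
	}
}

func main() {
	executeOutside("argo_workflow.yaml", 0)
}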
@@ -5,57 +5,57 @@ import (
 	"fmt"
 	"net/http"

 	oclib "cloud.o-forge.io/core/oc-lib"
 	"cloud.o-forge.io/core/oc-lib/models/peer"
 	"cloud.o-forge.io/core/oc-lib/tools"
 )

 type AdmiraltySetter struct {
-	Id string // ID to identify the execution, correspond to workflow_executions id
+	Id string // ID to identify the execution, correspond to workflow_executions id
 }

-func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID string) error {
+func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string, remotePeerID string) error {
+	/*
-	data := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"",localPeerID,nil,nil).LoadOne(remotePeerID)
-	if data.Code != 200 {
-		logger.Error().Msg("Error while trying to instantiate remote peer " + remotePeerID)
-		return fmt.Errorf(data.Err)
-	}
-	remotePeer := data.ToPeer()
-
+		data := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"",localPeerID,nil,nil).LoadOne(remotePeerID)
+		if data.Code != 200 {
+			logger.Error().Msg("Error while trying to instantiate remote peer " + remotePeerID)
+			return fmt.Errorf(data.Err)
+		}
+		remotePeer := data.ToPeer()
-	data = oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"",localPeerID,nil,nil).LoadOne(localPeerID)
-	if data.Code != 200 {
-		logger.Error().Msg("Error while trying to instantiate local peer " + remotePeerID)
-		return fmt.Errorf(data.Err)
-	}
-	localPeer := data.ToPeer()
-
+		data = oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"",localPeerID,nil,nil).LoadOne(localPeerID)
+		if data.Code != 200 {
+			logger.Error().Msg("Error while trying to instantiate local peer " + remotePeerID)
+			return fmt.Errorf(data.Err)
+		}
+		localPeer := data.ToPeer()

-	caller := tools.NewHTTPCaller(
-		map[tools.DataType]map[tools.METHOD]string{
-			tools.ADMIRALTY_SOURCE: {
-				tools.POST :"/:id",
-			},
-			tools.ADMIRALTY_KUBECONFIG: {
-				tools.GET:"/:id",
-			},
-			tools.ADMIRALTY_SECRET: {
-				tools.POST:"/:id",
-			},
-			tools.ADMIRALTY_TARGET: {
-				tools.POST:"/:id",
-			},
-			tools.ADMIRALTY_NODES: {
-				tools.GET:"/:id",
-			},
-		},
-	)
-	fmt.Println("Creating source in", remotePeerID, " ns-" + s.Id)
-	_ = s.callRemoteExecution(remotePeer, http.StatusCreated,caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil)
-	kubeconfig := s.getKubeconfig(remotePeer, caller)
-	_ = s.callRemoteExecution(localPeer, http.StatusCreated, caller,s.Id, tools.ADMIRALTY_SECRET, tools.POST,kubeconfig)
-	_ = s.callRemoteExecution(localPeer,http.StatusCreated,caller,s.Id,tools.ADMIRALTY_TARGET,tools.POST, nil)
-	_ = s.callRemoteExecution(localPeer,http.StatusOK,caller,s.Id,tools.ADMIRALTY_NODES,tools.GET, nil)
+		caller := tools.NewHTTPCaller(
+			map[tools.DataType]map[tools.METHOD]string{
+				tools.ADMIRALTY_SOURCE: {
+					tools.POST :"/:id",
+				},
+				tools.ADMIRALTY_KUBECONFIG: {
+					tools.GET:"/:id",
+				},
+				tools.ADMIRALTY_SECRET: {
+					tools.POST:"/:id",
+				},
+				tools.ADMIRALTY_TARGET: {
+					tools.POST:"/:id",
+				},
+				tools.ADMIRALTY_NODES: {
+					tools.GET:"/:id",
+				},
+			},
+		)
+		fmt.Println("Creating source in", remotePeerID, " ns-" + s.Id)
+
+		_ = s.callRemoteExecution(remotePeer, http.StatusCreated,caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil)
+		kubeconfig := s.getKubeconfig(remotePeer, caller)
+		_ = s.callRemoteExecution(localPeer, http.StatusCreated, caller,s.Id, tools.ADMIRALTY_SECRET, tools.POST,kubeconfig)
+		_ = s.callRemoteExecution(localPeer,http.StatusCreated,caller,s.Id,tools.ADMIRALTY_TARGET,tools.POST, nil)
+		_ = s.callRemoteExecution(localPeer,http.StatusOK,caller,s.Id,tools.ADMIRALTY_NODES,tools.GET, nil)
+	*/
 	return nil
 }

@@ -75,7 +75,7 @@ func (s *AdmiraltySetter) getKubeconfig(peer *peer.Peer, caller *tools.HTTPCalle
 	return kubedata
 }

-func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode int,caller *tools.HTTPCaller, dataID string, dt tools.DataType, method tools.METHOD, body interface{}) *peer.PeerExecution {
+func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode int, caller *tools.HTTPCaller, dataID string, dt tools.DataType, method tools.METHOD, body interface{}) *peer.PeerExecution {
 	resp, err := peer.LaunchPeerExecution(peer.UUID, dataID, dt, method, body, caller)
 	if err != nil {
 		fmt.Println("Error when executing on peer at", peer.Url)
@@ -25,11 +25,11 @@ import (
 var logger zerolog.Logger

 type ArgoBuilder struct {
-	OriginWorkflow *w.Workflow
-	Workflow Workflow
-	Services []*Service
-	Timeout int
-	RemotePeers []string
+	OriginWorkflow *w.Workflow
+	Workflow       Workflow
+	Services       []*Service
+	Timeout        int
+	RemotePeers    []string
 }

 type Workflow struct {

@@ -188,7 +188,7 @@ func (b *ArgoBuilder) createArgoTemplates(namespace string,
 	_, firstItems, lastItems = b.addTaskToArgo(b.Workflow.getDag(), id, processing, firstItems, lastItems)
 	template := &Template{Name: getArgoName(processing.GetName(), id)}
 	fmt.Println("Creating template for", template.Name)
-	isReparted, peerId := b.isProcessingReparted(*processing,id)
+	isReparted, peerId := b.isProcessingReparted(*processing, id)
 	template.CreateContainer(processing, b.Workflow.getDag())
 	if isReparted {
 		b.RemotePeers = append(b.RemotePeers, peerId)
@@ -377,16 +377,15 @@ func getArgoName(raw_name string, component_id string) (formatedName string) {

 // Verify if a processing resource is attached to another Compute than the one hosting
 // the current Open Cloud instance. If true return the peer ID to contact
-func (b *ArgoBuilder) isProcessingReparted(processing resources.ProcessingResource, graphID string) (bool,string) {
+func (b *ArgoBuilder) isProcessingReparted(processing resources.ProcessingResource, graphID string) (bool, string) {
 	computeAttached := b.retrieveProcessingCompute(graphID)
 	if computeAttached == nil {
-		logger.Error().Msg("No compute was found attached to processing " + processing.Name + " : " + processing.UUID )
+		logger.Error().Msg("No compute was found attached to processing " + processing.Name + " : " + processing.UUID)
 		panic(0)
 	}

-
 	// Creates an accessor srtictly for Peer Collection
-	req := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"","",nil,nil)
+	req := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER), "", "", nil, nil)
 	if req == nil {
 		fmt.Println("TODO : handle error when trying to create a request on the Peer Collection")
 		return false, ""

@@ -401,8 +400,8 @@ func (b *ArgoBuilder) isProcessingReparted(processing resources.ProcessingResour

 	peer := *res.ToPeer()

-	isNotReparted, _ := peer.IsMySelf()
-	fmt.Println("Result IsMySelf for ", peer.UUID ," : ", isNotReparted)
+	isNotReparted := peer.State == 1
+	fmt.Println("Result IsMySelf for ", peer.UUID, " : ", isNotReparted)

 	return !isNotReparted, peer.UUID
 }
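After this change the remoteness decision reduces to a state comparison instead of the previous IsMySelf() call. A minimal self-contained sketch of the same logic follows; the Peer type and the meaning of State == 1 ("this is the local peer") are assumptions for illustration, not definitions taken from oc-lib.

package main

import "fmt"

// Hypothetical stand-in for the oc-lib peer model; the real fields live in
// cloud.o-forge.io/core/oc-lib/models/peer.
type Peer struct {
	UUID  string
	State int // assumption: 1 marks the local ("myself") peer
}

// isReparted mirrors the check above: a processing is "reparted" (runs on a
// remote peer) when the attached peer is not the local one.
func isReparted(p Peer) (bool, string) {
	isNotReparted := p.State == 1
	fmt.Println("Result IsMySelf for ", p.UUID, " : ", isNotReparted)
	return !isNotReparted, p.UUID
}

func main() {
	fmt.Println(isReparted(Peer{UUID: "abc", State: 1})) // false -> local peer
	fmt.Println(isReparted(Peer{UUID: "def", State: 0})) // true  -> remote peer
}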
@@ -411,9 +410,9 @@ func (b *ArgoBuilder) retrieveProcessingCompute(graphID string) *resources.Compu
 	for _, link := range b.OriginWorkflow.Graph.Links {
 		// If a link contains the id of the processing
 		var oppositeId string
-		if link.Source.ID == graphID{
+		if link.Source.ID == graphID {
 			oppositeId = link.Destination.ID
-		} else if(link.Destination.ID == graphID){
+		} else if link.Destination.ID == graphID {
 			oppositeId = link.Source.ID
 		}
 		fmt.Println("OppositeId : ", oppositeId)
@@ -431,14 +430,13 @@ func (b *ArgoBuilder) retrieveProcessingCompute(graphID string) *resources.Compu
 	return nil
 }

-
 // Execute the last actions once the YAML file for the Argo Workflow is created
 func (b *ArgoBuilder) CompleteBuild(executionsId string) error {
 	fmt.Println("DEV :: Completing build")
 	for _, peer := range b.RemotePeers {
 		fmt.Println("DEV :: Launching Admiralty Setup for ", peer)
 		setter := AdmiraltySetter{Id: executionsId}
-		setter.InitializeAdmiralty(conf.GetConfig().PeerID,peer)
+		setter.InitializeAdmiralty(conf.GetConfig().PeerID, peer)
 	}
 	return nil
 }
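InitializeAdmiralty returns an error that the loop above currently discards. As a sketch only, reusing the identifiers shown in this diff (conf, AdmiraltySetter, ArgoBuilder), the error could be propagated instead; this is a suggestion, not part of the commit.

// Sketch: same shape as CompleteBuild above, but stops at the first failed
// Admiralty setup instead of ignoring the returned error.
func (b *ArgoBuilder) CompleteBuild(executionsId string) error {
	for _, peer := range b.RemotePeers {
		setter := AdmiraltySetter{Id: executionsId}
		if err := setter.InitializeAdmiralty(conf.GetConfig().PeerID, peer); err != nil {
			return fmt.Errorf("admiralty setup for peer %s failed: %w", peer, err)
		}
	}
	return nil
}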