diff --git a/main.go b/main.go
index 2fac24e..4e417e5 100644
--- a/main.go
+++ b/main.go
@@ -84,10 +84,11 @@ func main() {
 	err := new_wf.LoadFrom(conf.GetConfig().WorkflowID, conf.GetConfig().PeerID)
 	if err != nil {
+		logger.Error().Msg("Could not retrieve workflow " + conf.GetConfig().WorkflowID + " from oc-catalog API")
 	}
-	argo_file_path, stepMax, err := new_wf.ExportToArgo(exec.ExecutionsID, conf.GetConfig().Timeout)
+	builder, argo_file_path, stepMax, err := new_wf.ExportToArgo(exec.ExecutionsID, conf.GetConfig().Timeout)
 	if err != nil {
 		logger.Error().Msg("Could not create the Argo file for " + conf.GetConfig().WorkflowID)
 		logger.Error().Msg(err.Error())
@@ -99,15 +100,20 @@ func main() {
 	wf_logger = logger.With().Str("argo_name", workflowName).Str("workflow_id", conf.GetConfig().WorkflowID).Str("workflow_execution_id", conf.GetConfig().ExecutionID).Logger()
 	wf_logger.Debug().Msg("Testing argo name")
 
-	if conf.GetConfig().KubeHost == "" {
-		// Not in a k8s environment, get conf from parameters
-		fmt.Println("Executes outside of k8s")
-		executeOutside(argo_file_path, stepMax)
-	} else {
-		// Executed in a k8s environment
-		fmt.Println("Executes inside a k8s")
-		executeInside(exec.GetID(), "argo", argo_file_path, stepMax)
+	err = builder.CompleteBuild(exec.ExecutionsID)
+	if err != nil {
+		logger.Error().Msg(err.Error())
 	}
+	_ = stepMax
+	// if conf.GetConfig().KubeHost == "" {
+	// 	// Not in a k8s environment, get conf from parameters
+	// 	fmt.Println("Executes outside of k8s")
+	// 	executeOutside(argo_file_path, stepMax)
+	// } else {
+	// 	// Executed in a k8s environment
+	// 	fmt.Println("Executes inside a k8s")
+	// 	executeInside(exec.GetID(), "argo", argo_file_path, stepMax)
+	// }
 }
 
 // So far we only log the output from
@@ -224,9 +230,9 @@ func loadConfig(is_k8s bool, parser *argparse.Parser) {
 	o = initOnion(o)
 	setConf(is_k8s, o, parser)
 
-	if !IsValidUUID(conf.GetConfig().ExecutionID) {
-		logger.Fatal().Msg("Provided ID is not an UUID")
-	}
+	// if !IsValidUUID(conf.GetConfig().ExecutionID) {
+	// 	logger.Fatal().Msg("Provided ID is not an UUID")
+	// }
 }
 
 func setConf(is_k8s bool, o *onion.Onion, parser *argparse.Parser) {
diff --git a/models/template.go b/models/template.go
index e9f36df..8398a6c 100644
--- a/models/template.go
+++ b/models/template.go
@@ -95,7 +95,7 @@ type Template struct {
 	Resource ServiceResource `yaml:"resource,omitempty"`
 }
 
-func (template *Template) CreateContainer(processing *resources.ProcessingResource, dag *Dag, templateName string) {
+func (template *Template) CreateContainer(processing *resources.ProcessingResource, dag *Dag) {
 	instance := processing.GetSelectedInstance()
 	if instance == nil {
 		return
@@ -116,7 +116,7 @@ func (template *Template) CreateContainer(processing *resources.ProcessingResour
 		template.Outputs.Parameters = append(template.Inputs.Parameters, Parameter{Name: v.Name})
 	}
 	cmd := strings.ReplaceAll(inst.Access.Container.Command, container.Image, "")
-	container.Args = append(container.Args, "echo "+templateName+" && ") // a casual echo to know where we are for logs purpose
+
 	for _, a := range strings.Split(cmd, " ") {
 		container.Args = append(container.Args, template.ReplacePerEnv(a, inst.Env))
 	}
@@ -124,6 +124,7 @@ func (template *Template) CreateContainer(processing *resources.ProcessingResour
 		container.Args = append(container.Args, template.ReplacePerEnv(a, inst.Env))
 	}
 	container.Args = []string{strings.Join(container.Args, " ")}
+
 	template.Container = container
 }
 
diff --git a/workflow_builder/admiralty_setter.go b/workflow_builder/admiralty_setter.go
new file mode 100644
index 0000000..fff8c25
--- /dev/null
+++ b/workflow_builder/admiralty_setter.go
@@ -0,0 +1,93 @@
+package workflow_builder
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	oclib "cloud.o-forge.io/core/oc-lib"
+	"cloud.o-forge.io/core/oc-lib/models/peer"
+	"cloud.o-forge.io/core/oc-lib/tools"
+)
+
+type AdmiraltySetter struct {
+	Id string // ID identifying the execution; corresponds to the workflow_executions id
+}
+
+func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string, remotePeerID string) error {
+
+	data := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER), "", localPeerID, nil, nil).LoadOne(remotePeerID)
+	if data.Code != 200 {
+		logger.Error().Msg("Error while trying to instantiate remote peer " + remotePeerID)
+		return fmt.Errorf(data.Err)
+	}
+	remotePeer := data.ToPeer()
+
+	data = oclib.NewRequest(oclib.LibDataEnum(oclib.PEER), "", localPeerID, nil, nil).LoadOne(localPeerID)
+	if data.Code != 200 {
+		logger.Error().Msg("Error while trying to instantiate local peer " + localPeerID)
+		return fmt.Errorf(data.Err)
+	}
+	localPeer := data.ToPeer()
+
+	caller := tools.NewHTTPCaller(
+		map[tools.DataType]map[tools.METHOD]string{
+			tools.ADMIRALTY_SOURCE: map[tools.METHOD]string{
+				tools.POST: "/:id",
+			},
+			tools.ADMIRALTY_KUBECONFIG: map[tools.METHOD]string{
+				tools.GET: "/:id",
+			},
+			tools.ADMIRALTY_SECRET: map[tools.METHOD]string{
+				tools.POST: "/:id",
+			},
+			tools.ADMIRALTY_TARGET: map[tools.METHOD]string{
+				tools.POST: "/:id",
+			},
+			tools.ADMIRALTY_NODES: map[tools.METHOD]string{
+				tools.GET: "/:id",
+			},
+		},
+	)
+	fmt.Println("Creating source in", remotePeerID, " ns-"+s.Id)
+	_ = s.callRemoteExecution(remotePeer, http.StatusCreated, caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil)
+	kubeconfig := s.getKubeconfig(remotePeer, caller)
+	_ = s.callRemoteExecution(localPeer, http.StatusCreated, caller, s.Id, tools.ADMIRALTY_SECRET, tools.POST, kubeconfig)
+	_ = s.callRemoteExecution(localPeer, http.StatusCreated, caller, s.Id, tools.ADMIRALTY_TARGET, tools.POST, nil)
+	_ = s.callRemoteExecution(localPeer, http.StatusOK, caller, s.Id, tools.ADMIRALTY_NODES, tools.GET, nil)
+
+	return nil
+}
+
+func (s *AdmiraltySetter) getKubeconfig(peer *peer.Peer, caller *tools.HTTPCaller) map[string]string {
+	var kubedata map[string]string
+	_ = s.callRemoteExecution(peer, http.StatusOK, caller, s.Id, tools.ADMIRALTY_KUBECONFIG, tools.GET, nil)
+	if caller.LastResults["body"] == nil || len(caller.LastResults["body"].([]byte)) == 0 {
+		fmt.Println("Something went wrong when retrieving data from Get call for kubeconfig")
+		panic(0)
+	}
+	err := json.Unmarshal(caller.LastResults["body"].([]byte), &kubedata)
+	if err != nil {
+		fmt.Println("Something went wrong when unmarshalling data from Get call for kubeconfig")
+		panic(0)
+	}
+
+	return kubedata
+}
+
+func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode int, caller *tools.HTTPCaller, dataID string, dt tools.DataType, method tools.METHOD, body interface{}) *peer.PeerExecution {
+	resp, err := peer.LaunchPeerExecution(peer.UUID, dataID, dt, method, body, caller)
+	if err != nil {
+		fmt.Println("Error when executing on peer at", peer.Url)
+		fmt.Println(err)
+		panic(0)
+	}
+
+	if caller.LastResults["code"].(int) != expectedCode {
+		fmt.Println("Didn't receive the expected code:", caller.LastResults["code"], "when expecting", expectedCode)
+		fmt.Println(string(caller.LastResults["body"].([]byte)))
+		panic(0)
+	}
+
+	return resp
+}
\ No newline at end of file
diff --git a/workflow_builder/argo_builder.go b/workflow_builder/argo_builder.go
index 2b146eb..dd90272 100644
--- a/workflow_builder/argo_builder.go
+++ b/workflow_builder/argo_builder.go
@@ -25,10 +25,11 @@ import (
 var logger zerolog.Logger
 
 type ArgoBuilder struct {
-	OriginWorkflow *w.Workflow
-	Workflow       Workflow
-	Services       []*Service
-	Timeout        int
+	OriginWorkflow *w.Workflow
+	Workflow       Workflow
+	Services       []*Service
+	Timeout        int
+	RemotePeers    []string
 }
 
 type Workflow struct {
@@ -187,7 +188,11 @@ func (b *ArgoBuilder) createArgoTemplates(namespace string,
 	_, firstItems, lastItems = b.addTaskToArgo(b.Workflow.getDag(), id, processing, firstItems, lastItems)
 	template := &Template{Name: getArgoName(processing.GetName(), id)}
 	fmt.Println("Creating template for", template.Name)
-	template.CreateContainer(processing, b.Workflow.getDag(), template.Name)
+	isReparted, peerId := b.isProcessingReparted(*processing, id)
+	template.CreateContainer(processing, b.Workflow.getDag())
+	if isReparted {
+		b.RemotePeers = append(b.RemotePeers, peerId)
+	}
 	// get datacenter from the processing
 	if processing.IsService {
 		b.CreateService(id, processing)
@@ -266,6 +271,7 @@ func (b *ArgoBuilder) createArgoTemplates(namespace string,
 	b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, *template)
 	return volumes, firstItems, lastItems
 }
+
 func (b *ArgoBuilder) addTaskToArgo(dag *Dag, graphItemID string, processing *resources.ProcessingResource,
 	firstItems []string, lastItems []string) (*Dag, []string, []string) {
 	unique_name := getArgoName(processing.GetName(), graphItemID)
@@ -368,3 +374,71 @@ func getArgoName(raw_name string, component_id string) (formatedName string) {
 	formatedName = strings.ToLower(formatedName)
 	return
 }
+
+// Verify if a processing resource is attached to another Compute than the one hosting
+// the current Open Cloud instance. If true, return the peer ID to contact
+func (b *ArgoBuilder) isProcessingReparted(processing resources.ProcessingResource, graphID string) (bool, string) {
+	computeAttached := b.retrieveProcessingCompute(graphID)
+	if computeAttached == nil {
+		logger.Error().Msg("No compute was found attached to processing " + processing.Name + " : " + processing.UUID)
+		panic(0)
+	}
+
+
+	// Creates an accessor strictly for the Peer Collection
+	req := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER), "", "", nil, nil)
+	if req == nil {
+		fmt.Println("TODO : handle error when trying to create a request on the Peer Collection")
+		return false, ""
+	}
+
+	res := req.LoadOne(computeAttached.CreatorID)
+	if res.Err != "" {
+		fmt.Print("TODO : handle error when requesting PeerID")
+		fmt.Print(res.Err)
+		return false, ""
+	}
+
+	peer := *res.ToPeer()
+
+	isNotReparted, _ := peer.IsMySelf()
+	fmt.Println("Result IsMySelf for ", peer.UUID, " : ", isNotReparted)
+
+	return !isNotReparted, peer.UUID
+}
+
+func (b *ArgoBuilder) retrieveProcessingCompute(graphID string) *resources.ComputeResource {
+	for _, link := range b.OriginWorkflow.Graph.Links {
+		// If a link contains the id of the processing
+		var oppositeId string
+		if link.Source.ID == graphID {
+			oppositeId = link.Destination.ID
+		} else if link.Destination.ID == graphID {
+			oppositeId = link.Source.ID
+		}
+		fmt.Println("OppositeId : ", oppositeId)
+		if oppositeId != "" {
+			dt, res := b.OriginWorkflow.Graph.GetResource(oppositeId)
+			if dt == oclib.COMPUTE_RESOURCE {
+				return res.(*resources.ComputeResource)
+			} else {
+				continue
+			}
+		}
+
+	}
+
+	return nil
+}
+
+
+// Execute the last actions once the YAML file for the Argo Workflow is created
+func (b *ArgoBuilder) CompleteBuild(executionsId string) error {
+	fmt.Println("DEV :: Completing build")
+	for _, peer := range b.RemotePeers {
+		fmt.Println("DEV :: Launching Admiralty Setup for ", peer)
+		setter := AdmiraltySetter{Id: executionsId}
+		setter.InitializeAdmiralty(conf.GetConfig().PeerID, peer)
+	}
+	return nil
+}
\ No newline at end of file
diff --git a/workflow_builder/graph.go b/workflow_builder/graph.go
index 0f5168c..5716ee7 100644
--- a/workflow_builder/graph.go
+++ b/workflow_builder/graph.go
@@ -41,20 +41,20 @@ func (w *WorflowDB) getWorkflow(workflow_id string, peerID string) (workflow *wo
 	return new_wf, nil
 }
 
-func (w *WorflowDB) ExportToArgo(namespace string, timeout int) (string, int, error) {
+func (w *WorflowDB) ExportToArgo(namespace string, timeout int) (*ArgoBuilder, string, int, error) {
 	logger := oclib.GetLogger()
 	fmt.Println("Exporting to Argo", w.Workflow)
 	if len(w.Workflow.Name) == 0 || w.Workflow.Graph == nil {
-		return "", 0, fmt.Errorf("can't export a graph that has not been loaded yet")
+		return nil, "", 0, fmt.Errorf("can't export a graph that has not been loaded yet")
 	}
 
-	argo_builder := ArgoBuilder{OriginWorkflow: w.Workflow, Timeout: timeout}
-	filename, stepMax, _, _, err := argo_builder.CreateDAG(namespace, true)
+	argoBuilder := ArgoBuilder{OriginWorkflow: w.Workflow, Timeout: timeout}
+	filename, stepMax, _, _, err := argoBuilder.CreateDAG(namespace, true)
 	if err != nil {
 		logger.Error().Msg("Could not create the argo file for " + w.Workflow.Name)
-		return "", 0, err
+		return nil, "", 0, err
 	}
-	return filename, stepMax, nil
+	return &argoBuilder, filename, stepMax, nil
 }
 
 // TODO implement this function
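
Reviewer note, not part of the patch: a minimal usage sketch of the call-site contract this change introduces, mirroring the edited main.go. ExportToArgo now also returns the *ArgoBuilder, and CompleteBuild must be called afterwards so Admiralty is set up for every peer collected in RemotePeers while the templates were built. Identifiers (new_wf, exec, conf, logger) are the ones already used in main.go; the early return on error is a suggestion, not something the patch does.

// Sketch only — assumes the surrounding main.go context from this patch.
builder, argo_file_path, stepMax, err := new_wf.ExportToArgo(exec.ExecutionsID, conf.GetConfig().Timeout)
if err != nil {
	logger.Error().Msg("Could not create the Argo file for " + conf.GetConfig().WorkflowID)
	logger.Error().Msg(err.Error())
	return // suggested: a nil builder cannot be used below
}

// CompleteBuild runs the post-generation steps: one Admiralty setup
// (source, kubeconfig, secret, target, node check) per peer recorded in
// builder.RemotePeers during template creation.
if err := builder.CompleteBuild(exec.ExecutionsID); err != nil {
	logger.Error().Msg(err.Error())
}

// argo_file_path and stepMax are currently unused because local/in-cluster
// execution is commented out in this patch.
_ = argo_file_path
_ = stepMax

Returning the builder from ExportToArgo (rather than rebuilding it inside CompleteBuild) is what lets RemotePeers survive between DAG creation and the Admiralty setup.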