From 08ade1af66a037eacb94260181af2cdc67cf7094 Mon Sep 17 00:00:00 2001 From: pb Date: Wed, 2 Apr 2025 11:40:14 +0200 Subject: [PATCH 01/19] Finished to implement admiralty onto the final argo yaml file and restructured file creation --- main.go | 33 +++++++------- models/template.go | 11 +++++ workflow_builder/admiralty_setter.go | 59 +++++++++++++++++++++---- workflow_builder/argo_builder.go | 66 ++++++++++++++++++---------- workflow_builder/graph.go | 10 ++--- 5 files changed, 125 insertions(+), 54 deletions(-) diff --git a/main.go b/main.go index 4e417e5..a316003 100644 --- a/main.go +++ b/main.go @@ -88,32 +88,33 @@ func main() { logger.Error().Msg("Could not retrieve workflow " + conf.GetConfig().WorkflowID + " from oc-catalog API") } - builder, argo_file_path, stepMax, err := new_wf.ExportToArgo(exec.ExecutionsID, conf.GetConfig().Timeout) + builder, stepMax, err := new_wf.ExportToArgo(exec.ExecutionsID, conf.GetConfig().Timeout) if err != nil { logger.Error().Msg("Could not create the Argo file for " + conf.GetConfig().WorkflowID) logger.Error().Msg(err.Error()) } - logger.Debug().Msg("Created :" + argo_file_path) + + + argo_file_path, err := builder.CompleteBuild(exec.ExecutionsID) + if err != nil { + logger.Error().Msg(err.Error()) + } workflowName = getContainerName(argo_file_path) wf_logger = logger.With().Str("argo_name", workflowName).Str("workflow_id", conf.GetConfig().WorkflowID).Str("workflow_execution_id", conf.GetConfig().ExecutionID).Logger() wf_logger.Debug().Msg("Testing argo name") - - err = builder.CompleteBuild(exec.ExecutionsID) - if err != nil { - logger.Error().Msg(err.Error()) - } _ = stepMax - // if conf.GetConfig().KubeHost == "" { - // // Not in a k8s environment, get conf from parameters - // fmt.Println("Executes outside of k8s") - // executeOutside(argo_file_path, stepMax) - // } else { - // // Executed in a k8s environment - // fmt.Println("Executes inside a k8s") - // executeInside(exec.GetID(), "argo", argo_file_path, stepMax) - // } + + if conf.GetConfig().KubeHost == "" { + // Not in a k8s environment, get conf from parameters + fmt.Println("Executes outside of k8s") + executeOutside(argo_file_path, stepMax) + } else { + // Executed in a k8s environment + fmt.Println("Executes inside a k8s") + executeInside(exec.GetID(), "argo", argo_file_path, stepMax) + } } // So far we only log the output from diff --git a/models/template.go b/models/template.go index 8398a6c..f7a6019 100644 --- a/models/template.go +++ b/models/template.go @@ -58,6 +58,7 @@ type Dag struct { type TemplateMetadata struct { Labels map[string]string `yaml:"labels,omitempty"` + Annotations map[string]string `yaml:"annotations,omitempty"` } type Secret struct { @@ -139,3 +140,13 @@ func (template *Template) ReplacePerEnv(arg string, envs []models.Param) string } return arg } + +// Add the metadata that allow Admiralty to pick up an Argo Workflow that needs to be reparted +// The value of "clustername" is the peerId, which must be replaced by the node name's for this specific execution +func (t *Template) AddAdmiraltyAnnotations(peerId string){ + if t.Metadata.Annotations == nil { + t.Metadata.Annotations = make(map[string]string) + } + t.Metadata.Annotations["multicluster.admiralty.io/elect"] = "" + t.Metadata.Annotations["multicluster.admiralty.io/clustername"] = peerId +} \ No newline at end of file diff --git a/workflow_builder/admiralty_setter.go b/workflow_builder/admiralty_setter.go index fff8c25..f4af8ef 100644 --- a/workflow_builder/admiralty_setter.go +++ 
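A minimal, self-contained sketch of what the `AddAdmiraltyAnnotations` helper added in this patch produces: the two `multicluster.admiralty.io` annotations are set on the template metadata and serialized with the rest of the workflow. The struct definitions are trimmed to the relevant fields and `gopkg.in/yaml.v3` is assumed for marshaling; the real project types carry more fields.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// Trimmed-down versions of the template types touched by this patch.
type TemplateMetadata struct {
	Labels      map[string]string `yaml:"labels,omitempty"`
	Annotations map[string]string `yaml:"annotations,omitempty"`
}

type Template struct {
	Name     string           `yaml:"name"`
	Metadata TemplateMetadata `yaml:"metadata,omitempty"`
}

// AddAdmiraltyAnnotations marks a template so Admiralty elects it for
// multi-cluster scheduling; the cluster name starts out as the peer ID.
func (t *Template) AddAdmiraltyAnnotations(peerID string) {
	if t.Metadata.Annotations == nil {
		t.Metadata.Annotations = make(map[string]string)
	}
	t.Metadata.Annotations["multicluster.admiralty.io/elect"] = ""
	t.Metadata.Annotations["multicluster.admiralty.io/clustername"] = peerID
}

func main() {
	tpl := Template{Name: "processing-step"}
	tpl.AddAdmiraltyAnnotations("peer-1234")
	out, _ := yaml.Marshal(tpl)
	fmt.Print(string(out))
}
```

The `clustername` value is only the peer ID at build time; as the later hunks show, `CompleteBuild` rewrites it to the Admiralty node name (and patch 02 switches it to `"target-" + ExecutionID`).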
b/workflow_builder/admiralty_setter.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "net/http" + "time" oclib "cloud.o-forge.io/core/oc-lib" "cloud.o-forge.io/core/oc-lib/models/peer" @@ -11,7 +12,8 @@ import ( ) type AdmiraltySetter struct { - Id string // ID to identify the execution, correspond to workflow_executions id + Id string // ID to identify the execution, correspond to workflow_executions id + NodeName string // Allows to retrieve the name of the node used for this execution on each peer {"peerId": "nodeName"} } func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID string) error { @@ -50,18 +52,19 @@ func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID st }, ) fmt.Println("Creating source in", remotePeerID, " ns-" + s.Id) - _ = s.callRemoteExecution(remotePeer, http.StatusCreated,caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil) + _ = s.callRemoteExecution(remotePeer, http.StatusCreated,caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil, true) kubeconfig := s.getKubeconfig(remotePeer, caller) - _ = s.callRemoteExecution(localPeer, http.StatusCreated, caller,s.Id, tools.ADMIRALTY_SECRET, tools.POST,kubeconfig) - _ = s.callRemoteExecution(localPeer,http.StatusCreated,caller,s.Id,tools.ADMIRALTY_TARGET,tools.POST, nil) - _ = s.callRemoteExecution(localPeer,http.StatusOK,caller,s.Id,tools.ADMIRALTY_NODES,tools.GET, nil) + _ = s.callRemoteExecution(localPeer, http.StatusCreated, caller,s.Id, tools.ADMIRALTY_SECRET, tools.POST,kubeconfig, true) + _ = s.callRemoteExecution(localPeer,http.StatusCreated,caller,s.Id,tools.ADMIRALTY_TARGET,tools.POST, nil, true) + + s.checkNodeStatus(localPeer,caller) return nil } func (s *AdmiraltySetter) getKubeconfig(peer *peer.Peer, caller *tools.HTTPCaller) map[string]string { var kubedata map[string]string - _ = s.callRemoteExecution(peer, http.StatusOK, caller, s.Id, tools.ADMIRALTY_KUBECONFIG, tools.GET, nil) + _ = s.callRemoteExecution(peer, http.StatusOK, caller, s.Id, tools.ADMIRALTY_KUBECONFIG, tools.GET, nil, true) if caller.LastResults["body"] == nil || len(caller.LastResults["body"].([]byte)) == 0 { fmt.Println("Something went wrong when retrieving data from Get call for kubeconfig") panic(0) @@ -75,7 +78,7 @@ func (s *AdmiraltySetter) getKubeconfig(peer *peer.Peer, caller *tools.HTTPCalle return kubedata } -func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode int,caller *tools.HTTPCaller, dataID string, dt tools.DataType, method tools.METHOD, body interface{}) *peer.PeerExecution { +func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode int,caller *tools.HTTPCaller, dataID string, dt tools.DataType, method tools.METHOD, body interface{}, panicCode bool) *peer.PeerExecution { resp, err := peer.LaunchPeerExecution(peer.UUID, dataID, dt, method, body, caller) if err != nil { fmt.Println("Error when executing on peer at", peer.Url) @@ -85,9 +88,47 @@ func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode int,ca if caller.LastResults["code"].(int) != expectedCode { fmt.Println("Didn't receive the expected code :", caller.LastResults["code"], "when expecting", expectedCode) - fmt.Println(string(caller.LastResults["body"].(byte))) - panic(0) + if _, ok := caller.LastResults["body"]; ok { + logger.Info().Msg(string(caller.LastResults["body"].([]byte))) + // fmt.Println(string(caller.LastResults["body"].([]byte))) + } + if panicCode { + panic(0) + } } return resp +} + +func (s *AdmiraltySetter) storeNodeName(caller 
*tools.HTTPCaller){ + var data map[string]interface{} + if resp, ok := caller.LastResults["body"]; ok { + json.Unmarshal(resp.([]byte), &data) + } + + if node, ok := data["node"]; ok { + metadata := node.(map[string]interface{})["metadata"] + name := metadata.(map[string]interface{})["name"].(string) + s.NodeName = name + } else { + fmt.Println("Could not retrieve data about the recently created node") + panic(0) + } +} + +func (s *AdmiraltySetter) checkNodeStatus(localPeer *peer.Peer, caller *tools.HTTPCaller){ + for i := range(5) { + time.Sleep(5 * time.Second) // let some time for kube to generate the node + _ = s.callRemoteExecution(localPeer,http.StatusOK,caller,s.Id,tools.ADMIRALTY_NODES,tools.GET, nil, false) + if caller.LastResults["code"] == 200 { + s.storeNodeName(caller) + return + } + if i == 5 { + logger.Error().Msg("Node on " + localPeer.Name + " was never found, panicking !") + panic(0) + } + logger.Info().Msg("Could not verify that node is up. Retrying...") + } + } \ No newline at end of file diff --git a/workflow_builder/argo_builder.go b/workflow_builder/argo_builder.go index dd90272..9cf71a8 100644 --- a/workflow_builder/argo_builder.go +++ b/workflow_builder/argo_builder.go @@ -61,7 +61,7 @@ type Spec struct { // TODO: found on a processing instance linked to storage // add s3, gcs, azure, etc if needed on a link between processing and storage -func (b *ArgoBuilder) CreateDAG(namespace string, write bool) (string, int, []string, []string, error) { +func (b *ArgoBuilder) CreateDAG(namespace string, write bool) ( int, []string, []string, error) { fmt.Println("Creating DAG", b.OriginWorkflow.Graph.Items) // handle services by checking if there is only one processing with hostname and port firstItems, lastItems, volumes := b.createTemplates(namespace) @@ -74,26 +74,11 @@ func (b *ArgoBuilder) CreateDAG(namespace string, write bool) (string, int, []st b.Workflow.ApiVersion = "argoproj.io/v1alpha1" b.Workflow.Kind = "Workflow" if !write { - return "", len(b.Workflow.getDag().Tasks), firstItems, lastItems, nil + return len(b.Workflow.getDag().Tasks), firstItems, lastItems, nil } - random_name := fakelish.GenerateFakeWord(5, 8) + "-" + fakelish.GenerateFakeWord(5, 8) - b.Workflow.Metadata.Name = "oc-monitor-" + random_name - logger = oclib.GetLogger() - yamlified, err := yaml.Marshal(b.Workflow) - if err != nil { - logger.Error().Msg("Could not transform object to yaml file") - return "", 0, firstItems, lastItems, err - } - // Give a unique name to each argo file with its timestamp DD:MM:YYYY_hhmmss - current_timestamp := time.Now().Format("02_01_2006_150405") - file_name := random_name + "_" + current_timestamp + ".yml" - workflows_dir := "./argo_workflows/" - err = os.WriteFile(workflows_dir+file_name, []byte(yamlified), 0660) - if err != nil { - logger.Error().Msg("Could not write the yaml file") - return "", 0, firstItems, lastItems, err - } - return workflows_dir + file_name, len(b.Workflow.getDag().Tasks), firstItems, lastItems, nil + + + return len(b.Workflow.getDag().Tasks), firstItems, lastItems, nil } func (b *ArgoBuilder) createTemplates(namespace string) ([]string, []string, []VolumeMount) { @@ -122,7 +107,7 @@ func (b *ArgoBuilder) createTemplates(namespace string) ([]string, []string, []V continue } subBuilder := ArgoBuilder{OriginWorkflow: realWorkflow.(*w.Workflow), Timeout: b.Timeout} - _, _, fi, li, err := subBuilder.CreateDAG(namespace, false) + _, fi, li, err := subBuilder.CreateDAG(namespace, false) if err != nil { logger.Error().Msg("Error creating the 
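The `checkNodeStatus` loop added above polls the local peer until the Admiralty virtual node shows up. Below is a compact sketch of that polling pattern, with the oc-lib caller and the `ADMIRALTY_NODES` endpoint replaced by a placeholder `fetchNode` function; the sketch moves the give-up check after the loop so it actually runs, since `i` in `for i := range(5)` only takes the values 0 through 4.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// fetchNode stands in for the ADMIRALTY_NODES call; it returns the node
// name once Kubernetes has created the virtual node (here: on attempt 3).
func fetchNode(attempt int) (string, error) {
	if attempt < 3 {
		return "", errors.New("node not ready")
	}
	return "admiralty-target-node", nil
}

func waitForNode(maxAttempts int, delay time.Duration) (string, error) {
	for i := 1; i <= maxAttempts; i++ {
		time.Sleep(delay) // let Kubernetes create the node before asking
		if name, err := fetchNode(i); err == nil {
			return name, nil
		}
		fmt.Printf("attempt %d/%d: node not visible yet, retrying\n", i, maxAttempts)
	}
	return "", fmt.Errorf("admiralty node never appeared after %d attempts", maxAttempts)
}

func main() {
	name, err := waitForNode(5, 100*time.Millisecond)
	if err != nil {
		panic(err)
	}
	fmt.Println("node ready:", name)
}
```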
subworkflow : " + err.Error()) continue @@ -192,6 +177,7 @@ func (b *ArgoBuilder) createArgoTemplates(namespace string, template.CreateContainer(processing, b.Workflow.getDag()) if isReparted { b.RemotePeers = append(b.RemotePeers, peerId) + template.AddAdmiraltyAnnotations(peerId) } // get datacenter from the processing if processing.IsService { @@ -433,12 +419,44 @@ func (b *ArgoBuilder) retrieveProcessingCompute(graphID string) *resources.Compu // Execute the last actions once the YAML file for the Argo Workflow is created -func (b *ArgoBuilder) CompleteBuild(executionsId string) error { +func (b *ArgoBuilder) CompleteBuild(executionsId string) (string, error) { fmt.Println("DEV :: Completing build") + setter := AdmiraltySetter{Id: executionsId} + // Setup admiralty for each node for _, peer := range b.RemotePeers { fmt.Println("DEV :: Launching Admiralty Setup for ", peer) - setter := AdmiraltySetter{Id: executionsId} setter.InitializeAdmiralty(conf.GetConfig().PeerID,peer) } - return nil + + // Update the name of the admiralty node to use + for _, template := range b.Workflow.Spec.Templates { + if len(template.Metadata.Annotations) > 0 { + if resp, ok := template.Metadata.Annotations["multicluster.admiralty.io/clustername"]; ok { + fmt.Println(resp) + template.Metadata.Annotations["multicluster.admiralty.io/clustername"] = setter.NodeName + } + } + } + + // Generate the YAML file + random_name := fakelish.GenerateFakeWord(5, 8) + "-" + fakelish.GenerateFakeWord(5, 8) + b.Workflow.Metadata.Name = "oc-monitor-" + random_name + logger = oclib.GetLogger() + yamlified, err := yaml.Marshal(b.Workflow) + if err != nil { + logger.Error().Msg("Could not transform object to yaml file") + return "", err + } + // Give a unique name to each argo file with its timestamp DD:MM:YYYY_hhmmss + current_timestamp := time.Now().Format("02_01_2006_150405") + file_name := random_name + "_" + current_timestamp + ".yml" + workflows_dir := "./argo_workflows/" + err = os.WriteFile(workflows_dir+file_name, []byte(yamlified), 0660) + + if err != nil { + logger.Error().Msg("Could not write the yaml file") + return "", err + } + + return workflows_dir + file_name, nil } \ No newline at end of file diff --git a/workflow_builder/graph.go b/workflow_builder/graph.go index 5716ee7..6cb688e 100644 --- a/workflow_builder/graph.go +++ b/workflow_builder/graph.go @@ -41,20 +41,20 @@ func (w *WorflowDB) getWorkflow(workflow_id string, peerID string) (workflow *wo return new_wf, nil } -func (w *WorflowDB) ExportToArgo(namespace string, timeout int) (*ArgoBuilder,string, int, error) { +func (w *WorflowDB) ExportToArgo(namespace string, timeout int) (*ArgoBuilder, int, error) { logger := oclib.GetLogger() fmt.Println("Exporting to Argo", w.Workflow) if len(w.Workflow.Name) == 0 || w.Workflow.Graph == nil { - return nil, "", 0, fmt.Errorf("can't export a graph that has not been loaded yet") + return nil, 0, fmt.Errorf("can't export a graph that has not been loaded yet") } argoBuilder := ArgoBuilder{OriginWorkflow: w.Workflow, Timeout: timeout} - filename, stepMax, _, _, err := argoBuilder.CreateDAG(namespace, true) + stepMax, _, _, err := argoBuilder.CreateDAG(namespace, true) if err != nil { logger.Error().Msg("Could not create the argo file for " + w.Workflow.Name) - return nil, "", 0, err + return nil, 0, err } - return &argoBuilder, filename, stepMax, nil + return &argoBuilder, stepMax, nil } // TODO implement this function From 42ee6abcb633eb3cb36b0a46afa73c92ace2380e Mon Sep 17 00:00:00 2001 From: pb Date: Tue, 8 Apr 2025 
10:09:24 +0200 Subject: [PATCH 02/19] adapted the code to execute the wf in the dedicated namespace --- main.go | 3 ++- workflow_builder/admiralty_setter.go | 2 +- workflow_builder/argo_builder.go | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/main.go b/main.go index a316003..dc6658a 100644 --- a/main.go +++ b/main.go @@ -143,7 +143,8 @@ func executeOutside(argo_file_path string, stepMax int) { var stdout, stderr io.ReadCloser // var stderr io.ReadCloser var err error - cmd := exec.Command("argo", "submit", "--log", argo_file_path, "--serviceaccount=argo", "-n", "argo") + logger.Debug().Msg("executing :" + "argo submit --log " + argo_file_path + " --serviceaccount sa-" + conf.GetConfig().ExecutionID + " -n " + conf.GetConfig().ExecutionID) + cmd := exec.Command("argo", "submit", "--log", argo_file_path, "--serviceaccount", "sa-"+conf.GetConfig().ExecutionID, "-n", conf.GetConfig().ExecutionID ) if stdout, err = cmd.StdoutPipe(); err != nil { wf_logger.Error().Msg("Could not retrieve stdoutpipe " + err.Error()) return diff --git a/workflow_builder/admiralty_setter.go b/workflow_builder/admiralty_setter.go index f4af8ef..a2f6c67 100644 --- a/workflow_builder/admiralty_setter.go +++ b/workflow_builder/admiralty_setter.go @@ -8,7 +8,7 @@ import ( oclib "cloud.o-forge.io/core/oc-lib" "cloud.o-forge.io/core/oc-lib/models/peer" - "cloud.o-forge.io/core/oc-lib/tools" + tools "cloud.o-forge.io/core/oc-lib/tools" ) type AdmiraltySetter struct { diff --git a/workflow_builder/argo_builder.go b/workflow_builder/argo_builder.go index 9cf71a8..f942c9d 100644 --- a/workflow_builder/argo_builder.go +++ b/workflow_builder/argo_builder.go @@ -433,7 +433,7 @@ func (b *ArgoBuilder) CompleteBuild(executionsId string) (string, error) { if len(template.Metadata.Annotations) > 0 { if resp, ok := template.Metadata.Annotations["multicluster.admiralty.io/clustername"]; ok { fmt.Println(resp) - template.Metadata.Annotations["multicluster.admiralty.io/clustername"] = setter.NodeName + template.Metadata.Annotations["multicluster.admiralty.io/clustername"] = "target-" + conf.GetConfig().ExecutionID } } } From aa20edaf252d3fc07b666049673e1afd207f3195 Mon Sep 17 00:00:00 2001 From: pb Date: Tue, 8 Apr 2025 10:11:52 +0200 Subject: [PATCH 03/19] resolving commit error on main --- workflow_builder/admiralty_setter.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/workflow_builder/admiralty_setter.go b/workflow_builder/admiralty_setter.go index a2f6c67..9ab4a9f 100644 --- a/workflow_builder/admiralty_setter.go +++ b/workflow_builder/admiralty_setter.go @@ -51,13 +51,17 @@ func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID st }, }, ) - fmt.Println("Creating source in", remotePeerID, " ns-" + s.Id) - _ = s.callRemoteExecution(remotePeer, http.StatusCreated,caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil, true) - kubeconfig := s.getKubeconfig(remotePeer, caller) - _ = s.callRemoteExecution(localPeer, http.StatusCreated, caller,s.Id, tools.ADMIRALTY_SECRET, tools.POST,kubeconfig, true) - _ = s.callRemoteExecution(localPeer,http.StatusCreated,caller,s.Id,tools.ADMIRALTY_TARGET,tools.POST, nil, true) - s.checkNodeStatus(localPeer,caller) + logger.Info().Msg(" Creating the Admiralty Source on " + remotePeerID + " ns-" + s.Id + "\n\n") + _ = s.callRemoteExecution(remotePeer, http.StatusCreated,caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil) + logger.Info().Msg(" Retrieving kubeconfig with the secret on " + remotePeerID + " ns-" + s.Id + 
"\n\n") + kubeconfig := s.getKubeconfig(remotePeer, caller) + logger.Info().Msg(" Creating a secret from the kubeconfig " + localPeerID + " ns-" + s.Id + "\n\n") + _ = s.callRemoteExecution(localPeer, http.StatusCreated, caller,s.Id, tools.ADMIRALTY_SECRET, tools.POST,kubeconfig) + logger.Info().Msg(" Creating the Admiralty Target on " + localPeerID + " ns-" + s.Id + "\n\n") + _ = s.callRemoteExecution(localPeer,http.StatusCreated,caller,s.Id,tools.ADMIRALTY_TARGET,tools.POST, nil) + logger.Info().Msg(" Checking for the creation of the admiralty node on " + localPeerID + " ns-" + s.Id + "\n\n") + _ = s.callRemoteExecution(localPeer,http.StatusOK,caller,s.Id,tools.ADMIRALTY_NODES,tools.GET, nil) return nil } From df09585cc955da0de76a9139c0a73f078a51519d Mon Sep 17 00:00:00 2001 From: pb Date: Tue, 8 Apr 2025 10:24:30 +0200 Subject: [PATCH 04/19] resolving some merge error --- workflow_builder/admiralty_setter.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/workflow_builder/admiralty_setter.go b/workflow_builder/admiralty_setter.go index 9ab4a9f..9c1b0c7 100644 --- a/workflow_builder/admiralty_setter.go +++ b/workflow_builder/admiralty_setter.go @@ -53,15 +53,15 @@ func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID st ) logger.Info().Msg(" Creating the Admiralty Source on " + remotePeerID + " ns-" + s.Id + "\n\n") - _ = s.callRemoteExecution(remotePeer, http.StatusCreated,caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil) + _ = s.callRemoteExecution(remotePeer, http.StatusCreated,caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil, true) logger.Info().Msg(" Retrieving kubeconfig with the secret on " + remotePeerID + " ns-" + s.Id + "\n\n") kubeconfig := s.getKubeconfig(remotePeer, caller) logger.Info().Msg(" Creating a secret from the kubeconfig " + localPeerID + " ns-" + s.Id + "\n\n") - _ = s.callRemoteExecution(localPeer, http.StatusCreated, caller,s.Id, tools.ADMIRALTY_SECRET, tools.POST,kubeconfig) + _ = s.callRemoteExecution(localPeer, http.StatusCreated, caller,s.Id, tools.ADMIRALTY_SECRET, tools.POST,kubeconfig, true) logger.Info().Msg(" Creating the Admiralty Target on " + localPeerID + " ns-" + s.Id + "\n\n") - _ = s.callRemoteExecution(localPeer,http.StatusCreated,caller,s.Id,tools.ADMIRALTY_TARGET,tools.POST, nil) + _ = s.callRemoteExecution(localPeer,http.StatusCreated,caller,s.Id,tools.ADMIRALTY_TARGET,tools.POST, nil, true) logger.Info().Msg(" Checking for the creation of the admiralty node on " + localPeerID + " ns-" + s.Id + "\n\n") - _ = s.callRemoteExecution(localPeer,http.StatusOK,caller,s.Id,tools.ADMIRALTY_NODES,tools.GET, nil) + s.checkNodeStatus(localPeer,caller) return nil } From 4963284056e3fc57a3f3a450cad39a05349aa7b3 Mon Sep 17 00:00:00 2001 From: pb Date: Tue, 8 Apr 2025 17:21:59 +0200 Subject: [PATCH 05/19] reimplemented logging of wf when executed locally --- main.go | 28 ++++++---- models/local_argo_pods.go | 112 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 128 insertions(+), 12 deletions(-) create mode 100644 models/local_argo_pods.go diff --git a/main.go b/main.go index dc6658a..5b49c19 100644 --- a/main.go +++ b/main.go @@ -143,8 +143,8 @@ func executeOutside(argo_file_path string, stepMax int) { var stdout, stderr io.ReadCloser // var stderr io.ReadCloser var err error - logger.Debug().Msg("executing :" + "argo submit --log " + argo_file_path + " --serviceaccount sa-" + conf.GetConfig().ExecutionID + " -n " + conf.GetConfig().ExecutionID) - cmd := exec.Command("argo", "submit", 
"--log", argo_file_path, "--serviceaccount", "sa-"+conf.GetConfig().ExecutionID, "-n", conf.GetConfig().ExecutionID ) + logger.Debug().Msg("executing :" + "argo submit --watch " + argo_file_path + " --serviceaccount sa-" + conf.GetConfig().ExecutionID + " -n " + conf.GetConfig().ExecutionID) + cmd := exec.Command("argo", "submit", "--watch", argo_file_path, "--serviceaccount", "sa-"+conf.GetConfig().ExecutionID, "-n", conf.GetConfig().ExecutionID ) if stdout, err = cmd.StdoutPipe(); err != nil { wf_logger.Error().Msg("Could not retrieve stdoutpipe " + err.Error()) return @@ -154,10 +154,11 @@ func executeOutside(argo_file_path string, stepMax int) { } var wg sync.WaitGroup split := strings.Split(argo_file_path, "_") - argoLogs := models.NewArgoLogs(split[0], "argo", stepMax) + argoLogs := models.NewArgoLogs(split[0], conf.GetConfig().ExecutionID, stepMax) argoLogs.StartStepRecording(argoLogs.NewWatch(), wf_logger) - argoLogs.IsStreaming = true - go logWorkflow(argo_file_path, stepMax, stdout, argoLogs.NewWatch(), argoLogs.NewWatch(), argoLogs, []string{}, &wg) + argoLogs.IsStreaming = true // Used to determine wether or not the logs are read from a docker container or on localhost + // go logWorkflow(argo_file_path, stepMax, stdout, argoLogs.NewWatch(), argoLogs.NewWatch(), argoLogs, []string{}, &wg) + go models.LogLocalWorkflow(stdout,&wg) if err := cmd.Wait(); err != nil { wf_logger.Error().Msg("Could not execute argo submit") @@ -167,6 +168,10 @@ func executeOutside(argo_file_path string, stepMax int) { wg.Wait() } +// !!!! BUGGED !!!! +// Should be refactored to create a function dedicated to logging output from execution in a container +// LogLocalWorkflow() has been implemented to be used when oc-monitord is executed locally + // We could improve this function by creating an object with the same attribute as the output // and only send a new log if the current object has different values than the previous func logWorkflow(argo_file_path string, stepMax int, pipe io.ReadCloser, @@ -183,12 +188,8 @@ func logWorkflow(argo_file_path string, stepMax int, pipe io.ReadCloser, wg.Add(1) } seeit++ - } else if count == 0 { - if argoLogs.IsStreaming { - continue - } else { - break - } + } else if count == 0 && !argoLogs.IsStreaming { + break } if count == 1 { see = log @@ -202,7 +203,7 @@ func logWorkflow(argo_file_path string, stepMax int, pipe io.ReadCloser, current_watch.Logs = append(current_watch.Logs, strings.ReplaceAll(log, "\"", "")) } count++ - if strings.Contains(log, "sub-process exited") { + if strings.Contains(log, "sub-process exited") || argoLogs.IsStreaming { current_watch = argoLogs.StopStepRecording(current_watch) argoLogs.Seen = append(argoLogs.Seen, see) if checkStatus(current_watch, previous_watch, argoLogs) { @@ -223,6 +224,9 @@ func logWorkflow(argo_file_path string, stepMax int, pipe io.ReadCloser, } previous_watch = current_watch current_watch = &models.ArgoWatch{} + if argoLogs.IsStreaming { + current_watch.Logs = []string{} + } } } } diff --git a/models/local_argo_pods.go b/models/local_argo_pods.go new file mode 100644 index 0000000..0fbcef3 --- /dev/null +++ b/models/local_argo_pods.go @@ -0,0 +1,112 @@ +package models + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "oc-monitord/conf" + "strings" + "sync" + + "cloud.o-forge.io/core/oc-lib/logs" + "github.com/rs/zerolog" +) + +var logger zerolog.Logger +var wf_logger zerolog.Logger + +// Take the slice of string that make up one round of stderr outputs from the --watch option in argo submit +func 
NewLocalArgoLogs(inputs []string) *ArgoWatch { + var workflow ArgoWatch + + for _, input := range inputs { + line := strings.TrimSpace(input) + if line == "" { + continue + } + switch { + case strings.HasPrefix(line, "Name:"): + workflow.Name = parseValue(line) + case strings.HasPrefix(line, "Namespace:"): + workflow.Namespace = parseValue(line) + case strings.HasPrefix(line, "Status:"): + workflow.Status = parseValue(line) + case strings.HasPrefix(line, "PodRunning"): + workflow.PodRunning = parseBoolValue(line) + case strings.HasPrefix(line, "Completed"): + workflow.Completed = parseBoolValue(line) + case strings.HasPrefix(line, "Created:"): + workflow.Created = parseValue(line) + case strings.HasPrefix(line, "Started:"): + workflow.Started = parseValue(line) + case strings.HasPrefix(line, "Duration:"): + workflow.Duration = parseValue(line) + case strings.HasPrefix(line, "Progress:"): + workflow.Progress = parseValue(line) + } + } + + return &workflow +} + + + +func parseValue(line string) string { + parts := strings.SplitN(line, ":", 2) + if len(parts) < 2 { + return "" + } + return strings.TrimSpace(parts[1]) +} + +func parseBoolValue(line string) bool { + value := parseValue(line) + return value == "True" +} + +func LogLocalWorkflow(pipe io.ReadCloser, wg *sync.WaitGroup) { + logger = logs.GetLogger() + + logger.Debug().Msg("created wf_logger") + fmt.Println("created wf_logger") + wf_logger = logger.With().Str("argo_name", "MON WF DE TEST").Str("workflow_id", conf.GetConfig().WorkflowID).Str("workflow_execution_id", conf.GetConfig().ExecutionID).Logger() + + var current_watch, previous_watch ArgoWatch + + watch_output := make([]string, 0) + scanner := bufio.NewScanner(pipe) + for scanner.Scan() { + log := scanner.Text() + watch_output = append(watch_output, log) + + if strings.HasPrefix(log, "Progress:") { + + current_watch = *NewLocalArgoLogs(watch_output) + workflowName := current_watch.Name + if !current_watch.Equals(&previous_watch) { + wg.Add(1) + // checkStatus(current_watch.Status, previous_watch.Status) + jsonified, err := json.Marshal(current_watch) + if err != nil { + logger.Error().Msg("Could not create watch log for " + workflowName) + } + wf_logger.Info().Msg(string(jsonified)) + previous_watch = current_watch + current_watch = ArgoWatch{} + wg.Done() + } + } + } +} + +// Debug, no logs sent +// func logPods(pipe io.ReadCloser, name string) { +// pods_logger = wf_logger.With().Str("pod_name", name).Logger() +// scanner := bufio.NewScanner(pipe) +// for scanner.Scan() { +// log := scanner.Text() +// pods_logger.Info().Msg(log) +// } + +// } From 9a17623cabce7915d6c0a7d45b05c54beff37737 Mon Sep 17 00:00:00 2001 From: pb Date: Tue, 8 Apr 2025 17:22:43 +0200 Subject: [PATCH 06/19] added logging --- workflow_builder/admiralty_setter.go | 4 ++++ workflow_builder/argo_builder.go | 2 ++ 2 files changed, 6 insertions(+) diff --git a/workflow_builder/admiralty_setter.go b/workflow_builder/admiralty_setter.go index 9c1b0c7..bad8d16 100644 --- a/workflow_builder/admiralty_setter.go +++ b/workflow_builder/admiralty_setter.go @@ -7,10 +7,12 @@ import ( "time" oclib "cloud.o-forge.io/core/oc-lib" + "cloud.o-forge.io/core/oc-lib/logs" "cloud.o-forge.io/core/oc-lib/models/peer" tools "cloud.o-forge.io/core/oc-lib/tools" ) + type AdmiraltySetter struct { Id string // ID to identify the execution, correspond to workflow_executions id NodeName string // Allows to retrieve the name of the node used for this execution on each peer {"peerId": "nodeName"} @@ -18,6 +20,8 @@ type AdmiraltySetter 
struct { func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID string) error { + logger = logs.GetLogger() + data := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"",localPeerID,nil,nil).LoadOne(remotePeerID) if data.Code != 200 { logger.Error().Msg("Error while trying to instantiate remote peer " + remotePeerID) diff --git a/workflow_builder/argo_builder.go b/workflow_builder/argo_builder.go index f942c9d..5631c26 100644 --- a/workflow_builder/argo_builder.go +++ b/workflow_builder/argo_builder.go @@ -14,6 +14,7 @@ import ( "time" oclib "cloud.o-forge.io/core/oc-lib" + "cloud.o-forge.io/core/oc-lib/logs" "cloud.o-forge.io/core/oc-lib/models/common/enum" "cloud.o-forge.io/core/oc-lib/models/resources" w "cloud.o-forge.io/core/oc-lib/models/workflow" @@ -62,6 +63,7 @@ type Spec struct { // TODO: found on a processing instance linked to storage // add s3, gcs, azure, etc if needed on a link between processing and storage func (b *ArgoBuilder) CreateDAG(namespace string, write bool) ( int, []string, []string, error) { + logger = logs.GetLogger() fmt.Println("Creating DAG", b.OriginWorkflow.Graph.Items) // handle services by checking if there is only one processing with hostname and port firstItems, lastItems, volumes := b.createTemplates(namespace) From 77a9b0770eb53af8330c52a728b74d7644905fba Mon Sep 17 00:00:00 2001 From: pb Date: Tue, 8 Apr 2025 17:23:01 +0200 Subject: [PATCH 07/19] added comments --- models/argo_logs.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/models/argo_logs.go b/models/argo_logs.go index 1796813..2f7dcf6 100644 --- a/models/argo_logs.go +++ b/models/argo_logs.go @@ -11,6 +11,7 @@ import ( "github.com/rs/zerolog" ) +// An object to monitor the logs generated by a specific pod from a workflow execution type ArgoWatch struct { Name string Namespace string @@ -47,6 +48,7 @@ func NewArgoLogs(name string, namespace string, stepMax int) *ArgoLogs { } } +// An object to monitor and log the output of an argo submit type ArgoLogs struct { Name string Namespace string From 5d8143c93e7ae2543907ca349d385c63c4b9d1a2 Mon Sep 17 00:00:00 2001 From: pb Date: Wed, 9 Apr 2025 09:31:48 +0200 Subject: [PATCH 08/19] renamed file --- models/{local_argo_pods.go => local_argo_logs.go} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename models/{local_argo_pods.go => local_argo_logs.go} (100%) diff --git a/models/local_argo_pods.go b/models/local_argo_logs.go similarity index 100% rename from models/local_argo_pods.go rename to models/local_argo_logs.go From c31184e2ec66a73765b2c0bbe7a743c6080bff85 Mon Sep 17 00:00:00 2001 From: pb Date: Wed, 9 Apr 2025 18:59:37 +0200 Subject: [PATCH 09/19] Implemented logging of local execution of argo submit --watch and logs produced by pods --- main.go | 83 ++++++++++++++++++++++++++++----------- models/local_argo_logs.go | 47 ++++++++++++++++------ 2 files changed, 95 insertions(+), 35 deletions(-) diff --git a/main.go b/main.go index 5b49c19..04d68c9 100644 --- a/main.go +++ b/main.go @@ -12,6 +12,7 @@ import ( "slices" "strings" "sync" + "time" "oc-monitord/conf" "oc-monitord/models" @@ -44,6 +45,7 @@ import ( var logger zerolog.Logger var wf_logger zerolog.Logger +var pods_logger zerolog.Logger var parser argparse.Parser var workflowName string @@ -84,7 +86,7 @@ func main() { err := new_wf.LoadFrom(conf.GetConfig().WorkflowID, conf.GetConfig().PeerID) if err != nil { - + logger.Error().Msg("Could not retrieve workflow " + conf.GetConfig().WorkflowID + " from oc-catalog API") } @@ -94,7 +96,6 @@ func main() { 
logger.Error().Msg(err.Error()) } - argo_file_path, err := builder.CompleteBuild(exec.ExecutionsID) if err != nil { logger.Error().Msg(err.Error()) @@ -109,7 +110,7 @@ func main() { if conf.GetConfig().KubeHost == "" { // Not in a k8s environment, get conf from parameters fmt.Println("Executes outside of k8s") - executeOutside(argo_file_path, stepMax) + executeOutside(argo_file_path, stepMax, builder.Workflow) } else { // Executed in a k8s environment fmt.Println("Executes inside a k8s") @@ -138,33 +139,68 @@ func executeInside(execID string, ns string, argo_file_path string, stepMax int) } -func executeOutside(argo_file_path string, stepMax int) { - // var stdout, stderr, stdout_logs, stderr_logs io.ReadCloser - var stdout, stderr io.ReadCloser +func executeOutside(argo_file_path string, stepMax int, workflow workflow_builder.Workflow) { + // var stdoutSubmit, stderrSubmit, stdout_logs, stderr_logs io.ReadCloser + var stdoutSubmit, stderrSubmit io.ReadCloser + var stdoutLogs, stderrLogs io.ReadCloser // var stderr io.ReadCloser + var wg sync.WaitGroup var err error - logger.Debug().Msg("executing :" + "argo submit --watch " + argo_file_path + " --serviceaccount sa-" + conf.GetConfig().ExecutionID + " -n " + conf.GetConfig().ExecutionID) - cmd := exec.Command("argo", "submit", "--watch", argo_file_path, "--serviceaccount", "sa-"+conf.GetConfig().ExecutionID, "-n", conf.GetConfig().ExecutionID ) - if stdout, err = cmd.StdoutPipe(); err != nil { + + logger.Debug().Msg("executing :" + "argo submit --watch " + argo_file_path + " --serviceaccount sa-" + conf.GetConfig().ExecutionID + " -n " + conf.GetConfig().ExecutionID) + + cmdSubmit := exec.Command("argo", "submit", "--watch", argo_file_path, "--serviceaccount", "sa-"+conf.GetConfig().ExecutionID, "-n", conf.GetConfig().ExecutionID) + if stdoutSubmit, err = cmdSubmit.StdoutPipe(); err != nil { wf_logger.Error().Msg("Could not retrieve stdoutpipe " + err.Error()) return } - if err := cmd.Start(); err != nil { - panic(err) - } - var wg sync.WaitGroup - split := strings.Split(argo_file_path, "_") - argoLogs := models.NewArgoLogs(split[0], conf.GetConfig().ExecutionID, stepMax) - argoLogs.StartStepRecording(argoLogs.NewWatch(), wf_logger) - argoLogs.IsStreaming = true // Used to determine wether or not the logs are read from a docker container or on localhost - // go logWorkflow(argo_file_path, stepMax, stdout, argoLogs.NewWatch(), argoLogs.NewWatch(), argoLogs, []string{}, &wg) - go models.LogLocalWorkflow(stdout,&wg) - if err := cmd.Wait(); err != nil { - wf_logger.Error().Msg("Could not execute argo submit") - wf_logger.Error().Msg(err.Error() + bufio.NewScanner(stderr).Text()) + // //======== Code block that implemented a method that logs both locally and container executed wf + // // Need to be improved, did not log well for local executions + // split := strings.Split(argo_file_path, "_") + // argoLogs := models.NewArgoLogs(split[0], conf.GetConfig().ExecutionID, stepMax) + // argoLogs.StartStepRecording(argoLogs.NewWatch(), wf_logger) + // argoLogs.IsStreaming = true // Used to determine wether or not the logs are read from a docker container or on localhost + // // go logWorkflow(argo_file_path, stepMax, stdout, argoLogs.NewWatch(), argoLogs.NewWatch(), argoLogs, []string{}, &wg) + // // ======= + + var steps []string + for _, template := range workflow.Spec.Templates { + steps = append(steps, template.Name) + } + + cmdLogs := exec.Command("argo", "logs", "oc-monitor-"+workflowName, "-n", conf.GetConfig().ExecutionID, "--follow") + if 
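The rewritten `executeOutside` drives two argo CLI processes at once: `argo submit --watch` for workflow-level status and `argo logs --follow` for per-pod output, each read from its stdout pipe in a goroutine. A stripped-down sketch of that pattern follows, with hard-coded file and namespace names standing in for the values taken from the configuration; it only does useful work where the `argo` CLI is installed.

```go
package main

import (
	"bufio"
	"fmt"
	"os/exec"
	"sync"
	"time"
)

// streamLines drains one command's stdout and tags each line with a label.
func streamLines(label string, sc *bufio.Scanner, wg *sync.WaitGroup) {
	defer wg.Done()
	for sc.Scan() {
		fmt.Printf("[%s] %s\n", label, sc.Text())
	}
}

func main() {
	ns := "exec-ns" // placeholder for conf.GetConfig().ExecutionID
	submit := exec.Command("argo", "submit", "--watch", "wf.yml",
		"--serviceaccount", "sa-"+ns, "-n", ns)
	follow := exec.Command("argo", "logs", "oc-monitor-demo", "-n", ns,
		"--follow", "--no-color")

	submitOut, err := submit.StdoutPipe()
	if err != nil {
		panic(err)
	}
	followOut, err := follow.StdoutPipe()
	if err != nil {
		panic(err)
	}

	var wg sync.WaitGroup
	wg.Add(2)
	go streamLines("submit", bufio.NewScanner(submitOut), &wg)
	go streamLines("logs", bufio.NewScanner(followOut), &wg)

	if err := submit.Start(); err != nil {
		panic(err)
	}
	// Give Argo time to create the Workflow object before attaching to its
	// pod logs (the patch sleeps 5 seconds for the same reason).
	time.Sleep(5 * time.Second)
	if err := follow.Start(); err != nil {
		panic(err)
	}

	_ = follow.Wait()
	_ = submit.Wait()
	wg.Wait()
}
```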
stdoutLogs, err = cmdLogs.StdoutPipe(); err != nil { + wf_logger.Error().Msg("Could not retrieve stdoutpipe for 'argo logs'" + err.Error()) + return + } + + go models.LogLocalWorkflow(workflowName, stdoutSubmit, &wg) + go models.LogPods(stdoutLogs, steps, &wg) + + fmt.Println("Starting argo submit") + if err := cmdSubmit.Start(); err != nil { + wf_logger.Error().Msg("Could not start argo submit") + wf_logger.Error().Msg(err.Error() + bufio.NewScanner(stderrSubmit).Text()) updateStatus("fatal", "") } + + time.Sleep(5 * time.Second) + + fmt.Println("Running argo logs") + if err := cmdLogs.Run(); err != nil { + wf_logger.Error().Msg("Could not run 'argo logs oc-monitor-" + workflowName + " -n " + conf.GetConfig().ExecutionID + " --follow") + wf_logger.Fatal().Msg(err.Error() + bufio.NewScanner(stderrLogs).Text()) + + } + + fmt.Println("Waiting argo submit") + if err := cmdSubmit.Wait(); err != nil { + wf_logger.Error().Msg("Could not execute argo submit") + wf_logger.Error().Msg(err.Error() + bufio.NewScanner(stderrSubmit).Text()) + updateStatus("fatal", "") + } + wg.Wait() } @@ -188,7 +224,7 @@ func logWorkflow(argo_file_path string, stepMax int, pipe io.ReadCloser, wg.Add(1) } seeit++ - } else if count == 0 && !argoLogs.IsStreaming { + } else if count == 0 && !argoLogs.IsStreaming { break } if count == 1 { @@ -326,6 +362,7 @@ func getContainerName(argo_file string) string { re := regexp.MustCompile(regex) container_name := re.FindString(argo_file) + return container_name } diff --git a/models/local_argo_logs.go b/models/local_argo_logs.go index 0fbcef3..6b658d8 100644 --- a/models/local_argo_logs.go +++ b/models/local_argo_logs.go @@ -14,7 +14,8 @@ import ( ) var logger zerolog.Logger -var wf_logger zerolog.Logger +var wfLogger zerolog.Logger + // Take the slice of string that make up one round of stderr outputs from the --watch option in argo submit func NewLocalArgoLogs(inputs []string) *ArgoWatch { @@ -65,12 +66,12 @@ func parseBoolValue(line string) bool { return value == "True" } -func LogLocalWorkflow(pipe io.ReadCloser, wg *sync.WaitGroup) { +func LogLocalWorkflow(wfName string, pipe io.ReadCloser, wg *sync.WaitGroup) { logger = logs.GetLogger() logger.Debug().Msg("created wf_logger") fmt.Println("created wf_logger") - wf_logger = logger.With().Str("argo_name", "MON WF DE TEST").Str("workflow_id", conf.GetConfig().WorkflowID).Str("workflow_execution_id", conf.GetConfig().ExecutionID).Logger() + wfLogger = logger.With().Str("argo_name", wfName).Str("workflow_id", conf.GetConfig().WorkflowID).Str("workflow_execution_id", conf.GetConfig().ExecutionID).Logger() var current_watch, previous_watch ArgoWatch @@ -80,6 +81,7 @@ func LogLocalWorkflow(pipe io.ReadCloser, wg *sync.WaitGroup) { log := scanner.Text() watch_output = append(watch_output, log) + // Log the progress of the WF if strings.HasPrefix(log, "Progress:") { current_watch = *NewLocalArgoLogs(watch_output) @@ -91,22 +93,43 @@ func LogLocalWorkflow(pipe io.ReadCloser, wg *sync.WaitGroup) { if err != nil { logger.Error().Msg("Could not create watch log for " + workflowName) } - wf_logger.Info().Msg(string(jsonified)) + wfLogger.Info().Msg(string(jsonified)) previous_watch = current_watch current_watch = ArgoWatch{} wg.Done() } } + + + } } // Debug, no logs sent -// func logPods(pipe io.ReadCloser, name string) { -// pods_logger = wf_logger.With().Str("pod_name", name).Logger() -// scanner := bufio.NewScanner(pipe) -// for scanner.Scan() { -// log := scanner.Text() -// pods_logger.Info().Msg(log) -// } +func LogPods(pipe 
io.ReadCloser, steps []string, wg *sync.WaitGroup) { + scanner := bufio.NewScanner(pipe) + for scanner.Scan() { + fmt.Println("new line") + wg.Add(1) + var podLogger zerolog.Logger + line := scanner.Text() + podName := strings.Split(line, ":")[0] + podLogger = wfLogger.With().Str("step_name", getStepName(podName, steps)).Logger() + log := strings.Split(line,podName+":")[1] + podLogger.Info().Msg(log) + wg.Done() + } + +} + +func getStepName(podName string, steps []string) string { + + for _, step := range(steps) { + if strings.Contains(podName,step){ + return step + } + } + + return "error" +} -// } From 27fd603e36b32695844a6144c365f41162248425 Mon Sep 17 00:00:00 2001 From: pb Date: Thu, 10 Apr 2025 11:10:16 +0200 Subject: [PATCH 10/19] logs for pods are better formatted --- main.go | 9 +++++---- models/argo_logs.go | 14 ++++++++++++++ models/local_argo_logs.go | 17 ++++++++++++----- 3 files changed, 31 insertions(+), 9 deletions(-) diff --git a/main.go b/main.go index 04d68c9..b2ae16b 100644 --- a/main.go +++ b/main.go @@ -147,7 +147,7 @@ func executeOutside(argo_file_path string, stepMax int, workflow workflow_builde var wg sync.WaitGroup var err error - logger.Debug().Msg("executing :" + "argo submit --watch " + argo_file_path + " --serviceaccount sa-" + conf.GetConfig().ExecutionID + " -n " + conf.GetConfig().ExecutionID) + logger.Debug().Msg("executing :" + "argo submit --watch " + argo_file_path + " --serviceaccount sa-" + conf.GetConfig().ExecutionID + " -n " + conf.GetConfig().ExecutionID ) cmdSubmit := exec.Command("argo", "submit", "--watch", argo_file_path, "--serviceaccount", "sa-"+conf.GetConfig().ExecutionID, "-n", conf.GetConfig().ExecutionID) if stdoutSubmit, err = cmdSubmit.StdoutPipe(); err != nil { @@ -169,14 +169,14 @@ func executeOutside(argo_file_path string, stepMax int, workflow workflow_builde steps = append(steps, template.Name) } - cmdLogs := exec.Command("argo", "logs", "oc-monitor-"+workflowName, "-n", conf.GetConfig().ExecutionID, "--follow") + cmdLogs := exec.Command("argo", "logs", "oc-monitor-"+workflowName, "-n", conf.GetConfig().ExecutionID, "--follow","--no-color") if stdoutLogs, err = cmdLogs.StdoutPipe(); err != nil { wf_logger.Error().Msg("Could not retrieve stdoutpipe for 'argo logs'" + err.Error()) return } go models.LogLocalWorkflow(workflowName, stdoutSubmit, &wg) - go models.LogPods(stdoutLogs, steps, &wg) + go models.LogPods(workflowName, stdoutLogs, steps, &wg) fmt.Println("Starting argo submit") if err := cmdSubmit.Start(); err != nil { @@ -189,7 +189,8 @@ func executeOutside(argo_file_path string, stepMax int, workflow workflow_builde fmt.Println("Running argo logs") if err := cmdLogs.Run(); err != nil { - wf_logger.Error().Msg("Could not run 'argo logs oc-monitor-" + workflowName + " -n " + conf.GetConfig().ExecutionID + " --follow") + wf_logger.Error().Msg("Could not run '" + strings.Join(cmdLogs.Args, " ") + "'") + wf_logger.Fatal().Msg(err.Error() + bufio.NewScanner(stderrLogs).Text()) } diff --git a/models/argo_logs.go b/models/argo_logs.go index 2f7dcf6..ecf150e 100644 --- a/models/argo_logs.go +++ b/models/argo_logs.go @@ -145,3 +145,17 @@ func (a *ArgoLogs) StopStepRecording(current *ArgoWatch) *ArgoWatch { current.Status = status return current } + +type ArgoPodLog struct { + PodName string + Step string + Message string +} + +func NewArgoPodLog(name string, step string, msg string) ArgoPodLog { + return ArgoPodLog{ + PodName: name, + Step: step, + Message: msg, + } +} \ No newline at end of file diff --git 
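The `LogPods` reader above splits each line coming from `argo logs --follow --no-color` into a pod name and a message, maps the pod back to a workflow step by substring match against the template names, and emits the result as a JSON `ArgoPodLog`. A small sketch of that parsing with a canned input line instead of a live pipe; `stepFor` mirrors the patch's `getStepName`, including its `"error"` fallback.

```go
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"strings"
)

type ArgoPodLog struct {
	PodName string
	Step    string
	Message string
}

// stepFor recovers the workflow step from the pod name by substring match.
func stepFor(podName string, steps []string) string {
	for _, s := range steps {
		if strings.Contains(podName, s) {
			return s
		}
	}
	return "error"
}

func main() {
	steps := []string{"pre-process", "train"}
	raw := "oc-monitor-abc-train-123: epoch 1 done\n"

	sc := bufio.NewScanner(strings.NewReader(raw))
	for sc.Scan() {
		pod, msg, ok := strings.Cut(sc.Text(), ":")
		if !ok {
			continue // not a "<pod>: <message>" line
		}
		entry := ArgoPodLog{
			PodName: pod,
			Step:    stepFor(pod, steps),
			Message: strings.TrimSpace(msg),
		}
		b, _ := json.Marshal(entry)
		fmt.Println(string(b))
	}
}
```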
a/models/local_argo_logs.go b/models/local_argo_logs.go index 6b658d8..234becb 100644 --- a/models/local_argo_logs.go +++ b/models/local_argo_logs.go @@ -18,7 +18,7 @@ var wfLogger zerolog.Logger // Take the slice of string that make up one round of stderr outputs from the --watch option in argo submit -func NewLocalArgoLogs(inputs []string) *ArgoWatch { +func NewLocalArgoWatch(inputs []string) *ArgoWatch { var workflow ArgoWatch for _, input := range inputs { @@ -84,7 +84,7 @@ func LogLocalWorkflow(wfName string, pipe io.ReadCloser, wg *sync.WaitGroup) { // Log the progress of the WF if strings.HasPrefix(log, "Progress:") { - current_watch = *NewLocalArgoLogs(watch_output) + current_watch = *NewLocalArgoWatch(watch_output) workflowName := current_watch.Name if !current_watch.Equals(&previous_watch) { wg.Add(1) @@ -106,17 +106,24 @@ func LogLocalWorkflow(wfName string, pipe io.ReadCloser, wg *sync.WaitGroup) { } // Debug, no logs sent -func LogPods(pipe io.ReadCloser, steps []string, wg *sync.WaitGroup) { +func LogPods(wfName string, pipe io.ReadCloser, steps []string, wg *sync.WaitGroup) { scanner := bufio.NewScanner(pipe) for scanner.Scan() { + var podLogger zerolog.Logger fmt.Println("new line") wg.Add(1) - var podLogger zerolog.Logger + line := scanner.Text() podName := strings.Split(line, ":")[0] podLogger = wfLogger.With().Str("step_name", getStepName(podName, steps)).Logger() log := strings.Split(line,podName+":")[1] - podLogger.Info().Msg(log) + podLog := NewArgoPodLog(wfName,podName,log) + jsonifiedLog, err := json.Marshal(podLog) + if err != nil { + podLogger.Fatal().Msg(err.Error()) + } + + podLogger.Info().Msg(string(jsonifiedLog)) wg.Done() } From 9aefa18ea83b33b5911584c098b8e053f68326a8 Mon Sep 17 00:00:00 2001 From: pb Date: Fri, 11 Apr 2025 17:20:41 +0200 Subject: [PATCH 11/19] handle multiple response code for API calls --- workflow_builder/admiralty_setter.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/workflow_builder/admiralty_setter.go b/workflow_builder/admiralty_setter.go index bad8d16..2fbb780 100644 --- a/workflow_builder/admiralty_setter.go +++ b/workflow_builder/admiralty_setter.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "net/http" + "slices" "time" oclib "cloud.o-forge.io/core/oc-lib" @@ -57,13 +58,13 @@ func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID st ) logger.Info().Msg(" Creating the Admiralty Source on " + remotePeerID + " ns-" + s.Id + "\n\n") - _ = s.callRemoteExecution(remotePeer, http.StatusCreated,caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil, true) + _ = s.callRemoteExecution(remotePeer, []int{http.StatusCreated, http.StatusConflict},caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil, true) logger.Info().Msg(" Retrieving kubeconfig with the secret on " + remotePeerID + " ns-" + s.Id + "\n\n") kubeconfig := s.getKubeconfig(remotePeer, caller) logger.Info().Msg(" Creating a secret from the kubeconfig " + localPeerID + " ns-" + s.Id + "\n\n") - _ = s.callRemoteExecution(localPeer, http.StatusCreated, caller,s.Id, tools.ADMIRALTY_SECRET, tools.POST,kubeconfig, true) + _ = s.callRemoteExecution(localPeer, []int{http.StatusCreated}, caller,s.Id, tools.ADMIRALTY_SECRET, tools.POST,kubeconfig, true) logger.Info().Msg(" Creating the Admiralty Target on " + localPeerID + " ns-" + s.Id + "\n\n") - _ = s.callRemoteExecution(localPeer,http.StatusCreated,caller,s.Id,tools.ADMIRALTY_TARGET,tools.POST, nil, true) + _ = s.callRemoteExecution(localPeer,[]int{http.StatusCreated, 
http.StatusConflict},caller,s.Id,tools.ADMIRALTY_TARGET,tools.POST, nil, true) logger.Info().Msg(" Checking for the creation of the admiralty node on " + localPeerID + " ns-" + s.Id + "\n\n") s.checkNodeStatus(localPeer,caller) @@ -72,7 +73,7 @@ func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID st func (s *AdmiraltySetter) getKubeconfig(peer *peer.Peer, caller *tools.HTTPCaller) map[string]string { var kubedata map[string]string - _ = s.callRemoteExecution(peer, http.StatusOK, caller, s.Id, tools.ADMIRALTY_KUBECONFIG, tools.GET, nil, true) + _ = s.callRemoteExecution(peer, []int{http.StatusOK}, caller, s.Id, tools.ADMIRALTY_KUBECONFIG, tools.GET, nil, true) if caller.LastResults["body"] == nil || len(caller.LastResults["body"].([]byte)) == 0 { fmt.Println("Something went wrong when retrieving data from Get call for kubeconfig") panic(0) @@ -86,7 +87,7 @@ func (s *AdmiraltySetter) getKubeconfig(peer *peer.Peer, caller *tools.HTTPCalle return kubedata } -func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode int,caller *tools.HTTPCaller, dataID string, dt tools.DataType, method tools.METHOD, body interface{}, panicCode bool) *peer.PeerExecution { +func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode []int,caller *tools.HTTPCaller, dataID string, dt tools.DataType, method tools.METHOD, body interface{}, panicCode bool) *peer.PeerExecution { resp, err := peer.LaunchPeerExecution(peer.UUID, dataID, dt, method, body, caller) if err != nil { fmt.Println("Error when executing on peer at", peer.Url) @@ -94,7 +95,7 @@ func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode int,ca panic(0) } - if caller.LastResults["code"].(int) != expectedCode { + if !slices.Contains(expectedCode, caller.LastResults["code"].(int)) { fmt.Println("Didn't receive the expected code :", caller.LastResults["code"], "when expecting", expectedCode) if _, ok := caller.LastResults["body"]; ok { logger.Info().Msg(string(caller.LastResults["body"].([]byte))) @@ -127,7 +128,7 @@ func (s *AdmiraltySetter) storeNodeName(caller *tools.HTTPCaller){ func (s *AdmiraltySetter) checkNodeStatus(localPeer *peer.Peer, caller *tools.HTTPCaller){ for i := range(5) { time.Sleep(5 * time.Second) // let some time for kube to generate the node - _ = s.callRemoteExecution(localPeer,http.StatusOK,caller,s.Id,tools.ADMIRALTY_NODES,tools.GET, nil, false) + _ = s.callRemoteExecution(localPeer,[]int{http.StatusOK},caller,s.Id,tools.ADMIRALTY_NODES,tools.GET, nil, false) if caller.LastResults["code"] == 200 { s.storeNodeName(caller) return From cd804fbeb5a9753a16b47044eb04e218c85bb033 Mon Sep 17 00:00:00 2001 From: pb Date: Fri, 11 Apr 2025 17:20:41 +0200 Subject: [PATCH 12/19] handle multiple response code for API calls --- workflow_builder/admiralty_setter.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/workflow_builder/admiralty_setter.go b/workflow_builder/admiralty_setter.go index bad8d16..2fbb780 100644 --- a/workflow_builder/admiralty_setter.go +++ b/workflow_builder/admiralty_setter.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "net/http" + "slices" "time" oclib "cloud.o-forge.io/core/oc-lib" @@ -57,13 +58,13 @@ func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID st ) logger.Info().Msg(" Creating the Admiralty Source on " + remotePeerID + " ns-" + s.Id + "\n\n") - _ = s.callRemoteExecution(remotePeer, http.StatusCreated,caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil, true) + _ = 
s.callRemoteExecution(remotePeer, []int{http.StatusCreated, http.StatusConflict},caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil, true) logger.Info().Msg(" Retrieving kubeconfig with the secret on " + remotePeerID + " ns-" + s.Id + "\n\n") kubeconfig := s.getKubeconfig(remotePeer, caller) logger.Info().Msg(" Creating a secret from the kubeconfig " + localPeerID + " ns-" + s.Id + "\n\n") - _ = s.callRemoteExecution(localPeer, http.StatusCreated, caller,s.Id, tools.ADMIRALTY_SECRET, tools.POST,kubeconfig, true) + _ = s.callRemoteExecution(localPeer, []int{http.StatusCreated}, caller,s.Id, tools.ADMIRALTY_SECRET, tools.POST,kubeconfig, true) logger.Info().Msg(" Creating the Admiralty Target on " + localPeerID + " ns-" + s.Id + "\n\n") - _ = s.callRemoteExecution(localPeer,http.StatusCreated,caller,s.Id,tools.ADMIRALTY_TARGET,tools.POST, nil, true) + _ = s.callRemoteExecution(localPeer,[]int{http.StatusCreated, http.StatusConflict},caller,s.Id,tools.ADMIRALTY_TARGET,tools.POST, nil, true) logger.Info().Msg(" Checking for the creation of the admiralty node on " + localPeerID + " ns-" + s.Id + "\n\n") s.checkNodeStatus(localPeer,caller) @@ -72,7 +73,7 @@ func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID st func (s *AdmiraltySetter) getKubeconfig(peer *peer.Peer, caller *tools.HTTPCaller) map[string]string { var kubedata map[string]string - _ = s.callRemoteExecution(peer, http.StatusOK, caller, s.Id, tools.ADMIRALTY_KUBECONFIG, tools.GET, nil, true) + _ = s.callRemoteExecution(peer, []int{http.StatusOK}, caller, s.Id, tools.ADMIRALTY_KUBECONFIG, tools.GET, nil, true) if caller.LastResults["body"] == nil || len(caller.LastResults["body"].([]byte)) == 0 { fmt.Println("Something went wrong when retrieving data from Get call for kubeconfig") panic(0) @@ -86,7 +87,7 @@ func (s *AdmiraltySetter) getKubeconfig(peer *peer.Peer, caller *tools.HTTPCalle return kubedata } -func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode int,caller *tools.HTTPCaller, dataID string, dt tools.DataType, method tools.METHOD, body interface{}, panicCode bool) *peer.PeerExecution { +func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode []int,caller *tools.HTTPCaller, dataID string, dt tools.DataType, method tools.METHOD, body interface{}, panicCode bool) *peer.PeerExecution { resp, err := peer.LaunchPeerExecution(peer.UUID, dataID, dt, method, body, caller) if err != nil { fmt.Println("Error when executing on peer at", peer.Url) @@ -94,7 +95,7 @@ func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode int,ca panic(0) } - if caller.LastResults["code"].(int) != expectedCode { + if !slices.Contains(expectedCode, caller.LastResults["code"].(int)) { fmt.Println("Didn't receive the expected code :", caller.LastResults["code"], "when expecting", expectedCode) if _, ok := caller.LastResults["body"]; ok { logger.Info().Msg(string(caller.LastResults["body"].([]byte))) @@ -127,7 +128,7 @@ func (s *AdmiraltySetter) storeNodeName(caller *tools.HTTPCaller){ func (s *AdmiraltySetter) checkNodeStatus(localPeer *peer.Peer, caller *tools.HTTPCaller){ for i := range(5) { time.Sleep(5 * time.Second) // let some time for kube to generate the node - _ = s.callRemoteExecution(localPeer,http.StatusOK,caller,s.Id,tools.ADMIRALTY_NODES,tools.GET, nil, false) + _ = s.callRemoteExecution(localPeer,[]int{http.StatusOK},caller,s.Id,tools.ADMIRALTY_NODES,tools.GET, nil, false) if caller.LastResults["code"] == 200 { s.storeNodeName(caller) return From 
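Patches 11 and 12 change `callRemoteExecution` to accept a slice of acceptable HTTP status codes, so that idempotent setup calls can treat 409 Conflict as success, while a boolean flag decides whether an unexpected code is fatal. A reduced sketch of that check, independent of the oc-lib caller; names here are illustrative.

```go
package main

import (
	"fmt"
	"net/http"
	"slices"
)

// checkResponse accepts any of the expected codes; otherwise it reports the
// body and, when abortOnError is set, stops the setup like the patch does.
func checkResponse(code int, body []byte, expected []int, abortOnError bool) error {
	if slices.Contains(expected, code) {
		return nil
	}
	err := fmt.Errorf("unexpected status %d (wanted one of %v): %s", code, expected, body)
	if abortOnError {
		panic(err)
	}
	return err
}

func main() {
	// 409 is tolerated here because re-creating an existing Admiralty
	// source or target is treated as a no-op rather than a failure.
	if err := checkResponse(http.StatusConflict, nil,
		[]int{http.StatusCreated, http.StatusConflict}, false); err != nil {
		fmt.Println(err)
	}
}
```

Accepting both 201 and 409 makes re-running the Admiralty setup against an already-provisioned peer converge instead of panicking on the first conflict.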
e2ceb6e58d5521aa9aee533c4dc941bf0351dd8e Mon Sep 17 00:00:00 2001 From: pb Date: Mon, 14 Apr 2025 18:20:49 +0200 Subject: [PATCH 13/19] Adapted some of the steps of the executeInside()'s method to work with the updated Admiralty environment, using execution id as namespace, serviceAccount naming convention and adding the serviceAccount in the workflow's YAML. Logging not working yet. --- README.md | 12 +++++++++--- conf/conf.go | 1 + main.go | 13 +++++++++---- tools/kubernetes.go | 3 ++- workflow_builder/argo_builder.go | 12 +++++++----- 5 files changed, 28 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 23a80f7..f71c870 100644 --- a/README.md +++ b/README.md @@ -48,11 +48,17 @@ In rules add a new entry : This command **must return "yes"** +# Notes features/admiralty-docker + +- When executing monitord as a container we need to change any url with "localhost" to the container's host IP. + + We can : + - declare a new parameter 'HOST_IP' + - decide that no peer can have "http://localhost" as its url and use an attribute from the peer object or isMyself() from oc-lib if a peer is the current host. + + ## TODO -- [ ] Logs the output of each pods : - - logsPods() function already exists - - need to implement the logic to create each pod's logger and start the monitoring routing - [ ] Allow the front to known on which IP the service are reachable - currently doing it by using `kubectl get nodes -o wide` diff --git a/conf/conf.go b/conf/conf.go index 860605b..0893916 100644 --- a/conf/conf.go +++ b/conf/conf.go @@ -18,6 +18,7 @@ type Config struct { KubeCA string KubeCert string KubeData string + ArgoHost string // when executed in a container will replace addresses with "localhost" in their url } var instance *Config diff --git a/main.go b/main.go index b2ae16b..f7ad03e 100644 --- a/main.go +++ b/main.go @@ -96,12 +96,12 @@ func main() { logger.Error().Msg(err.Error()) } - argo_file_path, err := builder.CompleteBuild(exec.ExecutionsID) + argoFilePath, err := builder.CompleteBuild(exec.ExecutionsID) if err != nil { logger.Error().Msg(err.Error()) } - workflowName = getContainerName(argo_file_path) + workflowName = getContainerName(argoFilePath) wf_logger = logger.With().Str("argo_name", workflowName).Str("workflow_id", conf.GetConfig().WorkflowID).Str("workflow_execution_id", conf.GetConfig().ExecutionID).Logger() wf_logger.Debug().Msg("Testing argo name") @@ -110,11 +110,12 @@ func main() { if conf.GetConfig().KubeHost == "" { // Not in a k8s environment, get conf from parameters fmt.Println("Executes outside of k8s") - executeOutside(argo_file_path, stepMax, builder.Workflow) + executeOutside(argoFilePath, stepMax, builder.Workflow) } else { // Executed in a k8s environment fmt.Println("Executes inside a k8s") - executeInside(exec.GetID(), "argo", argo_file_path, stepMax) + // executeInside(exec.GetID(), "argo", argo_file_path, stepMax) // commenting to use conf.ExecutionID instead of exec.GetID() + executeInside(conf.GetConfig().ExecutionID, conf.GetConfig().ExecutionID, argoFilePath, stepMax) } } @@ -294,6 +295,8 @@ func setConf(is_k8s bool, o *onion.Onion, parser *argparse.Parser) { host := parser.String("H", "host", &argparse.Options{Required: false, Default: "", Help: "Host for the Kubernetes cluster"}) port := parser.String("P", "port", &argparse.Options{Required: false, Default: "6443", Help: "Port for the Kubernetes cluster"}) + // argoHost := parser.String("h", "argoHost", &argparse.Options{Required: false, Default: "", Help: "Host where Argo is running from"}) 
// can't use -h because its reserved to help + err := parser.Parse(os.Args) if err != nil { fmt.Println(parser.Usage(err)) @@ -311,6 +314,8 @@ func setConf(is_k8s bool, o *onion.Onion, parser *argparse.Parser) { conf.GetConfig().KubeHost = *host conf.GetConfig().KubePort = *port + // conf.GetConfig().ArgoHost = *argoHost + decoded, err := base64.StdEncoding.DecodeString(*ca) if err == nil { conf.GetConfig().KubeCA = string(decoded) diff --git a/tools/kubernetes.go b/tools/kubernetes.go index d92171b..2b096be 100644 --- a/tools/kubernetes.go +++ b/tools/kubernetes.go @@ -52,6 +52,7 @@ func NewKubernetesTool() (Tool, error) { if err != nil { return nil, errors.New("Error creating Kubernetes versionned client: " + err.Error()) } + return &KubernetesTools{ Set: clientset, VersionedSet: clientset2, @@ -149,7 +150,7 @@ func (k *KubernetesTools) CreateArgoWorkflow(path string, ns string) (string, er if err != nil { return "", errors.New("failed to create workflow: " + err.Error()) } - fmt.Printf("workflow %s created in namespace %s\n", createdWf.Name, "argo") + fmt.Printf("workflow %s created in namespace %s\n", createdWf.Name, ns) return createdWf.Name, nil } diff --git a/workflow_builder/argo_builder.go b/workflow_builder/argo_builder.go index 5631c26..731a22c 100644 --- a/workflow_builder/argo_builder.go +++ b/workflow_builder/argo_builder.go @@ -53,11 +53,12 @@ func (b *Workflow) getDag() *Dag { } type Spec struct { - Entrypoint string `yaml:"entrypoint"` - Arguments []Parameter `yaml:"arguments,omitempty"` - Volumes []VolumeClaimTemplate `yaml:"volumeClaimTemplates,omitempty"` - Templates []Template `yaml:"templates"` - Timeout int `yaml:"activeDeadlineSeconds,omitempty"` + ServiceAccountName string `yaml:"serviceAccountName"` + Entrypoint string `yaml:"entrypoint"` + Arguments []Parameter `yaml:"arguments,omitempty"` + Volumes []VolumeClaimTemplate `yaml:"volumeClaimTemplates,omitempty"` + Templates []Template `yaml:"templates"` + Timeout int `yaml:"activeDeadlineSeconds,omitempty"` } // TODO: found on a processing instance linked to storage @@ -72,6 +73,7 @@ func (b *ArgoBuilder) CreateDAG(namespace string, write bool) ( int, []string, [ if b.Timeout > 0 { b.Workflow.Spec.Timeout = b.Timeout } + b.Workflow.Spec.ServiceAccountName = "sa-"+namespace b.Workflow.Spec.Entrypoint = "dag" b.Workflow.ApiVersion = "argoproj.io/v1alpha1" b.Workflow.Kind = "Workflow" From 04d6001fec097c94e171d01eaf122f15eab330dd Mon Sep 17 00:00:00 2001 From: pb Date: Mon, 14 Apr 2025 18:22:31 +0200 Subject: [PATCH 14/19] added entrypoint --- Dockerfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index ebd9760..d0d27dc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -23,4 +23,6 @@ FROM scratch WORKDIR /app -COPY --from=builder /app/oc-monitord . \ No newline at end of file +COPY --from=builder /app/oc-monitord . 
+ +ENTRYPOINT ["./oc-monitord"] \ No newline at end of file From 31580f1905fa9ebd758e9e29ca56579445b3728a Mon Sep 17 00:00:00 2001 From: pb Date: Tue, 15 Apr 2025 11:40:44 +0200 Subject: [PATCH 15/19] moved the logger creation to the utils package to make them available to all packages without recreating or passing them --- main.go | 9 ++++++--- tools/kubernetes.go | 3 +++ utils/utils.go | 24 ++++++++++++++++++++++++ 3 files changed, 33 insertions(+), 3 deletions(-) diff --git a/main.go b/main.go index f7ad03e..8035a88 100644 --- a/main.go +++ b/main.go @@ -67,7 +67,7 @@ func main() { conf.GetConfig().Logs, ) - logger = logs.CreateLogger("oc-monitord") + logger = u.GetLogger() logger.Debug().Msg("Loki URL : " + conf.GetConfig().LokiURL) logger.Debug().Msg("Workflow executed : " + conf.GetConfig().ExecutionID) @@ -124,13 +124,16 @@ func executeInside(execID string, ns string, argo_file_path string, stepMax int) t, err := tools2.NewService(conf.GetConfig().Mode) if err != nil { logger.Error().Msg("Could not create KubernetesTool") + return } + name, err := t.CreateArgoWorkflow(argo_file_path, ns) + if err != nil { logger.Error().Msg("Could not create argo workflow : " + err.Error()) + return } else { - split := strings.Split(argo_file_path, "_") - argoLogs := models.NewArgoLogs(split[0], "argo", stepMax) + argoLogs := models.NewArgoLogs(workflowName, "argo", stepMax) argoLogs.StartStepRecording(argoLogs.NewWatch(), wf_logger) err := t.LogWorkflow(execID, ns, name, argo_file_path, stepMax, argoLogs.NewWatch(), argoLogs.NewWatch(), argoLogs, []string{}, logWorkflow) if err != nil { diff --git a/tools/kubernetes.go b/tools/kubernetes.go index 2b096be..794cbcd 100644 --- a/tools/kubernetes.go +++ b/tools/kubernetes.go @@ -66,6 +66,8 @@ func (k *KubernetesTools) LogWorkflow(execID string, namespace string, workflowN return errors.New("Could not retrieve workflow ID from execution ID " + execID) } if exec.State == enum.DRAFT || exec.State == enum.FAILURE || exec.State == enum.SUCCESS { + l := utils.GetWFLogger("") + l.Error().Msg("The execution's state doesn't meet requirement, state is : " + exec.State.String()) return nil } k.logWorkflow(namespace, workflowName, argoFilePath, stepMax, current_watch, previous_watch, argoLogs, seen, logFunc) @@ -76,6 +78,7 @@ func (k *KubernetesTools) logWorkflow(namespace string, workflowName string, arg seen []string, logFunc func(argoFilePath string, stepMax int, pipe io.ReadCloser, current_watch *models.ArgoWatch, previous_watch *models.ArgoWatch, argoLogs *models.ArgoLogs, seen []string, wg *sync.WaitGroup)) error { // List pods related to the Argo workflow + fmt.Println("\n!!!!!!!! !!!!!!!!!! !!!!!!!! 
&&&& & STARTING LOG\n\n") labelSelector := fmt.Sprintf("workflows.argoproj.io/workflow=%s", workflowName) for retries := 0; retries < 10; retries++ { // Retry for up to ~20 seconds // List workflow pods diff --git a/utils/utils.go b/utils/utils.go index 1222230..b926e5a 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -2,11 +2,21 @@ package utils import ( "oc-monitord/conf" + "sync" oclib "cloud.o-forge.io/core/oc-lib" + "cloud.o-forge.io/core/oc-lib/logs" "cloud.o-forge.io/core/oc-lib/models/workflow_execution" + "github.com/rs/zerolog" ) +var ( + logger zerolog.Logger + wf_logger zerolog.Logger + pods_logger zerolog.Logger + onceLogger sync.Once + onceWF sync.Once +) func GetExecution(exec_id string) *workflow_execution.WorkflowExecution { res := oclib.NewRequest(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), "", conf.GetConfig().PeerID, []string{}, nil).LoadOne(exec_id) if res.Code != 200 { @@ -16,3 +26,17 @@ func GetExecution(exec_id string) *workflow_execution.WorkflowExecution { } return res.ToWorkflowExecution() } + +func GetLogger() zerolog.Logger { + onceLogger.Do(func(){ + logger = logs.CreateLogger("oc-monitord") + }) + return logger +} + +func GetWFLogger(workflowName string) zerolog.Logger { + onceWF.Do(func(){ + wf_logger = logger.With().Str("argo_name", workflowName).Str("workflow_id", conf.GetConfig().WorkflowID).Str("workflow_execution_id", conf.GetConfig().ExecutionID).Logger() + }) + return wf_logger +} \ No newline at end of file From 6f7acee2dff194af52a477efc0d761cde627a506 Mon Sep 17 00:00:00 2001 From: pb Date: Tue, 15 Apr 2025 12:00:43 +0200 Subject: [PATCH 16/19] moved the logger creation to the utils package to make them available to all packages without recreating or passing them --- main.go | 2 +- utils/utils.go | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/main.go b/main.go index 8035a88..4b7f8e6 100644 --- a/main.go +++ b/main.go @@ -103,7 +103,7 @@ func main() { workflowName = getContainerName(argoFilePath) - wf_logger = logger.With().Str("argo_name", workflowName).Str("workflow_id", conf.GetConfig().WorkflowID).Str("workflow_execution_id", conf.GetConfig().ExecutionID).Logger() + wf_logger := u.GetWFLogger(workflowName) wf_logger.Debug().Msg("Testing argo name") _ = stepMax diff --git a/utils/utils.go b/utils/utils.go index b926e5a..c3e3922 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -36,7 +36,10 @@ func GetLogger() zerolog.Logger { func GetWFLogger(workflowName string) zerolog.Logger { onceWF.Do(func(){ - wf_logger = logger.With().Str("argo_name", workflowName).Str("workflow_id", conf.GetConfig().WorkflowID).Str("workflow_execution_id", conf.GetConfig().ExecutionID).Logger() + wf_logger = logger.With(). + Str("argo_name", workflowName). + Str("workflow_id", conf.GetConfig(). 
+ WorkflowID).Str("workflow_execution_id", conf.GetConfig().ExecutionID).Logger() }) return wf_logger } \ No newline at end of file From fb8d994be3909ef3398dc93317a68a5958ace39d Mon Sep 17 00:00:00 2001 From: pb Date: Thu, 17 Apr 2025 16:51:29 +0200 Subject: [PATCH 17/19] Modified how logging with monitord container is implemented, with simpler logic thanks to the argo client library and k8 client-go for pods' logs --- logger/argo_logs.go | 219 ++++++++++++++++++++++++++++++++++++++++++++ main.go | 139 +++++----------------------- models/argo_logs.go | 161 -------------------------------- tools/interface.go | 10 +- tools/kubernetes.go | 148 +++++++++++++++--------------- 5 files changed, 322 insertions(+), 355 deletions(-) create mode 100644 logger/argo_logs.go delete mode 100644 models/argo_logs.go diff --git a/logger/argo_logs.go b/logger/argo_logs.go new file mode 100644 index 0000000..9b59ba6 --- /dev/null +++ b/logger/argo_logs.go @@ -0,0 +1,219 @@ +package logger + +import ( + "bufio" + "encoding/json" + "fmt" + "oc-monitord/tools" + "oc-monitord/utils" + "slices" + "time" + + "github.com/rs/zerolog" + "k8s.io/apimachinery/pkg/watch" + + wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" +) + +// An object to monitor the logs generated by a specific pod from a workflow execution +type ArgoWatch struct { + Name string + Namespace string + Status string + Conditions + Created string + Started string + Duration string + Progress string + Logs []string +} + +type Conditions struct { + PodRunning bool + Completed bool +} + +func (a *ArgoWatch) Equals(arg *ArgoWatch) bool { + if arg == nil { + return false + } + return a.Status == arg.Status && a.Progress == arg.Progress && a.Conditions.PodRunning == arg.Conditions.PodRunning && a.Conditions.Completed == arg.Conditions.Completed +} + +func NewArgoLogs(name string, namespace string, stepMax int) *ArgoLogs { + return &ArgoLogs{ + Name: "oc-monitor-" + name, + Namespace: namespace, + CreatedDate: time.Now().Format("2006-01-02 15:04:05"), + StepCount: 0, + StepMax: stepMax, + stop: false, + Seen: []string{}, + } +} + +// An object to monitor and log the output of an argo submit +type ArgoLogs struct { + Name string + Namespace string + CreatedDate string + StepCount int + StepMax int + stop bool + Started time.Time + Seen []string + Logs []string + IsStreaming bool +} + +func (a *ArgoLogs) NewWatch() *ArgoWatch { + return &ArgoWatch{ + Name: a.Name, + Namespace: a.Namespace, + Status: "Pending", + Created: a.CreatedDate, + Started: a.Started.Format("2006-01-02 15:04:05"), + Conditions: Conditions{ + PodRunning: a.StepCount > 0 && a.StepCount < a.StepMax, + Completed: a.StepCount == a.StepMax, + }, + Progress: fmt.Sprintf("%v/%v", a.StepCount, a.StepMax), + Duration: "0s", + Logs: []string{}, + } + +} + +func (a *ArgoLogs) StartStepRecording(current_watch *ArgoWatch, logger zerolog.Logger) { + jsonified, _ := json.Marshal(current_watch) + logger.Info().Msg(string(jsonified)) + a.StepCount += 1 + a.Started = time.Now() +} + + +type ArgoPodLog struct { + PodName string + Step string + Message string +} + +func NewArgoPodLog(name string, step string, msg string) ArgoPodLog { + return ArgoPodLog{ + PodName: name, + Step: step, + Message: msg, + } +} + +func LogKubernetesArgo(wfName string, executionID string, watcher watch.Interface) { + var argoWatcher *ArgoWatch + var pods []string + var node wfv1.NodeStatus + + wfl := utils.GetWFLogger("") + + for event := range (watcher.ResultChan()) { + wf, ok := 
event.Object.(*wfv1.Workflow) + if !ok { + wfl.Error().Msg("unexpected type") + continue + } + if len(wf.Status.Nodes) == 0 { + wfl.Debug().Msg("No node status yet") // The first output of the channel doesn't contain Nodes so we skip it + continue + } + + conditions := retrieveCondition(wf) + + // Retrieving the Status for the main node, which is named after the workflow + if node, ok = wf.Status.Nodes[wfName]; !ok { + bytified, _ := json.MarshalIndent(wf.Status.Nodes,"","\t") + wfl.Fatal().Msg("Could not find the " + wfName + " node in \n" + string(bytified)) + } + + now := time.Now() + start, _ := time.Parse(time.RFC3339, node.StartedAt.String() ) + duration := now.Sub(start) + + newWatcher := ArgoWatch{ + Name: node.Name, + Namespace: executionID, + Status: string(node.Phase), + Created: node.StartedAt.String(), + Started: node.StartedAt.String(), + Progress: string(node.Progress), + Duration: duration.String(), + Conditions: conditions, + } + + if argoWatcher == nil { + argoWatcher = &newWatcher + } + + if !newWatcher.Equals(argoWatcher){ + jsonified, _ := json.Marshal(newWatcher) + wfl.Info().Msg(string(jsonified)) + argoWatcher = &newWatcher + } + + // I don't think we need to use WaitGroup here, because the loop itself + // acts as blocking process for the main thread, because Argo watch never closes the channel + for _, pod := range wf.Status.Nodes{ + if !slices.Contains(pods,pod.Name){ + pl := wfl.With().Str("pod", pod.Name).Logger() + if wfName == pod.Name { pods = append(pods, pod.Name); continue } // One of the node is the Workflow, the others are the pods so don't try to log on the wf name + go logKubernetesPods(executionID, wfName, pod.Name, pl) + pods = append(pods, pod.Name) + } + } + + // Stop listening to the chan when the Workflow is completed or something bad happened + if node.Phase.Completed() { + wfl.Info().Msg(wfName + " worflow completed") + break + } + if node.Phase.FailedOrError() { + wfl.Error().Msg(wfName + "has failed, please refer to the logs") + wfl.Error().Msg(node.Message) + break + } + } +} + +func retrieveCondition(wf *wfv1.Workflow) (c Conditions) { + for _, cond := range wf.Status.Conditions { + if cond.Type == "PodRunning" { + c.PodRunning = cond.Status == "True" + } + if cond.Type == "Completed" { + c.Completed = cond.Status == "True" + } + } + + return + +} + +// Function needed to be executed as a go thread +func logKubernetesPods(executionId string, wfName string,podName string, logger zerolog.Logger){ + k, err := tools.NewKubernetesTool() + if err != nil { + logger.Error().Msg("Could not get Kubernetes tools") + return + } + + reader, err := k.GetPodLogger(executionId, wfName, podName) + if err != nil { + logger.Error().Msg(err.Error()) + return + } + + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + log := scanner.Text() + podLog := NewArgoPodLog(wfName,podName,log) + jsonified, _ := json.Marshal(podLog) + logger.Info().Msg(string(jsonified)) + } +} \ No newline at end of file diff --git a/main.go b/main.go index 4b7f8e6..9e3b4e1 100644 --- a/main.go +++ b/main.go @@ -3,19 +3,17 @@ package main import ( "bufio" "encoding/base64" - "encoding/json" "fmt" "io" "os" "os/exec" "regexp" - "slices" "strings" "sync" "time" "oc-monitord/conf" - "oc-monitord/models" + l "oc-monitord/logger" u "oc-monitord/utils" "oc-monitord/workflow_builder" @@ -45,7 +43,6 @@ import ( var logger zerolog.Logger var wf_logger zerolog.Logger -var pods_logger zerolog.Logger var parser argparse.Parser var workflowName string @@ -90,7 +87,7 @@ func main() { 
logger.Error().Msg("Could not retrieve workflow " + conf.GetConfig().WorkflowID + " from oc-catalog API") } - builder, stepMax, err := new_wf.ExportToArgo(exec.ExecutionsID, conf.GetConfig().Timeout) + builder, _, err := new_wf.ExportToArgo(exec.ExecutionsID, conf.GetConfig().Timeout) // Removed stepMax so far, I don't know if we need it anymore if err != nil { logger.Error().Msg("Could not create the Argo file for " + conf.GetConfig().WorkflowID) logger.Error().Msg(err.Error()) @@ -105,22 +102,21 @@ func main() { wf_logger := u.GetWFLogger(workflowName) wf_logger.Debug().Msg("Testing argo name") - _ = stepMax if conf.GetConfig().KubeHost == "" { // Not in a k8s environment, get conf from parameters fmt.Println("Executes outside of k8s") - executeOutside(argoFilePath, stepMax, builder.Workflow) + executeOutside(argoFilePath, builder.Workflow) } else { // Executed in a k8s environment fmt.Println("Executes inside a k8s") // executeInside(exec.GetID(), "argo", argo_file_path, stepMax) // commenting to use conf.ExecutionID instead of exec.GetID() - executeInside(conf.GetConfig().ExecutionID, conf.GetConfig().ExecutionID, argoFilePath, stepMax) + executeInside(conf.GetConfig().ExecutionID, conf.GetConfig().ExecutionID, argoFilePath) } } // So far we only log the output from -func executeInside(execID string, ns string, argo_file_path string, stepMax int) { +func executeInside(execID string, ns string, argo_file_path string) { t, err := tools2.NewService(conf.GetConfig().Mode) if err != nil { logger.Error().Msg("Could not create KubernetesTool") @@ -128,14 +124,20 @@ func executeInside(execID string, ns string, argo_file_path string, stepMax int) } name, err := t.CreateArgoWorkflow(argo_file_path, ns) - + _ = name if err != nil { logger.Error().Msg("Could not create argo workflow : " + err.Error()) + fmt.Println("CA :" + conf.GetConfig().KubeCA) + fmt.Println("Cert :" + conf.GetConfig().KubeCert) + fmt.Println("Data :" + conf.GetConfig().KubeData) return } else { - argoLogs := models.NewArgoLogs(workflowName, "argo", stepMax) - argoLogs.StartStepRecording(argoLogs.NewWatch(), wf_logger) - err := t.LogWorkflow(execID, ns, name, argo_file_path, stepMax, argoLogs.NewWatch(), argoLogs.NewWatch(), argoLogs, []string{}, logWorkflow) + watcher, err := t.GetArgoWatch(execID, workflowName) + if err != nil { + logger.Error().Msg("Could not retrieve Watcher : " + err.Error()) + } + + l.LogKubernetesArgo(name, execID, watcher) if err != nil { logger.Error().Msg("Could not log workflow : " + err.Error()) } @@ -143,11 +145,9 @@ func executeInside(execID string, ns string, argo_file_path string, stepMax int) } -func executeOutside(argo_file_path string, stepMax int, workflow workflow_builder.Workflow) { - // var stdoutSubmit, stderrSubmit, stdout_logs, stderr_logs io.ReadCloser +func executeOutside(argo_file_path string, workflow workflow_builder.Workflow) { var stdoutSubmit, stderrSubmit io.ReadCloser var stdoutLogs, stderrLogs io.ReadCloser - // var stderr io.ReadCloser var wg sync.WaitGroup var err error @@ -158,29 +158,20 @@ func executeOutside(argo_file_path string, stepMax int, workflow workflow_builde wf_logger.Error().Msg("Could not retrieve stdoutpipe " + err.Error()) return } - - // //======== Code block that implemented a method that logs both locally and container executed wf - // // Need to be improved, did not log well for local executions - // split := strings.Split(argo_file_path, "_") - // argoLogs := models.NewArgoLogs(split[0], conf.GetConfig().ExecutionID, stepMax) - // 
argoLogs.StartStepRecording(argoLogs.NewWatch(), wf_logger) - // argoLogs.IsStreaming = true // Used to determine wether or not the logs are read from a docker container or on localhost - // // go logWorkflow(argo_file_path, stepMax, stdout, argoLogs.NewWatch(), argoLogs.NewWatch(), argoLogs, []string{}, &wg) - // // ======= - - var steps []string - for _, template := range workflow.Spec.Templates { - steps = append(steps, template.Name) - } - + cmdLogs := exec.Command("argo", "logs", "oc-monitor-"+workflowName, "-n", conf.GetConfig().ExecutionID, "--follow","--no-color") if stdoutLogs, err = cmdLogs.StdoutPipe(); err != nil { wf_logger.Error().Msg("Could not retrieve stdoutpipe for 'argo logs'" + err.Error()) return } + + var steps []string + for _, template := range workflow.Spec.Templates { + steps = append(steps, template.Name) + } - go models.LogLocalWorkflow(workflowName, stdoutSubmit, &wg) - go models.LogPods(workflowName, stdoutLogs, steps, &wg) + go l.LogLocalWorkflow(workflowName, stdoutSubmit, &wg) + go l.LogLocalPod(workflowName, stdoutLogs, steps, &wg) fmt.Println("Starting argo submit") if err := cmdSubmit.Start(); err != nil { @@ -209,68 +200,6 @@ func executeOutside(argo_file_path string, stepMax int, workflow workflow_builde wg.Wait() } -// !!!! BUGGED !!!! -// Should be refactored to create a function dedicated to logging output from execution in a container -// LogLocalWorkflow() has been implemented to be used when oc-monitord is executed locally - -// We could improve this function by creating an object with the same attribute as the output -// and only send a new log if the current object has different values than the previous -func logWorkflow(argo_file_path string, stepMax int, pipe io.ReadCloser, - current_watch *models.ArgoWatch, previous_watch *models.ArgoWatch, - argoLogs *models.ArgoLogs, seen []string, wg *sync.WaitGroup) { - scanner := bufio.NewScanner(pipe) - count := 0 - see := "" - seeit := 0 - for scanner.Scan() { - log := scanner.Text() - if strings.Contains(log, "capturing logs") && count == 0 { - if !argoLogs.IsStreaming { - wg.Add(1) - } - seeit++ - } else if count == 0 && !argoLogs.IsStreaming { - break - } - if count == 1 { - see = log - if slices.Contains(argoLogs.Seen, see) && !argoLogs.IsStreaming { - wg.Done() - seeit-- - break - } - } - if !slices.Contains(current_watch.Logs, log) { - current_watch.Logs = append(current_watch.Logs, strings.ReplaceAll(log, "\"", "")) - } - count++ - if strings.Contains(log, "sub-process exited") || argoLogs.IsStreaming { - current_watch = argoLogs.StopStepRecording(current_watch) - argoLogs.Seen = append(argoLogs.Seen, see) - if checkStatus(current_watch, previous_watch, argoLogs) { - count = 0 - if !argoLogs.IsStreaming { - wg.Done() - } - seeit-- - } - jsonified, err := json.Marshal(current_watch) - if err != nil { - logger.Error().Msg("Could not create watch log") - } - if current_watch.Status == "Failed" { - wf_logger.Error().Msg(string(jsonified)) - } else { - wf_logger.Info().Msg(string(jsonified)) - } - previous_watch = current_watch - current_watch = &models.ArgoWatch{} - if argoLogs.IsStreaming { - current_watch.Logs = []string{} - } - } - } -} func loadConfig(is_k8s bool, parser *argparse.Parser) { var o *onion.Onion @@ -375,26 +304,6 @@ func getContainerName(argo_file string) string { return container_name } -// Uses the ArgoWatch object to update status of the workflow execution object -func checkStatus(current *models.ArgoWatch, previous *models.ArgoWatch, argoLogs *models.ArgoLogs) bool { - if 
previous == nil || current.Status != previous.Status || argoLogs.IsStreaming { - argoLogs.StepCount += 1 - if len(current.Logs) > 0 { - newLogs := []string{} - for _, log := range current.Logs { - if !slices.Contains(argoLogs.Logs, log) { - newLogs = append(newLogs, log) - } - } - updateStatus(current.Status, strings.Join(newLogs, "\n")) - current.Logs = newLogs - argoLogs.Logs = append(argoLogs.Logs, newLogs...) - } else { - updateStatus(current.Status, "") - } - } - return previous == nil || current.Status != previous.Status || argoLogs.IsStreaming -} func updateStatus(status string, log string) { exec_id := conf.GetConfig().ExecutionID diff --git a/models/argo_logs.go b/models/argo_logs.go deleted file mode 100644 index ecf150e..0000000 --- a/models/argo_logs.go +++ /dev/null @@ -1,161 +0,0 @@ -package models - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" - "time" - - "github.com/acarl005/stripansi" - "github.com/rs/zerolog" -) - -// An object to monitor the logs generated by a specific pod from a workflow execution -type ArgoWatch struct { - Name string - Namespace string - Status string - Conditions - Created string - Started string - Duration string - Progress string - Logs []string -} - -type Conditions struct { - PodRunning bool - Completed bool -} - -func (a *ArgoWatch) Equals(arg *ArgoWatch) bool { - if arg == nil { - return false - } - return a.Status == arg.Status && a.Progress == arg.Progress && a.Conditions.PodRunning == arg.Conditions.PodRunning && a.Conditions.Completed == arg.Conditions.Completed -} - -func NewArgoLogs(name string, namespace string, stepMax int) *ArgoLogs { - return &ArgoLogs{ - Name: "oc-monitor-" + name, - Namespace: namespace, - CreatedDate: time.Now().Format("2006-01-02 15:04:05"), - StepCount: 0, - StepMax: stepMax, - stop: false, - Seen: []string{}, - } -} - -// An object to monitor and log the output of an argo submit -type ArgoLogs struct { - Name string - Namespace string - CreatedDate string - StepCount int - StepMax int - stop bool - Started time.Time - Seen []string - Logs []string - IsStreaming bool -} - -func (a *ArgoLogs) NewWatch() *ArgoWatch { - return &ArgoWatch{ - Name: a.Name, - Namespace: a.Namespace, - Status: "Pending", - Created: a.CreatedDate, - Started: a.Started.Format("2006-01-02 15:04:05"), - Conditions: Conditions{ - PodRunning: a.StepCount > 0 && a.StepCount < a.StepMax, - Completed: a.StepCount == a.StepMax, - }, - Progress: fmt.Sprintf("%v/%v", a.StepCount, a.StepMax), - Duration: "0s", - Logs: []string{}, - } - -} - -func (a *ArgoLogs) StartStepRecording(current_watch *ArgoWatch, logger zerolog.Logger) { - jsonified, _ := json.Marshal(current_watch) - logger.Info().Msg(string(jsonified)) - a.StepCount += 1 - a.Started = time.Now() -} - -func (a *ArgoLogs) StopStepRecording(current *ArgoWatch) *ArgoWatch { - fn := strings.Split(a.Name, "_") - logs := []string{} - err := false - end := "" - for _, input := range current.Logs { - line := strings.TrimSpace(input) - if line == "" || !strings.Contains(line, fn[0]) || !strings.Contains(line, ":") { - continue - } - step := strings.Split(line, ":") - if strings.Contains(line, "sub-process exited") { - b := strings.Split(line, "time=\"") - if len(b) > 1 { - end = b[1][:19] - } - } - if len(step) < 2 || strings.Contains(line, "time=") || strings.TrimSpace(strings.Join(step[1:], " : ")) == "" || strings.TrimSpace(strings.Join(step[1:], " : ")) == a.Name { - continue - } - log := stripansi.Strip(strings.TrimSpace(strings.Join(step[1:], " : "))) - t, e := 
strconv.Unquote(log) - if e == nil { - logs = append(logs, t) - } else { - logs = append(logs, strings.ReplaceAll(log, "\"", "`")) - } - - if strings.Contains(logs[len(logs)-1], "Error") { - err = true - } - } - status := "Pending" - if a.StepCount > 0 { - status = "Running" - } - if a.StepCount == a.StepMax { - if err { - status = "Failed" - } else { - status = "Succeeded" - } - } - duration := float64(0) - if end != "" { - timeE, _ := time.Parse("2006-01-02T15:04:05", end) - duration = timeE.Sub(a.Started).Seconds() - } - current.Conditions = Conditions{ - PodRunning: a.StepCount > 0 && a.StepCount < a.StepMax, - Completed: a.StepCount == a.StepMax, - } - current.Progress = fmt.Sprintf("%v/%v", a.StepCount, a.StepMax) - current.Duration = fmt.Sprintf("%v", fmt.Sprintf("%.2f", duration)+"s") - - current.Status = status - return current -} - -type ArgoPodLog struct { - PodName string - Step string - Message string -} - -func NewArgoPodLog(name string, step string, msg string) ArgoPodLog { - return ArgoPodLog{ - PodName: name, - Step: step, - Message: msg, - } -} \ No newline at end of file diff --git a/tools/interface.go b/tools/interface.go index 2b356d8..b6f2610 100644 --- a/tools/interface.go +++ b/tools/interface.go @@ -3,17 +3,15 @@ package tools import ( "errors" "io" - "oc-monitord/models" - "sync" + + "k8s.io/apimachinery/pkg/watch" ) type Tool interface { CreateArgoWorkflow(path string, ns string) (string, error) CreateAccessSecret(ns string, login string, password string) (string, error) - LogWorkflow(execID string, namespace string, workflowName string, argoFilePath string, stepMax int, current_watch *models.ArgoWatch, previous_watch *models.ArgoWatch, - argoLogs *models.ArgoLogs, seen []string, - logFunc func(argoFilePath string, stepMax int, pipe io.ReadCloser, current_watch *models.ArgoWatch, previous_watch *models.ArgoWatch, - argoLogs *models.ArgoLogs, seen []string, wg *sync.WaitGroup)) error + GetArgoWatch(executionId string, wfName string) (watch.Interface, error) + GetPodLogger(ns string, wfName string, podName string) (io.ReadCloser, error) } var _service = map[string]func() (Tool, error){ diff --git a/tools/kubernetes.go b/tools/kubernetes.go index 794cbcd..1825cf7 100644 --- a/tools/kubernetes.go +++ b/tools/kubernetes.go @@ -7,21 +7,18 @@ import ( "fmt" "io" "oc-monitord/conf" - "oc-monitord/models" "oc-monitord/utils" "os" - "sync" "time" - "cloud.o-forge.io/core/oc-lib/models/common/enum" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned" "github.com/google/uuid" - corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" ) @@ -59,73 +56,6 @@ func NewKubernetesTool() (Tool, error) { }, nil } -func (k *KubernetesTools) LogWorkflow(execID string, namespace string, workflowName string, argoFilePath string, stepMax int, current_watch *models.ArgoWatch, previous_watch *models.ArgoWatch, argoLogs *models.ArgoLogs, - seen []string, logFunc func(argoFilePath string, stepMax int, pipe io.ReadCloser, current_watch *models.ArgoWatch, previous_watch *models.ArgoWatch, argoLogs *models.ArgoLogs, seen []string, wg *sync.WaitGroup)) error { - exec := utils.GetExecution(execID) - if exec == nil { - return errors.New("Could not retrieve workflow ID from execution ID " + execID) - } - if 
exec.State == enum.DRAFT || exec.State == enum.FAILURE || exec.State == enum.SUCCESS { - l := utils.GetWFLogger("") - l.Error().Msg("The execution's state doesn't meet requirement, state is : " + exec.State.String()) - return nil - } - k.logWorkflow(namespace, workflowName, argoFilePath, stepMax, current_watch, previous_watch, argoLogs, seen, logFunc) - return k.LogWorkflow(execID, namespace, workflowName, argoFilePath, stepMax, current_watch, previous_watch, argoLogs, seen, logFunc) -} - -func (k *KubernetesTools) logWorkflow(namespace string, workflowName string, argoFilePath string, stepMax int, current_watch *models.ArgoWatch, previous_watch *models.ArgoWatch, argoLogs *models.ArgoLogs, - seen []string, - logFunc func(argoFilePath string, stepMax int, pipe io.ReadCloser, current_watch *models.ArgoWatch, previous_watch *models.ArgoWatch, argoLogs *models.ArgoLogs, seen []string, wg *sync.WaitGroup)) error { - // List pods related to the Argo workflow - fmt.Println("\n!!!!!!!! !!!!!!!!!! !!!!!!!! &&&& & STARTING LOG\n\n") - labelSelector := fmt.Sprintf("workflows.argoproj.io/workflow=%s", workflowName) - for retries := 0; retries < 10; retries++ { // Retry for up to ~20 seconds - // List workflow pods - wfPods, err := k.Set.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{ - LabelSelector: labelSelector, - }) - if err != nil { - return err - } - // If we found pods, stream logs - if len(wfPods.Items) > 0 { - var wg sync.WaitGroup - // Stream logs from all matching pods - for _, pod := range wfPods.Items { - for _, container := range pod.Spec.Containers { - wg.Add(1) - go k.streamLogs(namespace, pod.Name, container.Name, argoFilePath, stepMax, &wg, current_watch, previous_watch, argoLogs, seen, logFunc) - } - } - wg.Wait() - return nil - } - time.Sleep(2 * time.Second) // Wait before retrying - } - return errors.New("no pods found for the workflow") -} - -// Function to stream logs -func (k *KubernetesTools) streamLogs(namespace string, podName string, containerName string, - argoFilePath string, stepMax int, wg *sync.WaitGroup, current_watch *models.ArgoWatch, previous_watch *models.ArgoWatch, argoLogs *models.ArgoLogs, seen []string, - logFunc func(argo_file_path string, stepMax int, pipe io.ReadCloser, current_watch *models.ArgoWatch, previous_watch *models.ArgoWatch, argoLogs *models.ArgoLogs, seen []string, wg *sync.WaitGroup)) { - req := k.Set.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{ - Container: containerName, // Main container - Follow: true, // Equivalent to -f flag in kubectl logs - }) - defer wg.Done() - // Open stream - stream, err := req.Stream(context.Background()) - if err != nil { - return - } - defer stream.Close() - var internalWg sync.WaitGroup - logFunc(argoFilePath, stepMax, stream, current_watch, previous_watch, argoLogs, seen, &internalWg) - internalWg.Wait() -} - func (k *KubernetesTools) CreateArgoWorkflow(path string, ns string) (string, error) { // Read workflow YAML file workflowYAML, err := os.ReadFile(path) @@ -149,7 +79,7 @@ func (k *KubernetesTools) CreateArgoWorkflow(path string, ns string) (string, er } // Create the workflow in the "argo" namespace - createdWf, err := k.VersionedSet.ArgoprojV1alpha1().Workflows(ns).Create(context.Background(), workflow, metav1.CreateOptions{}) + createdWf, err := k.VersionedSet.ArgoprojV1alpha1().Workflows(ns).Create(context.TODO(), workflow, metav1.CreateOptions{}) if err != nil { return "", errors.New("failed to create workflow: " + err.Error()) } @@ -177,9 +107,81 @@ func 
(k *KubernetesTools) CreateAccessSecret(ns string, login string, password s Data: secretData, } // Create the Secret in Kubernetes - _, err := k.Set.CoreV1().Secrets(namespace).Create(context.Background(), secret, metav1.CreateOptions{}) + _, err := k.Set.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) if err != nil { return "", errors.New("Error creating secret: " + err.Error()) } return name, nil } + +func (k *KubernetesTools) GetArgoWatch(executionId string, wfName string) (watch.Interface, error){ + wfl := utils.GetWFLogger("") + wfl.Debug().Msg("Starting argo watch with argo lib") + fmt.Println("metadata.name=oc-monitor-"+wfName + " in namespace : " + executionId) + options := metav1.ListOptions{FieldSelector: "metadata.name=oc-monitor-"+wfName} + fmt.Println(options) + watcher, err := k.VersionedSet.ArgoprojV1alpha1().Workflows(executionId).Watch(context.TODO(), options) + if err != nil { + return nil, errors.New("Error executing 'argo watch " + wfName + " -n " + executionId + " with ArgoprojV1alpha1 client") + } + + + return watcher, nil + +} + +func (k *KubernetesTools) GetPodLogger(ns string, wfName string, nodeName string) (io.ReadCloser, error) { + var targetPod v1.Pod + + pods, err := k.Set.CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{ + LabelSelector: "workflows.argoproj.io/workflow="+wfName, + }) + if err != nil { + return nil, fmt.Errorf("failed to list pods: " + err.Error()) + } + if len(pods.Items) == 0 { + return nil, fmt.Errorf("no pods found with label workflows.argoproj.io/node-name=" + nodeName) + } + + for _, pod := range pods.Items { + if pod.Annotations["workflows.argoproj.io/node-name"] == nodeName { + targetPod = pod + } + } + + // k8s API throws an error if we try getting logs while the container are not initialized, so we repeat status check there + k.testPodReady(targetPod, ns) + + // When using kubec logs for a pod we see it contacts /api/v1/namespaces/NAMESPACE/pods/oc-monitor-PODNAME/log?container=main so we add this container: main to the call + req, err := k.Set.CoreV1().Pods(ns).GetLogs(targetPod.Name, &v1.PodLogOptions{Follow: true, Container: "main"}). 
Stream(context.Background()) + if err != nil { + return nil, fmt.Errorf(" Error when trying to get logs for " + targetPod.Name + " : " + err.Error()) + } + + return req, nil +} + +func (k *KubernetesTools) testPodReady(pod v1.Pod, ns string) { + for { + pod, err := k.Set.CoreV1().Pods(ns).Get(context.Background(), pod.Name, metav1.GetOptions{}) + if err != nil { + wfl := utils.GetWFLogger("") + wfl.Error().Msg("Error fetching pod: " + err.Error() + "\n") + break + } + + var initialized bool + for _, cond := range pod.Status.Conditions { + if cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue { + initialized = true + return + } + } + + if initialized { + return + } + + time.Sleep(2 * time.Second) // avoid hammering the API + } +} \ No newline at end of file From 5f70feab59e1ac3d660dc80a6e34e27e48bebc5d Mon Sep 17 00:00:00 2001 From: pb Date: Thu, 17 Apr 2025 16:53:36 +0200 Subject: [PATCH 18/19] restructured the different package, cleaned some non used code, added comments, still have to reorganize packages to optimize packages --- {models => logger}/local_argo_logs.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) rename {models => logger}/local_argo_logs.go (96%) diff --git a/models/local_argo_logs.go b/logger/local_argo_logs.go similarity index 96% rename from models/local_argo_logs.go rename to logger/local_argo_logs.go index 234becb..95dcdfb 100644 --- a/models/local_argo_logs.go +++ b/logger/local_argo_logs.go @@ -1,4 +1,4 @@ -package models +package logger import ( "bufio" @@ -6,6 +6,7 @@ import ( "fmt" "io" "oc-monitord/conf" + "strings" "sync" @@ -106,7 +107,7 @@ func LogLocalWorkflow(wfName string, pipe io.ReadCloser, wg *sync.WaitGroup) { } // Debug, no logs sent -func LogPods(wfName string, pipe io.ReadCloser, steps []string, wg *sync.WaitGroup) { +func LogLocalPod(wfName string, pipe io.ReadCloser, steps []string, wg *sync.WaitGroup) { scanner := bufio.NewScanner(pipe) for scanner.Scan() { var podLogger zerolog.Logger @@ -118,6 +119,7 @@ func LogPods(wfName string, pipe io.ReadCloser, steps []string, wg *sync.WaitGro podLogger = wfLogger.With().Str("step_name", getStepName(podName, steps)).Logger() log := strings.Split(line,podName+":")[1] podLog := NewArgoPodLog(wfName,podName,log) + jsonifiedLog, err := json.Marshal(podLog) if err != nil { podLogger.Fatal().Msg(err.Error()) From e2d1746396d6900f05d843e29a33e4414df66422 Mon Sep 17 00:00:00 2001 From: pb Date: Thu, 17 Apr 2025 16:58:37 +0200 Subject: [PATCH 19/19] adding lib files --- go.mod | 54 +++++++++++------------ go.sum | 137 ++++++++++++++++++++++++--------------------------------- 2 files changed, 84 insertions(+), 107 deletions(-) diff --git a/go.mod b/go.mod index 874a1ce..609cd89 100644 --- a/go.mod +++ b/go.mod @@ -5,18 +5,18 @@ go 1.23.1 toolchain go1.23.3 require ( - cloud.o-forge.io/core/oc-lib v0.0.0-20250219142942-5111c9c8bec7 + cloud.o-forge.io/core/oc-lib v0.0.0-20250313155727-88c88cac5bc9 github.com/akamensky/argparse v1.4.0 github.com/google/uuid v1.6.0 github.com/goraz/onion v0.1.3 github.com/nwtgck/go-fakelish v0.1.3 - github.com/rs/zerolog v1.33.0 + github.com/rs/zerolog v1.34.0 gopkg.in/yaml.v3 v3.0.1 ) require ( - github.com/beego/beego/v2 v2.3.1 // indirect - github.com/go-playground/validator/v10 v10.22.0 // indirect + github.com/beego/beego/v2 v2.3.7 // indirect + github.com/go-playground/validator/v10 v10.26.0 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect @@ 
-31,11 +31,11 @@ require ( github.com/argoproj/argo-workflows/v3 v3.6.4 github.com/beorn7/perks v1.0.1 // indirect github.com/biter777/countries v1.7.5 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect - github.com/gabriel-vasile/mimetype v1.4.5 // indirect + github.com/gabriel-vasile/mimetype v1.4.8 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect @@ -43,33 +43,33 @@ require ( github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/snappy v0.0.4 // indirect + github.com/golang/snappy v1.0.0 // indirect github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.10 // indirect + github.com/klauspost/compress v1.18.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/montanaflynn/stats v0.7.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/nats-io/nats.go v1.37.0 // indirect - github.com/nats-io/nkeys v0.4.7 // indirect + github.com/nats-io/nats.go v1.41.0 // indirect + github.com/nats-io/nkeys v0.4.10 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.19.0 // indirect - github.com/prometheus/client_model v0.6.0 // indirect - github.com/prometheus/common v0.48.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.63.0 // indirect + github.com/prometheus/procfs v0.16.0 // indirect github.com/robfig/cron v1.2.0 // indirect - github.com/shiena/ansicolor v0.0.0-20200904210342-c7312218db18 // indirect + github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 // indirect github.com/smartystreets/goconvey v1.6.4 // indirect github.com/ugorji/go/codec v1.1.7 // indirect github.com/x448/float16 v0.8.4 // indirect @@ -77,16 +77,16 @@ require ( github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect - go.mongodb.org/mongo-driver v1.17.1 // indirect - golang.org/x/crypto v0.31.0 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sync v0.10.0 // indirect - golang.org/x/sys v0.28.0 // indirect - golang.org/x/term v0.27.0 // 
indirect - golang.org/x/text v0.21.0 // indirect + go.mongodb.org/mongo-driver v1.17.3 // indirect + golang.org/x/crypto v0.37.0 // indirect + golang.org/x/net v0.39.0 // indirect + golang.org/x/oauth2 v0.25.0 // indirect + golang.org/x/sync v0.13.0 // indirect + golang.org/x/sys v0.32.0 // indirect + golang.org/x/term v0.31.0 // indirect + golang.org/x/text v0.24.0 // indirect golang.org/x/time v0.7.0 // indirect - google.golang.org/protobuf v1.35.1 // indirect + google.golang.org/protobuf v1.36.6 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/api v0.32.1 diff --git a/go.sum b/go.sum index c7c76da..6f7e36d 100644 --- a/go.sum +++ b/go.sum @@ -1,33 +1,7 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.o-forge.io/core/oc-lib v0.0.0-20250213085018-271cc2caa026 h1:CYwpofGfpAhMDrT6jqvu9NI/tcgxCD8PKJZDKEfTvVI= -cloud.o-forge.io/core/oc-lib v0.0.0-20250213085018-271cc2caa026/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE= -cloud.o-forge.io/core/oc-lib v0.0.0-20250213093249-c53e25e69a7b h1:HAb2h0011mE3QrHdOwJCua5w0r/BDOFLNb/557ZAzL0= -cloud.o-forge.io/core/oc-lib v0.0.0-20250213093249-c53e25e69a7b/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE= -cloud.o-forge.io/core/oc-lib v0.0.0-20250217072519-cafadec1469f h1:esLB0EAn8IuOChW35kcBrPaN80z4A4yYyz1mXT45GQo= -cloud.o-forge.io/core/oc-lib v0.0.0-20250217072519-cafadec1469f/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE= -cloud.o-forge.io/core/oc-lib v0.0.0-20250218080121-a098f0a672ee h1:UIGIiE+O5LUrP18C8nrZxN1v6Lmzfdlv8pvHnSLKJz8= -cloud.o-forge.io/core/oc-lib v0.0.0-20250218080121-a098f0a672ee/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE= -cloud.o-forge.io/core/oc-lib v0.0.0-20250218085355-6e6ed4ea2c64 h1:dANQHoMCyp3uioCHnUOpLFiG/UO+biyPUoSelDNJ814= -cloud.o-forge.io/core/oc-lib v0.0.0-20250218085355-6e6ed4ea2c64/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE= -cloud.o-forge.io/core/oc-lib v0.0.0-20250218092508-b771b5d25ee5 h1:EwoctMKdVG1PJHRcBcRKCxgdAxy+TV1T617vxIZwkio= -cloud.o-forge.io/core/oc-lib v0.0.0-20250218092508-b771b5d25ee5/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE= -cloud.o-forge.io/core/oc-lib v0.0.0-20250218101140-6bf058ab5ca4 h1:7om8VD4ZivHA2BKBwvqM98/a7D+MTwppd2FloNBg1Y4= -cloud.o-forge.io/core/oc-lib v0.0.0-20250218101140-6bf058ab5ca4/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE= -cloud.o-forge.io/core/oc-lib v0.0.0-20250218113916-04f7537066c1 h1:on0zLtHo1Jj6FvQ/wuJCc/sxfBfgrd2qTFknpDh3wQM= -cloud.o-forge.io/core/oc-lib v0.0.0-20250218113916-04f7537066c1/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE= -cloud.o-forge.io/core/oc-lib v0.0.0-20250218115549-81d3406305c5 h1:DP/XYrxSOc5ORMGvVNqTvFjxLF4cymUW/d3HIZXKDEk= -cloud.o-forge.io/core/oc-lib v0.0.0-20250218115549-81d3406305c5/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE= -cloud.o-forge.io/core/oc-lib v0.0.0-20250218130229-7c30633bded0 h1:3EsRmeTz6OWHJETrPObctnGF8WgZtXHfwL2cjyHcfOk= -cloud.o-forge.io/core/oc-lib v0.0.0-20250218130229-7c30633bded0/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE= -cloud.o-forge.io/core/oc-lib v0.0.0-20250219075511-241c6a5a0861 h1:XqTFKSZ8hXGCJbuu/SBwakpftevg1AKV7hDI50cXNUg= -cloud.o-forge.io/core/oc-lib v0.0.0-20250219075511-241c6a5a0861/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE= -cloud.o-forge.io/core/oc-lib v0.0.0-20250219100312-b4a176667754 
h1:7J5EUe/iNS6cT6KVDklpgGH7ak30iEFgWJDEPF6wik4= -cloud.o-forge.io/core/oc-lib v0.0.0-20250219100312-b4a176667754/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE= -cloud.o-forge.io/core/oc-lib v0.0.0-20250219104152-3ecb0e9d960b h1:DhRqJdw2VePaYVlsh8OUA3zl+76Q0FWwGu+a+3aOf6s= -cloud.o-forge.io/core/oc-lib v0.0.0-20250219104152-3ecb0e9d960b/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE= -cloud.o-forge.io/core/oc-lib v0.0.0-20250219142942-5111c9c8bec7 h1:fh6SzBPenzIxufIIzExtx4jEE4OhFposqn3EbHFr92Q= -cloud.o-forge.io/core/oc-lib v0.0.0-20250219142942-5111c9c8bec7/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE= +cloud.o-forge.io/core/oc-lib v0.0.0-20250313155727-88c88cac5bc9 h1:mSFFPwil5Ih+RPBvn88MBerQMtsoHnOuyCZQaf91a34= +cloud.o-forge.io/core/oc-lib v0.0.0-20250313155727-88c88cac5bc9/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= @@ -37,15 +11,15 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd github.com/argoproj/argo-workflows/v3 v3.6.4 h1:5+Cc1UwaQE5ka3w7R3hxZ1TK3M6VjDEXA5WSQ/IXrxY= github.com/argoproj/argo-workflows/v3 v3.6.4/go.mod h1:2f5zB8CkbNCCO1od+kd1dWkVokqcuyvu+tc+Jwx1MZg= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/beego/beego/v2 v2.3.1 h1:7MUKMpJYzOXtCUsTEoXOxsDV/UcHw6CPbaWMlthVNsc= -github.com/beego/beego/v2 v2.3.1/go.mod h1:5cqHsOHJIxkq44tBpRvtDe59GuVRVv/9/tyVDxd5ce4= +github.com/beego/beego/v2 v2.3.7 h1:z4btKtjU/rfp5BiYHkGD2QPjK9i1E9GH+I7vfhn6Agk= +github.com/beego/beego/v2 v2.3.7/go.mod h1:5cqHsOHJIxkq44tBpRvtDe59GuVRVv/9/tyVDxd5ce4= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/biter777/countries v1.7.5 h1:MJ+n3+rSxWQdqVJU8eBy9RqcdH6ePPn4PJHocVWUa+Q= github.com/biter777/countries v1.7.5/go.mod h1:1HSpZ526mYqKJcpT5Ti1kcGQ0L0SrXWIaptUWjFfv2E= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -71,8 +45,8 @@ github.com/etcd-io/etcd v3.3.17+incompatible/go.mod h1:cdZ77EstHBwVtD6iTgzgvogwc github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/gabriel-vasile/mimetype v1.4.5 h1:J7wGKdGu33ocBOhGy0z653k/lFKLFDPJMG8Gql0kxn4= -github.com/gabriel-vasile/mimetype 
v1.4.5/go.mod h1:ibHel+/kbxn9x2407k1izTA1S81ku1z/DlgOW2QE0M4= +github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM= +github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -88,8 +62,8 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4Bx7ia+JlgcnOao= -github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k= +github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -102,15 +76,15 @@ github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= +github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -125,8 +99,8 @@ github.com/goraz/onion v0.1.3 h1:KhyvbDA2b70gcz/d5izfwTiOH8SmrvV43AsVzpng3n0= github.com/goraz/onion v0.1.3/go.mod h1:XEmz1XoBz+wxTgWB8NwuvRm4RAu3vKxvrmYtzK+XCuQ= github.com/grpc-ecosystem/grpc-gateway v1.16.0 
h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
+github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
@@ -139,20 +113,23 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
-github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
 github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
+github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
 github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
@@ -172,10 +149,10 @@ github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8
 github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/nats-io/nats.go v1.37.0 h1:07rauXbVnnJvv1gfIyghFEo6lUcYRY0WXc3x7x0vUxE=
-github.com/nats-io/nats.go v1.37.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
-github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
-github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
+github.com/nats-io/nats.go v1.41.0 h1:PzxEva7fflkd+n87OtQTXqCTyLfIIMFJBpyccHLE2Ko=
+github.com/nats-io/nats.go v1.41.0/go.mod h1:wV73x0FSI/orHPSYoyMeJB+KajMDoWyXmFaRrrYaaTo=
+github.com/nats-io/nkeys v0.4.10 h1:glmRrpCmYLHByYcePvnTBEAwawwapjCPMjy2huw20wc=
+github.com/nats-io/nkeys v0.4.10/go.mod h1:OjRrnIKnWBFl+s4YK5ChQfvHP2fxqZexrKJoVVyWB3U=
 github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
 github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
 github.com/nwtgck/go-fakelish v0.1.3 h1:bA8/xa9hQmzppexIhBvdmztcd/PJ4SPuAUTBdMKZ8G4=
@@ -193,26 +170,26 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
-github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
-github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
-github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
-github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
+github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
+github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM=
+github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg=
 github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
 github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
 github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
-github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
-github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
-github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
+github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
+github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
+github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
-github.com/shiena/ansicolor v0.0.0-20200904210342-c7312218db18 h1:DAYUYH5869yV94zvCES9F51oYtN5oGlwjxJJz7ZCnik=
-github.com/shiena/ansicolor v0.0.0-20200904210342-c7312218db18/go.mod h1:nkxAfR/5quYxwPZhyDxgasBMnRtBZd0FCEpawpjMUFg=
+github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 h1:v9ezJDHA1XGxViAUSIoO/Id7Fl63u6d0YmsAm+/p2hs=
+github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02/go.mod h1:RF16/A3L0xSa0oSERcnhd8Pu3IXSDZSK2gmGIMsttFE=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/skarademir/naturalsort v0.0.0-20150715044055-69a5d87bef62/go.mod h1:oIdVclZaltY1Nf7OQUkg1/2jImBJ+ZfKZuDIRSwk3p0=
@@ -253,16 +230,16 @@ github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfS
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-go.mongodb.org/mongo-driver v1.17.1 h1:Wic5cJIwJgSpBhe3lx3+/RybR5PiYRMpVFgO7cOHyIM=
-go.mongodb.org/mongo-driver v1.17.1/go.mod h1:wwWm/+BuOddhcq3n68LKRmgk2wXzmF6s0SFOa0GINL4=
+go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ=
+go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
 golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
-golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
+golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -282,12 +259,12 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
-golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
+golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
+golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
-golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
+golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -295,8 +272,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
-golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
+golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -312,18 +289,18 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
-golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
+golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
-golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
+golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
+golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
-golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
-golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
+golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
 golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
 golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -360,8 +337,8 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
 google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8=
 google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
-google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
-google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=