Corrected how some parameters were passed to log the right resources

This commit is contained in:
pb 2025-05-15 12:05:52 +02:00
parent f3e84a4f43
commit 03675d09ae
4 changed files with 10 additions and 9 deletions

View File

@@ -106,7 +106,7 @@ func NewArgoPodLog(name string, step string, msg string) ArgoPodLog {
}
}
-func LogKubernetesArgo(wfName string, executionID string, watcher watch.Interface) {
+func LogKubernetesArgo(wfName string, namespace string, watcher watch.Interface) {
var argoWatcher *ArgoWatch
var pods []string
var node wfv1.NodeStatus
@@ -139,7 +139,7 @@ func LogKubernetesArgo(wfName string, executionID string, watcher watch.Interface) {
newWatcher := ArgoWatch{
Name: node.Name,
-Namespace: executionID,
+Namespace: namespace,
Status: string(node.Phase),
Created: node.StartedAt.String(),
Started: node.StartedAt.String(),
@@ -164,7 +164,7 @@ func LogKubernetesArgo(wfName string, executionID string, watcher watch.Interface) {
if !slices.Contains(pods,pod.Name){
pl := wfl.With().Str("pod", pod.Name).Logger()
if wfName == pod.Name { pods = append(pods, pod.Name); continue } // One of the nodes is the Workflow itself; the others are pods, so don't try to log on the workflow name
-go logKubernetesPods(executionID, wfName, pod.Name, pl)
+go logKubernetesPods(namespace, wfName, pod.Name, pl)
pods = append(pods, pod.Name)
}
}
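For context, the function renamed here consumes a Kubernetes watch.Interface and walks the workflow's node statuses; after this commit it is the namespace, not the execution ID, that ends up in the ArgoWatch entries and the per-pod loggers. Below is a minimal sketch of that consumption loop, assuming the usual Argo v3 and apimachinery imports; drainWorkflowEvents and the plain fmt output are illustrative stand-ins, not code from this repository.

package main

import (
    "fmt"

    wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
    "k8s.io/apimachinery/pkg/watch"
)

// drainWorkflowEvents is a hypothetical helper: it reads events from the
// watch, keeps only Workflow objects, and reports every node together with
// the namespace the watch was opened in (the point of this commit).
func drainWorkflowEvents(namespace string, watcher watch.Interface) {
    defer watcher.Stop()
    for event := range watcher.ResultChan() {
        wf, ok := event.Object.(*wfv1.Workflow)
        if !ok {
            continue // not a Workflow event
        }
        for _, node := range wf.Status.Nodes {
            fmt.Printf("ns=%s node=%s phase=%s\n", namespace, node.Name, node.Phase)
        }
    }
}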

View File

@@ -134,12 +134,12 @@ func executeInside(execID string, ns string, argo_file_path string) {
logger.Info().Msg(fmt.Sprint("Data :" + conf.GetConfig().KubeData))
return
} else {
-watcher, err := t.GetArgoWatch(execID, workflowName)
+watcher, err := t.GetArgoWatch(ns, workflowName)
if err != nil {
logger.Error().Msg("Could not retrieve Watcher : " + err.Error())
}
-l.LogKubernetesArgo(name, execID, watcher)
+l.LogKubernetesArgo(name, ns, watcher)
if err != nil {
logger.Error().Msg("Could not log workflow : " + err.Error())
}
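Taken together with the first file, the corrected wiring is: open the watch in the execution's namespace, then hand that same namespace to the logger. A compile-ready sketch of that shape follows; the argoWatcherSource interface, followWorkflow, and the logFn callback are hypothetical stand-ins for this repo's KubernetesTools and l.LogKubernetesArgo.

package main

import (
    "log"

    "k8s.io/apimachinery/pkg/watch"
)

// argoWatcherSource is a hypothetical stand-in for the repo's KubernetesTools:
// anything able to open a watch for a workflow in a given namespace.
type argoWatcherSource interface {
    GetArgoWatch(namespace string, wfName string) (watch.Interface, error)
}

// followWorkflow mirrors the corrected call order in executeInside above:
// the namespace (ns), not the execution ID, is used both to open the watch
// and as the value passed on to the logging side.
func followWorkflow(src argoWatcherSource, name, ns, workflowName string,
    logFn func(wfName, namespace string, w watch.Interface)) {
    watcher, err := src.GetArgoWatch(ns, workflowName)
    if err != nil {
        log.Println("Could not retrieve Watcher : " + err.Error())
        return // illustrative: nothing to follow without a watcher
    }
    logFn(name, ns, watcher) // stands in for l.LogKubernetesArgo(name, ns, watcher)
}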

View File

@@ -119,12 +119,12 @@ func (k *KubernetesTools) GetArgoWatch(executionId string, wfName string) (watch.Interface, error) {
wfl := utils.GetWFLogger("")
wfl.Debug().Msg("Starting argo watch with argo lib")
options := metav1.ListOptions{FieldSelector: "metadata.name=oc-monitor-"+wfName}
-watcher, err := k.VersionedSet.ArgoprojV1alpha1().Workflows(executionId).Watch(context.TODO(), options)
+watcher, err := k.VersionedSet.ArgoprojV1alpha1().Workflows(executionId).Watch(context.Background(), options)
if err != nil {
return nil, errors.New("Error executing 'argo watch " + wfName + " -n " + executionId + " with ArgoprojV1alpha1 client")
}
return watcher, nil
}
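The k.VersionedSet call above is the generated Argo Workflows clientset; after this commit, callers pass the namespace in through the executionId parameter. For reference, a minimal sketch of the same field-selector watch with that clientset, assuming the client construction (rest config, versioned.NewForConfig) happens elsewhere; watchMonitorWorkflow is an illustrative name.

package main

import (
    "context"

    wfclientset "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/watch"
)

// watchMonitorWorkflow opens the same watch as GetArgoWatch above: only the
// workflow named "oc-monitor-<wfName>" (prefix taken from the diff) in the
// given namespace, using a background context.
func watchMonitorWorkflow(cs *wfclientset.Clientset, namespace, wfName string) (watch.Interface, error) {
    options := metav1.ListOptions{FieldSelector: "metadata.name=oc-monitor-" + wfName}
    return cs.ArgoprojV1alpha1().Workflows(namespace).Watch(context.Background(), options)
}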
@@ -139,7 +139,8 @@ func (k *KubernetesTools) GetPodLogger(ns string, wfName string, nodeName string
return nil, fmt.Errorf("failed to list pods: " + err.Error())
}
if len(pods.Items) == 0 {
return nil, fmt.Errorf("no pods found with label workflows.argoproj.io/node-name=" + nodeName)
return nil, fmt.Errorf("no pods found with label workflows.argoproj.io/workflow="+ wfName + " no pods found with label workflows.argoproj.io/node-name=" + nodeName + " in namespace " + ns)
}
for _, pod := range pods.Items {
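The loop cut off above then streams logs from the matched pods. For reference, here is a sketch of the lookup-and-stream pattern with client-go, assuming the same Argo-managed labels the new error message names and Argo's conventional "main" container; streamNodeLogs is an illustrative name, not this repo's implementation.

package main

import (
    "context"
    "fmt"
    "io"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// streamNodeLogs finds the pod(s) for one workflow node via label selectors
// and follows the main container's log stream.
func streamNodeLogs(ctx context.Context, cs kubernetes.Interface, ns, wfName, nodeName string) (io.ReadCloser, error) {
    selector := "workflows.argoproj.io/workflow=" + wfName +
        ",workflows.argoproj.io/node-name=" + nodeName // assumption: the labels this repo's pod lookup relies on
    pods, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: selector})
    if err != nil {
        return nil, err
    }
    if len(pods.Items) == 0 {
        return nil, fmt.Errorf("no pods matching %s in namespace %s", selector, ns)
    }
    opts := &corev1.PodLogOptions{Container: "main", Follow: true}
    return cs.CoreV1().Pods(ns).GetLogs(pods.Items[0].Name, opts).Stream(ctx)
}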

View File

@@ -44,7 +44,7 @@ func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID st
tools.POST :"/:id",
},
tools.ADMIRALTY_KUBECONFIG: {
tools.GET:"/:id/" + remotePeerID,
tools.GET:"/:id",
},
tools.ADMIRALTY_SECRET: {
tools.POST:"/:id/" + remotePeerID,