modified the conditions that start the Kubernetes pod log retrieval and added WaitGroups to the logging method so that the daemon waits for the logs to be retrieved before stopping

This commit is contained in:
pb
2025-05-19 18:56:41 +02:00
parent 03675d09ae
commit e1b0ad089c
2 changed files with 35 additions and 9 deletions
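
The WaitGroup change itself is not visible in the hunk below; the following is a minimal sketch of the pattern the commit message describes, assuming hypothetical names (logWait, streamLogs, stopDaemon) rather than the repository's actual ones: each log stream is drained in its own goroutine tracked by a sync.WaitGroup, and the daemon's stop path blocks on Wait() so no log lines are dropped.

package main

import (
	"io"
	"os"
	"strings"
	"sync"
)

// logWait is shared between the logging method and the daemon's stop path.
var logWait sync.WaitGroup

// streamLogs drains one pod log stream in its own goroutine and signals the
// WaitGroup once the stream has been fully copied.
func streamLogs(rc io.ReadCloser) {
	logWait.Add(1)
	go func() {
		defer logWait.Done()
		defer rc.Close()
		io.Copy(os.Stdout, rc) // copy until EOF so no log lines are lost
	}()
}

// stopDaemon blocks until every registered log stream has finished.
func stopDaemon() {
	logWait.Wait()
	// ... release resources and exit
}

func main() {
	// Stand-in for the io.ReadCloser returned by GetPodLogger.
	streamLogs(io.NopCloser(strings.NewReader("example log line\n")))
	stopDaemon()
}
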


@@ -132,17 +132,19 @@ func (k *KubernetesTools) GetArgoWatch(executionId string, wfName string) (watch
func (k *KubernetesTools) GetPodLogger(ns string, wfName string, nodeName string) (io.ReadCloser, error) {
var targetPod v1.Pod
pods, err := k.Set.CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{
LabelSelector: "workflows.argoproj.io/workflow="+wfName,
})
if err != nil {
return nil, fmt.Errorf("failed to list pods: " + err.Error())
}
if len(pods.Items) == 0 {
return nil, fmt.Errorf("no pods found with label workflows.argoproj.io/workflow=" + wfName + " or annotation workflows.argoproj.io/node-name=" + nodeName + " in namespace " + ns)
}
fmt.Println("Searching for workflows.argoproj.io/node-name=" + nodeName)
for _, pod := range pods.Items {
if pod.Annotations["workflows.argoproj.io/node-name"] == nodeName {
targetPod = pod
@@ -171,9 +173,12 @@ func (k *KubernetesTools) testPodReady(pod v1.Pod, ns string) {
}
var initialized bool
fmt.Println("============= \n " + pod.Name + " not ready yet \n==============")
for _, cond := range pod.Status.Conditions {
if cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue {
// It seems that for remote pods the pod reaches the Succeeded phase before the Ready condition appears in .status.conditions, so we added the OR condition
if (cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue) || pod.Status.Phase == v1.PodSucceeded {
initialized = true
fmt.Println("============= \n " + pod.Name + " ready \n==============")
return
}
}
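
For reference, the combined readiness test that the new condition expresses could also be written as a standalone helper. This is a sketch under the assumption that "ready to stream logs" means either the PodReady condition is True or the pod has already reached the Succeeded phase (the remote-pod case noted in the comment); the package and helper names are not from the repository.

package k8stools

import v1 "k8s.io/api/core/v1"

// podReadyOrSucceeded reports whether logs can be fetched from the pod:
// either the PodReady condition is True, or the pod already finished
// (remote pods may reach Succeeded before the Ready condition shows up).
func podReadyOrSucceeded(pod v1.Pod) bool {
	if pod.Status.Phase == v1.PodSucceeded {
		return true
	}
	for _, cond := range pod.Status.Conditions {
		if cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue {
			return true
		}
	}
	return false
}
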