Execution workflow execute change

This commit is contained in:
mr
2026-02-25 13:20:44 +01:00
parent 921ee900ce
commit 84f6af6e44
2 changed files with 76 additions and 115 deletions

View File

@@ -11,6 +11,8 @@ import (
"sync"
"time"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/models/common/enum"
"github.com/rs/zerolog"
"k8s.io/apimachinery/pkg/watch"
@@ -54,7 +56,7 @@ func NewArgoLogs(name string, namespace string, stepMax int) *ArgoLogs {
}
}
// An object to monitor and log the output of an argo submit
// An object to monitor and log the output of an argo submit
type ArgoLogs struct {
Name string
Namespace string
@@ -93,22 +95,21 @@ func (a *ArgoLogs) StartStepRecording(current_watch *ArgoWatch, logger zerolog.L
a.Started = time.Now()
}
type ArgoPodLog struct {
PodName string
Step string
Message string
PodName string
Step string
Message string
}
func NewArgoPodLog(name string, step string, msg string) ArgoPodLog {
return ArgoPodLog{
PodName: name,
Step: step,
Step: step,
Message: msg,
}
}
func LogKubernetesArgo(wfName string, namespace string, watcher watch.Interface) {
func LogKubernetesArgo(wfName string, execID string, namespace string, watcher watch.Interface) {
var argoWatcher *ArgoWatch
var pods []string
var node wfv1.NodeStatus
@@ -117,38 +118,38 @@ func LogKubernetesArgo(wfName string, namespace string, watcher watch.Interface)
wfl.Debug().Msg("Starting to log " + wfName)
var wg sync.WaitGroup
for event := range (watcher.ResultChan()) {
for event := range watcher.ResultChan() {
wf, ok := event.Object.(*wfv1.Workflow)
if !ok {
wfl.Error().Msg("unexpected type")
continue
}
if len(wf.Status.Nodes) == 0 {
wfl.Info().Msg("No node status yet") // The first output of the channel doesn't contain Nodes so we skip it
wfl.Info().Msg("No node status yet") // The first output of the channel doesn't contain Nodes so we skip it
continue
}
conditions := retrieveCondition(wf)
conditions := retrieveCondition(wf)
// Retrieving the Status for the main node, which is named after the workflow
if node, ok = wf.Status.Nodes[wfName]; !ok {
bytified, _ := json.MarshalIndent(wf.Status.Nodes,"","\t")
bytified, _ := json.MarshalIndent(wf.Status.Nodes, "", "\t")
wfl.Fatal().Msg("Could not find the " + wfName + " node in \n" + string(bytified))
}
now := time.Now()
start, _ := time.Parse(time.RFC3339, node.StartedAt.String() )
start, _ := time.Parse(time.RFC3339, node.StartedAt.String())
duration := now.Sub(start)
newWatcher := ArgoWatch{
Name: node.Name,
Namespace: namespace,
Status: string(node.Phase),
Created: node.StartedAt.String(),
Started: node.StartedAt.String(),
Progress: string(node.Progress),
Duration: duration.String(),
Name: node.Name,
Namespace: namespace,
Status: string(node.Phase),
Created: node.StartedAt.String(),
Started: node.StartedAt.String(),
Progress: string(node.Progress),
Duration: duration.String(),
Conditions: conditions,
}
@@ -156,23 +157,26 @@ func LogKubernetesArgo(wfName string, namespace string, watcher watch.Interface)
argoWatcher = &newWatcher
}
if !newWatcher.Equals(argoWatcher){
if !newWatcher.Equals(argoWatcher) {
jsonified, _ := json.Marshal(newWatcher)
wfl.Info().Msg(string(jsonified))
argoWatcher = &newWatcher
}
// I don't think we need to use WaitGroup here, because the loop itself
// I don't think we need to use WaitGroup here, because the loop itself
// acts as blocking process for the main thread, because Argo watch never closes the channel
for _, pod := range wf.Status.Nodes{
if !slices.Contains(pods,pod.Name){
pl := wfl.With().Str("pod", pod.Name).Logger()
if wfName == pod.Name { pods = append(pods, pod.Name); continue } // One of the node is the Workflow, the others are the pods so don't try to log on the wf name
pl.Info().Msg("Found a new pod to log : " + pod.Name)
for _, pod := range wf.Status.Nodes {
if !slices.Contains(pods, pod.Name) {
pl := wfl.With().Str("pod", pod.Name).Logger()
if wfName == pod.Name {
pods = append(pods, pod.Name)
continue
} // One of the node is the Workflow, the others are the pods so don't try to log on the wf name
pl.Info().Msg("Found a new pod to log : " + pod.Name)
wg.Add(1)
go logKubernetesPods(namespace, wfName, pod.Name, pl, &wg)
pods = append(pods, pod.Name)
}
}
}
// Stop listening to the chan when the Workflow is completed or something bad happened
@@ -180,11 +184,17 @@ func LogKubernetesArgo(wfName string, namespace string, watcher watch.Interface)
wfl.Info().Msg(wfName + " workflow completed")
wg.Wait()
wfl.Info().Msg(wfName + " exiting")
oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), nil).UpdateOne(map[string]interface{}{
"state": enum.SUCCESS.EnumIndex(),
}, execID)
break
}
if node.Phase.FailedOrError() {
wfl.Error().Msg(wfName + " has failed, please refer to the logs")
wfl.Error().Msg(node.Message)
oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), nil).UpdateOne(map[string]interface{}{
"state": enum.FAILURE.EnumIndex(),
}, execID)
wfl.Error().Msg(node.Message)
break
}
}
@@ -200,36 +210,36 @@ func retrieveCondition(wf *wfv1.Workflow) (c Conditions) {
}
}
return
return
}
// Function needed to be executed as a go thread
func logKubernetesPods(executionId string, wfName string,podName string, logger zerolog.Logger, wg *sync.WaitGroup){
// Function needed to be executed as a go thread
func logKubernetesPods(executionId string, wfName string, podName string, logger zerolog.Logger, wg *sync.WaitGroup) {
defer wg.Done()
s := strings.Split(podName, ".")
name := s[0] + "-" + s[1]
step := s[1]
k, err := tools.NewKubernetesTool()
if err != nil {
logger.Error().Msg("Could not get Kubernetes tools")
return
}
reader, err := k.GetPodLogger(executionId, wfName, podName)
if err != nil {
logger.Error().Msg(err.Error())
return
}
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
log := scanner.Text()
podLog := NewArgoPodLog(name,step,log)
podLog := NewArgoPodLog(name, step, log)
jsonified, _ := json.Marshal(podLog)
logger.Info().Msg(string(jsonified))
}
}
}