Modified how logging from the monitord container is implemented; the logic is simpler now, using the Argo client library for workflow events and Kubernetes client-go for the pods' logs.
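For context, the client-go pattern the message refers to looks roughly like the sketch below: open a follow-mode log stream for a pod and scan it line by line. This is a minimal illustration, not the oc-monitord code; the function name podLogTail, the "argo" namespace, and the pod name are hypothetical, and it assumes the process runs in-cluster.

// Illustrative sketch only: streaming one pod's logs with k8s.io/client-go.
package main

import (
	"bufio"
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// podLogTail follows the logs of a single pod and prints each line as it arrives.
func podLogTail(ctx context.Context, ns, podName string) error {
	config, err := rest.InClusterConfig() // assumes the monitor runs inside the cluster
	if err != nil {
		return err
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return err
	}
	// GetLogs builds the request; Stream opens the log as an io.ReadCloser.
	stream, err := clientset.CoreV1().Pods(ns).GetLogs(podName, &corev1.PodLogOptions{Follow: true}).Stream(ctx)
	if err != nil {
		return err
	}
	defer stream.Close()

	scanner := bufio.NewScanner(stream)
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
	return scanner.Err()
}

func main() {
	// Hypothetical namespace and pod name, for illustration only.
	if err := podLogTail(context.Background(), "argo", "oc-monitor-example-pod"); err != nil {
		fmt.Println(err)
	}
}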
main.go (139 changed lines)
@@ -3,19 +3,17 @@ package main
 import (
-	"bufio"
 	"encoding/base64"
-	"encoding/json"
 	"fmt"
 	"io"
 	"os"
 	"os/exec"
 	"regexp"
-	"slices"
 	"strings"
 	"sync"
 	"time"

 	"oc-monitord/conf"
 	"oc-monitord/models"
+	l "oc-monitord/logger"
 	u "oc-monitord/utils"
 	"oc-monitord/workflow_builder"

@@ -45,7 +43,6 @@ import (

 var logger zerolog.Logger
 var wf_logger zerolog.Logger
-var pods_logger zerolog.Logger
 var parser argparse.Parser
 var workflowName string

@@ -90,7 +87,7 @@ func main() {
 		logger.Error().Msg("Could not retrieve workflow " + conf.GetConfig().WorkflowID + " from oc-catalog API")
 	}

-	builder, stepMax, err := new_wf.ExportToArgo(exec.ExecutionsID, conf.GetConfig().Timeout)
+	builder, _, err := new_wf.ExportToArgo(exec.ExecutionsID, conf.GetConfig().Timeout) // Removed stepMax so far, I don't know if we need it anymore
 	if err != nil {
 		logger.Error().Msg("Could not create the Argo file for " + conf.GetConfig().WorkflowID)
 		logger.Error().Msg(err.Error())
@@ -105,22 +102,21 @@ func main() {

 	wf_logger := u.GetWFLogger(workflowName)
 	wf_logger.Debug().Msg("Testing argo name")
-	_ = stepMax

 	if conf.GetConfig().KubeHost == "" {
 		// Not in a k8s environment, get conf from parameters
 		fmt.Println("Executes outside of k8s")
-		executeOutside(argoFilePath, stepMax, builder.Workflow)
+		executeOutside(argoFilePath, builder.Workflow)
 	} else {
 		// Executed in a k8s environment
 		fmt.Println("Executes inside a k8s")
 		// executeInside(exec.GetID(), "argo", argo_file_path, stepMax) // commenting to use conf.ExecutionID instead of exec.GetID()
-		executeInside(conf.GetConfig().ExecutionID, conf.GetConfig().ExecutionID, argoFilePath, stepMax)
+		executeInside(conf.GetConfig().ExecutionID, conf.GetConfig().ExecutionID, argoFilePath)
 	}
 }

 // So far we only log the output from
-func executeInside(execID string, ns string, argo_file_path string, stepMax int) {
+func executeInside(execID string, ns string, argo_file_path string) {
 	t, err := tools2.NewService(conf.GetConfig().Mode)
 	if err != nil {
 		logger.Error().Msg("Could not create KubernetesTool")
@@ -128,14 +124,20 @@ func executeInside(execID string, ns string, argo_file_path string, stepMax int)
 	}

 	name, err := t.CreateArgoWorkflow(argo_file_path, ns)
 	_ = name
 	if err != nil {
 		logger.Error().Msg("Could not create argo workflow : " + err.Error())
 		fmt.Println("CA :" + conf.GetConfig().KubeCA)
 		fmt.Println("Cert :" + conf.GetConfig().KubeCert)
 		fmt.Println("Data :" + conf.GetConfig().KubeData)
 		return
 	} else {
-		argoLogs := models.NewArgoLogs(workflowName, "argo", stepMax)
-		argoLogs.StartStepRecording(argoLogs.NewWatch(), wf_logger)
-		err := t.LogWorkflow(execID, ns, name, argo_file_path, stepMax, argoLogs.NewWatch(), argoLogs.NewWatch(), argoLogs, []string{}, logWorkflow)
+		watcher, err := t.GetArgoWatch(execID, workflowName)
+		if err != nil {
+			logger.Error().Msg("Could not retrieve Watcher : " + err.Error())
+		}
+
+		l.LogKubernetesArgo(name, execID, watcher)
 		if err != nil {
 			logger.Error().Msg("Could not log workflow : " + err.Error())
 		}
@@ -143,11 +145,9 @@ func executeInside(execID string, ns string, argo_file_path string, stepMax int)

 }

-func executeOutside(argo_file_path string, stepMax int, workflow workflow_builder.Workflow) {
-	// var stdoutSubmit, stderrSubmit, stdout_logs, stderr_logs io.ReadCloser
+func executeOutside(argo_file_path string, workflow workflow_builder.Workflow) {
+	var stdoutSubmit, stderrSubmit io.ReadCloser
+	var stdoutLogs, stderrLogs io.ReadCloser
 	// var stderr io.ReadCloser
 	var wg sync.WaitGroup
 	var err error

@@ -158,29 +158,20 @@ func executeOutside(argo_file_path string, stepMax int, workflow workflow_builder.Workflow) {
 		wf_logger.Error().Msg("Could not retrieve stdoutpipe " + err.Error())
 		return
 	}

-	// //======== Code block that implemented a method that logs both locally and container executed wf
-	// // Need to be improved, did not log well for local executions
-	// split := strings.Split(argo_file_path, "_")
-	// argoLogs := models.NewArgoLogs(split[0], conf.GetConfig().ExecutionID, stepMax)
-	// argoLogs.StartStepRecording(argoLogs.NewWatch(), wf_logger)
-	// argoLogs.IsStreaming = true // Used to determine wether or not the logs are read from a docker container or on localhost
-	// // go logWorkflow(argo_file_path, stepMax, stdout, argoLogs.NewWatch(), argoLogs.NewWatch(), argoLogs, []string{}, &wg)
-	// // =======
+	var steps []string
+	for _, template := range workflow.Spec.Templates {
+		steps = append(steps, template.Name)
+	}

 	cmdLogs := exec.Command("argo", "logs", "oc-monitor-"+workflowName, "-n", conf.GetConfig().ExecutionID, "--follow","--no-color")
 	if stdoutLogs, err = cmdLogs.StdoutPipe(); err != nil {
 		wf_logger.Error().Msg("Could not retrieve stdoutpipe for 'argo logs'" + err.Error())
 		return
 	}

-	var steps []string
-	for _, template := range workflow.Spec.Templates {
-		steps = append(steps, template.Name)
-	}
-
-	go models.LogLocalWorkflow(workflowName, stdoutSubmit, &wg)
-	go models.LogPods(workflowName, stdoutLogs, steps, &wg)
+	go l.LogLocalWorkflow(workflowName, stdoutSubmit, &wg)
+	go l.LogLocalPod(workflowName, stdoutLogs, steps, &wg)

 	fmt.Println("Starting argo submit")
 	if err := cmdSubmit.Start(); err != nil {
@@ -209,68 +200,6 @@ func executeOutside(argo_file_path string, stepMax int, workflow workflow_builder.Workflow) {
 	wg.Wait()
 }

-// !!!! BUGGED !!!!
-// Should be refactored to create a function dedicated to logging output from execution in a container
-// LogLocalWorkflow() has been implemented to be used when oc-monitord is executed locally
-
-// We could improve this function by creating an object with the same attribute as the output
-// and only send a new log if the current object has different values than the previous
-func logWorkflow(argo_file_path string, stepMax int, pipe io.ReadCloser,
-	current_watch *models.ArgoWatch, previous_watch *models.ArgoWatch,
-	argoLogs *models.ArgoLogs, seen []string, wg *sync.WaitGroup) {
-	scanner := bufio.NewScanner(pipe)
-	count := 0
-	see := ""
-	seeit := 0
-	for scanner.Scan() {
-		log := scanner.Text()
-		if strings.Contains(log, "capturing logs") && count == 0 {
-			if !argoLogs.IsStreaming {
-				wg.Add(1)
-			}
-			seeit++
-		} else if count == 0 && !argoLogs.IsStreaming {
-			break
-		}
-		if count == 1 {
-			see = log
-			if slices.Contains(argoLogs.Seen, see) && !argoLogs.IsStreaming {
-				wg.Done()
-				seeit--
-				break
-			}
-		}
-		if !slices.Contains(current_watch.Logs, log) {
-			current_watch.Logs = append(current_watch.Logs, strings.ReplaceAll(log, "\"", ""))
-		}
-		count++
-		if strings.Contains(log, "sub-process exited") || argoLogs.IsStreaming {
-			current_watch = argoLogs.StopStepRecording(current_watch)
-			argoLogs.Seen = append(argoLogs.Seen, see)
-			if checkStatus(current_watch, previous_watch, argoLogs) {
-				count = 0
-				if !argoLogs.IsStreaming {
-					wg.Done()
-				}
-				seeit--
-			}
-			jsonified, err := json.Marshal(current_watch)
-			if err != nil {
-				logger.Error().Msg("Could not create watch log")
-			}
-			if current_watch.Status == "Failed" {
-				wf_logger.Error().Msg(string(jsonified))
-			} else {
-				wf_logger.Info().Msg(string(jsonified))
-			}
-			previous_watch = current_watch
-			current_watch = &models.ArgoWatch{}
-			if argoLogs.IsStreaming {
-				current_watch.Logs = []string{}
-			}
-		}
-	}
-}

 func loadConfig(is_k8s bool, parser *argparse.Parser) {
 	var o *onion.Onion
@@ -375,26 +304,6 @@ func getContainerName(argo_file string) string {
 	return container_name
 }

-// Uses the ArgoWatch object to update status of the workflow execution object
-func checkStatus(current *models.ArgoWatch, previous *models.ArgoWatch, argoLogs *models.ArgoLogs) bool {
-	if previous == nil || current.Status != previous.Status || argoLogs.IsStreaming {
-		argoLogs.StepCount += 1
-		if len(current.Logs) > 0 {
-			newLogs := []string{}
-			for _, log := range current.Logs {
-				if !slices.Contains(argoLogs.Logs, log) {
-					newLogs = append(newLogs, log)
-				}
-			}
-			updateStatus(current.Status, strings.Join(newLogs, "\n"))
-			current.Logs = newLogs
-			argoLogs.Logs = append(argoLogs.Logs, newLogs...)
-		} else {
-			updateStatus(current.Status, "")
-		}
-	}
-	return previous == nil || current.Status != previous.Status || argoLogs.IsStreaming
-}

 func updateStatus(status string, log string) {
 	exec_id := conf.GetConfig().ExecutionID
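The executeInside path above swaps the old stepMax/logWorkflow plumbing for a watcher obtained through the Argo client library (GetArgoWatch feeding LogKubernetesArgo). A minimal sketch of that watch pattern follows, assuming names (watchWorkflow, the "argo" namespace) and a flow that are illustrative only, not the repository's actual implementation:

// Illustrative sketch only: following one Argo Workflow with the
// github.com/argoproj/argo-workflows/v3 generated clientset.
package main

import (
	"context"
	"fmt"

	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
	wfclientset "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

// watchWorkflow follows a Workflow by name and logs each phase change
// until the workflow reaches a terminal phase.
func watchWorkflow(ctx context.Context, ns, name string) error {
	config, err := rest.InClusterConfig() // assumes in-cluster execution
	if err != nil {
		return err
	}
	clientset, err := wfclientset.NewForConfig(config)
	if err != nil {
		return err
	}
	watcher, err := clientset.ArgoprojV1alpha1().Workflows(ns).Watch(ctx, metav1.ListOptions{
		FieldSelector: "metadata.name=" + name,
	})
	if err != nil {
		return err
	}
	defer watcher.Stop()

	for event := range watcher.ResultChan() {
		wf, ok := event.Object.(*wfv1.Workflow)
		if !ok {
			continue
		}
		fmt.Printf("workflow %s is %s\n", wf.Name, wf.Status.Phase)
		if wf.Status.Phase.Completed() { // Succeeded, Failed or Error
			return nil
		}
	}
	return nil
}

func main() {
	// Hypothetical namespace and workflow name, for illustration only.
	if err := watchWorkflow(context.Background(), "argo", "oc-monitor-example"); err != nil {
		fmt.Println(err)
	}
}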