// Package main implements oc-monitord: it builds an Argo workflow YAML from an
// OpenCloud workflow execution, submits it to a Kubernetes cluster, and
// forwards the produced logs to a Loki database.
package main

import (
	"encoding/base64"
	"fmt"
	"os"
	"regexp"
	"strings"

	"oc-monitord/conf"
	l "oc-monitord/logger"
	tools2 "oc-monitord/tools"
	u "oc-monitord/utils"
	"oc-monitord/workflow_builder"

	oclib "cloud.o-forge.io/core/oc-lib"
	"cloud.o-forge.io/core/oc-lib/config"
	"cloud.o-forge.io/core/oc-lib/models/booking"
	"cloud.o-forge.io/core/oc-lib/models/common/enum"
	"cloud.o-forge.io/core/oc-lib/models/peer"
	"cloud.o-forge.io/core/oc-lib/models/utils"
	"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
	"cloud.o-forge.io/core/oc-lib/tools"

	"github.com/akamensky/argparse"
	"github.com/google/uuid"
	"github.com/rs/zerolog"
)

// Command-line args:
// - url: Loki URL (default: "http://127.0.0.1:3100")
// - execution: Workflow Execution ID (required) to identify the current execution, allows to retrieve Workflow
// - mongo: MongoDB URL (default: "mongodb://127.0.0.1:27017")
// - db: MongoDB database name (default: "DC_myDC")
// - timeout: Execution timeout (default: -1)

var (
	logger    zerolog.Logger
	wf_logger zerolog.Logger
	parser    argparse.Parser
	// workflowName is the Argo workflow/container name, derived from the
	// generated YAML file path; consumed by executeInside.
	workflowName string
)

// containerNameRe matches the first "word-word" pair in the Argo file path,
// which by convention is the workflow/container name. Compiled once at package
// scope rather than on every getContainerName call.
var containerNameRe = regexp.MustCompile("([a-zA-Z]+-[a-zA-Z]+)")

func main() {
	// Load the file/environment configuration before parsing CLI flags so the
	// flags can override what the loader provides.
	config.GetConfLoader("oc-monitord")

	parser = *argparse.NewParser("oc-monitord", "Launch the execution of a workflow given as a parameter and sends the produced logs to a loki database")
	loadConfig(&parser)
	oclib.InitDaemon("oc-monitord")

	// Start the centralised NATS subscriber for PB_CONSIDERS confirmations.
	workflow_builder.StartConsidersListener()

	logger = u.GetLogger()
	logger.Debug().Msg("Loki URL : " + config.GetConfig().LokiUrl)
	logger.Info().Msg("Workflow executed : " + conf.GetConfig().ExecutionID)

	exec := u.GetExecution(conf.GetConfig().ExecutionID)
	if exec == nil {
		// BUG FIX: this previously used Fatal(), which exits the process
		// immediately and made the FAILURE-state update below unreachable.
		logger.Error().Msg("Could not retrieve workflow ID from execution ID " + conf.GetConfig().ExecutionID + " on peer " + conf.GetConfig().PeerID)
		markExecutionFailed(conf.GetConfig().ExecutionID)
		return
	}
	conf.GetConfig().WorkflowID = exec.WorkflowID

	logger.Info().Msg("Starting construction of yaml argo for workflow :" + exec.WorkflowID)

	// Ensure the output directory for generated Argo manifests exists.
	if _, err := os.Stat("./argo_workflows/"); os.IsNotExist(err) {
		os.Mkdir("./argo_workflows/", 0755)
		logger.Info().Msg("Created argo_workflows/")
	}

	// Build the Argo manifest from the workflow stored in the catalog.
	new_wf := workflow_builder.WorflowDB{}
	err := new_wf.LoadFrom(conf.GetConfig().WorkflowID)
	if err != nil {
		logger.Error().Msg("Could not retrieve workflow " + conf.GetConfig().WorkflowID + " from oc-catalog API")
	}

	// Removed stepMax so far, I don't know if we need it anymore
	builder, _, err := new_wf.ExportToArgo(exec, conf.GetConfig().Timeout)
	if err != nil {
		logger.Error().Msg("Could not create the Argo file for " + conf.GetConfig().WorkflowID)
		logger.Error().Msg(err.Error())
		markExecutionFailed(exec.GetID())
		return
	}

	argoFilePath, err := builder.CompleteBuild(exec.ExecutionsID)
	if err != nil {
		logger.Error().Msg("Error when completing the build of the workflow: " + err.Error())
		markExecutionFailed(exec.GetID())
		return
	}

	workflowName = getContainerName(argoFilePath)

	if conf.GetConfig().KubeHost == "" {
		// Not in a k8s environment: there is no way to deploy the Argo workflow.
		panic("can't exec with no kube for argo deployment")
	}
	// Executed in a k8s environment.
	logger.Info().Msg("Executes inside a k8s")
	executeInside(exec.ExecutionsID, exec.GetID(), argoFilePath)
}

// markExecutionFailed persists the FAILURE state for the given workflow
// execution so other services do not wait on it forever. Factored out of the
// three call sites that previously duplicated this update.
func markExecutionFailed(execID string) {
	oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), nil).UpdateOne(map[string]interface{}{
		"state": enum.FAILURE.EnumIndex(),
	}, execID)
}

// executeInside submits the generated Argo workflow to Kubernetes and streams
// its logs until completion.
//   - ns: namespace to create the workflow in (the execution's ExecutionsID)
//   - execID: execution identifier, used to persist a FAILURE state on error
//   - argoFilePath: path of the YAML manifest produced by the builder
func executeInside(ns string, execID string, argoFilePath string) {
	t, err := tools2.NewService(conf.GetConfig().Mode)
	if err != nil {
		logger.Error().Msg("Could not create KubernetesTool : " + err.Error())
		return
	}

	name, err := t.CreateArgoWorkflow(argoFilePath, ns)
	if err != nil {
		logger.Error().Msg("Could not create argo workflow : " + err.Error())
		// Dump the kube credentials that were in effect to help diagnose
		// authentication failures.
		logger.Info().Msg(fmt.Sprint("CA :" + conf.GetConfig().KubeCA))
		logger.Info().Msg(fmt.Sprint("Cert :" + conf.GetConfig().KubeCert))
		logger.Info().Msg(fmt.Sprint("Data :" + conf.GetConfig().KubeData))
		return
	}

	watcher, err := t.GetArgoWatch(ns, workflowName)
	if err != nil {
		logger.Error().Msg("Could not retrieve Watcher : " + err.Error())
		markExecutionFailed(execID)
		// BUG FIX: previously fell through and called LogKubernetesArgo with a
		// nil watcher.
		return
	}

	l.LogKubernetesArgo(name, execID, ns, ns, watcher)
	logger.Info().Msg("Finished, exiting...")
}

// loadConfig parses the command-line flags and copies them into the process
// configuration. Exits with status 1 when parsing fails.
func loadConfig(parser *argparse.Parser) {
	mode := parser.String("M", "mode", &argparse.Options{Required: false, Default: "", Help: "Mode of the execution"})
	execution := parser.String("e", "execution", &argparse.Options{Required: true, Help: "Execution ID of the workflow to request from oc-catalog API"})
	peer := parser.String("p", "peer", &argparse.Options{Required: false, Default: "", Help: "Peer ID of the workflow to request from oc-catalog API"})
	timeout := parser.Int("t", "timeout", &argparse.Options{Required: false, Default: -1, Help: "Timeout for the execution of the workflow"})
	ca := parser.String("c", "ca", &argparse.Options{Required: false, Default: "", Help: "CA file for the Kubernetes cluster"})
	cert := parser.String("C", "cert", &argparse.Options{Required: false, Default: "", Help: "Cert file for the Kubernetes cluster"})
	data := parser.String("D", "data", &argparse.Options{Required: false, Default: "", Help: "Data file for the Kubernetes cluster"})
	host := parser.String("H", "host", &argparse.Options{Required: false, Default: "", Help: "Host for the Kubernetes cluster"})
	port := parser.String("P", "port", &argparse.Options{Required: false, Default: "6443", Help: "Port for the Kubernetes cluster"})
	// argoHost := parser.String("h", "argoHost", &argparse.Options{Required: false, Default: "", Help: "Host where Argo is running from"}) // can't use -h because its reserved to help

	if err := parser.Parse(os.Args); err != nil {
		// BUG FIX: the package logger is not initialised yet at this point
		// (u.GetLogger() runs later in main), so the usage text was being
		// discarded. Print it directly to stderr instead.
		fmt.Fprintln(os.Stderr, parser.Usage(err))
		os.Exit(1)
	}

	conf.GetConfig().Logs = "debug"
	conf.GetConfig().Timeout = *timeout
	conf.GetConfig().Mode = *mode
	conf.GetConfig().ExecutionID = *execution
	conf.GetConfig().PeerID = *peer
	conf.GetConfig().KubeHost = *host
	conf.GetConfig().KubePort = *port
	// conf.GetConfig().ArgoHost = *argoHost

	// The kube credentials arrive base64-encoded; decoding failures are
	// deliberately best-effort (the field is simply left empty).
	if decoded, err := base64.StdEncoding.DecodeString(*ca); err == nil {
		conf.GetConfig().KubeCA = string(decoded)
	}
	if decoded, err := base64.StdEncoding.DecodeString(*cert); err == nil {
		conf.GetConfig().KubeCert = string(decoded)
	}
	if decoded, err := base64.StdEncoding.DecodeString(*data); err == nil {
		conf.GetConfig().KubeData = string(decoded)
	}
}

// IsValidUUID reports whether u parses as a valid UUID.
func IsValidUUID(u string) bool {
	_, err := uuid.Parse(u)
	return err == nil
}

// getContainerName extracts the workflow/container name (first "word-word"
// pair) from the Argo file path. Returns "" when no match is found.
func getContainerName(argoFile string) string {
	return containerNameRe.FindString(argoFile)
}

// updateStatus maps an Argo status string onto the execution state, persists
// it, and notifies the booking peers referenced by the trailing "-<itemID>"
// segment of the log line.
func updateStatus(status string, log string) {
	execID := conf.GetConfig().ExecutionID
	wfExec := &workflow_execution.WorkflowExecution{
		AbstractObject: utils.AbstractObject{UUID: conf.GetConfig().ExecutionID},
	}
	wfExec.ArgoStatusToState(status)

	exec, _, err := workflow_execution.NewAccessor(&tools.APIRequest{
		PeerID: conf.GetConfig().PeerID,
	}).UpdateOne(wfExec.Serialize(wfExec), execID)
	if err != nil {
		logger.Error().Msg("Could not update status for workflow execution " + execID + err.Error())
		// BUG FIX: on error `exec` may be nil; the type assertion below would
		// panic. Nothing sensible can be notified without the updated record.
		return
	}

	splitted := strings.Split(log, "-")
	if len(splitted) > 1 {
		we := exec.(*workflow_execution.WorkflowExecution)
		itemID := splitted[len(splitted)-1] // TODO: in logs found item ID
		caller := &tools.HTTPCaller{
			URLS: map[tools.DataType]map[tools.METHOD]string{
				tools.PEER: {
					tools.POST: "/status/",
				},
				tools.BOOKING: {
					tools.PUT: "http://localhost:8080/booking/:id",
				},
			},
		}
		if we.PeerBookByGraph != nil {
			// Propagate the new state to every booking registered for this
			// item on every peer.
			for peerID, val := range we.PeerBookByGraph {
				if val[itemID] == nil {
					continue
				}
				for _, log := range val[itemID] {
					(&peer.Peer{}).LaunchPeerExecution(peerID, log, tools.BOOKING, tools.PUT, &booking.Booking{
						State: we.State,
					}, caller)
				}
			}
		}
	}
}