4 Commits

Author SHA1 Message Date
mr
04ab15cb09 test 2025-06-24 15:31:45 +02:00
mr
2b002152a4 add resource use 2025-06-18 07:09:58 +02:00
mr
7fa115c5e1 correct error 2025-06-12 14:02:07 +02:00
mr
91f421af1e Refactor + Multi admiralty test 2025-06-12 14:01:16 +02:00
19 changed files with 885 additions and 1231 deletions

1
.gitignore vendored
View File

@@ -22,4 +22,3 @@
go.work go.work
argo_workflows/* argo_workflows/*
env.env

View File

@@ -3,8 +3,6 @@
build: clean build: clean
go build . go build .
dev: build
run: run:
./oc-monitord ./oc-monitord
@@ -12,16 +10,21 @@ clean:
rm -rf oc-monitord rm -rf oc-monitord
docker: docker:
DOCKER_BUILDKIT=1 docker build -t oc-monitord -f Dockerfile . DOCKER_BUILDKIT=1 docker build -t oc/oc-monitord:0.0.1 -f Dockerfile . --build-arg=HOST=$(HOST)
docker tag oc-monitord opencloudregistry/oc-monitord:latest docker tag oc/oc-monitord:0.0.1 oc/oc-monitord:latest
docker tag oc/oc-monitord:0.0.1 oc-monitord:latest
publish-kind: publish-kind:
kind load docker-image opencloudregistry/oc-monitord:latest --name $(CLUSTER_NAME) kind load docker-image oc/oc-monitord:0.0.1 --name opencloud
publish-registry: publish-registry:
docker push opencloudregistry/oc-monitord:latest @echo "TODO"
all: docker publish-kind docker-deploy:
docker compose up -d
ci: docker publish-registry run-docker: docker publish-kind publish-registry docker-deploy
.PHONY: build run clean docker publish-kind publish-registry all: docker publish-kind publish-registry
.PHONY: build run clean docker publish-kind publish-registry

View File

@@ -9,6 +9,7 @@ type Config struct {
NatsURL string NatsURL string
ExecutionID string ExecutionID string
PeerID string PeerID string
Groups []string
Timeout int Timeout int
WorkflowID string WorkflowID string
Logs string Logs string
@@ -18,7 +19,7 @@ type Config struct {
KubeCA string KubeCA string
KubeCert string KubeCert string
KubeData string KubeData string
ArgoHost string // when executed in a container will replace addresses with "localhost" in their url ArgoHost string // when executed in a container will replace addresses with "localhost" in their url
} }
var instance *Config var instance *Config

76
go.mod
View File

@@ -1,9 +1,11 @@
module oc-monitord module oc-monitord
go 1.25.0 go 1.23.1
toolchain go1.23.3
require ( require (
cloud.o-forge.io/core/oc-lib v0.0.0-20260224130821-ce8ef70516f7 cloud.o-forge.io/core/oc-lib v0.0.0-20250624102227-e600fedcab06
github.com/akamensky/argparse v1.4.0 github.com/akamensky/argparse v1.4.0
github.com/google/uuid v1.6.0 github.com/google/uuid v1.6.0
github.com/goraz/onion v0.1.3 github.com/goraz/onion v0.1.3
@@ -13,22 +15,16 @@ require (
) )
require ( require (
github.com/beego/beego/v2 v2.3.8 // indirect github.com/beego/beego/v2 v2.3.7 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/go-playground/validator/v10 v10.26.0 // indirect
github.com/go-playground/validator/v10 v10.27.0 // indirect
github.com/golang/protobuf v1.5.4 // indirect github.com/golang/protobuf v1.5.4 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/libp2p/go-libp2p/core v0.43.0-rc2 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect github.com/sirupsen/logrus v1.9.3 // indirect
github.com/ugorji/go/codec v1.1.7 // indirect github.com/ugorji/go/codec v1.1.7 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/grpc v1.63.0 // indirect google.golang.org/grpc v1.63.0 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
) )
require ( require (
@@ -37,10 +33,10 @@ require (
github.com/biter777/countries v1.7.5 // indirect github.com/biter777/countries v1.7.5 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.9 // indirect github.com/gabriel-vasile/mimetype v1.4.8 // indirect
github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/logr v1.4.2 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.4 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect
github.com/go-openapi/swag v0.23.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect
@@ -48,7 +44,7 @@ require (
github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/snappy v1.0.0 // indirect github.com/golang/snappy v1.0.0 // indirect
github.com/google/gnostic-models v0.7.0 // indirect github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-cmp v0.7.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect github.com/google/gofuzz v1.2.0 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect
@@ -61,17 +57,17 @@ require (
github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/montanaflynn/stats v0.7.1 // indirect github.com/montanaflynn/stats v0.7.1 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nats-io/nats.go v1.44.0 // indirect github.com/nats-io/nats.go v1.41.0 // indirect
github.com/nats-io/nkeys v0.4.11 // indirect github.com/nats-io/nkeys v0.4.10 // indirect
github.com/nats-io/nuid v1.0.1 // indirect github.com/nats-io/nuid v1.0.1 // indirect
github.com/pkg/errors v0.9.1 // indirect github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.23.0 // indirect github.com/prometheus/client_golang v1.22.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.65.0 // indirect github.com/prometheus/common v0.63.0 // indirect
github.com/prometheus/procfs v0.17.0 // indirect github.com/prometheus/procfs v0.16.0 // indirect
github.com/robfig/cron v1.2.0 // indirect github.com/robfig/cron v1.2.0 // indirect
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 // indirect github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 // indirect
github.com/smartystreets/goconvey v1.6.4 // indirect github.com/smartystreets/goconvey v1.6.4 // indirect
@@ -80,25 +76,25 @@ require (
github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/scram v1.1.2 // indirect
github.com/xdg-go/stringprep v1.0.4 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
go.mongodb.org/mongo-driver v1.17.4 // indirect go.mongodb.org/mongo-driver v1.17.3 // indirect
golang.org/x/crypto v0.44.0 // indirect golang.org/x/crypto v0.37.0 // indirect
golang.org/x/net v0.47.0 // indirect golang.org/x/net v0.39.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/oauth2 v0.25.0 // indirect
golang.org/x/sync v0.18.0 // indirect golang.org/x/sync v0.13.0 // indirect
golang.org/x/sys v0.38.0 // indirect golang.org/x/sys v0.32.0 // indirect
golang.org/x/term v0.37.0 // indirect golang.org/x/term v0.31.0 // indirect
golang.org/x/text v0.31.0 // indirect golang.org/x/text v0.24.0 // indirect
golang.org/x/time v0.9.0 // indirect golang.org/x/time v0.7.0 // indirect
google.golang.org/protobuf v1.36.8 // indirect google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect
k8s.io/api v0.35.1 k8s.io/api v0.32.1
k8s.io/apimachinery v0.35.1 k8s.io/apimachinery v0.32.1
k8s.io/client-go v0.35.1 k8s.io/client-go v0.32.1
k8s.io/klog/v2 v2.130.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect
) )

212
go.sum
View File

@@ -1,41 +1,27 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.o-forge.io/core/oc-lib v0.0.0-20250604083300-387785b40cb0 h1:iEm/Rf9I0OSCcncuFy61YOSZ3jdRlhJ/oLD97Pc2pCQ= cloud.o-forge.io/core/oc-lib v0.0.0-20250217072519-cafadec1469f h1:esLB0EAn8IuOChW35kcBrPaN80z4A4yYyz1mXT45GQo=
cloud.o-forge.io/core/oc-lib v0.0.0-20250604083300-387785b40cb0/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE= cloud.o-forge.io/core/oc-lib v0.0.0-20250217072519-cafadec1469f/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
cloud.o-forge.io/core/oc-lib v0.0.0-20250620085001-583ca2fbacd5 h1:FEBwueVOOWKYf0tJuE0EKNIbjxmTyCMgkT4qATYsfbo= cloud.o-forge.io/core/oc-lib v0.0.0-20250313155727-88c88cac5bc9 h1:mSFFPwil5Ih+RPBvn88MBerQMtsoHnOuyCZQaf91a34=
cloud.o-forge.io/core/oc-lib v0.0.0-20250620085001-583ca2fbacd5/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI= cloud.o-forge.io/core/oc-lib v0.0.0-20250313155727-88c88cac5bc9/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
cloud.o-forge.io/core/oc-lib v0.0.0-20250704084459-443546027b27 h1:iogk6pV3gybzQDBXMI6Qd/jvSA1h+3oRE+vLl1MRjew= cloud.o-forge.io/core/oc-lib v0.0.0-20250612084738-2a0ab8e54963 h1:ADDfqwtWF+VQTMSNAWPuhc4mmiKdgpHNmBB+UI2jRPE=
cloud.o-forge.io/core/oc-lib v0.0.0-20250704084459-443546027b27/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI= cloud.o-forge.io/core/oc-lib v0.0.0-20250612084738-2a0ab8e54963/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
cloud.o-forge.io/core/oc-lib v0.0.0-20250715125819-e735f78e58c6 h1:Gnkv59Ntl2RebC5tNauXuxyRXLfZ2XAJ0+ujMyFte5U= cloud.o-forge.io/core/oc-lib v0.0.0-20250617130633-8f2adb76e41c h1:k2y+ocElqwUK5yzyCf3rWrDUzPWbds4MbtG58+Szos0=
cloud.o-forge.io/core/oc-lib v0.0.0-20250715125819-e735f78e58c6/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI= cloud.o-forge.io/core/oc-lib v0.0.0-20250617130633-8f2adb76e41c/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20250805113921-40a61387b9f1 h1:53KzZ+1JqRY6J7EVzQpNBmLzNuxb8oHNW3UgqxkYABo= cloud.o-forge.io/core/oc-lib v0.0.0-20250617133502-9e5266326157 h1:853UvpMOM1QuWLrr/V8biDS8IcQcqHvoJsOT4epxDng=
cloud.o-forge.io/core/oc-lib v0.0.0-20250805113921-40a61387b9f1/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI= cloud.o-forge.io/core/oc-lib v0.0.0-20250617133502-9e5266326157/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20250808140536-e7a71188a3b5 h1:bmEG0M99WXWCHsMNUgfYqQNEM0YyN4dXxYV1LCY5EYg= cloud.o-forge.io/core/oc-lib v0.0.0-20250617141444-0b0952b28c7e h1:Z5vLv+Wzzz58abmHRnovoqbkVlKHuC8u8/RLv7FjtZw=
cloud.o-forge.io/core/oc-lib v0.0.0-20250808140536-e7a71188a3b5/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI= cloud.o-forge.io/core/oc-lib v0.0.0-20250617141444-0b0952b28c7e/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20250808141553-f4b0cf5683de h1:s47eEnWRCjBMOxbec5ROHztuwu0Zo7MuXgqWizgkiXU= cloud.o-forge.io/core/oc-lib v0.0.0-20250617144221-ec7a7e474637 h1:YiZbn6KmjgZ62uM+kH95Snd2nQliDKDnGMAxRr/VoUw=
cloud.o-forge.io/core/oc-lib v0.0.0-20250808141553-f4b0cf5683de/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI= cloud.o-forge.io/core/oc-lib v0.0.0-20250617144221-ec7a7e474637/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260113155325-5cdfc28d2f51 h1:jlSEprNaUBe628uP9a9TrJ16Q5Ej6OxHlAKNtrHrN2o= cloud.o-forge.io/core/oc-lib v0.0.0-20250624064953-2c8dcbe93d14 h1:iCTrYc2+W2BFLOupRK1sD6sOgsK4NIs6WMC+4LiWCaY=
cloud.o-forge.io/core/oc-lib v0.0.0-20260113155325-5cdfc28d2f51/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI= cloud.o-forge.io/core/oc-lib v0.0.0-20250624064953-2c8dcbe93d14/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260114125749-fa5b7543332d h1:6oGSN4Fb+H7LNVbUEN7vaDtWBHZTdd2Y1BkBdZ7MLXE= cloud.o-forge.io/core/oc-lib v0.0.0-20250624093207-3fdf5c3ebf29 h1:JitS1izRltTyOaWnvXnmYywHj0napsL6y0nBYiWUCNo=
cloud.o-forge.io/core/oc-lib v0.0.0-20260114125749-fa5b7543332d/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI= cloud.o-forge.io/core/oc-lib v0.0.0-20250624093207-3fdf5c3ebf29/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260129122033-186ba3e689c7 h1:NRFGRqN+j5g3DrtXMYN5T5XSYICG+OU2DisjBdID3j8= cloud.o-forge.io/core/oc-lib v0.0.0-20250624095852-147c7bc3a1d5 h1:0eV0E3kBZkOyoAurRmP9h4eHmFrZajOxSqoBgM3l3dk=
cloud.o-forge.io/core/oc-lib v0.0.0-20260129122033-186ba3e689c7/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI= cloud.o-forge.io/core/oc-lib v0.0.0-20250624095852-147c7bc3a1d5/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260203074447-30e6c9a6183c h1:c19lIseiUk5Hp+06EowfEbMWH1pK8AC/hvQ4ryWgJtY= cloud.o-forge.io/core/oc-lib v0.0.0-20250624102227-e600fedcab06 h1:+RSv62uIC7wsmibsp1XTanQMNznNeOGgPpfhb6ZHT4c=
cloud.o-forge.io/core/oc-lib v0.0.0-20260203074447-30e6c9a6183c/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI= cloud.o-forge.io/core/oc-lib v0.0.0-20250624102227-e600fedcab06/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260203083753-4f28b9b589d6 h1:N+0xkioACl3PNo+MquCIIOL/kSICevg340IYOFGQeOw=
cloud.o-forge.io/core/oc-lib v0.0.0-20260203083753-4f28b9b589d6/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260203150531-ef916fe2d995 h1:ZDRvnzTTNHgMm5hYmseHdEPqQ6rn/4v+P9f/JIxPaNw=
cloud.o-forge.io/core/oc-lib v0.0.0-20260203150531-ef916fe2d995/go.mod h1:T0UCxRd8w+qCVVC0NEyDiWIGC5ADwEbQ7hFcvftd4Ks=
cloud.o-forge.io/core/oc-lib v0.0.0-20260212123952-403913d8cf13 h1:DNIPQ7C+7wjbj5RUx29wLxuIe/wiSOcuUMlLRIv6Fvs=
cloud.o-forge.io/core/oc-lib v0.0.0-20260212123952-403913d8cf13/go.mod h1:jmyBwmsac/4V7XPL347qawF60JsBCDmNAMfn/ySXKYo=
cloud.o-forge.io/core/oc-lib v0.0.0-20260224093610-a9ebad78f3a8 h1:xoC5PAz1469QxrNm8rrsq5+BtwshEt+L2Nhf90MrqrM=
cloud.o-forge.io/core/oc-lib v0.0.0-20260224093610-a9ebad78f3a8/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260224120019-0f6aa1fe7881 h1:1JUGErc+3Runda7iapS5sieH+yFqWrGp+ljv7Kly+hc=
cloud.o-forge.io/core/oc-lib v0.0.0-20260224120019-0f6aa1fe7881/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260224122900-d18b031a293a h1:gdr886O31Ai5pEFgJC/mrJMJdhplnQg+UJdZF9mV1n4=
cloud.o-forge.io/core/oc-lib v0.0.0-20260224122900-d18b031a293a/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260224130821-ce8ef70516f7 h1:p9uJjMY+QkE4neA+xRmIRtAm9us94EKZqgajDdLOd0Y=
cloud.o-forge.io/core/oc-lib v0.0.0-20260224130821-ce8ef70516f7/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/akamensky/argparse v1.4.0 h1:YGzvsTqCvbEZhL8zZu2AiA5nq805NZh75JNj4ajn1xc= github.com/akamensky/argparse v1.4.0 h1:YGzvsTqCvbEZhL8zZu2AiA5nq805NZh75JNj4ajn1xc=
github.com/akamensky/argparse v1.4.0/go.mod h1:S5kwC7IuDcEr5VeXtGPRVZ5o/FdhcMlQz4IZQuw64xA= github.com/akamensky/argparse v1.4.0/go.mod h1:S5kwC7IuDcEr5VeXtGPRVZ5o/FdhcMlQz4IZQuw64xA=
@@ -43,8 +29,8 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd
github.com/argoproj/argo-workflows/v3 v3.6.4 h1:5+Cc1UwaQE5ka3w7R3hxZ1TK3M6VjDEXA5WSQ/IXrxY= github.com/argoproj/argo-workflows/v3 v3.6.4 h1:5+Cc1UwaQE5ka3w7R3hxZ1TK3M6VjDEXA5WSQ/IXrxY=
github.com/argoproj/argo-workflows/v3 v3.6.4/go.mod h1:2f5zB8CkbNCCO1od+kd1dWkVokqcuyvu+tc+Jwx1MZg= github.com/argoproj/argo-workflows/v3 v3.6.4/go.mod h1:2f5zB8CkbNCCO1od+kd1dWkVokqcuyvu+tc+Jwx1MZg=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/beego/beego/v2 v2.3.8 h1:wplhB1pF4TxR+2SS4PUej8eDoH4xGfxuHfS7wAk9VBc= github.com/beego/beego/v2 v2.3.7 h1:z4btKtjU/rfp5BiYHkGD2QPjK9i1E9GH+I7vfhn6Agk=
github.com/beego/beego/v2 v2.3.8/go.mod h1:8vl9+RrXqvodrl9C8yivX1e6le6deCK6RWeq8R7gTTg= github.com/beego/beego/v2 v2.3.7/go.mod h1:5cqHsOHJIxkq44tBpRvtDe59GuVRVv/9/tyVDxd5ce4=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/biter777/countries v1.7.5 h1:MJ+n3+rSxWQdqVJU8eBy9RqcdH6ePPn4PJHocVWUa+Q= github.com/biter777/countries v1.7.5 h1:MJ+n3+rSxWQdqVJU8eBy9RqcdH6ePPn4PJHocVWUa+Q=
@@ -65,14 +51,10 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
github.com/elazarl/go-bindata-assetfs v1.0.1 h1:m0kkaHRKEu7tUIUFVwhGGGYClXvyl4RE03qmvRTNfbw= github.com/elazarl/go-bindata-assetfs v1.0.1 h1:m0kkaHRKEu7tUIUFVwhGGGYClXvyl4RE03qmvRTNfbw=
github.com/elazarl/go-bindata-assetfs v1.0.1/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/elazarl/go-bindata-assetfs v1.0.1/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -81,15 +63,11 @@ github.com/etcd-io/etcd v3.3.17+incompatible/go.mod h1:cdZ77EstHBwVtD6iTgzgvogwc
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU=
@@ -104,8 +82,6 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k= github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k=
github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4=
github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -122,8 +98,6 @@ github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -167,8 +141,6 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/libp2p/go-libp2p/core v0.43.0-rc2 h1:1X1aDJNWhMfodJ/ynbaGLkgnC8f+hfBIqQDrzxFZOqI=
github.com/libp2p/go-libp2p/core v0.43.0-rc2/go.mod h1:NYeJ9lvyBv9nbDk2IuGb8gFKEOkIv/W5YRIy1pAJB2Q=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
@@ -191,18 +163,14 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/nats-io/nats.go v1.43.0 h1:uRFZ2FEoRvP64+UUhaTokyS18XBCR/xM2vQZKO4i8ug= github.com/nats-io/nats.go v1.41.0 h1:PzxEva7fflkd+n87OtQTXqCTyLfIIMFJBpyccHLE2Ko=
github.com/nats-io/nats.go v1.43.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g= github.com/nats-io/nats.go v1.41.0/go.mod h1:wV73x0FSI/orHPSYoyMeJB+KajMDoWyXmFaRrrYaaTo=
github.com/nats-io/nats.go v1.44.0 h1:ECKVrDLdh/kDPV1g0gAQ+2+m2KprqZK5O/eJAyAnH2M= github.com/nats-io/nkeys v0.4.10 h1:glmRrpCmYLHByYcePvnTBEAwawwapjCPMjy2huw20wc=
github.com/nats-io/nats.go v1.44.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g= github.com/nats-io/nkeys v0.4.10/go.mod h1:OjRrnIKnWBFl+s4YK5ChQfvHP2fxqZexrKJoVVyWB3U=
github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0=
github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/nwtgck/go-fakelish v0.1.3 h1:bA8/xa9hQmzppexIhBvdmztcd/PJ4SPuAUTBdMKZ8G4= github.com/nwtgck/go-fakelish v0.1.3 h1:bA8/xa9hQmzppexIhBvdmztcd/PJ4SPuAUTBdMKZ8G4=
@@ -222,19 +190,13 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4= github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM=
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
@@ -288,28 +250,14 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ= go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ=
go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw=
go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -329,20 +277,12 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -350,14 +290,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -373,42 +307,20 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -445,17 +357,11 @@ google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8=
google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A=
google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -469,37 +375,19 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc=
k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k=
k8s.io/api v0.35.1 h1:0PO/1FhlK/EQNVK5+txc4FuhQibV25VLSdLMmGpDE/Q=
k8s.io/api v0.35.1/go.mod h1:28uR9xlXWml9eT0uaGo6y71xK86JBELShLy4wR1XtxM=
k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs=
k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/apimachinery v0.35.1 h1:yxO6gV555P1YV0SANtnTjXYfiivaTPvCTKX6w6qdDsU=
k8s.io/apimachinery v0.35.1/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU=
k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg=
k8s.io/client-go v0.35.1 h1:+eSfZHwuo/I19PaSxqumjqZ9l5XiTEKbIaJ+j1wLcLM=
k8s.io/client-go v0.35.1/go.mod h1:1p1KxDt3a0ruRfc/pG4qT/3oHmUj1AhSHEcxNSGg+OA=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=

View File

@@ -11,8 +11,6 @@ import (
"sync" "sync"
"time" "time"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/models/common/enum"
"github.com/rs/zerolog" "github.com/rs/zerolog"
"k8s.io/apimachinery/pkg/watch" "k8s.io/apimachinery/pkg/watch"
@@ -56,7 +54,7 @@ func NewArgoLogs(name string, namespace string, stepMax int) *ArgoLogs {
} }
} }
// An object to monitor and log the output of an argo submit // An object to monitor and log the output of an argo submit
type ArgoLogs struct { type ArgoLogs struct {
Name string Name string
Namespace string Namespace string
@@ -95,21 +93,22 @@ func (a *ArgoLogs) StartStepRecording(current_watch *ArgoWatch, logger zerolog.L
a.Started = time.Now() a.Started = time.Now()
} }
type ArgoPodLog struct { type ArgoPodLog struct {
PodName string PodName string
Step string Step string
Message string Message string
} }
func NewArgoPodLog(name string, step string, msg string) ArgoPodLog { func NewArgoPodLog(name string, step string, msg string) ArgoPodLog {
return ArgoPodLog{ return ArgoPodLog{
PodName: name, PodName: name,
Step: step, Step: step,
Message: msg, Message: msg,
} }
} }
func LogKubernetesArgo(wfName string, execID string, namespace string, watcher watch.Interface) { func LogKubernetesArgo(wfName string, namespace string, watcher watch.Interface) {
var argoWatcher *ArgoWatch var argoWatcher *ArgoWatch
var pods []string var pods []string
var node wfv1.NodeStatus var node wfv1.NodeStatus
@@ -118,38 +117,38 @@ func LogKubernetesArgo(wfName string, execID string, namespace string, watcher w
wfl.Debug().Msg("Starting to log " + wfName) wfl.Debug().Msg("Starting to log " + wfName)
var wg sync.WaitGroup var wg sync.WaitGroup
for event := range watcher.ResultChan() { for event := range (watcher.ResultChan()) {
wf, ok := event.Object.(*wfv1.Workflow) wf, ok := event.Object.(*wfv1.Workflow)
if !ok { if !ok {
wfl.Error().Msg("unexpected type") wfl.Error().Msg("unexpected type")
continue continue
} }
if len(wf.Status.Nodes) == 0 { if len(wf.Status.Nodes) == 0 {
wfl.Info().Msg("No node status yet") // The first output of the channel doesn't contain Nodes so we skip it wfl.Info().Msg("No node status yet") // The first output of the channel doesn't contain Nodes so we skip it
continue continue
} }
conditions := retrieveCondition(wf) conditions := retrieveCondition(wf)
// Retrieving the Status for the main node, which is named after the workflow // Retrieving the Status for the main node, which is named after the workflow
if node, ok = wf.Status.Nodes[wfName]; !ok { if node, ok = wf.Status.Nodes[wfName]; !ok {
bytified, _ := json.MarshalIndent(wf.Status.Nodes, "", "\t") bytified, _ := json.MarshalIndent(wf.Status.Nodes,"","\t")
wfl.Fatal().Msg("Could not find the " + wfName + " node in \n" + string(bytified)) wfl.Fatal().Msg("Could not find the " + wfName + " node in \n" + string(bytified))
} }
now := time.Now() now := time.Now()
start, _ := time.Parse(time.RFC3339, node.StartedAt.String()) start, _ := time.Parse(time.RFC3339, node.StartedAt.String() )
duration := now.Sub(start) duration := now.Sub(start)
newWatcher := ArgoWatch{ newWatcher := ArgoWatch{
Name: node.Name, Name: node.Name,
Namespace: namespace, Namespace: namespace,
Status: string(node.Phase), Status: string(node.Phase),
Created: node.StartedAt.String(), Created: node.StartedAt.String(),
Started: node.StartedAt.String(), Started: node.StartedAt.String(),
Progress: string(node.Progress), Progress: string(node.Progress),
Duration: duration.String(), Duration: duration.String(),
Conditions: conditions, Conditions: conditions,
} }
@@ -157,26 +156,23 @@ func LogKubernetesArgo(wfName string, execID string, namespace string, watcher w
argoWatcher = &newWatcher argoWatcher = &newWatcher
} }
if !newWatcher.Equals(argoWatcher) { if !newWatcher.Equals(argoWatcher){
jsonified, _ := json.Marshal(newWatcher) jsonified, _ := json.Marshal(newWatcher)
wfl.Info().Msg(string(jsonified)) wfl.Info().Msg(string(jsonified))
argoWatcher = &newWatcher argoWatcher = &newWatcher
} }
// I don't think we need to use WaitGroup here, because the loop itself // I don't think we need to use WaitGroup here, because the loop itself
// acts as blocking process for the main thread, because Argo watch never closes the channel // acts as blocking process for the main thread, because Argo watch never closes the channel
for _, pod := range wf.Status.Nodes { for _, pod := range wf.Status.Nodes{
if !slices.Contains(pods, pod.Name) { if !slices.Contains(pods,pod.Name){
pl := wfl.With().Str("pod", pod.Name).Logger() pl := wfl.With().Str("pod", pod.Name).Logger()
if wfName == pod.Name { if wfName == pod.Name { pods = append(pods, pod.Name); continue } // One of the node is the Workflow, the others are the pods so don't try to log on the wf name
pods = append(pods, pod.Name) pl.Info().Msg("Found a new pod to log : " + pod.Name)
continue
} // One of the node is the Workflow, the others are the pods so don't try to log on the wf name
pl.Info().Msg("Found a new pod to log : " + pod.Name)
wg.Add(1) wg.Add(1)
go logKubernetesPods(namespace, wfName, pod.Name, pl, &wg) go logKubernetesPods(namespace, wfName, pod.Name, pl, &wg)
pods = append(pods, pod.Name) pods = append(pods, pod.Name)
} }
} }
// Stop listening to the chan when the Workflow is completed or something bad happened // Stop listening to the chan when the Workflow is completed or something bad happened
@@ -184,17 +180,11 @@ func LogKubernetesArgo(wfName string, execID string, namespace string, watcher w
wfl.Info().Msg(wfName + " worflow completed") wfl.Info().Msg(wfName + " worflow completed")
wg.Wait() wg.Wait()
wfl.Info().Msg(wfName + " exiting") wfl.Info().Msg(wfName + " exiting")
oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), nil).UpdateOne(map[string]interface{}{
"state": enum.SUCCESS.EnumIndex(),
}, execID)
break break
} }
if node.Phase.FailedOrError() { if node.Phase.FailedOrError() {
wfl.Error().Msg(wfName + "has failed, please refer to the logs") wfl.Error().Msg(wfName + "has failed, please refer to the logs")
oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), nil).UpdateOne(map[string]interface{}{ wfl.Error().Msg(node.Message)
"state": enum.FAILURE.EnumIndex(),
}, execID)
wfl.Error().Msg(node.Message)
break break
} }
} }
@@ -210,36 +200,36 @@ func retrieveCondition(wf *wfv1.Workflow) (c Conditions) {
} }
} }
return return
} }
// Function needed to be executed as a go thread // Function needed to be executed as a go thread
func logKubernetesPods(executionId string, wfName string, podName string, logger zerolog.Logger, wg *sync.WaitGroup) { func logKubernetesPods(executionId string, wfName string,podName string, logger zerolog.Logger, wg *sync.WaitGroup){
defer wg.Done() defer wg.Done()
s := strings.Split(podName, ".") s := strings.Split(podName, ".")
name := s[0] + "-" + s[1] name := s[0] + "-" + s[1]
step := s[1] step := s[1]
k, err := tools.NewKubernetesTool() k, err := tools.NewKubernetesTool()
if err != nil { if err != nil {
logger.Error().Msg("Could not get Kubernetes tools") logger.Error().Msg("Could not get Kubernetes tools")
return return
} }
reader, err := k.GetPodLogger(executionId, wfName, podName) reader, err := k.GetPodLogger(executionId, wfName, podName)
if err != nil { if err != nil {
logger.Error().Msg(err.Error()) logger.Error().Msg(err.Error())
return return
} }
scanner := bufio.NewScanner(reader) scanner := bufio.NewScanner(reader)
for scanner.Scan() { for scanner.Scan() {
log := scanner.Text() log := scanner.Text()
podLog := NewArgoPodLog(name, step, log) podLog := NewArgoPodLog(name,step,log)
jsonified, _ := json.Marshal(podLog) jsonified, _ := json.Marshal(podLog)
logger.Info().Msg(string(jsonified)) logger.Info().Msg(string(jsonified))
} }
} }

109
main.go
View File

@@ -1,14 +1,20 @@
package main package main
import ( import (
"bufio"
"encoding/base64" "encoding/base64"
"fmt" "fmt"
"io"
"os" "os"
"os/exec"
"regexp" "regexp"
"strings" "strings"
"sync"
"time"
"oc-monitord/conf" "oc-monitord/conf"
l "oc-monitord/logger" l "oc-monitord/logger"
"oc-monitord/models"
u "oc-monitord/utils" u "oc-monitord/utils"
"oc-monitord/workflow_builder" "oc-monitord/workflow_builder"
@@ -16,7 +22,6 @@ import (
"cloud.o-forge.io/core/oc-lib/logs" "cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/booking" "cloud.o-forge.io/core/oc-lib/models/booking"
"cloud.o-forge.io/core/oc-lib/models/common/enum"
"cloud.o-forge.io/core/oc-lib/models/peer" "cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution" "cloud.o-forge.io/core/oc-lib/models/workflow_execution"
@@ -49,9 +54,17 @@ func main() {
os.Setenv("test_service", "true") // Only for service demo, delete before merging on main os.Setenv("test_service", "true") // Only for service demo, delete before merging on main
parser = *argparse.NewParser("oc-monitord", "Launch the execution of a workflow given as a parameter and sends the produced logs to a loki database") parser = *argparse.NewParser("oc-monitord", "Launch the execution of a workflow given as a parameter and sends the produced logs to a loki database")
loadConfig(&parser) setConf(&parser)
oclib.InitDaemon("oc-monitord") oclib.InitDaemon("oc-monitord")
oclib.SetConfig(
conf.GetConfig().MongoURL,
conf.GetConfig().Database,
conf.GetConfig().NatsURL,
conf.GetConfig().LokiURL,
conf.GetConfig().Logs,
)
logger = u.GetLogger() logger = u.GetLogger()
logger.Debug().Msg("Loki URL : " + conf.GetConfig().LokiURL) logger.Debug().Msg("Loki URL : " + conf.GetConfig().LokiURL)
@@ -59,9 +72,6 @@ func main() {
exec := u.GetExecution(conf.GetConfig().ExecutionID) exec := u.GetExecution(conf.GetConfig().ExecutionID)
if exec == nil { if exec == nil {
logger.Fatal().Msg("Could not retrieve workflow ID from execution ID " + conf.GetConfig().ExecutionID + " on peer " + conf.GetConfig().PeerID) logger.Fatal().Msg("Could not retrieve workflow ID from execution ID " + conf.GetConfig().ExecutionID + " on peer " + conf.GetConfig().PeerID)
oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), nil).UpdateOne(map[string]interface{}{
"state": enum.FAILURE.EnumIndex(),
}, conf.GetConfig().ExecutionID)
return return
} }
conf.GetConfig().WorkflowID = exec.WorkflowID conf.GetConfig().WorkflowID = exec.WorkflowID
@@ -81,40 +91,33 @@ func main() {
logger.Error().Msg("Could not retrieve workflow " + conf.GetConfig().WorkflowID + " from oc-catalog API") logger.Error().Msg("Could not retrieve workflow " + conf.GetConfig().WorkflowID + " from oc-catalog API")
} }
builder, _, err := new_wf.ExportToArgo(exec, conf.GetConfig().Timeout) // Removed stepMax so far, I don't know if we need it anymore builder, _, err := new_wf.ExportToArgo(exec.ExecutionsID, conf.GetConfig().Timeout) // Removed stepMax so far, I don't know if we need it anymore
if err != nil { if err != nil {
logger.Error().Msg("Could not create the Argo file for " + conf.GetConfig().WorkflowID) logger.Error().Msg("Could not create the Argo file for " + conf.GetConfig().WorkflowID)
logger.Error().Msg(err.Error()) logger.Error().Msg(err.Error())
oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), nil).UpdateOne(map[string]interface{}{
"state": enum.FAILURE.EnumIndex(),
}, exec.GetID())
return
} }
argoFilePath, err := builder.CompleteBuild(exec.ExecutionsID) argoFilePath, err := builder.CompleteBuild(exec.ExecutionsID)
if err != nil { if err != nil {
logger.Error().Msg("Error when completing the build of the workflow: " + err.Error()) logger.Error().Msg("Error when completing the build of the workflow: " + err.Error())
oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), nil).UpdateOne(map[string]interface{}{
"state": enum.FAILURE.EnumIndex(),
}, exec.GetID())
return
} }
workflowName = getContainerName(argoFilePath) workflowName = getContainerName(argoFilePath)
if conf.GetConfig().KubeHost == "" { if conf.GetConfig().KubeHost == "" {
// Not in a k8s environment, get conf from parameters // Not in a k8s environment, get conf from parameters
panic("can't exec with no kube for argo deployment") logger.Info().Msg("Executes outside of k8s")
executeOutside(argoFilePath, builder.Workflow)
} else { } else {
// Executed in a k8s environment // Executed in a k8s environment
logger.Info().Msg("Executes inside a k8s") logger.Info().Msg("Executes inside a k8s")
// executeInside(exec.GetID(), "argo", argo_file_path, stepMax) // commenting to use conf.ExecutionID instead of exec.GetID() // executeInside(exec.GetID(), "argo", argo_file_path, stepMax) // commenting to use conf.ExecutionID instead of exec.GetID()
executeInside(exec.ExecutionsID, exec.GetID(), argoFilePath) executeInside(exec.ExecutionsID, argoFilePath)
} }
} }
// So far we only log the output from // So far we only log the output from
func executeInside(ns string, execID string, argo_file_path string) { func executeInside(ns string, argo_file_path string) {
t, err := tools2.NewService(conf.GetConfig().Mode) t, err := tools2.NewService(conf.GetConfig().Mode)
if err != nil { if err != nil {
logger.Error().Msg("Could not create KubernetesTool") logger.Error().Msg("Could not create KubernetesTool")
@@ -133,25 +136,71 @@ func executeInside(ns string, execID string, argo_file_path string) {
watcher, err := t.GetArgoWatch(ns, workflowName) watcher, err := t.GetArgoWatch(ns, workflowName)
if err != nil { if err != nil {
logger.Error().Msg("Could not retrieve Watcher : " + err.Error()) logger.Error().Msg("Could not retrieve Watcher : " + err.Error())
oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), nil).UpdateOne(map[string]interface{}{
"state": enum.FAILURE.EnumIndex(),
}, execID)
} }
l.LogKubernetesArgo(name, execID, ns, watcher) l.LogKubernetesArgo(name, ns, watcher)
if err != nil {
logger.Error().Msg("Could not log workflow : " + err.Error())
}
logger.Info().Msg("Finished, exiting...") logger.Info().Msg("Finished, exiting...")
} }
} }
func loadConfig(parser *argparse.Parser) { func executeOutside(argo_file_path string, workflow *models.Workflow) {
var o *onion.Onion var stdoutSubmit, stderrSubmit io.ReadCloser
o = initOnion(o) var stdoutLogs, stderrLogs io.ReadCloser
setConf(parser) var wg sync.WaitGroup
var err error
// if !IsValidUUID(conf.GetConfig().ExecutionID) { logger.Debug().Msg("executing :" + "argo submit --watch " + argo_file_path + " --serviceaccount sa-" + conf.GetConfig().ExecutionID + " -n " + conf.GetConfig().ExecutionID)
// logger.Fatal().Msg("Provided ID is not an UUID")
// } cmdSubmit := exec.Command("argo", "submit", "--watch", argo_file_path, "--serviceaccount", "sa-"+conf.GetConfig().ExecutionID, "-n", conf.GetConfig().ExecutionID)
if stdoutSubmit, err = cmdSubmit.StdoutPipe(); err != nil {
wf_logger.Error().Msg("Could not retrieve stdoutpipe " + err.Error())
return
}
cmdLogs := exec.Command("argo", "logs", "oc-monitor-"+workflowName, "-n", conf.GetConfig().ExecutionID, "--follow", "--no-color")
if stdoutLogs, err = cmdLogs.StdoutPipe(); err != nil {
wf_logger.Error().Msg("Could not retrieve stdoutpipe for 'argo logs'" + err.Error())
return
}
var steps []string
for _, template := range workflow.Spec.Templates {
steps = append(steps, template.Name)
}
go l.LogLocalWorkflow(workflowName, stdoutSubmit, &wg)
go l.LogLocalPod(workflowName, stdoutLogs, steps, &wg)
logger.Info().Msg("Starting argo submit")
if err := cmdSubmit.Start(); err != nil {
wf_logger.Error().Msg("Could not start argo submit")
wf_logger.Error().Msg(err.Error() + bufio.NewScanner(stderrSubmit).Text())
updateStatus("fatal", "")
}
time.Sleep(5 * time.Second)
logger.Info().Msg("Running argo logs")
if err := cmdLogs.Run(); err != nil {
wf_logger.Error().Msg("Could not run '" + strings.Join(cmdLogs.Args, " ") + "'")
wf_logger.Fatal().Msg(err.Error() + bufio.NewScanner(stderrLogs).Text())
}
logger.Info().Msg("Waiting argo submit")
if err := cmdSubmit.Wait(); err != nil {
wf_logger.Error().Msg("Could not execute argo submit")
wf_logger.Error().Msg(err.Error() + bufio.NewScanner(stderrSubmit).Text())
updateStatus("fatal", "")
}
wg.Wait()
} }
func setConf(parser *argparse.Parser) { func setConf(parser *argparse.Parser) {
@@ -159,6 +208,8 @@ func setConf(parser *argparse.Parser) {
mode := parser.String("M", "mode", &argparse.Options{Required: false, Default: "", Help: "Mode of the execution"}) mode := parser.String("M", "mode", &argparse.Options{Required: false, Default: "", Help: "Mode of the execution"})
execution := parser.String("e", "execution", &argparse.Options{Required: true, Help: "Execution ID of the workflow to request from oc-catalog API"}) execution := parser.String("e", "execution", &argparse.Options{Required: true, Help: "Execution ID of the workflow to request from oc-catalog API"})
peer := parser.String("p", "peer", &argparse.Options{Required: false, Default: "", Help: "Peer ID of the workflow to request from oc-catalog API"}) peer := parser.String("p", "peer", &argparse.Options{Required: false, Default: "", Help: "Peer ID of the workflow to request from oc-catalog API"})
groups := parser.String("g", "groups", &argparse.Options{Required: false, Default: "", Help: "Groups of the peer to request from oc-catalog API"})
mongo := parser.String("m", "mongo", &argparse.Options{Required: true, Default: "mongodb://127.0.0.1:27017", Help: "URL to reach the MongoDB"}) mongo := parser.String("m", "mongo", &argparse.Options{Required: true, Default: "mongodb://127.0.0.1:27017", Help: "URL to reach the MongoDB"})
db := parser.String("d", "database", &argparse.Options{Required: true, Default: "DC_myDC", Help: "Name of the database to query in MongoDB"}) db := parser.String("d", "database", &argparse.Options{Required: true, Default: "DC_myDC", Help: "Name of the database to query in MongoDB"})
timeout := parser.Int("t", "timeout", &argparse.Options{Required: false, Default: -1, Help: "Timeout for the execution of the workflow"}) timeout := parser.Int("t", "timeout", &argparse.Options{Required: false, Default: -1, Help: "Timeout for the execution of the workflow"})
@@ -185,7 +236,7 @@ func setConf(parser *argparse.Parser) {
conf.GetConfig().Mode = *mode conf.GetConfig().Mode = *mode
conf.GetConfig().ExecutionID = *execution conf.GetConfig().ExecutionID = *execution
conf.GetConfig().PeerID = *peer conf.GetConfig().PeerID = *peer
conf.GetConfig().Groups = strings.Split((*groups), ",")
conf.GetConfig().KubeHost = *host conf.GetConfig().KubeHost = *host
conf.GetConfig().KubePort = *port conf.GetConfig().KubePort = *port

View File

@@ -1,66 +0,0 @@
# Goal
We want to be able to instantiate a service that allows to store file located on a `processing` pod onto it.
We have already tested it with a static `Argo` yaml file, a MinIO running on the same kubernetes node, the minio service is reached because it is the only associated to the `serviceAccount`.
We have established three otpions that need to be available to the user for the feature to be implemented:
- Use a MinIO running constantly on the node that executes the argo workflow
- Use a MinIO
- A MinIO is instanciated when a new workflow is launched
# Requirements
- Helm : `https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3`
- Helm GO client : `$ go get github.com/mittwald/go-helm-client`
- MinIO chart : `https://charts.min.io/`
# Ressources
We need to create several ressources in order for the pods to communicate with the MinIO
## MinIO Auth Secrets
## Bucket ConfigMap
With the name `artifact-repositories` this configMap will be used by default. It contains the URL to the MinIO server and the key to the authentication data held in a `secret` ressource.
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
# If you want to use this config map by default, name it "artifact-repositories".
name: artifact-repositories
# annotations:
# # v3.0 and after - if you want to use a specific key, put that key into this annotation.
# workflows.argoproj.io/default-artifact-repository: oc-s3-artifact-repository
data:
oc-s3-artifact-repository: |
s3:
bucket: oc-bucket
endpoint: [ retrieve cluster with kubectl get service argo-artifacts -o jsonpath="{.spec.clusterIP}" ]:9000
insecure: true
accessKeySecret:
name: argo-artifact-secret
key: access-key
secretKeySecret:
name: argo-artifact-secret
key: secret-key
```
# Code modifications
Rajouter un attribut "isDataLink"
- true/false
Rajouter un attribut DataPath ou un truc comme ca
- liste de map[string]string permet de n'avoir qu'une copie par fichier)
- éditable uniquement a travers la méthode addDataPath
- clé : path du fichier / value : nom de la copie dans minio
===> on a besoin du meme attribut pour Processing -> Data et Data -> Processing

View File

@@ -1,5 +1,7 @@
package models package models
import "gopkg.in/yaml.v3"
type ServiceResource struct { type ServiceResource struct {
Action string `yaml:"action,omitempty"` Action string `yaml:"action,omitempty"`
SuccessCondition string `yaml:"successCondition,omitempty"` SuccessCondition string `yaml:"successCondition,omitempty"`
@@ -15,6 +17,24 @@ type Service struct {
Spec ServiceSpec `yaml:"spec"` Spec ServiceSpec `yaml:"spec"`
} }
func (s *Service) BindToArgo(workflow *Workflow) error {
service_manifest, err := yaml.Marshal(s)
if err != nil {
return err
}
service_template := Template{Name: "workflow-service-pod",
Resource: ServiceResource{
Action: "create",
SuccessCondition: "status.succeeded > 0",
FailureCondition: "status.failed > 3",
SetOwnerReference: true,
Manifest: string(service_manifest),
},
}
workflow.Spec.Templates = append(workflow.Spec.Templates, service_template)
return nil
}
type Metadata struct { type Metadata struct {
Name string `yaml:"name"` Name string `yaml:"name"`
} }

View File

@@ -3,14 +3,15 @@ package models
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"oc-monitord/conf" "strconv"
"strings" "strings"
w "cloud.o-forge.io/core/oc-lib/models/workflow"
"cloud.o-forge.io/core/oc-lib/models/workflow/graph"
"cloud.o-forge.io/core/oc-lib/models/common/enum"
"cloud.o-forge.io/core/oc-lib/models/common/models" "cloud.o-forge.io/core/oc-lib/models/common/models"
"cloud.o-forge.io/core/oc-lib/models/resources" "cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/models/resources/native_tools"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
"cloud.o-forge.io/core/oc-lib/tools"
) )
type Parameter struct { type Parameter struct {
@@ -18,11 +19,60 @@ type Parameter struct {
Value string `yaml:"value,omitempty"` Value string `yaml:"value,omitempty"`
} }
type Bounds struct {
CPU string `yaml:"cpu,omitempty"`
Memory string `yaml:"memory,omitempty"`
GPU string `yaml:"nvidia.com/gpu,omitempty"`
}
func NewBounds() *Bounds {
return &Bounds{
CPU: "0",
Memory: "0",
GPU: "0",
}
}
func (b *Bounds) Set(value float64, what string, isMin bool) bool {
i := float64(0)
switch what {
case "cpu":
if newI, err := strconv.ParseFloat(b.CPU, 64); err == nil {
i = newI
}
case "ram":
if newI, err := strconv.ParseFloat(b.Memory, 64); err == nil {
i = newI
}
case "gpu":
if newI, err := strconv.ParseFloat(b.GPU, 64); err == nil {
i = newI
}
}
ok := (value > i && !isMin) || (value < i && isMin)
if ok {
switch what {
case "cpu":
b.CPU = fmt.Sprintf("%f", value)
return true
case "ram":
b.Memory = fmt.Sprintf("%fGi", value)
return true
case "gpu":
b.GPU = fmt.Sprintf("%f", value)
return true
}
}
return false
}
type Container struct { type Container struct {
Image string `yaml:"image"` Image string `yaml:"image"`
Command []string `yaml:"command,omitempty,flow"` Command []string `yaml:"command,omitempty,flow"`
Args []string `yaml:"args,omitempty,flow"` Args []string `yaml:"args,omitempty,flow"`
VolumeMounts []VolumeMount `yaml:"volumeMounts,omitempty"` VolumeMounts []VolumeMount `yaml:"volumeMounts,omitempty"`
Requests Bounds `yaml:"requests,omitempty"`
Limits Bounds `yaml:"limits,omitempty"`
} }
func (c *Container) AddVolumeMount(volumeMount VolumeMount, volumes []VolumeMount) []VolumeMount { func (c *Container) AddVolumeMount(volumeMount VolumeMount, volumes []VolumeMount) []VolumeMount {
@@ -43,25 +93,85 @@ func (c *Container) AddVolumeMount(volumeMount VolumeMount, volumes []VolumeMoun
return volumes return volumes
} }
type VolumeMount struct {
Name string `yaml:"name"`
MountPath string `yaml:"mountPath"`
Storage *resources.StorageResource `yaml:"-"`
}
type Task struct { type Task struct {
Name string `yaml:"name"` Name string `yaml:"name"`
Template string `yaml:"template"` Template string `yaml:"template"`
Dependencies []string `yaml:"dependencies,omitempty"` Dependencies []string `yaml:"dependencies,omitempty"`
NodeSelector map[string]string `yaml:"nodeSelector,omitempty"`
Arguments struct { Arguments struct {
Parameters []Parameter `yaml:"parameters,omitempty"` Parameters []Parameter `yaml:"parameters,omitempty"`
} `yaml:"arguments,omitempty"` } `yaml:"arguments,omitempty"`
} }
func NewTask(processingName string, graphItemID string) *Task {
unique_name := GetArgoName(processingName, graphItemID)
return &Task{
Name: unique_name,
Template: unique_name,
}
}
func (t *Task) BindToArgo(
dag *Dag,
graphItemID string,
originWf *w.Workflow,
processing *resources.ProcessingResource,
firstItems, lastItems []string,
) (*Dag, []string, []string) {
if instance := processing.GetSelectedInstance(); instance != nil {
t.addParams(instance.(*resources.ProcessingInstance).Env)
t.addParams(instance.(*resources.ProcessingInstance).Inputs)
t.addParams(instance.(*resources.ProcessingInstance).Outputs)
}
t.Dependencies = TransformDepsToArgo(originWf.GetDependencies(graphItemID))
name := ""
if originWf.Graph.Items[graphItemID].Processing != nil {
name = originWf.Graph.Items[graphItemID].Processing.GetName()
}
if originWf.Graph.Items[graphItemID].Workflow != nil {
name = originWf.Graph.Items[graphItemID].Workflow.GetName()
}
if len(t.Dependencies) == 0 && name != "" {
firstItems = append(firstItems, GetArgoName(name, graphItemID))
}
if deps := originWf.IsDependancy(graphItemID); len(deps) == 0 && name != "" {
lastItems = append(lastItems, GetArgoName(name, graphItemID))
}
dag.Tasks = append(dag.Tasks, *t)
return dag, firstItems, lastItems
}
func (t *Task) addParams(params []models.Param) {
for _, value := range params {
t.Arguments.Parameters = append(t.Arguments.Parameters, Parameter{
Name: value.Name,
Value: value.Value,
})
}
}
func (t *Task) GetDeps(name string) (int, string) {
for i, deps := range t.Dependencies {
if strings.Contains(deps, name) {
return i, deps
}
}
return 0, ""
}
type Dag struct { type Dag struct {
Tasks []Task `yaml:"tasks,omitempty"` Tasks []Task `yaml:"tasks,omitempty"`
} }
func (d *Dag) GetTask(taskName string) *Task {
for _, task := range d.Tasks {
if strings.Contains(task.Name, taskName) {
return &task
}
}
return nil
}
type TemplateMetadata struct { type TemplateMetadata struct {
Labels map[string]string `yaml:"labels,omitempty"` Labels map[string]string `yaml:"labels,omitempty"`
Annotations map[string]string `yaml:"annotations,omitempty"` Annotations map[string]string `yaml:"annotations,omitempty"`
@@ -72,23 +182,76 @@ type Secret struct {
Key string `yaml:"key"` Key string `yaml:"key"`
} }
func NewSecret(name string, key string) *Secret {
return &Secret{Name: name, Key: key + "-key"}
}
type Key struct { type Key struct {
Key string `yaml:"key"` Key string `yaml:"key"`
Bucket string `yaml:"bucket"` Bucket string `yaml:"bucket"`
EndPoint string `yaml:"endpoint"` EndPoint string `yaml:"endpoint"`
Insecure bool `yaml:"insecure"` Insecure bool `yaml:"insecure"`
AccessKeySecret *Secret `yaml:"accessKeySecret"` AccessKeySecret *Secret `yaml accessKeySecret`
SecretKeySecret *Secret `yaml:"secretKeySecret"` SecretKeySecret *Secret `yaml secretKeySecret`
} }
type Artifact struct { type Artifact struct {
Name string `yaml:"name"` Name string `yaml:"name"`
Path string `yaml:"path"` Path string `yaml:"path"`
S3 *Key `yaml:"s3,omitempty"`
} }
type ArtifactRepositoryRef struct { func NewArtifact(name string, rw graph.StorageProcessingGraphLink, params []models.Param, template Template) *Artifact {
ConfigMap string `yaml:"configMap"` if rw.Write {
Key string `yaml:"key"` name += "-" + rw.Destination + "-input-write"
} else {
name = "-" + rw.Destination + "-input-read"
}
return &Artifact{
Name: name,
Path: template.ReplacePerEnv(rw.Source, params),
}
}
func (a *Artifact) BindToArgo(storageType enum.StorageType, rw graph.StorageProcessingGraphLink, params []models.Param, template Template) {
if rw.Write {
template.Outputs.Artifacts = append(template.Inputs.Artifacts, *a)
} else {
template.Inputs.Artifacts = append(template.Outputs.Artifacts, *a)
}
}
func (a *Artifact) bindS3(rw graph.StorageProcessingGraphLink, params []models.Param, template Template) {
a.S3 = &Key{
Key: template.ReplacePerEnv(rw.Destination+"/"+rw.FileName, params),
Insecure: true, // temporary
}
/* sel := storage.GetSelectedInstance()
if sel != nil {
if sel.(*resources.StorageResourceInstance).Credentials != nil {
tool, err := tools2.NewService(conf.GetConfig().Mode)
if err != nil || tool == nil {
logger.Error().Msg("Could not create the access secret")
} else {
id, err := tool.CreateAccessSecret(namespace,
sel.(*resources.StorageResourceInstance).Credentials.Login,
sel.(*resources.StorageResourceInstance).Credentials.Pass)
if err == nil {
a.S3.AccessKeySecret = NewSecret(id, "access")
a.S3.SecretKeySecret = NewSecret(id, "secret")
}
}
}
source := sel.(*resources.StorageResourceInstance).Source
a.S3.Key = strings.ReplaceAll(strings.ReplaceAll(a.S3.Key, source+"/", ""), source, "")
splits := strings.Split(a.S3.EndPoint, "/")
if len(splits) > 1 {
a.S3.Bucket = splits[0]
a.S3.EndPoint = strings.Join(splits[1:], "/")
} else {
a.S3.Bucket = splits[0]
}
} */
} }
type InOut struct { type InOut struct {
@@ -106,34 +269,8 @@ type Template struct {
Resource ServiceResource `yaml:"resource,omitempty"` Resource ServiceResource `yaml:"resource,omitempty"`
} }
func (template *Template) CreateEventContainer(exec *workflow_execution.WorkflowExecution, nt *resources.NativeTool, dag *Dag) { func (template *Template) CreateContainer(processing *resources.ProcessingResource, dag *Dag) {
container := Container{Image: "natsio/nats-box"} instance := processing.GetSelectedInstance()
container.Command = []string{"sh", "-c"} // all is bash
var event native_tools.WorkflowEventParams
b, err := json.Marshal(nt.Params)
if err != nil {
fmt.Println(err)
return
}
err = json.Unmarshal(b, &event)
if err != nil {
fmt.Println(err)
return
}
if event.WorkflowResourceID != "" {
container.Args = append(container.Args, "nats pub --server "+conf.GetConfig().NatsURL+":4222 "+tools.WORKFLOW_EVENT.GenerateKey()+" '{\"workflow_id\":\""+event.WorkflowResourceID+"\"}'")
container.Args = []string{strings.Join(container.Args, " ")}
template.Container = container
}
}
func (template *Template) CreateContainer(exec *workflow_execution.WorkflowExecution, processing *resources.ProcessingResource, dag *Dag) {
index := 0
if d, ok := exec.SelectedInstances[processing.GetID()]; ok {
index = d
}
instance := processing.GetSelectedInstance(&index)
if instance == nil { if instance == nil {
return return
} }
@@ -167,7 +304,7 @@ func (template *Template) CreateContainer(exec *workflow_execution.WorkflowExecu
func (template *Template) ReplacePerEnv(arg string, envs []models.Param) string { func (template *Template) ReplacePerEnv(arg string, envs []models.Param) string {
for _, v := range envs { for _, v := range envs {
if v.Name != "" && strings.Contains(arg, v.Name) { if strings.Contains(arg, v.Name) {
value := "{{ inputs.parameters." + v.Name + " }}" value := "{{ inputs.parameters." + v.Name + " }}"
arg = strings.ReplaceAll(arg, v.Name, value) arg = strings.ReplaceAll(arg, v.Name, value)
arg = strings.ReplaceAll(arg, "$"+v.Name, value) arg = strings.ReplaceAll(arg, "$"+v.Name, value)
@@ -179,10 +316,43 @@ func (template *Template) ReplacePerEnv(arg string, envs []models.Param) string
// Add the metadata that allow Admiralty to pick up an Argo Workflow that needs to be reparted // Add the metadata that allow Admiralty to pick up an Argo Workflow that needs to be reparted
// The value of "clustername" is the peerId, which must be replaced by the node name's for this specific execution // The value of "clustername" is the peerId, which must be replaced by the node name's for this specific execution
func (t *Template) AddAdmiraltyAnnotations(peerId string) { func (t *Template) AddAdmiraltyAnnotations(peerID, namespace string) error {
if t.Metadata.Annotations == nil { if t.Metadata.Annotations == nil {
t.Metadata.Annotations = make(map[string]string) t.Metadata.Annotations = make(map[string]string)
} }
t.Metadata.Annotations["multicluster.admiralty.io/elect"] = ""
t.Metadata.Annotations["multicluster.admiralty.io/clustername"] = peerId const key = "admiralty.io/multi-cluster-scheduler"
var annotation SchedulerAnnotation
// Parse existing annotation if it exists
if val, ok := t.Metadata.Annotations[key]; ok && val != "" {
if err := json.Unmarshal([]byte(val), &annotation); err != nil {
return fmt.Errorf("failed to parse existing scheduler annotation: %w", err)
}
}
// Add new affinity
annotation.Affinities = append(annotation.Affinities, affinity{
Cluster: "target-" + peerID + "-" + namespace,
Namespace: namespace,
})
// Encode back to JSON
bytes, err := json.Marshal(annotation)
if err != nil {
return fmt.Errorf("failed to encode scheduler annotation: %w", err)
}
t.Metadata.Annotations[key] = string(bytes)
return nil
}
type affinity struct {
Cluster string `json:"cluster"`
Namespace string `json:"namespace"`
}
type SchedulerAnnotation struct {
Affinities []affinity `json:"affinities"`
} }

92
models/utils.go Normal file
View File

@@ -0,0 +1,92 @@
package models
import (
"strings"
w "cloud.o-forge.io/core/oc-lib/models/workflow"
)
type WorkflowsDependancies struct {
FirstWfTasks map[string][]string
RelatedWfTasks map[string][]string
LastWfTasks map[string][]string
}
func NewWorkflowDependancies() *WorkflowsDependancies {
return &WorkflowsDependancies{
FirstWfTasks: map[string][]string{},
RelatedWfTasks: map[string][]string{},
LastWfTasks: map[string][]string{},
}
}
func (w *WorkflowsDependancies) BindFirstTasks(depsFunc func(v string) []w.Deps, dag *Dag) {
for wfID, firstTasks := range w.FirstWfTasks {
deps := depsFunc(wfID)
if task := dag.GetTask(wfID); task != nil && len(deps) > 0 {
task.Dependencies = append(task.Dependencies, firstTasks...)
}
}
}
func (w *WorkflowsDependancies) BindRelatedTasks(dag *Dag) {
for wfID, relatedWfTasks := range w.RelatedWfTasks {
for _, dep := range relatedWfTasks {
if task := dag.GetTask(dep); task != nil {
index := -1
if i, deps := task.GetDeps(wfID); deps != "" {
index = i
}
if index != -1 {
task.Dependencies = append(task.Dependencies[:index], task.Dependencies[index+1:]...)
}
if w.LastWfTasks[wfID] != nil {
task.Dependencies = append(task.Dependencies, w.LastWfTasks[wfID]...)
}
}
}
}
}
type Workflow struct {
ApiVersion string `yaml:"apiVersion"`
Kind string `yaml:"kind"`
Metadata struct {
Name string `yaml:"name"`
} `yaml:"metadata"`
Spec Spec `yaml:"spec,omitempty"`
}
func (b *Workflow) GetDag() *Dag {
for _, t := range b.Spec.Templates {
if t.Name == "dag" {
return t.Dag
}
}
b.Spec.Templates = append(b.Spec.Templates, Template{Name: "dag", Dag: &Dag{}})
return b.Spec.Templates[len(b.Spec.Templates)-1].Dag
}
type Spec struct {
ServiceAccountName string `yaml:"serviceAccountName"`
Entrypoint string `yaml:"entrypoint"`
Arguments []Parameter `yaml:"arguments,omitempty"`
Volumes []VolumeClaimTemplate `yaml:"volumeClaimTemplates,omitempty"`
Templates []Template `yaml:"templates"`
Timeout int `yaml:"activeDeadlineSeconds,omitempty"`
}
func GetArgoName(raw_name string, component_id string) (formatedName string) {
formatedName = strings.ReplaceAll(raw_name, " ", "-")
formatedName += "-" + component_id
formatedName = strings.ToLower(formatedName)
return
}
func TransformDepsToArgo(deps []w.Deps) []string {
argoDeps := []string{}
for _, dep := range deps {
argoDeps = append(argoDeps, GetArgoName(dep.Source, dep.Dest))
}
return argoDeps
}

View File

@@ -1,5 +1,12 @@
package models package models
import (
"fmt"
"strings"
"cloud.o-forge.io/core/oc-lib/models/resources"
)
type VolumeClaimTemplate struct { type VolumeClaimTemplate struct {
Metadata struct { Metadata struct {
Name string `yaml:"name"` Name string `yaml:"name"`
@@ -15,3 +22,22 @@ type VolumeSpec struct {
} `yaml:"requests"` } `yaml:"requests"`
} `yaml:"resources"` } `yaml:"resources"`
} }
type VolumeMount struct {
Name string `yaml:"name"`
MountPath string `yaml:"mountPath"`
Storage *resources.StorageResource `yaml:"-"`
}
func (v *VolumeMount) BindToArgo(workflow *Workflow) { // TODO : one think about remote volume but TG
index := 0
if v.Storage.SelectedInstanceIndex != nil && (*v.Storage.SelectedInstanceIndex) >= 0 {
index = *v.Storage.SelectedInstanceIndex
}
storage := v.Storage.Instances[index]
new_volume := VolumeClaimTemplate{}
new_volume.Metadata.Name = strings.ReplaceAll(strings.ToLower(v.Name), " ", "-")
new_volume.Spec.AccessModes = []string{"ReadWriteOnce"}
new_volume.Spec.Resources.Requests.Storage = fmt.Sprintf("%v", storage.SizeGB) + storage.SizeType.ToArgo()
workflow.Spec.Volumes = append(workflow.Spec.Volumes, new_volume)
}

View File

@@ -4,16 +4,14 @@ import (
"errors" "errors"
"io" "io"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/watch" "k8s.io/apimachinery/pkg/watch"
) )
type Tool interface { type Tool interface {
CreateArgoWorkflow(path string, ns string) (string, error) CreateArgoWorkflow(path string, ns string) (string, error)
CreateAccessSecret(user string, password string, storageId string, namespace string) (string, error) CreateAccessSecret(ns string, login string, password string) (string, error)
GetArgoWatch(executionId string, wfName string) (watch.Interface, error) GetArgoWatch(executionId string, wfName string) (watch.Interface, error)
GetPodLogger(ns string, wfName string, podName string) (io.ReadCloser, error) GetPodLogger(ns string, wfName string, podName string) (io.ReadCloser, error)
GetS3Secret(storageId string, namespace string) *v1.Secret
} }
var _service = map[string]func() (Tool, error){ var _service = map[string]func() (Tool, error){

View File

@@ -2,6 +2,7 @@ package tools
import ( import (
"context" "context"
"encoding/base64"
"errors" "errors"
"fmt" "fmt"
"io" "io"
@@ -12,8 +13,8 @@ import (
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned" "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned"
"github.com/google/uuid"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/runtime/serializer"
@@ -30,7 +31,7 @@ type KubernetesTools struct {
func NewKubernetesTool() (Tool, error) { func NewKubernetesTool() (Tool, error) {
// Load Kubernetes config (from ~/.kube/config) // Load Kubernetes config (from ~/.kube/config)
config := &rest.Config{ config := &rest.Config{
Host: "https://" + conf.GetConfig().KubeHost + ":" + conf.GetConfig().KubePort, Host: conf.GetConfig().KubeHost + ":" + conf.GetConfig().KubePort,
TLSClientConfig: rest.TLSClientConfig{ TLSClientConfig: rest.TLSClientConfig{
CAData: []byte(conf.GetConfig().KubeCA), CAData: []byte(conf.GetConfig().KubeCA),
CertData: []byte(conf.GetConfig().KubeCert), CertData: []byte(conf.GetConfig().KubeCert),
@@ -87,20 +88,21 @@ func (k *KubernetesTools) CreateArgoWorkflow(path string, ns string) (string, er
return createdWf.Name, nil return createdWf.Name, nil
} }
func (k *KubernetesTools) CreateAccessSecret(access string, password string, storageId string, namespace string) (string, error) { func (k *KubernetesTools) CreateAccessSecret(ns string, login string, password string) (string, error) {
// Namespace where the secret will be created // Namespace where the secret will be created
namespace := "default"
// Encode the secret data (Kubernetes requires base64-encoded values) // Encode the secret data (Kubernetes requires base64-encoded values)
secretData := map[string][]byte{ secretData := map[string][]byte{
"access-key": []byte(access), "access-key": []byte(base64.StdEncoding.EncodeToString([]byte(login))),
"secret-key": []byte(password), "secret-key": []byte(base64.StdEncoding.EncodeToString([]byte(password))),
} }
// Define the Secret object // Define the Secret object
name := storageId+"-secret-s3" name := uuid.New().String()
secret := &v1.Secret{ secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: name, Name: name,
Namespace: namespace, Namespace: ns,
}, },
Type: v1.SecretTypeOpaque, Type: v1.SecretTypeOpaque,
Data: secretData, Data: secretData,
@@ -110,28 +112,9 @@ func (k *KubernetesTools) CreateAccessSecret(access string, password string, sto
if err != nil { if err != nil {
return "", errors.New("Error creating secret: " + err.Error()) return "", errors.New("Error creating secret: " + err.Error())
} }
return name, nil return name, nil
} }
func (k *KubernetesTools) GetS3Secret(storageId string, namespace string) *v1.Secret {
secret, err := k.Set.CoreV1().Secrets(namespace).Get(context.TODO(), storageId + "-secret-s3", metav1.GetOptions{})
// Get(context.TODO(),storageId + "-artifact-server", metav1.GetOptions{})
if err != nil && !k8serrors.IsNotFound(err) {
l := utils.GetLogger()
l.Fatal().Msg("An error happened when retrieving secret in " + namespace + " : " + err.Error())
}
if k8serrors.IsNotFound(err) {
return nil
}
return secret
// return secret
}
func (k *KubernetesTools) GetArgoWatch(executionId string, wfName string) (watch.Interface, error){ func (k *KubernetesTools) GetArgoWatch(executionId string, wfName string) (watch.Interface, error){
options := metav1.ListOptions{FieldSelector: "metadata.name=oc-monitor-"+wfName} options := metav1.ListOptions{FieldSelector: "metadata.name=oc-monitor-"+wfName}

View File

@@ -0,0 +1,146 @@
package workflow_builder
import (
"encoding/json"
"fmt"
"net/http"
"oc-monitord/utils"
"slices"
"time"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/peer"
tools "cloud.o-forge.io/core/oc-lib/tools"
)
// AdmiraltySetter drives the creation of Admiralty source/target resources
// between a local and a remote peer for one workflow execution.
type AdmiraltySetter struct {
	Id       string // execution identifier; corresponds to the workflow_executions id and is used as the namespace suffix
	NodeName string // name of the Admiralty node created for this execution, filled in by storeNodeName after checkNodeStatus succeeds
}
// InitializeAdmiralty sets up the Admiralty federation between the local
// and the remote peer for this execution:
//  1. creates the Admiralty Source on the remote peer,
//  2. retrieves the remote kubeconfig,
//  3. stores it as a secret on the local peer,
//  4. creates the Admiralty Target locally,
//  5. waits for the resulting Admiralty node to appear.
//
// Any unrecoverable HTTP failure panics (via callRemoteExecution); load
// failures of either peer are returned as errors.
func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string, remotePeerID string) error {
	logger := logs.GetLogger()

	data := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER), "", localPeerID, nil, nil).LoadOne(remotePeerID)
	if data.Code != 200 {
		logger.Error().Msg("Error while trying to instantiate remote peer " + remotePeerID)
		// data.Err may contain '%' characters; never use it as a format string.
		return fmt.Errorf("%s", data.Err)
	}
	remotePeer := data.ToPeer()

	data = oclib.NewRequest(oclib.LibDataEnum(oclib.PEER), "", localPeerID, nil, nil).LoadOne(localPeerID)
	if data.Code != 200 {
		logger.Error().Msg("Error while trying to instantiate local peer " + localPeerID)
		return fmt.Errorf("%s", data.Err)
	}
	localPeer := data.ToPeer()

	// Routes used against the peers' Admiralty endpoints; :id is replaced
	// by the execution id by LaunchPeerExecution.
	caller := tools.NewHTTPCaller(
		map[tools.DataType]map[tools.METHOD]string{
			tools.ADMIRALTY_SOURCE: {
				tools.POST: "/:id",
			},
			tools.ADMIRALTY_KUBECONFIG: {
				tools.GET: "/:id",
			},
			tools.ADMIRALTY_SECRET: {
				tools.POST: "/:id/" + remotePeerID,
			},
			tools.ADMIRALTY_TARGET: {
				tools.POST: "/:id/" + remotePeerID,
			},
			tools.ADMIRALTY_NODES: {
				tools.GET: "/:id/" + remotePeerID,
			},
		},
	)

	logger.Info().Msg("\n\n Creating the Admiralty Source on " + remotePeerID + " ns-" + s.Id)
	// StatusConflict is accepted: the source may already exist from a previous run.
	_ = s.callRemoteExecution(remotePeer, []int{http.StatusCreated, http.StatusConflict}, caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil, true)
	logger.Info().Msg("\n\n Retrieving kubeconfig with the secret on " + remotePeerID + " ns-" + s.Id)
	kubeconfig := s.getKubeconfig(remotePeer, caller)
	logger.Info().Msg("\n\n Creating a secret from the kubeconfig " + localPeerID + " ns-" + s.Id)
	_ = s.callRemoteExecution(localPeer, []int{http.StatusCreated}, caller, s.Id, tools.ADMIRALTY_SECRET, tools.POST, kubeconfig, true)
	logger.Info().Msg("\n\n Creating the Admiralty Target on " + localPeerID + " in namespace " + s.Id)
	_ = s.callRemoteExecution(localPeer, []int{http.StatusCreated, http.StatusConflict}, caller, s.Id, tools.ADMIRALTY_TARGET, tools.POST, nil, true)
	logger.Info().Msg("\n\n Checking for the creation of the admiralty node on " + localPeerID + " ns-" + s.Id)
	s.checkNodeStatus(localPeer, caller)

	return nil
}
// getKubeconfig fetches the remote peer's kubeconfig for this execution
// and decodes it into a string map. Any empty response or decode failure
// is fatal (logged, then panic), matching the package's failure style.
func (s *AdmiraltySetter) getKubeconfig(peer *peer.Peer, caller *tools.HTTPCaller) map[string]string {
	_ = s.callRemoteExecution(peer, []int{http.StatusOK}, caller, s.Id, tools.ADMIRALTY_KUBECONFIG, tools.GET, nil, true)

	body := caller.LastResults["body"]
	if body == nil || len(body.([]byte)) == 0 {
		utils.GetLogger().Error().Msg("Something went wrong when retrieving data from Get call for kubeconfig")
		panic(0)
	}

	var kubedata map[string]string
	if err := json.Unmarshal(body.([]byte), &kubedata); err != nil {
		utils.GetLogger().Error().Msg("Something went wrong when unmarshalling data from Get call for kubeconfig")
		panic(0)
	}
	return kubedata
}
// callRemoteExecution performs one HTTP call against a peer's Admiralty API
// and validates the response code against expectedCode. A transport error
// always panics; an unexpected status code panics only when panicCode is
// true (otherwise the mismatch is just logged). The raw response map from
// LaunchPeerExecution is returned; the last status/body are also available
// on caller.LastResults.
func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode []int, caller *tools.HTTPCaller, dataID string, dt tools.DataType, method tools.METHOD, body interface{}, panicCode bool) map[string]interface{} {
	l := utils.GetLogger()
	resp, err := peer.LaunchPeerExecution(peer.UUID, dataID, dt, method, body, caller)
	if err != nil {
		// was "at" + peer.Url: missing space made the URL unreadable in logs
		l.Error().Msg("Error when executing on peer at " + peer.Url)
		l.Error().Msg(err.Error())
		panic(0)
	}

	if !slices.Contains(expectedCode, caller.LastResults["code"].(int)) {
		l.Error().Msg(fmt.Sprint("Didn't receive the expected code :", caller.LastResults["code"], "when expecting", expectedCode))
		// Surface the response body, if any, to help diagnose the mismatch.
		if _, ok := caller.LastResults["body"]; ok {
			l.Info().Msg(string(caller.LastResults["body"].([]byte)))
		}
		if panicCode {
			panic(0)
		}
	}

	return resp
}
// storeNodeName extracts the Admiralty node name from the last HTTP
// response body (expected shape: {"node": {"metadata": {"name": ...}}})
// and records it on the setter. Missing or malformed data is fatal
// (logged, then panic), matching the package's failure style.
func (s *AdmiraltySetter) storeNodeName(caller *tools.HTTPCaller) {
	l := utils.GetLogger()

	var data map[string]interface{}
	if resp, ok := caller.LastResults["body"]; ok {
		// was silently ignored: a decode failure left data nil and produced
		// a misleading "node not found" error further down
		if err := json.Unmarshal(resp.([]byte), &data); err != nil {
			l.Error().Msg("Could not decode the response body when looking for the created node : " + err.Error())
			panic(0)
		}
	}

	node, ok := data["node"]
	if !ok {
		l.Error().Msg("Could not retrieve data about the recently created node")
		panic(0)
	}

	// Walk node.metadata.name with checked assertions so a schema change
	// fails with a clear message instead of a bare interface-conversion panic.
	metadata, ok := node.(map[string]interface{})["metadata"].(map[string]interface{})
	if !ok {
		l.Error().Msg("Unexpected node payload: missing metadata object")
		panic(0)
	}
	name, ok := metadata["name"].(string)
	if !ok {
		l.Error().Msg("Unexpected node payload: metadata.name is not a string")
		panic(0)
	}
	s.NodeName = name
}
// checkNodeStatus polls the local peer until the Admiralty node created for
// this execution is visible, waiting 10s between attempts. On success the
// node name is recorded via storeNodeName. If the node never appears after
// all attempts, the function logs and panics.
//
// Bug fix: the original loop used `for i := range 5` (i in 0..4) and tested
// `i == 5` inside the loop, which was unreachable — on exhaustion the
// function silently returned without the node. The panic now runs after the
// loop, so exhaustion is always fatal as intended.
func (s *AdmiraltySetter) checkNodeStatus(localPeer *peer.Peer, caller *tools.HTTPCaller) {
	const maxAttempts = 5
	for i := 0; i < maxAttempts; i++ {
		time.Sleep(10 * time.Second) // let some time for kube to generate the node
		_ = s.callRemoteExecution(localPeer, []int{http.StatusOK}, caller, s.Id, tools.ADMIRALTY_NODES, tools.GET, nil, false)
		if caller.LastResults["code"] == 200 {
			s.storeNodeName(caller)
			return
		}
		logger.Info().Msg("Could not verify that node is up. Retrying...")
	}
	logger.Error().Msg("Node on " + localPeer.Name + " was never found, panicking !")
	panic(0)
}

View File

@@ -1,108 +1,45 @@
// Package workflow_builder traduit les informations du graphe d'un Workflow // A class that translates the informations held in the graph object
// (ses composants, ses liens) en un fichier YAML Argo Workflow prêt à être // via its lists of components into an argo file, using the a list of
// soumis à un cluster Kubernetes. Le point d'entrée principal est ArgoBuilder. // link ID to build the dag
package workflow_builder package workflow_builder
import ( import (
"encoding/json" "errors"
"fmt" "fmt"
"oc-monitord/conf" "oc-monitord/conf"
"oc-monitord/models"
. "oc-monitord/models" . "oc-monitord/models"
"os" "os"
"strings"
"time" "time"
oclib "cloud.o-forge.io/core/oc-lib" oclib "cloud.o-forge.io/core/oc-lib"
oclib_config "cloud.o-forge.io/core/oc-lib/config"
"cloud.o-forge.io/core/oc-lib/logs" "cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/common/enum"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/resources" "cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/models/resources/native_tools"
w "cloud.o-forge.io/core/oc-lib/models/workflow" w "cloud.o-forge.io/core/oc-lib/models/workflow"
"cloud.o-forge.io/core/oc-lib/models/workflow/graph"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
"cloud.o-forge.io/core/oc-lib/tools"
"github.com/nats-io/nats.go"
"github.com/nwtgck/go-fakelish" "github.com/nwtgck/go-fakelish"
"github.com/rs/zerolog" "github.com/rs/zerolog"
"gopkg.in/yaml.v3" "gopkg.in/yaml.v3"
) )
// logger est le logger zerolog partagé au sein du package, initialisé à
// chaque appel de CreateDAG pour récupérer la configuration courante.
var logger zerolog.Logger var logger zerolog.Logger
// ArgoBuilder est le constructeur principal du fichier Argo Workflow.
// Il porte l'état de la construction (workflow source, templates générés,
// services k8s à créer, timeout global, liste des peers distants impliqués).
type ArgoBuilder struct { type ArgoBuilder struct {
// OriginWorkflow est le workflow métier Open Cloud dont on construit la représentation Argo.
OriginWorkflow *w.Workflow OriginWorkflow *w.Workflow
// Workflow est la structure YAML Argo en cours de construction. Workflow *models.Workflow
Workflow Workflow Services []*Service
// Services liste les services Kubernetes à exposer pour les processings "IsService". Timeout int
Services []*Service RemotePeers []string
// Timeout est la durée maximale d'exécution en secondes (activeDeadlineSeconds).
Timeout int
// RemotePeers contient les IDs des peers distants détectés via Admiralty.
RemotePeers []string
} }
// Workflow est la structure racine du fichier YAML Argo Workflow. // TODO: found on a processing instance linked to storage
// Elle correspond exactement au format attendu par le contrôleur Argo. // add s3, gcs, azure, etc if needed on a link between processing and storage
type Workflow struct { func (b *ArgoBuilder) CreateDAG(namespace string, write bool) (int, []string, []string, error) {
ApiVersion string `yaml:"apiVersion"`
Kind string `yaml:"kind"`
Metadata struct {
Name string `yaml:"name"`
} `yaml:"metadata"`
Spec Spec `yaml:"spec,omitempty"`
}
// getDag retourne le pointeur sur le template "dag" du workflow.
// S'il n'existe pas encore, il est créé et ajouté à la liste des templates.
func (b *Workflow) getDag() *Dag {
for _, t := range b.Spec.Templates {
if t.Name == "dag" {
return t.Dag
}
}
b.Spec.Templates = append(b.Spec.Templates, Template{Name: "dag", Dag: &Dag{}})
return b.Spec.Templates[len(b.Spec.Templates)-1].Dag
}
// Spec contient la spécification complète du workflow Argo :
// compte de service, point d'entrée, volumes, templates et timeout.
type Spec struct {
ArtifactRepositoryRef
ServiceAccountName string `yaml:"serviceAccountName,omitempty"`
Entrypoint string `yaml:"entrypoint"`
Arguments []Parameter `yaml:"arguments,omitempty"`
Volumes []VolumeClaimTemplate `yaml:"volumeClaimTemplates,omitempty"`
Templates []Template `yaml:"templates"`
Timeout int `yaml:"activeDeadlineSeconds,omitempty"`
}
// CreateDAG est le point d'entrée de la construction du DAG Argo.
// Il crée tous les templates (un par processing / native tool / sous-workflow),
// configure les volumes persistants, positionne les métadonnées globales du
// workflow et retourne :
// - le nombre de tâches dans le DAG,
// - les noms des premières tâches (sans dépendances),
// - les noms des dernières tâches (dont personne ne dépend),
// - une éventuelle erreur.
//
// Le paramètre write est conservé pour usage futur (écriture effective du YAML).
// TODO: gérer S3, GCS, Azure selon le type de stockage lié au processing.
func (b *ArgoBuilder) CreateDAG(exec *workflow_execution.WorkflowExecution, namespace string, write bool) (int, []string, []string, error) {
logger = logs.GetLogger() logger = logs.GetLogger()
logger.Info().Msg(fmt.Sprint("Creating DAG ", b.OriginWorkflow.Graph.Items)) logger.Info().Msg(fmt.Sprint("Creating DAG ", b.OriginWorkflow.Graph.Items))
// Crée un template Argo pour chaque nœud du graphe et collecte les volumes. // handle services by checking if there is only one processing with hostname and port
firstItems, lastItems, volumes := b.createTemplates(exec, namespace) firstItems, lastItems, volumes := b.createTemplates(namespace)
b.createVolumes(exec, volumes) b.createVolumes(volumes)
if b.Timeout > 0 { if b.Timeout > 0 {
b.Workflow.Spec.Timeout = b.Timeout b.Workflow.Spec.Timeout = b.Timeout
} }
@@ -110,622 +47,186 @@ func (b *ArgoBuilder) CreateDAG(exec *workflow_execution.WorkflowExecution, name
b.Workflow.Spec.Entrypoint = "dag" b.Workflow.Spec.Entrypoint = "dag"
b.Workflow.ApiVersion = "argoproj.io/v1alpha1" b.Workflow.ApiVersion = "argoproj.io/v1alpha1"
b.Workflow.Kind = "Workflow" b.Workflow.Kind = "Workflow"
if !write { return len(b.Workflow.GetDag().Tasks), firstItems, lastItems, nil
return len(b.Workflow.getDag().Tasks), firstItems, lastItems, nil
}
return len(b.Workflow.getDag().Tasks), firstItems, lastItems, nil
} }
// createTemplates parcourt tous les nœuds du graphe (processings, native tools, func (b *ArgoBuilder) createTemplates(namespace string) ([]string, []string, []models.VolumeMount) {
// sous-workflows) et génère les templates Argo correspondants. volumes := []models.VolumeMount{}
// Elle gère également le recâblage des dépendances DAG entre sous-workflows
// imbriqués, et l'ajout du pod de service si nécessaire.
// Retourne les premières tâches, les dernières tâches et les volumes à créer.
func (b *ArgoBuilder) createTemplates(exec *workflow_execution.WorkflowExecution, namespace string) ([]string, []string, []VolumeMount) {
volumes := []VolumeMount{}
firstItems := []string{} firstItems := []string{}
lastItems := []string{} lastItems := []string{}
items := b.OriginWorkflow.GetGraphItems(b.OriginWorkflow.Graph.IsProcessing)
// --- Processings --- logger.Info().Msg(fmt.Sprint("Creating templates", len(items)))
for _, item := range b.OriginWorkflow.GetGraphItems(b.OriginWorkflow.Graph.IsProcessing) { for _, item := range items {
index := 0 instance := item.Processing.GetSelectedInstance()
_, res := item.GetResource()
if d, ok := exec.SelectedInstances[res.GetID()]; ok {
index = d
}
instance := item.Processing.GetSelectedInstance(&index)
logger.Info().Msg(fmt.Sprint("Creating template for", item.Processing.GetName(), instance)) logger.Info().Msg(fmt.Sprint("Creating template for", item.Processing.GetName(), instance))
if instance == nil || instance.(*resources.ProcessingInstance).Access == nil && instance.(*resources.ProcessingInstance).Access.Container != nil { if instance == nil || instance.(*resources.ProcessingInstance).Access == nil && instance.(*resources.ProcessingInstance).Access.Container != nil {
logger.Error().Msg("Not enough configuration setup, template can't be created : " + item.Processing.GetName()) logger.Error().Msg("Not enough configuration setup, template can't be created : " + item.Processing.GetName())
return firstItems, lastItems, volumes return firstItems, lastItems, volumes
} }
volumes, firstItems, lastItems = b.createArgoTemplates(exec, volumes, firstItems, lastItems = b.createArgoTemplates(namespace,
namespace,
item.ID, item.Processing, volumes, firstItems, lastItems) item.ID, item.Processing, volumes, firstItems, lastItems)
} }
// --- Native Tools de type WORKFLOW_EVENT uniquement --- wfDeps := models.NewWorkflowDependancies()
for _, item := range b.OriginWorkflow.GetGraphItems(b.OriginWorkflow.Graph.IsNativeTool) { for _, workflowID := range b.OriginWorkflow.Workflows {
if item.NativeTool.Kind != int(native_tools.WORKFLOW_EVENT) { b.createWorkflowArgoTemplate(workflowID, namespace, wfDeps)
continue
}
index := 0
_, res := item.GetResource()
if d, ok := exec.SelectedInstances[res.GetID()]; ok {
index = d
}
instance := item.NativeTool.GetSelectedInstance(&index)
logger.Info().Msg(fmt.Sprint("Creating template for", item.NativeTool.GetName(), instance))
volumes, firstItems, lastItems = b.createArgoTemplates(exec,
namespace, item.ID, item.NativeTool, volumes, firstItems, lastItems)
} }
wfDeps.BindRelatedTasks(b.Workflow.GetDag())
wfDeps.BindFirstTasks(b.OriginWorkflow.GetDependencies, b.Workflow.GetDag())
// --- Sous-workflows : chargement, construction récursive et fusion du DAG ---
firstWfTasks := map[string][]string{}
latestWfTasks := map[string][]string{}
relatedWfTasks := map[string][]string{}
for _, wf := range b.OriginWorkflow.Workflows {
realWorkflow, code, err := w.NewAccessor(nil).LoadOne(wf)
if code != 200 {
logger.Error().Msg("Error loading the workflow : " + err.Error())
continue
}
subBuilder := ArgoBuilder{OriginWorkflow: realWorkflow.(*w.Workflow), Timeout: b.Timeout}
_, fi, li, err := subBuilder.CreateDAG(exec, namespace, false)
if err != nil {
logger.Error().Msg("Error creating the subworkflow : " + err.Error())
continue
}
firstWfTasks[wf] = fi
if ok, depsOfIds := subBuilder.isArgoDependancy(wf); ok { // le sous-workflow est une dépendance d'autre chose
latestWfTasks[wf] = li
relatedWfTasks[wf] = depsOfIds
}
// Fusion des tâches, templates, volumes et arguments du sous-workflow dans le DAG principal.
subDag := subBuilder.Workflow.getDag()
d := b.Workflow.getDag()
d.Tasks = append(d.Tasks, subDag.Tasks...)
b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, subBuilder.Workflow.Spec.Templates...)
b.Workflow.Spec.Volumes = append(b.Workflow.Spec.Volumes, subBuilder.Workflow.Spec.Volumes...)
b.Workflow.Spec.Arguments = append(b.Workflow.Spec.Arguments, subBuilder.Workflow.Spec.Arguments...)
b.Services = append(b.Services, subBuilder.Services...)
}
// Recâblage : les tâches qui dépendaient du sous-workflow dépendent désormais
// de sa dernière tâche réelle (latestWfTasks).
for wfID, depsOfIds := range relatedWfTasks {
for _, dep := range depsOfIds {
for _, task := range b.Workflow.getDag().Tasks {
if strings.Contains(task.Name, dep) {
index := -1
for i, depp := range task.Dependencies {
if strings.Contains(depp, wfID) {
index = i
break
}
}
if index != -1 {
task.Dependencies = append(task.Dependencies[:index], task.Dependencies[index+1:]...)
}
task.Dependencies = append(task.Dependencies, latestWfTasks[wfID]...)
}
}
}
}
// Les premières tâches du sous-workflow héritent des dépendances
// que le sous-workflow avait vis-à-vis du DAG principal.
for wfID, fi := range firstWfTasks {
deps := b.getArgoDependencies(wfID)
if len(deps) > 0 {
for _, dep := range fi {
for _, task := range b.Workflow.getDag().Tasks {
if strings.Contains(task.Name, dep) {
task.Dependencies = append(task.Dependencies, deps...)
}
}
}
}
}
// Si des services Kubernetes sont nécessaires, on ajoute le pod dédié.
if b.Services != nil { if b.Services != nil {
dag := b.Workflow.getDag() dag := b.Workflow.GetDag()
dag.Tasks = append(dag.Tasks, Task{Name: "workflow-service-pod", Template: "workflow-service-pod"}) dag.Tasks = append(dag.Tasks, Task{Name: "workflow-service-pod", Template: "workflow-service-pod"})
b.addServiceToArgo() b.addServiceToArgo()
} }
return firstItems, lastItems, volumes return firstItems, lastItems, volumes
} }
// createArgoTemplates crée le template Argo pour un nœud du graphe (processing func (b *ArgoBuilder) createWorkflowArgoTemplate(
// ou native tool). Il : workflowID string,
// 1. Ajoute la tâche au DAG avec ses dépendances. namespace string,
// 2. Crée le template de container (ou d'événement pour les native tools). wfDeps *models.WorkflowsDependancies,
// 3. Ajoute les annotations Admiralty si le processing est hébergé sur un peer distant. ) {
// 4. Crée un service Kubernetes si le processing est déclaré IsService. realWorkflow, code, err := w.NewAccessor(nil).LoadOne(workflowID)
// 5. Configure les annotations de stockage (S3, volumes locaux). if code != 200 {
logger.Error().Msg("Error loading the workflow : " + err.Error())
return
}
subBuilder := ArgoBuilder{OriginWorkflow: realWorkflow.(*w.Workflow), Workflow: &models.Workflow{}, Timeout: b.Timeout}
_, fi, li, err := subBuilder.CreateDAG(namespace, false)
if err != nil {
logger.Error().Msg("Error creating the subworkflow : " + err.Error())
return
}
wfDeps.FirstWfTasks[workflowID] = fi
if depsOfIds := subBuilder.OriginWorkflow.IsDependancy(workflowID); len(depsOfIds) > 0 { // IS BEFORE
wfDeps.LastWfTasks[workflowID] = li
wfDeps.RelatedWfTasks[workflowID] = models.TransformDepsToArgo(depsOfIds)
}
subDag := subBuilder.Workflow.GetDag()
d := b.Workflow.GetDag()
d.Tasks = append(d.Tasks, subDag.Tasks...) // add the tasks of the subworkflow to the main workflow
b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, subBuilder.Workflow.Spec.Templates...)
b.Workflow.Spec.Volumes = append(b.Workflow.Spec.Volumes, subBuilder.Workflow.Spec.Volumes...)
b.Workflow.Spec.Arguments = append(b.Workflow.Spec.Arguments, subBuilder.Workflow.Spec.Arguments...)
b.Services = append(b.Services, subBuilder.Services...)
}
func (b *ArgoBuilder) createArgoTemplates( func (b *ArgoBuilder) createArgoTemplates(
exec *workflow_execution.WorkflowExecution,
namespace string, namespace string,
id string, id string,
obj resources.ResourceInterface, processing *resources.ProcessingResource,
volumes []VolumeMount, volumes []models.VolumeMount,
firstItems []string, firstItems []string,
lastItems []string) ([]VolumeMount, []string, []string) { lastItems []string,
) ([]models.VolumeMount, []string, []string) {
_, firstItems, lastItems = b.addTaskToArgo(exec, b.Workflow.getDag(), id, obj, firstItems, lastItems) _, firstItems, lastItems = NewTask(processing.Name, id).BindToArgo(b.Workflow.GetDag(), id, b.OriginWorkflow, processing, firstItems, lastItems)
template := &Template{Name: getArgoName(obj.GetName(), id)} template := &Template{Name: models.GetArgoName(processing.GetName(), id)}
logger.Info().Msg(fmt.Sprint("Creating template for", template.Name)) logger.Info().Msg(fmt.Sprint("Creating template for", template.Name))
// Vérifie si le processing est sur un peer distant (Admiralty). template.CreateContainer(processing, b.Workflow.GetDag())
isReparted, peer := b.isReparted(obj, id) if err := b.RepartiteProcess(*processing, id, template, namespace); err != nil {
if obj.GetType() == tools.PROCESSING_RESOURCE.String() { logger.Error().Msg(fmt.Sprint("problem to sets up repartition expected %v", err.Error()))
template.CreateContainer(exec, obj.(*resources.ProcessingResource), b.Workflow.getDag()) return volumes, firstItems, lastItems
} else if obj.GetType() == tools.NATIVE_TOOL.String() {
template.CreateEventContainer(exec, obj.(*resources.NativeTool), b.Workflow.getDag())
} }
// get datacenter from the processing
if isReparted { if processing.IsService {
logger.Debug().Msg("Reparted processing, on " + peer.GetID()) b.CreateService(id, processing)
b.RemotePeers = append(b.RemotePeers, peer.GetID())
template.AddAdmiraltyAnnotations(peer.GetID())
}
// Si le processing expose un service Kubernetes, on l'enregistre et on
// applique le label "app" pour que le Service puisse le sélectionner.
if obj.GetType() == tools.PROCESSING_RESOURCE.String() && obj.(*resources.ProcessingResource).IsService {
b.CreateService(exec, id, obj)
template.Metadata.Labels = make(map[string]string) template.Metadata.Labels = make(map[string]string)
template.Metadata.Labels["app"] = "oc-service-" + obj.GetName() template.Metadata.Labels["app"] = "oc-service-" + processing.GetName() // Construct the template for the k8s service and add a link in graph between k8s service and processing
} }
volumes = b.addStorageAnnotations(exec, id, template, namespace, volumes)
b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, *template) b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, *template)
return volumes, firstItems, lastItems return volumes, firstItems, lastItems
} }
// addStorageAnnotations parcourt tous les nœuds de stockage liés au processing func (b *ArgoBuilder) createVolumes(volumes []models.VolumeMount) { // TODO : one think about remote volume but TG
// identifié par id. Pour chaque lien de stockage :
// - Construit le nom de l'artefact Argo (lecture ou écriture).
// - Pour les stockages S3 : appelle waitForConsiders (STORAGE_RESOURCE) pour
// attendre la validation PB_CONSIDERS avant de configurer les annotations S3.
// - Pour les volumes locaux : ajoute un VolumeMount dans le container.
func (b *ArgoBuilder) addStorageAnnotations(exec *workflow_execution.WorkflowExecution, id string, template *Template, namespace string, volumes []VolumeMount) []VolumeMount {
// Récupère tous les nœuds de stockage connectés au processing courant.
related := b.OriginWorkflow.GetByRelatedProcessing(id, b.OriginWorkflow.Graph.IsStorage)
for _, r := range related {
storage := r.Node.(*resources.StorageResource)
for _, linkToStorage := range r.Links {
for _, rw := range linkToStorage.StorageLinkInfos {
var art Artifact
// Le nom de l'artefact doit être alphanumérique + '-' ou '_'.
artifactBaseName := strings.Join(strings.Split(storage.GetName(), " "), "-") + "-" + strings.Replace(rw.FileName, ".", "-", -1)
if rw.Write {
// Écriture vers S3 : Path = chemin du fichier dans le pod.
art = Artifact{Path: template.ReplacePerEnv(rw.Source, linkToStorage.Env)}
art.Name = artifactBaseName + "-input-write"
} else {
// Lecture depuis S3 : Path = destination dans le pod.
art = Artifact{Path: template.ReplacePerEnv(rw.Destination+"/"+rw.FileName, linkToStorage.Env)}
art.Name = artifactBaseName + "-input-read"
}
if storage.StorageType == enum.S3 {
// Pour chaque ressource de compute liée à ce stockage S3,
// on notifie via NATS et on attend la validation PB_CONSIDERS
// avec DataType = STORAGE_RESOURCE avant de continuer.
for _, r := range b.getStorageRelatedProcessing(storage.GetID()) {
waitForConsiders(exec.ExecutionsID, tools.STORAGE_RESOURCE, ArgoKubeEvent{
ExecutionsID: exec.ExecutionsID,
DestPeerID: r.GetID(),
Type: tools.STORAGE_RESOURCE,
SourcePeerID: storage.GetCreatorID(),
OriginID: conf.GetConfig().PeerID,
})
}
// Configure la référence au dépôt d'artefacts S3 dans le Spec.
b.addS3annotations(storage, namespace)
}
if rw.Write {
template.Outputs.Artifacts = append(template.Outputs.Artifacts, art)
} else {
template.Inputs.Artifacts = append(template.Inputs.Artifacts, art)
}
}
}
// Si l'instance de stockage est locale, on monte un volume persistant.
index := 0
if s, ok := exec.SelectedInstances[storage.GetID()]; ok {
index = s
}
s := storage.Instances[index]
if s.Local {
volumes = template.Container.AddVolumeMount(VolumeMount{
Name: strings.ReplaceAll(strings.ToLower(storage.GetName()), " ", "-"),
MountPath: s.Source,
Storage: storage,
}, volumes)
}
}
return volumes
}
// getStorageRelatedProcessing retourne la liste des ressources de compute
// connectées (via un processing intermédiaire) au stockage identifié par storageId.
// Ces ressources sont utilisées pour construire les ArgoKubeEvent destinés
// à la validation NATS.
func (b *ArgoBuilder) getStorageRelatedProcessing(storageId string) (res []resources.ResourceInterface) {
var storageLinks []graph.GraphLink
// On ne conserve que les liens impliquant ce stockage.
for _, link := range b.OriginWorkflow.Graph.Links {
if link.Destination.ID == storageId || link.Source.ID == storageId {
storageLinks = append(storageLinks, link)
}
}
for _, link := range storageLinks {
var resourceId string
// L'opposé du lien est soit la source soit la destination selon la direction.
if link.Source.ID != storageId {
resourceId = link.Source.ID
} else {
resourceId = link.Destination.ID
}
// Si l'opposé est un processing, on récupère ses ressources de compute.
if b.OriginWorkflow.Graph.IsProcessing(b.OriginWorkflow.Graph.Items[resourceId]) {
res = append(res, b.getComputeProcessing(resourceId)...)
}
}
return
}
// getComputeProcessing retourne toutes les ressources de compute attachées
// au processing identifié par processingId dans le graphe du workflow.
func (b *ArgoBuilder) getComputeProcessing(processingId string) (res []resources.ResourceInterface) {
arr := []resources.ResourceInterface{}
computeRel := b.OriginWorkflow.GetByRelatedProcessing(processingId, b.OriginWorkflow.Graph.IsCompute)
for _, rel := range computeRel {
arr = append(arr, rel.Node)
}
return arr
}
// addS3annotations configure la référence au dépôt d'artefacts S3 dans le Spec
// du workflow Argo. La ConfigMap et la clé sont dérivées de l'ID du stockage.
// Le namespace est conservé en signature pour une évolution future.
func (b *ArgoBuilder) addS3annotations(storage *resources.StorageResource, namespace string) {
b.Workflow.Spec.ArtifactRepositoryRef = ArtifactRepositoryRef{
ConfigMap: storage.GetID() + "-artifact-repository",
Key: storage.GetID() + "-s3-local",
}
}
// addTaskToArgo adds a task to the Argo DAG for the node identified by
// graphItemID. It resolves the task's DAG dependencies, propagates the
// environment, input and output parameters of the selected processing
// instance, and updates the firstItems / lastItems lists used when rewiring
// sub-workflows.
//
// Returns the updated dag together with the updated firstItems and
// lastItems slices.
func (b *ArgoBuilder) addTaskToArgo(exec *workflow_execution.WorkflowExecution, dag *Dag, graphItemID string, processing resources.ResourceInterface,
	firstItems []string, lastItems []string) (*Dag, []string, []string) {
	uniqueName := getArgoName(processing.GetName(), graphItemID)
	step := Task{Name: uniqueName, Template: uniqueName}
	// Default to instance 0 unless the execution selected another one.
	index := 0
	if d, ok := exec.SelectedInstances[processing.GetID()]; ok {
		index = d
	}
	instance := processing.GetSelectedInstance(&index)
	if instance != nil {
		// Propagate the instance's environment variables, inputs and
		// outputs as Argo task parameters. The type assertion is done
		// once here instead of once per loop.
		proc := instance.(*resources.ProcessingInstance)
		for _, value := range proc.Env {
			step.Arguments.Parameters = append(step.Arguments.Parameters, Parameter{
				Name:  value.Name,
				Value: value.Value,
			})
		}
		for _, value := range proc.Inputs {
			step.Arguments.Parameters = append(step.Arguments.Parameters, Parameter{
				Name:  value.Name,
				Value: value.Value,
			})
		}
		for _, value := range proc.Outputs {
			step.Arguments.Parameters = append(step.Arguments.Parameters, Parameter{
				Name:  value.Name,
				Value: value.Value,
			})
		}
	}
	step.Dependencies = b.getArgoDependencies(graphItemID)
	// Determine whether this node is a first and/or last task of the DAG:
	// no incoming dependency makes it a first item, no outgoing dependant
	// makes it a last item.
	name := ""
	item := b.OriginWorkflow.Graph.Items[graphItemID]
	if item.Processing != nil {
		name = item.Processing.GetName()
	}
	if item.Workflow != nil {
		name = item.Workflow.GetName()
	}
	if len(step.Dependencies) == 0 && name != "" {
		firstItems = append(firstItems, getArgoName(name, graphItemID))
	}
	if ok, _ := b.isArgoDependancy(graphItemID); !ok && name != "" {
		lastItems = append(lastItems, getArgoName(name, graphItemID))
	}
	dag.Tasks = append(dag.Tasks, step)
	return dag, firstItems, lastItems
}
// createVolumes crée les PersistentVolumeClaims Argo (volumeClaimTemplates)
// pour chaque volume local référencé dans les templates de processing.
// TODO: gérer les volumes distants.
func (b *ArgoBuilder) createVolumes(exec *workflow_execution.WorkflowExecution, volumes []VolumeMount) {
for _, volume := range volumes { for _, volume := range volumes {
index := 0 volume.BindToArgo(b.Workflow)
if s, ok := exec.SelectedInstances[volume.Storage.GetID()]; ok {
index = s
}
storage := volume.Storage.Instances[index]
new_volume := VolumeClaimTemplate{}
new_volume.Metadata.Name = strings.ReplaceAll(strings.ToLower(volume.Name), " ", "-")
new_volume.Spec.AccessModes = []string{"ReadWriteOnce"}
new_volume.Spec.Resources.Requests.Storage = fmt.Sprintf("%v", storage.SizeGB) + storage.SizeType.ToArgo()
b.Workflow.Spec.Volumes = append(b.Workflow.Spec.Volumes, new_volume)
} }
} }
// isArgoDependancy vérifie si le nœud identifié par id est une dépendance // Verify if a processing resource is attached to another Compute than the one hosting
// d'au moins un autre nœud du DAG (i.e. s'il existe un lien sortant vers // the current Open Cloud instance. If true return the peer ID to contact
// un processing ou un workflow). func (b *ArgoBuilder) RepartiteProcess(processing resources.ProcessingResource, graphID string, template *models.Template, namespace string) error {
// Retourne true + la liste des noms Argo des nœuds qui en dépendent. computeAttached := b.OriginWorkflow.GetByRelatedProcessing(processing.GetID(), b.OriginWorkflow.Graph.IsCompute)
func (b *ArgoBuilder) isArgoDependancy(id string) (bool, []string) { if len(computeAttached) == 0 {
dependancyOfIDs := []string{} return errors.New("No compute was found attached to processing " + processing.Name + " : " + processing.UUID)
isDeps := false }
for _, link := range b.OriginWorkflow.Graph.Links { // Creates an accessor srtictly for Peer Collection
if _, ok := b.OriginWorkflow.Graph.Items[link.Destination.ID]; !ok { for _, related := range computeAttached {
logger.Info().Msg(fmt.Sprint("Could not find the source of the link", link.Destination.ID)) instance := related.Node.GetSelectedInstance().(*resources.ComputeResourceInstance)
if instance == nil {
continue continue
} }
source := b.OriginWorkflow.Graph.Items[link.Destination.ID].Processing partner := instance.GetSelectedPartnership(conf.GetConfig().PeerID, conf.GetConfig().Groups)
if id == link.Source.ID && source != nil { if partner == nil {
isDeps = true logger.Error().Msg("can't proceed on datacenter because of missing pricing profiles " + related.Node.GetID())
dependancyOfIDs = append(dependancyOfIDs, getArgoName(source.GetName(), link.Destination.ID))
}
wourceWF := b.OriginWorkflow.Graph.Items[link.Destination.ID].Workflow
if id == link.Source.ID && wourceWF != nil {
isDeps = true
dependancyOfIDs = append(dependancyOfIDs, getArgoName(wourceWF.GetName(), link.Destination.ID))
}
}
return isDeps, dependancyOfIDs
}
// getArgoDependencies retourne la liste des noms de tâches Argo dont dépend
// le nœud identifié par id (liens entrants depuis des processings).
func (b *ArgoBuilder) getArgoDependencies(id string) (dependencies []string) {
for _, link := range b.OriginWorkflow.Graph.Links {
if _, ok := b.OriginWorkflow.Graph.Items[link.Source.ID]; !ok {
logger.Info().Msg(fmt.Sprint("Could not find the source of the link", link.Source.ID))
continue continue
} }
source := b.OriginWorkflow.Graph.Items[link.Source.ID].Processing garanteed, allowed := b.setResourcesAllowedAndGaranteed(b.Workflow.GetDag(), models.NewBounds(), models.NewBounds(), "gpu", partner)
if id == link.Destination.ID && source != nil { garanteed, allowed = b.setResourcesAllowedAndGaranteed(b.Workflow.GetDag(), garanteed, allowed, "cpu", partner)
dependency_name := getArgoName(source.GetName(), link.Source.ID) garanteed.Set(float64(partner.(*resources.ComputeResourcePartnership).MinGaranteedRAMSize), "ram", false)
dependencies = append(dependencies, dependency_name) allowed.Set(float64(partner.(*resources.ComputeResourcePartnership).MaxAllowedRAMSize), "ram", false)
continue
}
}
return
}
// getArgoName construit le nom unique d'une tâche / template Argo à partir res := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER), "", "", nil, nil).LoadOne(related.Node.GetCreatorID())
// du nom humain de la ressource et de son ID dans le graphe. if res.Err != "" {
// Les espaces sont remplacés par des tirets et tout est mis en minuscules. return errors.New(res.Err)
func getArgoName(raw_name string, component_id string) (formatedName string) {
formatedName = strings.ReplaceAll(raw_name, " ", "-")
formatedName += "-" + component_id
formatedName = strings.ToLower(formatedName)
return
}
// isReparted vérifie si le processing est hébergé sur un Compute appartenant
// à un peer distant (Relation != 1, i.e. pas le peer local).
// Si c'est le cas, elle retourne true et le Peer concerné pour qu'Admiralty
// puisse router les pods vers le bon cluster.
func (b *ArgoBuilder) isReparted(processing resources.ResourceInterface, graphID string) (bool, *peer.Peer) {
computeAttached := b.retrieveProcessingCompute(graphID)
if computeAttached == nil {
logger.Error().Msg("No compute was found attached to processing " + processing.GetName() + " : " + processing.GetID())
panic(0)
}
// Résolution du Peer propriétaire du Compute via l'API oc-lib.
req := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER), "", "", nil, nil)
if req == nil {
fmt.Println("TODO : handle error when trying to create a request on the Peer Collection")
return false, nil
}
res := req.LoadOne(computeAttached.CreatorID)
if res.Err != "" {
fmt.Print("TODO : handle error when requesting PeerID")
fmt.Print(res.Err)
return false, nil
}
peer := res.ToPeer()
// Relation == 1 signifie "moi-même" : le processing est local.
isNotReparted := peer.Relation == 1
logger.Info().Msg(fmt.Sprint("Result IsMySelf for ", peer.UUID, " : ", isNotReparted))
return !isNotReparted, peer
}
// retrieveProcessingCompute parcourt les liens du graphe pour retrouver
// la ressource de Compute directement connectée au nœud graphID.
// Retourne nil si aucun Compute n'est trouvé.
func (b *ArgoBuilder) retrieveProcessingCompute(graphID string) *resources.ComputeResource {
for _, link := range b.OriginWorkflow.Graph.Links {
var oppositeId string
if link.Source.ID == graphID {
oppositeId = link.Destination.ID
} else if link.Destination.ID == graphID {
oppositeId = link.Source.ID
} }
if oppositeId != "" { peer := *res.ToPeer()
dt, res := b.OriginWorkflow.Graph.GetResource(oppositeId)
if dt == oclib.COMPUTE_RESOURCE { isNotReparted := peer.State == 1
return res.(*resources.ComputeResource) logger.Info().Msg(fmt.Sprint("Result IsMySelf for ", peer.UUID, " : ", isNotReparted))
} else { if !(isNotReparted) {
continue logger.Debug().Msg("Reparted processing, on " + peer.UUID)
} b.RemotePeers = append(b.RemotePeers, peer.UUID)
template.AddAdmiraltyAnnotations(peer.UUID, namespace)
} }
} }
return nil return nil
} }
// waitForConsiders publie un ArgoKubeEvent sur le canal NATS ARGO_KUBE_EVENT func (b *ArgoBuilder) setResourcesAllowedAndGaranteed(dag *Dag, minbound *models.Bounds, maxbound *models.Bounds, typ string, partner resources.ResourcePartnerITF) (*models.Bounds, *models.Bounds) {
// puis se bloque jusqu'à réception d'un PropalgationMessage vérifiant : selector := ""
// - Action == PB_CONSIDERS values := map[string]float64{}
// - DataType == dataType (COMPUTE_RESOURCE ou STORAGE_RESOURCE) if typ == "gpu" {
// - Payload décodé en JSON contenant "executions_id" == executionsId values = partner.(*resources.ComputeResourcePartnership).MinGaranteedGPUsMemoryGB
// } else {
// Cela garantit que l'infrastructure distante (Admiralty ou Minio) a bien values = partner.(*resources.ComputeResourcePartnership).MinGaranteedCPUsCores
// pris en compte la demande avant que la construction du workflow continue.
// Un timeout de 5 minutes est appliqué pour éviter un blocage indéfini.
func waitForConsiders(executionsId string, dataType tools.DataType, event ArgoKubeEvent) {
// Sérialise l'événement et le publie sur ARGO_KUBE_EVENT.
b, err := json.Marshal(event)
if err != nil {
logger.Error().Msg("Cannot marshal ArgoKubeEvent: " + err.Error())
return
} }
tools.NewNATSCaller().SetNATSPub(tools.ARGO_KUBE_EVENT, tools.NATSResponse{ for name, GPU := range values {
FromApp: "oc-monitord", if minbound.Set(float64(GPU), typ, true) {
Datatype: dataType, selector = name
User: "root",
Method: int(tools.PROPALGATION_EVENT),
Payload: b,
})
// Connexion NATS pour écouter la réponse PB_CONSIDERS.
natsURL := oclib_config.GetConfig().NATSUrl
if natsURL == "" {
logger.Error().Msg("NATS_SERVER not set, skipping PB_CONSIDERS wait")
return
}
nc, err := nats.Connect(natsURL)
if err != nil {
logger.Error().Msg("NATS connect error waiting for PB_CONSIDERS: " + err.Error())
return
}
defer nc.Close()
// Souscription au canal PROPALGATION_EVENT avec un buffer de 64 messages.
ch := make(chan *nats.Msg, 64)
sub, err := nc.ChanSubscribe(tools.PROPALGATION_EVENT.GenerateKey(), ch)
if err != nil {
logger.Error().Msg("NATS subscribe error waiting for PB_CONSIDERS: " + err.Error())
return
}
defer sub.Unsubscribe()
timeout := time.After(5 * time.Minute)
for {
select {
case msg := <-ch:
// Désérialise le message en PropalgationMessage.
var pm tools.PropalgationMessage
if err := json.Unmarshal(msg.Data, &pm); err != nil {
continue
}
// Filtre : action, type de données.
if pm.Action != tools.PB_CONSIDERS || pm.DataType != int(dataType) {
continue
}
// Filtre : executions_id dans le Payload du PropalgationMessage.
var body struct {
ExecutionsID string `json:"executions_id"`
}
if err := json.Unmarshal(pm.Payload, &body); err != nil {
continue
}
if body.ExecutionsID != executionsId {
continue
}
logger.Info().Msg(fmt.Sprintf("PB_CONSIDERS received for executions_id=%s datatype=%s", executionsId, dataType.String()))
return
case <-timeout:
logger.Warn().Msg(fmt.Sprintf("Timeout waiting for PB_CONSIDERS executions_id=%s datatype=%s", executionsId, dataType.String()))
return
} }
} }
if selector != "" {
for _, t := range dag.Tasks {
t.NodeSelector[typ+"-type"] = selector
}
}
if typ == "gpu" {
values = partner.(*resources.ComputeResourcePartnership).MaxAllowedGPUsMemoryGB
} else {
values = partner.(*resources.ComputeResourcePartnership).MaxAllowedCPUsCores
}
if max, ok := values[selector]; ok {
maxbound.Set(float64(max), typ, false)
} else {
maxbound.GPU = minbound.GPU
}
return minbound, maxbound
} }
// ArgoKubeEvent est la structure publiée sur NATS lors de la demande de // Execute the last actions once the YAML file for the Argo Workflow is created
// provisionnement d'une ressource distante (Admiralty ou stockage S3). func (b *ArgoBuilder) CompleteBuild(namespace string) (string, error) {
// Le champ OriginID identifie le peer initiateur : c'est vers lui que la
// réponse PB_CONSIDERS sera routée par le système de propagation.
type ArgoKubeEvent struct {
// ExecutionsID est l'identifiant de l'exécution de workflow en cours.
ExecutionsID string `json:"executions_id"`
// DestPeerID est le peer de destination (compute ou peer S3 cible).
DestPeerID string `json:"dest_peer_id"`
// Type indique la nature de la ressource : COMPUTE_RESOURCE ou STORAGE_RESOURCE.
Type tools.DataType `json:"data_type"`
// SourcePeerID est le peer source de la ressource demandée.
SourcePeerID string `json:"source_peer_id"`
// OriginID est le peer qui a initié la demande de provisionnement ;
// la réponse PB_CONSIDERS lui sera renvoyée.
OriginID string `json:"origin_id"`
}
// CompleteBuild finalise la construction du workflow Argo après la génération
// du DAG. Elle effectue dans l'ordre :
// 1. Pour chaque peer distant (Admiralty) : publie un ArgoKubeEvent de type
// COMPUTE_RESOURCE et attend la validation PB_CONSIDERS via waitForConsiders.
// 2. Met à jour les annotations Admiralty des templates avec le nom de cluster
// construit à partir du peerId et de l'executionsId.
// 3. Sérialise le workflow en YAML et l'écrit dans ./argo_workflows/.
//
// Retourne le chemin du fichier YAML généré.
func (b *ArgoBuilder) CompleteBuild(executionsId string) (string, error) {
logger.Info().Msg("DEV :: Completing build") logger.Info().Msg("DEV :: Completing build")
setter := AdmiraltySetter{Id: namespace}
// --- Étape 1 : validation Admiralty pour chaque peer distant --- // Setup admiralty for each node
for _, peer := range b.RemotePeers { for _, peer := range b.RemotePeers {
logger.Info().Msg(fmt.Sprint("DEV :: Launching Admiralty Setup for ", peer)) logger.Info().Msg(fmt.Sprint("DEV :: Launching Admiralty Setup for ", peer))
// Publie l'événement COMPUTE_RESOURCE et attend PB_CONSIDERS (bloquant). setter.InitializeAdmiralty(conf.GetConfig().PeerID, peer)
waitForConsiders(executionsId, tools.COMPUTE_RESOURCE, ArgoKubeEvent{
ExecutionsID: executionsId,
Type: tools.COMPUTE_RESOURCE,
DestPeerID: conf.GetConfig().PeerID,
SourcePeerID: peer,
OriginID: conf.GetConfig().PeerID,
})
} }
// Generate the YAML file
// --- Étape 2 : mise à jour du nom de cluster Admiralty ---
// Le nom final du cluster cible est "target-<peerId>-<executionsId>".
for _, template := range b.Workflow.Spec.Templates {
if len(template.Metadata.Annotations) > 0 {
if peerId, ok := template.Metadata.Annotations["multicluster.admiralty.io/clustername"]; ok {
template.Metadata.Annotations["multicluster.admiralty.io/clustername"] = "target-" + tools.GetConcatenatedName(peerId, executionsId)
}
}
}
// --- Étape 3 : génération et écriture du fichier YAML ---
random_name := fakelish.GenerateFakeWord(5, 8) + "-" + fakelish.GenerateFakeWord(5, 8) random_name := fakelish.GenerateFakeWord(5, 8) + "-" + fakelish.GenerateFakeWord(5, 8)
b.Workflow.Metadata.Name = "oc-monitor-" + random_name b.Workflow.Metadata.Name = "oc-monitor-" + random_name
logger = oclib.GetLogger() logger = oclib.GetLogger()
@@ -734,11 +235,12 @@ func (b *ArgoBuilder) CompleteBuild(executionsId string) (string, error) {
logger.Error().Msg("Could not transform object to yaml file") logger.Error().Msg("Could not transform object to yaml file")
return "", err return "", err
} }
// Nom de fichier horodaté au format DD_MM_YYYY_hhmmss. // Give a unique name to each argo file with its timestamp DD:MM:YYYY_hhmmss
current_timestamp := time.Now().Format("02_01_2006_150405") current_timestamp := time.Now().Format("02_01_2006_150405")
file_name := random_name + "_" + current_timestamp + ".yml" file_name := random_name + "_" + current_timestamp + ".yml"
workflows_dir := "./argo_workflows/" workflows_dir := "./argo_workflows/"
err = os.WriteFile(workflows_dir+file_name, []byte(yamlified), 0660) err = os.WriteFile(workflows_dir+file_name, []byte(yamlified), 0660)
if err != nil { if err != nil {
logger.Error().Msg("Could not write the yaml file") logger.Error().Msg("Could not write the yaml file")
return "", err return "", err

View File

@@ -1,128 +0,0 @@
# argo_builder.go — Résumé
## Rôle général
`argo_builder.go` traduit un **Workflow Open Cloud** (graphe de nœuds : processings,
stockages, computes, sous-workflows) en un **fichier YAML Argo Workflow** prêt à
être soumis à un cluster Kubernetes.
---
## Structures principales
| Struct | Rôle |
|---|---|
| `ArgoBuilder` | Constructeur principal. Porte le workflow source, la structure YAML en cours de build, les services k8s, le timeout et la liste des peers distants (Admiralty). |
| `Workflow` | Racine du YAML Argo (`apiVersion`, `kind`, `metadata`, `spec`). |
| `Spec` | Spécification du workflow : compte de service, entrypoint, templates, volumes, timeout, référence au dépôt d'artefacts S3. |
| `ArgoKubeEvent` | Événement publié sur NATS lors de la demande de provisionnement d'une ressource distante (compute ou stockage S3). Contient `executions_id`, `dest_peer_id`, `source_peer_id`, `data_type`, `origin_id`. |
---
## Flux d'exécution principal
```
CreateDAG()
└─ createTemplates()
├─ [pour chaque processing] createArgoTemplates()
│ ├─ addTaskToArgo() → ajoute la tâche au DAG + dépendances
│ ├─ CreateContainer() → template container Argo
│ ├─ AddAdmiraltyAnnotations() → si peer distant détecté
│ └─ addStorageAnnotations() → S3 + volumes locaux
├─ [pour chaque native tool WORKFLOW_EVENT] createArgoTemplates()
└─ [pour chaque sous-workflow]
├─ CreateDAG() récursif
└─ fusion DAG + recâblage des dépendances
└─ createVolumes() → PersistentVolumeClaims
CompleteBuild()
├─ waitForConsiders() × N peers → validation Admiralty (COMPUTE_RESOURCE)
├─ mise à jour annotations Admiralty (clustername)
└─ écriture du YAML dans ./argo_workflows/
```
---
## Fonctions clés
### `CreateDAG(exec, namespace, write) → (nbTâches, firstItems, lastItems, err)`
Point d'entrée. Initialise le logger, déclenche la création des templates et des
volumes, configure les métadonnées globales du workflow Argo.
### `createTemplates(exec, namespace) → (firstItems, lastItems, volumes)`
Itère sur tous les nœuds du graphe.
- Processings → template container.
- Native tools `WORKFLOW_EVENT` → template événement.
- Sous-workflows → build récursif + fusion DAG + recâblage des dépendances entrantes/sortantes.
### `createArgoTemplates(exec, namespace, id, obj, …)`
Crée le template Argo pour un nœud donné.
Détecte si le processing est **réparti** (peer distant via `isReparted`) → ajoute les
annotations Admiralty et enregistre le peer dans `RemotePeers`.
Délègue la configuration du stockage à `addStorageAnnotations`.
### `addStorageAnnotations(exec, id, template, namespace, volumes)`
Pour chaque stockage lié au processing :
- **S3** : appelle `waitForConsiders(STORAGE_RESOURCE)` pour chaque compute associé,
puis configure la référence au dépôt d'artefacts via `addS3annotations`.
- **Local** : monte un `VolumeMount` dans le container.
### `waitForConsiders(executionsId, dataType, event)`
**Fonction bloquante.**
1. Publie l'`ArgoKubeEvent` sur le canal NATS `ARGO_KUBE_EVENT`.
2. S'abonne à `PROPALGATION_EVENT`.
3. Attend un `PropalgationMessage` vérifiant :
- `Action == PB_CONSIDERS`
- `DataType == dataType`
- `Payload.executions_id == executionsId`
4. Timeout : **5 minutes**.
| Appelant | DataType attendu | Signification |
|---|---|---|
| `addStorageAnnotations` (S3) | `STORAGE_RESOURCE` | Le stockage S3 distant est prêt |
| `CompleteBuild` (Admiralty) | `COMPUTE_RESOURCE` | Le cluster cible Admiralty est configuré |
### `CompleteBuild(executionsId) → (cheminYAML, err)`
Finalise le build :
1. Pour chaque peer dans `RemotePeers``waitForConsiders(COMPUTE_RESOURCE)` (bloquant, séquentiel).
2. Met à jour les annotations `multicluster.admiralty.io/clustername` avec `target-<peerId>-<executionsId>`.
3. Sérialise le workflow en YAML et l'écrit dans `./argo_workflows/<nom>_<timestamp>.yml`.
### `isReparted(processing, graphID) → (bool, *peer.Peer)`
Retrouve le Compute attaché au processing, charge le Peer propriétaire via l'API
oc-lib, et vérifie si `Relation != 1` (pas le peer local).
### `addTaskToArgo(exec, dag, graphItemID, processing, …)`
Crée une `Task` Argo (nom unique, template, dépendances DAG, paramètres env/inputs/outputs)
et la rattache au DAG. Met à jour `firstItems` / `lastItems`.
### `isArgoDependancy(id) → (bool, []string)`
Vérifie si un nœud est utilisé comme source d'un lien sortant vers un autre
processing ou workflow (il est donc une dépendance pour quelqu'un).
### `getArgoDependencies(id) → []string`
Retourne les noms des tâches Argo dont ce nœud dépend (liens entrants).
---
## Protocole NATS utilisé
```
Publication → canal : ARGO_KUBE_EVENT
payload : NATSResponse{Method: PROPALGATION_EVENT, Payload: ArgoKubeEvent}
Attente ← canal : PROPALGATION_EVENT
filtre : PropalgationMessage{
Action = PB_CONSIDERS,
DataType = COMPUTE_RESOURCE | STORAGE_RESOURCE,
Payload = {"executions_id": "<id en cours>"}
}
```
---
## Fichier YAML produit
- Nom : `<mot1>-<mot2>_<DD_MM_YYYY_hhmmss>.yml` (le préfixe `oc-monitor-` est appliqué au nom du workflow Argo, pas au fichier)
- Dossier : `./argo_workflows/`
- Permissions : `0660`

View File

@@ -5,11 +5,9 @@ import (
"strings" "strings"
"cloud.o-forge.io/core/oc-lib/models/resources" "cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
"gopkg.in/yaml.v3"
) )
func (b *ArgoBuilder) CreateService(exec *workflow_execution.WorkflowExecution, id string, processing resources.ResourceInterface) { func (b *ArgoBuilder) CreateService(id string, processing *resources.ProcessingResource) {
new_service := models.Service{ new_service := models.Service{
APIVersion: "v1", APIVersion: "v1",
Kind: "Service", Kind: "Service",
@@ -25,21 +23,17 @@ func (b *ArgoBuilder) CreateService(exec *workflow_execution.WorkflowExecution,
if processing == nil { if processing == nil {
return return
} }
b.completeServicePorts(exec, &new_service, id, processing) b.completeServicePorts(&new_service, id, processing)
b.Services = append(b.Services, &new_service) b.Services = append(b.Services, &new_service)
} }
func (b *ArgoBuilder) completeServicePorts(exec *workflow_execution.WorkflowExecution, service *models.Service, id string, processing resources.ResourceInterface) { func (b *ArgoBuilder) completeServicePorts(service *models.Service, id string, processing *resources.ProcessingResource) {
index := 0 instance := processing.GetSelectedInstance()
if d, ok := exec.SelectedInstances[processing.GetID()]; ok {
index = d
}
instance := processing.GetSelectedInstance(&index)
if instance != nil && instance.(*resources.ProcessingInstance).Access != nil && instance.(*resources.ProcessingInstance).Access.Container != nil { if instance != nil && instance.(*resources.ProcessingInstance).Access != nil && instance.(*resources.ProcessingInstance).Access.Container != nil {
for _, execute := range instance.(*resources.ProcessingInstance).Access.Container.Exposes { for _, execute := range instance.(*resources.ProcessingInstance).Access.Container.Exposes {
if execute.PAT != 0 { if execute.PAT != 0 {
new_port_translation := models.ServicePort{ new_port_translation := models.ServicePort{
Name: strings.ToLower(processing.GetName()) + id, Name: strings.ToLower(processing.Name) + id,
Port: execute.Port, Port: execute.Port,
TargetPort: execute.PAT, TargetPort: execute.PAT,
Protocol: "TCP", Protocol: "TCP",
@@ -52,20 +46,9 @@ func (b *ArgoBuilder) completeServicePorts(exec *workflow_execution.WorkflowExec
func (b *ArgoBuilder) addServiceToArgo() error { func (b *ArgoBuilder) addServiceToArgo() error {
for _, service := range b.Services { for _, service := range b.Services {
service_manifest, err := yaml.Marshal(service) if err := service.BindToArgo(b.Workflow); err != nil {
if err != nil {
return err return err
} }
service_template := models.Template{Name: "workflow-service-pod",
Resource: models.ServiceResource{
Action: "create",
SuccessCondition: "status.succeeded > 0",
FailureCondition: "status.failed > 3",
SetOwnerReference: true,
Manifest: string(service_manifest),
},
}
b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, service_template)
} }
return nil return nil
} }

View File

@@ -3,10 +3,10 @@ package workflow_builder
import ( import (
"errors" "errors"
"fmt" "fmt"
"oc-monitord/models"
oclib "cloud.o-forge.io/core/oc-lib" oclib "cloud.o-forge.io/core/oc-lib"
workflow "cloud.o-forge.io/core/oc-lib/models/workflow" workflow "cloud.o-forge.io/core/oc-lib/models/workflow"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
) )
type WorflowDB struct { type WorflowDB struct {
@@ -42,15 +42,15 @@ func (w *WorflowDB) getWorkflow(workflow_id string, peerID string) (workflow *wo
return new_wf, nil return new_wf, nil
} }
func (w *WorflowDB) ExportToArgo(exec *workflow_execution.WorkflowExecution, timeout int) (*ArgoBuilder, int, error) { func (w *WorflowDB) ExportToArgo(namespace string, timeout int) (*ArgoBuilder, int, error) {
logger := oclib.GetLogger() logger := oclib.GetLogger()
logger.Info().Msg(fmt.Sprint("Exporting to Argo", w.Workflow)) logger.Info().Msg(fmt.Sprint("Exporting to Argo", w.Workflow))
if len(w.Workflow.Name) == 0 || w.Workflow.Graph == nil { if len(w.Workflow.Name) == 0 || w.Workflow.Graph == nil {
return nil, 0, fmt.Errorf("can't export a graph that has not been loaded yet") return nil, 0, fmt.Errorf("can't export a graph that has not been loaded yet")
} }
argoBuilder := ArgoBuilder{OriginWorkflow: w.Workflow, Timeout: timeout} argoBuilder := ArgoBuilder{OriginWorkflow: w.Workflow, Workflow: &models.Workflow{}, Timeout: timeout}
stepMax, _, _, err := argoBuilder.CreateDAG(exec, exec.ExecutionsID, true) stepMax, _, _, err := argoBuilder.CreateDAG(namespace, true)
if err != nil { if err != nil {
logger.Error().Msg("Could not create the argo file for " + w.Workflow.Name) logger.Error().Msg("Could not create the argo file for " + w.Workflow.Name)
return nil, 0, err return nil, 0, err