1 commit

Author: pb
Commit 411effb000 (2025-05-28 10:53:15 +02:00): "Added the label to the YAML to execute the pods on another node"
23 changed files with 763 additions and 1501 deletions

.gitignore (vendored): 1 change

@@ -22,4 +22,3 @@
go.work
argo_workflows/*
env.env

Makefile

@@ -12,16 +12,16 @@ clean:
rm -rf oc-monitord
docker:
DOCKER_BUILDKIT=1 docker build -t oc-monitord -f Dockerfile .
docker tag oc-monitord opencloudregistry/oc-monitord:latest
DOCKER_BUILDKIT=1 docker build -t oc/oc-monitord:0.0.1 -f Dockerfile .
docker tag oc/oc-monitord:0.0.1 oc/oc-monitord:latest
docker tag oc/oc-monitord:0.0.1 oc-monitord:latest
publish-kind:
kind load docker-image opencloudregistry/oc-monitord:latest --name $(CLUSTER_NAME)
kind load docker-image oc/oc-monitord:0.0.1 --name opencloud
publish-registry:
docker push opencloudregistry/oc-monitord:latest
@echo "TODO"
all: docker publish-kind
ci: docker publish-registry
all: docker publish-kind publish-registry
.PHONY: build run clean docker publish-kind publish-registry


@@ -3,6 +3,10 @@ package conf
import "sync"
type Config struct {
MongoURL string
Database string
LokiURL string
NatsURL string
ExecutionID string
PeerID string
Timeout int
@@ -14,7 +18,7 @@ type Config struct {
KubeCA string
KubeCert string
KubeData string
ArgoHost string // when executed in a container will replace addresses with "localhost" in their url
ArgoHost string // when executed in a container will replace addresses with "localhost" in their url
}
var instance *Config
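
The hunk above adds the MongoDB, Loki and NATS endpoints to the shared Config struct, which the package exposes through the package-level instance variable. A minimal sketch of the accessor this implies follows; the sync.Once pattern and the accessor body are assumptions, since only the struct and the instance variable appear in the diff (main.go below does call conf.GetConfig(), so the accessor itself exists):

	package conf

	import "sync"

	// Config is the struct extended in the hunk above (fields elided here).
	type Config struct {
		MongoURL string
		LokiURL  string
		NatsURL  string
		// ... remaining fields as in the diff
	}

	var (
		instance *Config
		once     sync.Once
	)

	// GetConfig lazily creates the singleton on first use; every caller
	// afterwards receives the same pointer.
	func GetConfig() *Config {
		once.Do(func() { instance = &Config{} })
		return instance
	}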


@@ -0,0 +1,3 @@
{
"oc-catalog": "https://oc-catalog:8087"
}


@@ -0,0 +1,3 @@
{
"oc-catalog": "https://localhost:8087"
}
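
The two new three-line JSON files above map service names to per-environment base URLs, one for in-cluster resolution and one for localhost development. A hypothetical loader for such a map is sketched below; the file path, package and function name are illustrative, not part of the commit:

	package main

	import (
		"encoding/json"
		"fmt"
		"os"
	)

	// loadServiceURLs reads a {"service": "url"} map like the files above.
	func loadServiceURLs(path string) (map[string]string, error) {
		raw, err := os.ReadFile(path)
		if err != nil {
			return nil, err
		}
		urls := map[string]string{}
		if err := json.Unmarshal(raw, &urls); err != nil {
			return nil, err
		}
		return urls, nil
	}

	func main() {
		urls, err := loadServiceURLs("services.json") // illustrative path
		if err != nil {
			panic(err)
		}
		fmt.Println(urls["oc-catalog"]) // e.g. https://oc-catalog:8087
	}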

go.mod: 83 changes

@@ -1,34 +1,30 @@
module oc-monitord
go 1.25.0
go 1.23.1
toolchain go1.23.3
require (
cloud.o-forge.io/core/oc-lib v0.0.0-20260320151407-88d2e526283b
cloud.o-forge.io/core/oc-lib v0.0.0-20250313155727-88c88cac5bc9
github.com/akamensky/argparse v1.4.0
github.com/google/uuid v1.6.0
github.com/goraz/onion v0.1.3
github.com/nwtgck/go-fakelish v0.1.3
github.com/rs/zerolog v1.34.0
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/beego/beego/v2 v2.3.8 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/go-playground/validator/v10 v10.27.0 // indirect
github.com/beego/beego/v2 v2.3.7 // indirect
github.com/go-playground/validator/v10 v10.26.0 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/goraz/onion v0.1.3 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/libp2p/go-libp2p/core v0.43.0-rc2 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/ugorji/go/codec v1.1.7 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/grpc v1.63.0 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
)
require (
@@ -37,10 +33,10 @@ require (
github.com/biter777/countries v1.7.5 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.9 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.8 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.4 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
@@ -48,7 +44,9 @@ require (
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/snappy v1.0.0 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
@@ -59,16 +57,18 @@ require (
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/montanaflynn/stats v0.7.1 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nats-io/nats.go v1.44.0
github.com/nats-io/nkeys v0.4.11 // indirect
github.com/nats-io/nats.go v1.41.0 // indirect
github.com/nats-io/nkeys v0.4.10 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/prometheus/client_golang v1.23.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.65.0 // indirect
github.com/prometheus/procfs v0.17.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.22.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.63.0 // indirect
github.com/prometheus/procfs v0.16.0 // indirect
github.com/robfig/cron v1.2.0 // indirect
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 // indirect
github.com/smartystreets/goconvey v1.6.4 // indirect
github.com/x448/float16 v0.8.4 // indirect
@@ -76,24 +76,25 @@ require (
github.com/xdg-go/scram v1.1.2 // indirect
github.com/xdg-go/stringprep v1.0.4 // indirect
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
go.mongodb.org/mongo-driver v1.17.4 // indirect
golang.org/x/crypto v0.44.0 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/sys v0.38.0 // indirect
golang.org/x/term v0.37.0 // indirect
golang.org/x/text v0.31.0 // indirect
golang.org/x/time v0.9.0 // indirect
google.golang.org/protobuf v1.36.8 // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
go.mongodb.org/mongo-driver v1.17.3 // indirect
golang.org/x/crypto v0.37.0 // indirect
golang.org/x/net v0.39.0 // indirect
golang.org/x/oauth2 v0.25.0 // indirect
golang.org/x/sync v0.13.0 // indirect
golang.org/x/sys v0.32.0 // indirect
golang.org/x/term v0.31.0 // indirect
golang.org/x/text v0.24.0 // indirect
golang.org/x/time v0.7.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
k8s.io/api v0.35.1
k8s.io/apimachinery v0.35.1
k8s.io/client-go v0.35.1
k8s.io/api v0.32.1
k8s.io/apimachinery v0.32.1
k8s.io/client-go v0.32.1
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)

go.sum: 227 changes

@@ -1,26 +1,16 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.o-forge.io/core/oc-lib v0.0.0-20260319065647-5b7edb53a984 h1:6HAlL367LM75T7IokS5H4y7iZg8mrk05uAy/yANKwdc=
cloud.o-forge.io/core/oc-lib v0.0.0-20260319065647-5b7edb53a984/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260319071818-28b5b7d39ffe h1:CHiWQAX7j/bMfbytCWGL2mUgSWYoDY4+bFQbCHEfypk=
cloud.o-forge.io/core/oc-lib v0.0.0-20260319071818-28b5b7d39ffe/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260320093030-a62fbc6c7a03 h1:GyfeEHGlyQIFtuzmwsJZ9b64dr9D7zvi6RCo1e/E5wc=
cloud.o-forge.io/core/oc-lib v0.0.0-20260320093030-a62fbc6c7a03/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260320103359-c34b8c67038b h1:VdLBRXb0wSsR9lzkoEGvhScRe4cNJy/QoGTkyG302uQ=
cloud.o-forge.io/core/oc-lib v0.0.0-20260320103359-c34b8c67038b/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260320151407-88d2e526283b h1:QEdy0FxwWcXYHVLcC06tRmhFl6T/pr2M7l2Auni/sSU=
cloud.o-forge.io/core/oc-lib v0.0.0-20260320151407-88d2e526283b/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20250313155727-88c88cac5bc9 h1:mSFFPwil5Ih+RPBvn88MBerQMtsoHnOuyCZQaf91a34=
cloud.o-forge.io/core/oc-lib v0.0.0-20250313155727-88c88cac5bc9/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/akamensky/argparse v1.4.0 h1:YGzvsTqCvbEZhL8zZu2AiA5nq805NZh75JNj4ajn1xc=
github.com/akamensky/argparse v1.4.0/go.mod h1:S5kwC7IuDcEr5VeXtGPRVZ5o/FdhcMlQz4IZQuw64xA=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/argoproj/argo-workflows/v3 v3.6.4 h1:5+Cc1UwaQE5ka3w7R3hxZ1TK3M6VjDEXA5WSQ/IXrxY=
github.com/argoproj/argo-workflows/v3 v3.6.4/go.mod h1:2f5zB8CkbNCCO1od+kd1dWkVokqcuyvu+tc+Jwx1MZg=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/beego/beego/v2 v2.3.8 h1:wplhB1pF4TxR+2SS4PUej8eDoH4xGfxuHfS7wAk9VBc=
github.com/beego/beego/v2 v2.3.8/go.mod h1:8vl9+RrXqvodrl9C8yivX1e6le6deCK6RWeq8R7gTTg=
github.com/beego/beego/v2 v2.3.7 h1:z4btKtjU/rfp5BiYHkGD2QPjK9i1E9GH+I7vfhn6Agk=
github.com/beego/beego/v2 v2.3.7/go.mod h1:5cqHsOHJIxkq44tBpRvtDe59GuVRVv/9/tyVDxd5ce4=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/biter777/countries v1.7.5 h1:MJ+n3+rSxWQdqVJU8eBy9RqcdH6ePPn4PJHocVWUa+Q=
@@ -41,27 +31,23 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
github.com/elazarl/go-bindata-assetfs v1.0.1 h1:m0kkaHRKEu7tUIUFVwhGGGYClXvyl4RE03qmvRTNfbw=
github.com/elazarl/go-bindata-assetfs v1.0.1/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/etcd-io/etcd v3.3.17+incompatible/go.mod h1:cdZ77EstHBwVtD6iTgzgvogwcjo9m4iOqoijouPJ4bs=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU=
@@ -74,8 +60,8 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4=
github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k=
github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -90,15 +76,18 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -113,8 +102,6 @@ github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -126,8 +113,6 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -136,10 +121,6 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/libp2p/go-libp2p/core v0.43.0-rc2 h1:1X1aDJNWhMfodJ/ynbaGLkgnC8f+hfBIqQDrzxFZOqI=
github.com/libp2p/go-libp2p/core v0.43.0-rc2/go.mod h1:NYeJ9lvyBv9nbDk2IuGb8gFKEOkIv/W5YRIy1pAJB2Q=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
@@ -151,8 +132,6 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@@ -162,61 +141,47 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc=
github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/nats-io/nats.go v1.44.0 h1:ECKVrDLdh/kDPV1g0gAQ+2+m2KprqZK5O/eJAyAnH2M=
github.com/nats-io/nats.go v1.44.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0=
github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE=
github.com/nats-io/nats.go v1.41.0 h1:PzxEva7fflkd+n87OtQTXqCTyLfIIMFJBpyccHLE2Ko=
github.com/nats-io/nats.go v1.41.0/go.mod h1:wV73x0FSI/orHPSYoyMeJB+KajMDoWyXmFaRrrYaaTo=
github.com/nats-io/nkeys v0.4.10 h1:glmRrpCmYLHByYcePvnTBEAwawwapjCPMjy2huw20wc=
github.com/nats-io/nkeys v0.4.10/go.mod h1:OjRrnIKnWBFl+s4YK5ChQfvHP2fxqZexrKJoVVyWB3U=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/nwtgck/go-fakelish v0.1.3 h1:bA8/xa9hQmzppexIhBvdmztcd/PJ4SPuAUTBdMKZ8G4=
github.com/nwtgck/go-fakelish v0.1.3/go.mod h1:2HC44/OwVWwOa/g3+P2jUM3FEHQ0ya4gyCSU19PPd3Y=
github.com/ogier/pflag v0.0.1/go.mod h1:zkFki7tvTa0tafRvTBIZTvzYyAu6kQhPZFnshFFPE+g=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM=
github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg=
github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
@@ -231,24 +196,20 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
@@ -267,33 +228,23 @@ github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfS
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw=
go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ=
go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -306,12 +257,12 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -319,8 +270,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -336,20 +287,20 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -360,8 +311,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -384,13 +335,13 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8=
google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -402,25 +353,21 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.35.1 h1:0PO/1FhlK/EQNVK5+txc4FuhQibV25VLSdLMmGpDE/Q=
k8s.io/api v0.35.1/go.mod h1:28uR9xlXWml9eT0uaGo6y71xK86JBELShLy4wR1XtxM=
k8s.io/apimachinery v0.35.1 h1:yxO6gV555P1YV0SANtnTjXYfiivaTPvCTKX6w6qdDsU=
k8s.io/apimachinery v0.35.1/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
k8s.io/client-go v0.35.1 h1:+eSfZHwuo/I19PaSxqumjqZ9l5XiTEKbIaJ+j1wLcLM=
k8s.io/client-go v0.35.1/go.mod h1:1p1KxDt3a0ruRfc/pG4qT/3oHmUj1AhSHEcxNSGg+OA=
k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc=
k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k=
k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs=
k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU=
k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=


@@ -11,8 +11,6 @@ import (
"sync"
"time"
"cloud.o-forge.io/core/oc-lib/models/common/enum"
octools "cloud.o-forge.io/core/oc-lib/tools"
"github.com/rs/zerolog"
"k8s.io/apimachinery/pkg/watch"
@@ -48,7 +46,7 @@ func NewArgoLogs(name string, namespace string, stepMax int) *ArgoLogs {
return &ArgoLogs{
Name: "oc-monitor-" + name,
Namespace: namespace,
CreatedDate: time.Now().UTC().Format("2006-01-02 15:04:05"),
CreatedDate: time.Now().Format("2006-01-02 15:04:05"),
StepCount: 0,
StepMax: stepMax,
stop: false,
@@ -56,7 +54,7 @@ func NewArgoLogs(name string, namespace string, stepMax int) *ArgoLogs {
}
}
// An object to monitor and log the output of an argo submit
// An object to monitor and log the output of an argo submit
type ArgoLogs struct {
Name string
Namespace string
@@ -95,31 +93,22 @@ func (a *ArgoLogs) StartStepRecording(current_watch *ArgoWatch, logger zerolog.L
a.Started = time.Now()
}
type ArgoPodLog struct {
PodName string
Step string
Message string
PodName string
Step string
Message string
}
func NewArgoPodLog(name string, step string, msg string) ArgoPodLog {
return ArgoPodLog{
PodName: name,
Step: step,
Step: step,
Message: msg,
}
}
// LogKubernetesArgo watches an Argo workflow and emits NATS lifecycle events.
// It no longer writes directly to the database — all state transitions are
// delegated to oc-scheduler (WorkflowExecution) and oc-datacenter (Bookings)
// via the dedicated NATS channels.
//
// - wfName : Argo workflow name (also the name of the root DAG node)
// - execID : WorkflowExecution UUID (for oc-scheduler to update state)
// - executionsID: run-group ID shared by all bookings of this run
// - namespace : Kubernetes namespace
// - watcher : Argo watch stream
func LogKubernetesArgo(wfName string, execID string, executionsID string, namespace string, watcher watch.Interface) {
func LogKubernetesArgo(wfName string, namespace string, watcher watch.Interface) {
var argoWatcher *ArgoWatch
var pods []string
var node wfv1.NodeStatus
@@ -128,61 +117,38 @@ func LogKubernetesArgo(wfName string, execID string, executionsID string, namesp
wfl.Debug().Msg("Starting to log " + wfName)
var wg sync.WaitGroup
// nodePhases tracks the last known phase of each step node so we can detect
// phase transitions and emit WORKFLOW_STEP_DONE_EVENT exactly once per step.
nodePhases := map[string]wfv1.NodePhase{}
// stepResults captures the final NodeStatus of every completed step so the
// WORKFLOW_DONE_EVENT can include a full recap (Steps slice) for oc-scheduler
// and oc-catalog to catch up if they missed individual STEP_DONE events.
stepResults := map[string]wfv1.NodeStatus{}
workflowStartedEmitted := false
for event := range watcher.ResultChan() {
for event := range (watcher.ResultChan()) {
wf, ok := event.Object.(*wfv1.Workflow)
if !ok {
wfl.Error().Msg("unexpected type")
continue
}
if len(wf.Status.Nodes) == 0 {
wfl.Info().Msg("No node status yet")
wfl.Debug().Msg("No node status yet") // The first output of the channel doesn't contain Nodes so we skip it
continue
}
// ── Emit WORKFLOW_STARTED_EVENT once ────────────────────────────────
if !workflowStartedEmitted {
realStart := wf.Status.StartedAt.Time
emitLifecycleEvent(octools.WORKFLOW_STARTED_EVENT, octools.WorkflowLifecycleEvent{
ExecutionID: execID,
ExecutionsID: executionsID,
State: enum.STARTED.EnumIndex(),
RealStart: &realStart,
})
workflowStartedEmitted = true
}
conditions := retrieveCondition(wf)
conditions := retrieveCondition(wf)
// Retrieving the Status for the main node, which is named after the workflow
if node, ok = wf.Status.Nodes[wfName]; !ok {
bytified, _ := json.MarshalIndent(wf.Status.Nodes, "", "\t")
bytified, _ := json.MarshalIndent(wf.Status.Nodes,"","\t")
wfl.Fatal().Msg("Could not find the " + wfName + " node in \n" + string(bytified))
}
now := time.Now().UTC()
start, _ := time.Parse(time.RFC3339, node.StartedAt.String())
duration := now.Sub(start.UTC())
now := time.Now()
start, _ := time.Parse(time.RFC3339, node.StartedAt.String() )
duration := now.Sub(start)
newWatcher := ArgoWatch{
Name: node.Name,
Namespace: namespace,
Status: string(node.Phase),
Created: node.StartedAt.String(),
Started: node.StartedAt.String(),
Progress: string(node.Progress),
Duration: duration.String(),
Name: node.Name,
Namespace: namespace,
Status: string(node.Phase),
Created: node.StartedAt.String(),
Started: node.StartedAt.String(),
Progress: string(node.Progress),
Duration: duration.String(),
Conditions: conditions,
}
@@ -190,157 +156,40 @@ func LogKubernetesArgo(wfName string, execID string, executionsID string, namesp
argoWatcher = &newWatcher
}
if !newWatcher.Equals(argoWatcher) {
if !newWatcher.Equals(argoWatcher){
jsonified, _ := json.Marshal(newWatcher)
wfl.Info().Msg(string(jsonified))
argoWatcher = &newWatcher
}
// ── Per-step completion detection ────────────────────────────────────
for _, stepNode := range wf.Status.Nodes {
if stepNode.Name == wfName {
continue // skip the main DAG node
}
prev := nodePhases[stepNode.Name]
nodePhases[stepNode.Name] = stepNode.Phase
if prev == stepNode.Phase {
continue // no change
}
if !stepNode.Phase.Completed() && !stepNode.Phase.FailedOrError() {
continue // not terminal yet
}
if prev.Completed() || prev.FailedOrError() {
continue // already processed
}
bookingID := extractBookingID(stepNode.Name)
if bookingID == "" {
continue
}
stepState := enum.SUCCESS
if stepNode.Phase.FailedOrError() {
stepState = enum.FAILURE
}
realStart := stepNode.StartedAt.Time
realEnd := stepNode.FinishedAt.Time
if realEnd.IsZero() {
realEnd = time.Now().UTC()
}
emitLifecycleEvent(octools.WORKFLOW_STEP_DONE_EVENT, octools.WorkflowLifecycleEvent{
ExecutionID: execID,
ExecutionsID: executionsID,
BookingID: bookingID,
State: stepState.EnumIndex(),
RealStart: &realStart,
RealEnd: &realEnd,
})
// Store for the final recap emitted with WORKFLOW_DONE_EVENT.
stepResults[bookingID] = stepNode
}
// ── Pod log streaming ────────────────────────────────────────────────
for _, pod := range wf.Status.Nodes {
if pod.Type != wfv1.NodeTypePod {
continue
}
if !slices.Contains(pods, pod.Name) {
pl := wfl.With().Str("pod", pod.Name).Logger()
pl.Info().Msg("Found a new pod to log : " + pod.Name)
// I don't think we need to use WaitGroup here, because the loop itself
// acts as blocking process for the main thread, because Argo watch never closes the channel
for _, pod := range wf.Status.Nodes{
if !slices.Contains(pods,pod.Name){
pl := wfl.With().Str("pod", pod.Name).Logger()
if wfName == pod.Name { pods = append(pods, pod.Name); continue } // One of the node is the Workflow, the others are the pods so don't try to log on the wf name
pl.Info().Msg("Found a new pod to log : " + pod.Name)
wg.Add(1)
go logKubernetesPods(namespace, wfName, pod.Name, pl, &wg)
pods = append(pods, pod.Name)
}
}
}
// ── Workflow terminal phase ──────────────────────────────────────────
if node.Phase.Completed() || node.Phase.FailedOrError() {
if node.Phase.Completed() {
wfl.Info().Msg(wfName + " workflow completed")
} else {
wfl.Error().Msg(wfName + " has failed, please refer to the logs")
wfl.Error().Msg(node.Message)
}
// Stop listening to the chan when the Workflow is completed or something bad happened
if node.Phase.Completed() {
wfl.Info().Msg(wfName + " worflow completed")
wg.Wait()
wfl.Info().Msg(wfName + " exiting")
finalState := enum.SUCCESS
if node.Phase.FailedOrError() {
finalState = enum.FAILURE
}
realStart := node.StartedAt.Time
realEnd := node.FinishedAt.Time
if realEnd.IsZero() {
realEnd = time.Now().UTC()
}
// Build recap from all observed step results.
steps := make([]octools.StepMetric, 0, len(stepResults))
for bookingID, s := range stepResults {
stepState := enum.SUCCESS
if s.Phase.FailedOrError() {
stepState = enum.FAILURE
}
start := s.StartedAt.Time
end := s.FinishedAt.Time
if end.IsZero() {
end = realEnd
}
steps = append(steps, octools.StepMetric{
BookingID: bookingID,
State: stepState.EnumIndex(),
RealStart: &start,
RealEnd: &end,
})
}
emitLifecycleEvent(octools.WORKFLOW_DONE_EVENT, octools.WorkflowLifecycleEvent{
ExecutionID: execID,
ExecutionsID: executionsID,
State: finalState.EnumIndex(),
RealStart: &realStart,
RealEnd: &realEnd,
Steps: steps,
})
break
}
if node.Phase.FailedOrError() {
wfl.Error().Msg(wfName + "has failed, please refer to the logs")
wfl.Error().Msg(node.Message)
break
}
}
}
// emitLifecycleEvent publishes a WorkflowLifecycleEvent on the given NATS channel.
func emitLifecycleEvent(method octools.NATSMethod, evt octools.WorkflowLifecycleEvent) {
payload, err := json.Marshal(evt)
if err != nil {
return
}
octools.NewNATSCaller().SetNATSPub(method, octools.NATSResponse{
FromApp: "oc-monitord",
Method: int(method),
Payload: payload,
})
}
// extractBookingID extracts the bookingID (UUID, 36 chars) from an Argo node
// display name. Argo step nodes are named "{wfName}.{taskName}" where taskName
// is "{resource-name}-{bookingID}" as generated by getArgoName in argo_builder.
func extractBookingID(nodeName string) string {
parts := strings.SplitN(nodeName, ".", 2)
if len(parts) < 2 {
return ""
}
taskName := parts[1]
if len(taskName) < 36 {
return ""
}
candidate := taskName[len(taskName)-36:]
// Validate UUID shape: 8-4-4-4-12 with dashes at positions 8,13,18,23.
if candidate[8] == '-' && candidate[13] == '-' && candidate[18] == '-' && candidate[23] == '-' {
return candidate
}
return ""
}
func retrieveCondition(wf *wfv1.Workflow) (c Conditions) {
for _, cond := range wf.Status.Conditions {
if cond.Type == "PodRunning" {
@@ -350,34 +199,37 @@ func retrieveCondition(wf *wfv1.Workflow) (c Conditions) {
c.Completed = cond.Status == "True"
}
}
return
return
}
// logKubernetesPods streams pod logs to the structured logger.
func logKubernetesPods(executionId string, wfName string, podName string, logger zerolog.Logger, wg *sync.WaitGroup) {
// Function needed to be executed as a go thread
func logKubernetesPods(executionId string, wfName string,podName string, logger zerolog.Logger, wg *sync.WaitGroup){
defer wg.Done()
s := strings.Split(podName, ".")
name := s[0] + "-" + s[1]
step := s[1]
k, err := tools.NewKubernetesTool()
if err != nil {
logger.Error().Msg("Could not get Kubernetes tools")
return
}
reader, err := k.GetPodLogger(executionId, wfName, podName)
if err != nil {
logger.Error().Msg(err.Error())
return
}
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
log := scanner.Text()
podLog := NewArgoPodLog(name, step, log)
podLog := NewArgoPodLog(name,step,log)
jsonified, _ := json.Marshal(podLog)
logger.Info().Msg(string(jsonified))
}
}
}
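
For reference, logKubernetesPods above derives its logging fields by splitting the Argo pod name on a dot, since step pods are named "{workflow}.{step}". A self-contained illustration with a hypothetical pod name:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// Hypothetical Argo pod name, shaped "{workflowName}.{stepName}".
		// Like the original, this assumes the dot is present; s[1] would
		// panic otherwise.
		podName := "oc-monitor-demo.curl-step"
		s := strings.Split(podName, ".")
		name := s[0] + "-" + s[1] // logger "pod" field: oc-monitor-demo-curl-step
		step := s[1]              // logger "step" field: curl-step
		fmt.Println(name, step)
	}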

main.go: 181 changes

@@ -1,11 +1,16 @@
package main
import (
"bufio"
"encoding/base64"
"fmt"
"io"
"os"
"os/exec"
"regexp"
"strings"
"sync"
"time"
"oc-monitord/conf"
l "oc-monitord/logger"
@@ -14,9 +19,8 @@ import (
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/config"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/booking"
"cloud.o-forge.io/core/oc-lib/models/common/enum"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
@@ -26,6 +30,7 @@ import (
"github.com/akamensky/argparse"
"github.com/google/uuid"
"github.com/goraz/onion"
"github.com/rs/zerolog"
)
@@ -41,35 +46,35 @@ var wf_logger zerolog.Logger
var parser argparse.Parser
var workflowName string
const defaultConfigFile = "/etc/oc/ocmonitord_conf.json"
const localConfigFile = "./conf/local_ocmonitord_conf.json"
func main() {
o := config.GetConfLoader("oc-monitord")
os.Setenv("test_service", "true") // Only for service demo, delete before merging on main
parser = *argparse.NewParser("oc-monitord", "Launch the execution of a workflow given as a parameter and sends the produced logs to a loki database")
loadConfig(&parser)
fmt.Println("sqdqs", o.GetStringDefault("MONGO_URL", "mongodb://mongo:27017"))
loadConfig(false, &parser)
oclib.InitDaemon("oc-monitord")
// Start the centralized NATS subscriber for the PB_CONSIDERS confirmations.
workflow_builder.StartConsidersListener()
fmt.Println(conf.GetConfig())
oclib.SetConfig(
conf.GetConfig().MongoURL,
conf.GetConfig().Database,
conf.GetConfig().NatsURL,
conf.GetConfig().LokiURL,
conf.GetConfig().Logs,
)
logger = u.GetLogger()
logger.Debug().Msg("Loki URL : " + config.GetConfig().LokiUrl)
logger.Info().Msg("Workflow executed : " + conf.GetConfig().ExecutionID)
logger.Debug().Msg("Loki URL : " + conf.GetConfig().LokiURL)
logger.Debug().Msg("Workflow executed : " + conf.GetConfig().ExecutionID)
exec := u.GetExecution(conf.GetConfig().ExecutionID)
if exec == nil {
logger.Fatal().Msg("Could not retrieve workflow ID from execution ID " + conf.GetConfig().ExecutionID + " on peer " + conf.GetConfig().PeerID)
oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), nil).UpdateOne(map[string]interface{}{
"state": enum.FAILURE.EnumIndex(),
}, conf.GetConfig().ExecutionID)
return
}
conf.GetConfig().WorkflowID = exec.WorkflowID
logger.Info().Msg("Starting construction of yaml argo for workflow :" + exec.WorkflowID)
logger.Debug().Msg("Starting construction of yaml argo for workflow :" + exec.WorkflowID)
if _, err := os.Stat("./argo_workflows/"); os.IsNotExist(err) {
os.Mkdir("./argo_workflows/", 0755)
@@ -79,53 +84,49 @@ func main() {
// // create argo
new_wf := workflow_builder.WorflowDB{}
err := new_wf.LoadFrom(conf.GetConfig().WorkflowID)
err := new_wf.LoadFrom(conf.GetConfig().WorkflowID, conf.GetConfig().PeerID)
if err != nil {
logger.Error().Msg("Could not retrieve workflow " + conf.GetConfig().WorkflowID + " from oc-catalog API")
}
builder, _, err := new_wf.ExportToArgo(exec, conf.GetConfig().Timeout) // Removed stepMax so far, I don't know if we need it anymore
builder, _, err := new_wf.ExportToArgo(exec.ExecutionsID, conf.GetConfig().Timeout) // Removed stepMax so far, I don't know if we need it anymore
if err != nil {
logger.Error().Msg("Could not create the Argo file for " + conf.GetConfig().WorkflowID)
logger.Error().Msg(err.Error())
oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), nil).UpdateOne(map[string]interface{}{
"state": enum.FAILURE.EnumIndex(),
}, exec.GetID())
return
}
argoFilePath, err := builder.CompleteBuild(exec.ExecutionsID)
if err != nil {
logger.Error().Msg("Error when completing the build of the workflow: " + err.Error())
oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), nil).UpdateOne(map[string]interface{}{
"state": enum.FAILURE.EnumIndex(),
}, exec.GetID())
return
}
workflowName = getContainerName(argoFilePath)
wf_logger := u.GetWFLogger(workflowName)
wf_logger.Debug().Msg("Testing argo name")
if conf.GetConfig().KubeHost == "" {
// Not in a k8s environment, get conf from parameters
panic("can't exec with no kube for argo deployment")
logger.Info().Msg("Executes outside of k8s")
executeOutside(argoFilePath, builder.Workflow)
} else {
// Executed in a k8s environment
logger.Info().Msg("Executes inside a k8s")
// executeInside(exec.GetID(), "argo", argo_file_path, stepMax) // commenting to use conf.ExecutionID instead of exec.GetID()
executeInside(exec.ExecutionsID, exec.GetID(), argoFilePath)
executeInside(conf.GetConfig().ExecutionID, exec.ExecutionsID, argoFilePath)
}
}
// So far we only log the output from the created Argo workflow
func executeInside(ns string, execID string, argo_file_path string) {
func executeInside(execID string, ns string, argo_file_path string) {
t, err := tools2.NewService(conf.GetConfig().Mode)
if err != nil {
logger.Error().Msg("Could not create KubernetesTool : " + err.Error())
logger.Error().Msg("Could not create KubernetesTool")
return
}
name, err := t.CreateArgoWorkflow(argo_file_path, ns)
// _ = name
// _ = name
if err != nil {
logger.Error().Msg("Could not create argo workflow : " + err.Error())
logger.Info().Msg(fmt.Sprint("CA :" + conf.GetConfig().KubeCA))
@@ -136,21 +137,91 @@ func executeInside(ns string, execID string, argo_file_path string) {
watcher, err := t.GetArgoWatch(ns, workflowName)
if err != nil {
logger.Error().Msg("Could not retrieve Watcher : " + err.Error())
oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), nil).UpdateOne(map[string]interface{}{
"state": enum.FAILURE.EnumIndex(),
}, execID)
}
l.LogKubernetesArgo(name, execID, ns, ns, watcher)
l.LogKubernetesArgo(name, ns, watcher)
if err != nil {
logger.Error().Msg("Could not log workflow : " + err.Error())
}
logger.Info().Msg("Finished, exiting...")
}
}
func loadConfig(parser *argparse.Parser) {
func executeOutside(argo_file_path string, workflow workflow_builder.Workflow) {
var stdoutSubmit, stderrSubmit io.ReadCloser
var stdoutLogs, stderrLogs io.ReadCloser
var wg sync.WaitGroup
var err error
logger.Debug().Msg("executing :" + "argo submit --watch " + argo_file_path + " --serviceaccount sa-" + conf.GetConfig().ExecutionID + " -n " + conf.GetConfig().ExecutionID )
cmdSubmit := exec.Command("argo", "submit", "--watch", argo_file_path, "--serviceaccount", "sa-"+conf.GetConfig().ExecutionID, "-n", conf.GetConfig().ExecutionID)
if stdoutSubmit, err = cmdSubmit.StdoutPipe(); err != nil {
wf_logger.Error().Msg("Could not retrieve stdoutpipe " + err.Error())
return
}
cmdLogs := exec.Command("argo", "logs", "oc-monitor-"+workflowName, "-n", conf.GetConfig().ExecutionID, "--follow", "--no-color")
if stdoutLogs, err = cmdLogs.StdoutPipe(); err != nil {
wf_logger.Error().Msg("Could not retrieve stdout pipe for 'argo logs' " + err.Error())
return
}
if stderrLogs, err = cmdLogs.StderrPipe(); err != nil {
wf_logger.Error().Msg("Could not retrieve stderr pipe for 'argo logs' " + err.Error())
return
}
var steps []string
for _, template := range workflow.Spec.Templates {
steps = append(steps, template.Name)
}
go l.LogLocalWorkflow(workflowName, stdoutSubmit, &wg)
go l.LogLocalPod(workflowName, stdoutLogs, steps, &wg)
logger.Info().Msg("Starting argo submit")
if err := cmdSubmit.Start(); err != nil {
wf_logger.Error().Msg("Could not start argo submit")
wf_logger.Error().Msg(err.Error() + bufio.NewScanner(stderrSubmit).Text())
updateStatus("fatal", "")
}
time.Sleep(5 * time.Second)
logger.Info().Msg("Running argo logs")
if err := cmdLogs.Run(); err != nil {
wf_logger.Error().Msg("Could not run '" + strings.Join(cmdLogs.Args, " ") + "'")
wf_logger.Fatal().Msg(err.Error() + bufio.NewScanner(stderrLogs).Text())
}
logger.Info().Msg("Waiting argo submit")
if err := cmdSubmit.Wait(); err != nil {
wf_logger.Error().Msg("Could not execute argo submit")
wf_logger.Error().Msg(err.Error() + bufio.NewScanner(stderrSubmit).Text())
updateStatus("fatal", "")
}
wg.Wait()
}
func loadConfig(is_k8s bool, parser *argparse.Parser) {
var o *onion.Onion
o = initOnion(o)
setConf(is_k8s, o, parser)
// if !IsValidUUID(conf.GetConfig().ExecutionID) {
// logger.Fatal().Msg("Provided ID is not an UUID")
// }
}
func setConf(is_k8s bool, o *onion.Onion, parser *argparse.Parser) {
url := parser.String("u", "url", &argparse.Options{Required: true, Default: "http://127.0.0.1:3100", Help: "URL of the Loki database the logs will be sent to"})
mode := parser.String("M", "mode", &argparse.Options{Required: false, Default: "", Help: "Mode of the execution"})
execution := parser.String("e", "execution", &argparse.Options{Required: true, Help: "Execution ID of the workflow to request from oc-catalog API"})
peer := parser.String("p", "peer", &argparse.Options{Required: false, Default: "", Help: "Peer ID of the workflow to request from oc-catalog API"})
mongo := parser.String("m", "mongo", &argparse.Options{Required: true, Default: "mongodb://127.0.0.1:27017", Help: "URL to reach the MongoDB"})
db := parser.String("d", "database", &argparse.Options{Required: true, Default: "DC_myDC", Help: "Name of the database to query in MongoDB"})
timeout := parser.Int("t", "timeout", &argparse.Options{Required: false, Default: -1, Help: "Timeout for the execution of the workflow"})
ca := parser.String("c", "ca", &argparse.Options{Required: false, Default: "", Help: "CA file for the Kubernetes cluster"})
@@ -168,6 +239,9 @@ func loadConfig(parser *argparse.Parser) {
os.Exit(1)
}
conf.GetConfig().Logs = "debug"
conf.GetConfig().LokiURL = *url
conf.GetConfig().MongoURL = *mongo
conf.GetConfig().Database = *db
conf.GetConfig().Timeout = *timeout
conf.GetConfig().Mode = *mode
conf.GetConfig().ExecutionID = *execution
@@ -192,6 +266,34 @@ func loadConfig(parser *argparse.Parser) {
}
}
func initOnion(o *onion.Onion) *onion.Onion {
logger = logs.CreateLogger("oc-monitord")
configFile := ""
l3 := onion.NewEnvLayerPrefix("_", "OCMONITORD")
l2, err := onion.NewFileLayer(defaultConfigFile, nil)
if err == nil {
logger.Info().Msg("Config file found : " + defaultConfigFile)
configFile = defaultConfigFile
}
l1, err := onion.NewFileLayer(localConfigFile, nil)
if err == nil {
logger.Info().Msg("Local config file found " + localConfigFile + ", overriding default file")
configFile = localConfigFile
}
if configFile == "" {
logger.Info().Msg("No config file found, using env")
o = onion.New(l3)
} else if l1 != nil && l2 != nil { // both files loaded: the local layer overrides the default one
o = onion.New(l1, l2, l3)
} else if l1 != nil {
o = onion.New(l1, l3)
} else if l2 != nil {
o = onion.New(l2, l3)
}
return o
}
func IsValidUUID(u string) bool {
_, err := uuid.Parse(u)
return err == nil
@@ -206,6 +308,7 @@ func getContainerName(argo_file string) string {
return container_name
}
func updateStatus(status string, log string) {
exec_id := conf.GetConfig().ExecutionID
@@ -213,7 +316,7 @@ func updateStatus(status string, log string) {
wf_exec.ArgoStatusToState(status)
exec, _, err := workflow_execution.NewAccessor(&tools.APIRequest{
PeerID: conf.GetConfig().PeerID,
}).UpdateOne(wf_exec.Serialize(wf_exec), exec_id)
}).UpdateOne(wf_exec, exec_id)
if err != nil {
logger.Error().Msg("Could not update status for workflow execution " + exec_id + err.Error())
}


@@ -1,66 +0,0 @@
# Goal
We want to be able to instantiate a service that allows storing files located on a `processing` pod.
We have already tested it with a static `Argo` YAML file and a MinIO running on the same Kubernetes node; the MinIO service is reached because it is the only one associated with the `serviceAccount`.
We have established three options that need to be available to the user for the feature to be implemented:
- Use a MinIO running constantly on the node that executes the argo workflow
- Use a MinIO
- A MinIO is instantiated when a new workflow is launched
# Requirements
- Helm : `https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3`
- Helm GO client : `$ go get github.com/mittwald/go-helm-client`
- MinIO chart : `https://charts.min.io/`
# Resources
We need to create several resources in order for the pods to communicate with the MinIO.
## MinIO Auth Secrets
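This section was left empty in the notes. A minimal sketch of the `argo-artifact-secret` referenced by the ConfigMap below, assuming simple access/secret key credentials (values hypothetical):

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: argo-artifact-secret
type: Opaque
stringData: # stringData accepts plain values; the API server encodes them
  access-key: minioadmin
  secret-key: minioadmin
```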
## Bucket ConfigMap
Named `artifact-repositories`, this ConfigMap will be used by default. It contains the URL of the MinIO server and the keys to the authentication data held in a `Secret` resource.
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
# If you want to use this config map by default, name it "artifact-repositories".
name: artifact-repositories
# annotations:
# # v3.0 and after - if you want to use a specific key, put that key into this annotation.
# workflows.argoproj.io/default-artifact-repository: oc-s3-artifact-repository
data:
oc-s3-artifact-repository: |
s3:
bucket: oc-bucket
endpoint: [ retrieve cluster with kubectl get service argo-artifacts -o jsonpath="{.spec.clusterIP}" ]:9000
insecure: true
accessKeySecret:
name: argo-artifact-secret
key: access-key
secretKeySecret:
name: argo-artifact-secret
key: secret-key
```
# Code modifications
Add an `isDataLink` attribute
- true/false
Add a `DataPath` attribute, or something along those lines
- a list of map[string]string (allows keeping only one copy per file)
- editable only through the addDataPath method
- key: path of the file / value: name of the copy in MinIO
===> we need the same attribute for Processing -> Data and Data -> Processing (see the sketch below)


@@ -1,17 +1,10 @@
package models
import (
"encoding/json"
"fmt"
"os/exec"
"strings"
"cloud.o-forge.io/core/oc-lib/config"
"cloud.o-forge.io/core/oc-lib/models/common/models"
"cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/models/resources/native_tools"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
"cloud.o-forge.io/core/oc-lib/tools"
)
type Parameter struct {
@@ -20,11 +13,10 @@ type Parameter struct {
}
type Container struct {
Image string `yaml:"image"`
ImagePullPolicy string `yaml:"imagePullPolicy,omitempty"`
Command []string `yaml:"command,omitempty,flow"`
Args []string `yaml:"args,omitempty,flow"`
VolumeMounts []VolumeMount `yaml:"volumeMounts,omitempty"`
Image string `yaml:"image"`
Command []string `yaml:"command,omitempty,flow"`
Args []string `yaml:"args,omitempty,flow"`
VolumeMounts []VolumeMount `yaml:"volumeMounts,omitempty"`
}
func (c *Container) AddVolumeMount(volumeMount VolumeMount, volumes []VolumeMount) []VolumeMount {
@@ -46,10 +38,9 @@ func (c *Container) AddVolumeMount(volumeMount VolumeMount, volumes []VolumeMoun
}
type VolumeMount struct {
Name string `yaml:"name"`
MountPath string `yaml:"mountPath"`
Storage *resources.StorageResource `yaml:"-"`
IsReparted bool `yaml:"-"`
Name string `yaml:"name"`
MountPath string `yaml:"mountPath"`
Storage *resources.StorageResource `yaml:"-"`
}
type Task struct {
@@ -66,7 +57,7 @@ type Dag struct {
}
type TemplateMetadata struct {
Labels map[string]string `yaml:"labels,omitempty"`
Labels map[string]string `yaml:"labels,omitempty"`
Annotations map[string]string `yaml:"annotations,omitempty"`
}
@@ -80,18 +71,14 @@ type Key struct {
Bucket string `yaml:"bucket"`
EndPoint string `yaml:"endpoint"`
Insecure bool `yaml:"insecure"`
AccessKeySecret *Secret `yaml:"accessKeySecret"`
SecretKeySecret *Secret `yaml:"secretKeySecret"`
AccessKeySecret *Secret `yaml:"accessKeySecret"`
SecretKeySecret *Secret `yaml:"secretKeySecret"`
}
type Artifact struct {
Name string `yaml:"name"`
Path string `yaml:"path"`
}
type ArtifactRepositoryRef struct {
ConfigMap string `yaml:"configMap"`
Key string `yaml:"key"`
S3 *Key `yaml:"s3,omitempty"`
}
type InOut struct {
@@ -100,69 +87,22 @@ type InOut struct {
}
type Template struct {
Name string `yaml:"name"`
Inputs InOut `yaml:"inputs,omitempty"`
Outputs InOut `yaml:"outputs,omitempty"`
Container Container `yaml:"container,omitempty"`
Dag *Dag `yaml:"dag,omitempty"`
Metadata TemplateMetadata `yaml:"metadata,omitempty"`
Resource ServiceResource `yaml:"resource,omitempty"`
NodeSelector map[string]string `yaml:"nodeSelector,omitempty"`
Name string `yaml:"name"`
Inputs InOut `yaml:"inputs,omitempty"`
Outputs InOut `yaml:"outputs,omitempty"`
Container Container `yaml:"container,omitempty"`
Dag *Dag `yaml:"dag,omitempty"`
Metadata TemplateMetadata `yaml:"metadata,omitempty"`
Resource ServiceResource `yaml:"resource,omitempty"`
}
func (template *Template) CreateEventContainer(execution *workflow_execution.WorkflowExecution, nt *resources.NativeTool, dag *Dag) {
container := Container{Image: "natsio/nats-box", ImagePullPolicy: "IfNotPresent"}
container.Command = []string{"sh", "-c"} // all is bash
var event native_tools.WorkflowEventParams
b, err := json.Marshal(nt.Params)
if err != nil {
fmt.Println(err)
return
}
err = json.Unmarshal(b, &event)
if err != nil {
fmt.Println(err)
return
}
if event.WorkflowResourceID != "" {
event.Payload = event.Input
event.Input = ""
if b, err := json.Marshal(event); err == nil {
payload, err := json.Marshal(&tools.NATSResponse{
FromApp: "oc-monitord",
Datatype: tools.NATIVE_TOOL,
Method: int(tools.WORKFLOW_EVENT),
Payload: b,
})
if err == nil {
cmd := exec.Command(
"nats",
"pub",
"--server", config.GetConfig().NATSUrl+":4222",
tools.WORKFLOW_EVENT.GenerateKey(),
string(payload),
)
for _, args := range cmd.Args {
container.Args = append(container.Args, args)
}
template.Container = container
}
}
}
}
func (template *Template) CreateContainer(exec *workflow_execution.WorkflowExecution, processing *resources.ProcessingResource, dag *Dag) {
index := 0
if d, ok := exec.SelectedInstances[processing.GetID()]; ok {
index = d
}
instance := processing.GetSelectedInstance(&index)
func (template *Template) CreateContainer(processing *resources.ProcessingResource, dag *Dag) {
instance := processing.GetSelectedInstance()
if instance == nil {
return
}
inst := instance.(*resources.ProcessingInstance)
container := Container{Image: inst.Access.Container.Image, ImagePullPolicy: "IfNotPresent"}
container := Container{Image: inst.Access.Container.Image}
if container.Image == "" {
return
}
@@ -191,7 +131,7 @@ func (template *Template) CreateContainer(exec *workflow_execution.WorkflowExecu
func (template *Template) ReplacePerEnv(arg string, envs []models.Param) string {
for _, v := range envs {
if v.Name != "" && strings.Contains(arg, v.Name) {
if strings.Contains(arg, v.Name) {
value := "{{ inputs.parameters." + v.Name + " }}"
arg = strings.ReplaceAll(arg, v.Name, value)
arg = strings.ReplaceAll(arg, "$"+v.Name, value)
@@ -201,20 +141,12 @@ func (template *Template) ReplacePerEnv(arg string, envs []models.Param) string
return arg
}
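For illustration, a sketch of the substitution performed by ReplacePerEnv, assuming a single hypothetical parameter named INPUT_FILE:

```go
envs := []models.Param{{Name: "INPUT_FILE"}}
arg := template.ReplacePerEnv("python run.py INPUT_FILE", envs)
// arg == "python run.py {{ inputs.parameters.INPUT_FILE }}"
```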
// AddAdmiraltyAnnotations marks the template so that Admiralty routes the pod
// to the virtual cluster matching peerId.
//
// - elect: "" triggers the Admiralty webhook on the pod created by Argo.
// - nodeSelector targets the Admiralty virtual node whose
// multicluster.admiralty.io/cluster-name label equals peerId,
// which constrains scheduling to the remote cluster.
func (t *Template) AddAdmiraltyAnnotations(peerId string) {
// Add the metadata that allow Admiralty to pick up an Argo Workflow that needs to be reparted
// The value of "clustername" is the peerId, which must be replaced by the node name's for this specific execution
func (t *Template) AddAdmiraltyAnnotations(peerId string){
if t.Metadata.Annotations == nil {
t.Metadata.Annotations = make(map[string]string)
}
t.Metadata.Annotations["multicluster.admiralty.io/elect"] = ""
if t.NodeSelector == nil {
t.NodeSelector = make(map[string]string)
}
t.NodeSelector["multicluster.admiralty.io/cluster-name"] = peerId
}
t.Metadata.Annotations["multicluster.admiralty.io/clustername"] = peerId
}


@@ -2,8 +2,7 @@ package models
type VolumeClaimTemplate struct {
Metadata struct {
Name string `yaml:"name"`
Annotations map[string]string `yaml:"annotations,omitempty"`
Name string `yaml:"name"`
} `yaml:"metadata"`
Spec VolumeSpec `yaml:"spec"`
}
@@ -16,12 +15,3 @@ type VolumeSpec struct {
} `yaml:"requests"`
} `yaml:"resources"`
}
// ExistingVolume references a pre-provisioned PVC (created by oc-datacenter).
// Used in Workflow.Spec.ExistingVolumes (yaml: "volumes") instead of volumeClaimTemplates.
type ExistingVolume struct {
Name string `yaml:"name"`
PersistentVolumeClaim struct {
ClaimName string `yaml:"claimName"`
} `yaml:"persistentVolumeClaim"`
}
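A sketch of the YAML an ExistingVolume entry renders to under `spec.volumes` (names hypothetical):

```yaml
volumes:
  - name: my-storage
    persistentVolumeClaim:
      claimName: my-storage-<executionsID> # see ClaimName() in oc-datacenter
```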



@@ -4,16 +4,14 @@ import (
"errors"
"io"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/watch"
)
type Tool interface {
CreateArgoWorkflow(path string, ns string) (string, error)
CreateAccessSecret(user string, password string, storageId string, namespace string) (string, error)
CreateAccessSecret(ns string, login string, password string) (string, error)
GetArgoWatch(executionId string, wfName string) (watch.Interface, error)
GetPodLogger(ns string, wfName string, podName string) (io.ReadCloser, error)
GetS3Secret(storageId string, namespace string) *v1.Secret
}
var _service = map[string]func() (Tool, error){
@@ -21,7 +19,6 @@ var _service = map[string]func() (Tool, error){
}
func NewService(name string) (Tool, error) {
return NewKubernetesTool()
service, ok := _service[name]
if !ok {
return nil, errors.New("service not found")


@@ -2,17 +2,19 @@ package tools
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"oc-monitord/conf"
"oc-monitord/utils"
"os"
"time"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned"
"github.com/google/uuid"
v1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
@@ -29,7 +31,7 @@ type KubernetesTools struct {
func NewKubernetesTool() (Tool, error) {
// Load Kubernetes config (from ~/.kube/config)
config := &rest.Config{
Host: "https://" + conf.GetConfig().KubeHost + ":" + conf.GetConfig().KubePort,
Host: conf.GetConfig().KubeHost + ":" + conf.GetConfig().KubePort,
TLSClientConfig: rest.TLSClientConfig{
CAData: []byte(conf.GetConfig().KubeCA),
CertData: []byte(conf.GetConfig().KubeCert),
@@ -75,7 +77,7 @@ func (k *KubernetesTools) CreateArgoWorkflow(path string, ns string) (string, er
if !ok {
return "", errors.New("decoded object is not a Workflow")
}
fmt.Println("NAMESPACE", ns)
// Create the workflow in the "argo" namespace
createdWf, err := k.VersionedSet.ArgoprojV1alpha1().Workflows(ns).Create(context.TODO(), workflow, metav1.CreateOptions{})
if err != nil {
@@ -86,20 +88,21 @@ func (k *KubernetesTools) CreateArgoWorkflow(path string, ns string) (string, er
return createdWf.Name, nil
}
func (k *KubernetesTools) CreateAccessSecret(access string, password string, storageId string, namespace string) (string, error) {
func (k *KubernetesTools) CreateAccessSecret(ns string, login string, password string) (string, error) {
// Namespace where the secret will be created
namespace := "default"
// Encode the secret data (Kubernetes requires base64-encoded values)
secretData := map[string][]byte{
"access-key": []byte(access),
"secret-key": []byte(password),
"access-key": []byte(base64.StdEncoding.EncodeToString([]byte(login))),
"secret-key": []byte(base64.StdEncoding.EncodeToString([]byte(password))),
}
// Define the Secret object
name := storageId + "-secret-s3"
name := uuid.New().String()
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Namespace: ns,
},
Type: v1.SecretTypeOpaque,
Data: secretData,
@@ -109,101 +112,78 @@ func (k *KubernetesTools) CreateAccessSecret(access string, password string, sto
if err != nil {
return "", errors.New("Error creating secret: " + err.Error())
}
return name, nil
}
func (k *KubernetesTools) GetS3Secret(storageId string, namespace string) *v1.Secret {
secret, err := k.Set.CoreV1().Secrets(namespace).Get(context.TODO(), storageId+"-secret-s3", metav1.GetOptions{})
// Get(context.TODO(),storageId + "-artifact-server", metav1.GetOptions{})
if err != nil && !k8serrors.IsNotFound(err) {
l := utils.GetLogger()
l.Fatal().Msg("An error happened when retrieving secret in " + namespace + " : " + err.Error())
}
if k8serrors.IsNotFound(err) {
return nil
}
return secret
// return secret
}
func (k *KubernetesTools) GetArgoWatch(executionId string, wfName string) (watch.Interface, error) {
options := metav1.ListOptions{FieldSelector: "metadata.name=oc-monitor-" + wfName}
func (k *KubernetesTools) GetArgoWatch(executionId string, wfName string) (watch.Interface, error){
wfl := utils.GetWFLogger("")
wfl.Debug().Msg("Starting argo watch with argo lib")
options := metav1.ListOptions{FieldSelector: "metadata.name=oc-monitor-"+wfName}
watcher, err := k.VersionedSet.ArgoprojV1alpha1().Workflows(executionId).Watch(context.Background(), options)
if err != nil {
return nil, errors.New("Error executing 'argo watch " + wfName + " -n " + executionId + " with ArgoprojV1alpha1 client")
}
return watcher, nil
return watcher, nil
}
func (k *KubernetesTools) GetPodLogger(ns string, wfName string, nodeName string) (io.ReadCloser, error) {
var targetPod v1.Pod
pods, err := k.Set.CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{
LabelSelector: "workflows.argoproj.io/workflow=" + wfName,
})
if err != nil {
return nil, fmt.Errorf("%s", "failed to list pods: "+err.Error())
}
if len(pods.Items) == 0 {
return nil, fmt.Errorf("%s", "no pods found with label workflows.argoproj.io/workflow="+wfName+" no pods found with label workflows.argoproj.io/node-name="+nodeName+" in namespace "+ns)
}
for _, pod := range pods.Items {
LabelSelector: "workflows.argoproj.io/workflow="+wfName,
})
if err != nil {
return nil, fmt.Errorf("failed to list pods: " + err.Error())
}
if len(pods.Items) == 0 {
return nil, fmt.Errorf("no pods found with label workflows.argoproj.io/workflow="+ wfName + " no pods found with label workflows.argoproj.io/node-name=" + nodeName + " in namespace " + ns)
}
for _, pod := range pods.Items {
if pod.Annotations["workflows.argoproj.io/node-name"] == nodeName {
targetPod = pod
}
}
if targetPod.Name == "" {
return nil, fmt.Errorf("no pod found matching node-name %s in namespace %s", nodeName, ns)
}
// k8s API throws an error if we try getting logs while the container are not initialized, so we repeat status check there
k.testPodReady(targetPod, ns)
// When using kubectl logs for a pod we see it contacts /api/v1/namespaces/NAMESPACE/pods/oc-monitor-PODNAME/log?container=main so we add container: main to the call
req, err := k.Set.CoreV1().Pods(ns).GetLogs(targetPod.Name, &v1.PodLogOptions{Follow: true, Container: "main"}).Stream(context.Background())
req, err := k.Set.CoreV1().Pods(ns).GetLogs(targetPod.Name, &v1.PodLogOptions{Follow: true, Container: "main"}). Stream(context.Background())
if err != nil {
return nil, fmt.Errorf("%s", " Error when trying to get logs for "+targetPod.Name+" : "+err.Error())
return nil, fmt.Errorf(" Error when trying to get logs for " + targetPod.Name + " : " + err.Error())
}
return req, nil
}
func (k *KubernetesTools) testPodReady(pod v1.Pod, ns string) {
wfl := utils.GetWFLogger("")
watcher, err := k.Set.CoreV1().Pods(ns).Watch(context.Background(), metav1.ListOptions{
FieldSelector: "metadata.name=" + pod.Name,
ResourceVersion: pod.ResourceVersion,
})
if err != nil {
wfl.Error().Msg("Error watching pod: " + err.Error() + "\n")
return
}
defer watcher.Stop()
for event := range watcher.ResultChan() {
p, ok := event.Object.(*v1.Pod)
if !ok {
continue
for {
pod, err := k.Set.CoreV1().Pods(ns).Get(context.Background(), pod.Name, metav1.GetOptions{})
if err != nil {
wfl := utils.GetWFLogger("")
wfl.Error().Msg("Error fetching pod: " + err.Error() + "\n")
break
}
// It seems that for remote pods the pod reaches the Succeeded phase before it has time to report that it is ready to run in .status.conditions, so we added the OR condition
if p.Status.Phase == v1.PodSucceeded {
return
}
for _, cond := range p.Status.Conditions {
if cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue {
var initialized bool
for _, cond := range pod.Status.Conditions {
// It seems that for remote pods the pod reaches the Succeeded phase before it has time to report that it is ready to run in .status.conditions, so we added the OR condition
if (cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue) || pod.Status.Phase == v1.PodSucceeded {
initialized = true
return
}
}
if initialized {
return
}
time.Sleep(2 * time.Second) // avoid hammering the API
}
}
}


@@ -0,0 +1,147 @@
package workflow_builder
import (
"encoding/json"
"fmt"
"net/http"
"oc-monitord/utils"
"slices"
"time"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/peer"
tools "cloud.o-forge.io/core/oc-lib/tools"
)
type AdmiraltySetter struct {
Id string // ID identifying the execution; corresponds to the workflow_executions id
NodeName string // Allows retrieving the name of the node used for this execution on each peer {"peerId": "nodeName"}
}
func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID string) error {
logger := logs.GetLogger()
data := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"",localPeerID,nil,nil).LoadOne(remotePeerID)
if data.Code != 200 {
logger.Error().Msg("Error while trying to instantiate remote peer " + remotePeerID)
return fmt.Errorf(data.Err)
}
remotePeer := data.ToPeer()
data = oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"",localPeerID,nil,nil).LoadOne(localPeerID)
if data.Code != 200 {
logger.Error().Msg("Error while trying to instantiate local peer " + remotePeerID)
return fmt.Errorf(data.Err)
}
localPeer := data.ToPeer()
caller := tools.NewHTTPCaller(
map[tools.DataType]map[tools.METHOD]string{
tools.ADMIRALTY_SOURCE: {
tools.POST :"/:id",
},
tools.ADMIRALTY_KUBECONFIG: {
tools.GET:"/:id",
},
tools.ADMIRALTY_SECRET: {
tools.POST:"/:id/" + remotePeerID,
},
tools.ADMIRALTY_TARGET: {
tools.POST:"/:id/" + remotePeerID,
},
tools.ADMIRALTY_NODES: {
tools.GET:"/:id/" + remotePeerID,
},
},
)
logger.Info().Msg("\n\n Creating the Admiralty Source on " + remotePeerID + " ns-" + s.Id)
_ = s.callRemoteExecution(remotePeer, []int{http.StatusCreated, http.StatusConflict},caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil, true)
logger.Info().Msg("\n\n Retrieving kubeconfig with the secret on " + remotePeerID + " ns-" + s.Id)
kubeconfig := s.getKubeconfig(remotePeer, caller)
logger.Info().Msg("\n\n Creating a secret from the kubeconfig " + localPeerID + " ns-" + s.Id)
_ = s.callRemoteExecution(localPeer, []int{http.StatusCreated}, caller,s.Id, tools.ADMIRALTY_SECRET, tools.POST,kubeconfig, true)
logger.Info().Msg("\n\n Creating the Admiralty Target on " + localPeerID + " in namespace " + s.Id )
_ = s.callRemoteExecution(localPeer,[]int{http.StatusCreated, http.StatusConflict},caller,s.Id,tools.ADMIRALTY_TARGET,tools.POST, nil, true)
logger.Info().Msg("\n\n Checking for the creation of the admiralty node on " + localPeerID + " ns-" + s.Id)
s.checkNodeStatus(localPeer,caller)
return nil
}
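A minimal usage sketch, assuming the caller already holds both peer IDs (variable names hypothetical):

```go
setter := &AdmiraltySetter{Id: executionsID}
if err := setter.InitializeAdmiralty(localPeerID, remotePeerID); err != nil {
	logger.Error().Msg("Admiralty setup failed: " + err.Error())
	return
}
// setter.NodeName now holds the Admiralty virtual node selected for this execution.
```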
func (s *AdmiraltySetter) getKubeconfig(peer *peer.Peer, caller *tools.HTTPCaller) map[string]string {
var kubedata map[string]string
_ = s.callRemoteExecution(peer, []int{http.StatusOK}, caller, s.Id, tools.ADMIRALTY_KUBECONFIG, tools.GET, nil, true)
if caller.LastResults["body"] == nil || len(caller.LastResults["body"].([]byte)) == 0 {
l := utils.GetLogger()
l.Error().Msg("Something went wrong when retrieving data from Get call for kubeconfig")
panic(0)
}
err := json.Unmarshal(caller.LastResults["body"].([]byte), &kubedata)
if err != nil {
l := utils.GetLogger()
l.Error().Msg("Something went wrong when unmarshalling data from Get call for kubeconfig")
panic(0)
}
return kubedata
}
func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode []int,caller *tools.HTTPCaller, dataID string, dt tools.DataType, method tools.METHOD, body interface{}, panicCode bool) *peer.PeerExecution {
l := utils.GetLogger()
resp, err := peer.LaunchPeerExecution(peer.UUID, dataID, dt, method, body, caller)
if err != nil {
l.Error().Msg("Error when executing on peer at" + peer.Url)
l.Error().Msg(err.Error())
panic(0)
}
if !slices.Contains(expectedCode, caller.LastResults["code"].(int)) {
l.Error().Msg(fmt.Sprint("Didn't receive the expected code :", caller.LastResults["code"], "when expecting", expectedCode))
if _, ok := caller.LastResults["body"]; ok {
l.Info().Msg(string(caller.LastResults["body"].([]byte)))
}
if panicCode {
panic(0)
}
}
return resp
}
func (s *AdmiraltySetter) storeNodeName(caller *tools.HTTPCaller){
var data map[string]interface{}
if resp, ok := caller.LastResults["body"]; ok {
json.Unmarshal(resp.([]byte), &data)
}
if node, ok := data["node"]; ok {
metadata := node.(map[string]interface{})["metadata"]
name := metadata.(map[string]interface{})["name"].(string)
s.NodeName = name
} else {
l := utils.GetLogger()
l.Error().Msg("Could not retrieve data about the recently created node")
panic(0)
}
}
func (s *AdmiraltySetter) checkNodeStatus(localPeer *peer.Peer, caller *tools.HTTPCaller){
for i := range(5) {
time.Sleep(10 * time.Second) // let some time for kube to generate the node
_ = s.callRemoteExecution(localPeer,[]int{http.StatusOK},caller,s.Id,tools.ADMIRALTY_NODES,tools.GET, nil, false)
if caller.LastResults["code"] == 200 {
s.storeNodeName(caller)
return
}
if i == 4 { // range(5) yields 0..4, so this is the final attempt
logger.Error().Msg("Node on " + localPeer.Name + " was never found, panicking !")
panic(0)
}
logger.Info().Msg("Could not verify that node is up. Retrying...")
}
}


@@ -1,15 +1,14 @@
// Package workflow_builder translates the information held in a Workflow's
// graph (its components and their links) into an Argo Workflow YAML file ready
// to be submitted to a Kubernetes cluster. The main entry point is ArgoBuilder.
// A class that translates the informations held in the graph object
// via its lists of components into an argo file, using the a list of
// link ID to build the dag
package workflow_builder
import (
"encoding/json"
"fmt"
"oc-monitord/conf"
. "oc-monitord/models"
"sync"
tools2 "oc-monitord/tools"
"os"
"strings"
"time"
@@ -17,46 +16,23 @@ import (
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/common/enum"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/models/resources/native_tools"
w "cloud.o-forge.io/core/oc-lib/models/workflow"
"cloud.o-forge.io/core/oc-lib/models/workflow/graph"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
"cloud.o-forge.io/core/oc-lib/tools"
"github.com/nwtgck/go-fakelish"
"github.com/rs/zerolog"
"gopkg.in/yaml.v3"
)
// logger is the zerolog logger shared within the package, re-initialized on
// each CreateDAG call to pick up the current configuration.
var logger zerolog.Logger
// ArgoBuilder is the main builder of the Argo Workflow file.
// It carries the build state (source workflow, generated templates,
// k8s services to create, global timeout, list of remote peers involved).
type ArgoBuilder struct {
// OriginWorkflow is the Open Cloud business workflow whose Argo representation is being built.
OriginWorkflow *w.Workflow
// Workflow is the Argo YAML structure under construction.
Workflow Workflow
// Services lists the Kubernetes services to expose for "IsService" processings.
Services []*Service
// Timeout is the maximum execution duration in seconds (activeDeadlineSeconds).
Timeout int
// RemotePeers holds the IDs of the remote peers detected through Admiralty.
RemotePeers []string
// HasLocalCompute indicates that at least one processing runs on the local kube.
// The local kube must receive its own COMPUTE_RESOURCE ArgoKubeEvent.
HasLocalCompute bool
// PeerImages maps each peer to the container images it must run.
// Key "" designates the local peer. Used for pre-pull and post-exec release.
PeerImages map[string][]string
Workflow Workflow
Services []*Service
Timeout int
RemotePeers []string
}
// Workflow is the root structure of the Argo Workflow YAML file.
// It matches exactly the format expected by the Argo controller.
type Workflow struct {
ApiVersion string `yaml:"apiVersion"`
Kind string `yaml:"kind"`
@@ -66,8 +42,6 @@ type Workflow struct {
Spec Spec `yaml:"spec,omitempty"`
}
// getDag returns a pointer to the workflow's "dag" template.
// If it does not exist yet, it is created and appended to the template list.
func (b *Workflow) getDag() *Dag {
for _, t := range b.Spec.Templates {
if t.Name == "dag" {
@@ -78,118 +52,59 @@ func (b *Workflow) getDag() *Dag {
return b.Spec.Templates[len(b.Spec.Templates)-1].Dag
}
// PodSecurityContext mirrors the subset of k8s PodSecurityContext used by Argo.
type PodSecurityContext struct {
RunAsUser *int64 `yaml:"runAsUser,omitempty"`
RunAsGroup *int64 `yaml:"runAsGroup,omitempty"`
FSGroup *int64 `yaml:"fsGroup,omitempty"`
}
// Spec contains the complete Argo workflow specification:
// service account, entrypoint, volumes, templates and timeout.
type Spec struct {
ArtifactRepositoryRef
ServiceAccountName string `yaml:"serviceAccountName,omitempty"`
Entrypoint string `yaml:"entrypoint"`
Arguments []Parameter `yaml:"arguments,omitempty"`
Volumes []VolumeClaimTemplate `yaml:"volumeClaimTemplates,omitempty"`
ExistingVolumes []ExistingVolume `yaml:"volumes,omitempty"`
Templates []Template `yaml:"templates"`
Timeout int `yaml:"activeDeadlineSeconds,omitempty"`
SecurityContext *PodSecurityContext `yaml:"securityContext,omitempty"`
ServiceAccountName string `yaml:"serviceAccountName"`
Entrypoint string `yaml:"entrypoint"`
Arguments []Parameter `yaml:"arguments,omitempty"`
Volumes []VolumeClaimTemplate `yaml:"volumeClaimTemplates,omitempty"`
Templates []Template `yaml:"templates"`
Timeout int `yaml:"activeDeadlineSeconds,omitempty"`
NodeSelector struct{
NodeRole string `yaml:"node-role"`
} `yaml:"nodeSelector"`
}
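For orientation, a sketch of the skeleton the newer Spec serializes to (values illustrative, placeholders hypothetical):

```yaml
spec:
  serviceAccountName: sa-<executionsID>
  entrypoint: dag
  activeDeadlineSeconds: 3600 # only emitted when Timeout > 0
  volumes: [] # ExistingVolumes referencing pre-provisioned PVCs
  templates:
    - name: dag
      dag:
        tasks: []
```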
// CreateDAG is the entry point of the Argo DAG construction.
// It creates all the templates (one per processing / native tool / sub-workflow),
// configures the persistent volumes, sets the workflow's global metadata and
// returns:
// - the number of tasks in the DAG,
// - the names of the first tasks (those without dependencies),
// - the names of the last tasks (those nothing depends on),
// - a possible error.
//
// The write parameter is kept for future use (actually writing the YAML).
// TODO: handle S3, GCS, Azure depending on the storage type linked to the processing.
func (b *ArgoBuilder) CreateDAG(exec *workflow_execution.WorkflowExecution, namespace string, write bool) (int, []string, []string, error) {
// TODO: found on a processing instance linked to storage
// add s3, gcs, azure, etc if needed on a link between processing and storage
func (b *ArgoBuilder) CreateDAG(namespace string, write bool) ( int, []string, []string, error) {
logger = logs.GetLogger()
logger.Info().Msg(fmt.Sprint("Creating DAG ", b.OriginWorkflow.Graph.Items))
// Create an Argo template for every node of the graph and collect the volumes.
firstItems, lastItems, volumes, err := b.createTemplates(exec, namespace)
if err != nil {
return 0, firstItems, lastItems, err
}
b.createVolumes(exec, volumes)
// handle services by checking if there is only one processing with hostname and port
firstItems, lastItems, volumes := b.createTemplates(namespace)
b.createVolumes(volumes)
if b.Timeout > 0 {
b.Workflow.Spec.Timeout = b.Timeout
}
b.Workflow.Spec.ServiceAccountName = "sa-" + namespace
b.Workflow.Spec.NodeSelector.NodeRole = "worker"
b.Workflow.Spec.ServiceAccountName = "sa-"+namespace
b.Workflow.Spec.Entrypoint = "dag"
b.Workflow.ApiVersion = "argoproj.io/v1alpha1"
b.Workflow.Kind = "Workflow"
if !write {
return len(b.Workflow.getDag().Tasks), firstItems, lastItems, nil
}
return len(b.Workflow.getDag().Tasks), firstItems, lastItems, nil
return len(b.Workflow.getDag().Tasks), firstItems, lastItems, nil
}
// createTemplates walks every node of the graph (processings, native tools,
// sub-workflows) and generates the corresponding Argo templates.
// It also handles the rewiring of DAG dependencies between nested
// sub-workflows, and adds the service pod when needed.
// Returns the first tasks, the last tasks and the volumes to create.
func (b *ArgoBuilder) createTemplates(exec *workflow_execution.WorkflowExecution, namespace string) ([]string, []string, []VolumeMount, error) {
func (b *ArgoBuilder) createTemplates(namespace string) ([]string, []string, []VolumeMount) {
volumes := []VolumeMount{}
firstItems := []string{}
lastItems := []string{}
// --- Processings ---
items := b.OriginWorkflow.GetGraphItems(b.OriginWorkflow.Graph.IsProcessing)
logger.Info().Msg(fmt.Sprint("Creating templates", len(items)))
for _, item := range b.OriginWorkflow.GetGraphItems(b.OriginWorkflow.Graph.IsProcessing) {
index := 0
_, res := item.GetResource()
if d, ok := exec.SelectedInstances[res.GetID()]; ok {
index = d
}
instance := item.Processing.GetSelectedInstance(&index)
instance := item.Processing.GetSelectedInstance()
logger.Info().Msg(fmt.Sprint("Creating template for", item.Processing.GetName(), instance))
if instance == nil || instance.(*resources.ProcessingInstance).Access == nil && instance.(*resources.ProcessingInstance).Access.Container != nil {
logger.Error().Msg("Not enough configuration setup, template can't be created : " + item.Processing.GetName())
return firstItems, lastItems, volumes, nil
}
// The same processing can be booked on several peers: we create a
// distinct Argo template per peer, deployed in parallel.
for _, pb := range getAllPeersForItem(exec, item.ID) {
var err error
volumes, firstItems, lastItems, err = b.createArgoTemplates(exec,
namespace, item.ID, pb.PeerID, pb.BookingID, item.Processing, volumes, firstItems, lastItems)
if err != nil {
return firstItems, lastItems, volumes, err
}
return firstItems, lastItems, volumes
}
volumes, firstItems, lastItems = b.createArgoTemplates(namespace,
item.ID, item.Processing, volumes, firstItems, lastItems)
}
// --- Native tools of type WORKFLOW_EVENT only ---
for _, item := range b.OriginWorkflow.GetGraphItems(b.OriginWorkflow.Graph.IsNativeTool) {
if item.NativeTool.Kind != int(native_tools.WORKFLOW_EVENT) {
continue
}
index := 0
_, res := item.GetResource()
if d, ok := exec.SelectedInstances[res.GetID()]; ok {
index = d
}
instance := item.NativeTool.GetSelectedInstance(&index)
logger.Info().Msg(fmt.Sprint("Creating template for", item.NativeTool.GetName(), instance))
var err error
volumes, firstItems, lastItems, err = b.createArgoTemplates(exec,
namespace, item.ID, "", item.ID, item.NativeTool, volumes, firstItems, lastItems)
if err != nil {
return firstItems, lastItems, volumes, err
}
}
// --- Sub-workflows: loading, recursive build and DAG merge ---
firstWfTasks := map[string][]string{}
latestWfTasks := map[string][]string{}
relatedWfTasks := map[string][]string{}
@@ -200,28 +115,24 @@ func (b *ArgoBuilder) createTemplates(exec *workflow_execution.WorkflowExecution
continue
}
subBuilder := ArgoBuilder{OriginWorkflow: realWorkflow.(*w.Workflow), Timeout: b.Timeout}
_, fi, li, err := subBuilder.CreateDAG(exec, namespace, false)
_, fi, li, err := subBuilder.CreateDAG(namespace, false)
if err != nil {
logger.Error().Msg("Error creating the subworkflow : " + err.Error())
continue
}
firstWfTasks[wf] = fi
if ok, depsOfIds := subBuilder.isArgoDependancy(exec, wf); ok { // the sub-workflow is a dependency of something else
if ok, depsOfIds := subBuilder.isArgoDependancy(wf); ok { // IS BEFORE
latestWfTasks[wf] = li
relatedWfTasks[wf] = depsOfIds
}
// Merge the sub-workflow's tasks, templates, volumes and arguments into the main DAG.
subDag := subBuilder.Workflow.getDag()
d := b.Workflow.getDag()
d.Tasks = append(d.Tasks, subDag.Tasks...)
d.Tasks = append(d.Tasks, subDag.Tasks...) // add the tasks of the subworkflow to the main workflow
b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, subBuilder.Workflow.Spec.Templates...)
b.Workflow.Spec.Volumes = append(b.Workflow.Spec.Volumes, subBuilder.Workflow.Spec.Volumes...)
b.Workflow.Spec.Arguments = append(b.Workflow.Spec.Arguments, subBuilder.Workflow.Spec.Arguments...)
b.Services = append(b.Services, subBuilder.Services...)
}
// Rewiring: tasks that depended on the sub-workflow now depend on
// its last real task (latestWfTasks).
for wfID, depsOfIds := range relatedWfTasks {
for _, dep := range depsOfIds {
for _, task := range b.Workflow.getDag().Tasks {
@@ -241,11 +152,8 @@ func (b *ArgoBuilder) createTemplates(exec *workflow_execution.WorkflowExecution
}
}
}
// The sub-workflow's first tasks inherit the dependencies
// the sub-workflow had towards the main DAG.
for wfID, fi := range firstWfTasks {
deps := b.getArgoDependencies(exec, wfID)
deps := b.getArgoDependencies(wfID)
if len(deps) > 0 {
for _, dep := range fi {
for _, task := range b.Workflow.getDag().Tasks {
@@ -256,253 +164,116 @@ func (b *ArgoBuilder) createTemplates(exec *workflow_execution.WorkflowExecution
}
}
}
// If Kubernetes services are needed, add the dedicated pod.
if b.Services != nil {
dag := b.Workflow.getDag()
dag.Tasks = append(dag.Tasks, Task{Name: "workflow-service-pod", Template: "workflow-service-pod"})
b.addServiceToArgo()
}
return firstItems, lastItems, volumes, nil
return firstItems, lastItems, volumes
}
// createArgoTemplates creates the Argo template for a graph node (processing
// or native tool) on a given peer. It:
// 1. Adds the task to the DAG with its dependencies.
// 2. Creates the container template (or the event template for native tools).
// 3. Adds the Admiralty annotations if peerID designates a remote peer.
// 4. Creates a Kubernetes service if the processing is declared IsService.
// 5. Configures the storage annotations (S3, local volumes).
func (b *ArgoBuilder) createArgoTemplates(
exec *workflow_execution.WorkflowExecution,
namespace string,
graphID string,
peerID string,
bookingID string,
obj resources.ResourceInterface,
func (b *ArgoBuilder) createArgoTemplates(namespace string,
id string,
processing *resources.ProcessingResource,
volumes []VolumeMount,
firstItems []string,
lastItems []string,
) ([]VolumeMount, []string, []string, error) {
_, firstItems, lastItems = b.addTaskToArgo(exec, b.Workflow.getDag(), graphID, bookingID, obj, firstItems, lastItems)
template := &Template{Name: getArgoName(obj.GetName(), bookingID)}
lastItems []string) ([]VolumeMount, []string, []string) {
_, firstItems, lastItems = b.addTaskToArgo(b.Workflow.getDag(), id, processing, firstItems, lastItems)
template := &Template{Name: getArgoName(processing.GetName(), id)}
logger.Info().Msg(fmt.Sprint("Creating template for", template.Name))
if obj.GetType() == tools.PROCESSING_RESOURCE.String() {
template.CreateContainer(exec, obj.(*resources.ProcessingResource), b.Workflow.getDag())
} else if obj.GetType() == tools.NATIVE_TOOL.String() {
template.CreateEventContainer(exec, obj.(*resources.NativeTool), b.Workflow.getDag())
}
// Register the image for pre-pull on the target peer.
// peerID == "" designates the local peer (key "" in PeerImages).
b.addPeerImage(peerID, template.Container.Image)
// Check whether the peer is remote (Admiralty).
isReparted, remotePeer := b.isPeerReparted(peerID)
isReparted, peerId := b.isProcessingReparted(*processing, id)
template.CreateContainer(processing, b.Workflow.getDag())
if isReparted {
logger.Debug().Msg("Reparted processing, on " + remotePeer.GetID())
b.RemotePeers = append(b.RemotePeers, remotePeer.GetID())
template.AddAdmiraltyAnnotations(remotePeer.GetID())
} else {
// Local processing: the local kube must be configured as well.
b.HasLocalCompute = true
logger.Debug().Msg("Reparted processing, on " + peerId)
b.RemotePeers = append(b.RemotePeers, peerId)
template.AddAdmiraltyAnnotations(peerId)
}
// If the processing exposes a Kubernetes service, register it and
// apply the "app" label so the Service can select it.
if obj.GetType() == tools.PROCESSING_RESOURCE.String() && obj.(*resources.ProcessingResource).IsService {
b.CreateService(exec, graphID, obj)
// get datacenter from the processing
if processing.IsService {
b.CreateService(id, processing)
template.Metadata.Labels = make(map[string]string)
template.Metadata.Labels["app"] = "oc-service-" + obj.GetName()
template.Metadata.Labels["app"] = "oc-service-" + processing.GetName() // Construct the template for the k8s service and add a link in graph between k8s service and processing
}
var err error
volumes, err = b.addStorageAnnotations(exec, graphID, template, namespace, volumes, isReparted)
if err != nil {
return volumes, firstItems, lastItems, err
}
b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, *template)
return volumes, firstItems, lastItems, nil
}
// addStorageAnnotations walks every storage node linked to the processing
// identified by id. For each storage link it:
// - Builds the Argo artifact name (read or write).
// - For S3 storages: calls waitForConsiders (STORAGE_RESOURCE) to wait for
// the PB_CONSIDERS validation before configuring the S3 annotations.
// - For local volumes: adds a VolumeMount to the container.
// If isReparted is true (Admiralty step), the local volume is flagged as
// reparted so that createVolumes does not generate a local-path PVC, which is
// incompatible with virtual kubelets.
func (b *ArgoBuilder) addStorageAnnotations(exec *workflow_execution.WorkflowExecution, id string, template *Template, namespace string, volumes []VolumeMount, isReparted bool) ([]VolumeMount, error) {
// Fetch every storage node connected to the current processing.
related := b.OriginWorkflow.GetByRelatedProcessing(id, b.OriginWorkflow.Graph.IsStorage)
for _, r := range related {
storage := r.Node.(*resources.StorageResource)
for _, linkToStorage := range r.Links {
for _, rw := range linkToStorage.StorageLinkInfos {
var art Artifact
// The artifact name must be alphanumeric plus '-' or '_'.
artifactBaseName := strings.Join(strings.Split(storage.GetName(), " "), "-") + "-" + strings.Replace(rw.FileName, ".", "-", -1)
art := Artifact{Path: template.ReplacePerEnv(rw.Source, linkToStorage.Env)}
if rw.Write {
// Write to S3: Path = path of the file in the pod.
art = Artifact{Path: template.ReplacePerEnv(rw.Source, linkToStorage.Env)}
art.Name = artifactBaseName + "-input-write"
art.Name = storage.GetName() + "-" + rw.Destination + "-input-write"
} else {
// Read from S3: Path = destination in the pod.
art = Artifact{Path: template.ReplacePerEnv(rw.Destination+"/"+rw.FileName, linkToStorage.Env)}
art.Name = artifactBaseName + "-input-read"
art.Name = storage.GetName() + "-" + rw.Destination + "-input-read"
}
if storage.StorageType == enum.S3 {
// For each compute resource linked to this S3 storage,
// notify through NATS and wait for the PB_CONSIDERS validation
// with DataType = STORAGE_RESOURCE before continuing.
// The goroutines run in parallel; a timeout on any of them
// is a fatal error that stops the rest of the build.
relatedProcessing := b.getStorageRelatedProcessing(storage.GetID())
var wg sync.WaitGroup
errCh := make(chan error, len(relatedProcessing))
for _, r := range relatedProcessing {
wg.Add(1)
go waitForConsiders(exec.ExecutionsID, tools.STORAGE_RESOURCE, ArgoKubeEvent{
ExecutionsID: exec.ExecutionsID,
DestPeerID: r.GetID(),
Type: tools.STORAGE_RESOURCE,
SourcePeerID: storage.GetCreatorID(),
OriginID: conf.GetConfig().PeerID,
}, &wg, errCh)
art.S3 = &Key{
Key: template.ReplacePerEnv(rw.Destination+"/"+rw.FileName, linkToStorage.Env),
Insecure: true, // temporary
}
wg.Wait()
close(errCh)
for err := range errCh {
if err != nil {
return volumes, err
sel := storage.GetSelectedInstance()
if sel != nil {
if sel.(*resources.StorageResourceInstance).Credentials != nil {
tool, err := tools2.NewService(conf.GetConfig().Mode)
if err != nil || tool == nil {
logger.Error().Msg("Could not create the access secret")
} else {
id, err := tool.CreateAccessSecret(namespace,
sel.(*resources.StorageResourceInstance).Credentials.Login,
sel.(*resources.StorageResourceInstance).Credentials.Pass)
if err == nil {
art.S3.AccessKeySecret = &Secret{
Name: id,
Key: "access-key",
}
art.S3.SecretKeySecret = &Secret{
Name: id,
Key: "secret-key",
}
}
}
}
art.S3.Key = strings.ReplaceAll(art.S3.Key, sel.(*resources.StorageResourceInstance).Source+"/", "")
art.S3.Key = strings.ReplaceAll(art.S3.Key, sel.(*resources.StorageResourceInstance).Source, "")
splits := strings.Split(art.S3.EndPoint, "/")
if len(splits) > 1 {
art.S3.Bucket = splits[0]
art.S3.EndPoint = strings.Join(splits[1:], "/")
} else {
art.S3.Bucket = splits[0]
}
}
// Configure the S3 artifact repository reference in the Spec.
b.addS3annotations(storage, namespace)
}
if rw.Write {
template.Outputs.Artifacts = append(template.Outputs.Artifacts, art)
template.Outputs.Artifacts = append(template.Inputs.Artifacts, art)
} else {
template.Inputs.Artifacts = append(template.Inputs.Artifacts, art)
template.Inputs.Artifacts = append(template.Outputs.Artifacts, art)
}
}
}
// If the storage instance is local, pre-provision the PVC through
// oc-datacenter (same pattern as MinIO) then mount an existing volume.
index := 0
if s, ok := exec.SelectedInstances[storage.GetID()]; ok {
index = s
if storage.SelectedInstanceIndex != nil && (*storage.SelectedInstanceIndex) >= 0 {
index = *storage.SelectedInstanceIndex
}
s := storage.Instances[index]
if s.Local {
var pvcWg sync.WaitGroup
pvcErrCh := make(chan error, 1)
pvcWg.Add(1)
go waitForConsiders(exec.ExecutionsID, tools.STORAGE_RESOURCE, ArgoKubeEvent{
ExecutionsID: exec.ExecutionsID,
Type: tools.STORAGE_RESOURCE,
SourcePeerID: conf.GetConfig().PeerID,
DestPeerID: conf.GetConfig().PeerID,
OriginID: conf.GetConfig().PeerID,
MinioID: storage.GetID(),
Local: true,
StorageName: storage.GetName(),
}, &pvcWg, pvcErrCh)
pvcWg.Wait()
close(pvcErrCh)
for err := range pvcErrCh {
if err != nil {
return volumes, err
}
}
volumes = template.Container.AddVolumeMount(VolumeMount{
Name: strings.ReplaceAll(strings.ToLower(storage.GetName()), " ", "-"),
MountPath: s.Source,
Storage: storage,
IsReparted: isReparted,
Name: strings.ReplaceAll(strings.ToLower(storage.GetName()), " ", "-"),
MountPath: s.Source,
Storage: storage,
}, volumes)
}
}
return volumes, nil
b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, *template)
return volumes, firstItems, lastItems
}
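To make the artifact wiring above concrete, a sketch of one generated S3 read artifact (names, paths and secret hypothetical):

```yaml
inputs:
  artifacts:
    - name: my-storage-dataset-csv-input-read
      path: /data/dataset.csv
      s3:
        key: dataset.csv
        insecure: true # temporary, as noted above
        accessKeySecret:
          name: <secret-name> # created through CreateAccessSecret
          key: access-key
        secretKeySecret:
          name: <secret-name>
          key: secret-key
```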
// getStorageRelatedProcessing returns the list of compute resources
// connected (through an intermediate processing) to the storage identified by storageId.
// These resources are used to build the ArgoKubeEvents meant for the
// NATS validation.
func (b *ArgoBuilder) getStorageRelatedProcessing(storageId string) (res []resources.ResourceInterface) {
var storageLinks []graph.GraphLink
// Keep only the links involving this storage.
for _, link := range b.OriginWorkflow.Graph.Links {
if link.Destination.ID == storageId || link.Source.ID == storageId {
storageLinks = append(storageLinks, link)
}
}
for _, link := range storageLinks {
var resourceId string
// The link's opposite end is either the source or the destination depending on the direction.
if link.Source.ID != storageId {
resourceId = link.Source.ID
} else {
resourceId = link.Destination.ID
}
// If the opposite end is a processing, fetch its compute resources.
if b.OriginWorkflow.Graph.IsProcessing(b.OriginWorkflow.Graph.Items[resourceId]) {
res = append(res, b.getComputeProcessing(resourceId)...)
}
}
return
}
// getComputeProcessing returns every compute resource attached to the
// processing identified by processingId in the workflow graph.
func (b *ArgoBuilder) getComputeProcessing(processingId string) (res []resources.ResourceInterface) {
arr := []resources.ResourceInterface{}
computeRel := b.OriginWorkflow.GetByRelatedProcessing(processingId, b.OriginWorkflow.Graph.IsCompute)
for _, rel := range computeRel {
arr = append(arr, rel.Node)
}
return arr
}
// addS3annotations configures the S3 artifact repository reference in the Argo
// workflow Spec. The ConfigMap and the key are derived from the storage ID.
// The namespace is kept in the signature for future evolution.
func (b *ArgoBuilder) addS3annotations(storage *resources.StorageResource, namespace string) {
b.Workflow.Spec.ArtifactRepositoryRef = ArtifactRepositoryRef{
ConfigMap: storage.GetID() + "-artifact-repository",
Key: storage.GetID() + "-s3-local",
}
}
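The reference above serializes to this spec fragment (storage ID hypothetical):

```yaml
spec:
  artifactRepositoryRef:
    configMap: <storageID>-artifact-repository
    key: <storageID>-s3-local
```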
// addTaskToArgo adds a task to the Argo DAG for the graphItemID node.
// It resolves the DAG dependencies, propagates the environment, input and
// output parameters of the selected instance, and updates the
// firstItems / lastItems lists used for the sub-workflow rewiring.
// bookingID is the unique name of this instance (may vary per peer).
func (b *ArgoBuilder) addTaskToArgo(exec *workflow_execution.WorkflowExecution, dag *Dag, graphItemID string, bookingID string, processing resources.ResourceInterface,
func (b *ArgoBuilder) addTaskToArgo(dag *Dag, graphItemID string, processing *resources.ProcessingResource,
firstItems []string, lastItems []string) (*Dag, []string, []string) {
unique_name := getArgoName(processing.GetName(), bookingID)
unique_name := getArgoName(processing.GetName(), graphItemID)
step := Task{Name: unique_name, Template: unique_name}
index := 0
if d, ok := exec.SelectedInstances[processing.GetID()]; ok {
index = d
}
instance := processing.GetSelectedInstance(&index)
instance := processing.GetSelectedInstance()
if instance != nil {
// Propagate the instance's environment variables, inputs and outputs
// to the Argo task parameters.
for _, value := range instance.(*resources.ProcessingInstance).Env {
step.Arguments.Parameters = append(step.Arguments.Parameters, Parameter{
Name: value.Name,
@@ -522,10 +293,7 @@ func (b *ArgoBuilder) addTaskToArgo(exec *workflow_execution.WorkflowExecution,
})
}
}
step.Dependencies = b.getArgoDependencies(exec, graphItemID)
// Determine whether this node is a first or last task of the DAG.
step.Dependencies = b.getArgoDependencies(graphItemID)
name := ""
if b.OriginWorkflow.Graph.Items[graphItemID].Processing != nil {
name = b.OriginWorkflow.Graph.Items[graphItemID].Processing.GetName()
@@ -534,51 +302,31 @@ func (b *ArgoBuilder) addTaskToArgo(exec *workflow_execution.WorkflowExecution,
name = b.OriginWorkflow.Graph.Items[graphItemID].Workflow.GetName()
}
if len(step.Dependencies) == 0 && name != "" {
firstItems = append(firstItems, getArgoName(name, bookingID))
firstItems = append(firstItems, getArgoName(name, graphItemID))
}
if ok, _ := b.isArgoDependancy(exec, graphItemID); !ok && name != "" {
lastItems = append(lastItems, getArgoName(name, bookingID))
if ok, _ := b.isArgoDependancy(graphItemID); !ok && name != "" {
lastItems = append(lastItems, getArgoName(name, graphItemID))
}
dag.Tasks = append(dag.Tasks, step)
return dag, firstItems, lastItems
}
// createVolumes references the PVCs pre-provisioned by oc-datacenter as
// existing volumes (ExistingVolumes) in the Argo Spec.
// The PVC name is computed deterministically: <storageName>-<executionsID>,
// identical to ClaimName() in oc-datacenter/infrastructure/storage/pvc_setter.go.
func (b *ArgoBuilder) createVolumes(exec *workflow_execution.WorkflowExecution, volumes []VolumeMount) {
seen := make(map[string]struct{})
for _, volume := range volumes {
name := strings.ReplaceAll(strings.ToLower(volume.Name), " ", "-")
if _, ok := seen[name]; ok {
continue
}
seen[name] = struct{}{}
claimName := name + "-" + exec.ExecutionsID
ev := ExistingVolume{}
ev.Name = name
ev.PersistentVolumeClaim.ClaimName = claimName
b.Workflow.Spec.ExistingVolumes = append(b.Workflow.Spec.ExistingVolumes, ev)
}
// hostPath PVs are created as root:root 0755. Ensure pods can read/write
// by running as root when local volumes are present.
if len(b.Workflow.Spec.ExistingVolumes) > 0 && b.Workflow.Spec.SecurityContext == nil {
zero := int64(0)
b.Workflow.Spec.SecurityContext = &PodSecurityContext{
RunAsUser: &zero,
RunAsGroup: &zero,
FSGroup: &zero,
}
}
}
func (b *ArgoBuilder) createVolumes(volumes []VolumeMount) { // TODO : think about remote volumes (TG)
for _, volume := range volumes {
index := 0
if volume.Storage.SelectedInstanceIndex != nil && (*volume.Storage.SelectedInstanceIndex) >= 0 {
index = *volume.Storage.SelectedInstanceIndex
}
storage := volume.Storage.Instances[index]
new_volume := VolumeClaimTemplate{}
new_volume.Metadata.Name = strings.ReplaceAll(strings.ToLower(volume.Name), " ", "-")
new_volume.Spec.AccessModes = []string{"ReadWriteOnce"}
new_volume.Spec.Resources.Requests.Storage = fmt.Sprintf("%v", storage.SizeGB) + storage.SizeType.ToArgo()
b.Workflow.Spec.Volumes = append(b.Workflow.Spec.Volumes, new_volume)
}
}
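A self-contained sketch of the deterministic claim-name convention described above, assuming a storage named "My Storage" and a made-up executions ID:

```go
package main

import (
	"fmt"
	"strings"
)

// claimName reproduces <normalized storage name>-<executionsID>, the same
// rule as ClaimName() in oc-datacenter's pvc_setter.go.
func claimName(storageName, executionsID string) string {
	name := strings.ReplaceAll(strings.ToLower(storageName), " ", "-")
	return name + "-" + executionsID
}

func main() {
	fmt.Println(claimName("My Storage", "exec-42")) // my-storage-exec-42
}
```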
// isArgoDependancy checks whether the node identified by id is a dependency
// of at least one other node of the DAG (i.e. whether there is an outgoing
// link to a processing or a workflow).
// Returns true plus the list of the Argo names of the nodes that depend on it.
func (b *ArgoBuilder) isArgoDependancy(exec *workflow_execution.WorkflowExecution, id string) (bool, []string) {
func (b *ArgoBuilder) isArgoDependancy(id string) (bool, []string) {
dependancyOfIDs := []string{}
isDeps := false
for _, link := range b.OriginWorkflow.Graph.Links {
@@ -589,26 +337,18 @@ func (b *ArgoBuilder) isArgoDependancy(exec *workflow_execution.WorkflowExecutio
source := b.OriginWorkflow.Graph.Items[link.Destination.ID].Processing
if id == link.Source.ID && source != nil {
isDeps = true
for _, pb := range getAllPeersForItem(exec, link.Destination.ID) {
dependancyOfIDs = append(dependancyOfIDs, getArgoName(source.GetName(), pb.BookingID))
}
dependancyOfIDs = append(dependancyOfIDs, getArgoName(source.GetName(), link.Destination.ID))
}
wourceWF := b.OriginWorkflow.Graph.Items[link.Destination.ID].Workflow
if id == link.Source.ID && wourceWF != nil {
isDeps = true
for _, pb := range getAllPeersForItem(exec, link.Destination.ID) {
dependancyOfIDs = append(dependancyOfIDs, getArgoName(wourceWF.GetName(), pb.BookingID))
}
dependancyOfIDs = append(dependancyOfIDs, getArgoName(wourceWF.GetName(), link.Destination.ID))
}
}
return isDeps, dependancyOfIDs
}
// getArgoDependencies returns the list of the Argo task names the node
// identified by id depends on (incoming links from processings).
// If the source processing is booked on N peers, all of its instances are
// returned as dependencies (the current task waits for every instance).
func (b *ArgoBuilder) getArgoDependencies(exec *workflow_execution.WorkflowExecution, id string) (dependencies []string) {
func (b *ArgoBuilder) getArgoDependencies(id string) (dependencies []string) {
for _, link := range b.OriginWorkflow.Graph.Links {
if _, ok := b.OriginWorkflow.Graph.Items[link.Source.ID]; !ok {
logger.Info().Msg(fmt.Sprint("Could not find the source of the link", link.Source.ID))
@@ -616,17 +356,14 @@ func (b *ArgoBuilder) getArgoDependencies(exec *workflow_execution.WorkflowExecu
}
source := b.OriginWorkflow.Graph.Items[link.Source.ID].Processing
if id == link.Destination.ID && source != nil {
for _, pb := range getAllPeersForItem(exec, link.Source.ID) {
dependencies = append(dependencies, getArgoName(source.GetName(), pb.BookingID))
}
dependency_name := getArgoName(source.GetName(), link.Source.ID)
dependencies = append(dependencies, dependency_name)
continue
}
}
return
}
// getArgoName builds the unique name of an Argo task / template from the
// resource's human-readable name and its ID in the graph.
// Spaces are replaced with dashes and everything is lowercased.
func getArgoName(raw_name string, component_id string) (formatedName string) {
formatedName = strings.ReplaceAll(raw_name, " ", "-")
formatedName += "-" + component_id
@@ -634,228 +371,95 @@ func getArgoName(raw_name string, component_id string) (formatedName string) {
return
}
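A runnable sketch of the same transformation; the lowercasing step sits in the part of the function elided by the diff hunk above:

```go
package main

import (
	"fmt"
	"strings"
)

// argoName mimics getArgoName: spaces become dashes, the graph/booking ID is
// appended, and the whole name is lowercased.
func argoName(rawName, componentID string) string {
	return strings.ToLower(strings.ReplaceAll(rawName, " ", "-") + "-" + componentID)
}

func main() {
	fmt.Println(argoName("Fetch Data", "0f3a")) // fetch-data-0f3a
}
```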
// peerBooking associates a peerID with its bookingID for a graph item.
type peerBooking struct {
PeerID string
BookingID string
}
// getAllPeersForItem returns every (peerID, bookingID) pair recorded in
// PeerBookByGraph for a given item. If no booking is found (item not yet
// scheduled, or a sub-workflow), it returns a local fallback entry with
// BookingID = graphItemID.
func getAllPeersForItem(exec *workflow_execution.WorkflowExecution, graphItemID string) []peerBooking {
var result []peerBooking
for peerID, byGraph := range exec.PeerBookByGraph {
if bookings, ok := byGraph[graphItemID]; ok && len(bookings) > 0 {
result = append(result, peerBooking{PeerID: peerID, BookingID: bookings[0]})
}
}
if len(result) == 0 {
result = []peerBooking{{PeerID: "", BookingID: graphItemID}}
}
return result
}
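A self-contained sketch of the fallback rule, with a stub standing in for oc-lib's WorkflowExecution (the field shape is inferred from the loop above):

```go
package main

import "fmt"

type peerBooking struct{ PeerID, BookingID string }

// stubExec stands in for workflow_execution.WorkflowExecution.
type stubExec struct {
	PeerBookByGraph map[string]map[string][]string // peerID -> graphItemID -> bookingIDs
}

func allPeersForItem(exec stubExec, graphItemID string) []peerBooking {
	var result []peerBooking
	for peerID, byGraph := range exec.PeerBookByGraph {
		if bookings, ok := byGraph[graphItemID]; ok && len(bookings) > 0 {
			result = append(result, peerBooking{peerID, bookings[0]})
		}
	}
	if len(result) == 0 {
		// unscheduled item or sub-workflow: local fallback keyed by the graph ID
		result = []peerBooking{{"", graphItemID}}
	}
	return result
}

func main() {
	fmt.Println(allPeersForItem(stubExec{}, "node-1")) // [{ node-1}]
}
```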
// isPeerReparted checks whether peerID designates a remote peer (Relation != 1).
// An empty peerID means local execution: it returns false without any network call.
func (b *ArgoBuilder) isPeerReparted(peerID string) (bool, *peer.Peer) {
if peerID == "" {
return false, nil
// Verify whether a processing resource is attached to a Compute other than the one hosting
// the current Open Cloud instance. If so, return the peer ID to contact.
func (b *ArgoBuilder) isProcessingReparted(processing resources.ProcessingResource, graphID string) (bool, string) {
computeAttached := b.retrieveProcessingCompute(graphID)
if computeAttached == nil {
logger.Error().Msg("No compute was found attached to processing " + processing.Name + " : " + processing.UUID)
panic(0)
}
// Creates an accessor strictly for the Peer Collection
req := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER), "", "", nil, nil)
if req == nil {
fmt.Println("TODO : handle error when trying to create a request on the Peer Collection")
return false, nil
return false, ""
}
res := req.LoadOne(peerID)
res := req.LoadOne(computeAttached.CreatorID)
if res.Err != "" {
fmt.Print("TODO : handle error when requesting PeerID: " + res.Err)
return false, nil
fmt.Print("TODO : handle error when requesting PeerID")
fmt.Print(res.Err)
return false, ""
}
p := res.ToPeer()
// Relation == 1 means "myself": the processing is local.
isNotReparted := p.Relation == 1
logger.Info().Msg(fmt.Sprint("Result IsMySelf for ", p.UUID, " : ", isNotReparted))
return !isNotReparted, p
peer := *res.ToPeer()
isNotReparted := peer.State == 1
logger.Info().Msg(fmt.Sprint("Result IsMySelf for ", peer.UUID, " : ", isNotReparted))
return !isNotReparted, peer.UUID
}
// waitForConsiders publishes an ArgoKubeEvent on NATS then waits for the
// PB_CONSIDERS confirmation through the global cache (globalConsidersCache),
// without opening an extra NATS connection. The centralized listener
// (StartConsidersListener) dispatches the message to the right channel.
// A 5-minute timeout prevents blocking forever.
func waitForConsiders(executionsId string, dataType tools.DataType, event ArgoKubeEvent, wg *sync.WaitGroup, errCh chan<- error) {
defer wg.Done()
// Serialize the event and publish it on ARGO_KUBE_EVENT.
b, err := json.Marshal(event)
if err != nil {
logger.Error().Msg("Cannot marshal ArgoKubeEvent: " + err.Error())
errCh <- err
return
}
tools.NewNATSCaller().SetNATSPub(tools.ARGO_KUBE_EVENT, tools.NATSResponse{
FromApp: "oc-monitord",
Datatype: dataType,
User: "root",
Method: int(tools.ARGO_KUBE_EVENT),
Payload: b,
})
// Register in the cache and wait for the confirmation.
// For COMPUTE_RESOURCE, SourcePeerID differentiates the compute peer (local or remote).
// For STORAGE_RESOURCE, SourcePeerID is the peer hosting the storage.
key := considersKey(executionsId, dataType, event.SourcePeerID)
ch, unregister := globalConsidersCache.register(key)
defer unregister()
select {
case <-ch:
logger.Info().Msg(fmt.Sprintf("PB_CONSIDERS received for executions_id=%s datatype=%s source_peer=%s dest_peer=%s", executionsId, dataType.String(), event.SourcePeerID, event.DestPeerID))
errCh <- nil
case <-time.After(5 * time.Minute):
err := fmt.Errorf("timeout waiting for PB_CONSIDERS executions_id=%s datatype=%s", executionsId, dataType.String())
logger.Error().Msg(err.Error())
errCh <- err
}
}
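A hypothetical call site (a fragment, assuming it runs inside a function of this package), mirroring the fan-out pattern CompleteBuild uses further down:

```go
var wg sync.WaitGroup
errCh := make(chan error, 1)
wg.Add(1)
go waitForConsiders("exec-42", tools.COMPUTE_RESOURCE, ArgoKubeEvent{
	ExecutionsID: "exec-42",
	Type:         tools.COMPUTE_RESOURCE,
	DestPeerID:   "peer-a",
	SourcePeerID: "peer-a",
	OriginID:     "peer-a",
}, &wg, errCh)
wg.Wait()
close(errCh)
for err := range errCh {
	if err != nil {
		// a peer never confirmed within 5 minutes: abort the build
	}
}
```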
// ArgoKubeEvent is the structure published on NATS when requesting the
// provisioning of a remote resource (Admiralty or S3 storage).
// The OriginID field identifies the initiating peer: the PB_CONSIDERS
// response is routed back to it by the propagation system.
type ArgoKubeEvent struct {
// ExecutionsID is the identifier of the workflow execution in progress.
ExecutionsID string `json:"executions_id"`
// DestPeerID is the destination peer (target compute or S3 peer).
DestPeerID string `json:"dest_peer_id"`
// Type indicates the nature of the resource: COMPUTE_RESOURCE or STORAGE_RESOURCE.
Type tools.DataType `json:"data_type"`
// SourcePeerID is the source peer of the requested resource.
SourcePeerID string `json:"source_peer_id"`
// OriginID is the peer that initiated the provisioning request;
// the PB_CONSIDERS response is sent back to it.
OriginID string `json:"origin_id"`
// MinioID is the ID of the storage resource (Minio or local PVC).
MinioID string `json:"minio_id,omitempty"`
// Local flags a Local=true storage (a PVC pre-provisioned by oc-datacenter).
Local bool `json:"local,omitempty"`
// StorageName is the normalized storage name, used to compute the claimName.
StorageName string `json:"storage_name,omitempty"`
// Images is the list of container images to pre-pull on the target peer
// before the workflow starts. Empty for STORAGE_RESOURCE events.
Images []string `json:"images,omitempty"`
}
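For reference, the JSON shape this struct serializes to; the snippet uses a trimmed local copy (Type omitted, since tools.DataType lives in oc-lib) with the same tags:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type argoKubeEvent struct {
	ExecutionsID string   `json:"executions_id"`
	DestPeerID   string   `json:"dest_peer_id"`
	SourcePeerID string   `json:"source_peer_id"`
	OriginID     string   `json:"origin_id"`
	Images       []string `json:"images,omitempty"`
}

func main() {
	b, _ := json.Marshal(argoKubeEvent{
		ExecutionsID: "exec-42",
		DestPeerID:   "peer-local",
		SourcePeerID: "peer-remote",
		OriginID:     "peer-local",
		Images:       []string{"alpine:3.20"},
	})
	fmt.Println(string(b))
	// {"executions_id":"exec-42","dest_peer_id":"peer-local","source_peer_id":"peer-remote","origin_id":"peer-local","images":["alpine:3.20"]}
}
```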
// addPeerImage registers an image to pre-pull for a given peer.
// The "" key designates the local peer. Duplicate images are ignored.
func (b *ArgoBuilder) addPeerImage(peerID, image string) {
if image == "" {
return
}
if b.PeerImages == nil {
b.PeerImages = make(map[string][]string)
}
for _, existing := range b.PeerImages[peerID] {
if existing == image {
return
}
}
b.PeerImages[peerID] = append(b.PeerImages[peerID], image)
}
func (b *ArgoBuilder) retrieveProcessingCompute(graphID string) *resources.ComputeResource {
for _, link := range b.OriginWorkflow.Graph.Links {
// If the link involves this processing's id, inspect the opposite end
var oppositeId string
if link.Source.ID == graphID {
oppositeId = link.Destination.ID
} else if link.Destination.ID == graphID {
oppositeId = link.Source.ID
}
if oppositeId != "" {
dt, res := b.OriginWorkflow.Graph.GetResource(oppositeId)
if dt == oclib.COMPUTE_RESOURCE {
return res.(*resources.ComputeResource)
} else {
continue
}
}
}
return nil
}
// CompleteBuild finalizes the Argo workflow construction once the DAG has
// been generated. In order, it:
// 1. For each remote peer (Admiralty): publishes a COMPUTE_RESOURCE
// ArgoKubeEvent and waits for the PB_CONSIDERS validation via waitForConsiders.
// 2. Updates the Admiralty annotations of the templates with the cluster name
// built from the peerId and the executionsId.
// 3. Serializes the workflow to YAML and writes it under ./argo_workflows/.
//
// Returns the path of the generated YAML file.
// Execute the last actions once the YAML file for the Argo Workflow is created
func (b *ArgoBuilder) CompleteBuild(executionsId string) (string, error) {
logger.Info().Msg("DEV :: Completing build")
// --- Step 1: kube validation for every peer (local + remote) ---
// The goroutines run in parallel; a timeout is a fatal error.
// Deduplicate RemotePeers: several processings may point to the same remote
// peer, and only one ArgoKubeEvent must be sent per peer.
seen := make(map[string]struct{})
uniqueRemotePeers := b.RemotePeers[:0]
for _, p := range b.RemotePeers {
if _, ok := seen[p]; !ok {
seen[p] = struct{}{}
uniqueRemotePeers = append(uniqueRemotePeers, p)
}
}
b.RemotePeers = uniqueRemotePeers
total := len(b.RemotePeers)
if b.HasLocalCompute {
total++
}
var wg sync.WaitGroup
errCh := make(chan error, total)
// The local kube must also be configured if it hosts at least one processing.
if b.HasLocalCompute {
if localPeer, err := oclib.GetMySelf(); err == nil {
logger.Info().Msg("DEV :: Launching local kube setup for " + localPeer.GetID())
wg.Add(1)
go waitForConsiders(executionsId, tools.COMPUTE_RESOURCE, ArgoKubeEvent{
ExecutionsID: executionsId,
Type: tools.COMPUTE_RESOURCE,
DestPeerID: localPeer.GetID(),
SourcePeerID: localPeer.GetID(),
OriginID: localPeer.GetID(),
Images: b.PeerImages[""], // images to pre-pull on the local cluster
}, &wg, errCh)
}
}
// Remote peers via Admiralty.
logger.Info().Msg(fmt.Sprint("DEV :: Completing build"))
setter := AdmiraltySetter{Id: executionsId}
// Setup admiralty for each node
for _, peer := range b.RemotePeers {
logger.Info().Msg(fmt.Sprint("DEV :: Launching Admiralty Setup for ", peer))
if self, err := oclib.GetMySelf(); err == nil {
wg.Add(1)
go waitForConsiders(executionsId, tools.COMPUTE_RESOURCE, ArgoKubeEvent{
ExecutionsID: executionsId,
Type: tools.COMPUTE_RESOURCE,
DestPeerID: self.GetID(),
SourcePeerID: peer,
OriginID: self.GetID(),
Images: b.PeerImages[peer], // images to pre-pull on the remote cluster (via the Admiralty kubeconfig)
}, &wg, errCh)
}
setter.InitializeAdmiralty(conf.GetConfig().PeerID, peer)
}
wg.Wait()
close(errCh)
for err := range errCh {
if err != nil {
return "", err
// Update the name of the admiralty node to use
for _, template := range b.Workflow.Spec.Templates {
if len(template.Metadata.Annotations) > 0 {
if peerId, ok := template.Metadata.Annotations["multicluster.admiralty.io/clustername"]; ok {
template.Metadata.Annotations["multicluster.admiralty.io/clustername"] = "target-" + peerId + "-" + conf.GetConfig().ExecutionID
}
}
}
// --- Step 2: generate and write the YAML file ---
// Generate the YAML file
random_name := fakelish.GenerateFakeWord(5, 8) + "-" + fakelish.GenerateFakeWord(5, 8)
b.Workflow.Metadata.Name = "oc-monitor-" + random_name
logger = oclib.GetLogger()
yamlified, err := yaml.Marshal(b.Workflow)
if err != nil {
logger.Error().Msg("Could not transform object to yaml file")
return "", err
return "", err
}
// Timestamped file name in the DD_MM_YYYY_hhmmss format.
current_timestamp := time.Now().UTC().Format("02_01_2006_150405")
// Give a unique name to each argo file with its timestamp DD_MM_YYYY_hhmmss
current_timestamp := time.Now().Format("02_01_2006_150405")
file_name := random_name + "_" + current_timestamp + ".yml"
workflows_dir := "./argo_workflows/"
err = os.WriteFile(workflows_dir+file_name, []byte(yamlified), 0660)
if err != nil {
logger.Error().Msg("Could not write the yaml file")
return "", err
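A small sketch of the filename convention; the fakelish pair is invented, and the timestamp uses Go's reference-time layout, where "02_01_2006_150405" reads as DD_MM_YYYY_hhmmss:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	ts := time.Date(2025, time.May, 28, 10, 53, 15, 0, time.UTC).Format("02_01_2006_150405")
	fmt.Println("plomky-vakund_" + ts + ".yml") // plomky-vakund_28_05_2025_105315.yml
}
```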

View File

@@ -1,128 +0,0 @@
# argo_builder.go — Summary
## Overall role
`argo_builder.go` translates an **Open Cloud Workflow** (a graph of nodes: processings,
storages, computes, sub-workflows) into an **Argo Workflow YAML file** ready to
be submitted to a Kubernetes cluster.
---
## Main structures
| Struct | Role |
|---|---|
| `ArgoBuilder` | Main builder. Holds the source workflow, the YAML structure being built, the k8s services, the timeout and the list of remote peers (Admiralty). |
| `Workflow` | Root of the Argo YAML (`apiVersion`, `kind`, `metadata`, `spec`). |
| `Spec` | Workflow specification: service account, entrypoint, templates, volumes, timeout, reference to the S3 artifact repository. |
| `ArgoKubeEvent` | Event published on NATS when requesting the provisioning of a remote resource (compute or S3 storage). Contains `executions_id`, `dest_peer_id`, `source_peer_id`, `data_type`, `origin_id`. |
---
## Main execution flow
```
CreateDAG()
└─ createTemplates()
├─ [for each processing] createArgoTemplates()
│ ├─ addTaskToArgo() → adds the task to the DAG + its dependencies
│ ├─ CreateContainer() → Argo container template
│ ├─ AddAdmiraltyAnnotations() → if a remote peer is detected
│ └─ addStorageAnnotations() → S3 + local volumes
├─ [for each WORKFLOW_EVENT native tool] createArgoTemplates()
└─ [for each sub-workflow]
├─ recursive CreateDAG()
└─ DAG merge + dependency rewiring
└─ createVolumes() → PersistentVolumeClaims
CompleteBuild()
├─ waitForConsiders() × N peers → Admiralty validation (COMPUTE_RESOURCE)
├─ Admiralty annotation update (clustername)
└─ YAML written to ./argo_workflows/
```
---
## Key functions
### `CreateDAG(exec, namespace, write) → (taskCount, firstItems, lastItems, err)`
Entry point. Initializes the logger, triggers template and volume creation, and
sets the Argo workflow's global metadata.
### `createTemplates(exec, namespace) → (firstItems, lastItems, volumes)`
Iterates over every node of the graph.
- Processings → container template.
- `WORKFLOW_EVENT` native tools → event template.
- Sub-workflows → recursive build + DAG merge + rewiring of incoming/outgoing dependencies.
### `createArgoTemplates(exec, namespace, id, obj, …)`
Creates the Argo template for a given node.
Detects whether the processing is **reparted** (remote peer via `isReparted`) → adds the
Admiralty annotations and records the peer in `RemotePeers`.
Delegates storage configuration to `addStorageAnnotations`.
### `addStorageAnnotations(exec, id, template, namespace, volumes)`
For each storage linked to the processing:
- **S3**: calls `waitForConsiders(STORAGE_RESOURCE)` for each associated compute,
then configures the artifact repository reference via `addS3annotations`.
- **Local**: mounts a `VolumeMount` in the container.
### `waitForConsiders(executionsId, dataType, event)`
**Blocking function.**
1. Publishes the `ArgoKubeEvent` on the `ARGO_KUBE_EVENT` NATS channel.
2. Subscribes to `PROPALGATION_EVENT`.
3. Waits for a `PropalgationMessage` satisfying:
- `Action == PB_CONSIDERS`
- `DataType == dataType`
- `Payload.executions_id == executionsId`
4. Timeout: **5 minutes**.
| Caller | Expected DataType | Meaning |
|---|---|---|
| `addStorageAnnotations` (S3) | `STORAGE_RESOURCE` | The remote S3 storage is ready |
| `CompleteBuild` (Admiralty) | `COMPUTE_RESOURCE` | The target Admiralty cluster is configured |
### `CompleteBuild(executionsId) → (yamlPath, err)`
Finalizes the build:
1. For each peer in `RemotePeers` → `waitForConsiders(COMPUTE_RESOURCE)` (blocking, sequential).
2. Updates the `multicluster.admiralty.io/clustername` annotations to `target-<peerId>-<executionsId>`.
3. Serializes the workflow to YAML and writes it to `./argo_workflows/<name>_<timestamp>.yml`.
### `isReparted(processing, graphID) → (bool, *peer.Peer)`
Finds the Compute attached to the processing, loads the owning Peer through the
oc-lib API, and checks whether `Relation != 1` (not the local peer).
### `addTaskToArgo(exec, dag, graphItemID, processing, …)`
Creates an Argo `Task` (unique name, template, DAG dependencies, env/input/output parameters)
and attaches it to the DAG. Updates `firstItems` / `lastItems`.
### `isArgoDependancy(id) → (bool, []string)`
Checks whether a node is used as the source of an outgoing link to another
processing or workflow (i.e. it is a dependency for someone else).
### `getArgoDependencies(id) → []string`
Returns the names of the Argo tasks this node depends on (incoming links).
---
## NATS protocol used
```
Publish → channel : ARGO_KUBE_EVENT
payload : NATSResponse{Method: PROPALGATION_EVENT, Payload: ArgoKubeEvent}
Wait ← channel : PROPALGATION_EVENT
filter : PropalgationMessage{
Action = PB_CONSIDERS,
DataType = COMPUTE_RESOURCE | STORAGE_RESOURCE,
Payload = {"executions_id": "<current id>"}
}
```
---
## Produced YAML file
- Name: `oc-monitor-<word1>-<word2>_<DD_MM_YYYY_hhmmss>.yml`
- Directory: `./argo_workflows/`
- Permissions: `0660`

View File

@@ -5,11 +5,10 @@ import (
"strings"
"cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
"gopkg.in/yaml.v3"
)
func (b *ArgoBuilder) CreateService(exec *workflow_execution.WorkflowExecution, id string, processing resources.ResourceInterface) {
func (b *ArgoBuilder) CreateService(id string, processing *resources.ProcessingResource) {
new_service := models.Service{
APIVersion: "v1",
Kind: "Service",
@@ -25,21 +24,17 @@ func (b *ArgoBuilder) CreateService(exec *workflow_execution.WorkflowExecution,
if processing == nil {
return
}
b.completeServicePorts(exec, &new_service, id, processing)
b.completeServicePorts(&new_service, id, processing)
b.Services = append(b.Services, &new_service)
}
func (b *ArgoBuilder) completeServicePorts(exec *workflow_execution.WorkflowExecution, service *models.Service, id string, processing resources.ResourceInterface) {
index := 0
if d, ok := exec.SelectedInstances[processing.GetID()]; ok {
index = d
}
instance := processing.GetSelectedInstance(&index)
func (b *ArgoBuilder) completeServicePorts(service *models.Service, id string, processing *resources.ProcessingResource) {
instance := processing.GetSelectedInstance()
if instance != nil && instance.(*resources.ProcessingInstance).Access != nil && instance.(*resources.ProcessingInstance).Access.Container != nil {
for _, execute := range instance.(*resources.ProcessingInstance).Access.Container.Exposes {
if execute.PAT != 0 {
new_port_translation := models.ServicePort{
Name: strings.ToLower(processing.GetName()) + id,
Name: strings.ToLower(processing.Name) + id,
Port: execute.Port,
TargetPort: execute.PAT,
Protocol: "TCP",

View File

@@ -1,100 +0,0 @@
package workflow_builder
import (
"encoding/json"
"fmt"
"strconv"
"sync"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/tools"
)
// considersCache stores the channels waiting for a PB_CONSIDERS,
// indexed by "executionsId:dataType". A single NATS message wakes up
// every waiter registered under the same key (broadcast).
type considersCache struct {
mu sync.Mutex
pending map[string][]chan struct{}
}
var globalConsidersCache = &considersCache{
pending: make(map[string][]chan struct{}),
}
// considersKey builds the cache key from the execution ID,
// the data type and the compute peer (SourcePeerID).
// peerID differentiates several COMPUTE_RESOURCE waiters sharing the same
// executionsId (1 local + N remote in parallel).
func considersKey(executionsId string, dataType tools.DataType, peerID string) string {
key := executionsId + ":" + strconv.Itoa(dataType.EnumIndex())
if peerID != "" {
key += ":" + peerID
}
return key
}
// register adds a new waiting channel for the given key.
// Returns the channel to read from and an unregister function to call in a defer.
func (c *considersCache) register(key string) (<-chan struct{}, func()) {
ch := make(chan struct{}, 1)
c.mu.Lock()
c.pending[key] = append(c.pending[key], ch)
c.mu.Unlock()
unregister := func() {
c.mu.Lock()
defer c.mu.Unlock()
list := c.pending[key]
for i, existing := range list {
if existing == ch {
c.pending[key] = append(list[:i], list[i+1:]...)
break
}
}
if len(c.pending[key]) == 0 {
delete(c.pending, key)
}
}
return ch, unregister
}
// confirm wakes up every waiter registered under the given key
// and removes them from the cache.
func (c *considersCache) confirm(key string) {
c.mu.Lock()
list := c.pending[key]
delete(c.pending, key)
c.mu.Unlock()
for _, ch := range list {
select {
case ch <- struct{}{}:
default:
}
}
}
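A fragment illustrating the register/confirm handshake, assuming it runs inside this package with made-up key inputs:

```go
key := considersKey("exec-42", tools.COMPUTE_RESOURCE, "peer-a")
ch, unregister := globalConsidersCache.register(key)
defer unregister()

// Elsewhere, the NATS listener receives the matching CONSIDERS_EVENT:
go globalConsidersCache.confirm(key)

<-ch // the waiter wakes; with several waiters on one key, all wake (broadcast)
```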
// StartConsidersListener starts a global NATS subscriber via ListenNats (oclib)
// that receives CONSIDERS_EVENT messages and wakes the waiting goroutines
// through globalConsidersCache. Must be called exactly once at startup.
func StartConsidersListener() {
log := logs.GetLogger()
log.Info().Msg("Considers NATS listener starting on " + tools.CONSIDERS_EVENT.GenerateKey())
go tools.NewNATSCaller().ListenNats(map[tools.NATSMethod]func(tools.NATSResponse){
tools.CONSIDERS_EVENT: func(resp tools.NATSResponse) {
fmt.Println("CONSIDERS")
var body struct {
ExecutionsID string `json:"executions_id"`
PeerID string `json:"peer_id,omitempty"`
}
if err := json.Unmarshal(resp.Payload, &body); err != nil {
log.Error().Msg("CONSIDERS_EVENT: cannot unmarshal payload: " + err.Error())
return
}
key := considersKey(body.ExecutionsID, resp.Datatype, body.PeerID)
log.Info().Msg(fmt.Sprintf("CONSIDERS_EVENT dispatched for key=%s", key))
globalConsidersCache.confirm(key)
},
})
}

View File

@@ -6,7 +6,6 @@ import (
oclib "cloud.o-forge.io/core/oc-lib"
workflow "cloud.o-forge.io/core/oc-lib/models/workflow"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
)
type WorflowDB struct {
@@ -14,20 +13,20 @@ type WorflowDB struct {
}
// Create the objects from the mxgraphxml stored in the workflow given as a parameter
func (w *WorflowDB) LoadFrom(workflow_id string) error {
func (w *WorflowDB) LoadFrom(workflow_id string, peerID string) error {
logger.Info().Msg("Loading workflow from " + workflow_id)
var err error
if w.Workflow, err = w.getWorkflow(workflow_id); err != nil {
if w.Workflow, err = w.getWorkflow(workflow_id, peerID); err != nil {
return err
}
return nil
}
// Use oclib to retrieve the graph contained in the referenced workflow
func (w *WorflowDB) getWorkflow(workflow_id string) (workflow *workflow.Workflow, err error) {
func (w *WorflowDB) getWorkflow(workflow_id string, peerID string) (workflow *workflow.Workflow, err error) {
logger := oclib.GetLogger()
lib_data := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.WORKFLOW), nil).LoadOne(workflow_id)
lib_data := oclib.NewRequest(oclib.LibDataEnum(oclib.WORKFLOW), "", peerID, []string{}, nil).LoadOne(workflow_id)
logger.Info().Msg(fmt.Sprint("ERR", lib_data.Code, lib_data.Err))
if lib_data.Code != 200 {
logger.Error().Msg("Error loading the graph")
@@ -42,7 +41,7 @@ func (w *WorflowDB) getWorkflow(workflow_id string) (workflow *workflow.Workflow
return new_wf, nil
}
func (w *WorflowDB) ExportToArgo(exec *workflow_execution.WorkflowExecution, timeout int) (*ArgoBuilder, int, error) {
func (w *WorflowDB) ExportToArgo(namespace string, timeout int) (*ArgoBuilder, int, error) {
logger := oclib.GetLogger()
logger.Info().Msg(fmt.Sprint("Exporting to Argo", w.Workflow))
if len(w.Workflow.Name) == 0 || w.Workflow.Graph == nil {
@@ -50,7 +49,7 @@ func (w *WorflowDB) ExportToArgo(exec *workflow_execution.WorkflowExecution, tim
}
argoBuilder := ArgoBuilder{OriginWorkflow: w.Workflow, Timeout: timeout}
stepMax, _, _, err := argoBuilder.CreateDAG(exec, exec.ExecutionsID, true)
stepMax, _, _, err := argoBuilder.CreateDAG(namespace, true)
if err != nil {
logger.Error().Msg("Could not create the argo file for " + w.Workflow.Name)
return nil, 0, err

View File

@@ -6,5 +6,5 @@ import (
func TestGetGraph(t *testing.T) {
w := WorflowDB{}
w.LoadFrom("test-log")
w.LoadFrom("test-log", "")
}
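For context, a hypothetical fragment chaining the two calls shown above (the workflow ID and namespace are invented; error handling is elided):

```go
w := WorflowDB{}
if err := w.LoadFrom("workflow-123", ""); err != nil {
	// handle load failure
}
builder, stepMax, err := w.ExportToArgo("exec-42", 3600)
if err != nil {
	// handle export failure
}
_ = builder
_ = stepMax
```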