Compare commits


No commits in common. "main" and "feature/admiralty" have entirely different histories.

19 changed files with 296 additions and 500 deletions


@ -3,8 +3,6 @@
build: clean build: clean
go build . go build .
dev: build
run: run:
./oc-monitord ./oc-monitord
@ -12,15 +10,16 @@ clean:
rm -rf oc-monitord rm -rf oc-monitord
docker: docker:
DOCKER_BUILDKIT=1 docker build -t oc-monitord: -f Dockerfile . DOCKER_BUILDKIT=1 docker build -t oc/oc-monitord:0.0.1 -f Dockerfile .
docker tag oc-monitord oc/oc-monitord:0.0.1 docker tag oc/oc-monitord:0.0.1 oc/oc-monitord:latest
docker tag oc/oc-monitord:0.0.1 oc-monitord:latest
publish-kind: publish-kind:
kind load docker-image oc/oc-monitord:0.0.1 --name opencloud | true kind load docker-image oc/oc-monitord:0.0.1 --name opencloud
publish-registry: publish-registry:
@echo "TODO" @echo "TODO"
all: docker publish-kind publish-registry all: docker publish-kind publish-registry
.PHONY: build run clean docker publish-kind publish-registry .PHONY: build run clean docker publish-kind publish-registry


@ -1,26 +1,52 @@
# oc-monitor # oc-monitor
DO : ## Deploy in k8s (dev)
make build
## Summary While a registry with all of the OC docker images has not been set up, we can export this image to k3s ctr
oc-monitord is a daemon which can be run : > docker save oc-monitord:latest | sudo k3s ctr images import -
- as a binary
- as a container
It is used to perform several actions regarding the execution of an Open Cloud workflow : Then in the pod manifest for oc-monitord use :
- generating a YAML file that can be interpreted by **Argo Workflow** to create and execute pods in a kubernetes environment
- setting up the different resources needed to execute a workflow over several peers/kubernetes nodes with **Admiralty** : token, secrets, targets and sources ```
- creating the workflow and logging the output from image: docker.io/library/oc-monitord
- Argo watch, which gives information about the workflow in general (phase, number of steps executed, status...) imagePullPolicy: Never
- Pods : the logs generated by the pods ```
Not doing so will result in the pod failing with an `ErrImagePull` error
## Allow argo to create services
In order for monitord to expose **open cloud services** on the node, we need to give it permission to create **k8s services**.
For that we can update the RBAC configuration for a role already created by argo :
### Manually edit the rbac authorization
> kubectl edit roles.rbac.authorization.k8s.io -n argo argo-role
In rules add a new entry :
```
- apiGroups:
- ""
resources:
- services
verbs:
- get
- create
```
### Patch the rbac authorization with a one liner
> kubectl patch role argo-role -n argo --type='json' -p='[{"op": "add", "path": "/rules/-", "value": {"apiGroups": [""], "resources": ["services"], "verbs": ["get","create"]}}]'
### Check whether the modification is effective
> kubectl auth can-i create services --as=system:serviceaccount:argo:argo -n argo
This command **must return "yes"**
To execute, the daemon needs several options :
- **-u** :
- **-m** :
- **-d** :
- **-e** :
# Notes features/admiralty-docker # Notes features/admiralty-docker
@ -31,17 +57,14 @@ To execute, the daemon needs several options :
- decide that no peer can have "http://localhost" as its url and use an attribute from the peer object or isMyself() from oc-lib if a peer is the current host. - decide that no peer can have "http://localhost" as its url and use an attribute from the peer object or isMyself() from oc-lib if a peer is the current host.
## TODO ## TODO
- [ ] Allow the front to know on which IP the services are reachable - [ ] Allow the front to know on which IP the services are reachable
- currently doing it by using `kubectl get nodes -o wide` - currently doing it by using `kubectl get nodes -o wide`
- [ ] Implement writing and reading from S3 bucket/MinIO when a data resource is linked to a compute resource.
### Adding ingress handling to support reverse proxying
### Adding ingress handling to support reverse proxying ### Adding ingress handling to support reverse proxying
- Test whether ingress-nginx is running or not - Test whether ingress-nginx is running or not
- Do something if not found : stop running and send error log OR start installation - Do something if not found : stop running and send error log OR start installation
- -

Binary file not shown.


Binary file not shown.



@ -1,4 +0,0 @@
KUBERNETES_SERVICE_HOST=192.168.1.169
KUBE_CA="LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTWpNeE1USXdNell3SGhjTk1qUXdPREE0TVRBeE16VTJXaGNOTXpRd09EQTJNVEF4TXpVMgpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTWpNeE1USXdNell3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFTVlk3ZHZhNEdYTVdkMy9jMlhLN3JLYjlnWXgyNSthaEE0NmkyNVBkSFAKRktQL2UxSVMyWVF0dzNYZW1TTUQxaStZdzJSaVppNUQrSVZUamNtNHdhcnFvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVWtlUVJpNFJiODduME5yRnZaWjZHClc2SU55NnN3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUlnRXA5ck04WmdNclRZSHYxZjNzOW5DZXZZeWVVa3lZUk4KWjUzazdoaytJS1FDSVFDbk05TnVGKzlTakIzNDFacGZ5ays2NEpWdkpSM3BhcmVaejdMd2lhNm9kdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"
KUBE_CERT="LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrVENDQVRlZ0F3SUJBZ0lJWUxWNkFPQkdrU1F3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOekl6TVRFeU1ETTJNQjRYRFRJME1EZ3dPREV3TVRNMU5sb1hEVEkxTURndwpPREV3TVRNMU5sb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJGQ2Q1MFdPeWdlQ2syQzcKV2FrOWY4MVAvSkJieVRIajRWOXBsTEo0ck5HeHFtSjJOb2xROFYxdUx5RjBtOTQ2Nkc0RmRDQ2dqaXFVSk92Swp3NVRPNnd5alNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCVFJkOFI5cXVWK2pjeUVmL0ovT1hQSzMyS09XekFLQmdncWhrak9QUVFEQWdOSUFEQkYKQWlFQTArbThqTDBJVldvUTZ0dnB4cFo4NVlMalF1SmpwdXM0aDdnSXRxS3NmUVVDSUI2M2ZNdzFBMm5OVWU1TgpIUGZOcEQwSEtwcVN0Wnk4djIyVzliYlJUNklZCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlRENDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUzTWpNeE1USXdNell3SGhjTk1qUXdPREE0TVRBeE16VTJXaGNOTXpRd09EQTJNVEF4TXpVMgpXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUzTWpNeE1USXdNell3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFRc3hXWk9pbnIrcVp4TmFEQjVGMGsvTDF5cE01VHAxOFRaeU92ektJazQKRTFsZWVqUm9STW0zNmhPeVljbnN3d3JoNnhSUnBpMW5RdGhyMzg0S0Z6MlBvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVTBYZkVmYXJsZm8zTWhIL3lmemx6Cnl0OWlqbHN3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQUxJL2dNYnNMT3MvUUpJa3U2WHVpRVMwTEE2cEJHMXgKcnBlTnpGdlZOekZsQWlFQW1wdjBubjZqN3M0MVI0QzFNMEpSL0djNE53MHdldlFmZWdEVGF1R2p3cFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"
KUBE_DATA="LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSU5ZS1BFb1dhd1NKUzJlRW5oWmlYMk5VZlY1ZlhKV2krSVNnV09TNFE5VTlvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFVUozblJZN0tCNEtUWUx0WnFUMS96VS84a0Z2Sk1lUGhYMm1Vc25pczBiR3FZblkyaVZEeApYVzR2SVhTYjNqcm9iZ1YwSUtDT0twUWs2OHJEbE03ckRBPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo="

go.mod

@ -5,7 +5,7 @@ go 1.23.1
toolchain go1.23.3 toolchain go1.23.3
require ( require (
cloud.o-forge.io/core/oc-lib v0.0.0-20250808141553-f4b0cf5683de cloud.o-forge.io/core/oc-lib v0.0.0-20250313155727-88c88cac5bc9
github.com/akamensky/argparse v1.4.0 github.com/akamensky/argparse v1.4.0
github.com/google/uuid v1.6.0 github.com/google/uuid v1.6.0
github.com/goraz/onion v0.1.3 github.com/goraz/onion v0.1.3
@ -15,12 +15,11 @@ require (
) )
require ( require (
github.com/beego/beego/v2 v2.3.8 // indirect github.com/beego/beego/v2 v2.3.7 // indirect
github.com/go-playground/validator/v10 v10.27.0 // indirect github.com/go-playground/validator/v10 v10.26.0 // indirect
github.com/golang/protobuf v1.5.4 // indirect github.com/golang/protobuf v1.5.4 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect github.com/sirupsen/logrus v1.9.3 // indirect
github.com/ugorji/go/codec v1.1.7 // indirect
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect
@ -28,6 +27,7 @@ require (
) )
require ( require (
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
github.com/argoproj/argo-workflows/v3 v3.6.4 github.com/argoproj/argo-workflows/v3 v3.6.4
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/biter777/countries v1.7.5 // indirect github.com/biter777/countries v1.7.5 // indirect
@ -35,7 +35,7 @@ require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.9 // indirect github.com/gabriel-vasile/mimetype v1.4.8 // indirect
github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/logr v1.4.2 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.4 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect
@ -60,32 +60,33 @@ require (
github.com/modern-go/reflect2 v1.0.2 // indirect github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/montanaflynn/stats v0.7.1 // indirect github.com/montanaflynn/stats v0.7.1 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nats-io/nats.go v1.44.0 // indirect github.com/nats-io/nats.go v1.41.0 // indirect
github.com/nats-io/nkeys v0.4.11 // indirect github.com/nats-io/nkeys v0.4.10 // indirect
github.com/nats-io/nuid v1.0.1 // indirect github.com/nats-io/nuid v1.0.1 // indirect
github.com/pkg/errors v0.9.1 // indirect github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.23.0 // indirect github.com/prometheus/client_golang v1.22.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.65.0 // indirect github.com/prometheus/common v0.63.0 // indirect
github.com/prometheus/procfs v0.17.0 // indirect github.com/prometheus/procfs v0.16.0 // indirect
github.com/robfig/cron v1.2.0 // indirect github.com/robfig/cron v1.2.0 // indirect
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 // indirect github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 // indirect
github.com/smartystreets/goconvey v1.6.4 // indirect github.com/smartystreets/goconvey v1.6.4 // indirect
github.com/ugorji/go/codec v1.1.7 // indirect
github.com/x448/float16 v0.8.4 // indirect github.com/x448/float16 v0.8.4 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/scram v1.1.2 // indirect
github.com/xdg-go/stringprep v1.0.4 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
go.mongodb.org/mongo-driver v1.17.4 // indirect go.mongodb.org/mongo-driver v1.17.3 // indirect
golang.org/x/crypto v0.41.0 // indirect golang.org/x/crypto v0.37.0 // indirect
golang.org/x/net v0.43.0 // indirect golang.org/x/net v0.39.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/oauth2 v0.25.0 // indirect
golang.org/x/sync v0.16.0 // indirect golang.org/x/sync v0.13.0 // indirect
golang.org/x/sys v0.35.0 // indirect golang.org/x/sys v0.32.0 // indirect
golang.org/x/term v0.34.0 // indirect golang.org/x/term v0.31.0 // indirect
golang.org/x/text v0.28.0 // indirect golang.org/x/text v0.24.0 // indirect
golang.org/x/time v0.7.0 // indirect golang.org/x/time v0.7.0 // indirect
google.golang.org/protobuf v1.36.7 // indirect google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect
k8s.io/api v0.32.1 k8s.io/api v0.32.1

go.sum

@ -1,28 +1,18 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.o-forge.io/core/oc-lib v0.0.0-20250604083300-387785b40cb0 h1:iEm/Rf9I0OSCcncuFy61YOSZ3jdRlhJ/oLD97Pc2pCQ= cloud.o-forge.io/core/oc-lib v0.0.0-20250313155727-88c88cac5bc9 h1:mSFFPwil5Ih+RPBvn88MBerQMtsoHnOuyCZQaf91a34=
cloud.o-forge.io/core/oc-lib v0.0.0-20250604083300-387785b40cb0/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE= cloud.o-forge.io/core/oc-lib v0.0.0-20250313155727-88c88cac5bc9/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
cloud.o-forge.io/core/oc-lib v0.0.0-20250620085001-583ca2fbacd5 h1:FEBwueVOOWKYf0tJuE0EKNIbjxmTyCMgkT4qATYsfbo=
cloud.o-forge.io/core/oc-lib v0.0.0-20250620085001-583ca2fbacd5/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20250704084459-443546027b27 h1:iogk6pV3gybzQDBXMI6Qd/jvSA1h+3oRE+vLl1MRjew=
cloud.o-forge.io/core/oc-lib v0.0.0-20250704084459-443546027b27/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20250715125819-e735f78e58c6 h1:Gnkv59Ntl2RebC5tNauXuxyRXLfZ2XAJ0+ujMyFte5U=
cloud.o-forge.io/core/oc-lib v0.0.0-20250715125819-e735f78e58c6/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20250805113921-40a61387b9f1 h1:53KzZ+1JqRY6J7EVzQpNBmLzNuxb8oHNW3UgqxkYABo=
cloud.o-forge.io/core/oc-lib v0.0.0-20250805113921-40a61387b9f1/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20250808140536-e7a71188a3b5 h1:bmEG0M99WXWCHsMNUgfYqQNEM0YyN4dXxYV1LCY5EYg=
cloud.o-forge.io/core/oc-lib v0.0.0-20250808140536-e7a71188a3b5/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20250808141553-f4b0cf5683de h1:s47eEnWRCjBMOxbec5ROHztuwu0Zo7MuXgqWizgkiXU=
cloud.o-forge.io/core/oc-lib v0.0.0-20250808141553-f4b0cf5683de/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
github.com/akamensky/argparse v1.4.0 h1:YGzvsTqCvbEZhL8zZu2AiA5nq805NZh75JNj4ajn1xc= github.com/akamensky/argparse v1.4.0 h1:YGzvsTqCvbEZhL8zZu2AiA5nq805NZh75JNj4ajn1xc=
github.com/akamensky/argparse v1.4.0/go.mod h1:S5kwC7IuDcEr5VeXtGPRVZ5o/FdhcMlQz4IZQuw64xA= github.com/akamensky/argparse v1.4.0/go.mod h1:S5kwC7IuDcEr5VeXtGPRVZ5o/FdhcMlQz4IZQuw64xA=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/argoproj/argo-workflows/v3 v3.6.4 h1:5+Cc1UwaQE5ka3w7R3hxZ1TK3M6VjDEXA5WSQ/IXrxY= github.com/argoproj/argo-workflows/v3 v3.6.4 h1:5+Cc1UwaQE5ka3w7R3hxZ1TK3M6VjDEXA5WSQ/IXrxY=
github.com/argoproj/argo-workflows/v3 v3.6.4/go.mod h1:2f5zB8CkbNCCO1od+kd1dWkVokqcuyvu+tc+Jwx1MZg= github.com/argoproj/argo-workflows/v3 v3.6.4/go.mod h1:2f5zB8CkbNCCO1od+kd1dWkVokqcuyvu+tc+Jwx1MZg=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/beego/beego/v2 v2.3.8 h1:wplhB1pF4TxR+2SS4PUej8eDoH4xGfxuHfS7wAk9VBc= github.com/beego/beego/v2 v2.3.7 h1:z4btKtjU/rfp5BiYHkGD2QPjK9i1E9GH+I7vfhn6Agk=
github.com/beego/beego/v2 v2.3.8/go.mod h1:8vl9+RrXqvodrl9C8yivX1e6le6deCK6RWeq8R7gTTg= github.com/beego/beego/v2 v2.3.7/go.mod h1:5cqHsOHJIxkq44tBpRvtDe59GuVRVv/9/tyVDxd5ce4=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/biter777/countries v1.7.5 h1:MJ+n3+rSxWQdqVJU8eBy9RqcdH6ePPn4PJHocVWUa+Q= github.com/biter777/countries v1.7.5 h1:MJ+n3+rSxWQdqVJU8eBy9RqcdH6ePPn4PJHocVWUa+Q=
@ -55,8 +45,8 @@ github.com/etcd-io/etcd v3.3.17+incompatible/go.mod h1:cdZ77EstHBwVtD6iTgzgvogwc
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY= github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok= github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@ -74,8 +64,6 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k= github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k=
github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4=
github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@ -161,12 +149,10 @@ github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8
github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/nats-io/nats.go v1.43.0 h1:uRFZ2FEoRvP64+UUhaTokyS18XBCR/xM2vQZKO4i8ug= github.com/nats-io/nats.go v1.41.0 h1:PzxEva7fflkd+n87OtQTXqCTyLfIIMFJBpyccHLE2Ko=
github.com/nats-io/nats.go v1.43.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g= github.com/nats-io/nats.go v1.41.0/go.mod h1:wV73x0FSI/orHPSYoyMeJB+KajMDoWyXmFaRrrYaaTo=
github.com/nats-io/nats.go v1.44.0 h1:ECKVrDLdh/kDPV1g0gAQ+2+m2KprqZK5O/eJAyAnH2M= github.com/nats-io/nkeys v0.4.10 h1:glmRrpCmYLHByYcePvnTBEAwawwapjCPMjy2huw20wc=
github.com/nats-io/nats.go v1.44.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g= github.com/nats-io/nkeys v0.4.10/go.mod h1:OjRrnIKnWBFl+s4YK5ChQfvHP2fxqZexrKJoVVyWB3U=
github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0=
github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/nwtgck/go-fakelish v0.1.3 h1:bA8/xa9hQmzppexIhBvdmztcd/PJ4SPuAUTBdMKZ8G4= github.com/nwtgck/go-fakelish v0.1.3 h1:bA8/xa9hQmzppexIhBvdmztcd/PJ4SPuAUTBdMKZ8G4=
@ -186,19 +172,13 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4= github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM=
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
@ -252,22 +232,14 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ= go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ=
go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw=
go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@ -287,18 +259,12 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -306,12 +272,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -327,32 +289,18 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -391,8 +339,6 @@ google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8=
google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A=
google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=


@ -7,8 +7,6 @@ import (
"oc-monitord/tools" "oc-monitord/tools"
"oc-monitord/utils" "oc-monitord/utils"
"slices" "slices"
"strings"
"sync"
"time" "time"
"github.com/rs/zerolog" "github.com/rs/zerolog"
@ -108,16 +106,13 @@ func NewArgoPodLog(name string, step string, msg string) ArgoPodLog {
} }
} }
func LogKubernetesArgo(wfName string, namespace string, watcher watch.Interface) { func LogKubernetesArgo(wfName string, executionID string, watcher watch.Interface) {
var argoWatcher *ArgoWatch var argoWatcher *ArgoWatch
var pods []string var pods []string
var node wfv1.NodeStatus var node wfv1.NodeStatus
wfl := utils.GetWFLogger("") wfl := utils.GetWFLogger("")
wfl.Debug().Msg("Starting to log " + wfName)
var wg sync.WaitGroup
for event := range (watcher.ResultChan()) { for event := range (watcher.ResultChan()) {
wf, ok := event.Object.(*wfv1.Workflow) wf, ok := event.Object.(*wfv1.Workflow)
if !ok { if !ok {
@ -125,7 +120,7 @@ func LogKubernetesArgo(wfName string, namespace string, watcher watch.Interface)
continue continue
} }
if len(wf.Status.Nodes) == 0 { if len(wf.Status.Nodes) == 0 {
wfl.Info().Msg("No node status yet") // The first output of the channel doesn't contain Nodes so we skip it wfl.Debug().Msg("No node status yet") // The first output of the channel doesn't contain Nodes so we skip it
continue continue
} }
@ -143,7 +138,7 @@ func LogKubernetesArgo(wfName string, namespace string, watcher watch.Interface)
newWatcher := ArgoWatch{ newWatcher := ArgoWatch{
Name: node.Name, Name: node.Name,
Namespace: namespace, Namespace: executionID,
Status: string(node.Phase), Status: string(node.Phase),
Created: node.StartedAt.String(), Created: node.StartedAt.String(),
Started: node.StartedAt.String(), Started: node.StartedAt.String(),
@ -168,9 +163,7 @@ func LogKubernetesArgo(wfName string, namespace string, watcher watch.Interface)
if !slices.Contains(pods,pod.Name){ if !slices.Contains(pods,pod.Name){
pl := wfl.With().Str("pod", pod.Name).Logger() pl := wfl.With().Str("pod", pod.Name).Logger()
if wfName == pod.Name { pods = append(pods, pod.Name); continue } // One of the node is the Workflow, the others are the pods so don't try to log on the wf name if wfName == pod.Name { pods = append(pods, pod.Name); continue } // One of the node is the Workflow, the others are the pods so don't try to log on the wf name
pl.Info().Msg("Found a new pod to log : " + pod.Name) go logKubernetesPods(executionID, wfName, pod.Name, pl)
wg.Add(1)
go logKubernetesPods(namespace, wfName, pod.Name, pl, &wg)
pods = append(pods, pod.Name) pods = append(pods, pod.Name)
} }
} }
@ -178,8 +171,6 @@ func LogKubernetesArgo(wfName string, namespace string, watcher watch.Interface)
// Stop listening to the chan when the Workflow is completed or something bad happened // Stop listening to the chan when the Workflow is completed or something bad happened
if node.Phase.Completed() { if node.Phase.Completed() {
wfl.Info().Msg(wfName + " worflow completed") wfl.Info().Msg(wfName + " worflow completed")
wg.Wait()
wfl.Info().Msg(wfName + " exiting")
break break
} }
if node.Phase.FailedOrError() { if node.Phase.FailedOrError() {
@ -205,31 +196,24 @@ func retrieveCondition(wf *wfv1.Workflow) (c Conditions) {
} }
// Function needed to be executed as a go thread // Function needed to be executed as a go thread
func logKubernetesPods(executionId string, wfName string,podName string, logger zerolog.Logger, wg *sync.WaitGroup){ func logKubernetesPods(executionId string, wfName string,podName string, logger zerolog.Logger){
defer wg.Done()
s := strings.Split(podName, ".")
name := s[0] + "-" + s[1]
step := s[1]
k, err := tools.NewKubernetesTool() k, err := tools.NewKubernetesTool()
if err != nil { if err != nil {
logger.Error().Msg("Could not get Kubernetes tools") logger.Error().Msg("Could not get Kubernetes tools")
return return
} }
reader, err := k.GetPodLogger(executionId, wfName, podName) reader, err := k.GetPodLogger(executionId, wfName, podName)
if err != nil { if err != nil {
logger.Error().Msg(err.Error()) logger.Error().Msg(err.Error())
return return
} }
scanner := bufio.NewScanner(reader) scanner := bufio.NewScanner(reader)
for scanner.Scan() { for scanner.Scan() {
log := scanner.Text() log := scanner.Text()
podLog := NewArgoPodLog(name,step,log) podLog := NewArgoPodLog(wfName,podName,log)
jsonified, _ := json.Marshal(podLog) jsonified, _ := json.Marshal(podLog)
logger.Info().Msg(string(jsonified)) logger.Info().Msg(string(jsonified))
} }
} }


@ -3,6 +3,7 @@ package logger
import ( import (
"bufio" "bufio"
"encoding/json" "encoding/json"
"fmt"
"io" "io"
"oc-monitord/conf" "oc-monitord/conf"
@ -70,6 +71,7 @@ func LogLocalWorkflow(wfName string, pipe io.ReadCloser, wg *sync.WaitGroup) {
logger = logs.GetLogger() logger = logs.GetLogger()
logger.Debug().Msg("created wf_logger") logger.Debug().Msg("created wf_logger")
fmt.Println("created wf_logger")
wfLogger = logger.With().Str("argo_name", wfName).Str("workflow_id", conf.GetConfig().WorkflowID).Str("workflow_execution_id", conf.GetConfig().ExecutionID).Logger() wfLogger = logger.With().Str("argo_name", wfName).Str("workflow_id", conf.GetConfig().WorkflowID).Str("workflow_execution_id", conf.GetConfig().ExecutionID).Logger()
var current_watch, previous_watch ArgoWatch var current_watch, previous_watch ArgoWatch
@ -109,6 +111,7 @@ func LogLocalPod(wfName string, pipe io.ReadCloser, steps []string, wg *sync.Wai
scanner := bufio.NewScanner(pipe) scanner := bufio.NewScanner(pipe)
for scanner.Scan() { for scanner.Scan() {
var podLogger zerolog.Logger var podLogger zerolog.Logger
fmt.Println("new line")
wg.Add(1) wg.Add(1)
line := scanner.Text() line := scanner.Text()

main.go

@ -67,14 +67,11 @@ func main() {
logger = u.GetLogger() logger = u.GetLogger()
logger.Debug().Msg("Loki URL : " + conf.GetConfig().LokiURL) logger.Debug().Msg("Loki URL : " + conf.GetConfig().LokiURL)
logger.Info().Msg("Workflow executed : " + conf.GetConfig().ExecutionID) logger.Debug().Msg("Workflow executed : " + conf.GetConfig().ExecutionID)
exec := u.GetExecution(conf.GetConfig().ExecutionID) exec := u.GetExecution(conf.GetConfig().ExecutionID)
if exec == nil {
logger.Fatal().Msg("Could not retrieve workflow ID from execution ID " + conf.GetConfig().ExecutionID + " on peer " + conf.GetConfig().PeerID)
}
conf.GetConfig().WorkflowID = exec.WorkflowID conf.GetConfig().WorkflowID = exec.WorkflowID
logger.Info().Msg("Starting construction of yaml argo for workflow :" + exec.WorkflowID) logger.Debug().Msg("Starting construction of yaml argo for workflow :" + exec.WorkflowID)
if _, err := os.Stat("./argo_workflows/"); os.IsNotExist(err) { if _, err := os.Stat("./argo_workflows/"); os.IsNotExist(err) {
os.Mkdir("./argo_workflows/", 0755) os.Mkdir("./argo_workflows/", 0755)
@ -86,6 +83,7 @@ func main() {
err := new_wf.LoadFrom(conf.GetConfig().WorkflowID, conf.GetConfig().PeerID) err := new_wf.LoadFrom(conf.GetConfig().WorkflowID, conf.GetConfig().PeerID)
if err != nil { if err != nil {
logger.Error().Msg("Could not retrieve workflow " + conf.GetConfig().WorkflowID + " from oc-catalog API") logger.Error().Msg("Could not retrieve workflow " + conf.GetConfig().WorkflowID + " from oc-catalog API")
} }
@ -97,20 +95,23 @@ func main() {
argoFilePath, err := builder.CompleteBuild(exec.ExecutionsID) argoFilePath, err := builder.CompleteBuild(exec.ExecutionsID)
if err != nil { if err != nil {
logger.Error().Msg("Error when completing the build of the workflow: " + err.Error()) logger.Error().Msg(err.Error())
} }
workflowName = getContainerName(argoFilePath) workflowName = getContainerName(argoFilePath)
wf_logger := u.GetWFLogger(workflowName)
wf_logger.Debug().Msg("Testing argo name")
if conf.GetConfig().KubeHost == "" { if conf.GetConfig().KubeHost == "" {
// Not in a k8s environment, get conf from parameters // Not in a k8s environment, get conf from parameters
logger.Info().Msg("Executes outside of k8s") fmt.Println("Executes outside of k8s")
executeOutside(argoFilePath, builder.Workflow) executeOutside(argoFilePath, builder.Workflow)
} else { } else {
// Executed in a k8s environment // Executed in a k8s environment
logger.Info().Msg("Executes inside a k8s") fmt.Println("Executes inside a k8s")
// executeInside(exec.GetID(), "argo", argo_file_path, stepMax) // commenting to use conf.ExecutionID instead of exec.GetID() // executeInside(exec.GetID(), "argo", argo_file_path, stepMax) // commenting to use conf.ExecutionID instead of exec.GetID()
executeInside(conf.GetConfig().ExecutionID, exec.ExecutionsID, argoFilePath) executeInside(conf.GetConfig().ExecutionID, conf.GetConfig().ExecutionID, argoFilePath)
} }
} }
@ -123,25 +124,23 @@ func executeInside(execID string, ns string, argo_file_path string) {
} }
name, err := t.CreateArgoWorkflow(argo_file_path, ns) name, err := t.CreateArgoWorkflow(argo_file_path, ns)
// _ = name _ = name
if err != nil { if err != nil {
logger.Error().Msg("Could not create argo workflow : " + err.Error()) logger.Error().Msg("Could not create argo workflow : " + err.Error())
logger.Info().Msg(fmt.Sprint("CA :" + conf.GetConfig().KubeCA)) fmt.Println("CA :" + conf.GetConfig().KubeCA)
logger.Info().Msg(fmt.Sprint("Cert :" + conf.GetConfig().KubeCert)) fmt.Println("Cert :" + conf.GetConfig().KubeCert)
logger.Info().Msg(fmt.Sprint("Data :" + conf.GetConfig().KubeData)) fmt.Println("Data :" + conf.GetConfig().KubeData)
return return
} else { } else {
watcher, err := t.GetArgoWatch(ns, workflowName) watcher, err := t.GetArgoWatch(execID, workflowName)
if err != nil { if err != nil {
logger.Error().Msg("Could not retrieve Watcher : " + err.Error()) logger.Error().Msg("Could not retrieve Watcher : " + err.Error())
} }
l.LogKubernetesArgo(name, ns, watcher) l.LogKubernetesArgo(name, execID, watcher)
if err != nil { if err != nil {
logger.Error().Msg("Could not log workflow : " + err.Error()) logger.Error().Msg("Could not log workflow : " + err.Error())
} }
logger.Info().Msg("Finished, exiting...")
} }
} }
@ -174,7 +173,7 @@ func executeOutside(argo_file_path string, workflow workflow_builder.Workflow) {
go l.LogLocalWorkflow(workflowName, stdoutSubmit, &wg) go l.LogLocalWorkflow(workflowName, stdoutSubmit, &wg)
go l.LogLocalPod(workflowName, stdoutLogs, steps, &wg) go l.LogLocalPod(workflowName, stdoutLogs, steps, &wg)
logger.Info().Msg("Starting argo submit") fmt.Println("Starting argo submit")
if err := cmdSubmit.Start(); err != nil { if err := cmdSubmit.Start(); err != nil {
wf_logger.Error().Msg("Could not start argo submit") wf_logger.Error().Msg("Could not start argo submit")
wf_logger.Error().Msg(err.Error() + bufio.NewScanner(stderrSubmit).Text()) wf_logger.Error().Msg(err.Error() + bufio.NewScanner(stderrSubmit).Text())
@ -183,7 +182,7 @@ func executeOutside(argo_file_path string, workflow workflow_builder.Workflow) {
time.Sleep(5 * time.Second) time.Sleep(5 * time.Second)
logger.Info().Msg("Running argo logs") fmt.Println("Running argo logs")
if err := cmdLogs.Run(); err != nil { if err := cmdLogs.Run(); err != nil {
wf_logger.Error().Msg("Could not run '" + strings.Join(cmdLogs.Args, " ") + "'") wf_logger.Error().Msg("Could not run '" + strings.Join(cmdLogs.Args, " ") + "'")
@ -191,7 +190,7 @@ func executeOutside(argo_file_path string, workflow workflow_builder.Workflow) {
} }
logger.Info().Msg("Waiting argo submit") fmt.Println("Waiting argo submit")
if err := cmdSubmit.Wait(); err != nil { if err := cmdSubmit.Wait(); err != nil {
wf_logger.Error().Msg("Could not execute argo submit") wf_logger.Error().Msg("Could not execute argo submit")
wf_logger.Error().Msg(err.Error() + bufio.NewScanner(stderrSubmit).Text()) wf_logger.Error().Msg(err.Error() + bufio.NewScanner(stderrSubmit).Text())
@ -232,7 +231,7 @@ func setConf(is_k8s bool, o *onion.Onion, parser *argparse.Parser) {
err := parser.Parse(os.Args) err := parser.Parse(os.Args)
if err != nil { if err != nil {
logger.Info().Msg(parser.Usage(err)) fmt.Println(parser.Usage(err))
os.Exit(1) os.Exit(1)
} }
conf.GetConfig().Logs = "debug" conf.GetConfig().Logs = "debug"


@ -1,66 +0,0 @@
# Goal
We want to be able to instantiate a service that allows files located on a `processing` pod to be stored onto it.
We have already tested it with a static `Argo` YAML file and a MinIO running on the same Kubernetes node; the MinIO service is reached because it is the only one associated with the `serviceAccount`.
We have established three options that need to be available to the user for the feature to be implemented:
- Use a MinIO running constantly on the node that executes the argo workflow
- Use a MinIO
- A MinIO is instantiated when a new workflow is launched
# Requirements
- Helm : `https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3`
- Helm GO client : `$ go get github.com/mittwald/go-helm-client`
- MinIO chart : `https://charts.min.io/`
# Resources
We need to create several resources in order for the pods to communicate with the MinIO
## MinIO Auth Secrets
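The ConfigMap in the next section references an `argo-artifact-secret` holding `access-key` and `secret-key` entries, the same keys written by the `CreateAccessSecret` helper shown further down in this comparison. A minimal client-go sketch of creating such a secret, assuming the secret name and key names used by that ConfigMap (the package and function names are illustrative, not the project's actual code):
```go
package k8sutil // illustrative package name

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createArgoArtifactSecret creates the opaque secret referenced by the
// artifact-repositories ConfigMap. The secret name and key names mirror the
// ConfigMap below; everything else is an assumption for illustration.
func createArgoArtifactSecret(clientset kubernetes.Interface, ns, accessKey, secretKey string) error {
	secret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "argo-artifact-secret",
			Namespace: ns,
		},
		Type: v1.SecretTypeOpaque,
		// StringData lets the API server base64-encode the values itself.
		StringData: map[string]string{
			"access-key": accessKey,
			"secret-key": secretKey,
		},
	}
	_, err := clientset.CoreV1().Secrets(ns).Create(context.TODO(), secret, metav1.CreateOptions{})
	return err
}
```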
## Bucket ConfigMap
With the name `artifact-repositories`, this ConfigMap will be used by default. It contains the URL to the MinIO server and the key to the authentication data held in a `secret` resource.
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
# If you want to use this config map by default, name it "artifact-repositories".
name: artifact-repositories
# annotations:
# # v3.0 and after - if you want to use a specific key, put that key into this annotation.
# workflows.argoproj.io/default-artifact-repository: oc-s3-artifact-repository
data:
oc-s3-artifact-repository: |
s3:
bucket: oc-bucket
endpoint: [ retrieve cluster with kubectl get service argo-artifacts -o jsonpath="{.spec.clusterIP}" ]:9000
insecure: true
accessKeySecret:
name: argo-artifact-secret
key: access-key
secretKeySecret:
name: argo-artifact-secret
key: secret-key
```
# Code modifications
Add an `isDataLink` attribute
- true/false
Add a `DataPath` attribute, or something along those lines
- a list of `map[string]string` (allows keeping only one copy per file)
- editable only through the `addDataPath` method
- key: the file's path / value: the name of the copy in MinIO
===> we need the same attribute for Processing -> Data and Data -> Processing (see the sketch below)
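A minimal Go sketch of this proposal; the type, field, and method names are illustrative rather than the actual oc-lib API:
```go
package resources // illustrative placement, not the actual oc-lib layout

// DataLinkInfo sketches the proposed attributes: a flag marking a link
// between a data and a processing resource, plus a list of
// file path -> copy name mappings.
type DataLinkInfo struct {
	IsDataLink bool
	dataPaths  []map[string]string // key: file path, value: name of the copy in MinIO
}

// AddDataPath is the only way to edit the mapping; it ignores duplicates so
// that each file is copied to MinIO at most once.
func (d *DataLinkInfo) AddDataPath(filePath, copyName string) {
	for _, m := range d.dataPaths {
		if _, exists := m[filePath]; exists {
			return
		}
	}
	d.dataPaths = append(d.dataPaths, map[string]string{filePath: copyName})
}
```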


@ -71,8 +71,8 @@ type Key struct {
Bucket string `yaml:"bucket"` Bucket string `yaml:"bucket"`
EndPoint string `yaml:"endpoint"` EndPoint string `yaml:"endpoint"`
Insecure bool `yaml:"insecure"` Insecure bool `yaml:"insecure"`
AccessKeySecret *Secret `yaml:"accessKeySecret"` AccessKeySecret *Secret `yaml accessKeySecret`
SecretKeySecret *Secret `yaml:"secretKeySecret"` SecretKeySecret *Secret `yaml secretKeySecret`
} }
type Artifact struct { type Artifact struct {
@ -131,7 +131,7 @@ func (template *Template) CreateContainer(processing *resources.ProcessingResour
func (template *Template) ReplacePerEnv(arg string, envs []models.Param) string { func (template *Template) ReplacePerEnv(arg string, envs []models.Param) string {
for _, v := range envs { for _, v := range envs {
if v.Name != "" && strings.Contains(arg, v.Name) { if strings.Contains(arg, v.Name) {
value := "{{ inputs.parameters." + v.Name + " }}" value := "{{ inputs.parameters." + v.Name + " }}"
arg = strings.ReplaceAll(arg, v.Name, value) arg = strings.ReplaceAll(arg, v.Name, value)
arg = strings.ReplaceAll(arg, "$"+v.Name, value) arg = strings.ReplaceAll(arg, "$"+v.Name, value)

Binary file not shown.


@ -4,16 +4,14 @@ import (
"errors" "errors"
"io" "io"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/watch" "k8s.io/apimachinery/pkg/watch"
) )
type Tool interface { type Tool interface {
CreateArgoWorkflow(path string, ns string) (string, error) CreateArgoWorkflow(path string, ns string) (string, error)
CreateAccessSecret(user string, password string, storageId string, namespace string) (string, error) CreateAccessSecret(ns string, login string, password string) (string, error)
GetArgoWatch(executionId string, wfName string) (watch.Interface, error) GetArgoWatch(executionId string, wfName string) (watch.Interface, error)
GetPodLogger(ns string, wfName string, podName string) (io.ReadCloser, error) GetPodLogger(ns string, wfName string, podName string) (io.ReadCloser, error)
GetS3Secret(storageId string, namespace string) *v1.Secret
} }
var _service = map[string]func() (Tool, error){ var _service = map[string]func() (Tool, error){


@ -2,6 +2,7 @@ package tools
import ( import (
"context" "context"
"encoding/base64"
"errors" "errors"
"fmt" "fmt"
"io" "io"
@ -12,8 +13,8 @@ import (
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned" "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned"
"github.com/google/uuid"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/runtime/serializer"
@ -30,7 +31,7 @@ type KubernetesTools struct {
func NewKubernetesTool() (Tool, error) { func NewKubernetesTool() (Tool, error) {
// Load Kubernetes config (from ~/.kube/config) // Load Kubernetes config (from ~/.kube/config)
config := &rest.Config{ config := &rest.Config{
Host: "https://" + conf.GetConfig().KubeHost + ":" + conf.GetConfig().KubePort, Host: conf.GetConfig().KubeHost + ":" + conf.GetConfig().KubePort,
TLSClientConfig: rest.TLSClientConfig{ TLSClientConfig: rest.TLSClientConfig{
CAData: []byte(conf.GetConfig().KubeCA), CAData: []byte(conf.GetConfig().KubeCA),
CertData: []byte(conf.GetConfig().KubeCert), CertData: []byte(conf.GetConfig().KubeCert),
@ -82,25 +83,25 @@ func (k *KubernetesTools) CreateArgoWorkflow(path string, ns string) (string, er
if err != nil { if err != nil {
return "", errors.New("failed to create workflow: " + err.Error()) return "", errors.New("failed to create workflow: " + err.Error())
} }
l := utils.GetLogger() fmt.Printf("workflow %s created in namespace %s\n", createdWf.Name, ns)
l.Info().Msg(fmt.Sprintf("workflow %s created in namespace %s\n", createdWf.Name, ns))
return createdWf.Name, nil return createdWf.Name, nil
} }
func (k *KubernetesTools) CreateAccessSecret(access string, password string, storageId string, namespace string) (string, error) { func (k *KubernetesTools) CreateAccessSecret(ns string, login string, password string) (string, error) {
// Namespace where the secret will be created // Namespace where the secret will be created
namespace := "default"
// Encode the secret data (Kubernetes requires base64-encoded values) // Encode the secret data (Kubernetes requires base64-encoded values)
secretData := map[string][]byte{ secretData := map[string][]byte{
"access-key": []byte(access), "access-key": []byte(base64.StdEncoding.EncodeToString([]byte(login))),
"secret-key": []byte(password), "secret-key": []byte(base64.StdEncoding.EncodeToString([]byte(password))),
} }
// Define the Secret object // Define the Secret object
name := storageId+"-secret-s3" name := uuid.New().String()
secret := &v1.Secret{ secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: name, Name: name,
Namespace: namespace, Namespace: ns,
}, },
Type: v1.SecretTypeOpaque, Type: v1.SecretTypeOpaque,
Data: secretData, Data: secretData,
@ -110,35 +111,20 @@ func (k *KubernetesTools) CreateAccessSecret(access string, password string, sto
if err != nil { if err != nil {
return "", errors.New("Error creating secret: " + err.Error()) return "", errors.New("Error creating secret: " + err.Error())
} }
return name, nil return name, nil
} }
func (k *KubernetesTools) GetS3Secret(storageId string, namespace string) *v1.Secret {
secret, err := k.Set.CoreV1().Secrets(namespace).Get(context.TODO(), storageId + "-secret-s3", metav1.GetOptions{})
// Get(context.TODO(),storageId + "-artifact-server", metav1.GetOptions{})
if err != nil && !k8serrors.IsNotFound(err) {
l := utils.GetLogger()
l.Fatal().Msg("An error happened when retrieving secret in " + namespace + " : " + err.Error())
}
if k8serrors.IsNotFound(err) {
return nil
}
return secret
// return secret
}
func (k *KubernetesTools) GetArgoWatch(executionId string, wfName string) (watch.Interface, error){ func (k *KubernetesTools) GetArgoWatch(executionId string, wfName string) (watch.Interface, error){
wfl := utils.GetWFLogger("")
wfl.Debug().Msg("Starting argo watch with argo lib")
fmt.Println("metadata.name=oc-monitor-"+wfName + " in namespace : " + executionId)
options := metav1.ListOptions{FieldSelector: "metadata.name=oc-monitor-"+wfName} options := metav1.ListOptions{FieldSelector: "metadata.name=oc-monitor-"+wfName}
fmt.Println(options)
watcher, err := k.VersionedSet.ArgoprojV1alpha1().Workflows(executionId).Watch(context.Background(), options) watcher, err := k.VersionedSet.ArgoprojV1alpha1().Workflows(executionId).Watch(context.TODO(), options)
if err != nil { if err != nil {
return nil, errors.New("Error executing 'argo watch " + wfName + " -n " + executionId + " with ArgoprojV1alpha1 client") return nil, errors.New("Error executing 'argo watch " + wfName + " -n " + executionId + " with ArgoprojV1alpha1 client")
} }
return watcher, nil return watcher, nil
@ -147,18 +133,16 @@ func (k *KubernetesTools) GetArgoWatch(executionId string, wfName string) (watch
func (k *KubernetesTools) GetPodLogger(ns string, wfName string, nodeName string) (io.ReadCloser, error) { func (k *KubernetesTools) GetPodLogger(ns string, wfName string, nodeName string) (io.ReadCloser, error) {
var targetPod v1.Pod var targetPod v1.Pod
pods, err := k.Set.CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{ pods, err := k.Set.CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{
LabelSelector: "workflows.argoproj.io/workflow="+wfName, LabelSelector: "workflows.argoproj.io/workflow="+wfName,
}) })
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to list pods: " + err.Error()) return nil, fmt.Errorf("failed to list pods: " + err.Error())
} }
if len(pods.Items) == 0 { if len(pods.Items) == 0 {
return nil, fmt.Errorf("no pods found with label workflows.argoproj.io/node-name=" + nodeName)
return nil, fmt.Errorf("no pods found with label workflows.argoproj.io/workflow="+ wfName + " no pods found with label workflows.argoproj.io/node-name=" + nodeName + " in namespace " + ns)
} }
for _, pod := range pods.Items { for _, pod := range pods.Items {
if pod.Annotations["workflows.argoproj.io/node-name"] == nodeName { if pod.Annotations["workflows.argoproj.io/node-name"] == nodeName {
targetPod = pod targetPod = pod
@ -188,8 +172,7 @@ func (k *KubernetesTools) testPodReady(pod v1.Pod, ns string) {
var initialized bool var initialized bool
for _, cond := range pod.Status.Conditions { for _, cond := range pod.Status.Conditions {
// It seems that for remote pods the pod gets the Succeeded status before it has time to report that it is ready to run in .status.conditions, so we added the OR condition if cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue {
if (cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue) || pod.Status.Phase == v1.PodSucceeded {
initialized = true initialized = true
return return
} }

View File

@ -17,13 +17,11 @@ var (
onceLogger sync.Once onceLogger sync.Once
onceWF sync.Once onceWF sync.Once
) )
func GetExecution(exec_id string) *workflow_execution.WorkflowExecution { func GetExecution(exec_id string) *workflow_execution.WorkflowExecution {
res := oclib.NewRequest(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), "", conf.GetConfig().PeerID, []string{}, nil).LoadOne(exec_id) res := oclib.NewRequest(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), "", conf.GetConfig().PeerID, []string{}, nil).LoadOne(exec_id)
if res.Code != 200 { if res.Code != 200 {
logger := oclib.GetLogger() logger := oclib.GetLogger()
logger.Error().Msg("Error retrieving execution " + exec_id) logger.Error().Msg("Could not retrieve workflow ID from execution ID " + exec_id)
logger.Error().Msg(res.Err)
return nil return nil
} }
return res.ToWorkflowExecution() return res.ToWorkflowExecution()

View File

@ -4,7 +4,6 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"net/http" "net/http"
"oc-monitord/utils"
"slices" "slices"
"time" "time"
@ -22,7 +21,7 @@ type AdmiraltySetter struct {
func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID string) error { func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID string) error {
logger := logs.GetLogger() logger = logs.GetLogger()
data := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"",localPeerID,nil,nil).LoadOne(remotePeerID) data := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"",localPeerID,nil,nil).LoadOne(remotePeerID)
if data.Code != 200 { if data.Code != 200 {
@ -31,42 +30,42 @@ func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID st
} }
remotePeer := data.ToPeer() remotePeer := data.ToPeer()
data = oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"",localPeerID,nil,nil).LoadOne(localPeerID) data = oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"",localPeerID,nil,nil).LoadOne(localPeerID)
if data.Code != 200 { if data.Code != 200 {
logger.Error().Msg("Error while trying to instantiate local peer " + remotePeerID) logger.Error().Msg("Error while trying to instantiate local peer " + remotePeerID)
return fmt.Errorf(data.Err) return fmt.Errorf(data.Err)
} }
localPeer := data.ToPeer() localPeer := data.ToPeer()
caller := tools.NewHTTPCaller( caller := tools.NewHTTPCaller(
map[tools.DataType]map[tools.METHOD]string{ map[tools.DataType]map[tools.METHOD]string{
tools.ADMIRALTY_SOURCE: { tools.ADMIRALTY_SOURCE: map[tools.METHOD]string{
tools.POST :"/:id", tools.POST :"/:id",
},
tools.ADMIRALTY_KUBECONFIG: {
tools.GET:"/:id",
},
tools.ADMIRALTY_SECRET: {
tools.POST:"/:id/" + remotePeerID,
},
tools.ADMIRALTY_TARGET: {
tools.POST:"/:id/" + remotePeerID,
},
tools.ADMIRALTY_NODES: {
tools.GET:"/:id/" + remotePeerID,
},
}, },
) tools.ADMIRALTY_KUBECONFIG: map[tools.METHOD]string{
tools.GET:"/:id",
logger.Info().Msg("\n\n Creating the Admiralty Source on " + remotePeerID + " ns-" + s.Id) },
s.callRemoteExecution(remotePeer, []int{http.StatusCreated, http.StatusConflict},caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil, true) tools.ADMIRALTY_SECRET: map[tools.METHOD]string{
logger.Info().Msg("\n\n Retrieving kubeconfig with the secret on " + remotePeerID + " ns-" + s.Id) tools.POST:"/:id",
},
tools.ADMIRALTY_TARGET: map[tools.METHOD]string{
tools.POST:"/:id",
},
tools.ADMIRALTY_NODES: map[tools.METHOD]string{
tools.GET:"/:id",
},
},
)
logger.Info().Msg(" Creating the Admiralty Source on " + remotePeerID + " ns-" + s.Id + "\n\n")
_ = s.callRemoteExecution(remotePeer, []int{http.StatusCreated, http.StatusConflict},caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil, true)
logger.Info().Msg(" Retrieving kubeconfig with the secret on " + remotePeerID + " ns-" + s.Id + "\n\n")
kubeconfig := s.getKubeconfig(remotePeer, caller) kubeconfig := s.getKubeconfig(remotePeer, caller)
logger.Info().Msg("\n\n Creating a secret from the kubeconfig " + localPeerID + " ns-" + s.Id) logger.Info().Msg(" Creating a secret from the kubeconfig " + localPeerID + " ns-" + s.Id + "\n\n")
s.callRemoteExecution(localPeer, []int{http.StatusCreated}, caller,s.Id, tools.ADMIRALTY_SECRET, tools.POST,kubeconfig, true) _ = s.callRemoteExecution(localPeer, []int{http.StatusCreated}, caller,s.Id, tools.ADMIRALTY_SECRET, tools.POST,kubeconfig, true)
logger.Info().Msg("\n\n Creating the Admiralty Target on " + localPeerID + " in namespace " + s.Id ) logger.Info().Msg(" Creating the Admiralty Target on " + localPeerID + " ns-" + s.Id + "\n\n")
s.callRemoteExecution(localPeer,[]int{http.StatusCreated, http.StatusConflict},caller,s.Id,tools.ADMIRALTY_TARGET,tools.POST, nil, true) _ = s.callRemoteExecution(localPeer,[]int{http.StatusCreated, http.StatusConflict},caller,s.Id,tools.ADMIRALTY_TARGET,tools.POST, nil, true)
logger.Info().Msg("\n\n Checking for the creation of the admiralty node on " + localPeerID + " ns-" + s.Id) logger.Info().Msg(" Checking for the creation of the admiralty node on " + localPeerID + " ns-" + s.Id + "\n\n")
s.checkNodeStatus(localPeer,caller) s.checkNodeStatus(localPeer,caller)
return nil return nil
@ -74,41 +73,40 @@ func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID st
func (s *AdmiraltySetter) getKubeconfig(peer *peer.Peer, caller *tools.HTTPCaller) map[string]string { func (s *AdmiraltySetter) getKubeconfig(peer *peer.Peer, caller *tools.HTTPCaller) map[string]string {
var kubedata map[string]string var kubedata map[string]string
s.callRemoteExecution(peer, []int{http.StatusOK}, caller, s.Id, tools.ADMIRALTY_KUBECONFIG, tools.GET, nil, true) _ = s.callRemoteExecution(peer, []int{http.StatusOK}, caller, s.Id, tools.ADMIRALTY_KUBECONFIG, tools.GET, nil, true)
if caller.LastResults["body"] == nil || len(caller.LastResults["body"].([]byte)) == 0 { if caller.LastResults["body"] == nil || len(caller.LastResults["body"].([]byte)) == 0 {
l := utils.GetLogger() fmt.Println("Something went wrong when retrieving data from Get call for kubeconfig")
l.Error().Msg("Something went wrong when retrieving data from Get call for kubeconfig")
panic(0) panic(0)
} }
err := json.Unmarshal(caller.LastResults["body"].([]byte), &kubedata) err := json.Unmarshal(caller.LastResults["body"].([]byte), &kubedata)
if err != nil { if err != nil {
l := utils.GetLogger() fmt.Println("Something went wrong when unmarshalling data from Get call for kubeconfig")
l.Error().Msg("Something went wrong when unmarshalling data from Get call for kubeconfig")
panic(0) panic(0)
} }
return kubedata return kubedata
} }
func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode []int,caller *tools.HTTPCaller, dataID string, dt tools.DataType, method tools.METHOD, body interface{}, panicCode bool) { func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode []int,caller *tools.HTTPCaller, dataID string, dt tools.DataType, method tools.METHOD, body interface{}, panicCode bool) *peer.PeerExecution {
l := utils.GetLogger() resp, err := peer.LaunchPeerExecution(peer.UUID, dataID, dt, method, body, caller)
_, err := peer.LaunchPeerExecution(peer.UUID, dataID, dt, method, body, caller)
if err != nil { if err != nil {
l.Error().Msg("Error when executing on peer at" + peer.Url) fmt.Println("Error when executing on peer at", peer.Url)
l.Error().Msg(err.Error()) fmt.Println(err)
panic(0) panic(0)
} }
if !slices.Contains(expectedCode, caller.LastResults["code"].(int)) { if !slices.Contains(expectedCode, caller.LastResults["code"].(int)) {
l.Error().Msg(fmt.Sprint("Didn't receive the expected code :", caller.LastResults["code"], "when expecting", expectedCode)) fmt.Println("Didn't receive the expected code :", caller.LastResults["code"], "when expecting", expectedCode)
if _, ok := caller.LastResults["body"]; ok { if _, ok := caller.LastResults["body"]; ok {
l.Info().Msg(string(caller.LastResults["body"].([]byte))) logger.Info().Msg(string(caller.LastResults["body"].([]byte)))
// fmt.Println(string(caller.LastResults["body"].([]byte)))
} }
if panicCode { if panicCode {
panic(0) panic(0)
} }
} }
return resp
} }
func (s *AdmiraltySetter) storeNodeName(caller *tools.HTTPCaller){ func (s *AdmiraltySetter) storeNodeName(caller *tools.HTTPCaller){
@ -122,16 +120,15 @@ func (s *AdmiraltySetter) storeNodeName(caller *tools.HTTPCaller){
name := metadata.(map[string]interface{})["name"].(string) name := metadata.(map[string]interface{})["name"].(string)
s.NodeName = name s.NodeName = name
} else { } else {
l := utils.GetLogger() fmt.Println("Could not retrieve data about the recently created node")
l.Error().Msg("Could not retrieve data about the recently created node")
panic(0) panic(0)
} }
} }
func (s *AdmiraltySetter) checkNodeStatus(localPeer *peer.Peer, caller *tools.HTTPCaller){ func (s *AdmiraltySetter) checkNodeStatus(localPeer *peer.Peer, caller *tools.HTTPCaller){
for i := range(5) { for i := range(5) {
time.Sleep(10 * time.Second) // let some time for kube to generate the node time.Sleep(5 * time.Second) // let some time for kube to generate the node
s.callRemoteExecution(localPeer,[]int{http.StatusOK},caller,s.Id,tools.ADMIRALTY_NODES,tools.GET, nil, false) _ = s.callRemoteExecution(localPeer,[]int{http.StatusOK},caller,s.Id,tools.ADMIRALTY_NODES,tools.GET, nil, false)
if caller.LastResults["code"] == 200 { if caller.LastResults["code"] == 200 {
s.storeNodeName(caller) s.storeNodeName(caller)
return return

View File

@ -8,7 +8,6 @@ import (
"fmt" "fmt"
"oc-monitord/conf" "oc-monitord/conf"
. "oc-monitord/models" . "oc-monitord/models"
tools2 "oc-monitord/tools" tools2 "oc-monitord/tools"
"os" "os"
"strings" "strings"
@ -19,7 +18,6 @@ import (
"cloud.o-forge.io/core/oc-lib/models/common/enum" "cloud.o-forge.io/core/oc-lib/models/common/enum"
"cloud.o-forge.io/core/oc-lib/models/resources" "cloud.o-forge.io/core/oc-lib/models/resources"
w "cloud.o-forge.io/core/oc-lib/models/workflow" w "cloud.o-forge.io/core/oc-lib/models/workflow"
"cloud.o-forge.io/core/oc-lib/models/workflow/graph"
"github.com/nwtgck/go-fakelish" "github.com/nwtgck/go-fakelish"
"github.com/rs/zerolog" "github.com/rs/zerolog"
"gopkg.in/yaml.v3" "gopkg.in/yaml.v3"
@ -28,11 +26,11 @@ import (
var logger zerolog.Logger var logger zerolog.Logger
type ArgoBuilder struct { type ArgoBuilder struct {
OriginWorkflow *w.Workflow OriginWorkflow *w.Workflow
Workflow Workflow Workflow Workflow
Services []*Service Services []*Service
Timeout int Timeout int
RemotePeers []string RemotePeers []string
} }
type Workflow struct { type Workflow struct {
@ -55,7 +53,7 @@ func (b *Workflow) getDag() *Dag {
} }
type Spec struct { type Spec struct {
ServiceAccountName string `yaml:"serviceAccountName,omitempty"` ServiceAccountName string `yaml:"serviceAccountName"`
Entrypoint string `yaml:"entrypoint"` Entrypoint string `yaml:"entrypoint"`
Arguments []Parameter `yaml:"arguments,omitempty"` Arguments []Parameter `yaml:"arguments,omitempty"`
Volumes []VolumeClaimTemplate `yaml:"volumeClaimTemplates,omitempty"` Volumes []VolumeClaimTemplate `yaml:"volumeClaimTemplates,omitempty"`
@ -67,7 +65,7 @@ type Spec struct {
// add s3, gcs, azure, etc if needed on a link between processing and storage // add s3, gcs, azure, etc if needed on a link between processing and storage
func (b *ArgoBuilder) CreateDAG(namespace string, write bool) ( int, []string, []string, error) { func (b *ArgoBuilder) CreateDAG(namespace string, write bool) ( int, []string, []string, error) {
logger = logs.GetLogger() logger = logs.GetLogger()
logger.Info().Msg(fmt.Sprint("Creating DAG ", b.OriginWorkflow.Graph.Items)) fmt.Println("Creating DAG", b.OriginWorkflow.Graph.Items)
// handle services by checking if there is only one processing with hostname and port // handle services by checking if there is only one processing with hostname and port
firstItems, lastItems, volumes := b.createTemplates(namespace) firstItems, lastItems, volumes := b.createTemplates(namespace)
b.createVolumes(volumes) b.createVolumes(volumes)
@ -75,7 +73,7 @@ func (b *ArgoBuilder) CreateDAG(namespace string, write bool) ( int, []string, [
if b.Timeout > 0 { if b.Timeout > 0 {
b.Workflow.Spec.Timeout = b.Timeout b.Workflow.Spec.Timeout = b.Timeout
} }
b.Workflow.Spec.ServiceAccountName = "sa-" + namespace b.Workflow.Spec.ServiceAccountName = "sa-"+namespace
b.Workflow.Spec.Entrypoint = "dag" b.Workflow.Spec.Entrypoint = "dag"
b.Workflow.ApiVersion = "argoproj.io/v1alpha1" b.Workflow.ApiVersion = "argoproj.io/v1alpha1"
b.Workflow.Kind = "Workflow" b.Workflow.Kind = "Workflow"
@ -86,16 +84,16 @@ func (b *ArgoBuilder) CreateDAG(namespace string, write bool) ( int, []string, [
return len(b.Workflow.getDag().Tasks), firstItems, lastItems, nil return len(b.Workflow.getDag().Tasks), firstItems, lastItems, nil
} }
func (b *ArgoBuilder) createTemplates(namespace string) ([]string, []string, []VolumeMount) { func (b *ArgoBuilder) createTemplates(namespace string) ([]string, []string, []VolumeMount) {
volumes := []VolumeMount{} volumes := []VolumeMount{}
firstItems := []string{} firstItems := []string{}
lastItems := []string{} lastItems := []string{}
items := b.OriginWorkflow.GetGraphItems(b.OriginWorkflow.Graph.IsProcessing) items := b.OriginWorkflow.GetGraphItems(b.OriginWorkflow.Graph.IsProcessing)
logger.Info().Msg(fmt.Sprint("Creating templates", len(items))) fmt.Println("Creating templates", len(items))
for _, item := range b.OriginWorkflow.GetGraphItems(b.OriginWorkflow.Graph.IsProcessing) { for _, item := range b.OriginWorkflow.GetGraphItems(b.OriginWorkflow.Graph.IsProcessing) {
instance := item.Processing.GetSelectedInstance() instance := item.Processing.GetSelectedInstance()
logger.Info().Msg(fmt.Sprint("Creating template for", item.Processing.GetName(), instance)) fmt.Println("Creating template for", item.Processing.GetName(), instance)
if instance == nil || instance.(*resources.ProcessingInstance).Access == nil && instance.(*resources.ProcessingInstance).Access.Container != nil { if instance == nil || instance.(*resources.ProcessingInstance).Access == nil && instance.(*resources.ProcessingInstance).Access.Container != nil {
logger.Error().Msg("Not enough configuration setup, template can't be created : " + item.Processing.GetName()) logger.Error().Msg("Not enough configuration setup, template can't be created : " + item.Processing.GetName())
return firstItems, lastItems, volumes return firstItems, lastItems, volumes
@ -178,12 +176,10 @@ func (b *ArgoBuilder) createArgoTemplates(namespace string,
lastItems []string) ([]VolumeMount, []string, []string) { lastItems []string) ([]VolumeMount, []string, []string) {
_, firstItems, lastItems = b.addTaskToArgo(b.Workflow.getDag(), id, processing, firstItems, lastItems) _, firstItems, lastItems = b.addTaskToArgo(b.Workflow.getDag(), id, processing, firstItems, lastItems)
template := &Template{Name: getArgoName(processing.GetName(), id)} template := &Template{Name: getArgoName(processing.GetName(), id)}
logger.Info().Msg(fmt.Sprint("Creating template for", template.Name)) fmt.Println("Creating template for", template.Name)
isReparted, peerId := b.isProcessingReparted(*processing, id) isReparted, peerId := b.isProcessingReparted(*processing,id)
template.CreateContainer(processing, b.Workflow.getDag()) template.CreateContainer(processing, b.Workflow.getDag())
if isReparted { if isReparted {
logger.Debug().Msg("Reparted processing, on " + peerId)
b.RemotePeers = append(b.RemotePeers, peerId) b.RemotePeers = append(b.RemotePeers, peerId)
template.AddAdmiraltyAnnotations(peerId) template.AddAdmiraltyAnnotations(peerId)
} }
@ -193,38 +189,59 @@ func (b *ArgoBuilder) createArgoTemplates(namespace string,
template.Metadata.Labels = make(map[string]string) template.Metadata.Labels = make(map[string]string)
template.Metadata.Labels["app"] = "oc-service-" + processing.GetName() // Construct the template for the k8s service and add a link in graph between k8s service and processing template.Metadata.Labels["app"] = "oc-service-" + processing.GetName() // Construct the template for the k8s service and add a link in graph between k8s service and processing
} }
related := b.OriginWorkflow.GetByRelatedProcessing(id, b.OriginWorkflow.Graph.IsStorage)
volumes = b.addStorageAnnotations(id, template, namespace, volumes)
b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, *template)
return volumes, firstItems, lastItems
}
func (b *ArgoBuilder) addStorageAnnotations(id string, template *Template, namespace string, volumes []VolumeMount) []VolumeMount {
related := b.OriginWorkflow.GetByRelatedProcessing(id, b.OriginWorkflow.Graph.IsStorage) // Retrieve all of the storage node linked to the processing for which we create the template
for _, r := range related { for _, r := range related {
storage := r.Node.(*resources.StorageResource) storage := r.Node.(*resources.StorageResource)
for _, linkToStorage := range r.Links { for _, linkToStorage := range r.Links {
for _, rw := range linkToStorage.StorageLinkInfos { for _, rw := range linkToStorage.StorageLinkInfos {
var art Artifact art := Artifact{Path: template.ReplacePerEnv(rw.Source, linkToStorage.Env)}
artifactBaseName := strings.Join(strings.Split(storage.GetName(), " "), "-") + "-" + strings.Replace(rw.FileName, ".", "-", -1) // Parameter/Artifact name must consist of alpha-numeric characters, '_' or '-'
if rw.Write { if rw.Write {
art = Artifact{Path: template.ReplacePerEnv(rw.Source, linkToStorage.Env)} // When we are writing to the s3 the Path element is the path to the file in the pod art.Name = storage.GetName() + "-" + rw.Destination + "-input-write"
art.Name = artifactBaseName + "-input-write"
} else { } else {
art = Artifact{Path: template.ReplacePerEnv(rw.Destination+"/"+rw.FileName, linkToStorage.Env)} // When we are reading from the s3 the Path element in pod should be the destination of the file art.Name = storage.GetName() + "-" + rw.Destination + "-input-read"
art.Name = artifactBaseName + "-input-read"
} }
if storage.StorageType == enum.S3 { if storage.StorageType == enum.S3 {
art.S3 = &Key{
b.addS3annotations(&art, template, rw, linkToStorage, storage, namespace) Key: template.ReplacePerEnv(rw.Destination+"/"+rw.FileName, linkToStorage.Env),
Insecure: true, // temporary
}
sel := storage.GetSelectedInstance()
if sel != nil {
if sel.(*resources.StorageResourceInstance).Credentials != nil {
tool, err := tools2.NewService(conf.GetConfig().Mode)
if err != nil || tool == nil {
logger.Error().Msg("Could not create the access secret")
} else {
id, err := tool.CreateAccessSecret(namespace,
sel.(*resources.StorageResourceInstance).Credentials.Login,
sel.(*resources.StorageResourceInstance).Credentials.Pass)
if err == nil {
art.S3.AccessKeySecret = &Secret{
Name: id,
Key: "access-key",
}
art.S3.SecretKeySecret = &Secret{
Name: id,
Key: "secret-key",
}
}
}
}
art.S3.Key = strings.ReplaceAll(art.S3.Key, sel.(*resources.StorageResourceInstance).Source+"/", "")
art.S3.Key = strings.ReplaceAll(art.S3.Key, sel.(*resources.StorageResourceInstance).Source, "")
splits := strings.Split(art.S3.EndPoint, "/")
if len(splits) > 1 {
art.S3.Bucket = splits[0]
art.S3.EndPoint = strings.Join(splits[1:], "/")
} else {
art.S3.Bucket = splits[0]
}
}
} }
if rw.Write { if rw.Write {
template.Outputs.Artifacts = append(template.Outputs.Artifacts, art) template.Outputs.Artifacts = append(template.Inputs.Artifacts, art)
} else { } else {
template.Inputs.Artifacts = append(template.Inputs.Artifacts, art) template.Inputs.Artifacts = append(template.Outputs.Artifacts, art)
} }
} }
} }
@ -241,99 +258,14 @@ func (b *ArgoBuilder) addStorageAnnotations(id string, template *Template, names
}, volumes) }, volumes)
} }
} }
return volumes b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, *template)
} return volumes, firstItems, lastItems
func (b *ArgoBuilder) addS3annotations(art *Artifact, template *Template, rw graph.StorageProcessingGraphLink, linkToStorage graph.GraphLink, storage *resources.StorageResource, namespace string) {
art.S3 = &Key{
// Key: template.ReplacePerEnv(rw.Destination+"/"+rw.FileName, linkToStorage.Env),
Insecure: true, // temporary
}
if rw.Write {
art.S3.Key = rw.Destination + "/" + rw.FileName
} else {
art.S3.Key = rw.Source
}
sel := storage.GetSelectedInstance()
// v0.1 : add the storage.Source to the s3 object
// v0.2 : test if the storage.Source exists in the configMap and quit if not
// v1 : v0.2 + if doesn't exist edit/create the configMap with the response from API call
if sel != nil {
b.addAuthInformation(storage, namespace, art)
art.S3.Bucket = namespace // DEFAULT : will need to update this to create an unique
art.S3.EndPoint = sel.(*resources.StorageResourceInstance).Source
}
}
func (b *ArgoBuilder) addAuthInformation(storage *resources.StorageResource, namespace string, art *Artifact) {
sel := storage.GetSelectedInstance()
tool, err := tools2.NewService(conf.GetConfig().Mode)
if err != nil || tool == nil {
logger.Fatal().Msg("Could not create the access secret :" + err.Error())
}
secretName, err := b.SetupS3Credentials(storage, namespace, tool) // this method return should be updated once we have decided how to retrieve credentials
if err == nil {
art.S3.AccessKeySecret = &Secret{
Name: secretName,
Key: "access-key",
}
art.S3.SecretKeySecret = &Secret{
Name: secretName,
Key: "secret-key",
}
}
art.S3.Key = strings.ReplaceAll(art.S3.Key, sel.(*resources.StorageResourceInstance).Source+"/", "")
art.S3.Key = strings.ReplaceAll(art.S3.Key, sel.(*resources.StorageResourceInstance).Source, "")
splits := strings.Split(art.S3.EndPoint, "/")
if len(splits) > 1 {
art.S3.Bucket = splits[0]
art.S3.EndPoint = strings.Join(splits[1:], "/")
} else {
art.S3.Bucket = splits[0]
}
}
func (b *ArgoBuilder) SetupS3Credentials(storage *resources.StorageResource, namespace string, tool tools2.Tool) (string, error) {
s := tool.GetS3Secret(storage.UUID, namespace)
// var s *v1.Secret
accessKey, secretKey := retrieveMinioCredential("peer",namespace)
if s == nil {
id, err := tool.CreateAccessSecret(
accessKey,
secretKey,
storage.UUID,
namespace,
)
if err != nil {
l := oclib.GetLogger()
l.Fatal().Msg("Error when creating the secret holding credentials for S3 access in " + namespace + " : " + err.Error())
}
return id, nil
}
// s.Name = "toto"
return s.Name, nil
}
// This method needs to evolve to an API call to the peer passed as a parameter
func retrieveMinioCredential(peer string, namespace string) (string,string) {
return "hF9wRGog75JuMdshWeEZ", "OwXXJkVQyb5l1aVPdOegKOtDJGoP1dJYeo8O7mDW"
} }
func (b *ArgoBuilder) addTaskToArgo(dag *Dag, graphItemID string, processing *resources.ProcessingResource, func (b *ArgoBuilder) addTaskToArgo(dag *Dag, graphItemID string, processing *resources.ProcessingResource,
firstItems []string, lastItems []string) (*Dag, []string, []string) { firstItems []string, lastItems []string) (*Dag, []string, []string) {
unique_name := getArgoName(processing.GetName(), graphItemID) unique_name := getArgoName(processing.GetName(), graphItemID)
step := Task{Name: unique_name, Template: unique_name} step := Task{Name: unique_name, Template: unique_name}
instance := processing.GetSelectedInstance() instance := processing.GetSelectedInstance()
if instance != nil { if instance != nil {
for _, value := range instance.(*resources.ProcessingInstance).Env { for _, value := range instance.(*resources.ProcessingInstance).Env {
@ -393,7 +325,7 @@ func (b *ArgoBuilder) isArgoDependancy(id string) (bool, []string) {
isDeps := false isDeps := false
for _, link := range b.OriginWorkflow.Graph.Links { for _, link := range b.OriginWorkflow.Graph.Links {
if _, ok := b.OriginWorkflow.Graph.Items[link.Destination.ID]; !ok { if _, ok := b.OriginWorkflow.Graph.Items[link.Destination.ID]; !ok {
logger.Info().Msg(fmt.Sprint("Could not find the source of the link", link.Destination.ID)) fmt.Println("Could not find the source of the link", link.Destination.ID)
continue continue
} }
source := b.OriginWorkflow.Graph.Items[link.Destination.ID].Processing source := b.OriginWorkflow.Graph.Items[link.Destination.ID].Processing
@ -413,7 +345,7 @@ func (b *ArgoBuilder) isArgoDependancy(id string) (bool, []string) {
func (b *ArgoBuilder) getArgoDependencies(id string) (dependencies []string) { func (b *ArgoBuilder) getArgoDependencies(id string) (dependencies []string) {
for _, link := range b.OriginWorkflow.Graph.Links { for _, link := range b.OriginWorkflow.Graph.Links {
if _, ok := b.OriginWorkflow.Graph.Items[link.Source.ID]; !ok { if _, ok := b.OriginWorkflow.Graph.Items[link.Source.ID]; !ok {
logger.Info().Msg(fmt.Sprint("Could not find the source of the link", link.Source.ID)) fmt.Println("Could not find the source of the link", link.Source.ID)
continue continue
} }
source := b.OriginWorkflow.Graph.Items[link.Source.ID].Processing source := b.OriginWorkflow.Graph.Items[link.Source.ID].Processing
@ -435,19 +367,20 @@ func getArgoName(raw_name string, component_id string) (formatedName string) {
// Verify if a processing resource is attached to another Compute than the one hosting // Verify if a processing resource is attached to another Compute than the one hosting
// the current Open Cloud instance. If true return the peer ID to contact // the current Open Cloud instance. If true return the peer ID to contact
func (b *ArgoBuilder) isProcessingReparted(processing resources.ProcessingResource, graphID string) (bool, string) { func (b *ArgoBuilder) isProcessingReparted(processing resources.ProcessingResource, graphID string) (bool,string) {
computeAttached := b.retrieveProcessingCompute(graphID) computeAttached := b.retrieveProcessingCompute(graphID)
if computeAttached == nil { if computeAttached == nil {
logger.Error().Msg("No compute was found attached to processing " + processing.Name + " : " + processing.UUID) logger.Error().Msg("No compute was found attached to processing " + processing.Name + " : " + processing.UUID )
panic(0) panic(0)
} }
// Creates an accessor strictly for Peer Collection
req := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER), "", "", nil, nil) // Creates an accessor strictly for Peer Collection
req := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"","",nil,nil)
if req == nil { if req == nil {
fmt.Println("TODO : handle error when trying to create a request on the Peer Collection") fmt.Println("TODO : handle error when trying to create a request on the Peer Collection")
return false, "" return false, ""
} }
res := req.LoadOne(computeAttached.CreatorID) res := req.LoadOne(computeAttached.CreatorID)
if res.Err != "" { if res.Err != "" {
@ -455,25 +388,25 @@ func (b *ArgoBuilder) isProcessingReparted(processing resources.ProcessingResour
fmt.Print(res.Err) fmt.Print(res.Err)
return false, "" return false, ""
} }
peer := *res.ToPeer() peer := *res.ToPeer()
isNotReparted := peer.State == 1 isNotReparted, _ := peer.IsMySelf()
logger.Info().Msg(fmt.Sprint("Result IsMySelf for ", peer.UUID, " : ", isNotReparted)) fmt.Println("Result IsMySelf for ", peer.UUID ," : ", isNotReparted)
return !isNotReparted, peer.UUID return !isNotReparted, peer.UUID
} }
func (b *ArgoBuilder) retrieveProcessingCompute(graphID string) *resources.ComputeResource { func (b *ArgoBuilder) retrieveProcessingCompute(graphID string) *resources.ComputeResource {
for _, link := range b.OriginWorkflow.Graph.Links { for _, link := range b.OriginWorkflow.Graph.Links {
// If a link contains the id of the processing // If a link contains the id of the processing
var oppositeId string var oppositeId string
if link.Source.ID == graphID { if link.Source.ID == graphID{
oppositeId = link.Destination.ID oppositeId = link.Destination.ID
} else if link.Destination.ID == graphID { } else if(link.Destination.ID == graphID){
oppositeId = link.Source.ID oppositeId = link.Source.ID
} }
fmt.Println("OppositeId : ", oppositeId)
if oppositeId != "" { if oppositeId != "" {
dt, res := b.OriginWorkflow.Graph.GetResource(oppositeId) dt, res := b.OriginWorkflow.Graph.GetResource(oppositeId)
if dt == oclib.COMPUTE_RESOURCE { if dt == oclib.COMPUTE_RESOURCE {
@ -484,25 +417,27 @@ func (b *ArgoBuilder) retrieveProcessingCompute(graphID string) *resources.Compu
} }
} }
return nil return nil
} }
// Execute the last actions once the YAML file for the Argo Workflow is created // Execute the last actions once the YAML file for the Argo Workflow is created
func (b *ArgoBuilder) CompleteBuild(executionsId string) (string, error) { func (b *ArgoBuilder) CompleteBuild(executionsId string) (string, error) {
logger.Info().Msg(fmt.Sprint("DEV :: Completing build")) fmt.Println("DEV :: Completing build")
setter := AdmiraltySetter{Id: executionsId} setter := AdmiraltySetter{Id: executionsId}
// Setup admiralty for each node // Setup admiralty for each node
for _, peer := range b.RemotePeers { for _, peer := range b.RemotePeers {
logger.Info().Msg(fmt.Sprint("DEV :: Launching Admiralty Setup for ", peer)) fmt.Println("DEV :: Launching Admiralty Setup for ", peer)
setter.InitializeAdmiralty(conf.GetConfig().PeerID,peer) setter.InitializeAdmiralty(conf.GetConfig().PeerID,peer)
} }
// Update the name of the admiralty node to use // Update the name of the admiralty node to use
for _, template := range b.Workflow.Spec.Templates { for _, template := range b.Workflow.Spec.Templates {
if len(template.Metadata.Annotations) > 0 { if len(template.Metadata.Annotations) > 0 {
if peerId, ok := template.Metadata.Annotations["multicluster.admiralty.io/clustername"]; ok { if resp, ok := template.Metadata.Annotations["multicluster.admiralty.io/clustername"]; ok {
template.Metadata.Annotations["multicluster.admiralty.io/clustername"] = "target-" + oclib.GetConcatenatedName(peerId, executionsId) fmt.Println(resp)
template.Metadata.Annotations["multicluster.admiralty.io/clustername"] = "target-" + conf.GetConfig().ExecutionID
} }
} }
} }
@ -528,4 +463,4 @@ func (b *ArgoBuilder) CompleteBuild(executionsId string) (string, error) {
} }
return workflows_dir + file_name, nil return workflows_dir + file_name, nil
} }

View File

@ -14,7 +14,7 @@ type WorflowDB struct {
// Create the objects from the mxgraphxml stored in the workflow given as a parameter // Create the objects from the mxgraphxml stored in the workflow given as a parameter
func (w *WorflowDB) LoadFrom(workflow_id string, peerID string) error { func (w *WorflowDB) LoadFrom(workflow_id string, peerID string) error {
logger.Info().Msg("Loading workflow from " + workflow_id) fmt.Println("Loading workflow from " + workflow_id)
var err error var err error
if w.Workflow, err = w.getWorkflow(workflow_id, peerID); err != nil { if w.Workflow, err = w.getWorkflow(workflow_id, peerID); err != nil {
return err return err
@ -27,7 +27,7 @@ func (w *WorflowDB) getWorkflow(workflow_id string, peerID string) (workflow *wo
logger := oclib.GetLogger() logger := oclib.GetLogger()
lib_data := oclib.NewRequest(oclib.LibDataEnum(oclib.WORKFLOW), "", peerID, []string{}, nil).LoadOne(workflow_id) lib_data := oclib.NewRequest(oclib.LibDataEnum(oclib.WORKFLOW), "", peerID, []string{}, nil).LoadOne(workflow_id)
logger.Info().Msg(fmt.Sprint("ERR", lib_data.Code, lib_data.Err)) fmt.Println("ERR", lib_data.Code, lib_data.Err)
if lib_data.Code != 200 { if lib_data.Code != 200 {
logger.Error().Msg("Error loading the graph") logger.Error().Msg("Error loading the graph")
return workflow, errors.New(lib_data.Err) return workflow, errors.New(lib_data.Err)
@ -43,7 +43,7 @@ func (w *WorflowDB) getWorkflow(workflow_id string, peerID string) (workflow *wo
func (w *WorflowDB) ExportToArgo(namespace string, timeout int) (*ArgoBuilder, int, error) { func (w *WorflowDB) ExportToArgo(namespace string, timeout int) (*ArgoBuilder, int, error) {
logger := oclib.GetLogger() logger := oclib.GetLogger()
logger.Info().Msg(fmt.Sprint("Exporting to Argo", w.Workflow)) fmt.Println("Exporting to Argo", w.Workflow)
if len(w.Workflow.Name) == 0 || w.Workflow.Graph == nil { if len(w.Workflow.Name) == 0 || w.Workflow.Graph == nil {
return nil, 0, fmt.Errorf("can't export a graph that has not been loaded yet") return nil, 0, fmt.Errorf("can't export a graph that has not been loaded yet")
} }