Compare commits
	
		
			37 Commits
		
	
	
		
			feature/ad
			...
			feature/mi
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
|  | d62a20ab39 | ||
|  | 6bacf44f2f | ||
|  | 0b652f5dfc | ||
|  | a50ede2688 | ||
|  | 722a42e36d | ||
|  | a46708842b | ||
|  | 1e4011d5b1 | ||
|  | f5e1707545 | ||
|  | cd6d5a24aa | ||
|  | 24bbe81638 | ||
|  | 7c913bec0e | ||
|  | bdbbd7697a | ||
|  | 6917295fbd | ||
|  | e1b0ad089c | ||
|  | 483f747754 | ||
|  | 03675d09ae | ||
|  | f3e84a4f43 | ||
|  | eae5474552 | ||
|  | bae9cb2011 | ||
|  | 65b8960703 | ||
|  | 90aa19caeb | ||
|  | dcb3e2b7cc | ||
|  | c871d68333 | ||
|  | 6cf5da787a | ||
|  | fa4db92c92 | ||
|  | ee94c1aa42 | ||
|  | c40b18f1d6 | ||
|  | 2932fb2710 | ||
|  | 2343a5329e | ||
|  | 86fa41a376 | ||
|  | 6ec7a670bd | ||
| 6323d4eed4 | |||
| 93f3806b86 | |||
|  | ade18f1042 | ||
| 83d118fb05 | |||
| f7f0c9c2d2 | |||
| aea7cbd41c | 
							
								
								
									
										4
									
								
								Makefile
									
									
									
									
									
								
							
							
						
						
									
										4
									
								
								Makefile
									
									
									
									
									
								
							| @@ -3,6 +3,8 @@ | ||||
| build: clean | ||||
| 	go build . | ||||
|  | ||||
| dev: build | ||||
|  | ||||
| run: | ||||
| 	./oc-monitord | ||||
|  | ||||
| @@ -22,4 +24,4 @@ publish-registry: | ||||
|  | ||||
| all: docker publish-kind publish-registry | ||||
|  | ||||
| .PHONY: build run clean docker publish-kind publish-registry | ||||
| .PHONY: build run clean docker publish-kind publish-registry | ||||
|   | ||||
							
								
								
									
										71
									
								
								README.md
									
									
									
									
									
								
							
							
						
						
									
										71
									
								
								README.md
									
									
									
									
									
								
							| @@ -1,52 +1,26 @@ | ||||
| # oc-monitor | ||||
|  | ||||
| ## Deploy in k8s (dev) | ||||
| DO : | ||||
|   make build | ||||
|  | ||||
| While a registry with all of the OC docker images has not been set-up we can export this image to k3s ctr | ||||
| ## Summary | ||||
|  | ||||
| > docker save oc-monitord:latest | sudo k3s ctr images import - | ||||
| oc-monitord is a daemon which can be run : | ||||
| - as a binary | ||||
| - as a container | ||||
|  | ||||
| Then in the pod manifest for oc-monitord use :  | ||||
|  | ||||
| ``` | ||||
| image: docker.io/library/oc-monitord | ||||
| imagePullPolicy: Never | ||||
| ``` | ||||
|  | ||||
| Not doing so will end up in the pod having a `ErrorImagePull` | ||||
|  | ||||
| ## Allow argo to create services | ||||
|  | ||||
| In order for monitord to expose **open cloud services** on the node, we need to give him permission to create **k8s services**. | ||||
|  | ||||
| For that we can update the RBAC configuration for a role already created by argo :  | ||||
|  | ||||
| ### Manually edit the rbac authorization  | ||||
|  | ||||
| > kubectl edit roles.rbac.authorization.k8s.io -n argo argo-role | ||||
|  | ||||
| In rules add a new entry :  | ||||
|  | ||||
| ``` | ||||
| - apiGroups: | ||||
|   - "" | ||||
|   resources:   | ||||
|   - services | ||||
|     verbs:                                                                                                                                                           | ||||
|     - get | ||||
|     - create | ||||
| ``` | ||||
|  | ||||
| ### Patch the rbac authorization with a one liner  | ||||
|  | ||||
| > kubectl patch role argo-role -n argo --type='json' -p='[{"op": "add", "path": "/rules/-", "value": {"apiGroups": [""], "resources": ["services"], "verbs": ["get","create"]}}]' | ||||
|  | ||||
| ### Check whether the modification is effective  | ||||
|  | ||||
| > kubectl auth can-i create services --as=system:serviceaccount:argo:argo -n argo | ||||
|  | ||||
| This command **must return "yes"** | ||||
| It is used to perform several actions regarding the execution of an Open Cloud workflow : | ||||
| - generating a YAML file that can be interpreted by **Argo Workflow** to create and execute pods in a kubernetes environment | ||||
| - setting up the different resources needed to execute a workflow over several peers/kubernetes nodes with **Admiralty** : token, secrets, targets and sources | ||||
| - creating the workflow and logging the output from  | ||||
|   - Argo watch, which gives information about the workflow in general (phase, number of steps executed, status...) | ||||
|   - Pods : which are the logs generated by the pods  | ||||
|  | ||||
| To execute, the daemon needs several options :  | ||||
| - **-u** :   | ||||
| - **-m** : | ||||
| - **-d** : | ||||
| - **-e** : | ||||
|  | ||||
| # Notes features/admiralty-docker | ||||
|  | ||||
| @@ -57,14 +31,17 @@ This command **must return "yes"** | ||||
|   - decide that no peer can have "http://localhost" as its url and use an attribute from the peer object or isMyself() from oc-lib if a peer is the current host. | ||||
|  | ||||
|  | ||||
| ## TODO | ||||
| ## TODO | ||||
|  | ||||
| - [ ] Allow the front to know on which IP the services are reachable | ||||
| - [ ] Allow the front to know on which IP the services are reachable | ||||
|   - currently doing it by using `kubectl get nodes -o wide` | ||||
|  | ||||
| - [ ] Implement writing and reading from S3 bucket/MinIO when a data resource is linked to a compute resource. | ||||
|  | ||||
| ### Adding ingress handling to support reverse proxying | ||||
|  | ||||
| ### Adding ingress handling to support reverse proxying | ||||
|  | ||||
| - Test whether ingress-nginx is running or not | ||||
|   - Do something if not found : stop running and send error log OR start installation | ||||
| -  | ||||
| -  | ||||
|  | ||||
|   | ||||
							
								
								
									
										
											BIN
										
									
								
								docs/admiralty_naming_multi_peer.jpg
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										
											BIN
										
									
								
								docs/admiralty_naming_multi_peer.jpg
									
									
									
									
									
										Normal file
									
								
							
										
											Binary file not shown.
										
									
								
							| After Width: | Height: | Size: 71 KiB | 
							
								
								
									
										
											BIN
										
									
								
								docs/admiralty_setup_schema.jpg
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										
											BIN
										
									
								
								docs/admiralty_setup_schema.jpg
									
									
									
									
									
										Normal file
									
								
							
										
											Binary file not shown.
										
									
								
							| After Width: | Height: | Size: 91 KiB | 
							
								
								
									
										4
									
								
								env.env
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										4
									
								
								env.env
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,4 @@ | ||||
| KUBERNETES_SERVICE_HOST=192.168.1.169 | ||||
| KUBE_CA="LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTWpNeE1USXdNell3SGhjTk1qUXdPREE0TVRBeE16VTJXaGNOTXpRd09EQTJNVEF4TXpVMgpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTWpNeE1USXdNell3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFTVlk3ZHZhNEdYTVdkMy9jMlhLN3JLYjlnWXgyNSthaEE0NmkyNVBkSFAKRktQL2UxSVMyWVF0dzNYZW1TTUQxaStZdzJSaVppNUQrSVZUamNtNHdhcnFvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVWtlUVJpNFJiODduME5yRnZaWjZHClc2SU55NnN3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUlnRXA5ck04WmdNclRZSHYxZjNzOW5DZXZZeWVVa3lZUk4KWjUzazdoaytJS1FDSVFDbk05TnVGKzlTakIzNDFacGZ5ays2NEpWdkpSM3BhcmVaejdMd2lhNm9kdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K" | ||||
| KUBE_CERT="LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrVENDQVRlZ0F3SUJBZ0lJWUxWNkFPQkdrU1F3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOekl6TVRFeU1ETTJNQjRYRFRJME1EZ3dPREV3TVRNMU5sb1hEVEkxTURndwpPREV3TVRNMU5sb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJGQ2Q1MFdPeWdlQ2syQzcKV2FrOWY4MVAvSkJieVRIajRWOXBsTEo0ck5HeHFtSjJOb2xROFYxdUx5RjBtOTQ2Nkc0RmRDQ2dqaXFVSk92Swp3NVRPNnd5alNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCVFJkOFI5cXVWK2pjeUVmL0ovT1hQSzMyS09XekFLQmdncWhrak9QUVFEQWdOSUFEQkYKQWlFQTArbThqTDBJVldvUTZ0dnB4cFo4NVlMalF1SmpwdXM0aDdnSXRxS3NmUVVDSUI2M2ZNdzFBMm5OVWU1TgpIUGZOcEQwSEtwcVN0Wnk4djIyVzliYlJUNklZCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlRENDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUzTWpNeE1USXdNell3SGhjTk1qUXdPREE0TVRBeE16VTJXaGNOTXpRd09EQTJNVEF4TXpVMgpXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUzTWpNeE1USXdNell3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFRc3hXWk9pbnIrcVp4TmFEQjVGMGsvTDF5cE01VHAxOFRaeU92ektJazQKRTFsZWVqUm9STW0zNmhPeVljbnN3d3JoNnhSUnBpMW5RdGhyMzg0S0Z6MlBvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVTBYZkVmYXJsZm8zTWhIL3lmemx6Cnl0OWlqbHN3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQUxJL2dNYnNMT3MvUUpJa3U2WHVpRVMwTEE2cEJHMXgKcnBlTnpGdlZOekZsQWlFQW1wdjBubjZqN3M0MVI0QzFNMEpSL0djNE53MHdldlFmZWdEVGF1R2p3cFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K" | ||||
| KUBE_DATA="LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSU5ZS1BFb1dhd1NKUzJlRW5oWmlYMk5VZlY1ZlhKV2krSVNnV09TNFE5VTlvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFVUozblJZN0tCNEtUWUx0WnFUMS96VS84a0Z2Sk1lUGhYMm1Vc25pczBiR3FZblkyaVZEeApYVzR2SVhTYjNqcm9iZ1YwSUtDT0twUWs2OHJEbE03ckRBPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=" | ||||
							
								
								
									
										37
									
								
								go.mod
									
									
									
									
									
								
							
							
						
						
									
										37
									
								
								go.mod
									
									
									
									
									
								
							| @@ -5,7 +5,7 @@ go 1.23.1 | ||||
| toolchain go1.23.3 | ||||
|  | ||||
| require ( | ||||
| 	cloud.o-forge.io/core/oc-lib v0.0.0-20250313155727-88c88cac5bc9 | ||||
| 	cloud.o-forge.io/core/oc-lib v0.0.0-20250715125819-e735f78e58c6 | ||||
| 	github.com/akamensky/argparse v1.4.0 | ||||
| 	github.com/google/uuid v1.6.0 | ||||
| 	github.com/goraz/onion v0.1.3 | ||||
| @@ -15,11 +15,12 @@ require ( | ||||
| ) | ||||
|  | ||||
| require ( | ||||
| 	github.com/beego/beego/v2 v2.3.7 // indirect | ||||
| 	github.com/go-playground/validator/v10 v10.26.0 // indirect | ||||
| 	github.com/beego/beego/v2 v2.3.8 // indirect | ||||
| 	github.com/go-playground/validator/v10 v10.27.0 // indirect | ||||
| 	github.com/golang/protobuf v1.5.4 // indirect | ||||
| 	github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect | ||||
| 	github.com/sirupsen/logrus v1.9.3 // indirect | ||||
| 	github.com/ugorji/go/codec v1.1.7 // indirect | ||||
| 	google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect | ||||
| 	google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect | ||||
| 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect | ||||
| @@ -27,7 +28,6 @@ require ( | ||||
| ) | ||||
|  | ||||
| require ( | ||||
| 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d | ||||
| 	github.com/argoproj/argo-workflows/v3 v3.6.4 | ||||
| 	github.com/beorn7/perks v1.0.1 // indirect | ||||
| 	github.com/biter777/countries v1.7.5 // indirect | ||||
| @@ -35,7 +35,7 @@ require ( | ||||
| 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect | ||||
| 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect | ||||
| 	github.com/fxamacker/cbor/v2 v2.7.0 // indirect | ||||
| 	github.com/gabriel-vasile/mimetype v1.4.8 // indirect | ||||
| 	github.com/gabriel-vasile/mimetype v1.4.9 // indirect | ||||
| 	github.com/go-logr/logr v1.4.2 // indirect | ||||
| 	github.com/go-openapi/jsonpointer v0.21.0 // indirect | ||||
| 	github.com/go-openapi/jsonreference v0.20.4 // indirect | ||||
| @@ -60,31 +60,30 @@ require ( | ||||
| 	github.com/modern-go/reflect2 v1.0.2 // indirect | ||||
| 	github.com/montanaflynn/stats v0.7.1 // indirect | ||||
| 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect | ||||
| 	github.com/nats-io/nats.go v1.41.0 // indirect | ||||
| 	github.com/nats-io/nkeys v0.4.10 // indirect | ||||
| 	github.com/nats-io/nats.go v1.43.0 // indirect | ||||
| 	github.com/nats-io/nkeys v0.4.11 // indirect | ||||
| 	github.com/nats-io/nuid v1.0.1 // indirect | ||||
| 	github.com/pkg/errors v0.9.1 // indirect | ||||
| 	github.com/prometheus/client_golang v1.22.0 // indirect | ||||
| 	github.com/prometheus/client_model v0.6.1 // indirect | ||||
| 	github.com/prometheus/common v0.63.0 // indirect | ||||
| 	github.com/prometheus/procfs v0.16.0 // indirect | ||||
| 	github.com/prometheus/client_model v0.6.2 // indirect | ||||
| 	github.com/prometheus/common v0.65.0 // indirect | ||||
| 	github.com/prometheus/procfs v0.17.0 // indirect | ||||
| 	github.com/robfig/cron v1.2.0 // indirect | ||||
| 	github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 // indirect | ||||
| 	github.com/smartystreets/goconvey v1.6.4 // indirect | ||||
| 	github.com/ugorji/go/codec v1.1.7 // indirect | ||||
| 	github.com/x448/float16 v0.8.4 // indirect | ||||
| 	github.com/xdg-go/pbkdf2 v1.0.0 // indirect | ||||
| 	github.com/xdg-go/scram v1.1.2 // indirect | ||||
| 	github.com/xdg-go/stringprep v1.0.4 // indirect | ||||
| 	github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect | ||||
| 	go.mongodb.org/mongo-driver v1.17.3 // indirect | ||||
| 	golang.org/x/crypto v0.37.0 // indirect | ||||
| 	golang.org/x/net v0.39.0 // indirect | ||||
| 	golang.org/x/oauth2 v0.25.0 // indirect | ||||
| 	golang.org/x/sync v0.13.0 // indirect | ||||
| 	golang.org/x/sys v0.32.0 // indirect | ||||
| 	golang.org/x/term v0.31.0 // indirect | ||||
| 	golang.org/x/text v0.24.0 // indirect | ||||
| 	go.mongodb.org/mongo-driver v1.17.4 // indirect | ||||
| 	golang.org/x/crypto v0.40.0 // indirect | ||||
| 	golang.org/x/net v0.42.0 // indirect | ||||
| 	golang.org/x/oauth2 v0.30.0 // indirect | ||||
| 	golang.org/x/sync v0.16.0 // indirect | ||||
| 	golang.org/x/sys v0.34.0 // indirect | ||||
| 	golang.org/x/term v0.33.0 // indirect | ||||
| 	golang.org/x/text v0.27.0 // indirect | ||||
| 	golang.org/x/time v0.7.0 // indirect | ||||
| 	google.golang.org/protobuf v1.36.6 // indirect | ||||
| 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect | ||||
|   | ||||
							
								
								
									
										96
									
								
								go.sum
									
									
									
									
									
								
							
							
						
						
									
										96
									
								
								go.sum
									
									
									
									
									
								
							| @@ -1,18 +1,22 @@ | ||||
| cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= | ||||
| cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20250313155727-88c88cac5bc9 h1:mSFFPwil5Ih+RPBvn88MBerQMtsoHnOuyCZQaf91a34= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20250313155727-88c88cac5bc9/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20250604083300-387785b40cb0 h1:iEm/Rf9I0OSCcncuFy61YOSZ3jdRlhJ/oLD97Pc2pCQ= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20250604083300-387785b40cb0/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20250620085001-583ca2fbacd5 h1:FEBwueVOOWKYf0tJuE0EKNIbjxmTyCMgkT4qATYsfbo= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20250620085001-583ca2fbacd5/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20250704084459-443546027b27 h1:iogk6pV3gybzQDBXMI6Qd/jvSA1h+3oRE+vLl1MRjew= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20250704084459-443546027b27/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20250715125819-e735f78e58c6 h1:Gnkv59Ntl2RebC5tNauXuxyRXLfZ2XAJ0+ujMyFte5U= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20250715125819-e735f78e58c6/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI= | ||||
| github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= | ||||
| github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= | ||||
| github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= | ||||
| github.com/akamensky/argparse v1.4.0 h1:YGzvsTqCvbEZhL8zZu2AiA5nq805NZh75JNj4ajn1xc= | ||||
| github.com/akamensky/argparse v1.4.0/go.mod h1:S5kwC7IuDcEr5VeXtGPRVZ5o/FdhcMlQz4IZQuw64xA= | ||||
| github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= | ||||
| github.com/argoproj/argo-workflows/v3 v3.6.4 h1:5+Cc1UwaQE5ka3w7R3hxZ1TK3M6VjDEXA5WSQ/IXrxY= | ||||
| github.com/argoproj/argo-workflows/v3 v3.6.4/go.mod h1:2f5zB8CkbNCCO1od+kd1dWkVokqcuyvu+tc+Jwx1MZg= | ||||
| github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= | ||||
| github.com/beego/beego/v2 v2.3.7 h1:z4btKtjU/rfp5BiYHkGD2QPjK9i1E9GH+I7vfhn6Agk= | ||||
| github.com/beego/beego/v2 v2.3.7/go.mod h1:5cqHsOHJIxkq44tBpRvtDe59GuVRVv/9/tyVDxd5ce4= | ||||
| github.com/beego/beego/v2 v2.3.8 h1:wplhB1pF4TxR+2SS4PUej8eDoH4xGfxuHfS7wAk9VBc= | ||||
| github.com/beego/beego/v2 v2.3.8/go.mod h1:8vl9+RrXqvodrl9C8yivX1e6le6deCK6RWeq8R7gTTg= | ||||
| github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= | ||||
| github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= | ||||
| github.com/biter777/countries v1.7.5 h1:MJ+n3+rSxWQdqVJU8eBy9RqcdH6ePPn4PJHocVWUa+Q= | ||||
| @@ -45,8 +49,8 @@ github.com/etcd-io/etcd v3.3.17+incompatible/go.mod h1:cdZ77EstHBwVtD6iTgzgvogwc | ||||
| github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= | ||||
| github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= | ||||
| github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= | ||||
| github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM= | ||||
| github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= | ||||
| github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY= | ||||
| github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok= | ||||
| github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= | ||||
| github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= | ||||
| github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= | ||||
| @@ -64,6 +68,8 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn | ||||
| github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= | ||||
| github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k= | ||||
| github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= | ||||
| github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4= | ||||
| github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= | ||||
| github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= | ||||
| github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= | ||||
| github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= | ||||
| @@ -149,10 +155,10 @@ github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8 | ||||
| github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= | ||||
| github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= | ||||
| github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= | ||||
| github.com/nats-io/nats.go v1.41.0 h1:PzxEva7fflkd+n87OtQTXqCTyLfIIMFJBpyccHLE2Ko= | ||||
| github.com/nats-io/nats.go v1.41.0/go.mod h1:wV73x0FSI/orHPSYoyMeJB+KajMDoWyXmFaRrrYaaTo= | ||||
| github.com/nats-io/nkeys v0.4.10 h1:glmRrpCmYLHByYcePvnTBEAwawwapjCPMjy2huw20wc= | ||||
| github.com/nats-io/nkeys v0.4.10/go.mod h1:OjRrnIKnWBFl+s4YK5ChQfvHP2fxqZexrKJoVVyWB3U= | ||||
| github.com/nats-io/nats.go v1.43.0 h1:uRFZ2FEoRvP64+UUhaTokyS18XBCR/xM2vQZKO4i8ug= | ||||
| github.com/nats-io/nats.go v1.43.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g= | ||||
| github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0= | ||||
| github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE= | ||||
| github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= | ||||
| github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= | ||||
| github.com/nwtgck/go-fakelish v0.1.3 h1:bA8/xa9hQmzppexIhBvdmztcd/PJ4SPuAUTBdMKZ8G4= | ||||
| @@ -173,12 +179,16 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH | ||||
| github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= | ||||
| github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= | ||||
| github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= | ||||
| github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= | ||||
| github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= | ||||
| github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= | ||||
| github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= | ||||
| github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM= | ||||
| github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg= | ||||
| github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= | ||||
| github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= | ||||
| github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4= | ||||
| github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= | ||||
| github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= | ||||
| github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= | ||||
| github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= | ||||
| github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= | ||||
| github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= | ||||
| github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= | ||||
| github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= | ||||
| github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= | ||||
| github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= | ||||
| @@ -232,14 +242,20 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec | ||||
| github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= | ||||
| go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ= | ||||
| go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= | ||||
| go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw= | ||||
| go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= | ||||
| golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= | ||||
| golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= | ||||
| golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | ||||
| golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | ||||
| golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | ||||
| golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= | ||||
| golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= | ||||
| golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= | ||||
| golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= | ||||
| golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= | ||||
| golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= | ||||
| golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= | ||||
| golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= | ||||
| golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= | ||||
| golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= | ||||
| golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= | ||||
| golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= | ||||
| @@ -259,12 +275,16 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R | ||||
| golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= | ||||
| golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= | ||||
| golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= | ||||
| golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= | ||||
| golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= | ||||
| golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= | ||||
| golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= | ||||
| golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= | ||||
| golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= | ||||
| golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= | ||||
| golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= | ||||
| golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= | ||||
| golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= | ||||
| golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= | ||||
| golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= | ||||
| golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= | ||||
| golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= | ||||
| golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| @@ -272,8 +292,12 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ | ||||
| golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= | ||||
| golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= | ||||
| golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= | ||||
| golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= | ||||
| golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= | ||||
| golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= | ||||
| golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= | ||||
| golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= | ||||
| golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| @@ -289,18 +313,26 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc | ||||
| golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||
| golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||
| golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||
| golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= | ||||
| golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= | ||||
| golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= | ||||
| golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= | ||||
| golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= | ||||
| golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= | ||||
| golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= | ||||
| golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= | ||||
| golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= | ||||
| golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= | ||||
| golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= | ||||
| golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= | ||||
| golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= | ||||
| golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= | ||||
| golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= | ||||
| golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= | ||||
| golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= | ||||
| golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= | ||||
| golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= | ||||
| golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= | ||||
| golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= | ||||
| golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= | ||||
| golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= | ||||
| golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= | ||||
| golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= | ||||
| golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= | ||||
| golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= | ||||
| golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= | ||||
| golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | ||||
|   | ||||
| @@ -7,6 +7,8 @@ import ( | ||||
| 	"oc-monitord/tools" | ||||
| 	"oc-monitord/utils" | ||||
| 	"slices" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/rs/zerolog" | ||||
| @@ -106,13 +108,16 @@ func NewArgoPodLog(name string, step string, msg string) ArgoPodLog { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func LogKubernetesArgo(wfName string, executionID string, watcher watch.Interface) { | ||||
| func LogKubernetesArgo(wfName string, namespace string, watcher watch.Interface) { | ||||
| 	var argoWatcher *ArgoWatch | ||||
| 	var pods []string | ||||
| 	var node wfv1.NodeStatus | ||||
|  | ||||
| 	wfl := utils.GetWFLogger("") | ||||
| 	wfl.Debug().Msg("Starting to log " + wfName) | ||||
|  | ||||
| 	var wg sync.WaitGroup | ||||
| 	 | ||||
| 	for event := range (watcher.ResultChan()) { | ||||
| 		wf, ok := event.Object.(*wfv1.Workflow) | ||||
| 		if !ok { | ||||
| @@ -120,7 +125,7 @@ func LogKubernetesArgo(wfName string, executionID string, watcher watch.Interfac | ||||
| 			continue | ||||
| 		} | ||||
| 		if len(wf.Status.Nodes) == 0 { | ||||
| 			wfl.Debug().Msg("No node status yet")	// The first output of the channel doesn't contain Nodes so we skip it | ||||
| 			wfl.Info().Msg("No node status yet")	// The first output of the channel doesn't contain Nodes so we skip it | ||||
| 			continue | ||||
| 		} | ||||
| 		 | ||||
| @@ -138,7 +143,7 @@ func LogKubernetesArgo(wfName string, executionID string, watcher watch.Interfac | ||||
|  | ||||
| 		newWatcher := ArgoWatch{ | ||||
| 			Name: node.Name, | ||||
| 			Namespace: executionID, | ||||
| 			Namespace: namespace, | ||||
| 			Status: string(node.Phase), | ||||
| 			Created: node.StartedAt.String(), | ||||
| 			Started: node.StartedAt.String(), | ||||
| @@ -163,7 +168,9 @@ func LogKubernetesArgo(wfName string, executionID string, watcher watch.Interfac | ||||
| 			if !slices.Contains(pods,pod.Name){ | ||||
| 				pl := wfl.With().Str("pod",  pod.Name).Logger() | ||||
| 				if wfName == pod.Name { pods = append(pods, pod.Name); continue }	// One of the node is the Workflow, the others are the pods so don't try to log on the wf name | ||||
| 				go logKubernetesPods(executionID, wfName, pod.Name, pl) | ||||
| 				pl.Info().Msg("Found a new pod to log : "  + pod.Name) | ||||
| 				wg.Add(1) | ||||
| 				go logKubernetesPods(namespace, wfName, pod.Name, pl, &wg) | ||||
| 				pods = append(pods, pod.Name) | ||||
| 			}  | ||||
| 		} | ||||
| @@ -171,6 +178,8 @@ func LogKubernetesArgo(wfName string, executionID string, watcher watch.Interfac | ||||
| 		// Stop listening to the chan when the Workflow is completed or something bad happened | ||||
| 		if node.Phase.Completed() { | ||||
| 			wfl.Info().Msg(wfName + " worflow completed") | ||||
| 			wg.Wait() | ||||
| 			wfl.Info().Msg(wfName + " exiting") | ||||
| 			break | ||||
| 		} | ||||
| 		if node.Phase.FailedOrError() { | ||||
| @@ -196,24 +205,31 @@ func retrieveCondition(wf *wfv1.Workflow) (c Conditions) { | ||||
| } | ||||
|  | ||||
| // Function needed to be executed as a go thread  | ||||
| func logKubernetesPods(executionId string, wfName string,podName string, logger zerolog.Logger){ | ||||
| func logKubernetesPods(executionId string, wfName string,podName string, logger zerolog.Logger, wg *sync.WaitGroup){ | ||||
| 	defer wg.Done() | ||||
| 	 | ||||
| 	s := strings.Split(podName, ".") | ||||
| 	name := s[0] + "-" + s[1] | ||||
| 	step := s[1] | ||||
| 	 | ||||
| 	k, err := tools.NewKubernetesTool() | ||||
| 	if err != nil { | ||||
| 		logger.Error().Msg("Could not get Kubernetes tools") | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	 | ||||
| 	reader, err := k.GetPodLogger(executionId, wfName, podName) | ||||
| 	if err != nil { | ||||
| 		logger.Error().Msg(err.Error()) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	 | ||||
| 	scanner := bufio.NewScanner(reader) | ||||
| 	for scanner.Scan() { | ||||
| 		log := scanner.Text() | ||||
| 		podLog := NewArgoPodLog(wfName,podName,log) | ||||
| 		podLog := NewArgoPodLog(name,step,log) | ||||
| 		jsonified, _ := json.Marshal(podLog) | ||||
| 		logger.Info().Msg(string(jsonified)) | ||||
| 	} | ||||
| 	 | ||||
| } | ||||
| @@ -3,7 +3,6 @@ package logger | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"oc-monitord/conf" | ||||
|  | ||||
| @@ -71,7 +70,6 @@ func LogLocalWorkflow(wfName string, pipe io.ReadCloser, wg *sync.WaitGroup) { | ||||
| 	logger = logs.GetLogger() | ||||
|  | ||||
| 	logger.Debug().Msg("created wf_logger") | ||||
| 	fmt.Println("created wf_logger") | ||||
| 	wfLogger = logger.With().Str("argo_name", wfName).Str("workflow_id", conf.GetConfig().WorkflowID).Str("workflow_execution_id", conf.GetConfig().ExecutionID).Logger() | ||||
|  | ||||
| 	var current_watch, previous_watch ArgoWatch | ||||
| @@ -111,7 +109,6 @@ func LogLocalPod(wfName string, pipe io.ReadCloser, steps []string, wg *sync.Wai | ||||
| 	scanner := bufio.NewScanner(pipe) | ||||
| 	for scanner.Scan() { | ||||
| 		var podLogger zerolog.Logger | ||||
| 		fmt.Println("new line") | ||||
| 		wg.Add(1) | ||||
| 		 | ||||
| 		line := scanner.Text() | ||||
|   | ||||
							
								
								
									
										41
									
								
								main.go
									
									
									
									
									
								
							
							
						
						
									
										41
									
								
								main.go
									
									
									
									
									
								
							| @@ -67,11 +67,14 @@ func main() { | ||||
| 	logger = u.GetLogger() | ||||
|  | ||||
| 	logger.Debug().Msg("Loki URL : " + conf.GetConfig().LokiURL) | ||||
| 	logger.Debug().Msg("Workflow executed : " + conf.GetConfig().ExecutionID) | ||||
| 	logger.Info().Msg("Workflow executed : " + conf.GetConfig().ExecutionID) | ||||
| 	exec := u.GetExecution(conf.GetConfig().ExecutionID) | ||||
| 	if exec == nil { | ||||
| 		logger.Fatal().Msg("Could not retrieve workflow ID from execution ID " + conf.GetConfig().ExecutionID + " on peer " + conf.GetConfig().PeerID) | ||||
| 	} | ||||
| 	conf.GetConfig().WorkflowID = exec.WorkflowID | ||||
|  | ||||
| 	logger.Debug().Msg("Starting construction of yaml argo for workflow :" + exec.WorkflowID) | ||||
| 	logger.Info().Msg("Starting construction of yaml argo for workflow :" + exec.WorkflowID) | ||||
|  | ||||
| 	if _, err := os.Stat("./argo_workflows/"); os.IsNotExist(err) { | ||||
| 		os.Mkdir("./argo_workflows/", 0755) | ||||
| @@ -83,7 +86,6 @@ func main() { | ||||
|  | ||||
| 	err := new_wf.LoadFrom(conf.GetConfig().WorkflowID, conf.GetConfig().PeerID) | ||||
| 	if err != nil { | ||||
|  | ||||
| 		logger.Error().Msg("Could not retrieve workflow " + conf.GetConfig().WorkflowID + " from oc-catalog API") | ||||
| 	} | ||||
|  | ||||
| @@ -95,23 +97,20 @@ func main() { | ||||
|  | ||||
| 	argoFilePath, err := builder.CompleteBuild(exec.ExecutionsID) | ||||
| 	if err != nil { | ||||
| 		logger.Error().Msg(err.Error()) | ||||
| 		logger.Error().Msg("Error when completing the build of the workflow: " + err.Error()) | ||||
| 	} | ||||
|  | ||||
| 	workflowName = getContainerName(argoFilePath) | ||||
|  | ||||
| 	wf_logger := u.GetWFLogger(workflowName) | ||||
| 	wf_logger.Debug().Msg("Testing argo name") | ||||
|  | ||||
| 	if conf.GetConfig().KubeHost == "" { | ||||
| 		// Not in a k8s environment, get conf from parameters | ||||
| 		fmt.Println("Executes outside of k8s") | ||||
| 		logger.Info().Msg("Executes outside of k8s") | ||||
| 		executeOutside(argoFilePath, builder.Workflow) | ||||
| 	} else { | ||||
| 		// Executed in a k8s environment | ||||
| 		fmt.Println("Executes inside a k8s") | ||||
| 		logger.Info().Msg("Executes inside a k8s") | ||||
| 		// executeInside(exec.GetID(), "argo", argo_file_path, stepMax)  // commenting to use conf.ExecutionID instead of exec.GetID() | ||||
| 		executeInside(conf.GetConfig().ExecutionID, conf.GetConfig().ExecutionID, argoFilePath) | ||||
| 		executeInside(conf.GetConfig().ExecutionID, exec.ExecutionsID, argoFilePath) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| @@ -124,23 +123,25 @@ func executeInside(execID string, ns string, argo_file_path string) { | ||||
| 	} | ||||
| 	 | ||||
| 	name, err := t.CreateArgoWorkflow(argo_file_path, ns) | ||||
| 	_ = name  | ||||
| 	// _ = name  | ||||
| 	if err != nil { | ||||
| 		logger.Error().Msg("Could not create argo workflow : " + err.Error()) | ||||
| 		fmt.Println("CA :" + conf.GetConfig().KubeCA) | ||||
| 		fmt.Println("Cert :" + conf.GetConfig().KubeCert) | ||||
| 		fmt.Println("Data :" + conf.GetConfig().KubeData) | ||||
| 		logger.Info().Msg(fmt.Sprint("CA :" + conf.GetConfig().KubeCA)) | ||||
| 		logger.Info().Msg(fmt.Sprint("Cert :" + conf.GetConfig().KubeCert)) | ||||
| 		logger.Info().Msg(fmt.Sprint("Data :" + conf.GetConfig().KubeData)) | ||||
| 		return | ||||
| 	} else { | ||||
| 		watcher, err := t.GetArgoWatch(execID, workflowName) | ||||
| 		watcher, err := t.GetArgoWatch(ns, workflowName) | ||||
| 		if err != nil { | ||||
| 			logger.Error().Msg("Could not retrieve Watcher : " + err.Error()) | ||||
| 		} | ||||
|  | ||||
| 		l.LogKubernetesArgo(name, execID, watcher) | ||||
| 		l.LogKubernetesArgo(name, ns, watcher) | ||||
| 		if err != nil { | ||||
| 			logger.Error().Msg("Could not log workflow : " + err.Error()) | ||||
| 		} | ||||
|  | ||||
| 		logger.Info().Msg("Finished, exiting...") | ||||
| 	} | ||||
|  | ||||
| } | ||||
| @@ -173,7 +174,7 @@ func executeOutside(argo_file_path string, workflow workflow_builder.Workflow) { | ||||
| 	go l.LogLocalWorkflow(workflowName, stdoutSubmit, &wg) | ||||
| 	go l.LogLocalPod(workflowName, stdoutLogs, steps, &wg) | ||||
|  | ||||
| 	fmt.Println("Starting argo submit") | ||||
| 	logger.Info().Msg("Starting argo submit") | ||||
| 	if err := cmdSubmit.Start(); err != nil { | ||||
| 		wf_logger.Error().Msg("Could not start argo submit") | ||||
| 		wf_logger.Error().Msg(err.Error() + bufio.NewScanner(stderrSubmit).Text()) | ||||
| @@ -182,7 +183,7 @@ func executeOutside(argo_file_path string, workflow workflow_builder.Workflow) { | ||||
|  | ||||
| 	time.Sleep(5 * time.Second) | ||||
|  | ||||
| 	fmt.Println("Running argo logs") | ||||
| 	logger.Info().Msg("Running argo logs") | ||||
| 	if err := cmdLogs.Run(); err != nil { | ||||
| 		wf_logger.Error().Msg("Could not run '" + strings.Join(cmdLogs.Args, " ") + "'") | ||||
| 		 | ||||
| @@ -190,7 +191,7 @@ func executeOutside(argo_file_path string, workflow workflow_builder.Workflow) { | ||||
|  | ||||
| 	} | ||||
|  | ||||
| 	fmt.Println("Waiting argo submit") | ||||
| 	logger.Info().Msg("Waiting argo submit") | ||||
| 	if err := cmdSubmit.Wait(); err != nil { | ||||
| 		wf_logger.Error().Msg("Could not execute argo submit") | ||||
| 		wf_logger.Error().Msg(err.Error() + bufio.NewScanner(stderrSubmit).Text()) | ||||
| @@ -231,7 +232,7 @@ func setConf(is_k8s bool, o *onion.Onion, parser *argparse.Parser) { | ||||
|  | ||||
| 	err := parser.Parse(os.Args) | ||||
| 	if err != nil { | ||||
| 		fmt.Println(parser.Usage(err)) | ||||
| 		logger.Info().Msg(parser.Usage(err)) | ||||
| 		os.Exit(1) | ||||
| 	} | ||||
| 	conf.GetConfig().Logs = "debug" | ||||
|   | ||||
							
								
								
									
										66
									
								
								minio-doc/README.md
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										66
									
								
								minio-doc/README.md
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,66 @@ | ||||
| # Goal | ||||
|  | ||||
| We want to be able to instantiate a service that allows to store file located on a `processing` pod onto it.  | ||||
|  | ||||
| We have already tested it with a static `Argo` yaml file, a MinIO running on the same kubernetes node, the minio service is reached because it is the only associated to the `serviceAccount`. | ||||
|  | ||||
| We have established three otpions that need to be available to the user for the feature to be implemented:  | ||||
|  | ||||
| - Use a MinIO running constantly on the node that executes the argo workflow | ||||
| - Use a MinIO | ||||
| - A MinIO is instanciated when a new workflow is launched | ||||
|  | ||||
| # Requirements | ||||
|  | ||||
| - Helm : `https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3` | ||||
| - Helm GO client : `$ go get github.com/mittwald/go-helm-client` | ||||
| - MinIO chart : `https://charts.min.io/` | ||||
|  | ||||
|  | ||||
| # Ressources | ||||
|  | ||||
| We need to create several ressources in order for the pods to communicate with the MinIO  | ||||
|  | ||||
| ## MinIO Auth Secrets | ||||
|  | ||||
| ## Bucket ConfigMap | ||||
|  | ||||
| With the name `artifact-repositories` this configMap will be used by default. It contains the URL to the MinIO server and the key to the authentication data held in a `secret` ressource.  | ||||
|  | ||||
| ```yaml | ||||
| apiVersion: v1 | ||||
| kind: ConfigMap | ||||
| metadata: | ||||
|   # If you want to use this config map by default, name it "artifact-repositories". | ||||
|   name: artifact-repositories | ||||
|   # annotations: | ||||
|   #   # v3.0 and after - if you want to use a specific key, put that key into this annotation. | ||||
|   #   workflows.argoproj.io/default-artifact-repository: oc-s3-artifact-repository | ||||
| data: | ||||
|   oc-s3-artifact-repository: | | ||||
|     s3: | ||||
|       bucket: oc-bucket | ||||
|       endpoint: [ retrieve cluster with kubectl get service argo-artifacts -o jsonpath="{.spec.clusterIP}" ]:9000 | ||||
|       insecure: true | ||||
|       accessKeySecret:  | ||||
|         name: argo-artifact-secret | ||||
|         key: access-key | ||||
|       secretKeySecret: | ||||
|         name: argo-artifact-secret | ||||
|         key: secret-key | ||||
|  | ||||
| ``` | ||||
|  | ||||
|  | ||||
| # Code modifications | ||||
|  | ||||
| Rajouter un attribut "isDataLink"  | ||||
|  - true/false | ||||
|  | ||||
| Rajouter un attribut DataPath ou un truc comme ca | ||||
|  | ||||
|   - liste de map[string]string permet de n'avoir qu'une copie par fichier) | ||||
|   - éditable uniquement a travers la méthode addDataPath | ||||
|   - clé : path du fichier / value : nom de la copie dans minio	 | ||||
|  | ||||
| ===> on a besoin du meme attribut pour Processing -> Data et Data -> Processing | ||||
| @@ -71,8 +71,8 @@ type Key struct { | ||||
| 	Bucket          string  `yaml:"bucket"` | ||||
| 	EndPoint        string  `yaml:"endpoint"` | ||||
| 	Insecure        bool    `yaml:"insecure"` | ||||
| 	AccessKeySecret *Secret `yaml accessKeySecret` | ||||
| 	SecretKeySecret *Secret `yaml secretKeySecret` | ||||
| 	AccessKeySecret *Secret `yaml:"accessKeySecret"` | ||||
| 	SecretKeySecret *Secret `yaml:"secretKeySecret"` | ||||
| } | ||||
|  | ||||
| type Artifact struct { | ||||
| @@ -131,7 +131,7 @@ func (template *Template) CreateContainer(processing *resources.ProcessingResour | ||||
|  | ||||
| func (template *Template) ReplacePerEnv(arg string, envs []models.Param) string { | ||||
| 	for _, v := range envs { | ||||
| 		if strings.Contains(arg, v.Name) { | ||||
| 		if v.Name != "" && strings.Contains(arg, v.Name) { | ||||
| 			value := "{{ inputs.parameters." + v.Name + " }}" | ||||
| 			arg = strings.ReplaceAll(arg, v.Name, value) | ||||
| 			arg = strings.ReplaceAll(arg, "$"+v.Name, value) | ||||
|   | ||||
							
								
								
									
										
											BIN
										
									
								
								oc-monitord
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										
											BIN
										
									
								
								oc-monitord
									
									
									
									
									
										Executable file
									
								
							
										
											Binary file not shown.
										
									
								
							| @@ -4,14 +4,16 @@ import ( | ||||
| 	"errors" | ||||
| 	"io" | ||||
|  | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	"k8s.io/apimachinery/pkg/watch" | ||||
| ) | ||||
|  | ||||
| type Tool interface { | ||||
| 	CreateArgoWorkflow(path string, ns string) (string, error) | ||||
| 	CreateAccessSecret(ns string, login string, password string) (string, error) | ||||
| 	CreateAccessSecret(user string, password string, storageId string, namespace string) (string, error) | ||||
| 	GetArgoWatch(executionId string, wfName string) (watch.Interface, error) | ||||
| 	GetPodLogger(ns string, wfName string, podName string) (io.ReadCloser, error) | ||||
| 	GetS3Secret(storageId string, namespace string) *v1.Secret | ||||
| } | ||||
|  | ||||
| var _service = map[string]func() (Tool, error){ | ||||
|   | ||||
| @@ -2,7 +2,6 @@ package tools | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"encoding/base64" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| @@ -13,8 +12,8 @@ import ( | ||||
|  | ||||
| 	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" | ||||
| 	"github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned" | ||||
| 	"github.com/google/uuid" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	k8serrors "k8s.io/apimachinery/pkg/api/errors" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/runtime" | ||||
| 	"k8s.io/apimachinery/pkg/runtime/serializer" | ||||
| @@ -31,7 +30,7 @@ type KubernetesTools struct { | ||||
| func NewKubernetesTool() (Tool, error) { | ||||
| 	// Load Kubernetes config (from ~/.kube/config) | ||||
| 	config := &rest.Config{ | ||||
| 		Host: conf.GetConfig().KubeHost + ":" + conf.GetConfig().KubePort, | ||||
| 		Host: "https://" + conf.GetConfig().KubeHost + ":" + conf.GetConfig().KubePort, | ||||
| 		TLSClientConfig: rest.TLSClientConfig{ | ||||
| 			CAData:   []byte(conf.GetConfig().KubeCA), | ||||
| 			CertData: []byte(conf.GetConfig().KubeCert), | ||||
| @@ -83,25 +82,25 @@ func (k *KubernetesTools) CreateArgoWorkflow(path string, ns string) (string, er | ||||
| 	if err != nil { | ||||
| 		return "", errors.New("failed to create workflow: " + err.Error()) | ||||
| 	} | ||||
| 	fmt.Printf("workflow %s created in namespace %s\n", createdWf.Name, ns) | ||||
| 	l := utils.GetLogger() | ||||
| 	l.Info().Msg(fmt.Sprintf("workflow %s created in namespace %s\n", createdWf.Name, ns)) | ||||
| 	return createdWf.Name, nil | ||||
| } | ||||
|  | ||||
| func (k *KubernetesTools) CreateAccessSecret(ns string, login string, password string) (string, error) { | ||||
| func (k *KubernetesTools) CreateAccessSecret(access string, password string, storageId string, namespace string) (string, error) { | ||||
| 	// Namespace where the secret will be created | ||||
| 	namespace := "default" | ||||
| 	// Encode the secret data (Kubernetes requires base64-encoded values) | ||||
| 	secretData := map[string][]byte{ | ||||
| 		"access-key": []byte(base64.StdEncoding.EncodeToString([]byte(login))), | ||||
| 		"secret-key": []byte(base64.StdEncoding.EncodeToString([]byte(password))), | ||||
| 		"access-key": []byte(access), | ||||
| 		"secret-key": []byte(password), | ||||
| 	} | ||||
|  | ||||
| 	// Define the Secret object | ||||
| 	name := uuid.New().String() | ||||
| 	name := storageId+"-secret-s3" | ||||
| 	secret := &v1.Secret{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:      name, | ||||
| 			Namespace: ns, | ||||
| 			Namespace: namespace, | ||||
| 		}, | ||||
| 		Type: v1.SecretTypeOpaque, | ||||
| 		Data: secretData, | ||||
| @@ -111,20 +110,35 @@ func (k *KubernetesTools) CreateAccessSecret(ns string, login string, password s | ||||
| 	if err != nil { | ||||
| 		return "", errors.New("Error creating secret: " + err.Error()) | ||||
| 	} | ||||
|  | ||||
| 	return name, nil | ||||
| } | ||||
|  | ||||
| func (k *KubernetesTools) GetS3Secret(storageId string, namespace string) *v1.Secret { | ||||
|  | ||||
| 	secret, err := k.Set.CoreV1().Secrets(namespace).Get(context.TODO(), storageId + "-secret-s3", metav1.GetOptions{}) | ||||
| 	// Get(context.TODO(),storageId + "-artifact-server", metav1.GetOptions{}) | ||||
| 	 | ||||
| 	if err != nil && !k8serrors.IsNotFound(err) { | ||||
| 		l := utils.GetLogger() | ||||
| 		l.Fatal().Msg("An error happened when retrieving secret in " + namespace + " : " + err.Error()) | ||||
| 	} | ||||
| 	if k8serrors.IsNotFound(err) { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	return secret | ||||
| 	// return secret  | ||||
| } | ||||
|  | ||||
|  | ||||
| func (k *KubernetesTools) GetArgoWatch(executionId string, wfName string) (watch.Interface, error){ | ||||
| 	wfl := utils.GetWFLogger("") | ||||
| 	wfl.Debug().Msg("Starting argo watch with argo lib") | ||||
| 	fmt.Println("metadata.name=oc-monitor-"+wfName + "  in namespace : " + executionId) | ||||
| 	options := metav1.ListOptions{FieldSelector: "metadata.name=oc-monitor-"+wfName} | ||||
| 	fmt.Println(options) | ||||
| 	watcher, err := k.VersionedSet.ArgoprojV1alpha1().Workflows(executionId).Watch(context.TODO(), options) | ||||
|  | ||||
| 	watcher, err := k.VersionedSet.ArgoprojV1alpha1().Workflows(executionId).Watch(context.Background(), options) | ||||
| 	if err != nil { | ||||
| 		return nil, errors.New("Error executing 'argo watch " + wfName + " -n " + executionId + " with ArgoprojV1alpha1 client") | ||||
| 	} | ||||
| 	 | ||||
|  | ||||
| 	return watcher, nil  | ||||
|  | ||||
| @@ -133,16 +147,18 @@ func (k *KubernetesTools) GetArgoWatch(executionId string, wfName string) (watch | ||||
| func (k *KubernetesTools) GetPodLogger(ns string, wfName string, nodeName string) (io.ReadCloser, error) { | ||||
| 	var targetPod v1.Pod | ||||
|  | ||||
| 	 | ||||
| 	pods, err := k.Set.CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{ | ||||
|         LabelSelector: "workflows.argoproj.io/workflow="+wfName, | ||||
| 		LabelSelector: "workflows.argoproj.io/workflow="+wfName, | ||||
|     }) | ||||
|     if err != nil { | ||||
| 		return nil, fmt.Errorf("failed to list pods: " + err.Error()) | ||||
|     } | ||||
|     if len(pods.Items) == 0 { | ||||
| 		return nil, fmt.Errorf("no pods found with label workflows.argoproj.io/node-name=" + nodeName) | ||||
| 		 | ||||
| 		return nil, fmt.Errorf("no pods found with label workflows.argoproj.io/workflow="+ wfName + " no pods found with label workflows.argoproj.io/node-name=" + nodeName + " in namespace " + ns) | ||||
|     } | ||||
|  | ||||
| 	 | ||||
|     for _, pod := range pods.Items { | ||||
| 		if pod.Annotations["workflows.argoproj.io/node-name"] == nodeName { | ||||
| 			targetPod = pod | ||||
| @@ -172,7 +188,8 @@ func (k *KubernetesTools) testPodReady(pod v1.Pod, ns string) { | ||||
| 	 | ||||
| 		var initialized bool | ||||
| 		for _, cond := range pod.Status.Conditions { | ||||
| 			if cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue { | ||||
| 			// It seems that for remote pods the pod gets the Succeeded status before it has time to display the it is ready to run in .status.conditions,so we added the OR condition | ||||
| 			if (cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue) || pod.Status.Phase == v1.PodSucceeded {		 | ||||
| 				initialized = true | ||||
| 				return | ||||
| 			} | ||||
|   | ||||
| @@ -17,11 +17,13 @@ var ( | ||||
| 	onceLogger 	sync.Once | ||||
| 	onceWF 		sync.Once | ||||
| ) | ||||
|  | ||||
| func GetExecution(exec_id string) *workflow_execution.WorkflowExecution { | ||||
| 	res := oclib.NewRequest(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), "", conf.GetConfig().PeerID, []string{}, nil).LoadOne(exec_id) | ||||
| 	if res.Code != 200 { | ||||
| 		logger := oclib.GetLogger() | ||||
| 		logger.Error().Msg("Could not retrieve workflow ID from execution ID " + exec_id) | ||||
| 		logger.Error().Msg("Error retrieving execution " + exec_id) | ||||
| 		logger.Error().Msg(res.Err) | ||||
| 		return nil | ||||
| 	} | ||||
| 	return res.ToWorkflowExecution() | ||||
|   | ||||
| @@ -4,6 +4,7 @@ import ( | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"net/http" | ||||
| 	"oc-monitord/utils" | ||||
| 	"slices" | ||||
| 	"time" | ||||
|  | ||||
| @@ -21,7 +22,7 @@ type AdmiraltySetter struct { | ||||
|  | ||||
| func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID string) error { | ||||
| 	 | ||||
| 	logger = logs.GetLogger() | ||||
| 	logger := logs.GetLogger() | ||||
|  | ||||
| 	data := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"",localPeerID,nil,nil).LoadOne(remotePeerID) | ||||
| 	if data.Code != 200 { | ||||
| @@ -30,42 +31,42 @@ func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID st | ||||
| 	} | ||||
| 	remotePeer := data.ToPeer() | ||||
|  | ||||
| 	data = oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"",localPeerID,nil,nil).LoadOne(localPeerID) | ||||
| 	if data.Code != 200 { | ||||
| 		logger.Error().Msg("Error while trying to instantiate local peer " + remotePeerID) | ||||
| 		return fmt.Errorf(data.Err) | ||||
| 	} | ||||
| 	localPeer := data.ToPeer() | ||||
| 			data = oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"",localPeerID,nil,nil).LoadOne(localPeerID) | ||||
| 			if data.Code != 200 { | ||||
| 				logger.Error().Msg("Error while trying to instantiate local peer " + remotePeerID) | ||||
| 				return fmt.Errorf(data.Err) | ||||
| 			} | ||||
| 			localPeer := data.ToPeer() | ||||
|  | ||||
| 	caller := tools.NewHTTPCaller( | ||||
| 		map[tools.DataType]map[tools.METHOD]string{ | ||||
| 			tools.ADMIRALTY_SOURCE: map[tools.METHOD]string{ | ||||
| 				tools.POST :"/:id", | ||||
| 			caller := tools.NewHTTPCaller( | ||||
| 				map[tools.DataType]map[tools.METHOD]string{ | ||||
| 					tools.ADMIRALTY_SOURCE: { | ||||
| 						tools.POST :"/:id", | ||||
| 					}, | ||||
| 					tools.ADMIRALTY_KUBECONFIG: { | ||||
| 						tools.GET:"/:id", | ||||
| 					}, | ||||
| 					tools.ADMIRALTY_SECRET: { | ||||
| 						tools.POST:"/:id/" + remotePeerID, | ||||
| 					}, | ||||
| 					tools.ADMIRALTY_TARGET: { | ||||
| 						tools.POST:"/:id/" + remotePeerID, | ||||
| 					}, | ||||
| 					tools.ADMIRALTY_NODES: { | ||||
| 						tools.GET:"/:id/" + remotePeerID, | ||||
| 					}, | ||||
| 			}, | ||||
| 			tools.ADMIRALTY_KUBECONFIG: map[tools.METHOD]string{ | ||||
| 				tools.GET:"/:id", | ||||
| 			}, | ||||
| 			tools.ADMIRALTY_SECRET: map[tools.METHOD]string{ | ||||
| 				tools.POST:"/:id", | ||||
| 			}, | ||||
| 			tools.ADMIRALTY_TARGET: map[tools.METHOD]string{ | ||||
| 				tools.POST:"/:id", | ||||
| 			}, | ||||
| 			tools.ADMIRALTY_NODES: map[tools.METHOD]string{ | ||||
| 				tools.GET:"/:id", | ||||
| 			}, | ||||
| 		}, | ||||
| 	) | ||||
|  | ||||
| 	logger.Info().Msg(" Creating the Admiralty Source on " + remotePeerID + " ns-" + s.Id + "\n\n") | ||||
| 	_ = s.callRemoteExecution(remotePeer, []int{http.StatusCreated, http.StatusConflict},caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil, true) | ||||
| 	logger.Info().Msg(" Retrieving kubeconfig with the secret on " + remotePeerID + " ns-" + s.Id + "\n\n") | ||||
| 		) | ||||
| 	 | ||||
| 	logger.Info().Msg("\n\n  Creating the Admiralty Source on " + remotePeerID + " ns-" + s.Id) | ||||
| 	s.callRemoteExecution(remotePeer, []int{http.StatusCreated, http.StatusConflict},caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil, true) | ||||
| 	logger.Info().Msg("\n\n  Retrieving kubeconfig with the secret on " + remotePeerID + " ns-" + s.Id) | ||||
| 	kubeconfig := s.getKubeconfig(remotePeer, caller) | ||||
| 	logger.Info().Msg(" Creating a secret from the kubeconfig " + localPeerID + " ns-" + s.Id + "\n\n") | ||||
| 	_ = s.callRemoteExecution(localPeer, []int{http.StatusCreated}, caller,s.Id, tools.ADMIRALTY_SECRET, tools.POST,kubeconfig, true) | ||||
| 	logger.Info().Msg(" Creating the Admiralty Target on " + localPeerID + " ns-" + s.Id + "\n\n") | ||||
| 	_ = s.callRemoteExecution(localPeer,[]int{http.StatusCreated, http.StatusConflict},caller,s.Id,tools.ADMIRALTY_TARGET,tools.POST, nil, true) | ||||
| 	logger.Info().Msg(" Checking for the creation of the admiralty node on " + localPeerID + " ns-" + s.Id + "\n\n") | ||||
| 	logger.Info().Msg("\n\n  Creating a secret from the kubeconfig " + localPeerID + " ns-" + s.Id) | ||||
| 	s.callRemoteExecution(localPeer, []int{http.StatusCreated}, caller,s.Id, tools.ADMIRALTY_SECRET, tools.POST,kubeconfig, true) | ||||
| 	logger.Info().Msg("\n\n Creating the Admiralty Target on " + localPeerID + " in namespace " + s.Id ) | ||||
| 	s.callRemoteExecution(localPeer,[]int{http.StatusCreated, http.StatusConflict},caller,s.Id,tools.ADMIRALTY_TARGET,tools.POST, nil, true) | ||||
| 	logger.Info().Msg("\n\n  Checking for the creation of the admiralty node on " + localPeerID + " ns-" + s.Id) | ||||
| 	s.checkNodeStatus(localPeer,caller) | ||||
| 	 | ||||
| 	return nil | ||||
| @@ -73,40 +74,41 @@ func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID st | ||||
|  | ||||
| func (s *AdmiraltySetter) getKubeconfig(peer *peer.Peer, caller *tools.HTTPCaller) map[string]string { | ||||
| 	var kubedata map[string]string | ||||
| 	_ = s.callRemoteExecution(peer, []int{http.StatusOK}, caller, s.Id, tools.ADMIRALTY_KUBECONFIG, tools.GET, nil, true) | ||||
| 	s.callRemoteExecution(peer, []int{http.StatusOK}, caller, s.Id, tools.ADMIRALTY_KUBECONFIG, tools.GET, nil, true) | ||||
| 	if caller.LastResults["body"] == nil || len(caller.LastResults["body"].([]byte)) == 0 { | ||||
| 		fmt.Println("Something went wrong when retrieving data from Get call for kubeconfig") | ||||
| 		l := utils.GetLogger() | ||||
| 		l.Error().Msg("Something went wrong when retrieving data from Get call for kubeconfig") | ||||
| 		panic(0) | ||||
| 	} | ||||
| 	err := json.Unmarshal(caller.LastResults["body"].([]byte), &kubedata) | ||||
| 	if err != nil { | ||||
| 		fmt.Println("Something went wrong when unmarshalling data from Get call for kubeconfig") | ||||
| 		l := utils.GetLogger() | ||||
| 		l.Error().Msg("Something went wrong when unmarshalling data from Get call for kubeconfig") | ||||
| 		panic(0) | ||||
| 	} | ||||
|  | ||||
| 	return kubedata | ||||
| } | ||||
|  | ||||
| func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode []int,caller *tools.HTTPCaller, dataID string, dt tools.DataType, method tools.METHOD, body interface{}, panicCode bool) *peer.PeerExecution { | ||||
| 	resp, err := peer.LaunchPeerExecution(peer.UUID, dataID, dt, method, body, caller) | ||||
| func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode []int,caller *tools.HTTPCaller, dataID string, dt tools.DataType, method tools.METHOD, body interface{}, panicCode bool) { | ||||
| 	l := utils.GetLogger() | ||||
| 	_, err := peer.LaunchPeerExecution(peer.UUID, dataID, dt, method, body, caller) | ||||
| 	if err != nil { | ||||
| 		fmt.Println("Error when executing on peer at", peer.Url) | ||||
| 		fmt.Println(err) | ||||
| 		l.Error().Msg("Error when executing on peer at" + peer.Url) | ||||
| 		l.Error().Msg(err.Error()) | ||||
| 		panic(0) | ||||
| 	} | ||||
|  | ||||
| 	if !slices.Contains(expectedCode, caller.LastResults["code"].(int)) { | ||||
| 		fmt.Println("Didn't receive the expected code :", caller.LastResults["code"], "when expecting", expectedCode) | ||||
| 		l.Error().Msg(fmt.Sprint("Didn't receive the expected code :", caller.LastResults["code"], "when expecting", expectedCode)) | ||||
| 		if _, ok := caller.LastResults["body"]; ok { | ||||
| 			logger.Info().Msg(string(caller.LastResults["body"].([]byte))) | ||||
| 			// fmt.Println(string(caller.LastResults["body"].([]byte))) | ||||
| 			l.Info().Msg(string(caller.LastResults["body"].([]byte))) | ||||
| 		} | ||||
| 		if panicCode { | ||||
| 			panic(0) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return resp | ||||
| } | ||||
|  | ||||
| func (s *AdmiraltySetter) storeNodeName(caller *tools.HTTPCaller){ | ||||
| @@ -120,15 +122,16 @@ func (s *AdmiraltySetter) storeNodeName(caller *tools.HTTPCaller){ | ||||
| 		name := metadata.(map[string]interface{})["name"].(string) | ||||
| 		s.NodeName = name | ||||
| 	} else { | ||||
| 		fmt.Println("Could not retrieve data about the recently created node") | ||||
| 		l := utils.GetLogger() | ||||
| 		l.Error().Msg("Could not retrieve data about the recently created node") | ||||
| 		panic(0) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (s *AdmiraltySetter) checkNodeStatus(localPeer *peer.Peer, caller *tools.HTTPCaller){ | ||||
| 	for i := range(5) { | ||||
| 		time.Sleep(5 * time.Second) // let some time for kube to generate the node | ||||
| 		_ = s.callRemoteExecution(localPeer,[]int{http.StatusOK},caller,s.Id,tools.ADMIRALTY_NODES,tools.GET, nil, false) | ||||
| 		time.Sleep(10 * time.Second) // let some time for kube to generate the node | ||||
| 		s.callRemoteExecution(localPeer,[]int{http.StatusOK},caller,s.Id,tools.ADMIRALTY_NODES,tools.GET, nil, false) | ||||
| 		if caller.LastResults["code"] == 200 { | ||||
| 			s.storeNodeName(caller) | ||||
| 			return | ||||
|   | ||||
| @@ -8,6 +8,7 @@ import ( | ||||
| 	"fmt" | ||||
| 	"oc-monitord/conf" | ||||
| 	. "oc-monitord/models" | ||||
|  | ||||
| 	tools2 "oc-monitord/tools" | ||||
| 	"os" | ||||
| 	"strings" | ||||
| @@ -18,6 +19,7 @@ import ( | ||||
| 	"cloud.o-forge.io/core/oc-lib/models/common/enum" | ||||
| 	"cloud.o-forge.io/core/oc-lib/models/resources" | ||||
| 	w "cloud.o-forge.io/core/oc-lib/models/workflow" | ||||
| 	"cloud.o-forge.io/core/oc-lib/models/workflow/graph" | ||||
| 	"github.com/nwtgck/go-fakelish" | ||||
| 	"github.com/rs/zerolog" | ||||
| 	"gopkg.in/yaml.v3" | ||||
| @@ -26,11 +28,11 @@ import ( | ||||
| var logger zerolog.Logger | ||||
|  | ||||
| type ArgoBuilder struct { | ||||
| 	OriginWorkflow 	*w.Workflow | ||||
| 	Workflow       	Workflow | ||||
| 	Services       	[]*Service | ||||
| 	Timeout        	int | ||||
| 	RemotePeers		[]string | ||||
| 	OriginWorkflow *w.Workflow | ||||
| 	Workflow       Workflow | ||||
| 	Services       []*Service | ||||
| 	Timeout        int | ||||
| 	RemotePeers    []string | ||||
| } | ||||
|  | ||||
| type Workflow struct { | ||||
| @@ -53,7 +55,7 @@ func (b *Workflow) getDag() *Dag { | ||||
| } | ||||
|  | ||||
| type Spec struct { | ||||
| 	ServiceAccountName	string					`yaml:"serviceAccountName"` | ||||
| 	ServiceAccountName	string					`yaml:"serviceAccountName,omitempty"` | ||||
| 	Entrypoint 			string                	`yaml:"entrypoint"` | ||||
| 	Arguments  			[]Parameter           	`yaml:"arguments,omitempty"` | ||||
| 	Volumes    			[]VolumeClaimTemplate 	`yaml:"volumeClaimTemplates,omitempty"` | ||||
| @@ -65,7 +67,7 @@ type Spec struct { | ||||
| // add s3, gcs, azure, etc if needed on a link between processing and storage | ||||
| func (b *ArgoBuilder) CreateDAG(namespace string, write bool) ( int, []string, []string, error) { | ||||
| 	logger = logs.GetLogger() | ||||
| 	fmt.Println("Creating DAG", b.OriginWorkflow.Graph.Items) | ||||
| 	logger.Info().Msg(fmt.Sprint("Creating DAG ", b.OriginWorkflow.Graph.Items)) | ||||
| 	// handle services by checking if there is only one processing with hostname and port | ||||
| 	firstItems, lastItems, volumes := b.createTemplates(namespace) | ||||
| 	b.createVolumes(volumes) | ||||
| @@ -73,7 +75,7 @@ func (b *ArgoBuilder) CreateDAG(namespace string, write bool) ( int, []string, [ | ||||
| 	if b.Timeout > 0 { | ||||
| 		b.Workflow.Spec.Timeout = b.Timeout | ||||
| 	} | ||||
| 	b.Workflow.Spec.ServiceAccountName = "sa-"+namespace | ||||
| 	b.Workflow.Spec.ServiceAccountName = "sa-" + namespace | ||||
| 	b.Workflow.Spec.Entrypoint = "dag" | ||||
| 	b.Workflow.ApiVersion = "argoproj.io/v1alpha1" | ||||
| 	b.Workflow.Kind = "Workflow" | ||||
| @@ -84,16 +86,16 @@ func (b *ArgoBuilder) CreateDAG(namespace string, write bool) ( int, []string, [ | ||||
| 	 | ||||
| 	return  len(b.Workflow.getDag().Tasks), firstItems, lastItems, nil | ||||
| } | ||||
|  | ||||
| 		 | ||||
| func (b *ArgoBuilder) createTemplates(namespace string) ([]string, []string, []VolumeMount) { | ||||
| 	volumes := []VolumeMount{} | ||||
| 	firstItems := []string{} | ||||
| 	lastItems := []string{} | ||||
| 	items := b.OriginWorkflow.GetGraphItems(b.OriginWorkflow.Graph.IsProcessing) | ||||
| 	fmt.Println("Creating templates", len(items)) | ||||
| 	logger.Info().Msg(fmt.Sprint("Creating templates", len(items))) | ||||
| 	for _, item := range b.OriginWorkflow.GetGraphItems(b.OriginWorkflow.Graph.IsProcessing) { | ||||
| 		instance := item.Processing.GetSelectedInstance() | ||||
| 		fmt.Println("Creating template for", item.Processing.GetName(), instance) | ||||
| 		logger.Info().Msg(fmt.Sprint("Creating template for", item.Processing.GetName(), instance)) | ||||
| 		if instance == nil || instance.(*resources.ProcessingInstance).Access == nil && instance.(*resources.ProcessingInstance).Access.Container != nil { | ||||
| 			logger.Error().Msg("Not enough configuration setup, template can't be created : " + item.Processing.GetName()) | ||||
| 			return firstItems, lastItems, volumes | ||||
| @@ -176,10 +178,12 @@ func (b *ArgoBuilder) createArgoTemplates(namespace string, | ||||
| 	lastItems []string) ([]VolumeMount, []string, []string) { | ||||
| 	_, firstItems, lastItems = b.addTaskToArgo(b.Workflow.getDag(), id, processing, firstItems, lastItems) | ||||
| 	template := &Template{Name: getArgoName(processing.GetName(), id)} | ||||
| 	fmt.Println("Creating template for", template.Name) | ||||
| 	isReparted, peerId := b.isProcessingReparted(*processing,id) | ||||
| 	logger.Info().Msg(fmt.Sprint("Creating template for", template.Name)) | ||||
| 	isReparted, peerId := b.isProcessingReparted(*processing, id) | ||||
| 	template.CreateContainer(processing, b.Workflow.getDag()) | ||||
| 	 | ||||
| 	if isReparted { | ||||
| 		logger.Debug().Msg("Reparted processing, on " + peerId) | ||||
| 		b.RemotePeers = append(b.RemotePeers, peerId) | ||||
| 		template.AddAdmiraltyAnnotations(peerId) | ||||
| 	} | ||||
| @@ -189,55 +193,33 @@ func (b *ArgoBuilder) createArgoTemplates(namespace string, | ||||
| 		template.Metadata.Labels = make(map[string]string) | ||||
| 		template.Metadata.Labels["app"] = "oc-service-" + processing.GetName() // Construct the template for the k8s service and add a link in graph between k8s service and processing | ||||
| 	} | ||||
| 	related := b.OriginWorkflow.GetByRelatedProcessing(id, b.OriginWorkflow.Graph.IsStorage) | ||||
|  | ||||
| 	volumes = b.addStorageAnnotations(id, template, namespace, volumes) | ||||
| 	b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, *template) | ||||
| 	return volumes, firstItems, lastItems | ||||
| } | ||||
|  | ||||
| func (b *ArgoBuilder) addStorageAnnotations(id string, template *Template, namespace string, volumes []VolumeMount) []VolumeMount { | ||||
	related := b.OriginWorkflow.GetByRelatedProcessing(id, b.OriginWorkflow.Graph.IsStorage) // Retrieve all of the storage nodes linked to the processing for which we create the template
| 	for _, r := range related { | ||||
| 		storage := r.Node.(*resources.StorageResource) | ||||
| 		for _, linkToStorage := range r.Links { | ||||
| 			for _, rw := range linkToStorage.StorageLinkInfos { | ||||
| 				art := Artifact{Path: template.ReplacePerEnv(rw.Source, linkToStorage.Env)} | ||||
| 				var art Artifact | ||||
| 				artifactBaseName := strings.Join(strings.Split(storage.GetName(), " "), "-") + "-" + strings.Replace(rw.FileName, ".", "-", -1) //  Parameter/Artifact name must consist of alpha-numeric characters, '_' or '-' | ||||
| 				if rw.Write { | ||||
| 					art.Name = storage.GetName() + "-" + rw.Destination + "-input-write" | ||||
| 					art = Artifact{Path: template.ReplacePerEnv(rw.Source, linkToStorage.Env)} // When we are writing to the s3 the Path element is the path to the file in the pod | ||||
| 					art.Name = artifactBaseName + "-input-write" | ||||
| 				} else { | ||||
| 					art.Name = storage.GetName() + "-" + rw.Destination + "-input-read" | ||||
| 					art = Artifact{Path: template.ReplacePerEnv(rw.Destination+"/"+rw.FileName, linkToStorage.Env)} // When we are reading from the s3 the Path element in pod should be the destination of the file | ||||
| 					art.Name = artifactBaseName + "-input-read" | ||||
| 				} | ||||
|  | ||||
| 				if storage.StorageType == enum.S3 { | ||||
| 					art.S3 = &Key{ | ||||
| 						Key:      template.ReplacePerEnv(rw.Destination+"/"+rw.FileName, linkToStorage.Env), | ||||
| 						Insecure: true, // temporary | ||||
| 					} | ||||
| 					sel := storage.GetSelectedInstance() | ||||
| 					if sel != nil { | ||||
| 						if sel.(*resources.StorageResourceInstance).Credentials != nil { | ||||
| 							tool, err := tools2.NewService(conf.GetConfig().Mode) | ||||
| 							if err != nil || tool == nil { | ||||
| 								logger.Error().Msg("Could not create the access secret") | ||||
| 							} else { | ||||
| 								id, err := tool.CreateAccessSecret(namespace, | ||||
| 									sel.(*resources.StorageResourceInstance).Credentials.Login, | ||||
| 									sel.(*resources.StorageResourceInstance).Credentials.Pass) | ||||
| 								if err == nil { | ||||
| 									art.S3.AccessKeySecret = &Secret{ | ||||
| 										Name: id, | ||||
| 										Key:  "access-key", | ||||
| 									} | ||||
| 									art.S3.SecretKeySecret = &Secret{ | ||||
| 										Name: id, | ||||
| 										Key:  "secret-key", | ||||
| 									} | ||||
| 								} | ||||
| 							} | ||||
| 						} | ||||
| 						art.S3.Key = strings.ReplaceAll(art.S3.Key, sel.(*resources.StorageResourceInstance).Source+"/", "") | ||||
| 						art.S3.Key = strings.ReplaceAll(art.S3.Key, sel.(*resources.StorageResourceInstance).Source, "") | ||||
| 						splits := strings.Split(art.S3.EndPoint, "/") | ||||
| 						if len(splits) > 1 { | ||||
| 							art.S3.Bucket = splits[0] | ||||
| 							art.S3.EndPoint = strings.Join(splits[1:], "/") | ||||
| 						} else { | ||||
| 							art.S3.Bucket = splits[0] | ||||
| 						} | ||||
| 					} | ||||
|  | ||||
| 					b.addS3annotations(&art, template, rw, linkToStorage, storage, namespace) | ||||
| 				} | ||||
|  | ||||
| 				if rw.Write { | ||||
| 					template.Outputs.Artifacts = append(template.Inputs.Artifacts, art) | ||||
| 				} else { | ||||
| @@ -258,14 +240,99 @@ func (b *ArgoBuilder) createArgoTemplates(namespace string, | ||||
| 			}, volumes) | ||||
| 		} | ||||
| 	} | ||||
| 	b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, *template) | ||||
| 	return volumes, firstItems, lastItems | ||||
| 	return volumes | ||||
| }	 | ||||
|  | ||||
| func (b *ArgoBuilder) addS3annotations(art *Artifact, template *Template, rw graph.StorageProcessingGraphLink, linkToStorage graph.GraphLink, storage *resources.StorageResource, namespace string) { | ||||
| 		 | ||||
| 	art.S3 = &Key{ | ||||
| 		// Key:      template.ReplacePerEnv(rw.Destination+"/"+rw.FileName, linkToStorage.Env), | ||||
| 		Insecure: true, // temporary | ||||
| 	} | ||||
| 	if rw.Write { | ||||
| 		art.S3.Key = rw.Destination + "/" + rw.FileName | ||||
| 	} else { | ||||
| 		art.S3.Key = rw.Source  | ||||
| 	} | ||||
| 	sel := storage.GetSelectedInstance() | ||||
| 	// v0.1 : add the storage.Source to the s3 object | ||||
| 	// v0.2 : test if the storage.Source  exists in the configMap and quit if not | ||||
| 	// v1 	: v0.2 + if doesn't exist edit/create the configMap with the response from API call | ||||
| 	if sel != nil { | ||||
| 		b.addAuthInformation(storage, namespace, art) | ||||
| 		art.S3.Bucket = namespace 											// DEFAULT : will need to update this to create an unique | ||||
| 		art.S3.EndPoint = sel.(*resources.StorageResourceInstance).Source  | ||||
| 	} | ||||
| } | ||||
|  | ||||
|  | ||||
| func (b *ArgoBuilder) addAuthInformation(storage *resources.StorageResource, namespace string, art *Artifact) { | ||||
| 	 | ||||
| 	sel := storage.GetSelectedInstance() | ||||
| 	 | ||||
| 	tool, err := tools2.NewService(conf.GetConfig().Mode) | ||||
| 	if err != nil || tool == nil { | ||||
| 		logger.Fatal().Msg("Could not create the access secret :" + err.Error()) | ||||
| 		}  | ||||
| 		 | ||||
| 		secretName, err := b.SetupS3Credentials(storage, namespace, tool) 			// this method return should be updated once we have decided how to retrieve credentials   | ||||
| 		 | ||||
| 		if err == nil { | ||||
| 			art.S3.AccessKeySecret = &Secret{ | ||||
| 				Name: secretName, | ||||
| 				Key:  "access-key", | ||||
| 			} | ||||
| 			art.S3.SecretKeySecret = &Secret{ | ||||
| 				Name: secretName, | ||||
| 				Key:  "secret-key", | ||||
| 		} | ||||
| 	} | ||||
| 	 | ||||
| 	art.S3.Key = strings.ReplaceAll(art.S3.Key, sel.(*resources.StorageResourceInstance).Source+"/", "") | ||||
| 	art.S3.Key = strings.ReplaceAll(art.S3.Key, sel.(*resources.StorageResourceInstance).Source, "") | ||||
| 	splits := strings.Split(art.S3.EndPoint, "/") | ||||
| 	if len(splits) > 1 { | ||||
| 		art.S3.Bucket = splits[0] | ||||
| 		art.S3.EndPoint = strings.Join(splits[1:], "/") | ||||
| 		} else { | ||||
| 			art.S3.Bucket = splits[0] | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (b *ArgoBuilder) SetupS3Credentials(storage *resources.StorageResource, namespace string, tool tools2.Tool) (string, error) { | ||||
| 	s := tool.GetS3Secret(storage.UUID, namespace) | ||||
| 	// var s *v1.Secret | ||||
| 	accessKey, secretKey := retrieveMinioCredential("peer",namespace) | ||||
| 	 | ||||
| 	if s == nil { | ||||
| 		id, err := tool.CreateAccessSecret( | ||||
| 			accessKey, | ||||
| 			secretKey, | ||||
| 			storage.UUID, | ||||
| 			namespace, | ||||
| 		) | ||||
| 		if err != nil { | ||||
| 			l := oclib.GetLogger() | ||||
| 			l.Fatal().Msg("Error when creating the secret holding credentials for S3 access in " + namespace + " : " + err.Error()) | ||||
| 		} | ||||
|  | ||||
| 		return id, nil | ||||
| 	} | ||||
|  | ||||
| 	// s.Name = "toto" | ||||
| 	return s.Name, nil | ||||
| 	 | ||||
| } | ||||
|  | ||||
| // This method needs to evolve to an API call to the peer passed as a parameter | ||||
| func retrieveMinioCredential(peer string, namespace string) (string,string) { | ||||
| 	return "hF9wRGog75JuMdshWeEZ", "OwXXJkVQyb5l1aVPdOegKOtDJGoP1dJYeo8O7mDW" | ||||
| } | ||||
|  | ||||
| func (b *ArgoBuilder) addTaskToArgo(dag *Dag, graphItemID string, processing *resources.ProcessingResource, | ||||
| 	firstItems []string, lastItems []string) (*Dag, []string, []string) { | ||||
| 	unique_name := getArgoName(processing.GetName(), graphItemID) | ||||
| 	step := Task{Name: unique_name, Template: unique_name} | ||||
| 		unique_name := getArgoName(processing.GetName(), graphItemID) | ||||
| 		step := Task{Name: unique_name, Template: unique_name} | ||||
| 	instance := processing.GetSelectedInstance() | ||||
| 	if instance != nil { | ||||
| 		for _, value := range instance.(*resources.ProcessingInstance).Env { | ||||
| @@ -325,7 +392,7 @@ func (b *ArgoBuilder) isArgoDependancy(id string) (bool, []string) { | ||||
| 	isDeps := false | ||||
| 	for _, link := range b.OriginWorkflow.Graph.Links { | ||||
| 		if _, ok := b.OriginWorkflow.Graph.Items[link.Destination.ID]; !ok { | ||||
| 			fmt.Println("Could not find the source of the link", link.Destination.ID) | ||||
| 			logger.Info().Msg(fmt.Sprint("Could not find the source of the link", link.Destination.ID)) | ||||
| 			continue | ||||
| 		} | ||||
| 		source := b.OriginWorkflow.Graph.Items[link.Destination.ID].Processing | ||||
| @@ -345,7 +412,7 @@ func (b *ArgoBuilder) isArgoDependancy(id string) (bool, []string) { | ||||
| func (b *ArgoBuilder) getArgoDependencies(id string) (dependencies []string) { | ||||
| 	for _, link := range b.OriginWorkflow.Graph.Links { | ||||
| 		if _, ok := b.OriginWorkflow.Graph.Items[link.Source.ID]; !ok { | ||||
| 			fmt.Println("Could not find the source of the link", link.Source.ID) | ||||
| 			logger.Info().Msg(fmt.Sprint("Could not find the source of the link", link.Source.ID)) | ||||
| 			continue | ||||
| 		} | ||||
| 		source := b.OriginWorkflow.Graph.Items[link.Source.ID].Processing | ||||
| @@ -367,20 +434,19 @@ func getArgoName(raw_name string, component_id string) (formatedName string) { | ||||
|  | ||||
| // Verify if a processing resource is attached to another Compute than the one hosting | ||||
| // the current Open Cloud instance. If true return the peer ID to contact | ||||
| func (b *ArgoBuilder) isProcessingReparted(processing resources.ProcessingResource, graphID string) (bool,string) { | ||||
| func (b *ArgoBuilder) isProcessingReparted(processing resources.ProcessingResource, graphID string) (bool, string) { | ||||
| 	computeAttached := b.retrieveProcessingCompute(graphID) | ||||
| 	if computeAttached == nil { | ||||
| 		logger.Error().Msg("No compute was found attached to processing " + processing.Name + " : " + processing.UUID ) | ||||
| 		logger.Error().Msg("No compute was found attached to processing " + processing.Name + " : " + processing.UUID) | ||||
| 		panic(0) | ||||
| 	} | ||||
|  | ||||
| 	 | ||||
| 	// Creates an accessor srtictly for Peer Collection  | ||||
| 	req := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"","",nil,nil) | ||||
	// Creates an accessor strictly for the Peer Collection
| 	req := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER), "", "", nil, nil) | ||||
| 	if req == nil { | ||||
| 		fmt.Println("TODO : handle error when trying to create a request on the Peer Collection") | ||||
| 		return false, "" | ||||
| 	}  | ||||
| 	} | ||||
|  | ||||
| 	res := req.LoadOne(computeAttached.CreatorID) | ||||
| 	if res.Err != "" { | ||||
| @@ -388,25 +454,25 @@ func (b *ArgoBuilder) isProcessingReparted(processing resources.ProcessingResour | ||||
| 		fmt.Print(res.Err) | ||||
| 		return false, "" | ||||
| 	} | ||||
| 	 | ||||
|  | ||||
| 	peer := *res.ToPeer() | ||||
|  | ||||
| 	isNotReparted, _ := peer.IsMySelf() | ||||
| 	fmt.Println("Result IsMySelf for ", peer.UUID ," : ", isNotReparted) | ||||
| 	 | ||||
| 	isNotReparted := peer.State == 1 | ||||
| 	logger.Info().Msg(fmt.Sprint("Result IsMySelf for ", peer.UUID, " : ", isNotReparted)) | ||||
|  | ||||
| 	return !isNotReparted, peer.UUID | ||||
| } | ||||
|  | ||||
| func (b *ArgoBuilder) retrieveProcessingCompute(graphID string) *resources.ComputeResource { | ||||
| 	for _, link := range b.OriginWorkflow.Graph.Links { | ||||
| 		// If a link contains the id of the processing | ||||
| 		var oppositeId string  | ||||
| 		if link.Source.ID == graphID{ | ||||
| 		var oppositeId string | ||||
| 		if link.Source.ID == graphID { | ||||
| 			oppositeId = link.Destination.ID | ||||
| 		} else if(link.Destination.ID == graphID){ | ||||
| 		} else if link.Destination.ID == graphID { | ||||
| 			oppositeId = link.Source.ID | ||||
| 		} | ||||
| 		fmt.Println("OppositeId : ", oppositeId) | ||||
| 		 | ||||
| 		if oppositeId != "" { | ||||
| 			dt, res := b.OriginWorkflow.Graph.GetResource(oppositeId) | ||||
| 			if dt == oclib.COMPUTE_RESOURCE { | ||||
| @@ -417,27 +483,25 @@ func (b *ArgoBuilder) retrieveProcessingCompute(graphID string) *resources.Compu | ||||
| 		} | ||||
|  | ||||
| 	} | ||||
| 		 | ||||
| 	return nil  | ||||
| } | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Execute the last actions once the YAML file for the Argo Workflow is created | ||||
| func (b *ArgoBuilder) CompleteBuild(executionsId string) (string, error) { | ||||
| 	fmt.Println("DEV :: Completing build") | ||||
| 	logger.Info().Msg(fmt.Sprint("DEV :: Completing build")) | ||||
| 	setter := AdmiraltySetter{Id: executionsId} | ||||
| 	// Setup admiralty for each node | ||||
| 	for _, peer := range b.RemotePeers { | ||||
| 		fmt.Println("DEV :: Launching Admiralty Setup for ", peer) | ||||
| 		logger.Info().Msg(fmt.Sprint("DEV :: Launching Admiralty Setup for ", peer)) | ||||
| 		setter.InitializeAdmiralty(conf.GetConfig().PeerID,peer) | ||||
| 	} | ||||
|  | ||||
| 	// Update the name of the admiralty node to use  | ||||
| 	for _, template := range b.Workflow.Spec.Templates { | ||||
| 		if len(template.Metadata.Annotations) > 0 { | ||||
| 			if resp, ok := template.Metadata.Annotations["multicluster.admiralty.io/clustername"]; ok { | ||||
| 				fmt.Println(resp) | ||||
| 				template.Metadata.Annotations["multicluster.admiralty.io/clustername"] = "target-" + conf.GetConfig().ExecutionID | ||||
| 			if peerId, ok := template.Metadata.Annotations["multicluster.admiralty.io/clustername"]; ok { | ||||
| 				template.Metadata.Annotations["multicluster.admiralty.io/clustername"] = "target-" + oclib.GetConcatenatedName(peerId, executionsId) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| @@ -463,4 +527,4 @@ func (b *ArgoBuilder) CompleteBuild(executionsId string) (string, error) { | ||||
| 	} | ||||
|  | ||||
| 	return workflows_dir + file_name, nil | ||||
| } | ||||
| } | ||||
|   | ||||
| @@ -14,7 +14,7 @@ type WorflowDB struct { | ||||
|  | ||||
// Create the objects from the mxgraphxml stored in the workflow given as a parameter
| func (w *WorflowDB) LoadFrom(workflow_id string, peerID string) error { | ||||
| 	fmt.Println("Loading workflow from " + workflow_id) | ||||
| 	logger.Info().Msg("Loading workflow from " + workflow_id) | ||||
| 	var err error | ||||
| 	if w.Workflow, err = w.getWorkflow(workflow_id, peerID); err != nil { | ||||
| 		return err | ||||
| @@ -27,7 +27,7 @@ func (w *WorflowDB) getWorkflow(workflow_id string, peerID string) (workflow *wo | ||||
| 	logger := oclib.GetLogger() | ||||
|  | ||||
| 	lib_data := oclib.NewRequest(oclib.LibDataEnum(oclib.WORKFLOW), "", peerID, []string{}, nil).LoadOne(workflow_id) | ||||
| 	fmt.Println("ERR", lib_data.Code, lib_data.Err) | ||||
| 	logger.Info().Msg(fmt.Sprint("ERR", lib_data.Code, lib_data.Err)) | ||||
| 	if lib_data.Code != 200 { | ||||
| 		logger.Error().Msg("Error loading the graph") | ||||
| 		return workflow, errors.New(lib_data.Err) | ||||
| @@ -43,7 +43,7 @@ func (w *WorflowDB) getWorkflow(workflow_id string, peerID string) (workflow *wo | ||||
|  | ||||
| func (w *WorflowDB) ExportToArgo(namespace string, timeout int) (*ArgoBuilder, int, error) { | ||||
| 	logger := oclib.GetLogger() | ||||
| 	fmt.Println("Exporting to Argo", w.Workflow) | ||||
| 	logger.Info().Msg(fmt.Sprint("Exporting to Argo", w.Workflow)) | ||||
| 	if len(w.Workflow.Name) == 0 || w.Workflow.Graph == nil { | ||||
| 		return nil, 0, fmt.Errorf("can't export a graph that has not been loaded yet") | ||||
| 	} | ||||
|   | ||||
		Reference in New Issue
	
	Block a user