Merge branch 'main' of https://cloud.o-forge.io/core/oc-monitord into services_demo
Commit 2c064c0e80

README.md (40 lines changed)

@@ -15,3 +15,43 @@ imagePullPolicy: Never
 Not doing so will end up with the pod having an `ErrorImagePull`
+
+## Allow argo to create services
+
+In order for monitord to expose **open cloud services** on the node, we need to give it permission to create **k8s services**.
+
+For that we can update the RBAC configuration of a role already created by argo:
+
+### Manually edit the rbac authorization
+
+> kubectl edit roles.rbac.authorization.k8s.io -n argo argo-role
+
+In `rules`, add a new entry:
+
+```
+- apiGroups:
+  - ""
+  resources:
+  - services
+  verbs:
+  - get
+  - create
+```
+
+### Patch the rbac authorization with a one-liner
+
+> kubectl patch role argo-role -n argo --type='json' -p='[{"op": "add", "path": "/rules/-", "value": {"apiGroups": [""], "resources": ["services"], "verbs": ["get","create"]}}]'
+
+### Check whether the modification is effective
+
+> kubectl auth can-i create services --as=system:serviceaccount:argo:argo -n argo
+
+This command **must return "yes"**.
+
+## TODO
+
+- [ ] Log the output of each pod:
+    - the `logsPods()` function already exists
+    - we still need to implement the logic that creates each pod's logger and starts the monitoring routine
+- [ ] Allow the front end to know on which IP the services are reachable (see the sketch after this list)
+    - currently done by running `kubectl get nodes -o wide`
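Editorial aside, not part of this commit: for the last TODO item, a minimal sketch of how the node IPs could be retrieved programmatically instead of shelling out to `kubectl get nodes -o wide`. It assumes the `k8s.io/client-go` and `k8s.io/apimachinery` dependencies, which oc-monitord does not currently pull in; function and variable names are illustrative only.

```
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// listNodeIPs returns the InternalIP of every node in the cluster, which is
// roughly the INTERNAL-IP column printed by `kubectl get nodes -o wide`.
func listNodeIPs(clientset *kubernetes.Clientset) ([]string, error) {
	nodes, err := clientset.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	ips := []string{}
	for _, node := range nodes.Items {
		for _, addr := range node.Status.Addresses {
			if addr.Type == "InternalIP" {
				ips = append(ips, addr.Address)
			}
		}
	}
	return ips, nil
}

func main() {
	// In-cluster config works when monitord runs inside k8s; outside the
	// cluster, clientcmd.BuildConfigFromFlags with a kubeconfig would be used.
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	ips, err := listNodeIPs(clientset)
	if err != nil {
		panic(err)
	}
	fmt.Println(ips)
}
```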
demo_nginx/workflow_nginx_services.json (new file, 308 lines)

@@ -0,0 +1,308 @@

{
  "id": "9c7ffc7e-3e6e-4ea8-8eab-3a03258712ff",
  "name": "test-services",
  "processings": [
    "7c71a15b-bdbc-46d7-9dab-67e369804136",
    "0d565c87-50ae-4a73-843d-f8b2d4047772"
  ],
  "datacenters": [
    "7b989e97-c3e7-49d2-a3a7-f959da4870b5"
  ],
  "graph": {
    "zoom": 1,
    "items": {
      "aa8d2265-9fe2-42c7-ba1f-46ea0da8e633": {
        "id": "aa8d2265-9fe2-42c7-ba1f-46ea0da8e633",
        "width": 0,
        "height": 0,
        "processing": {
          "id": "0d565c87-50ae-4a73-843d-f8b2d4047772",
          "resource_model": {
            "resource_type": "processing",
            "model": {
              "command": {
                "type": "string",
                "value": "curlimages/curl:7.88.1"
              },
              "args": {
                "type": "string",
                "value": "-SL https://cloud.o-forge.io/core/oc-monitord/raw/branch/services_demo/demo_nginx/cockpit.html -o /usr/share/nginx/cockpit.hmtl"
              }
            }
          },
          "name": "CURL",
          "short_description": "Transfer or retrieve information from or to a server ",
          "description": "curl is a tool for transferring data from or to a server. It supports these protocols: DICT, FILE, FTP, FTPS, GOPHER, GOPHERS, HTTP, HTTPS, IMAP, IMAPS, LDAP, LDAPS, MQTT, POP3, POP3S, RTMP, RTMPS, RTSP, SCP, SFTP, SMB, SMBS, SMTP, SMTPS, TELNET, TFTP, WS and WSS.",
          "logo": "https://cloud.o-forge.io/core/deperecated-oc-catalog/src/branch/main/scripts/local_imgs/curl-logo.png",
          "owner": "IRT",
          "source_url": "http://www.google.com",
          "license": "GPLv2",
          "ram": {},
          "storage": 300,
          "parallel": true,
          "scaling_model": 2,
          "disk_io": "30 MB/s"
        }
      },
      "a2d273c1-e564-45ad-a720-c9a40c28c6b5": {
        "id": "a2d273c1-e564-45ad-a720-c9a40c28c6b5",
        "width": 0,
        "height": 0,
        "processing": {
          "id": "0d565c87-50ae-4a73-843d-f8b2d4047772",
          "resource_model": {
            "resource_type": "processing",
            "model": {
              "command": {
                "type": "string",
                "value": "curlimages/curl:7.88.1"
              },
              "args": {
                "type": "string",
                "value": "-SL https://cloud.o-forge.io/core/oc-monitord/raw/branch/services_demo/demo_nginx/DTF.html -o /usr/share/nginx/DTF.hmtl"
              }
            }
          },
          "name": "CURL",
          "short_description": "Transfer or retrieve information from or to a server ",
          "description": "curl is a tool for transferring data from or to a server. It supports these protocols: DICT, FILE, FTP, FTPS, GOPHER, GOPHERS, HTTP, HTTPS, IMAP, IMAPS, LDAP, LDAPS, MQTT, POP3, POP3S, RTMP, RTMPS, RTSP, SCP, SFTP, SMB, SMBS, SMTP, SMTPS, TELNET, TFTP, WS and WSS.",
          "logo": "https://cloud.o-forge.io/core/deperecated-oc-catalog/src/branch/main/scripts/local_imgs/curl-logo.png",
          "owner": "IRT",
          "source_url": "http://www.google.com",
          "license": "GPLv2",
          "ram": {},
          "storage": 300,
          "parallel": true,
          "scaling_model": 2,
          "disk_io": "30 MB/s"
        }
      },
      "bf6916ff-b16f-44b3-818b-0bcd5bbaca00": {
        "id": "bf6916ff-b16f-44b3-818b-0bcd5bbaca00",
        "width": 0,
        "height": 0,
        "position": {
          "id": "",
          "x": 0,
          "y": 0
        },
        "processing": {
          "id": "5234c921-490f-40b1-ab1e-56570dd7879e",
          "resource_model": {
            "resource_type": "processing",
            "model": {
              "command": {
                "type": "string",
                "value": "nginx"
              },
              "args": {
                "type": "string"
              },
              "expose": {
                "type": "dict",
                "value": {
                  "80" :{
                    "reverse" : "",
                    "PAT" : "308080"
                  }
                }
              }
            }
          },
          "name": "NGINX",
          "short_description": "an open source reverse proxy server for HTTP, HTTPS, SMTP, POP3, and IMAP protocols, as well as a load balancer, HTTP cache, and a web server ( ",
          "description": "Nginx (pronounced 'engine-x') is an open source reverse proxy server for HTTP, HTTPS, SMTP, POP3, and IMAP protocols, as well as a load balancer, HTTP cache, and a web server (origin server). The nginx project started with a strong focus on high concurrency, high performance and low memory usage. It is licensed under the 2-clause BSD-like license and it runs on Linux, BSD variants, Mac OS X, Solaris, AIX, HP-UX, as well as on other *nix flavors. It also has a proof of concept port for Microsoft Windows.",
          "logo": "https://cloud.o-forge.io/core/deperecated-oc-catalog/raw/branch/main/scripts/local_imgs/nginx.png",
          "owner": "IRT",
          "source_url": "https://hub.docker.com/_/nginx",
          "license": "GPLv2",
          "ram": {},
          "storage": 300,
          "parallel": true,
          "scaling_model": 2,
          "disk_io": "30 MB/s"
        }
      },
      "d83ac451-4690-44d9-af09-48e7588b2db9": {
        "id": "d83ac451-4690-44d9-af09-48e7588b2db9",
        "width": 0,
        "height": 0,
        "position": {
          "id": "",
          "x": 0,
          "y": 0
        },
        "processing": {
          "id": "5234c921-490f-40b1-ab1e-56570dd7879e",
          "resource_model": {
            "resource_type": "processing",
            "model": {
              "command": {
                "type": "string",
                "value": "nginx"
              },
              "args": {
                "type": "string"
              },
              "expose": {
                "type": "dict",
                "value": {
                  "80" :{
                    "reverse" : "",
                    "PAT" : "308081"
                  }
                }
              }
            }
          },
          "name": "NGINX",
          "short_description": "an open source reverse proxy server for HTTP, HTTPS, SMTP, POP3, and IMAP protocols, as well as a load balancer, HTTP cache, and a web server ( ",
          "description": "Nginx (pronounced 'engine-x') is an open source reverse proxy server for HTTP, HTTPS, SMTP, POP3, and IMAP protocols, as well as a load balancer, HTTP cache, and a web server (origin server). The nginx project started with a strong focus on high concurrency, high performance and low memory usage. It is licensed under the 2-clause BSD-like license and it runs on Linux, BSD variants, Mac OS X, Solaris, AIX, HP-UX, as well as on other *nix flavors. It also has a proof of concept port for Microsoft Windows.",
          "logo": "https://cloud.o-forge.io/core/deperecated-oc-catalog/raw/branch/main/scripts/local_imgs/nginx.png",
          "owner": "IRT",
          "source_url": "https://hub.docker.com/_/nginx",
          "license": "GPLv2",
          "ram": {},
          "storage": 300,
          "parallel": true,
          "scaling_model": 2,
          "disk_io": "30 MB/s"
        }
      },
      "6a7e8860-7c26-4b70-9b3a-1bd27adcdfe1": {
        "id": "6a7e8860-7c26-4b70-9b3a-1bd27adcdfe1",
        "width": 0,
        "height": 0,
        "position": {
          "id": "",
          "x": 0,
          "y": 0
        },
        "itemresource": {
          "datacenter": {
            "cpus": [
              {
                "architecture": "x86",
                "cores": {
                  "$numberLong": "8"
                }
              }
            ],
            "ram": {
              "size": {
                "$numberLong": "16384"
              }
            },
            "gpus": [
              {
                "memory": {
                  "$numberLong": "24000"
                },
                "tensor_cores": {
                  "$numberLong": "328"
                },
                "cuda_cores": {
                  "$numberLong": "10496"
                },
                "model": "RTX 3090 FE"
              }
            ],
            "abstractresource": {
              "abstractobject": {
                "id": "7b989e97-c3e7-49d2-a3a7-f959da4870b5",
                "name": "Mundi datacenter"
              },
              "short_description": "Mundi Opencloud Instance",
              "description": "A very long description of what this data is",
              "logo": "https://cloud.o-forge.io/core/deperecated-oc-catalog/src/branch/main/scripts/local_imgs/Mundi datacenter.png",
              "owner": "IRT",
              "source_url": "http://www.google.com",
              "resource_model": {
                "id": "c3983010-1990-4ac0-8533-5389867e4424",
                "resource_type": "datacenter_resource"
              }
            }
          }
        }
      }
    },
    "links": [
      {
        "source": {
          "id": "aa8d2265-9fe2-42c7-ba1f-46ea0da8e633",
          "x": 0,
          "y": 0
        },
        "destination": {
          "id": "bf6916ff-b16f-44b3-818b-0bcd5bbaca00",
          "x": 0,
          "y": 0
        }
      },
      {
        "source": {
          "id": "a2d273c1-e564-45ad-a720-c9a40c28c6b5",
          "x": 0,
          "y": 0
        },
        "destination": {
          "id": "d83ac451-4690-44d9-af09-48e7588b2db9",
          "x": 0,
          "y": 0
        }
      },
      {
        "source": {
          "id": "a2d273c1-e564-45ad-a720-c9a40c28c6b5",
          "x": 0,
          "y": 0
        },
        "destination": {
          "id": "6a7e8860-7c26-4b70-9b3a-1bd27adcdfe1",
          "x": 0,
          "y": 0
        }
      },
      {
        "source": {
          "id": "aa8d2265-9fe2-42c7-ba1f-46ea0da8e633",
          "x": 0,
          "y": 0
        },
        "destination": {
          "id": "6a7e8860-7c26-4b70-9b3a-1bd27adcdfe1",
          "x": 0,
          "y": 0
        }
      },
      {
        "source": {
          "id": "bf6916ff-b16f-44b3-818b-0bcd5bbaca00",
          "x": 0,
          "y": 0
        },
        "destination": {
          "id": "6a7e8860-7c26-4b70-9b3a-1bd27adcdfe1",
          "x": 0,
          "y": 0
        }
      },
      {
        "source": {
          "id": "d83ac451-4690-44d9-af09-48e7588b2db9",
          "x": 0,
          "y": 0
        },
        "destination": {
          "id": "6a7e8860-7c26-4b70-9b3a-1bd27adcdfe1",
          "x": 0,
          "y": 0
        }
      }
    ]
  }
}
go.mod (2 lines changed)

@@ -3,7 +3,7 @@ module oc-monitord
 go 1.22.0

 require (
-	cloud.o-forge.io/core/oc-lib v0.0.0-20240826085916-d0e1474f8f34
+	cloud.o-forge.io/core/oc-lib v0.0.0-20240828135227-14d6a5f11c4e
 	github.com/akamensky/argparse v1.4.0
 	github.com/goraz/onion v0.1.3
 	github.com/nats-io/nats-server/v2 v2.10.18
go.sum (3 lines changed)

@@ -38,6 +38,8 @@ cloud.o-forge.io/core/oc-lib v0.0.0-20240822081914-4abf59a10d97 h1:6tbeTQvRnD0vD
 cloud.o-forge.io/core/oc-lib v0.0.0-20240822081914-4abf59a10d97/go.mod h1:1hhYh5QWAbYw9cKplQ0ZD9PMgU8t6gPqiYF8sldv1HU=
 cloud.o-forge.io/core/oc-lib v0.0.0-20240826085916-d0e1474f8f34 h1:40XQgwR9HxXSnouY+ZqE/xYCM4qa+U+RLA5GA5JSNyQ=
 cloud.o-forge.io/core/oc-lib v0.0.0-20240826085916-d0e1474f8f34/go.mod h1:1hhYh5QWAbYw9cKplQ0ZD9PMgU8t6gPqiYF8sldv1HU=
+cloud.o-forge.io/core/oc-lib v0.0.0-20240828135227-14d6a5f11c4e h1:/KWO/gIcP5f7T4r00715fNz0Y/Hil6Bj3J1ycuES1Zw=
+cloud.o-forge.io/core/oc-lib v0.0.0-20240828135227-14d6a5f11c4e/go.mod h1:FIJD0taWLJ5pjQLJ6sfE2KlTkvbmk5SMcyrxdjsaVz0=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/akamensky/argparse v1.4.0 h1:YGzvsTqCvbEZhL8zZu2AiA5nq805NZh75JNj4ajn1xc=
 github.com/akamensky/argparse v1.4.0/go.mod h1:S5kwC7IuDcEr5VeXtGPRVZ5o/FdhcMlQz4IZQuw64xA=

@@ -221,6 +223,7 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
main.go (20 lines changed)

@@ -28,6 +28,13 @@ import (
 	"github.com/rs/zerolog"
 )

+// Command-line args:
+// - url: Loki URL (default: "http://127.0.0.1:3100")
+// - execution: Workflow Execution ID (required) to identify the current execution, allows to retrieve Workflow
+// - mongo: MongoDB URL (default: "mongodb://127.0.0.1:27017")
+// - db: MongoDB database name (default: "DC_myDC")
+// - timeout: Execution timeout (default: -1)
+
 var logger zerolog.Logger
 var wf_logger zerolog.Logger
 var pods_logger zerolog.Logger

@@ -40,6 +47,8 @@ const localConfigFile = "./conf/local_ocmonitord_conf.json"

 func main() {
+
+	os.Setenv("test_service","true") // Only for service demo, delete before merging on main
 	monitorLocal = false
 	// Test if monitor is launched outside (with parameters) or in a k8s environment (env variables sets)
 	if os.Getenv("KUBERNETES_SERVICE_HOST") == "" {

@@ -111,6 +120,8 @@ func getWorkflowId(exec_id string) string {
 	return wf_exec.WorkflowID
 }

+// So far we only log the output from
+
 func executeWorkflow(argo_file_path string) {
 	// var stdout, stderr, stdout_logs, stderr_logs io.ReadCloser
 	var stdout, stderr io.ReadCloser

@@ -177,7 +188,7 @@ func logWorkflow(pipe io.ReadCloser, wg *sync.WaitGroup) {

 // Debug, no logs sent
 func logPods(pipe io.ReadCloser, name string) {
-	pods_logger = wf_logger.With().Str("sortie name", name).Logger()
+	pods_logger = wf_logger.With().Str("pod_name", name).Logger()
 	scanner := bufio.NewScanner(pipe)
 	for scanner.Scan() {
 		log := scanner.Text()

@@ -192,7 +203,7 @@ func loadConfig(is_k8s bool, parser *argparse.Parser) {

 	o = initOnion(o)
 	// These variables can only be retrieved in the onion
-	// Variables that don't depend on the environmen (from conf file), can be loaded after
+	// Variables that don't depend on the environment (from conf file), can be loaded after
 	// We can't use underscore in the env variable names because it's the delimitor with OCMONITOR too
 	setConf(is_k8s, o, parser)

@@ -216,7 +227,7 @@ func setConf(is_k8s bool, o *onion.Onion, parser *argparse.Parser) {
 		tools.SetConfig(mongo, db, "")
 	} else {
 		url := parser.String("u", "url", &argparse.Options{Required: true, Default: "http://127.0.0.1:3100", Help: "Url to the Loki database logs will be sent to"})
-		workflow := parser.String("w", "workflow", &argparse.Options{Required: true, Help: "Execution ID of the workflow to request from oc-catalog API"})
+		execution := parser.String("e", "execution", &argparse.Options{Required: true, Help: "Execution ID of the workflow to request from oc-catalog API"})
 		mongo := parser.String("m", "mongo", &argparse.Options{Required: true, Default: "mongodb://127.0.0.1:27017", Help: "URL to reach the MongoDB"})
 		db := parser.String("d", "database", &argparse.Options{Required: true, Default: "DC_myDC", Help: "Name of the database to query in MongoDB"})
 		timeout := parser.Int("t", "timeout", &argparse.Options{Required: false, Default: -1, Help: "Timeout for the execution of the workflow"})

@@ -227,12 +238,13 @@ func setConf(is_k8s bool, o *onion.Onion, parser *argparse.Parser) {
 		}
 		conf.GetConfig().LokiURL = *url
 		conf.GetConfig().Timeout = *timeout
-		conf.GetConfig().ExecutionID = *workflow
+		conf.GetConfig().ExecutionID = *execution
 		tools.SetConfig(*mongo, *db, "")
 	}

 }
+

 func initOnion(o *onion.Onion) *onion.Onion {
 	logger = logs.CreateLogger("oc-monitord", "")
 	configFile := ""
models/services.go (new file, 40 lines)

@@ -0,0 +1,40 @@

package models

type ServiceResource struct {
	Action            string `yaml:"action,omitempty"`
	SuccessCondition  string `yaml:"successCondition,omitempty"`
	FailureCondition  string `yaml:"failureCondition,omitempty"`
	SetOwnerReference bool   `yaml:"setOwnerReference,omitempty"`
	Manifest          string `yaml:"manifest,omitempty"`
}

type Service struct {
	APIVersion string      `yaml:"apiVersion"`
	Kind       string      `yaml:"kind"`
	Metadata   Metadata    `yaml:"metadata"`
	Spec       ServiceSpec `yaml:"spec"`
}

type Metadata struct {
	Name string `yaml:"name"`
}

// ServiceSpec is the specification of the Kubernetes Service
type ServiceSpec struct {
	Selector  map[string]string `yaml:"selector,omitempty"`
	Ports     []ServicePort     `yaml:"ports"`
	ClusterIP string            `yaml:"clusterIP,omitempty"`
	Type      string            `yaml:"type,omitempty"`
}

// ServicePort defines a port for a Kubernetes Service
type ServicePort struct {
	Name       string `yaml:"name"` // Even if empty need to be in the yaml
	Protocol   string `yaml:"protocol,omitempty"`
	Port       int64  `yaml:"port"`
	TargetPort int64  `yaml:"targetPort,omitempty"`
	NodePort   int64  `yaml:"nodePort,omitempty"`
}
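Editorial aside, not part of the commit: a minimal sketch of how these models are meant to be used, marshaling a `models.Service` into the plain Kubernetes Service manifest that `addServiceToArgo` later embeds in an Argo `resource` template. The port numbers and names below are made up for illustration.

```
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"

	"oc-monitord/models"
)

func main() {
	// Illustrative values; the real builder fills the ports from the
	// "expose" contract of each processing.
	svc := models.Service{
		APIVersion: "v1",
		Kind:       "Service",
		Metadata:   models.Metadata{Name: "workflow-service"},
		Spec: models.ServiceSpec{
			Selector: map[string]string{"app": "oc-service"},
			Type:     "NodePort",
			Ports: []models.ServicePort{
				{Name: "nginx-80", Protocol: "TCP", Port: 80, TargetPort: 80, NodePort: 30080},
			},
		},
	}

	out, err := yaml.Marshal(svc)
	if err != nil {
		panic(err)
	}
	// Prints a plain Kubernetes Service manifest, the string that ends up in
	// the "manifest" field of the Argo resource template.
	fmt.Println(string(out))
}
```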
@@ -30,6 +30,10 @@ type Dag struct {
 	Tasks []Task `yaml:"tasks,omitempty"`
 }

+type TemplateMetadata struct {
+	Labels map[string]string `yaml:"labels,omitempty"`
+}
+
 type Template struct {
 	Name string `yaml:"name"`
 	Inputs struct {

@@ -37,4 +41,6 @@ type Template struct {
 	} `yaml:"inputs,omitempty"`
 	Container Container `yaml:"container,omitempty"`
 	Dag Dag `yaml:"dag,omitempty"`
+	Metadata TemplateMetadata `yaml:"metadata,omitempty"`
+	Resource ServiceResource `yaml:"resource,omitempty"`
 }
models/translate_ports.go (new file, 8 lines)

@@ -0,0 +1,8 @@

package models

type PortTranslation map[string]PortConfig

type PortConfig struct {
	Reverse string `json:"reverse,omitempty"`
	PAT     string `json:"PAT,omitempty"`
}
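Editorial aside, not part of the commit: `PortTranslation` mirrors the `expose` contract attached to a processing in the demo workflow JSON. The standalone JSON round-trip below only shows the shape of that data; the monitor itself currently extracts the contract from BSON in `getExposeContract` (see workflow_builder/argo_services.go further down), not through these `json` tags.

```
package main

import (
	"encoding/json"
	"fmt"

	"oc-monitord/models"
)

func main() {
	// The "expose" value attached to the first NGINX processing in
	// demo_nginx/workflow_nginx_services.json.
	raw := `{"80": {"reverse": "", "PAT": "308080"}}`

	var pt models.PortTranslation
	if err := json.Unmarshal([]byte(raw), &pt); err != nil {
		panic(err)
	}

	// Container port "80" is mapped to the PAT value declared in the workflow.
	fmt.Println(pt["80"].PAT) // 308080
}
```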
oc-monitord (binary file, not shown)
@@ -7,20 +7,26 @@ package workflow_builder
 import (
 	. "oc-monitord/models"
 	"os"
+	"slices"
 	"strings"
 	"time"

 	oclib "cloud.o-forge.io/core/oc-lib"
 	"cloud.o-forge.io/core/oc-lib/models/resource_model"
 	"cloud.o-forge.io/core/oc-lib/models/resources/workflow/graph"
+	w "cloud.o-forge.io/core/oc-lib/models/workflow"
 	"github.com/nwtgck/go-fakelish"
+	"github.com/rs/zerolog"
 	"gopkg.in/yaml.v3"
 )

+var logger zerolog.Logger
+
 type ArgoBuilder struct {
-	graph graph.Graph
+	OriginWorkflow w.Workflow
 	Workflow Workflow
-	Timeout int
+	Services *Service
+	Timeout int
 }

 type Workflow struct {

@@ -41,9 +47,17 @@ type Spec struct {
 }

 func (b *ArgoBuilder) CreateDAG() (string, error) {
+
+	// handle services by checking if there is only one processing with hostname and port
+
+	b.createNginxVolumes()
+
+
 	b.createTemplates()
 	b.createDAGstep()
 	b.createVolumes()

 	if b.Timeout > 0 {
 		b.Workflow.Spec.Timeout = b.Timeout
 	}

@@ -52,7 +66,7 @@ func (b *ArgoBuilder) CreateDAG() (string, error) {
 	b.Workflow.Kind = "Workflow"
 	random_name := generateWfName()
 	b.Workflow.Metadata.Name = "oc-monitor-" + random_name
-	logger := oclib.GetLogger()
+	logger = oclib.GetLogger()
 	yamlified, err := yaml.Marshal(b.Workflow)
 	if err != nil {
 		logger.Error().Msg("Could not transform object to yaml file")

@@ -87,7 +101,7 @@ func (b *ArgoBuilder) createTemplates() {
 		image_name := strings.Split(command, " ")[0] // TODO : decide where to store the image name, GUI or models.computing.Image
 		temp_container := Container{Image: image_name} // TODO : decide where to store the image name, GUI or models.computing.Image
 		temp_container.Command = getComputingCommands(command)
-		temp_container.Args = getComputingArgs(strings.Split(args, " "), command)
+		temp_container.Args = getComputingArgs(args, command)
 		// Only for dev purpose,
 		input_names := getComputingEnvironmentName(strings.Split(env, " "))

@@ -100,8 +114,23 @@ func (b *ArgoBuilder) createTemplates() {
 		new_temp := Template{Name: argo_name, Container: temp_container}
 		new_temp.Inputs.Parameters = inputs_container
 		new_temp.Container.VolumeMounts = append(new_temp.Container.VolumeMounts, VolumeMount{Name: "workdir", MountPath: "/mnt/vol"}) // TODO : replace this with a search of the storage / data source name
+		new_temp.Container.VolumeMounts = append(new_temp.Container.VolumeMounts, VolumeMount{Name: "nginx-demo", MountPath: "/usr/share/nginx"}) // Used for processing services' demo with nginx
+
+		if (b.isService(comp.ID)){
+			serv := b.CreateService(comp)
+			b.createService(serv, argo_name, comp.ID)
+			new_temp.Metadata.Labels = make(map[string]string)
+			new_temp.Metadata.Labels["app"] = "oc-service" // Construct the template for the k8s service and add a link in graph between k8s service and processing
+			// if err != nil {
+			// 	// TODO
+			// }
+		}
+
 		b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, new_temp)
 	}
+
+	if b.Services != nil {
+		b.addServiceToArgo()
+	}

 }

@@ -123,8 +152,13 @@ func (b *ArgoBuilder) createDAGstep() {
 		step.Dependencies = b.getDependency(comp.ID) // Error : we use the component ID instead of the GraphItem ID -> store objects
 		new_dag.Tasks = append(new_dag.Tasks, step)
 	}
-	b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, Template{Name: "dag", Dag: new_dag})
+
+	if b.Services != nil {
+		new_dag.Tasks = append(new_dag.Tasks, Task{Name:"workflow-service-pod", Template: "workflow-service-pod"})
+	}
+
+	b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, Template{Name: "dag", Dag: new_dag})

 }

 func (b *ArgoBuilder) createVolumes() {

@@ -136,12 +170,23 @@ func (b *ArgoBuilder) createVolumes() {
 	b.Workflow.Spec.Volumes = append(b.Workflow.Spec.Volumes, new_volume)
 }

+// For demo purposes, until we implement the use of storage ressources
+func (b *ArgoBuilder) createNginxVolumes() {
+	new_volume := VolumeClaimTemplate{}
+	new_volume.Metadata.Name = "nginx-demo"
+	new_volume.Spec.AccessModes = []string{"ReadWriteOnce"}
+	new_volume.Spec.Resources.Requests.Storage = "1Gi"
+
+	b.Workflow.Spec.Volumes = append(b.Workflow.Spec.Volumes, new_volume)
+}
+
+
 func (b *ArgoBuilder) getDependency(current_computing_id string) (dependencies []string) {
-	for _, link := range b.graph.Links {
+	for _, link := range b.OriginWorkflow.Graph.Links {
-		if !b.IsProcessing(link.Source.ID) || !b.IsProcessing(link.Destination.ID) {
+		if b.OriginWorkflow.Graph.Items[link.Source.ID].Processing == nil {
 			continue
 		}
-		source := b.graph.Items[link.Source.ID].Processing
+		source := b.OriginWorkflow.Graph.Items[link.Source.ID].Processing
 		if current_computing_id == link.Destination.ID && source != nil {
 			dependency_name := getArgoName(source.GetName(), link.Source.ID)
 			dependencies = append(dependencies, dependency_name)

@@ -159,17 +204,20 @@ func getComputingCommands(user_input string) []string {
 	return strings.Split(user_input, " ")
 }

-func getComputingArgs(user_input []string, command string) (list_args []string) {
+func getComputingArgs(user_input string, command string) (list_args []string) {
 	if len(user_input) == 0 {
 		return
 	}

+	args := strings.Split(user_input," ")
+
 	// quickfix that might need improvement
 	if strings.Contains(command, "sh -c") {
-		list_args = append(list_args, strings.Join(user_input, " "))
+		list_args = append(list_args, strings.Join(args, " "))
 		return
 	}

-	list_args = append(list_args, user_input...)
+	list_args = append(list_args, args...)
 	return
 }

@@ -239,7 +287,7 @@ func removeImageName(user_input string) string {

 // Return the graphItem containing a Processing resource, so that we have access to the ID of the graphItem in order to identify it in the links
 func (b *ArgoBuilder) getProcessings() (list_computings []graph.GraphItem) {
-	for _, item := range b.graph.Items {
+	for _, item := range b.OriginWorkflow.Graph.Items {
 		if item.Processing != nil {
 			list_computings = append(list_computings, item)
 		}

@@ -247,13 +295,9 @@ func (b *ArgoBuilder) getProcessings() (list_computings []graph.GraphItem) {
 	return
 }

-func (b *ArgoBuilder) IsProcessing(id string) bool {
-	for _, item := range b.graph.Items {
-		if item.Processing != nil && item.Processing.GetID() == id {
-			return true
-		}
-	}
-	return false
+// Pass a GraphItem's UUID and not the ID
+func (b *ArgoBuilder) IsProcessing(component_uuid string) bool {
+	return slices.Contains(b.OriginWorkflow.Processings, component_uuid)
 }

 func getStringValue(comp resource_model.AbstractResource, key string) string {

@@ -262,3 +306,27 @@ func getStringValue(comp resource_model.AbstractResource, key string) string {
 	}
 	return ""
 }
+
+func (b *ArgoBuilder) isService(id string) bool{
+
+	comp := b.OriginWorkflow.Graph.Items[id]
+
+	if comp.Processing == nil {
+		return false
+	}
+
+	_, is_exposed := comp.Processing.ResourceModel.Model["expose"]
+	return is_exposed
+}
+
+
+func (b *ArgoBuilder) addLabel(name string, id string) {
+	argo_name := getArgoName(name,id)
+	for _, template := range b.Workflow.Spec.Templates{
+		if template.Name == argo_name{
+			template.Metadata.Labels["app"] = "service-workflow"
+			return
+		}
+	}
+}
workflow_builder/argo_services.go (new file, 148 lines)

@@ -0,0 +1,148 @@

package workflow_builder

import (
	"oc-monitord/models"
	"strconv"
	"strings"

	"cloud.o-forge.io/core/oc-lib/models/resource_model"
	"cloud.o-forge.io/core/oc-lib/models/resources/workflow/graph"
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
	"gopkg.in/yaml.v3"
)

// TODO : refactor this method or the deserialization process in oc-lib to get rid of the mongo code
func getExposeContract(expose resource_model.Model) map[string]map[string]string {
	contract := make(map[string]map[string]string,0)

	mapped_info := bson.M{}
	// var contract PortTranslation
	_ , byt, _ := bson.MarshalValue(expose.Value)

	bson.Unmarshal(byt,&mapped_info)

	for _,v := range mapped_info {
		port := v.(primitive.M)["Key"].(string)
		// exposed_port := map[string]interface{}{data["Key"] : ""}
		port_translation := v.(primitive.M)["Value"]
		contract[port] = map[string]string{}
		for _,v2 := range port_translation.(primitive.A) {
			if v2.(primitive.M)["Key"] == "reverse" {
				contract[port]["reverse"] = v2.(primitive.M)["Value"].(string)
			}
			if v2.(primitive.M)["Key"] == "PAT" {
				contract[port]["PAT"] = v2.(primitive.M)["Value"].(string)
			}
		}
	}
	return contract
}


func (b *ArgoBuilder) CreateService(processing graph.GraphItem) models.Service{

	// model {
	//     Type : "dict",
	//     Value : {
	//         "80" : {
	//            "reverse" : "",
	//            "PAT" : "34000"
	//         },
	//         "344" : {
	//            "reverse" : "",
	//            "PAT" : "34400"
	//         }
	//     }
	// }

	new_service := models.Service{APIVersion: "v1",
		Kind: "Service",
		Metadata: models.Metadata{
			Name: "workflow-service" ,
		},
		Spec: models.ServiceSpec{
			Selector: map[string]string{"app": "oc-service"},
			Ports: []models.ServicePort{
			},
			Type: "NodePort",
		},
	}

	completeServicePorts(&new_service, processing)
	yamlified, _ := yaml.Marshal(new_service)
	x := string(yamlified)
	_ = x
	return new_service
}

func completeServicePorts(service *models.Service, processing graph.GraphItem) {

	contract := getExposeContract(processing.Processing.ResourceModel.Model["expose"])

	for str_port,translation_dict := range contract{

		port, err := strconv.ParseInt(str_port, 10, 64)
		if err != nil {
			logger.Error().Msg("Could not convert " + str_port + "to an int")
			return
		}

		if _, ok := translation_dict["PAT"]; ok{
			port_translation, err := strconv.ParseInt(translation_dict["PAT"], 10, 64)
			if err != nil {
				logger.Error().Msg("Could not convert " + translation_dict["PAT"] + "to an int")
				return
			}

			new_port_translation := models.ServicePort{
				Name: strings.ToLower(processing.Processing.Name) + processing.ID,
				Port: port_translation-30000,
				TargetPort: port,
				NodePort: port_translation,
				Protocol: "TCP",
			}
			service.Spec.Ports = append(service.Spec.Ports, new_port_translation)
		}

	}

	return
}

func (b *ArgoBuilder) createService(service models.Service, processing_name string, processing_id string) {
	if b.Services != nil{
		b.Services.Spec.Ports = append(b.Services.Spec.Ports, service.Spec.Ports...)
	}else {
		b.Services = &service
	}

	b.addLabel(processing_name,processing_id)

}

func (b *ArgoBuilder) addServiceToArgo() error {
	service_manifest, err := yaml.Marshal(b.Services)
	if err != nil {
		logger.Error().Msg("Could not marshal service manifest")
		return err
	}

	service_template := models.Template{Name: "workflow-service-pod",
		Resource: models.ServiceResource{
			Action: "create",
			SuccessCondition: "status.succeeded > 0",
			FailureCondition: "status.failed > 3",
			SetOwnerReference: true,
			Manifest: string(service_manifest),
		},
	}
	b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, service_template)

	return nil
}
@@ -46,7 +46,7 @@ func (w *WorflowDB) ExportToArgo(timeout int) (string, error) {
 		return "", fmt.Errorf("can't export a graph that has not been loaded yet")
 	}

-	argo_builder := ArgoBuilder{graph: *w.Workflow.Graph, Timeout: timeout}
+	argo_builder := ArgoBuilder{OriginWorkflow: *w.Workflow, Timeout: timeout}
 	filename, err := argo_builder.CreateDAG()
 	if err != nil {
 		logger.Error().Msg("Could not create the argo file for " + w.Workflow.Name)