225 Commits

Author SHA1 Message Date
pb
778ffa05a1 what is collaborative 2025-06-02 18:05:08 +02:00
pb
3c15907427 added id for logger 2025-06-02 10:34:58 +02:00
pb
9ae5f3b91d timing status checks 2025-05-28 18:19:07 +02:00
pb
3a2141aab5 adding log to measure search time 2025-05-28 16:22:26 +02:00
pb
6ab6383144 corrected a use of the original HTTP caller instead of the deep copy 2025-05-27 18:07:00 +02:00
pb
690d60f9d6 corrected the getBooking function parameters 2025-05-27 16:13:46 +02:00
pb
da0de80afd Booking check and booking post have been transformed into goroutines to improve performance when booking several executions with cron expressions 2025-05-27 15:38:24 +02:00
pb
cd7ae788b1 didn't put the blocking loop in the right place for post booking 2025-05-27 12:06:10 +02:00
pb
0d96cc53bf transformed the loop that posted the booking on oc-datacenter to a threaded operation where each call is done in a goroutine 2025-05-27 11:58:55 +02:00
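The two commits above describe the pattern: instead of posting each booking sequentially, every POST runs in its own goroutine and the caller blocks once all of them have been launched. A minimal sketch of that fan-out, assuming a hypothetical postBooking helper and Booking type rather than the actual oc-lib signatures:

package main

import (
	"fmt"
	"sync"
)

// Booking and postBooking are hypothetical stand-ins for the oc-lib types and
// calls referenced in these commits; only the concurrency pattern is the point.
type Booking struct{ ID string }

func postBooking(b Booking) error {
	fmt.Println("posting booking", b.ID)
	return nil
}

// postBookingsConcurrently fans each POST out into its own goroutine and
// collects failures through a buffered error channel.
func postBookingsConcurrently(bookings []Booking) error {
	var wg sync.WaitGroup
	errs := make(chan error, len(bookings)) // buffered so senders never block

	for _, b := range bookings {
		wg.Add(1)
		go func(b Booking) {
			defer wg.Done()
			if err := postBooking(b); err != nil {
				errs <- err
			}
		}(b)
	}

	wg.Wait()   // block only after every goroutine has been launched
	close(errs) // safe: all senders are done

	for err := range errs {
		return err // report the first failure
	}
	return nil
}

func main() {
	_ = postBookingsConcurrently([]Booking{{ID: "a"}, {ID: "b"}})
}

Sizing the error channel to len(bookings) keeps the goroutines from blocking on send, which is the kind of issue the "correct the error channel" commits in this log deal with.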
pb
66fc3c5b35 added the passing of the request.Caller's URL to the deep copy 2025-05-27 11:34:44 +02:00
pb
5ab3eb8a38 forgot to pass the mutex as pointer and unlock it 2025-05-27 11:17:10 +02:00
pb
fec23b4acd modified HTTP caller to have a DeepCopy() method in order to parallelize calls without race conditions 2025-05-27 11:08:35 +02:00
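A rough illustration of the DeepCopy() idea from commit fec23b4acd: each goroutine works on its own copy of the caller, so per-request state is never shared. The field names below are assumptions based on the surrounding commit messages (URLs to call, last stored response), not the actual oc-lib definition.

package httpcaller

import "net/http"

// HTTPCaller is an assumed shape based on the commit messages; the real
// oc-lib struct may differ.
type HTTPCaller struct {
	URLS         map[string]string
	LastResponse *http.Response
	Disabled     bool
}

// DeepCopy returns an independent copy so concurrent goroutines never share
// mutable state such as the URL map or the last stored response.
func (c *HTTPCaller) DeepCopy() *HTTPCaller {
	copied := &HTTPCaller{
		URLS:     make(map[string]string, len(c.URLS)),
		Disabled: c.Disabled,
		// LastResponse is deliberately not carried over: each copy records its own.
	}
	for k, v := range c.URLS {
		copied.URLS[k] = v
	}
	return copied
}

Each goroutine would then call caller.DeepCopy() before issuing its request, which is also why commit 66fc3c5b35 has to copy the caller's URL across explicitly.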
pb
901622fee0 logging on the booking uuid before the post booking 2025-05-27 09:51:41 +02:00
pb
527e622774 correct the error channel 2025-05-26 19:21:28 +02:00
pb
7223b79fe8 correct the error channel 2025-05-26 19:16:39 +02:00
pb
1ade41aeae moved the code that executes the booking into a separate function so that it can be launched as a goroutine and parallelize get booking 2025-05-26 19:05:17 +02:00
pb
58dc579255 added debug logging 2025-05-26 18:30:56 +02:00
pb
370dac201b In CheckBooking, moved the loop on bookings outside of the loop on execs, which seemed to repeat the peer execution on booking an exponential number of times 2025-05-26 17:55:45 +02:00
pb
2a763006db counting round in exec 2025-05-26 17:41:26 +02:00
pb
522c66653b Added logging for debug 2025-05-26 17:22:09 +02:00
pb
b57f050b81 increased the limit of returns by Mongo find() 2025-05-22 14:41:38 +02:00
pb
41ebcf150a added logging when booking 2025-05-07 18:16:38 +02:00
pb
1499def6ad added booking on the computing and data resource's peers 2025-05-07 14:54:31 +02:00
pb
adbab0f5d7 added more info on error returned by LaunchPeerExecution() 2025-04-30 16:13:49 +02:00
pb
88c88cac5b testing simplified urlFormat() method, which works thanks to Traefik 2025-03-13 16:57:27 +01:00
pb
1ae38c98ad correct path for ADMIRALTY_NODESAPI 2025-03-13 11:58:57 +01:00
pb
2d517cc594 Correcting an error in CallGet() 2025-03-12 15:41:10 +01:00
pb
a9c82bd261 Replaced the return of Call[Method]() by the stored value of the resp.Body 2025-03-12 15:37:03 +01:00
pb
79aec86f5f Replaced the return of Call[Method]() by the stored value of the resp.Body 2025-03-12 15:26:00 +01:00
pb
9b3dfc7576 fixing how the last result is stored in HTTPCaller 2025-03-12 12:13:55 +01:00
pb
037ae74782 modified the way HTTPCaller stores the last response 2025-03-12 12:09:55 +01:00
pb
b81c60a3ce modified the way HTTPCaller stores the last response 2025-03-12 12:00:32 +01:00
pb
363ac94c47 debug instructions 2025-03-12 11:35:25 +01:00
pb
378f9e5095 Added a new http.Response field to HTTPCaller to store results for each call 2025-03-12 10:39:20 +01:00
pb
659b494ee4 Added a new field to HTTPCaller to store results for each call 2025-03-12 09:45:17 +01:00
pb
92965c6af2 Added more information in the error when the LaunchPeerExecution method doesn't match the caller's 2025-03-11 16:48:05 +01:00
pb
70cb5aec9f changed some variable names for a better understanding of the process in LaunchPeerExecution 2025-03-11 12:03:35 +01:00
pb
d59e77d5a2 changed how the URL is constructed in LaunchPeerExecution by placing meth after peer URL + dt 2025-03-05 16:42:22 +01:00
pb
ff1b857ab0 removed caller from checkPeerStatus() parameters by adding the path to url 2025-03-05 11:54:14 +01:00
pb
dbdccdb920 Corrected urlFormat() from peer_cache, which constructed the URL with the API's name at the end 2025-03-05 11:01:46 +01:00
pb
fd3fef72d3 correct DefaultAPI list 2025-03-03 10:55:00 +01:00
pb
1890fd4f71 added the resources used for admiralty in datacenter API 2025-03-03 10:33:37 +01:00
pb
95af3cb515 removed caller from checkPeerStatus() parameters by adding the path to url 2025-03-03 10:32:52 +01:00
pb
3acebc451e Added the required tag to ExecutionsID 2025-02-26 11:00:37 +01:00
mr
5111c9c8be discovery clear view 2025-02-19 15:29:42 +01:00
mr
3ecb0e9d96 set up auth for workspace 2025-02-19 11:41:52 +01:00
mr
b4a1766677 better load & peer cache for Traefik 2025-02-19 11:03:12 +01:00
mr
241c6a5a08 casual debug, time added before change of state of booking & exec 2025-02-19 08:55:11 +01:00
mr
7c30633bde verifyAuthAction for instance 2025-02-18 14:02:29 +01:00
mr
81d3406305 verifyAuthAction for instance 2025-02-18 12:55:49 +01:00
mr
04f7537066 save 2025-02-18 12:39:16 +01:00
mr
6bf058ab5c save 2025-02-18 11:11:40 +01:00
mr
b771b5d25e save 2025-02-18 10:25:08 +01:00
mr
6e6ed4ea2c debug 2025-02-18 09:53:55 +01:00
mr
a098f0a672 conf 2025-02-18 09:01:21 +01:00
mr
cafadec146 missing res access 2025-02-17 08:25:19 +01:00
mr
0940b63961 correct 2025-02-14 16:16:25 +01:00
mr
a2dca94dca correct 2025-02-14 15:01:49 +01:00
mr
085a8718e0 correct 2025-02-13 15:11:23 +01:00
mr
271cc2caa0 workflow scheduler create booking with a booking execution lot id 2025-02-13 09:50:18 +01:00
mr
42b60ca5cd workflow scheduler create booking with a booking execution lot id 2025-02-13 09:10:24 +01:00
mr
4920322d0a workflow scheduler create booking with a booking execution lot id 2025-02-13 08:26:26 +01:00
mr
c7c1535ba9 workflow scheduler create booking with a booking execution lot id 2025-02-12 16:08:15 +01:00
mr
576f53f81b workflow scheduler create booking with a booking execution lot id 2025-02-12 15:45:03 +01:00
mr
c0e6247fb8 workflow scheduler create booking with a booking execution lot id 2025-02-12 15:27:05 +01:00
mr
3e85fdc779 workflow scheduler create booking with a booking execution lot id 2025-02-12 14:14:28 +01:00
mr
4833bcb710 workflow scheduler create booking with a booking execution lot id 2025-02-12 14:08:57 +01:00
mr
7d69d65dd2 workflow scheduler create booking with a booking execution lot id 2025-02-12 11:41:34 +01:00
mr
a098b3797a workflow scheduler create booking with a booking execution lot id 2025-02-11 15:33:01 +01:00
mr
7d03676ac2 workflow scheduler create booking with a booking execution lot id 2025-02-11 14:11:12 +01:00
mr
945b7a893e workflow scheduler create booking with a booking execution lot id 2025-02-11 13:58:24 +01:00
mr
ef028cb2b9 workflow scheduler create booking with a booking execution lot id 2025-02-11 13:54:06 +01:00
mr
4cfd0a1789 workflow scheduler create booking with a booking execution lot id 2025-02-11 12:28:04 +01:00
mr
7c57cf34a8 workflow scheduler create booking with a booking execution lot id 2025-02-11 12:13:34 +01:00
mr
019b590b4f workflow scheduler create booking with a booking execution lot id 2025-02-11 11:26:02 +01:00
mr
d82ae166a1 add purchase resource in model catalog 2025-02-11 09:16:18 +01:00
mr
ffaa67fb5d add purchase resource in model catalog 2025-02-11 08:30:38 +01:00
mr
a573a4ce71 add purchase resource in model catalog 2025-02-11 07:55:15 +01:00
mr
52d5a1fbf9 add purchase resource in model catalog 2025-02-10 13:10:42 +01:00
mr
4ad32401fd add purchase resource in model catalog 2025-02-10 13:04:13 +01:00
mr
f663ec80f5 add purchase resource in model catalog 2025-02-10 11:32:55 +01:00
mr
e55727d9e2 add purchase resource in model catalog 2025-02-10 10:42:37 +01:00
mr
4a178d01e3 add purchase resource in model catalog 2025-02-10 09:58:46 +01:00
mr
3d13833572 workflow execution evolved 2025-02-07 11:41:12 +01:00
mr
31ec352b57 workflow execution evolved 2025-02-07 08:29:57 +01:00
mr
940ef17f7b workflow execution evolved 2025-02-06 12:56:51 +01:00
mr
ad3293da9d workflow execution evolved 2025-02-06 11:13:06 +01:00
mr
3ffff7d32c workflow execution evolved 2025-02-06 09:56:00 +01:00
mr
e646cfef0b workflow execution evolved 2025-02-06 09:08:35 +01:00
mr
88b7cfe2fd workflow partial allows 2025-02-05 17:02:21 +01:00
mr
7201cabb43 workflow partial allows 2025-02-05 16:41:16 +01:00
mr
a8e2445c10 peer is a public data 2025-02-04 16:51:13 +01:00
mr
69bf951866 peer is a public data 2025-02-04 14:43:21 +01:00
mr
3061df4f13 peer is a public data 2025-02-04 12:07:09 +01:00
mr
2ccb57ffb0 peer is a public data 2025-02-04 10:14:10 +01:00
mr
847fce07bb peer is a public data 2025-02-04 10:12:22 +01:00
mr
f481cde465 peer is a public data 2025-02-04 10:11:40 +01:00
mr
bf114b39b7 peer is a public data 2025-02-04 09:00:55 +01:00
mr
22d15fe395 adding inputs output struct based on argo naming for now 2025-02-03 15:33:22 +01:00
mr
14977c7b2c adding inputs output struct based on argo naming for now 2025-02-03 13:45:14 +01:00
mr
8d9bb20538 adding inputs output struct based on argo naming for now 2025-02-03 13:44:15 +01:00
mr
6a977203ab adding inputs output struct based on argo naming for now 2025-02-03 13:36:09 +01:00
mr
275bd56fe6 adding inputs output struct based on argo naming for now 2025-02-03 12:38:30 +01:00
mr
2662709fed adding inputs output struct based on argo naming for now 2025-02-03 12:21:50 +01:00
mr
64bea2a66e adding inputs output struct based on argo naming for now 2025-02-03 11:52:49 +01:00
mr
6807614ac8 adding inputs output struct based on argo naming for now 2025-01-31 16:36:10 +01:00
mr
676f2f4caa adding inputs output struct based on argo naming for now 2025-01-31 16:29:04 +01:00
mr
a2f2d0ebef adding inputs output struct based on argo naming for now 2025-01-31 12:07:30 +01:00
mr
b2113bff62 adding inputs output struct based on argo naming for now 2025-01-31 11:01:42 +01:00
mr
892bd93471 adding inputs output struct based on argo naming for now 2025-01-31 09:23:40 +01:00
mr
3ec0d554ed adding inputs output struct based on argo naming for now 2025-01-31 08:38:00 +01:00
mr
976a5cedcb adding inputs output struct based on argo naming for now 2025-01-30 14:08:47 +01:00
mr
107ce25801 adding inputs output struct based on argo naming for now 2025-01-30 11:11:34 +01:00
mr
6350491f9f adding inputs output struct based on argo naming for now 2025-01-30 10:26:59 +01:00
mr
c78f758202 adding inputs output struct based on argo naming for now 2025-01-30 10:24:44 +01:00
mr
787c01b4be adding inputs output struct based on argo naming for now 2025-01-30 09:45:13 +01:00
mr
826d7586b1 adding inputs output struct based on argo naming for now 2025-01-30 08:24:03 +01:00
mr
84d20c52fa adding inputs output struct based on argo naming for now 2025-01-29 16:49:25 +01:00
mr
b176874c2b adding inputs output struct based on argo naming for now 2025-01-29 16:21:58 +01:00
mr
df2c38199c adding inputs output struct based on argo naming for now 2025-01-29 15:30:04 +01:00
mr
ede2d5fd53 adding inputs output struct based on argo naming for now 2025-01-29 14:33:24 +01:00
mr
d111a97521 adding inputs output struct based on argo naming for now 2025-01-29 14:20:46 +01:00
mr
330768490a adding inputs output struct based on argo naming for now 2025-01-29 11:01:35 +01:00
mr
74a1f66d26 adding inputs output struct based on argo naming for now 2025-01-29 08:37:43 +01:00
mr
598774b0b1 adding inputs output struct based on argo naming for now 2025-01-28 14:19:16 +01:00
mr
bf1d4a4001 adding inputs output struct based on argo naming for now 2025-01-28 13:38:31 +01:00
mr
db85d1a48b adding inputs output struct based on argo naming for now 2025-01-27 16:03:45 +01:00
mr
3ff7b47995 adding inputs output struct based on argo naming for now 2025-01-27 16:02:45 +01:00
mr
8b03df7923 adding inputs output struct based on argo naming for now 2025-01-27 14:42:57 +01:00
mr
98dc733240 adding inputs output struct based on argo naming for now 2025-01-27 14:41:03 +01:00
mr
c02e3dffcf adding inputs output struct based on argo naming for now 2025-01-27 14:38:44 +01:00
mr
1cdbcca7f7 adding inputs output struct based on argo naming for now 2025-01-27 14:31:12 +01:00
mr
9b8acb83cb adding inputs output struct based on argo naming for now 2025-01-27 14:30:47 +01:00
mr
7ca360be6a adding inputs output struct based on argo naming for now 2025-01-27 14:15:12 +01:00
mr
6c4fab1adc adding inputs output struct based on argo naming for now 2025-01-27 14:01:42 +01:00
mr
6551b1b97d adding inputs output struct based on argo naming for now 2025-01-27 14:00:20 +01:00
mr
71d9bd4678 adding inputs output struct based on argo naming for now 2025-01-27 13:24:14 +01:00
mr
1ad9ce09cb adding inputs output struct based on argo naming for now 2025-01-27 12:09:38 +01:00
mr
d731277914 adding inputs output struct based on argo naming for now 2025-01-27 12:04:48 +01:00
mr
24fe99cfa5 adding inputs output struct based on argo naming for now 2025-01-27 11:40:10 +01:00
mr
2a373e7368 adding inputs output struct based on argo naming for now 2025-01-27 11:36:57 +01:00
mr
68bacf5da4 adding inputs output struct based on argo naming for now 2025-01-27 09:37:56 +01:00
mr
ed158ffdcb adding inputs output struct based on argo naming for now 2025-01-27 09:19:37 +01:00
mr
fbb55e64dc adding inputs output struct based on argo naming for now 2025-01-27 09:05:47 +01:00
mr
1521b8fac5 adding inputs output struct based on argo naming for now 2025-01-24 15:33:57 +01:00
mr
97d466818a light modification 2025-01-24 10:55:57 +01:00
mr
c1888f8921 light modification 2025-01-23 15:08:34 +01:00
mr
db6049bab3 light modification 2025-01-23 14:47:17 +01:00
mr
5cc68bca6d light modification 2025-01-23 14:36:19 +01:00
mr
49e495f062 light modification 2025-01-23 12:49:59 +01:00
mr
1952d905d2 light modification 2025-01-23 12:47:04 +01:00
mr
2205ac9b58 light modification 2025-01-23 11:35:35 +01:00
mr
e9017767d1 light modification 2025-01-23 11:31:56 +01:00
mr
ad660b0ce8 light modification 2025-01-23 11:28:48 +01:00
mr
d15fdac27b light modification 2025-01-23 10:49:50 +01:00
mr
386881c283 light modification 2025-01-23 10:33:43 +01:00
mr
8cba10c4fe light modification 2025-01-23 09:27:27 +01:00
mr
f8ac3154e1 light modification 2025-01-23 09:06:22 +01:00
mr
df04133551 light modification 2025-01-23 08:48:22 +01:00
mr
99693d8ec0 light modification 2025-01-23 08:35:28 +01:00
mr
0e798dac50 light modification 2025-01-22 16:30:05 +01:00
mr
e6ac7d0da6 light modification 2025-01-22 16:17:55 +01:00
mr
9c71730d9c light modification 2025-01-22 15:03:40 +01:00
mr
4be954a6f3 light modification 2025-01-22 14:53:42 +01:00
mr
e9278111a6 light modification 2025-01-22 14:13:10 +01:00
mr
ed1e761052 light modification 2025-01-22 13:18:14 +01:00
mr
86b1e4ad5d light modification 2025-01-22 13:12:14 +01:00
mr
062c1afe85 light modification 2025-01-22 12:04:38 +01:00
mr
fa00980352 light modification 2025-01-22 11:55:02 +01:00
mr
2a93b17d71 light modification 2025-01-22 11:16:35 +01:00
mr
287aa3dea3 light modification 2025-01-22 11:11:04 +01:00
mr
8ab313e6cb light modification 2025-01-22 10:07:36 +01:00
mr
cccb54d38f light modification 2025-01-22 09:59:28 +01:00
mr
67940296d2 light modification 2025-01-22 09:06:53 +01:00
mr
67ebeca1f4 light modification 2025-01-21 17:04:38 +01:00
mr
b45e882559 light modification 2025-01-21 16:50:49 +01:00
mr
745bb58c59 light modification 2025-01-21 14:10:07 +01:00
mr
bf5a16f41b light modification 2025-01-21 11:55:44 +01:00
mr
bc12fb53be light modification 2025-01-21 11:11:18 +01:00
mr
0d83885b9b light modification 2025-01-21 09:35:41 +01:00
mr
de585a7234 light modification 2025-01-21 09:02:57 +01:00
mr
5c2980fb36 light modification 2025-01-21 09:02:26 +01:00
mr
e741a95cdb light modification 2025-01-21 08:58:04 +01:00
mr
19eb5239a6 light modification 2025-01-21 08:36:10 +01:00
mr
305f260503 light modification 2025-01-20 15:35:09 +01:00
mr
d1f6331ff8 light modification 2025-01-20 14:43:02 +01:00
mr
67b8215adf light modification 2025-01-20 13:49:39 +01:00
mr
58b36f2823 light modification 2025-01-20 13:37:06 +01:00
mr
2452d37acf light modification 2025-01-20 13:29:04 +01:00
mr
8e4ebbf622 light modification 2025-01-20 13:26:30 +01:00
mr
b85ca8674b light modification 2025-01-17 16:22:46 +01:00
mr
c63a1fef6c light modification 2025-01-17 14:54:17 +01:00
mr
66196da877 light modification 2025-01-17 14:45:35 +01:00
mr
e5c7dbe4cb light modification 2025-01-17 13:48:01 +01:00
mr
f72ceecc19 light modification 2025-01-17 13:46:36 +01:00
mr
ed787683f4 light modification 2025-01-17 13:19:20 +01:00
mr
d44fb976e4 inspect search bug 2025-01-17 11:05:08 +01:00
mr
fb1b44e1d1 light modification 2025-01-17 10:56:45 +01:00
mr
d00109daf3 light modification 2025-01-17 10:55:06 +01:00
mr
367613a9d5 light modification 2025-01-17 10:34:44 +01:00
mr
b990fe42d3 inspect search bug 2025-01-17 10:07:37 +01:00
mr
7d11c23eba inspect search bug 2025-01-17 09:51:50 +01:00
mr
450fab437c inspect search bug 2025-01-17 09:16:40 +01:00
mr
a4a249bab8 light modification 2025-01-16 15:25:44 +01:00
mr
d9c9f05cd2 light modification 2025-01-16 15:14:56 +01:00
mr
68f4189283 light modification 2025-01-16 10:14:55 +01:00
mr
0e0540af43 light modification 2025-01-15 11:28:20 +01:00
mr
555c5acb26 light modification 2025-01-15 11:09:33 +01:00
mr
b48e2cb3e5 light modification 2025-01-15 11:02:00 +01:00
mr
cf1c5f2186 light modification 2025-01-15 11:01:13 +01:00
mr
be38030395 light modification 2025-01-15 10:56:44 +01:00
mr
ad69c04951 light modification 2025-01-15 09:20:26 +01:00
mr
abd6c1d712 light modification 2025-01-14 15:37:30 +01:00
mr
a55f4c449c light modification 2025-01-14 15:03:29 +01:00
mr
1a4694c891 light modification 2025-01-14 14:50:55 +01:00
mr
b782248da7 light modification 2025-01-14 11:53:39 +01:00
mr
ae9a80c8f3 light modification 2025-01-14 11:28:16 +01:00
mr
918006302b data change for resource struct 2025-01-14 09:16:37 +01:00
mr
f30076e0f5 data change for resource struct 2025-01-14 09:15:50 +01:00
plm
5255ffc2f7 Merging issue#4 2025-01-10 17:43:31 +01:00
plm
fd1c579ec4 Removing required field on PeerId, see #7 2025-01-10 17:39:58 +01:00
plm
0f4adeea86 Same prefix for all builtin microservices in opencloud 2025-01-08 16:55:42 +01:00
plm
245f3adea3 Merge branch 'issue#4' 2024-12-16 09:18:58 +01:00
plm
21d08204b5 Fixing env based layer; not using onion builtin mechanism to preserve opencloud conf key/value format 2024-12-16 09:17:54 +01:00
plm
1de4888599 Remove extra underscore character; it breaks the env var loading 2024-12-10 14:01:47 +01:00
54 changed files with 1524 additions and 1137 deletions

View File

@@ -26,12 +26,12 @@ import (
func GetConfLoader() *onion.Onion {
	logger := zerolog.New(os.Stdout).With().Timestamp().Logger()
	AppName := GetAppName()
-	EnvPrefix := strings.ToUpper(AppName[0:2]+AppName[3:]) + "_"
+	EnvPrefix := "OC_"
	defaultConfigFile := "/etc/oc/" + AppName[3:] + ".json"
	localConfigFile := "./" + AppName[3:] + ".json"
	var configFile string
	var o *onion.Onion
-	l3 := onion.NewEnvLayerPrefix("_", EnvPrefix)
+	l3 := GetEnvVarLayer(EnvPrefix)
	l2, err := onion.NewFileLayer(localConfigFile, nil)
	if err == nil {
		logger.Info().Msg("Local config file found " + localConfigFile + ", overriding default file")
@@ -54,3 +54,17 @@ func GetConfLoader() *onion.Onion {
	}
	return o
}
+
+func GetEnvVarLayer(prefix string) onion.Layer {
+	envVars := make(map[string]interface{})
+	for _, e := range os.Environ() {
+		pair := strings.SplitN(e, "=", 2)
+		key := pair[0]
+		if strings.HasPrefix(key, prefix) {
+			envVars[strings.TrimPrefix(key, prefix)] = pair[1]
+		}
+	}
+	return onion.NewMapLayer(envVars)
+}
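The GetEnvVarLayer function added above keeps only the variables carrying the configured prefix and strips that prefix before building the map layer. A small standalone sketch of the same filtering step (the helper name is illustrative, not part of oc-lib), showing how OC_MONGO_URL would surface under the key MONGO_URL:

package main

import (
	"fmt"
	"os"
	"strings"
)

// envVarsWithPrefix mirrors the filtering done in GetEnvVarLayer: keep only
// variables starting with the prefix and strip it from the key.
func envVarsWithPrefix(prefix string) map[string]interface{} {
	vars := make(map[string]interface{})
	for _, e := range os.Environ() {
		pair := strings.SplitN(e, "=", 2)
		if strings.HasPrefix(pair[0], prefix) {
			vars[strings.TrimPrefix(pair[0], prefix)] = pair[1]
		}
	}
	return vars
}

func main() {
	os.Setenv("OC_MONGO_URL", "mongodb://localhost:27017")
	// With the "OC_" prefix, the layer exposes the key "MONGO_URL".
	fmt.Println(envVarsWithPrefix("OC_")["MONGO_URL"])
}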

View File

@@ -3,7 +3,6 @@ package mongo
import (
	"context"
	"errors"
-	"fmt"
	"slices"
	"time"
@@ -49,7 +48,7 @@ func (m *MongoDB) Init(collections []string, config MongoConf) {
	mngoCollections = collections
	mngoConfig = config
	if err := m.createClient(config.GetUrl(), false); err != nil {
-		m.Logger.Error().Msg(err.Error())
+		// m.Logger.Error().Msg(err.Error())
	}
}
@@ -171,12 +170,12 @@ func (m *MongoDB) DeleteOne(id string, collection_name string) (int64, int, erro
	filter := bson.M{"_id": id}
	targetDBCollection := CollectionMap[collection_name]
	opts := options.Delete().SetHint(bson.D{{Key: "_id", Value: 1}})
-	MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
-	defer cancel()
+	MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
+	//defer cancel()
	result, err := targetDBCollection.DeleteOne(MngoCtx, filter, opts)
	if err != nil {
-		m.Logger.Error().Msg("Couldn't insert resource: " + err.Error())
+		// m.Logger.Error().Msg("Couldn't insert resource: " + err.Error())
		return 0, 404, err
	}
	return result.DeletedCount, 200, nil
@@ -192,12 +191,12 @@ func (m *MongoDB) DeleteMultiple(f map[string]interface{}, collection_name strin
	}
	targetDBCollection := CollectionMap[collection_name]
	opts := options.Delete().SetHint(bson.D{{Key: "_id", Value: 1}})
-	MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
-	defer cancel()
+	MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
+	//defer cancel()
	result, err := targetDBCollection.DeleteMany(MngoCtx, filter, opts)
	if err != nil {
-		m.Logger.Error().Msg("Couldn't insert resource: " + err.Error())
+		// m.Logger.Error().Msg("Couldn't insert resource: " + err.Error())
		return 0, 404, err
	}
	return result.DeletedCount, 200, nil
@@ -215,11 +214,11 @@ func (m *MongoDB) UpdateMultiple(set interface{}, filter map[string]interface{},
		f = append(f, bson.E{Key: k, Value: v})
	}
	targetDBCollection := CollectionMap[collection_name]
-	MngoCtx, cancel = context.WithTimeout(context.Background(), 50*time.Second)
-	defer cancel()
+	MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
+	//defer cancel()
	res, err := targetDBCollection.UpdateMany(MngoCtx, f, dbs.InputToBson(doc, true))
	if err != nil {
-		m.Logger.Error().Msg("Couldn't update resource: " + err.Error())
+		// m.Logger.Error().Msg("Couldn't update resource: " + err.Error())
		return 0, 404, err
	}
	return res.UpsertedCount, 200, nil
@@ -234,11 +233,11 @@ func (m *MongoDB) UpdateOne(set interface{}, id string, collection_name string)
	bson.Unmarshal(b, &doc)
	filter := bson.M{"_id": id}
	targetDBCollection := CollectionMap[collection_name]
-	MngoCtx, cancel = context.WithTimeout(context.Background(), 50*time.Second)
-	defer cancel()
+	MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
+	//defer cancel()
	_, err := targetDBCollection.UpdateOne(MngoCtx, filter, dbs.InputToBson(doc, true))
	if err != nil {
-		m.Logger.Error().Msg("Couldn't update resource: " + err.Error())
+		// m.Logger.Error().Msg("Couldn't update resource: " + err.Error())
		return "", 404, err
	}
	return id, 200, nil
@@ -253,12 +252,12 @@ func (m *MongoDB) StoreOne(obj interface{}, id string, collection_name string) (
	bson.Unmarshal(b, &doc)
	doc["_id"] = id
	targetDBCollection := CollectionMap[collection_name]
-	MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
-	defer cancel()
+	MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
+	//defer cancel()
	_, err := targetDBCollection.InsertOne(MngoCtx, doc)
	if err != nil {
-		m.Logger.Error().Msg("Couldn't insert resource: " + err.Error())
+		// m.Logger.Error().Msg("Couldn't insert resource: " + err.Error())
		return "", 409, err
	}
@@ -271,12 +270,12 @@ func (m *MongoDB) LoadOne(id string, collection_name string) (*mongo.SingleResul
	}
	filter := bson.M{"_id": id}
	targetDBCollection := CollectionMap[collection_name]
-	MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
-	defer cancel()
+	MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
+	//defer cancel()
	res := targetDBCollection.FindOne(MngoCtx, filter)
	if res.Err() != nil {
-		m.Logger.Error().Msg("Couldn't find resource " + id + ". Error : " + res.Err().Error())
+		// m.Logger.Error().Msg("Couldn't find resource " + id + ". Error : " + res.Err().Error())
		err := res.Err()
		return nil, 404, err
	}
@@ -288,8 +287,7 @@ func (m *MongoDB) Search(filters *dbs.Filters, collection_name string) (*mongo.C
		return nil, 503, err
	}
	opts := options.Find()
-	opts.SetLimit(100)
-	fmt.Println("Filters: ", CollectionMap, collection_name)
+	opts.SetLimit(1000)
	targetDBCollection := CollectionMap[collection_name]
	orList := bson.A{}
	andList := bson.A{}
@@ -315,8 +313,8 @@
		}
	}
-	MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
-	defer cancel()
+	MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
+	// defer cancel()
	if cursor, err := targetDBCollection.Find(
		MngoCtx,
		f,
@@ -338,12 +336,12 @@ func (m *MongoDB) LoadFilter(filter map[string]interface{}, collection_name stri
	}
	targetDBCollection := CollectionMap[collection_name]
-	MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
-	defer cancel()
+	MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
+	//defer cancel()
	res, err := targetDBCollection.Find(MngoCtx, f)
	if err != nil {
-		m.Logger.Error().Msg("Couldn't find any resources. Error : " + err.Error())
+		// m.Logger.Error().Msg("Couldn't find any resources. Error : " + err.Error())
		return nil, 404, err
	}
	return res, 200, nil
@@ -355,12 +353,12 @@ func (m *MongoDB) LoadAll(collection_name string) (*mongo.Cursor, int, error) {
	}
	targetDBCollection := CollectionMap[collection_name]
-	MngoCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
-	defer cancel()
+	MngoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
+	//defer cancel()
	res, err := targetDBCollection.Find(MngoCtx, bson.D{})
	if err != nil {
-		m.Logger.Error().Msg("Couldn't find any resources. Error : " + err.Error())
+		// m.Logger.Error().Msg("Couldn't find any resources. Error : " + err.Error())
		return nil, 404, err
	}
	return res, 200, nil

View File

@@ -20,7 +20,6 @@ import (
"cloud.o-forge.io/core/oc-lib/models/order" "cloud.o-forge.io/core/oc-lib/models/order"
"cloud.o-forge.io/core/oc-lib/models/peer" "cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/resources" "cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
w2 "cloud.o-forge.io/core/oc-lib/models/workflow" w2 "cloud.o-forge.io/core/oc-lib/models/workflow"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution" "cloud.o-forge.io/core/oc-lib/models/workflow_execution"
@@ -197,48 +196,6 @@ func SetConfig(mongoUrl string, database string, natsUrl string, lokiUrl string,
}() }()
logs.CreateLogger("main") logs.CreateLogger("main")
mongo.MONGOService.Init(models.GetModelsNames(), config.GetConfig()) // init the mongo service mongo.MONGOService.Init(models.GetModelsNames(), config.GetConfig()) // init the mongo service
/*
Here we will check if the resource model is already stored in the database
If not we will store it
Resource model is the model that will define the structure of the resources
*/
accessor := (&resource_model.ResourceModel{}).GetAccessor(nil)
for _, model := range []string{tools.DATA_RESOURCE.String(), tools.PROCESSING_RESOURCE.String(), tools.STORAGE_RESOURCE.String(), tools.COMPUTE_RESOURCE.String(), tools.WORKFLOW_RESOURCE.String()} {
data, code, _ := accessor.Search(nil, model, true)
if code == 404 || len(data) == 0 {
refs := map[string]string{}
m := map[string]resource_model.Model{}
// for now only processing is specified here (not an elegant way)
if model == tools.DATA_RESOURCE.String() || model == tools.STORAGE_RESOURCE.String() {
refs["path"] = "string"
}
if model == tools.PROCESSING_RESOURCE.String() {
m["command"] = resource_model.Model{
Type: "string",
ReadOnly: false,
}
m["args"] = resource_model.Model{
Type: "string",
ReadOnly: false,
}
m["env"] = resource_model.Model{
Type: "string",
ReadOnly: false,
}
m["volumes"] = resource_model.Model{
Type: "map[string]string",
ReadOnly: false,
}
}
accessor.StoreOne(&resource_model.ResourceModel{
ResourceType: model,
VarRefs: refs,
Model: map[string]map[string]resource_model.Model{
"container": m,
},
})
}
}
return cfg return cfg
} }
@@ -289,21 +246,22 @@ func ToScheduler(m interface{}) (n *workflow_execution.WorkflowSchedule) {
return m.(*workflow_execution.WorkflowSchedule) return m.(*workflow_execution.WorkflowSchedule)
} }
func (r *Request) Schedule(wfID string, start string, end string, durationInS float64, cron string) (*workflow_execution.WorkflowSchedule, error) { func (r *Request) Schedule(wfID string, scheduler *workflow_execution.WorkflowSchedule) (*workflow_execution.WorkflowSchedule, error) {
scheduler := workflow_execution.NewScheduler(start, end, durationInS, cron) ws, _, _, err := scheduler.Schedules(wfID, &tools.APIRequest{
if _, _, err := scheduler.Schedules(wfID, &tools.APIRequest{
Caller: r.caller, Caller: r.caller,
Username: r.user, Username: r.user,
PeerID: r.peerID, PeerID: r.peerID,
Groups: r.groups, Groups: r.groups,
}); err != nil { })
if err != nil {
return nil, err return nil, err
} }
return scheduler, nil fmt.Println("BAM", ws)
return ws, nil
} }
func (r *Request) CheckBooking(wfID string, start string, end string, durationInS float64, cron string) bool { func (r *Request) CheckBooking(wfID string, start string, end string, durationInS float64, cron string) bool {
ok, _, _, err := workflow_execution.NewScheduler(start, end, durationInS, cron).CheckBooking(wfID, &tools.APIRequest{ ok, _, _, _, err := workflow_execution.NewScheduler(start, end, durationInS, cron).CheckBooking(wfID, &tools.APIRequest{
Caller: r.caller, Caller: r.caller,
Username: r.user, Username: r.user,
PeerID: r.peerID, PeerID: r.peerID,
@@ -605,9 +563,9 @@ func (l *LibData) ToRule() *rule.Rule {
return nil return nil
} }
func (l *LibData) ToWorkflowExecution() *workflow_execution.WorkflowExecutions { func (l *LibData) ToWorkflowExecution() *workflow_execution.WorkflowExecution {
if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW_EXECUTION { if l.Data.GetAccessor(nil).GetType() == tools.WORKFLOW_EXECUTION {
return l.Data.(*workflow_execution.WorkflowExecutions) return l.Data.(*workflow_execution.WorkflowExecution)
} }
return nil return nil
} }

1
go.mod
View File

@@ -38,7 +38,6 @@ require (
	github.com/klauspost/compress v1.17.9 // indirect
	github.com/kr/text v0.1.0 // indirect
	github.com/leodido/go-urn v1.4.0 // indirect
-	github.com/marcinwyszynski/geopoint v0.0.0-20140302213024-cf2a6f750c5b
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/montanaflynn/stats v0.7.1 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect

2
go.sum
View File

@@ -55,8 +55,6 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/marcinwyszynski/geopoint v0.0.0-20140302213024-cf2a6f750c5b h1:XBF8THPBy28s2ryI7+/Jf/847unLWxYMpJveX5Kox+0=
-github.com/marcinwyszynski/geopoint v0.0.0-20140302213024-cf2a6f750c5b/go.mod h1:z1oqhOuuYpPHmUmAK2aNygKFlPdb4o3PppQnVTRFdrI=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=

View File

@@ -4,7 +4,8 @@ import (
"time" "time"
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/models/common" "cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/common/enum"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
"go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/bson/primitive"
@@ -14,13 +15,14 @@ import (
* Booking is a struct that represents a booking * Booking is a struct that represents a booking
*/ */
type Booking struct { type Booking struct {
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name) utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
DestPeerID string `json:"dest_peer_id,omitempty"` // DestPeerID is the ID of the destination peer ExecutionsID string `json:"executions_id,omitempty" bson:"executions_id,omitempty" validate:"required"` // ExecutionsID is the ID of the executions
WorkflowID string `json:"workflow_id,omitempty" bson:"workflow_id,omitempty"` // WorkflowID is the ID of the workflow DestPeerID string `json:"dest_peer_id,omitempty"` // DestPeerID is the ID of the destination peer
ExecutionID string `json:"execution_id,omitempty" bson:"execution_id,omitempty" validate:"required"` WorkflowID string `json:"workflow_id,omitempty" bson:"workflow_id,omitempty"` // WorkflowID is the ID of the workflow
State common.ScheduledType `json:"state,omitempty" bson:"state,omitempty" validate:"required"` // State is the state of the booking ExecutionID string `json:"execution_id,omitempty" bson:"execution_id,omitempty" validate:"required"`
ExpectedStartDate time.Time `json:"expected_start_date,omitempty" bson:"expected_start_date,omitempty" validate:"required"` // ExpectedStartDate is the expected start date of the booking State enum.BookingStatus `json:"state,omitempty" bson:"state,omitempty" validate:"required"` // State is the state of the booking
ExpectedEndDate *time.Time `json:"expected_end_date,omitempty" bson:"expected_end_date,omitempty" validate:"required"` // ExpectedEndDate is the expected end date of the booking ExpectedStartDate time.Time `json:"expected_start_date,omitempty" bson:"expected_start_date,omitempty" validate:"required"` // ExpectedStartDate is the expected start date of the booking
ExpectedEndDate *time.Time `json:"expected_end_date,omitempty" bson:"expected_end_date,omitempty" validate:"required"` // ExpectedEndDate is the expected end date of the booking
RealStartDate *time.Time `json:"real_start_date,omitempty" bson:"real_start_date,omitempty"` // RealStartDate is the real start date of the booking RealStartDate *time.Time `json:"real_start_date,omitempty" bson:"real_start_date,omitempty"` // RealStartDate is the real start date of the booking
RealEndDate *time.Time `json:"real_end_date,omitempty" bson:"real_end_date,omitempty"` // RealEndDate is the real end date of the booking RealEndDate *time.Time `json:"real_end_date,omitempty" bson:"real_end_date,omitempty"` // RealEndDate is the real end date of the booking
@@ -38,16 +40,21 @@ func (wfa *Booking) Check(id string, start time.Time, end *time.Time, parrallelA
end = &e end = &e
} }
accessor := NewAccessor(nil) accessor := NewAccessor(nil)
l := logs.GetLogger().With().Str("Search Check", "Booking").Logger()
l.Debug().Msg("Starting to search")
res, code, err := accessor.Search(&dbs.Filters{ res, code, err := accessor.Search(&dbs.Filters{
And: map[string][]dbs.Filter{ // check if there is a booking on the same compute resource by filtering on the compute_resource_id, the state and the execution date And: map[string][]dbs.Filter{ // check if there is a booking on the same compute resource by filtering on the compute_resource_id, the state and the execution date
"resource_id": {{Operator: dbs.EQUAL.String(), Value: id}}, "resource_id": {{Operator: dbs.EQUAL.String(), Value: id}},
"state": {{Operator: dbs.EQUAL.String(), Value: common.DRAFT.EnumIndex()}}, "state": {{Operator: dbs.EQUAL.String(), Value: enum.DRAFT.EnumIndex()}},
"expected_start_date": { "expected_start_date": {
{Operator: dbs.LTE.String(), Value: primitive.NewDateTimeFromTime(*end)}, {Operator: dbs.LTE.String(), Value: primitive.NewDateTimeFromTime(*end)},
{Operator: dbs.GTE.String(), Value: primitive.NewDateTimeFromTime(start)}, {Operator: dbs.GTE.String(), Value: primitive.NewDateTimeFromTime(start)},
}, },
}, },
}, "", wfa.IsDraft) }, "", wfa.IsDraft)
l.Debug().Msg("Search finished")
if code != 200 { if code != 200 {
return false, err return false, err
} }
@@ -80,10 +87,6 @@ func (d *Booking) GetDelayOnDuration() time.Duration {
return d.GetRealDuration() - d.GetUsualDuration() return d.GetRealDuration() - d.GetUsualDuration()
} }
func (d *Booking) GetName() string {
return d.GetID() + "_" + d.ExpectedStartDate.String()
}
func (d *Booking) GetAccessor(request *tools.APIRequest) utils.Accessor { func (d *Booking) GetAccessor(request *tools.APIRequest) utils.Accessor {
return NewAccessor(request) // Create a new instance of the accessor return NewAccessor(request) // Create a new instance of the accessor
} }
@@ -93,7 +96,7 @@ func (d *Booking) VerifyAuth(request *tools.APIRequest) bool {
} }
func (r *Booking) StoreDraftDefault() { func (r *Booking) StoreDraftDefault() {
r.IsDraft = true r.IsDraft = false
} }
func (r *Booking) CanUpdate(set utils.DBObject) (bool, utils.DBObject) { func (r *Booking) CanUpdate(set utils.DBObject) (bool, utils.DBObject) {

View File

@@ -1,11 +1,12 @@
package booking

import (
+	"errors"
	"time"

	"cloud.o-forge.io/core/oc-lib/dbs"
	"cloud.o-forge.io/core/oc-lib/logs"
-	"cloud.o-forge.io/core/oc-lib/models/common"
+	"cloud.o-forge.io/core/oc-lib/models/common/enum"
	"cloud.o-forge.io/core/oc-lib/models/utils"
	"cloud.o-forge.io/core/oc-lib/tools"
)
@@ -33,7 +34,11 @@ func (a *bookingMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error)
}
func (a *bookingMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
-	return utils.GenericUpdateOne(set, id, a, &Booking{})
+	if set.(*Booking).State == 0 {
+		return nil, 400, errors.New("state is required")
+	}
+	realSet := &Booking{State: set.(*Booking).State}
+	return utils.GenericUpdateOne(realSet, id, a, &Booking{})
}
func (a *bookingMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
@@ -46,11 +51,16 @@ func (a *bookingMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int
func (a *bookingMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
	return utils.GenericLoadOne[*Booking](id, func(d utils.DBObject) (utils.DBObject, int, error) {
+		now := time.Now()
+		now = now.Add(time.Second * -60)
+		if d.(*Booking).State == enum.DRAFT && now.UTC().After(d.(*Booking).ExpectedStartDate) {
+			return utils.GenericDeleteOne(d.GetID(), a)
+		}
		if (d.(*Booking).ExpectedEndDate) == nil {
-			d.(*Booking).State = common.FORGOTTEN
+			d.(*Booking).State = enum.FORGOTTEN
			utils.GenericRawUpdateOne(d, id, a)
-		} else if d.(*Booking).State == common.SCHEDULED && time.Now().UTC().After(*&d.(*Booking).ExpectedStartDate) {
-			d.(*Booking).State = common.DELAYED
+		} else if d.(*Booking).State == enum.SCHEDULED && now.UTC().After(d.(*Booking).ExpectedStartDate) {
+			d.(*Booking).State = enum.DELAYED
			utils.GenericRawUpdateOne(d, id, a)
		}
		return d, 200, nil
@@ -67,8 +77,14 @@ func (a *bookingMongoAccessor) Search(filters *dbs.Filters, search string, isDra
func (a *bookingMongoAccessor) getExec() func(utils.DBObject) utils.ShallowDBObject {
	return func(d utils.DBObject) utils.ShallowDBObject {
-		if d.(*Booking).State == common.SCHEDULED && time.Now().UTC().After(*&d.(*Booking).ExpectedStartDate) {
-			d.(*Booking).State = common.DELAYED
+		now := time.Now()
+		now = now.Add(time.Second * -60)
+		if d.(*Booking).State == enum.DRAFT && now.UTC().After(d.(*Booking).ExpectedStartDate) {
+			utils.GenericDeleteOne(d.GetID(), a)
+			return nil
+		}
+		if d.(*Booking).State == enum.SCHEDULED && now.UTC().After(d.(*Booking).ExpectedStartDate) {
+			d.(*Booking).State = enum.DELAYED
			utils.GenericRawUpdateOne(d, d.GetID(), a)
		}
		return d
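The accessor changes above introduce a one-minute grace window and two automatic transitions whenever a booking is read back: a DRAFT booking whose expected start is already past gets deleted, and a SCHEDULED one past its start is flagged DELAYED. A condensed sketch of that decision, with a deleted flag standing in for utils.GenericDeleteOne:

package main

import (
	"fmt"
	"time"
)

type BookingStatus int

const (
	DRAFT BookingStatus = iota
	SCHEDULED
	DELAYED
)

// nextState condenses the transition logic from the accessor above: the check
// runs against "now minus 60s" so very recent bookings are left untouched.
func nextState(state BookingStatus, expectedStart, now time.Time) (BookingStatus, bool) {
	grace := now.Add(-60 * time.Second)
	switch {
	case state == DRAFT && grace.After(expectedStart):
		return state, true // drop the stale draft
	case state == SCHEDULED && grace.After(expectedStart):
		return DELAYED, false // keep it, but flag it as late
	default:
		return state, false
	}
}

func main() {
	s, deleted := nextState(SCHEDULED, time.Now().Add(-5*time.Minute), time.Now())
	fmt.Println(s == DELAYED, deleted) // true false
}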

View File

@@ -1,7 +1,6 @@
package collaborative_area

import (
-	"fmt"
	"slices"
	"time"
@@ -34,7 +33,7 @@ type CollaborativeArea struct {
	Attributes        map[string]interface{} `json:"attributes,omitempty" bson:"attributes,omitempty"` // Attributes is the attributes of the workspace (TODO)
	Workspaces        []string               `json:"workspaces" bson:"workspaces"` // Workspaces is the workspaces of the workspace
	Workflows         []string               `json:"workflows" bson:"workflows"` // Workflows is the workflows of the workspace
-	AllowedPeersGroup map[string][]string    `json:"allowed_peers_group,omitempty" bson:"allowed_peers_group,omitempty"` // AllowedPeersGroup is the group of allowed peers
+	AllowedPeersGroup map[string][]string    `json:"allowed_peers_group" bson:"allowed_peers_group"` // AllowedPeersGroup is the group of allowed peers
	Rules             []string               `json:"rules" bson:"rules,omitempty"` // Rules is the rules of the workspace
	SharedRules       []*rule.Rule           `json:"shared_rules,omitempty" bson:"-"` // SharedRules is the shared rules of the workspace
@@ -44,6 +43,9 @@ type CollaborativeArea struct {
}
func (ao *CollaborativeArea) Clear(peerID string) {
+	if ao.AllowedPeersGroup == nil {
+		ao.AllowedPeersGroup = map[string][]string{}
+	}
	ao.CreatorID = peerID
	if config.GetConfig().Whitelist {
		ao.AllowedPeersGroup[peerID] = []string{"*"}
@@ -72,7 +74,6 @@ func (ao *CollaborativeArea) Clear(peerID string) {
func (ao *CollaborativeArea) VerifyAuth(request *tools.APIRequest) bool {
	if (ao.AllowedPeersGroup != nil || config.GetConfig().Whitelist) && request != nil {
		if grps, ok := ao.AllowedPeersGroup[request.PeerID]; ok || config.GetConfig().Whitelist {
-			fmt.Println("grps", grps, "ok", ok, "config.GetConfig().Whitelist", config.GetConfig().Whitelist)
			if slices.Contains(grps, "*") || (!ok && config.GetConfig().Whitelist) {
				return true
			}
@@ -91,8 +92,12 @@ func (d *CollaborativeArea) GetAccessor(request *tools.APIRequest) utils.Accesso
}
func (d *CollaborativeArea) Trim() *CollaborativeArea {
+	if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: d.CreatorID}}).IsMySelf(); !ok {
+		d.AllowedPeersGroup = map[string][]string{}
+	}
	return d
}
+func (d *CollaborativeArea) StoreDraftDefault() {
+	d.AllowedPeersGroup = map[string][]string{
+		d.CreatorID: []string{"*"},
+	}
+	d.IsDraft = false
+}

View File

@@ -1,7 +1,6 @@
package collaborative_area

import (
-	"errors"
	"fmt"
	"slices"
@@ -33,10 +32,10 @@ func NewAccessor(request *tools.APIRequest) *collaborativeAreaMongoAccessor {
			Request: request,
			Type:    tools.COLLABORATIVE_AREA,
		},
-		workspaceAccessor: (&workspace.Workspace{}).GetAccessor(nil),
-		workflowAccessor:  (&w.Workflow{}).GetAccessor(nil),
-		peerAccessor:      (&peer.Peer{}).GetAccessor(nil),
-		ruleAccessor:      (&rule.Rule{}).GetAccessor(nil),
+		workspaceAccessor: (&workspace.Workspace{}).GetAccessor(request),
+		workflowAccessor:  (&w.Workflow{}).GetAccessor(request),
+		peerAccessor:      (&peer.Peer{}).GetAccessor(request),
+		ruleAccessor:      (&rule.Rule{}).GetAccessor(request),
	}
}
@@ -67,11 +66,10 @@ func (a *collaborativeAreaMongoAccessor) StoreOne(data utils.DBObject) (utils.DB
	_, id := (&peer.Peer{}).IsMySelf() // get the local peer
	data.(*CollaborativeArea).Clear(id) // set the creator
	// retrieve or proper peer
-	dd, code, err := a.peerAccessor.Search(nil, "0", true)
-	if code != 200 || len(dd) == 0 {
-		return nil, code, errors.New("Could not retrieve the peer" + err.Error())
+	if data.(*CollaborativeArea).CollaborativeAreaRule != nil {
+		data.(*CollaborativeArea).CollaborativeAreaRule = &CollaborativeAreaRule{}
	}
-	data.(*CollaborativeArea).CollaborativeAreaRule.Creator = dd[0].GetID()
+	data.(*CollaborativeArea).CollaborativeAreaRule.Creator = id
	d, code, err := utils.GenericStoreOne(data.(*CollaborativeArea).Trim(), a)
	if code == 200 {
		a.sharedWorkflow(d.(*CollaborativeArea), d.GetID()) // create all shared workflows
@@ -93,6 +91,7 @@ func filterEnrich[T utils.ShallowDBObject](arr []string, isDrafted bool, a utils
			"abstractobject.id": {{Operator: dbs.IN.String(), Value: arr}},
		},
	}, "", isDrafted)
+	fmt.Println(res, arr, isDrafted, a)
	if code == 200 {
		for _, r := range res {
			new = append(new, r.(T))
@@ -102,38 +101,46 @@ func filterEnrich[T utils.ShallowDBObject](arr []string, isDrafted bool, a utils
}
// enrich is a function that enriches the CollaborativeArea with the shared objects
-func (a *collaborativeAreaMongoAccessor) enrich(sharedWorkspace *CollaborativeArea, isDrafted bool) *CollaborativeArea {
-	sharedWorkspace.SharedWorkspaces = append(sharedWorkspace.SharedWorkspaces,
-		filterEnrich[*workspace.Workspace](sharedWorkspace.Workspaces, isDrafted, a.workspaceAccessor)...)
-	sharedWorkspace.SharedWorkflows = append(sharedWorkspace.SharedWorkflows,
-		filterEnrich[*workflow.Workflow](sharedWorkspace.Workflows, isDrafted, a.workflowAccessor)...)
+func (a *collaborativeAreaMongoAccessor) enrich(sharedWorkspace *CollaborativeArea, isDrafted bool, request *tools.APIRequest) *CollaborativeArea {
+	sharedWorkspace.SharedWorkspaces = filterEnrich[*workspace.Workspace](sharedWorkspace.Workspaces, isDrafted, a.workspaceAccessor)
+	sharedWorkspace.SharedWorkflows = filterEnrich[*workflow.Workflow](sharedWorkspace.Workflows, isDrafted, a.workflowAccessor)
	peerskey := []string{}
-	for k := range sharedWorkspace.AllowedPeersGroup {
-		peerskey = append(peerskey, k)
+	fmt.Println("PEERS 1", sharedWorkspace.AllowedPeersGroup)
+	for k, v := range sharedWorkspace.AllowedPeersGroup {
+		canFound := false
+		for _, t := range request.Groups {
+			if slices.Contains(v, t) {
+				canFound = true
+				break
+			}
+		}
+		fmt.Println("PEERS 2", canFound, v)
+		if slices.Contains(v, "*") || canFound {
+			peerskey = append(peerskey, k)
+		}
	}
-	sharedWorkspace.SharedPeers = append(sharedWorkspace.SharedPeers,
-		filterEnrich[*peer.Peer](peerskey, isDrafted, a.peerAccessor)...)
-	sharedWorkspace.SharedRules = append(sharedWorkspace.SharedRules,
-		filterEnrich[*rule.Rule](sharedWorkspace.Rules, isDrafted, a.ruleAccessor)...)
+	fmt.Println("PEERS", peerskey)
+	sharedWorkspace.SharedPeers = filterEnrich[*peer.Peer](peerskey, isDrafted, a.peerAccessor)
+	sharedWorkspace.SharedRules = filterEnrich[*rule.Rule](sharedWorkspace.Rules, isDrafted, a.ruleAccessor)
	return sharedWorkspace
}
func (a *collaborativeAreaMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
	return utils.GenericLoadOne[*CollaborativeArea](id, func(d utils.DBObject) (utils.DBObject, int, error) {
-		return a.enrich(d.(*CollaborativeArea), true), 200, nil
+		return a.enrich(d.(*CollaborativeArea), false, a.Request), 200, nil
	}, a)
}
func (a *collaborativeAreaMongoAccessor) LoadAll(isDrafted bool) ([]utils.ShallowDBObject, int, error) {
	return utils.GenericLoadAll[*CollaborativeArea](func(d utils.DBObject) utils.ShallowDBObject {
-		return a.enrich(d.(*CollaborativeArea), true)
+		return a.enrich(d.(*CollaborativeArea), isDrafted, a.Request)
	}, isDrafted, a)
}
func (a *collaborativeAreaMongoAccessor) Search(filters *dbs.Filters, search string, isDrafted bool) ([]utils.ShallowDBObject, int, error) {
	return utils.GenericSearch[*CollaborativeArea](filters, search, (&CollaborativeArea{}).GetObjectFilters(search),
		func(d utils.DBObject) utils.ShallowDBObject {
-			return a.enrich(d.(*CollaborativeArea), true)
+			return a.enrich(d.(*CollaborativeArea), isDrafted, a.Request)
		}, isDrafted, a)
}

View File

@@ -1,33 +0,0 @@
package common
// CPU is a struct that represents a CPU
type CPU struct {
Model string `bson:"platform,omitempty" json:"platform,omitempty"`
FrequencyGhz float64 `bson:"frenquency,omitempty" json:"frenquency,omitempty"`
Cores int `bson:"cores,omitempty" json:"cores,omitempty"`
Architecture string `bson:"architecture,omitempty" json:"architecture,omitempty"`
}
type RAM struct {
SizeGb float64 `bson:"size,omitempty" json:"size,omitempty" description:"Units in MB"`
Ecc bool `bson:"ecc" json:"ecc" default:"true"`
}
type GPU struct {
Model string `bson:"platform,omitempty" json:"platform,omitempty"`
MemoryGb float64 `bson:"memory,omitempty" json:"memory,omitempty" description:"Units in MB"`
}
type InfrastructureType int
const (
DOCKER InfrastructureType = iota
KUBERNETES
SLURM
HW
CONDOR
)
func (t InfrastructureType) String() string {
return [...]string{"DOCKER", "KUBERNETES", "SLURM", "HW", "CONDOR"}[t]
}

View File

@@ -0,0 +1,20 @@
package enum
type InfrastructureType int
const (
DOCKER InfrastructureType = iota
KUBERNETES
SLURM
HW
CONDOR
)
func (t InfrastructureType) String() string {
return [...]string{"DOCKER", "KUBERNETES", "SLURM", "HW", "CONDOR"}[t]
}
// get list of all infrastructure types
func InfrastructureList() []InfrastructureType {
return []InfrastructureType{DOCKER, KUBERNETES, SLURM, HW, CONDOR}
}

View File

@@ -1,4 +1,4 @@
-package common
+package enum

type StorageSize int
@@ -7,12 +7,23 @@ const (
	GB StorageSize = iota
	MB
	KB
+	TB
)

var argoType = [...]string{
	"Gi",
	"Mi",
	"Ki",
+	"Ti",
+}
+
+// Size to string
+func (t StorageSize) String() string {
+	return [...]string{"GB", "MB", "KB", "TB"}[t]
+}
+
+func SizeList() []StorageSize {
+	return []StorageSize{GB, MB, KB, TB}
}

// New creates a new instance of the StorageResource struct
@@ -31,9 +42,15 @@ const (
	S3
	MEMORY
	HARDWARE
+	AZURE
+	GCS
)

// String() - Returns the string representation of the storage type
func (t StorageType) String() string {
-	return [...]string{"FILE", "STREAM", "API", "DATABASE", "S3", "MEMORY", "HARDWARE"}[t]
+	return [...]string{"FILE", "STREAM", "API", "DATABASE", "S3", "MEMORY", "HARDWARE", "AZURE", "GCS"}[t]
+}
+
+func TypeList() []StorageType {
+	return []StorageType{FILE, STREAM, API, DATABASE, S3, MEMORY, HARDWARE, AZURE, GCS}
}

View File

@@ -0,0 +1,64 @@
package enum
type CompletionStatus int
const (
DRAFTED CompletionStatus = iota
PENDING
CANCEL
PARTIAL
PAID
DISPUTED
OVERDUE
REFUND
)
func (d CompletionStatus) String() string {
return [...]string{"drafted", "pending", "cancel", "partial", "paid", "disputed", "overdue", "refund"}[d]
}
func CompletionStatusList() []CompletionStatus {
return []CompletionStatus{DRAFTED, PENDING, CANCEL, PARTIAL, PAID, DISPUTED, OVERDUE, REFUND}
}
type BookingStatus int
const (
DRAFT BookingStatus = iota
SCHEDULED
STARTED
FAILURE
SUCCESS
FORGOTTEN
DELAYED
CANCELLED
)
var str = [...]string{
"draft",
"scheduled",
"started",
"failure",
"success",
"forgotten",
"delayed",
"cancelled",
}
func FromInt(i int) string {
return str[i]
}
func (d BookingStatus) String() string {
return str[d]
}
// EnumIndex - Creating common behavior - give the type an EnumIndex function
func (d BookingStatus) EnumIndex() int {
return int(d)
}
// List
func StatusList() []BookingStatus {
return []BookingStatus{DRAFT, SCHEDULED, STARTED, FAILURE, SUCCESS, FORGOTTEN, DELAYED, CANCELLED}
}
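A short usage sketch for the two lifecycles above: CompletionStatus covers the billing side (orders), BookingStatus the scheduling side (executions). Minimal example, assuming the same import path as the rest of the changeset:

package main

import (
	"fmt"

	"cloud.o-forge.io/core/oc-lib/models/common/enum"
)

func main() {
	// Order lifecycle: stored as an int, rendered through String().
	s := enum.PENDING
	fmt.Println(int(s), s) // 1 pending

	// Execution lifecycle: FromInt covers raw ints coming back from Mongo.
	fmt.Println(enum.FromInt(int(enum.SCHEDULED))) // scheduled
	fmt.Println(enum.DRAFT.EnumIndex())            // 0
}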

View File

@@ -1,4 +1,4 @@
- package common
+ package models
type Container struct {
Image string `json:"image,omitempty" bson:"image,omitempty"` // Image is the container image TEMPO

View File

@@ -0,0 +1,20 @@
package models
// CPU is a struct that represents a CPU
type CPU struct {
Model string `bson:"model,omitempty" json:"model,omitempty"`
FrequencyGhz float64 `bson:"frequency,omitempty" json:"frequency,omitempty"`
Cores int `bson:"cores,omitempty" json:"cores,omitempty"`
Architecture string `bson:"architecture,omitempty" json:"architecture,omitempty"`
}
type RAM struct {
SizeGb float64 `bson:"size,omitempty" json:"size,omitempty" description:"Units in MB"`
Ecc bool `bson:"ecc" json:"ecc" default:"true"`
}
type GPU struct {
Model string `bson:"model,omitempty" json:"model,omitempty"`
MemoryGb float64 `bson:"memory,omitempty" json:"memory,omitempty" description:"Units in MB"`
Cores map[string]int `bson:"cores,omitempty" json:"cores,omitempty"`
}

View File

@@ -0,0 +1,21 @@
package models
type Artifact struct {
AttrPath string `json:"attr_path,omitempty" bson:"attr_path,omitempty" validate:"required"`
AttrFrom string `json:"from_path,omitempty" bson:"from_path,omitempty"`
Readonly bool `json:"readonly" bson:"readonly" default:"true"`
}
type Param struct {
Name string `json:"name" bson:"name" validate:"required"`
Attr string `json:"attr,omitempty" bson:"attr,omitempty"`
Value string `json:"value,omitempty" bson:"value,omitempty"`
Origin string `json:"origin,omitempty" bson:"origin,omitempty"`
Readonly bool `json:"readonly" bson:"readonly" default:"true"`
Optionnal bool `json:"optionnal" bson:"optionnal" default:"true"`
}
type InOutputs struct {
Params []Param `json:"parameters" bson:"parameters"`
Artifacts []Artifact `json:"artifacts" bson:"artifacts"`
}

models/common/planner.go (new file, 42 lines)
View File

@@ -0,0 +1,42 @@
package common
import (
"time"
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
"cloud.o-forge.io/core/oc-lib/tools"
)
func GetPlannerNearestStart(start time.Time, planned map[tools.DataType]map[string]pricing.PricedItemITF, request *tools.APIRequest) float64 {
near := float64(10000000000) // set a high value
for _, items := range planned { // loop through the planned items
for _, priced := range items { // loop through the priced items
if priced.GetLocationStart() == nil { // if the start is nil,
continue // skip the iteration
}
newS := priced.GetLocationStart() // get the start
if newS.Sub(start).Seconds() < near { // if the difference between the start and the new start is less than the nearest start
near = newS.Sub(start).Seconds()
}
}
}
return near
}
func GetPlannerLongestTime(end *time.Time, planned map[tools.DataType]map[string]pricing.PricedItemITF, request *tools.APIRequest) float64 {
if end == nil {
return -1
}
longestTime := float64(0)
for _, priced := range planned[tools.PROCESSING_RESOURCE] {
if priced.GetLocationEnd() == nil {
continue
}
newS := priced.GetLocationEnd()
if longestTime < newS.Sub(*end).Seconds() { // end is guaranteed non-nil here (checked above)
longestTime = newS.Sub(*end).Seconds()
}
}
return longestTime
}
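Both helpers boil down to time subtraction over the priced-item map produced by Workflow.Planify elsewhere in this changeset. Building a real pricing.PricedItemITF here would be noisy, so the sketch below only mirrors the nearest-start arithmetic on bare timestamps; it is an illustration, not the library call:

package main

import (
	"fmt"
	"time"
)

// nearestStartSeconds mirrors GetPlannerNearestStart on plain timestamps:
// the smallest gap, in seconds, between start and any planned start (nil entries skipped).
func nearestStartSeconds(start time.Time, planned []*time.Time) float64 {
	near := float64(10000000000)
	for _, s := range planned {
		if s == nil {
			continue
		}
		if d := s.Sub(start).Seconds(); d < near {
			near = d
		}
	}
	return near
}

func main() {
	start := time.Now()
	a := start.Add(90 * time.Second)
	b := start.Add(30 * time.Second)
	fmt.Println(nearestStartSeconds(start, []*time.Time{&a, nil, &b})) // 30
}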

View File

@@ -5,7 +5,6 @@ import (
)
type PricingProfileITF interface {
- GetID() string
GetPrice(quantity float64, val float64, start time.Time, end time.Time, params ...string) (float64, error)
IsPurchased() bool
GetOverrideStrategyValue() int
@@ -19,15 +18,18 @@ const (
REFUND_ON_EARLY_END
)
- type AccessPricingProfile[T Strategy] struct { // only use for acces such as : DATA && PROCESSING
- ID string `json:"id,omitempty" bson:"id,omitempty"` // ID is the ID of the pricing
- Pricing PricingStrategy[T] `json:"price,omitempty" bson:"price,omitempty"` // Price is the price of the resource
- DefaultRefund RefundType `json:"default_refund" bson:"default_refund"` // DefaultRefund is the default refund type of the pricing
- RefundRatio int32 `json:"refund_ratio" bson:"refund_ratio" default:"0"` // RefundRatio is the refund ratio if missing
- }
- func (b *AccessPricingProfile[T]) GetID() string {
- return b.ID
- }
+ func (t RefundType) String() string {
+ return [...]string{"REFUND ON DEAD END", "REFUND ON ERROR", "REFUND ON EARLY END"}[t]
+ }
+ func RefundTypeList() []RefundType {
+ return []RefundType{REFUND_DEAD_END, REFUND_ON_ERROR, REFUND_ON_EARLY_END}
+ }
+ type AccessPricingProfile[T Strategy] struct { // only use for acces such as : DATA && PROCESSING
+ Pricing PricingStrategy[T] `json:"pricing,omitempty" bson:"pricing,omitempty"` // Price is the price of the resource
+ DefaultRefund RefundType `json:"default_refund" bson:"default_refund"` // DefaultRefund is the default refund type of the pricing
+ RefundRatio int32 `json:"refund_ratio" bson:"refund_ratio" default:"0"` // RefundRatio is the refund ratio if missing
+ }
func (b *AccessPricingProfile[T]) GetOverrideStrategyValue() int {
@@ -42,17 +44,21 @@ const (
GARANTED GARANTED
) )
func ExploitPrivilegeStrategyList() []ExploitPrivilegeStrategy {
return []ExploitPrivilegeStrategy{BASIC, GARANTED_ON_DELAY, GARANTED}
}
func (t ExploitPrivilegeStrategy) String() string { func (t ExploitPrivilegeStrategy) String() string {
return [...]string{"BASIC", "GARANTED_ON_DELAY", "GARANTED"}[t] return [...]string{"NO GARANTY", "GARANTED ON SPECIFIC DELAY", "GARANTED"}[t]
} }
type ExploitPricingProfile[T Strategy] struct { // only use for exploit such as : STORAGE, COMPUTE, WORKFLOW type ExploitPricingProfile[T Strategy] struct { // only use for exploit such as : STORAGE, COMPUTE, WORKFLOW
AccessPricingProfile[T] AccessPricingProfile[T]
AdditionnalRefundTypes []RefundType `json:"refund_types" bson:"refund_types"` // RefundTypes is the refund types of the pricing AdditionnalRefundTypes []RefundType `json:"refund_types" bson:"refund_types"` // RefundTypes is the refund types of the pricing
PrivilegeStrategy ExploitPrivilegeStrategy `json:"privilege_strategy,omitempty" bson:"privilege_strategy,omitempty"` // Strategy is the strategy of the pricing PrivilegeStrategy ExploitPrivilegeStrategy `json:"privilege_strategy,omitempty" bson:"privilege_strategy,omitempty"` // Strategy is the strategy of the pricing
GarantedDelaySecond uint GarantedDelaySecond uint `json:"garanted_delay_second,omitempty" bson:"garanted_delay_second,omitempty"` // GarantedDelaySecond is the garanted delay of the pricing
Exceeding bool Exceeding bool `json:"exceeding" bson:"exceeding"` // Exceeding is the exceeding of the bill
ExceedingRatio int32 `json:"exceeding_ratio" bson:"exceeding_ratio" default:"0"` // ExceedingRatio is the exceeding ratio of the bill ExceedingRatio int32 `json:"exceeding_ratio" bson:"exceeding_ratio" default:"0"` // ExceedingRatio is the exceeding ratio of the bill
} }

View File

@@ -15,6 +15,14 @@ const (
PAY_PER_USE
)
func (t BuyingStrategy) String() string {
return [...]string{"UNLIMITED", "SUBSCRIPTION", "PAY PER USE"}[t]
}
func BuyingStrategyList() []BuyingStrategy {
return []BuyingStrategy{UNLIMITED, SUBSCRIPTION, PAY_PER_USE}
}
type Strategy interface {
GetStrategy() string
GetStrategyValue() int
@@ -32,6 +40,14 @@ const (
PER_MONTH
)
func (t TimePricingStrategy) String() string {
return [...]string{"ONCE", "PER SECOND", "PER MINUTE", "PER HOUR", "PER DAY", "PER WEEK", "PER MONTH"}[t]
}
func TimePricingStrategyList() []TimePricingStrategy {
return []TimePricingStrategy{ONCE, PER_SECOND, PER_MINUTE, PER_HOUR, PER_DAY, PER_WEEK, PER_MONTH}
}
func (t TimePricingStrategy) GetStrategy() string {
return [...]string{"ONCE", "PER_SECOND", "PER_MINUTE", "PER_HOUR", "PER_DAY", "PER_WEEK", "PER_MONTH"}[t]
}
@@ -82,38 +98,17 @@ func BookingEstimation(t TimePricingStrategy, price float64, locationDurationInS
case PER_MONTH:
return p * float64(locationDurationInSecond/2592000), nil
}
- return 0, errors.New("Pricing strategy not found")
+ return 0, errors.New("pricing strategy not found")
}
- // hmmmm
type PricingStrategy[T Strategy] struct {
- Price float64 `json:"Price" bson:"Price" default:"0"` // Price is the Price of the pricing
+ Price float64 `json:"price" bson:"price" default:"0"` // Price is the Price of the pricing
+ Currency string `json:"currency" bson:"currency" default:"USD"` // Currency is the currency of the pricing
BuyingStrategy BuyingStrategy `json:"buying_strategy" bson:"buying_strategy" default:"0"` // BuyingStrategy is the buying strategy of the pricing
TimePricingStrategy TimePricingStrategy `json:"time_pricing_strategy" bson:"time_pricing_strategy" default:"0"` // TimePricingStrategy is the time pricing strategy of the pricing
OverrideStrategy T `json:"override_strategy" bson:"override_strategy" default:"-1"` // Modulation is the modulation of the pricing
}
func (p PricingStrategy[T]) SetStrategy(Price float64, BuyingStrategy BuyingStrategy, TimePricingStrategy TimePricingStrategy) error {
if TimePricingStrategy == ONCE && (BuyingStrategy != UNLIMITED || BuyingStrategy != PAY_PER_USE) {
return errors.New("time pricing strategy can only be set to ONCE if buying strategy is UNLIMITED or PAY_PER_USE")
} else if BuyingStrategy == SUBSCRIPTION && (TimePricingStrategy == ONCE) {
return errors.New("subscription duration in second must be set if buying strategy is SUBSCRIPTION")
}
p.Price = Price
p.BuyingStrategy = BuyingStrategy
p.TimePricingStrategy = TimePricingStrategy
return nil
}
func (p PricingStrategy[T]) SetSpecificPerUseStrategy(strategy T) error {
if p.BuyingStrategy == UNLIMITED {
return errors.New("UNLIMITED buying strategy can't have a specific strategy, Price is set on buying")
}
p.OverrideStrategy = strategy
return nil
}
// QUANTITY can be how many of gb core per example
func (p PricingStrategy[T]) GetPrice(amountOfData float64, bookingTimeDuration float64, start time.Time, end *time.Time) (float64, error) {
if p.BuyingStrategy == SUBSCRIPTION {
return BookingEstimation(p.GetTimePricingStrategy(), p.Price*float64(amountOfData), bookingTimeDuration, start, end)
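The estimation branches reduce to a unit price multiplied by the booked duration expressed in that unit. A standalone sketch of that arithmetic (not the BookingEstimation function itself, whose signature is partly truncated in this view):

package main

import "fmt"

// Minimal sketch, not the library call: a PER_<unit> estimate is the unit price
// multiplied by the booked duration expressed in that unit.
func estimate(pricePerUnit, durationSeconds, unitSeconds float64) float64 {
	return pricePerUnit * (durationSeconds / unitSeconds)
}

func main() {
	fmt.Println(estimate(0.02, 36*3600, 3600))    // 36 hours at 0.02/hour -> 0.72
	fmt.Println(estimate(5, 45*24*3600, 2592000)) // 1.5 months at 5/month -> 7.5
}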

View File

@@ -1,38 +0,0 @@
package common
type ScheduledType int
const (
DRAFT ScheduledType = iota
SCHEDULED
STARTED
FAILURE
SUCCESS
FORGOTTEN
DELAYED
CANCELLED
)
var str = [...]string{
"draft",
"scheduled",
"started",
"failure",
"success",
"forgotten",
"delayed",
"cancelled",
}
func FromInt(i int) string {
return str[i]
}
func (d ScheduledType) String() string {
return str[d]
}
// EnumIndex - Creating common behavior-give the type a EnumIndex functio
func (d ScheduledType) EnumIndex() int {
return int(d)
}

View File

@@ -3,6 +3,7 @@ package models
import (
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/order"
+ "cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource"
"cloud.o-forge.io/core/oc-lib/tools"
"cloud.o-forge.io/core/oc-lib/models/booking"
@@ -10,7 +11,6 @@ import (
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule" "cloud.o-forge.io/core/oc-lib/models/collaborative_area/rules/rule"
"cloud.o-forge.io/core/oc-lib/models/peer" "cloud.o-forge.io/core/oc-lib/models/peer"
resource "cloud.o-forge.io/core/oc-lib/models/resources" resource "cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
w2 "cloud.o-forge.io/core/oc-lib/models/workflow" w2 "cloud.o-forge.io/core/oc-lib/models/workflow"
"cloud.o-forge.io/core/oc-lib/models/workflow_execution" "cloud.o-forge.io/core/oc-lib/models/workflow_execution"
@@ -28,9 +28,8 @@ var models = map[string]func() utils.DBObject{
tools.STORAGE_RESOURCE.String(): func() utils.DBObject { return &resource.StorageResource{} },
tools.PROCESSING_RESOURCE.String(): func() utils.DBObject { return &resource.ProcessingResource{} },
tools.WORKFLOW.String(): func() utils.DBObject { return &w2.Workflow{} },
- tools.WORKFLOW_EXECUTION.String(): func() utils.DBObject { return &workflow_execution.WorkflowExecutions{} },
+ tools.WORKFLOW_EXECUTION.String(): func() utils.DBObject { return &workflow_execution.WorkflowExecution{} },
tools.WORKSPACE.String(): func() utils.DBObject { return &w3.Workspace{} },
- tools.RESOURCE_MODEL.String(): func() utils.DBObject { return &resource_model.ResourceModel{} },
tools.PEER.String(): func() utils.DBObject { return &peer.Peer{} },
tools.COLLABORATIVE_AREA.String(): func() utils.DBObject { return &collaborative_area.CollaborativeArea{} },
tools.RULE.String(): func() utils.DBObject { return &rule.Rule{} },
@@ -38,6 +37,7 @@ var models = map[string]func() utils.DBObject{
tools.WORKFLOW_HISTORY.String(): func() utils.DBObject { return &w2.WorkflowHistory{} },
tools.WORKSPACE_HISTORY.String(): func() utils.DBObject { return &w3.WorkspaceHistory{} },
tools.ORDER.String(): func() utils.DBObject { return &order.Order{} },
+ tools.PURCHASE_RESOURCE.String(): func() utils.DBObject { return &purchase_resource.PurchaseResource{} },
}
// Model returns the model object based on the model type

View File

@@ -8,6 +8,7 @@ import (
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/models/booking" "cloud.o-forge.io/core/oc-lib/models/booking"
"cloud.o-forge.io/core/oc-lib/models/common/enum"
"cloud.o-forge.io/core/oc-lib/models/common/pricing" "cloud.o-forge.io/core/oc-lib/models/common/pricing"
"cloud.o-forge.io/core/oc-lib/models/peer" "cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource" "cloud.o-forge.io/core/oc-lib/models/resources/purchase_resource"
@@ -20,24 +21,12 @@ import (
* Booking is a struct that represents a booking
*/
- type OrderStatus = int
- const (
- DRAFT OrderStatus = iota
- PENDING
- CANCELLED
- PARTIAL
- PAID
- DISPUTED
- OVERDUE
- REFUND
- )
type Order struct {
utils.AbstractObject
OrderBy string `json:"order_by" bson:"order_by" validate:"required"`
+ WorkflowID string `json:"workflow_id" bson:"workflow_id" validate:"required"`
WorkflowExecutionIDs []string `json:"workflow_execution_ids" bson:"workflow_execution_ids" validate:"required"`
- Status OrderStatus `json:"status" bson:"status" default:"0"`
+ Status enum.CompletionStatus `json:"status" bson:"status" default:"0"`
SubOrders map[string]*PeerOrder `json:"sub_orders" bson:"sub_orders"`
Total float64 `json:"total" bson:"total" validate:"required"`
}
@@ -69,7 +58,7 @@ func (o *Order) Pay(scheduler *workflow_execution.WorkflowSchedule, request *too
if _, err := o.draftBookOrder(scheduler, request); err != nil {
return err
}
- o.Status = PENDING
+ o.Status = enum.PENDING
_, code, err := o.GetAccessor(request).UpdateOne(o, o.GetID())
if code != 200 || err != nil {
return errors.New("could not update the order" + fmt.Sprintf("%v", err))
@@ -79,10 +68,10 @@ func (o *Order) Pay(scheduler *workflow_execution.WorkflowSchedule, request *too
} else {
o.IsDraft = false
}
- for _, exec := range scheduler.WorkflowExecutions {
+ for _, exec := range scheduler.WorkflowExecution {
exec.IsDraft = false
_, code, err := utils.GenericUpdateOne(exec, exec.GetID(),
- workflow_execution.NewAccessor(request), &workflow_execution.WorkflowExecutions{})
+ workflow_execution.NewAccessor(request), &workflow_execution.WorkflowExecution{})
if code != 200 || err != nil {
return errors.New("could not update the workflow execution" + fmt.Sprintf("%v", err))
}
@@ -102,23 +91,25 @@ func (o *Order) draftStoreFromModel(scheduler *workflow_execution.WorkflowSchedu
if request == nil {
return errors.New("no request found")
}
- if scheduler.Workflow.Graph == nil { // if the workflow has no graph, return an error
+ fmt.Println("Drafting order", scheduler.Workflow)
+ if scheduler.Workflow == nil || scheduler.Workflow.Graph == nil { // if the workflow has no graph, return an error
return errors.New("no graph found")
}
o.SetName()
+ o.WorkflowID = scheduler.Workflow.GetID()
o.IsDraft = true
- o.OrderBy = request.Username
+ o.OrderBy = request.PeerID
o.WorkflowExecutionIDs = []string{} // create an array of ids
- for _, exec := range scheduler.WorkflowExecutions {
+ for _, exec := range scheduler.WorkflowExecution {
o.WorkflowExecutionIDs = append(o.WorkflowExecutionIDs, exec.GetID())
}
// set the name of the order
resourcesByPeer := map[string][]pricing.PricedItemITF{} // create a map of resources by peer
- processings := scheduler.Workflow.GetPricedItem(scheduler.Workflow.IsProcessing, request) // get the processing items
+ processings := scheduler.Workflow.GetPricedItem(scheduler.Workflow.Graph.IsProcessing, request) // get the processing items
- datas := scheduler.Workflow.GetPricedItem(scheduler.Workflow.IsData, request) // get the data items
+ datas := scheduler.Workflow.GetPricedItem(scheduler.Workflow.Graph.IsData, request) // get the data items
- storages := scheduler.Workflow.GetPricedItem(scheduler.Workflow.IsStorage, request) // get the storage items
+ storages := scheduler.Workflow.GetPricedItem(scheduler.Workflow.Graph.IsStorage, request) // get the storage items
- workflows := scheduler.Workflow.GetPricedItem(scheduler.Workflow.IsWorkflow, request) // get the workflow items
+ workflows := scheduler.Workflow.GetPricedItem(scheduler.Workflow.Graph.IsWorkflow, request) // get the workflow items
for _, items := range []map[string]pricing.PricedItemITF{processings, datas, storages, workflows} {
for _, item := range items {
if _, ok := resourcesByPeer[item.GetCreatorID()]; !ok {
@@ -129,13 +120,16 @@ func (o *Order) draftStoreFromModel(scheduler *workflow_execution.WorkflowSchedu
}
for peerID, resources := range resourcesByPeer {
peerOrder := &PeerOrder{
- Status: DRAFT,
+ Status: enum.DRAFTED,
PeerID: peerID,
}
peerOrder.GenerateID()
for _, resource := range resources {
peerOrder.AddItem(resource, len(resources)) // TODO SPECIALS REF ADDITIONALS NOTES
}
+ if o.SubOrders == nil {
+ o.SubOrders = map[string]*PeerOrder{}
+ }
o.SubOrders[peerOrder.GetID()] = peerOrder
}
// search an order with same user name and same session id
@@ -146,7 +140,8 @@ func (o *Order) draftStoreFromModel(scheduler *workflow_execution.WorkflowSchedu
// should store the order
res, code, err := o.GetAccessor(request).Search(&dbs.Filters{
And: map[string][]dbs.Filter{
- "order_by": {{Operator: dbs.EQUAL.String(), Value: request.Username}},
+ "workflow_id": {{Operator: dbs.EQUAL.String(), Value: o.WorkflowID}},
+ "order_by": {{Operator: dbs.EQUAL.String(), Value: request.PeerID}},
},
}, "", o.IsDraft)
if code != 200 || err != nil {
@@ -171,12 +166,12 @@ func (o *Order) draftBookOrder(scheduler *workflow_execution.WorkflowSchedule, r
if request == nil {
return draftedBookings, errors.New("no request found")
}
- for _, exec := range scheduler.WorkflowExecutions {
+ for _, exec := range scheduler.WorkflowExecution {
_, priceds, _, err := scheduler.Workflow.Planify(exec.ExecDate, exec.EndDate, request)
if err != nil {
return draftedBookings, errors.New("could not planify the workflow" + fmt.Sprintf("%v", err))
}
- bookings := exec.Book(scheduler.Workflow.UUID, priceds)
+ bookings := exec.Book(scheduler.UUID, scheduler.Workflow.UUID, priceds)
for _, booking := range bookings {
_, err := (&peer.Peer{}).LaunchPeerExecution(booking.DestPeerID, "",
tools.BOOKING, tools.POST, booking.Serialize(booking), request.Caller)
@@ -231,13 +226,13 @@ func (d *Order) pay(request *tools.APIRequest) error {
if res.Error != "" { if res.Error != "" {
errs += res.Error errs += res.Error
} }
if res.Status != PAID { if res.Status != enum.PAID {
gotAnUnpaid = true gotAnUnpaid = true
} }
d.Status = PARTIAL d.Status = enum.PARTIAL
d.SubOrders[res.GetID()] = res d.SubOrders[res.GetID()] = res
if count == len(d.SubOrders) && !gotAnUnpaid { if count == len(d.SubOrders) && !gotAnUnpaid {
d.Status = PAID d.Status = enum.PAID
} }
} }
} }
@@ -250,20 +245,20 @@ func (d *Order) pay(request *tools.APIRequest) error {
type PeerOrder struct {
utils.AbstractObject
Error string `json:"error,omitempty" bson:"error,omitempty"`
PeerID string `json:"peer_id,omitempty" bson:"peer_id,omitempty"`
- Status OrderStatus `json:"status" bson:"status" default:"0"`
+ Status enum.CompletionStatus `json:"status" bson:"status" default:"0"`
BillingAddress string `json:"billing_address,omitempty" bson:"billing_address,omitempty"`
Items []*PeerItemOrder `json:"items,omitempty" bson:"items,omitempty"`
Total float64 `json:"total,omitempty" bson:"total,omitempty"`
}
func (d *PeerOrder) Pay(request *tools.APIRequest, response chan *PeerOrder, wg *sync.WaitGroup) {
- d.Status = PENDING
+ d.Status = enum.PENDING
go func() {
// DO SOMETHING TO PAY ON BLOCKCHAIN OR WHATEVER ON RETURN UPDATE STATUS
- d.Status = PAID // TO REMOVE LATER IT'S A MOCK
+ d.Status = enum.PAID // TO REMOVE LATER IT'S A MOCK
- if d.Status == PAID {
+ if d.Status == enum.PAID {
for _, b := range d.Items {
if !b.Item.IsPurchased() {
continue
@@ -277,7 +272,7 @@ func (d *PeerOrder) Pay(request *tools.APIRequest, response chan *PeerOrder, wg
}
}
- if d.Status != PENDING {
+ if d.Status != enum.PENDING {
response <- d
}
wg.Done()
@@ -333,4 +328,5 @@ func (d *PeerItemOrder) GetPrice(request *tools.APIRequest) (float64, error) {
return p * float64(d.Quantity), nil
}
+ // WTF HOW TO SELECT THE RIGHT PRICE ???
// SHOULD SET A BUYING STATUS WHEN PAYMENT IS VALIDATED
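The core of draftStoreFromModel above is grouping priced items by their creator peer, one future PeerOrder per peer. The same pattern, reduced to a toy stand-in type so it runs on its own (pricedItem is illustrative, not the library interface):

package main

import "fmt"

// Toy stand-in for pricing.PricedItemITF: only the creator ID matters for grouping.
type pricedItem struct {
	creatorID  string
	resourceID string
}

func main() {
	items := []pricedItem{
		{"peer-a", "data-1"}, {"peer-b", "cpu-1"}, {"peer-a", "store-1"},
	}
	// Same grouping pattern as draftStoreFromModel: one bucket (future PeerOrder) per creator peer.
	byPeer := map[string][]pricedItem{}
	for _, it := range items {
		byPeer[it.creatorID] = append(byPeer[it.creatorID], it)
	}
	for peerID, res := range byPeer {
		fmt.Println(peerID, len(res), "item(s)")
	}
}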

View File

@@ -37,6 +37,10 @@ type Peer struct {
FailedExecution []PeerExecution `json:"failed_execution" bson:"failed_execution"` // FailedExecution is the list of failed executions, to be retried
}
+ func (ao *Peer) VerifyAuth(request *tools.APIRequest) bool {
+ return true
+ }
// AddExecution adds an execution to the list of failed executions
func (ao *Peer) AddExecution(exec PeerExecution) {
found := false
@@ -73,7 +77,7 @@ func (p *Peer) IsMySelf() (bool, string) {
}
// LaunchPeerExecution launches an execution on a peer
- func (p *Peer) LaunchPeerExecution(peerID string, dataID string, dt tools.DataType, method tools.METHOD, body map[string]interface{}, caller *tools.HTTPCaller) (*PeerExecution, error) {
+ func (p *Peer) LaunchPeerExecution(peerID string, dataID string, dt tools.DataType, method tools.METHOD, body interface{}, caller *tools.HTTPCaller) (*PeerExecution, error) {
p.UUID = peerID
return cache.LaunchPeerExecution(peerID, dataID, dt, method, body, caller) // Launch the execution on the peer through the cache
}

View File

@@ -4,7 +4,6 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"regexp"
"strings" "strings"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
@@ -15,11 +14,11 @@ import (
* it defines the execution data
*/
type PeerExecution struct {
Method string `json:"method" bson:"method"`
Url string `json:"url" bson:"url"`
- Body map[string]interface{} `json:"body" bson:"body"`
+ Body interface{} `json:"body" bson:"body"`
DataType int `json:"data_type" bson:"data_type"`
DataID string `json:"data_id" bson:"data_id"`
}
var cache = &PeerCache{} // Singleton instance of the peer cache var cache = &PeerCache{} // Singleton instance of the peer cache
@@ -29,86 +28,78 @@ type PeerCache struct {
}
// urlFormat formats the URL of the peer with the data type API function
- func (p *PeerCache) urlFormat(url string, dt tools.DataType) string {
+ func (p *PeerCache) urlFormat(hostUrl string, dt tools.DataType) string {
// localhost is replaced by the local peer URL
// because localhost must collide on a web request security protocol
- localhost := ""
+ /*localhost := ""
- if strings.Contains(url, "localhost") {
+ if strings.Contains(hostUrl, "localhost") {
localhost = "localhost"
}
- if strings.Contains(url, "127.0.0.1") {
+ if strings.Contains(hostUrl, "127.0.0.1") {
localhost = "127.0.0.1"
}
if localhost != "" {
r := regexp.MustCompile("(" + localhost + ":[0-9]+)")
- t := r.FindString(url)
+ t := r.FindString(hostUrl)
if t != "" {
- url = strings.Replace(url, t, dt.API()+":8080/oc", -1)
+ hostUrl = strings.Replace(hostUrl, t, dt.API()+":8080/oc", -1)
} else {
- url = strings.ReplaceAll(url, localhost, dt.API()+":8080/oc")
+ hostUrl = strings.ReplaceAll(hostUrl, localhost, dt.API()+":8080/oc")
}
- } else {
+ } else {*/
- url = url + "/" + dt.API()
+ hostUrl = hostUrl + "/" + strings.ReplaceAll(dt.API(), "oc-", "")
- }
+ //}
- return url
+ fmt.Println("Contacting", hostUrl)
+ return hostUrl
}
// checkPeerStatus checks the status of a peer
- func (p *PeerCache) checkPeerStatus(peerID string, appName string, caller *tools.HTTPCaller) (*Peer, bool) {
+ func (p *PeerCache) checkPeerStatus(peerID string, appName string) (*Peer, bool) {
api := tools.API{}
access := NewShallowAccessor()
res, code, _ := access.LoadOne(peerID) // Load the peer from db
if code != 200 { // no peer no party
return nil, false
}
- methods := caller.URLS[tools.PEER] // Get the methods url of the peer
- if methods == nil {
- return res.(*Peer), false
- }
- meth := methods[tools.POST] // Get the POST method to check status
- if meth == "" {
- return res.(*Peer), false
- }
- url := p.urlFormat(res.(*Peer).Url, tools.PEER) + meth // Format the URL
- fmt.Println("Checking peer status on", url, "...")
+ url := p.urlFormat(res.(*Peer).Url, tools.PEER) + "/status" // Format the URL
state, services := api.CheckRemotePeer(url)
- fmt.Println("Checking peer status on", url, state, services)
+ // Check the status of the peer
res.(*Peer).ServicesState = services // Update the services states of the peer
access.UpdateOne(res, peerID) // Update the peer in the db
return res.(*Peer), state != tools.DEAD && services[appName] == 0 // Return the peer and its status
}
// LaunchPeerExecution launches an execution on a peer
+ // The method contacts the path described by : peer.Url + datatype path (from enums) + replacement of id by dataID
func (p *PeerCache) LaunchPeerExecution(peerID string, dataID string,
- dt tools.DataType, method tools.METHOD, body map[string]interface{}, caller *tools.HTTPCaller) (*PeerExecution, error) {
+ dt tools.DataType, method tools.METHOD, body interface{}, caller *tools.HTTPCaller) (*PeerExecution, error) {
fmt.Println("Launching peer execution on", caller.URLS, dt, method)
methods := caller.URLS[dt] // Get the methods url of the data type
if m, ok := methods[method]; !ok || m == "" {
- return nil, errors.New("no path found")
+ return nil, errors.New("Requested method " + method.String() + " not declared in HTTPCaller")
}
- meth := methods[method] // Get the method url to execute
+ path := methods[method] // Get the path corresponding to the action we want to execute
- meth = strings.ReplaceAll(meth, ":id", dataID) // Replace the id in the url in case of a DELETE / UPDATE method (it's a standard naming in OC)
+ path = strings.ReplaceAll(path, ":id", dataID) // Replace the id in the path in case of a DELETE / UPDATE method (it's a standard naming in OC)
url := ""
// Check the status of the peer
- if mypeer, ok := p.checkPeerStatus(peerID, dt.API(), caller); !ok && mypeer != nil {
+ if mypeer, ok := p.checkPeerStatus(peerID, dt.API()); !ok && mypeer != nil {
// If the peer is not reachable, add the execution to the failed executions list
pexec := &PeerExecution{
Method: method.String(),
- Url: p.urlFormat((mypeer.Url)+meth, dt),
+ Url: p.urlFormat((mypeer.Url), dt) + path, // the url is constitued of : host URL + resource path + action path (ex : mypeer.com/datacenter/resourcetype/path/to/action)
Body: body,
DataType: dt.EnumIndex(),
DataID: dataID,
}
mypeer.AddExecution(*pexec)
NewShallowAccessor().UpdateOne(mypeer, peerID) // Update the peer in the db
- return nil, errors.New("peer is not reachable")
+ return nil, errors.New("peer is " + peerID + " not reachable")
} else {
if mypeer == nil {
- return nil, errors.New("peer not found")
+ return nil, errors.New("peer " + peerID + " not found")
}
// If the peer is reachable, launch the execution
- url = p.urlFormat((mypeer.Url)+meth, dt) // Format the URL
+ url = p.urlFormat((mypeer.Url), dt) + path // Format the URL
tmp := mypeer.FailedExecution // Get the failed executions list
mypeer.FailedExecution = []PeerExecution{} // Reset the failed executions list
NewShallowAccessor().UpdateOne(mypeer, peerID) // Update the peer in the db
@@ -116,12 +107,11 @@ func (p *PeerCache) LaunchPeerExecution(peerID string, dataID string,
go p.exec(v.Url, tools.ToMethod(v.Method), v.Body, caller)
}
}
- fmt.Println("URL exec", url)
return nil, p.exec(url, method, body, caller) // Execute the method
}
// exec executes the method on the peer
- func (p *PeerCache) exec(url string, method tools.METHOD, body map[string]interface{}, caller *tools.HTTPCaller) error {
+ func (p *PeerCache) exec(url string, method tools.METHOD, body interface{}, caller *tools.HTTPCaller) error {
var b []byte
var err error
if method == tools.POST { // Execute the POST method if it's a POST method
@@ -133,8 +123,11 @@ func (p *PeerCache) exec(url string, method tools.METHOD, body map[string]interf
if method == tools.DELETE { // Execute the DELETE method if it's a DELETE method
b, err = caller.CallDelete(url, "")
}
+ if err != nil {
+ return err
+ }
var m map[string]interface{}
- json.Unmarshal(b, &m)
+ err = json.Unmarshal(b, &m)
if err != nil {
return err
}
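The new URL convention is: peer host URL, then the datatype API segment with the "oc-" prefix stripped, then the per-method path with ":id" filled in. A tiny sketch of that composition with purely hypothetical values (host, API name and path are made up for illustration):

package main

import (
	"fmt"
	"strings"
)

func main() {
	host := "http://peer.example.com" // hypothetical peer URL
	api := "oc-datacenter"            // hypothetical dt.API() value
	path := "/booking/:id"            // hypothetical caller.URLS[dt][method] entry
	dataID := "42"

	// host URL + API segment without the "oc-" prefix + action path with the id substituted.
	base := host + "/" + strings.ReplaceAll(api, "oc-", "")
	full := base + strings.ReplaceAll(path, ":id", dataID)
	fmt.Println(full) // http://peer.example.com/datacenter/booking/42
}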

View File

@@ -11,12 +11,14 @@ import (
type peerMongoAccessor struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
+ overrideAuth bool
}
// New creates a new instance of the peerMongoAccessor
func NewShallowAccessor() *peerMongoAccessor {
return &peerMongoAccessor{
- utils.AbstractAccessor{
+ overrideAuth: true,
+ AbstractAccessor: utils.AbstractAccessor{
Logger: logs.CreateLogger(tools.PEER.String()), // Create a logger with the data type
Type: tools.PEER,
},
@@ -25,7 +27,8 @@ func NewShallowAccessor() *peerMongoAccessor {
func NewAccessor(request *tools.APIRequest) *peerMongoAccessor {
return &peerMongoAccessor{
- utils.AbstractAccessor{
+ overrideAuth: false,
+ AbstractAccessor: utils.AbstractAccessor{
Logger: logs.CreateLogger(tools.PEER.String()), // Create a logger with the data type
Request: request,
Type: tools.PEER,
@@ -33,6 +36,10 @@ func NewAccessor(request *tools.APIRequest) *peerMongoAccessor {
}
}
+ func (wfa *peerMongoAccessor) ShouldVerifyAuth() bool {
+ return !wfa.overrideAuth
+ }
/*
* Nothing special here, just the basic CRUD operations
*/
@@ -72,14 +79,16 @@ func (wfa *peerMongoAccessor) Search(filters *dbs.Filters, search string, isDraf
}, isDraft, wfa)
}
func (a *peerMongoAccessor) getDefaultFilter(search string) *dbs.Filters {
- s, err := strconv.Atoi(search)
- if err == nil {
+ if i, err := strconv.Atoi(search); err == nil {
return &dbs.Filters{
Or: map[string][]dbs.Filter{ // search by name if no filters are provided
- "state": {{Operator: dbs.EQUAL.String(), Value: s}},
+ "state": {{Operator: dbs.EQUAL.String(), Value: i}},
},
}
} else {
+ if search == "*" {
+ search = ""
+ }
return &dbs.Filters{
Or: map[string][]dbs.Filter{ // search by name if no filters are provided
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},

View File

@@ -5,7 +5,8 @@ import (
"strings" "strings"
"time" "time"
"cloud.o-forge.io/core/oc-lib/models/common" "cloud.o-forge.io/core/oc-lib/models/common/enum"
"cloud.o-forge.io/core/oc-lib/models/common/models"
"cloud.o-forge.io/core/oc-lib/models/common/pricing" "cloud.o-forge.io/core/oc-lib/models/common/pricing"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
@@ -16,22 +17,48 @@ import (
* it defines the resource compute
*/
type ComputeResource struct {
- AbstractResource[*ComputeResourceInstance]
+ AbstractInstanciatedResource[*ComputeResourceInstance]
Architecture string `json:"architecture,omitempty" bson:"architecture,omitempty"` // Architecture is the architecture
- Infrastructure common.InfrastructureType `json:"infrastructure,omitempty" bson:"infrastructure,omitempty"`
+ Infrastructure enum.InfrastructureType `json:"infrastructure" bson:"infrastructure" default:"-1"` // Infrastructure is the infrastructure
}
func (d *ComputeResource) GetAccessor(request *tools.APIRequest) utils.Accessor {
return NewAccessor[*ComputeResource](tools.COMPUTE_RESOURCE, request, func() utils.DBObject { return &ComputeResource{} })
}
func (r *ComputeResource) GetType() string {
return tools.COMPUTE_RESOURCE.String()
}
func (abs *ComputeResource) ConvertToPricedResource(
t tools.DataType, request *tools.APIRequest) pricing.PricedItemITF {
if t != tools.COMPUTE_RESOURCE {
return nil
}
p := abs.AbstractInstanciatedResource.ConvertToPricedResource(t, request)
priced := p.(*PricedResource)
return &PricedComputeResource{
PricedResource: *priced,
}
}
type ComputeNode struct {
Name string `json:"name,omitempty" bson:"name,omitempty"`
Quantity int64 `json:"quantity" bson:"quantity" default:"1"`
RAM *models.RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
CPUs map[string]int64 `bson:"cpus,omitempty" json:"cpus,omitempty"` // CPUs is the list of CPUs key is model
GPUs map[string]int64 `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs key is model
}
type ComputeResourceInstance struct {
ResourceInstance[*ComputeResourcePartnership]
- SecurityLevel string `json:"security_level,omitempty" bson:"security_level,omitempty"`
- PowerSource string `json:"power_source,omitempty" bson:"power_source,omitempty"`
- CPUs map[string]*common.CPU `bson:"cpus,omitempty" json:"cpus,omitempty"` // CPUs is the list of CPUs key is model
- GPUs map[string]*common.GPU `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs key is model
- RAM *common.RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
+ Source string `json:"source,omitempty" bson:"source,omitempty"` // Source is the source of the resource
+ SecurityLevel string `json:"security_level,omitempty" bson:"security_level,omitempty"`
+ PowerSources []string `json:"power_sources,omitempty" bson:"power_sources,omitempty"`
+ AnnualCO2Emissions float64 `json:"annual_co2_emissions,omitempty" bson:"co2_emissions,omitempty"`
+ CPUs map[string]*models.CPU `bson:"cpus,omitempty" json:"cpus,omitempty"` // CPUs is the list of CPUs key is model
+ GPUs map[string]*models.GPU `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs key is model
+ Nodes []*ComputeNode `json:"nodes,omitempty" bson:"nodes,omitempty"`
}
type ComputeResourcePartnership struct {
@@ -41,23 +68,14 @@ type ComputeResourcePartnership struct {
MaxAllowedRAMSize float64 `json:"allowed_ram,omitempty" bson:"allowed_ram,omitempty"`
}
- type ComputeResourcePricingProfileOptions struct {
- CPUCore int `json:"cpu_core" bson:"cpu_core" default:"1"`
- GPUMemoryGB float64 `json:"gpu_memory_gb" bson:"gpu_memory_gb" default:"1"`
- RAMSizeGB float64 `json:"ram_size_gb" bson:"ram_size_gb" default:"1"`
- }
type ComputeResourcePricingProfile struct {
pricing.ExploitPricingProfile[pricing.TimePricingStrategy]
- Options ComputeResourcePricingProfileOptions `json:"options,omitempty" bson:"options,omitempty"` // Options is the options of the pricing profile
// ExploitPricingProfile is the pricing profile of a compute it means that we exploit the resource for an amount of continuous time
- OverrideCPUsPrices map[string]float64 `json:"cpus_prices,omitempty" bson:"cpus_prices,omitempty"` // CPUsPrices is the prices of the CPUs
+ CPUsPrices map[string]float64 `json:"cpus_prices,omitempty" bson:"cpus_prices,omitempty"` // CPUsPrices is the prices of the CPUs
- OverrideGPUsPrices map[string]float64 `json:"gpus_prices,omitempty" bson:"gpus_prices,omitempty"` // GPUsPrices is the prices of the GPUs
+ GPUsPrices map[string]float64 `json:"gpus_prices,omitempty" bson:"gpus_prices,omitempty"` // GPUsPrices is the prices of the GPUs
- OverrideRAMPrice float64 `json:"ram_price" bson:"ram_price" default:"-1"` // RAMPrice is the price of the RAM
+ RAMPrice float64 `json:"ram_price" bson:"ram_price" default:"-1"` // RAMPrice is the price of the RAM
}
- // PROBLEM
func (p *ComputeResourcePricingProfile) IsPurchased() bool {
return p.Pricing.BuyingStrategy != pricing.PAY_PER_USE
}
@@ -75,10 +93,10 @@ func (p *ComputeResourcePricingProfile) GetPrice(amountOfData float64, explicitD
pp := float64(0)
model := params[1]
if strings.Contains(params[0], "cpus") && len(params) > 1 {
- if _, ok := p.OverrideCPUsPrices[model]; ok {
+ if _, ok := p.CPUsPrices[model]; ok {
- p.Pricing.Price = p.OverrideCPUsPrices[model]
+ p.Pricing.Price = p.CPUsPrices[model]
}
- r, err := p.Pricing.GetPrice(amountOfData/float64(p.Options.CPUCore), explicitDuration, start, &end)
+ r, err := p.Pricing.GetPrice(amountOfData, explicitDuration, start, &end)
if err != nil {
return 0, err
}
@@ -86,20 +104,20 @@ func (p *ComputeResourcePricingProfile) GetPrice(amountOfData float64, explicitD
}
if strings.Contains(params[0], "gpus") && len(params) > 1 {
- if _, ok := p.OverrideGPUsPrices[model]; ok {
+ if _, ok := p.GPUsPrices[model]; ok {
- p.Pricing.Price = p.OverrideGPUsPrices[model]
+ p.Pricing.Price = p.GPUsPrices[model]
}
- r, err := p.Pricing.GetPrice(amountOfData/float64(p.Options.GPUMemoryGB), explicitDuration, start, &end)
+ r, err := p.Pricing.GetPrice(amountOfData, explicitDuration, start, &end)
if err != nil {
return 0, err
}
pp += r
}
if strings.Contains(params[0], "ram") {
- if p.OverrideRAMPrice >= 0 {
+ if p.RAMPrice >= 0 {
- p.Pricing.Price = p.OverrideRAMPrice
+ p.Pricing.Price = p.RAMPrice
}
- r, err := p.Pricing.GetPrice(float64(amountOfData)/p.Options.RAMSizeGB, explicitDuration, start, &end)
+ r, err := p.Pricing.GetPrice(float64(amountOfData), explicitDuration, start, &end)
if err != nil {
return 0, err
}
@@ -121,11 +139,19 @@ func (r *PricedComputeResource) GetType() tools.DataType {
}
func (r *PricedComputeResource) GetPrice() (float64, error) {
- if r.UsageStart == nil || r.UsageEnd == nil {
- return 0, errors.New("Usage start and end must be set")
+ now := time.Now()
+ if r.UsageStart == nil {
+ r.UsageStart = &now
+ }
+ if r.UsageEnd == nil {
+ add := r.UsageStart.Add(time.Duration(1 * time.Hour))
+ r.UsageEnd = &add
}
if r.SelectedPricing == nil {
- return 0, errors.New("Selected pricing must be set")
+ if len(r.PricingProfiles) == 0 {
+ return 0, errors.New("pricing profile must be set on Priced Compute" + r.ResourceID)
+ }
+ r.SelectedPricing = &r.PricingProfiles[0]
}
pricing := *r.SelectedPricing
price := float64(0)
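On the pricing side, the per-model maps act as overrides of the profile's base price before the time-based estimate runs. A standalone mirror of that lookup (the model name and prices below are hypothetical):

package main

import "fmt"

// Standalone mirror of the per-model override above: if a model-specific price
// exists it replaces the profile's base price, otherwise the base price is kept.
func effectiveUnitPrice(base float64, overrides map[string]float64, model string) float64 {
	if p, ok := overrides[model]; ok {
		return p
	}
	return base
}

func main() {
	cpusPrices := map[string]float64{"EPYC-7742": 0.08} // hypothetical model -> price table
	fmt.Println(effectiveUnitPrice(0.05, cpusPrices, "EPYC-7742")) // 0.08 (override hit)
	fmt.Println(effectiveUnitPrice(0.05, cpusPrices, "unknown"))   // 0.05 (falls back to base)
}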

View File

@@ -2,36 +2,29 @@ package resources
import (
"errors"
"fmt"
"time"
+ "cloud.o-forge.io/core/oc-lib/models/common/models"
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)
// enum of public private or licenced data
type DataLicense int
const (
PUBLIC DataLicense = iota
PRIVATE
LICENCED
)
/*
* DataResource is a struct that represents a data resource
* it defines the resource data
*/
type DataResource struct {
- AbstractResource[*ResourceInstance[*DataResourcePartnership]]
+ AbstractInstanciatedResource[*DataInstance]
Type string `bson:"type,omitempty" json:"type,omitempty"`
Quality string `bson:"quality,omitempty" json:"quality,omitempty"`
OpenData bool `bson:"open_data" json:"open_data" default:"false"` // Type is the type of the storage
Static bool `bson:"static" json:"static" default:"false"`
- UpdatePeriod time.Time `bson:"update_period,omitempty" json:"update_period,omitempty"`
+ UpdatePeriod *time.Time `bson:"update_period,omitempty" json:"update_period,omitempty"`
PersonalData bool `bson:"personal_data,omitempty" json:"personal_data,omitempty"`
AnonymizedPersonalData bool `bson:"anonymized_personal_data,omitempty" json:"anonymized_personal_data,omitempty"`
- SizeGB float64 `json:"size_gb,omitempty" bson:"size_gb,omitempty"` // SizeGB is the size of the data
+ SizeGB float64 `json:"size,omitempty" bson:"size,omitempty"` // SizeGB is the size of the data
License DataLicense `json:"license" bson:"license" description:"license of the data" default:"0"` // License is the license of the data
// ? Interest DataLicense `json:"interest" bson:"interest" description:"interest of the data" default:"0"` // Interest is the interest of the data
Example string `json:"example,omitempty" bson:"example,omitempty" description:"base64 encoded data"` // Example is an example of the data
}
@@ -40,6 +33,45 @@ func (d *DataResource) GetAccessor(request *tools.APIRequest) utils.Accessor {
return NewAccessor[*DataResource](tools.DATA_RESOURCE, request, func() utils.DBObject { return &DataResource{} }) // Create a new instance of the accessor
}
func (r *DataResource) GetType() string {
return tools.DATA_RESOURCE.String()
}
func (abs *DataResource) ConvertToPricedResource(
t tools.DataType, request *tools.APIRequest) pricing.PricedItemITF {
if t != tools.DATA_RESOURCE {
return nil
}
p := abs.AbstractInstanciatedResource.ConvertToPricedResource(t, request)
priced := p.(*PricedResource)
return &PricedDataResource{
PricedResource: *priced,
}
}
type DataInstance struct {
ResourceInstance[*DataResourcePartnership]
Source string `json:"source,omitempty" bson:"source,omitempty"` // Source is the source of the data
}
func (ri *DataInstance) StoreDraftDefault() {
found := false
for _, p := range ri.ResourceInstance.Env {
if p.Attr == "source" {
found = true
break
}
}
if !found {
ri.ResourceInstance.Env = append(ri.ResourceInstance.Env, models.Param{
Attr: "source",
Value: ri.Source,
Readonly: true,
})
}
ri.ResourceInstance.StoreDraftDefault()
}
type DataResourcePartnership struct {
ResourcePartnerShip[*DataResourcePricingProfile]
MaxDownloadableGbAllowed float64 `json:"allowed_gb,omitempty" bson:"allowed_gb,omitempty"`
@@ -57,6 +89,14 @@ const (
PER_KB_DOWNLOADED
)
func (t DataResourcePricingStrategy) String() string {
return [...]string{"PER DOWNLOAD", "PER TB DOWNLOADED", "PER GB DOWNLOADED", "PER MB DOWNLOADED", "PER KB DOWNLOADED"}[t]
}
func DataResourcePricingStrategyList() []DataResourcePricingStrategy {
return []DataResourcePricingStrategy{PER_DOWNLOAD, PER_TB_DOWNLOADED, PER_GB_DOWNLOADED, PER_MB_DOWNLOADED, PER_KB_DOWNLOADED}
}
func ToDataResourcePricingStrategy(i int) DataResourcePricingStrategy {
return DataResourcePricingStrategy(i)
}
@@ -82,7 +122,7 @@ func (t DataResourcePricingStrategy) GetQuantity(amountOfDataGB float64) (float6
case PER_KB_DOWNLOADED:
return amountOfDataGB / 1000000, nil
}
- return 0, errors.New("Pricing strategy not found")
+ return 0, errors.New("pricing strategy not found")
}
type DataResourcePricingProfile struct {
@@ -111,11 +151,20 @@ func (r *PricedDataResource) GetType() tools.DataType {
}
func (r *PricedDataResource) GetPrice() (float64, error) {
- if r.UsageStart == nil || r.UsageEnd == nil {
- return 0, errors.New("Usage start and end must be set")
+ fmt.Println("GetPrice", r.UsageStart, r.UsageEnd)
+ now := time.Now()
+ if r.UsageStart == nil {
+ r.UsageStart = &now
+ }
+ if r.UsageEnd == nil {
+ add := r.UsageStart.Add(time.Duration(1 * time.Hour))
+ r.UsageEnd = &add
}
if r.SelectedPricing == nil {
- return 0, errors.New("Selected pricing must be set")
+ if len(r.PricingProfiles) == 0 {
+ return 0, errors.New("pricing profile must be set on Priced Data" + r.ResourceID)
+ }
+ r.SelectedPricing = &r.PricingProfiles[0]
}
pricing := *r.SelectedPricing
var err error
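The defaulting added to both PricedDataResource.GetPrice and PricedComputeResource.GetPrice fills a missing usage window with "now" to "now + 1 hour" before pricing. A standalone mirror of that fallback:

package main

import (
	"fmt"
	"time"
)

// Standalone mirror of the defaulting above: a missing usage window is filled
// with "now" and "now + 1 hour" before the price is computed.
func defaultWindow(start, end *time.Time) (time.Time, time.Time) {
	now := time.Now()
	if start == nil {
		start = &now
	}
	if end == nil {
		e := start.Add(1 * time.Hour)
		end = &e
	}
	return *start, *end
}

func main() {
	s, e := defaultWindow(nil, nil)
	fmt.Println(e.Sub(s)) // 1h0m0s
}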

View File

@@ -1,40 +1,27 @@
package resources
import (
- "time"
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
- "cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)
- type ShallowResourceInterface interface {
- utils.DBObject
- GetType() tools.DataType
- GetCreatorID() string
- GetPricingID() string
- GetLocationStart() *time.Time
- GetLocationEnd() *time.Time
- GetExplicitDurationInS() float64
- SetStartUsage(start time.Time)
- SetEndUsage(end time.Time)
- GetPartnership(request *tools.APIRequest) ResourcePartnerITF
- SetResourceModel(model *resource_model.ResourceModel)
- }
type ResourceInterface interface {
utils.DBObject
Trim()
Transform() utils.DBObject
ConvertToPricedResource(t tools.DataType, request *tools.APIRequest) pricing.PricedItemITF
GetType() string
GetSelectedInstance() utils.DBObject
ClearEnv() utils.DBObject
SetAllowedInstances(request *tools.APIRequest)
- SetResourceModel(model *resource_model.ResourceModel)
}
type ResourceInstanceITF interface {
utils.DBObject
GetID() string
GetName() string
StoreDraftDefault()
ClearEnv()
GetPricingsProfiles(peerID string, groups []string) []pricing.PricingProfileITF
GetPeerGroups() ([]ResourcePartnerITF, []map[string][]string)
ClearPeerGroups()

View File

@@ -28,6 +28,7 @@ func (r *ResourceSet) Clear() {
}
func (r *ResourceSet) Fill(request *tools.APIRequest) {
+r.Clear()
for k, v := range map[utils.DBObject][]string{
(&DataResource{}): r.Datas,
(&ComputeResource{}): r.Computes,

View File

@@ -2,6 +2,7 @@ package resources
import (
"errors"
+"fmt"
"time"
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
@@ -9,17 +10,17 @@ import (
)
type PricedResource struct {
Name string `json:"name,omitempty" bson:"name,omitempty"`
Logo string `json:"logo,omitempty" bson:"logo,omitempty"`
InstancesRefs map[string]string `json:"instances_refs,omitempty" bson:"instances_refs,omitempty"`
-PricingProfiles map[string][]pricing.PricingProfileITF `json:"pricing_profiles,omitempty" bson:"pricing_profiles,omitempty"`
+PricingProfiles []pricing.PricingProfileITF `json:"pricing_profiles,omitempty" bson:"pricing_profiles,omitempty"`
SelectedPricing *pricing.PricingProfileITF `json:"selected_pricing,omitempty" bson:"selected_pricing,omitempty"`
ExplicitBookingDurationS float64 `json:"explicit_location_duration_s,omitempty" bson:"explicit_location_duration_s,omitempty"`
UsageStart *time.Time `json:"start,omitempty" bson:"start,omitempty"`
UsageEnd *time.Time `json:"end,omitempty" bson:"end,omitempty"`
CreatorID string `json:"peer_id,omitempty" bson:"peer_id,omitempty"`
ResourceID string `json:"resource_id,omitempty" bson:"resource_id,omitempty"`
ResourceType tools.DataType `json:"resource_type,omitempty" bson:"resource_type,omitempty"`
}
func (abs *PricedResource) GetID() string {
@@ -34,18 +35,6 @@ func (abs *PricedResource) GetCreatorID() string {
return abs.CreatorID
}
-func (abs *PricedResource) SetStartUsage(start time.Time) {
-if abs.UsageStart == nil {
-abs.UsageStart = &start
-}
-}
-func (abs *PricedResource) SetEndUsage(end time.Time) {
-if abs.UsageEnd == nil {
-abs.UsageEnd = &end
-}
-}
func (abs *PricedResource) IsPurchased() bool {
if abs.SelectedPricing == nil {
return false
@@ -71,20 +60,34 @@ func (abs *PricedResource) SetLocationEnd(end time.Time) {
func (abs *PricedResource) GetExplicitDurationInS() float64 {
if abs.ExplicitBookingDurationS == 0 {
-if abs.UsageEnd == nil || abs.UsageStart == nil {
+if abs.UsageEnd == nil && abs.UsageStart == nil {
return time.Duration(1 * time.Hour).Seconds()
}
+if abs.UsageEnd == nil {
+add := abs.UsageStart.Add(time.Duration(1 * time.Hour))
+abs.UsageEnd = &add
+}
return abs.UsageEnd.Sub(*abs.UsageStart).Seconds()
}
return abs.ExplicitBookingDurationS
}
func (r *PricedResource) GetPrice() (float64, error) {
-if r.UsageStart == nil || r.UsageEnd == nil {
-return 0, errors.New("Usage start and end must be set")
+fmt.Println("GetPrice", r.UsageStart, r.UsageEnd)
+now := time.Now()
+if r.UsageStart == nil {
+r.UsageStart = &now
+}
+if r.UsageEnd == nil {
+add := r.UsageStart.Add(time.Duration(1 * time.Hour))
+r.UsageEnd = &add
}
if r.SelectedPricing == nil {
-return 0, errors.New("Selected pricing must be set")
+if len(r.PricingProfiles) == 0 {
+return 0, errors.New("pricing profile must be set on Priced Resource " + r.ResourceID)
+}
+r.SelectedPricing = &r.PricingProfiles[0]
}
-return (*r.SelectedPricing).GetPrice(1, 0, *r.UsageStart, *r.UsageEnd)
+pricing := *r.SelectedPricing
+return pricing.GetPrice(1, 0, *r.UsageStart, *r.UsageEnd)
}
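A standalone sketch of the explicit-duration fallback in the hunk above: both ends missing means one hour, an open end is closed one hour after the start, otherwise the real interval is used. Names are illustrative; the nil-start/non-nil-end case is left unhandled here, as in the hunk.

package main

import (
	"fmt"
	"time"
)

// explicitDurationS mirrors GetExplicitDurationInS when no explicit booking
// duration is set.
func explicitDurationS(start, end *time.Time) float64 {
	if end == nil && start == nil {
		return (1 * time.Hour).Seconds() // nothing known: assume one hour
	}
	if end == nil {
		e := start.Add(1 * time.Hour) // open-ended usage: close it after one hour
		end = &e
	}
	return end.Sub(*start).Seconds()
}

func main() {
	begin := time.Now()
	later := begin.Add(30 * time.Minute)
	fmt.Println(explicitDurationS(nil, nil))       // 3600
	fmt.Println(explicitDurationS(&begin, nil))    // 3600
	fmt.Println(explicitDurationS(&begin, &later)) // 1800
}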

View File

@@ -3,16 +3,17 @@ package resources
import (
"time"
-"cloud.o-forge.io/core/oc-lib/models/common"
+"cloud.o-forge.io/core/oc-lib/models/common/enum"
+"cloud.o-forge.io/core/oc-lib/models/common/models"
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)
type ProcessingUsage struct {
-CPUs map[string]*common.CPU `bson:"cpus,omitempty" json:"cpus,omitempty"` // CPUs is the list of CPUs key is model
-GPUs map[string]*common.GPU `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs key is model
-RAM *common.RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
+CPUs map[string]*models.CPU `bson:"cpus,omitempty" json:"cpus,omitempty"` // CPUs is the list of CPUs key is model
+GPUs map[string]*models.GPU `bson:"gpus,omitempty" json:"gpus,omitempty"` // GPUs is the list of GPUs key is model
+RAM *models.RAM `bson:"ram,omitempty" json:"ram,omitempty"` // RAM is the RAM
StorageGb float64 `bson:"storage,omitempty" json:"storage,omitempty"` // Storage is the storage
Hypothesis string `bson:"hypothesis,omitempty" json:"hypothesis,omitempty"`
@@ -24,14 +25,26 @@ type ProcessingUsage struct {
* it defines the resource processing
*/
type ProcessingResource struct {
-AbstractResource[*ResourceInstance[*ResourcePartnerShip[*ProcessingResourcePricingProfile]]]
-Infrastructure common.InfrastructureType `json:"infrastructure,omitempty" bson:"infrastructure,omitempty"`
+AbstractInstanciatedResource[*ProcessingInstance]
+Infrastructure enum.InfrastructureType `json:"infrastructure" bson:"infrastructure" default:"-1"` // Infrastructure is the infrastructure
IsService bool `json:"is_service,omitempty" bson:"is_service,omitempty"` // IsService is a flag that indicates if the processing is a service
Usage *ProcessingUsage `bson:"usage,omitempty" json:"usage,omitempty"` // Usage is the usage of the processing
OpenSource bool `json:"open_source" bson:"open_source" default:"false"`
License string `json:"license,omitempty" bson:"license,omitempty"`
Maturity string `json:"maturity,omitempty" bson:"maturity,omitempty"`
-Container *common.Container `json:"container,omitempty" bson:"container,omitempty"` // Container is the container
}
+func (r *ProcessingResource) GetType() string {
+return tools.PROCESSING_RESOURCE.String()
+}
+type ProcessingResourceAccess struct {
+Container *models.Container `json:"container,omitempty" bson:"container,omitempty"` // Container is the container
+}
+type ProcessingInstance struct {
+ResourceInstance[*ResourcePartnerShip[*ProcessingResourcePricingProfile]]
+Access *ProcessingResourceAccess `json:"access,omitempty" bson:"access,omitempty"` // Access is the access
+}
type PricedProcessingResource struct {

View File

@@ -17,9 +17,9 @@ type purchaseResourceMongoAccessor struct {
func NewAccessor(request *tools.APIRequest) *purchaseResourceMongoAccessor {
return &purchaseResourceMongoAccessor{
AbstractAccessor: utils.AbstractAccessor{
-Logger: logs.CreateLogger(tools.BUYING_STATUS.String()), // Create a logger with the data type
+Logger: logs.CreateLogger(tools.PURCHASE_RESOURCE.String()), // Create a logger with the data type
Request: request,
-Type: tools.BUYING_STATUS,
+Type: tools.PURCHASE_RESOURCE,
},
}
}

View File

@@ -4,69 +4,61 @@ import (
"slices" "slices"
"cloud.o-forge.io/core/oc-lib/config" "cloud.o-forge.io/core/oc-lib/config"
"cloud.o-forge.io/core/oc-lib/models/common/models"
"cloud.o-forge.io/core/oc-lib/models/common/pricing" "cloud.o-forge.io/core/oc-lib/models/common/pricing"
"cloud.o-forge.io/core/oc-lib/models/peer" "cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
"github.com/biter777/countries" "github.com/biter777/countries"
) )
// AbstractResource is the struct containing all of the attributes commons to all ressources
-// Resource is the interface to be implemented by all classes inheriting from Resource to have the same behavior
-// http://www.inanzzz.com/index.php/post/wqbs/a-basic-usage-of-int-and-string-enum-types-in-golang
-/*
-* AbstractResource is a struct that represents a resource
-* it defines the resource data
-*/
-type AbstractResource[T ResourceInstanceITF] struct {
-utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
-Logo string `json:"logo,omitempty" bson:"logo,omitempty" validate:"required"` // Logo is the logo of the resource
-Description string `json:"description,omitempty" bson:"description,omitempty"` // Description is the description of the resource
-ShortDescription string `json:"short_description,omitempty" bson:"short_description,omitempty" validate:"required"` // ShortDescription is the short description of the resource
-Owners []utils.Owner `json:"owners,omitempty" bson:"owners,omitempty"` // Owners is the list of owners of the resource
-ResourceModel *resource_model.ResourceModel `json:"resource_model,omitempty" bson:"resource_model,omitempty"` // ResourceModel is the model of the resource
-UsageRestrictions string `bson:"usage_restrictions,omitempty" json:"usage_restrictions,omitempty"`
-SelectedInstanceIndex int `json:"selected_instance_index,omitempty" bson:"selected_instance_index,omitempty"` // SelectedInstance is the selected instance
-Instances []T `json:"instances,omitempty" bson:"instances,omitempty"` // Bill is the bill of the resource
+type AbstractResource struct {
+utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
+Type string `json:"type,omitempty" bson:"type,omitempty"` // Type is the type of the resource
+Logo string `json:"logo,omitempty" bson:"logo,omitempty" validate:"required"` // Logo is the logo of the resource
+Description string `json:"description,omitempty" bson:"description,omitempty"` // Description is the description of the resource
+ShortDescription string `json:"short_description,omitempty" bson:"short_description,omitempty" validate:"required"` // ShortDescription is the short description of the resource
+Owners []utils.Owner `json:"owners,omitempty" bson:"owners,omitempty"` // Owners is the list of owners of the resource
+UsageRestrictions string `bson:"usage_restrictions,omitempty" json:"usage_restrictions,omitempty"`
+SelectedInstanceIndex *int `json:"selected_instance_index,omitempty" bson:"selected_instance_index,omitempty"` // SelectedInstance is the selected instance
}
-func (r *AbstractResource[T]) Transform() utils.DBObject {
-return r
+func (r *AbstractResource) GetSelectedInstance() utils.DBObject {
+return nil
}
-func (r *AbstractResource[T]) StoreDraftDefault() {
+func (r *AbstractResource) GetType() string {
+return tools.INVALID.String()
+}
+func (r *AbstractResource) StoreDraftDefault() {
r.IsDraft = true
}
-func (r *AbstractResource[T]) CanUpdate(set utils.DBObject) (bool, utils.DBObject) {
+func (r *AbstractResource) CanUpdate(set utils.DBObject) (bool, utils.DBObject) {
if r.IsDraft != set.IsDrafted() && set.IsDrafted() {
return true, set // only state can be updated
}
return r.IsDraft != set.IsDrafted() && set.IsDrafted(), set
}
-func (r *AbstractResource[T]) CanDelete() bool {
+func (r *AbstractResource) CanDelete() bool {
return r.IsDraft // only draft bookings can be deleted
}
-func (ao *AbstractResource[T]) GetAccessor(request *tools.APIRequest) utils.Accessor {
-return nil
-}
-func (abs *AbstractResource[T]) SetResourceModel(model *resource_model.ResourceModel) {
-abs.ResourceModel = model
-}
-func (abs *AbstractResource[T]) ConvertToPricedResource(
+type AbstractInstanciatedResource[T ResourceInstanceITF] struct {
+AbstractResource // AbstractResource contains the basic fields of an object (id, name)
+Instances []T `json:"instances,omitempty" bson:"instances,omitempty"` // Bill is the bill of the resource
+}
+func (abs *AbstractInstanciatedResource[T]) ConvertToPricedResource(
t tools.DataType, request *tools.APIRequest) pricing.PricedItemITF {
instances := map[string]string{}
-profiles := map[string][]pricing.PricingProfileITF{}
+profiles := []pricing.PricingProfileITF{}
for _, instance := range abs.Instances {
instances[instance.GetID()] = instance.GetName()
-profiles[instance.GetID()] = instance.GetPricingsProfiles(request.PeerID, request.Groups)
+profiles = instance.GetPricingsProfiles(request.PeerID, request.Groups)
}
return &PricedResource{
Name: abs.Name,
@@ -79,11 +71,32 @@ func (abs *AbstractResource[T]) ConvertToPricedResource(
}
}
-func (abs *AbstractResource[T]) SetAllowedInstances(request *tools.APIRequest) {
+func (abs *AbstractInstanciatedResource[T]) ClearEnv() utils.DBObject {
+for _, instance := range abs.Instances {
+instance.ClearEnv()
+}
+return abs
+}
+func (r *AbstractInstanciatedResource[T]) GetSelectedInstance() utils.DBObject {
+if r.SelectedInstanceIndex != nil && len(r.Instances) > *r.SelectedInstanceIndex {
+return r.Instances[*r.SelectedInstanceIndex]
+}
+if len(r.Instances) > 0 {
+return r.Instances[0]
+}
+return nil
+}
+func (abs *AbstractInstanciatedResource[T]) SetAllowedInstances(request *tools.APIRequest) {
+if request != nil && request.PeerID == abs.CreatorID && request.PeerID != "" {
+return
+}
abs.Instances = verifyAuthAction[T](abs.Instances, request)
}
-func (d *AbstractResource[T]) Trim() {
+func (d *AbstractInstanciatedResource[T]) Trim() {
+d.Type = d.GetType()
if ok, _ := (&peer.Peer{AbstractObject: utils.AbstractObject{UUID: d.CreatorID}}).IsMySelf(); !ok {
for _, instance := range d.Instances {
instance.ClearPeerGroups()
@@ -91,7 +104,7 @@ func (d *AbstractResource[T]) Trim() {
}
}
-func (abs *AbstractResource[T]) VerifyAuth(request *tools.APIRequest) bool {
+func (abs *AbstractInstanciatedResource[T]) VerifyAuth(request *tools.APIRequest) bool {
return len(verifyAuthAction[T](abs.Instances, request)) > 0 || abs.AbstractObject.VerifyAuth(request)
}
@@ -106,6 +119,7 @@ func verifyAuthAction[T ResourceInstanceITF](baseInstance []T, request *tools.AP
if grps, ok := peers[request.PeerID]; ok || config.GetConfig().Whitelist {
if (ok && slices.Contains(grps, "*")) || (!ok && config.GetConfig().Whitelist) {
instances = append(instances, instance)
+continue
}
for _, grp := range grps {
if slices.Contains(request.Groups, grp) {
@@ -123,21 +137,26 @@ type GeoPoint struct {
Longitude float64 `json:"longitude,omitempty" bson:"longitude,omitempty"`
}
+type Credentials struct {
+Login string `json:"login,omitempty" bson:"login,omitempty"`
+Pass string `json:"password,omitempty" bson:"password,omitempty"`
+}
type ResourceInstance[T ResourcePartnerITF] struct {
-UUID string `json:"id,omitempty" bson:"id,omitempty"`
-Name string `json:"name,omitempty" bson:"name,omitempty"`
+utils.AbstractObject
Location GeoPoint `json:"location,omitempty" bson:"location,omitempty"`
Country countries.CountryCode `json:"country,omitempty" bson:"country,omitempty"`
AccessProtocol string `json:"access_protocol,omitempty" bson:"access_protocol,omitempty"`
-Partnerships []T `json:"partner_resource,omitempty" bson:"partner_resource,omitempty"`
+Env []models.Param `json:"env,omitempty" bson:"env,omitempty"`
+Inputs []models.Param `json:"inputs,omitempty" bson:"inputs,omitempty"`
+Outputs []models.Param `json:"outputs,omitempty" bson:"outputs,omitempty"`
+Partnerships []T `json:"partnerships,omitempty" bson:"partnerships,omitempty"`
}
-func (ri *ResourceInstance[T]) GetID() string {
-return ri.UUID
-}
-func (ri *ResourceInstance[T]) GetName() string {
-return ri.Name
-}
+func (ri *ResourceInstance[T]) ClearEnv() {
+ri.Env = []models.Param{}
+ri.Inputs = []models.Param{}
+ri.Outputs = []models.Param{}
+}
func (ri *ResourceInstance[T]) GetPricingsProfiles(peerID string, groups []string) []pricing.PricingProfileITF {
@@ -167,14 +186,23 @@ func (ri *ResourceInstance[T]) ClearPeerGroups() {
type ResourcePartnerShip[T pricing.PricingProfileITF] struct {
Namespace string `json:"namespace" bson:"namespace" default:"default-namespace"`
PeerGroups map[string][]string `json:"peer_groups,omitempty" bson:"peer_groups,omitempty"`
-PricingProfiles map[string]T `json:"pricing,omitempty" bson:"pricing,omitempty"`
+PricingProfiles []T `json:"pricing_profiles,omitempty" bson:"pricing_profiles,omitempty"`
}
func (ri *ResourcePartnerShip[T]) GetPricingsProfiles(peerID string, groups []string) []pricing.PricingProfileITF {
+profiles := []pricing.PricingProfileITF{}
if ri.PeerGroups[peerID] != nil {
+for _, ri := range ri.PricingProfiles {
+profiles = append(profiles, ri)
+}
+if slices.Contains(groups, "*") {
+for _, ri := range ri.PricingProfiles {
+profiles = append(profiles, ri)
+}
+return profiles
+}
for _, p := range ri.PeerGroups[peerID] {
if slices.Contains(groups, p) {
-profiles := []pricing.PricingProfileITF{}
for _, ri := range ri.PricingProfiles {
profiles = append(profiles, ri)
}
@@ -182,7 +210,7 @@ func (ri *ResourcePartnerShip[T]) GetPricingsProfiles(peerID string, groups []st
}
}
}
-return []pricing.PricingProfileITF{}
+return profiles
}
func (rp *ResourcePartnerShip[T]) GetPeerGroups() map[string][]string {
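A standalone sketch of the peer/group gate used by verifyAuthAction and GetPricingsProfiles above: a peer listed with "*" (or an unknown peer in whitelist mode) passes outright, otherwise one of its groups must match a group carried by the request. The helper and its names are illustrative, not oc-lib API.

package main

import (
	"fmt"
	"slices"
)

// allowedForPeer condenses the group check sketched in the hunks above.
func allowedForPeer(peerGroups map[string][]string, peerID string, requestGroups []string, whitelist bool) bool {
	grps, ok := peerGroups[peerID]
	if !ok {
		return whitelist // unknown peer: only whitelist mode lets it through
	}
	if slices.Contains(grps, "*") {
		return true // wildcard group: always visible to this peer
	}
	for _, g := range grps {
		if slices.Contains(requestGroups, g) {
			return true
		}
	}
	return false
}

func main() {
	groups := map[string][]string{"peer-a": {"*"}, "peer-b": {"hpc"}}
	fmt.Println(allowedForPeer(groups, "peer-a", nil, false))             // true
	fmt.Println(allowedForPeer(groups, "peer-b", []string{"hpc"}, false)) // true
	fmt.Println(allowedForPeer(groups, "peer-c", nil, false))             // false
}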

View File

@@ -5,7 +5,6 @@ import (
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/logs" "cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
) )
@@ -22,10 +21,9 @@ func NewAccessor[T ResourceInterface](t tools.DataType, request *tools.APIReques
}
return &resourceMongoAccessor[T]{
AbstractAccessor: utils.AbstractAccessor{
-ResourceModelAccessor: resource_model.NewAccessor(),
Logger: logs.CreateLogger(t.String()), // Create a logger with the data type
Request: request,
Type: t,
},
generateData: g,
}
@@ -34,19 +32,16 @@ func NewAccessor[T ResourceInterface](t tools.DataType, request *tools.APIReques
/*
* Nothing special here, just the basic CRUD operations
*/
func (dca *resourceMongoAccessor[T]) DeleteOne(id string) (utils.DBObject, int, error) {
return utils.GenericDeleteOne(id, dca)
}
func (dca *resourceMongoAccessor[T]) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
-set.(T).SetResourceModel(nil)
set.(T).Trim()
return utils.GenericUpdateOne(set, id, dca, dca.generateData())
}
func (dca *resourceMongoAccessor[T]) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
-data.(T).SetResourceModel(nil)
data.(T).Trim()
return utils.GenericStoreOne(data, dca)
}
@@ -57,37 +52,28 @@ func (dca *resourceMongoAccessor[T]) CopyOne(data utils.DBObject) (utils.DBObjec
func (dca *resourceMongoAccessor[T]) LoadOne(id string) (utils.DBObject, int, error) {
return utils.GenericLoadOne[T](id, func(d utils.DBObject) (utils.DBObject, int, error) {
-resources, _, err := dca.ResourceModelAccessor.Search(nil, dca.GetType().String(), false)
-if err == nil && len(resources) > 0 {
-d.(T).SetResourceModel(resources[0].(*resource_model.ResourceModel))
-}
d.(T).SetAllowedInstances(dca.Request)
-d = d.(T).Transform()
return d, 200, nil
}, dca)
}
func (wfa *resourceMongoAccessor[T]) LoadAll(isDraft bool) ([]utils.ShallowDBObject, int, error) {
-resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType().String(), isDraft)
return utils.GenericLoadAll[T](func(d utils.DBObject) utils.ShallowDBObject {
-if err == nil && len(resources) > 0 {
-d.(T).SetResourceModel(resources[0].(*resource_model.ResourceModel))
-}
d.(T).SetAllowedInstances(wfa.Request)
-d = d.(T).Transform()
return d
}, isDraft, wfa)
}
func (wfa *resourceMongoAccessor[T]) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) {
-resources, _, err := wfa.ResourceModelAccessor.Search(nil, wfa.GetType().String(), false)
+if filters == nil && search == "*" {
+return utils.GenericLoadAll[T](func(d utils.DBObject) utils.ShallowDBObject {
+d.(T).SetAllowedInstances(wfa.Request)
+return d
+}, isDraft, wfa)
+}
return utils.GenericSearch[T](filters, search, wfa.getResourceFilter(search),
func(d utils.DBObject) utils.ShallowDBObject {
-if err == nil && len(resources) > 0 {
-d.(T).SetResourceModel(resources[0].(*resource_model.ResourceModel))
-}
d.(T).SetAllowedInstances(wfa.Request)
-d = d.(T).Transform()
return d
}, isDraft, wfa)
}
@@ -95,11 +81,12 @@ func (wfa *resourceMongoAccessor[T]) Search(filters *dbs.Filters, search string,
func (abs *resourceMongoAccessor[T]) getResourceFilter(search string) *dbs.Filters {
return &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
-"abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
-"abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
-"abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
-"abstractresource.owner": {{Operator: dbs.LIKE.String(), Value: search}},
-"abstractresource.source_url": {{Operator: dbs.LIKE.String(), Value: search}},
+"abstractintanciatedresource.abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
+"abstractintanciatedresource.abstractresource.type": {{Operator: dbs.LIKE.String(), Value: search}},
+"abstractintanciatedresource.abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
+"abstractintanciatedresource.abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
+"abstractintanciatedresource.abstractresource.owners.name": {{Operator: dbs.LIKE.String(), Value: search}},
+"abstractintanciatedresource.abstractresource.abstractobject.creator_id": {{Operator: dbs.EQUAL.String(), Value: search}},
},
}
}
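A small in-memory sketch of the dispatch added to the accessor's Search above: a nil filter with the "*" wildcard degrades to a plain list-all, anything else goes through a LIKE-style filtered path. The store and helper names are illustrative, not the oc-lib/Mongo API.

package main

import (
	"fmt"
	"strings"
)

// searchNames mirrors the wildcard-vs-filter split of the Search method above.
func searchNames(all []string, filter string) []string {
	if filter == "*" || filter == "" { // wildcard: behave like LoadAll
		return all
	}
	out := []string{}
	for _, name := range all {
		if strings.Contains(strings.ToLower(name), strings.ToLower(filter)) { // LIKE-style match
			out = append(out, name)
		}
	}
	return out
}

func main() {
	names := []string{"minio-storage", "gpu-cluster", "open-dataset"}
	fmt.Println(searchNames(names, "*"))   // everything
	fmt.Println(searchNames(names, "gpu")) // filtered
}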

View File

@@ -1,67 +0,0 @@
package resource_model
import (
"encoding/json"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)
type WebResource struct {
Protocol string `bson:"protocol,omitempty" json:"protocol,omitempty"` // Protocol is the protocol of the URL
Path string `bson:"path,omitempty" json:"path,omitempty"` // Path is the path of the URL
}
type Model struct {
Type string `json:"type,omitempty" bson:"type,omitempty"` // Type is the type of the model
ReadOnly bool `json:"readonly,omitempty" bson:"readonly,omitempty"` // ReadOnly is the readonly of the model
}
/*
* ResourceModel is a struct that represents a resource model
* it defines the resource metadata and specificity
* Warning: This struct is not user available, it is only used by the system
*/
type ResourceModel struct {
utils.AbstractObject
ResourceType string `json:"resource_type,omitempty" bson:"resource_type,omitempty" validate:"required"`
VarRefs map[string]string `json:"var_refs,omitempty" bson:"var_refs,omitempty"` // VarRefs is the variable references of the model
Model map[string]map[string]Model `json:"model,omitempty" bson:"model,omitempty"`
}
func (d *ResourceModel) StoreDraftDefault() {
d.Name = d.ResourceType + " Resource Model"
d.IsDraft = false
}
func (abs *ResourceModel) VerifyAuth(request *tools.APIRequest) bool {
return true
}
func (d *ResourceModel) GetAccessor(request *tools.APIRequest) utils.Accessor {
return &ResourceModelMongoAccessor{
utils.AbstractAccessor{
Type: tools.RESOURCE_MODEL,
Request: request,
},
}
}
func (dma *ResourceModel) Deserialize(j map[string]interface{}, obj utils.DBObject) utils.DBObject {
b, err := json.Marshal(j)
if err != nil {
return nil
}
json.Unmarshal(b, obj)
return obj
}
func (dma *ResourceModel) Serialize(obj utils.DBObject) map[string]interface{} {
var m map[string]interface{}
b, err := json.Marshal(obj)
if err != nil {
return nil
}
json.Unmarshal(b, &m)
return m
}

View File

@@ -1,62 +0,0 @@
package resource_model
import (
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)
type ResourceModelMongoAccessor struct {
utils.AbstractAccessor // AbstractAccessor contains the basic fields of an accessor (model, caller)
}
/*
* Nothing special here, just the basic CRUD operations
*/
func NewAccessor() *ResourceModelMongoAccessor {
return &ResourceModelMongoAccessor{
utils.AbstractAccessor{
Type: tools.RESOURCE_MODEL,
Logger: logs.CreateLogger(tools.RESOURCE_MODEL.String()),
},
}
}
func (wfa *ResourceModelMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error) {
return utils.GenericDeleteOne(id, wfa)
}
func (wfa *ResourceModelMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
return utils.GenericUpdateOne(set, id, wfa, &ResourceModel{})
}
func (wfa *ResourceModelMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
return utils.GenericStoreOne(data, wfa)
}
func (wfa *ResourceModelMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
return utils.GenericStoreOne(data, wfa)
}
func (a *ResourceModelMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
return utils.GenericLoadOne[*ResourceModel](id, func(d utils.DBObject) (utils.DBObject, int, error) {
return d, 200, nil
}, a)
}
func (a *ResourceModelMongoAccessor) LoadAll(isDraft bool) ([]utils.ShallowDBObject, int, error) {
return utils.GenericLoadAll[*ResourceModel](func(d utils.DBObject) utils.ShallowDBObject {
return d
}, isDraft, a)
}
func (a *ResourceModelMongoAccessor) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) {
return utils.GenericSearch[*ResourceModel](filters, search,
&dbs.Filters{
Or: map[string][]dbs.Filter{
"resource_type": {{Operator: dbs.LIKE.String(), Value: search}},
},
}, func(d utils.DBObject) utils.ShallowDBObject { return d }, isDraft, a)
}

View File

@@ -2,9 +2,11 @@ package resources
import (
"errors"
+"fmt"
"time"
-"cloud.o-forge.io/core/oc-lib/models/common"
+"cloud.o-forge.io/core/oc-lib/models/common/enum"
+"cloud.o-forge.io/core/oc-lib/models/common/models"
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
@@ -15,34 +17,67 @@ import (
* it defines the resource storage
*/
type StorageResource struct {
-AbstractResource[*StorageResourceInstance] // AbstractResource contains the basic fields of an object (id, name)
-Type common.StorageType `bson:"type,omitempty"` // Type is the type of the storage
-TypeJSON string `json:"type,omitempty"`
-Acronym string `bson:"acronym,omitempty" json:"acronym,omitempty"` // Acronym is the acronym of the storage
+AbstractInstanciatedResource[*StorageResourceInstance] // AbstractResource contains the basic fields of an object (id, name)
+StorageType enum.StorageType `bson:"storage_type" json:"storage_type" default:"-1"` // Type is the type of the storage
+Acronym string `bson:"acronym,omitempty" json:"acronym,omitempty"` // Acronym is the acronym of the storage
}
func (d *StorageResource) GetAccessor(request *tools.APIRequest) utils.Accessor {
return NewAccessor[*StorageResource](tools.STORAGE_RESOURCE, request, func() utils.DBObject { return &StorageResource{} }) // Create a new instance of the accessor
}
-func (r *StorageResource) Transform() utils.DBObject {
-r.TypeJSON = r.Type.String()
-return r
-}
+func (r *StorageResource) GetType() string {
+return tools.STORAGE_RESOURCE.String()
+}
+func (abs *StorageResource) ConvertToPricedResource(
+t tools.DataType, request *tools.APIRequest) pricing.PricedItemITF {
+if t != tools.STORAGE_RESOURCE {
+return nil
+}
+p := abs.AbstractInstanciatedResource.ConvertToPricedResource(t, request)
+priced := p.(*PricedResource)
+return &PricedStorageResource{
+PricedResource: *priced,
+}
+}
type StorageResourceInstance struct {
ResourceInstance[*StorageResourcePartnership]
-Local bool `bson:"local" json:"local"`
-SecurityLevel string `bson:"security_level,omitempty" json:"security_level,omitempty"`
-SizeType common.StorageSize `bson:"size_type" json:"size_type" default:"0"` // SizeType is the type of the storage size
-SizeGB uint `bson:"size,omitempty" json:"size,omitempty"` // Size is the size of the storage
-Encryption bool `bson:"encryption,omitempty" json:"encryption,omitempty"` // Encryption is a flag that indicates if the storage is encrypted
-Redundancy string `bson:"redundancy,omitempty" json:"redundancy,omitempty"` // Redundancy is the redundancy of the storage
-Throughput string `bson:"throughput,omitempty" json:"throughput,omitempty"` // Throughput is the throughput of the storage
+Credentials *Credentials `json:"credentials,omitempty" bson:"credentials,omitempty"`
+Source string `bson:"source,omitempty" json:"source,omitempty"` // Source is the source of the storage
+Local bool `bson:"local" json:"local"`
+SecurityLevel string `bson:"security_level,omitempty" json:"security_level,omitempty"`
+SizeType enum.StorageSize `bson:"size_type" json:"size_type" default:"0"` // SizeType is the type of the storage size
+SizeGB int64 `bson:"size,omitempty" json:"size,omitempty"` // Size is the size of the storage
+Encryption bool `bson:"encryption,omitempty" json:"encryption,omitempty"` // Encryption is a flag that indicates if the storage is encrypted
+Redundancy string `bson:"redundancy,omitempty" json:"redundancy,omitempty"` // Redundancy is the redundancy of the storage
+Throughput string `bson:"throughput,omitempty" json:"throughput,omitempty"` // Throughput is the throughput of the storage
}
-func (i *StorageResourceInstance) GetID() string {
-return i.UUID
-}
+func (ri *StorageResourceInstance) ClearEnv() {
+ri.Credentials = nil
+ri.Env = []models.Param{}
+ri.Inputs = []models.Param{}
+ri.Outputs = []models.Param{}
+}
+func (ri *StorageResourceInstance) StoreDraftDefault() {
+found := false
+for _, p := range ri.ResourceInstance.Env {
+if p.Attr == "source" {
+found = true
+break
+}
+}
+if !found {
+ri.ResourceInstance.Env = append(ri.ResourceInstance.Env, models.Param{
+Attr: "source",
+Value: ri.Source,
+Readonly: true,
+})
+}
+ri.ResourceInstance.StoreDraftDefault()
+}
type StorageResourcePartnership struct {
@@ -59,8 +94,12 @@ const (
GARANTED_STORAGE
)
+func PrivilegeStoragePricingStrategyList() []PrivilegeStoragePricingStrategy {
+return []PrivilegeStoragePricingStrategy{BASIC_STORAGE, GARANTED_ON_DELAY_STORAGE, GARANTED_STORAGE}
+}
func (t PrivilegeStoragePricingStrategy) String() string {
-return [...]string{"BASIC_STORAGE", "GARANTED_ON_DELAY_STORAGE", "GARANTED_STORAGE"}[t]
+return [...]string{"NO MEMORY HOLDING", "KEEPED ON MEMORY GARANTED DURING DELAY", "KEEPED ON MEMORY GARANTED"}[t]
}
type StorageResourcePricingStrategy int
@@ -73,6 +112,14 @@ const (
PER_KB_STORED
)
+func StorageResourcePricingStrategyList() []StorageResourcePricingStrategy {
+return []StorageResourcePricingStrategy{PER_DATA_STORED, PER_TB_STORED, PER_GB_STORED, PER_MB_STORED, PER_KB_STORED}
+}
+func (t StorageResourcePricingStrategy) String() string {
+return [...]string{"PER DATA STORED", "PER TB STORED", "PER GB STORED", "PER MB STORED", "PER KB STORED"}[t]
+}
func (t StorageResourcePricingStrategy) GetStrategy() string {
return [...]string{"PER_DATA_STORED", "PER_GB_STORED", "PER_MB_STORED", "PER_KB_STORED"}[t]
}
@@ -98,7 +145,7 @@ func (t StorageResourcePricingStrategy) GetQuantity(amountOfDataGB float64) (flo
case PER_KB_STORED:
return amountOfDataGB * 1000000, nil
}
-return 0, errors.New("Pricing strategy not found")
+return 0, errors.New("pricing strategy not found")
}
type StorageResourcePricingProfile struct {
@@ -123,11 +170,20 @@ func (r *PricedStorageResource) GetType() tools.DataType {
}
func (r *PricedStorageResource) GetPrice() (float64, error) {
-if r.UsageStart == nil || r.UsageEnd == nil {
-return 0, errors.New("Usage start and end must be set")
+fmt.Println("GetPrice", r.UsageStart, r.UsageEnd)
+now := time.Now()
+if r.UsageStart == nil {
+r.UsageStart = &now
+}
+if r.UsageEnd == nil {
+add := r.UsageStart.Add(time.Duration(1 * time.Hour))
+r.UsageEnd = &add
}
if r.SelectedPricing == nil {
-return 0, errors.New("Selected pricing must be set")
+if len(r.PricingProfiles) == 0 {
+return 0, errors.New("pricing profile must be set on Priced Storage" + r.ResourceID)
+}
+r.SelectedPricing = &r.PricingProfiles[0]
}
pricing := *r.SelectedPricing
var err error
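A standalone sketch of the StoreDraftDefault behaviour added for storage instances above: a readonly "source" entry is appended to the instance environment only if none is present yet. The param type and helper are illustrative stand-ins, not oc-lib types.

package main

import "fmt"

// param mirrors the Attr/Value/Readonly triple used by the instances above.
type param struct {
	Attr     string
	Value    string
	Readonly bool
}

// ensureSourceParam reproduces the "inject once" pattern of StoreDraftDefault.
func ensureSourceParam(env []param, source string) []param {
	for _, p := range env {
		if p.Attr == "source" {
			return env // already declared, keep the draft untouched
		}
	}
	return append(env, param{Attr: "source", Value: source, Readonly: true})
}

func main() {
	env := ensureSourceParam(nil, "s3://bucket/dataset")
	env = ensureSourceParam(env, "s3://bucket/dataset") // second call is a no-op
	fmt.Println(env)
}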

View File

@@ -2,74 +2,45 @@ package resources
import (
"cloud.o-forge.io/core/oc-lib/models/common/pricing"
-"cloud.o-forge.io/core/oc-lib/models/resources/resource_model"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
)
-// we don't have any information about the accessor
-type abstractWorkflowResource struct {
-WorkflowID string `bson:"workflow_id,omitempty" json:"workflow_id,omitempty"` // WorkflowID is the ID of the native workflow
-}
+type WorkflowResourcePricingProfile struct{}
// WorkflowResource is a struct that represents a workflow resource
// it defines the resource workflow
type WorkflowResource struct {
-utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
-Logo string `json:"logo,omitempty" bson:"logo,omitempty" validate:"required"` // Logo is the logo of the resource
-Description string `json:"description,omitempty" bson:"description,omitempty"` // Description is the description of the resource
-ShortDescription string `json:"short_description,omitempty" bson:"short_description,omitempty" validate:"required"` // ShortDescription is the short description of the resource
-Owners []utils.Owner `json:"owners,omitempty" bson:"owners,omitempty"` // Owners is the list of owners of the resource
-ResourceModel *resource_model.ResourceModel `json:"resource_model,omitempty" bson:"resource_model,omitempty"` // ResourceModel is the model of the resource
-UsageRestrictions string `bson:"usage_restrictions,omitempty" json:"usage_restrictions,omitempty"`
-abstractWorkflowResource
+AbstractResource
+WorkflowID string `bson:"workflow_id,omitempty" json:"workflow_id,omitempty"` // WorkflowID is the ID of the native workflow
}
-func (r *WorkflowResource) Transform() utils.DBObject {
-return r
-}
-func (r *WorkflowResource) StoreDraftDefault() {
-r.IsDraft = true
-}
-func (r *WorkflowResource) CanUpdate(set utils.DBObject) (bool, utils.DBObject) {
-if r.IsDraft != set.IsDrafted() && set.IsDrafted() {
-return true, set // only state can be updated
-}
-return r.IsDraft != set.IsDrafted() && set.IsDrafted(), set
-}
-func (r *WorkflowResource) CanDelete() bool {
-return r.IsDraft // only draft bookings can be deleted
-}
-func (ao *WorkflowResource) GetAccessor(request *tools.APIRequest) utils.Accessor {
-return nil
-}
-func (abs *WorkflowResource) SetResourceModel(model *resource_model.ResourceModel) {
-abs.ResourceModel = model
-}
-func (w *WorkflowResource) Trim() {
-/*EMPTY AND PROUD TO BE*/
-}
+func (d *WorkflowResource) GetAccessor(request *tools.APIRequest) utils.Accessor {
+return NewAccessor[*ComputeResource](tools.WORKFLOW_RESOURCE, request, func() utils.DBObject { return &WorkflowResource{} })
+}
+func (r *WorkflowResource) GetType() string {
+return tools.WORKFLOW_RESOURCE.String()
+}
+func (d *WorkflowResource) ClearEnv() utils.DBObject {
+return d
+}
+func (d *WorkflowResource) Trim() {
+/* EMPTY */
+}
func (w *WorkflowResource) SetAllowedInstances(request *tools.APIRequest) {
-/*EMPTY AND PROUD TO BE*/
+/* EMPTY */
}
func (w *WorkflowResource) ConvertToPricedResource(
t tools.DataType, request *tools.APIRequest) pricing.PricedItemITF {
-instances := map[string]string{}
-profiles := map[string][]pricing.PricingProfileITF{}
return &PricedResource{
Name: w.Name,
Logo: w.Logo,
ResourceID: w.UUID,
ResourceType: t,
-InstancesRefs: instances,
-PricingProfiles: profiles,
CreatorID: w.CreatorID,
}
}

View File

@@ -27,15 +27,20 @@ const (
* every data in base root model should inherit from this struct (only exception is the ResourceModel)
*/
type AbstractObject struct {
UUID string `json:"id,omitempty" bson:"id,omitempty" validate:"required"`
Name string `json:"name,omitempty" bson:"name,omitempty" validate:"required"`
IsDraft bool `json:"is_draft" bson:"is_draft" default:"false"`
-CreatorID string `json:"creator_id" bson:"creator_id" default:"unknown"`
-CreationDate time.Time `json:"creation_date" bson:"creation_date"`
-UpdateDate time.Time `json:"update_date" bson:"update_date"`
-UpdaterID string `json:"updater_id" bson:"updater_id"`
-AccessMode AccessMode `json:"access_mode" bson:"access_mode" default:"0"`
+CreatorID string `json:"creator_id,omitempty" bson:"creator_id,omitempty"`
+UserCreatorID string `json:"user_creator_id,omitempty" bson:"user_creator_id,omitempty"`
+CreationDate time.Time `json:"creation_date,omitempty" bson:"creation_date,omitempty"`
+UpdateDate time.Time `json:"update_date,omitempty" bson:"update_date,omitempty"`
+UpdaterID string `json:"updater_id,omitempty" bson:"updater_id,omitempty"`
+UserUpdaterID string `json:"user_updater_id,omitempty" bson:"user_updater_id,omitempty"`
+AccessMode AccessMode `json:"access_mode" bson:"access_mode" default:"0"`
}
+func (ri *AbstractObject) GetAccessor(request *tools.APIRequest) Accessor {
+return nil
+}
func (r *AbstractObject) GenerateID() {
@@ -74,20 +79,25 @@ func (ao *AbstractObject) GetCreatorID() string {
return ao.CreatorID
}
-func (ao *AbstractObject) UpToDate(user string, create bool) {
+func (ao *AbstractObject) UpToDate(user string, peer string, create bool) {
ao.UpdateDate = time.Now()
-ao.UpdaterID = user
+ao.UpdaterID = peer
+ao.UserUpdaterID = user
if create {
ao.CreationDate = time.Now()
-ao.CreatorID = user
+ao.CreatorID = peer
+ao.UserCreatorID = user
}
}
func (ao *AbstractObject) VerifyAuth(request *tools.APIRequest) bool {
-return ao.AccessMode == Public || (request != nil && ao.CreatorID == request.Username)
+return ao.AccessMode == Public || (request != nil && ao.CreatorID == request.PeerID && request.PeerID != "")
}
func (ao *AbstractObject) GetObjectFilters(search string) *dbs.Filters {
+if search == "*" {
+search = ""
+}
return &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
@@ -120,6 +130,10 @@ type AbstractAccessor struct {
ResourceModelAccessor Accessor
}
+func (r *AbstractAccessor) ShouldVerifyAuth() bool {
+return true
+}
func (r *AbstractAccessor) GetRequest() *tools.APIRequest {
return r.Request
}
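A standalone sketch of the two-identifier bookkeeping introduced above: UpToDate now takes both a user and a peer, recording the peer as creator/updater and keeping the human user ID alongside it. The struct and method names are illustrative stand-ins, not the oc-lib types themselves.

package main

import (
	"fmt"
	"time"
)

// audit stands in for the CreatorID/UserCreatorID/UpdaterID/UserUpdaterID
// fields of AbstractObject shown above.
type audit struct {
	CreatorID, UserCreatorID string
	UpdaterID, UserUpdaterID string
	CreationDate, UpdateDate time.Time
}

// upToDate mirrors the new UpToDate(user, peer, create) split.
func (a *audit) upToDate(user, peer string, create bool) {
	a.UpdateDate = time.Now()
	a.UpdaterID = peer
	a.UserUpdaterID = user
	if create {
		a.CreationDate = time.Now()
		a.CreatorID = peer
		a.UserCreatorID = user
	}
}

func main() {
	var a audit
	a.upToDate("alice", "peer-42", true)
	fmt.Printf("created by user %s via peer %s\n", a.UserCreatorID, a.CreatorID)
}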

View File

@@ -2,7 +2,6 @@ package utils
import (
"errors"
-"fmt"
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/dbs/mongo"
@@ -14,11 +13,22 @@ type Owner struct {
Logo string `json:"logo,omitempty" bson:"logo,omitempty"`
}
+func VerifyAccess(a Accessor, id string) error {
+data, _, err := a.LoadOne(id)
+if err != nil {
+return err
+}
+if a.ShouldVerifyAuth() && !data.VerifyAuth(a.GetRequest()) {
+return errors.New("you are not allowed to access :" + a.GetType().String())
+}
+return nil
+}
// GenericLoadOne loads one object from the database (generic)
func GenericStoreOne(data DBObject, a Accessor) (DBObject, int, error) {
data.GenerateID()
data.StoreDraftDefault()
-data.UpToDate(a.GetUser(), true)
+data.UpToDate(a.GetUser(), a.GetPeerID(), true)
f := dbs.Filters{
Or: map[string][]dbs.Filter{
"abstractresource.abstractobject.name": {{
@@ -31,8 +41,8 @@ func GenericStoreOne(data DBObject, a Accessor) (DBObject, int, error) {
}},
},
}
-if !data.VerifyAuth(a.GetRequest()) {
-return nil, 403, errors.New("you are not allowed to access this collaborative area")
+if a.ShouldVerifyAuth() && !data.VerifyAuth(a.GetRequest()) {
+return nil, 403, errors.New("you are not allowed to access : " + a.GetType().String())
}
if cursor, _, _ := a.Search(&f, "", data.IsDrafted()); len(cursor) > 0 {
return nil, 409, errors.New(a.GetType().String() + " with name " + data.GetName() + " already exists")
@@ -53,14 +63,13 @@ func GenericStoreOne(data DBObject, a Accessor) (DBObject, int, error) {
func GenericDeleteOne(id string, a Accessor) (DBObject, int, error) {
res, code, err := a.LoadOne(id)
if !res.CanDelete() {
-return nil, 403, errors.New("you are not allowed to delete this collaborative area")
+return nil, 403, errors.New("you are not allowed to delete :" + a.GetType().String())
}
if err != nil {
-a.GetLogger().Error().Msg("Could not retrieve " + id + " to db. Error: " + err.Error())
return nil, code, err
}
-if !res.VerifyAuth(a.GetRequest()) {
-return nil, 403, errors.New("you are not allowed to access this collaborative area")
+if a.ShouldVerifyAuth() && !res.VerifyAuth(a.GetRequest()) {
+return nil, 403, errors.New("you are not allowed to access " + a.GetType().String())
}
_, code, err = mongo.MONGOService.DeleteOne(id, a.GetType().String())
if err != nil {
@@ -79,12 +88,12 @@ func GenericUpdateOne(set DBObject, id string, a Accessor, new DBObject) (DBObje
}
ok, newSet := r.CanUpdate(set)
if !ok {
-return nil, 403, errors.New("you are not allowed to delete this collaborative area")
+return nil, 403, errors.New("you are not allowed to delete :" + a.GetType().String())
}
set = newSet
-r.UpToDate(a.GetUser(), false)
-if !r.VerifyAuth(a.GetRequest()) {
-return nil, 403, errors.New("you are not allowed to access this collaborative area")
+r.UpToDate(a.GetUser(), a.GetPeerID(), false)
+if a.ShouldVerifyAuth() && !r.VerifyAuth(a.GetRequest()) {
+return nil, 403, errors.New("you are not allowed to access :" + a.GetType().String())
}
change := set.Serialize(set) // get the changes
loaded := r.Serialize(r) // get the loaded object
@@ -104,12 +113,11 @@ func GenericLoadOne[T DBObject](id string, f func(DBObject) (DBObject, int, erro
var data T
res_mongo, code, err := mongo.MONGOService.LoadOne(id, a.GetType().String())
if err != nil {
-a.GetLogger().Error().Msg("Could not retrieve " + id + " from db. Error: " + err.Error())
return nil, code, err
}
res_mongo.Decode(&data)
-if !data.VerifyAuth(a.GetRequest()) {
-return nil, 403, errors.New("you are not allowed to access this collaborative area")
+if a.ShouldVerifyAuth() && !data.VerifyAuth(a.GetRequest()) {
+return nil, 403, errors.New("you are not allowed to access :" + a.GetType().String())
}
return f(data)
}
@@ -118,14 +126,13 @@ func genericLoadAll[T DBObject](res *mgb.Cursor, code int, err error, onlyDraft
objs := []ShallowDBObject{}
var results []T
if err != nil {
-a.GetLogger().Error().Msg("Could not retrieve any from db. Error: " + err.Error())
return nil, code, err
}
if err = res.All(mongo.MngoCtx, &results); err != nil {
return nil, 404, err
}
for _, r := range results {
-if !r.VerifyAuth(a.GetRequest()) || f(r) == nil || (onlyDraft && !r.IsDrafted()) || (!onlyDraft && r.IsDrafted()) {
+if (a.ShouldVerifyAuth() && !r.VerifyAuth(a.GetRequest())) || f(r) == nil || (onlyDraft && !r.IsDrafted()) || (!onlyDraft && r.IsDrafted()) {
continue
}
objs = append(objs, f(r))
@@ -135,13 +142,12 @@ func genericLoadAll[T DBObject](res *mgb.Cursor, code int, err error, onlyDraft
func GenericLoadAll[T DBObject](f func(DBObject) ShallowDBObject, onlyDraft bool, wfa Accessor) ([]ShallowDBObject, int, error) {
res_mongo, code, err := mongo.MONGOService.LoadAll(wfa.GetType().String())
-fmt.Println("res_mongo", res_mongo)
return genericLoadAll[T](res_mongo, code, err, onlyDraft, f, wfa)
}
func GenericSearch[T DBObject](filters *dbs.Filters, search string, defaultFilters *dbs.Filters,
f func(DBObject) ShallowDBObject, onlyDraft bool, wfa Accessor) ([]ShallowDBObject, int, error) {
-if (filters == nil || len(filters.And) == 0 || len(filters.Or) == 0) && search != "" {
+if filters == nil && search != "" {
filters = defaultFilters
}
res_mongo, code, err := mongo.MONGOService.Search(filters, wfa.GetType().String())
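A minimal sketch of the ShouldVerifyAuth gate threaded through the generic helpers above: accessors that opt out of verification skip the auth check entirely, otherwise the object must accept the requester. The two tiny interfaces are illustrative stand-ins for the Accessor/DBObject methods used in the hunks.

package main

import (
	"errors"
	"fmt"
)

type authChecker interface {
	ShouldVerifyAuth() bool
}

type object interface {
	VerifyAuth(requester string) bool
}

// guard mirrors the "a.ShouldVerifyAuth() && !data.VerifyAuth(...)" pattern.
func guard(a authChecker, o object, requester, typeName string) error {
	if a.ShouldVerifyAuth() && !o.VerifyAuth(requester) {
		return errors.New("you are not allowed to access :" + typeName)
	}
	return nil
}

type accessor struct{ verify bool }

func (a accessor) ShouldVerifyAuth() bool { return a.verify }

type ownedObject struct{ owner string }

func (d ownedObject) VerifyAuth(requester string) bool { return requester == d.owner }

func main() {
	obj := ownedObject{owner: "peer-1"}
	fmt.Println(guard(accessor{verify: true}, obj, "peer-2", "purchase_resource"))  // denied
	fmt.Println(guard(accessor{verify: false}, obj, "peer-2", "purchase_resource")) // nil: check skipped
}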

View File

@@ -11,8 +11,8 @@ type ShallowDBObject interface {
GenerateID()
GetID() string
GetName() string
-Deserialize(j map[string]interface{}, obj DBObject) DBObject
Serialize(obj DBObject) map[string]interface{}
+Deserialize(j map[string]interface{}, obj DBObject) DBObject
}
// DBObject is an interface that defines the basic methods for a DBObject
@@ -20,32 +20,33 @@ type DBObject interface {
GenerateID()
GetID() string
GetName() string
-GetCreatorID() string
IsDrafted() bool
-StoreDraftDefault()
-CanUpdate(set DBObject) (bool, DBObject)
CanDelete() bool
-UpToDate(user string, create bool)
+StoreDraftDefault()
+GetCreatorID() string
+UpToDate(user string, peer string, create bool)
+CanUpdate(set DBObject) (bool, DBObject)
VerifyAuth(request *tools.APIRequest) bool
-Deserialize(j map[string]interface{}, obj DBObject) DBObject
Serialize(obj DBObject) map[string]interface{}
GetAccessor(request *tools.APIRequest) Accessor
+Deserialize(j map[string]interface{}, obj DBObject) DBObject
}
// Accessor is an interface that defines the basic methods for an Accessor
type Accessor interface {
-GetRequest() *tools.APIRequest
-GetType() tools.DataType
GetUser() string
GetPeerID() string
GetGroups() []string
+ShouldVerifyAuth() bool
+GetType() tools.DataType
GetLogger() *zerolog.Logger
GetCaller() *tools.HTTPCaller
-Search(filters *dbs.Filters, search string, isDraft bool) ([]ShallowDBObject, int, error)
-LoadAll(isDraft bool) ([]ShallowDBObject, int, error)
+GetRequest() *tools.APIRequest
LoadOne(id string) (DBObject, int, error)
DeleteOne(id string) (DBObject, int, error)
CopyOne(data DBObject) (DBObject, int, error)
StoreOne(data DBObject) (DBObject, int, error)
+LoadAll(isDraft bool) ([]ShallowDBObject, int, error)
UpdateOne(set DBObject, id string) (DBObject, int, error)
+Search(filters *dbs.Filters, search string, isDraft bool) ([]ShallowDBObject, int, error)
}


@@ -9,9 +9,49 @@ import (
// Graph is a struct that represents a graph // Graph is a struct that represents a graph
type Graph struct { type Graph struct {
Zoom float64 `bson:"zoom" json:"zoom" default:"1"` // Zoom is the graphical zoom of the graph Partial bool `json:"partial" default:"false"` // Partial is a flag that indicates if the graph is partial
Items map[string]GraphItem `bson:"items" json:"items" default:"{}" validate:"required"` // Items is the list of elements in the graph Zoom float64 `bson:"zoom" json:"zoom" default:"1"` // Zoom is the graphical zoom of the graph
Links []GraphLink `bson:"links" json:"links" default:"{}" validate:"required"` // Links is the list of links between elements in the graph Items map[string]GraphItem `bson:"items" json:"items" default:"{}" validate:"required"` // Items is the list of elements in the graph
Links []GraphLink `bson:"links" json:"links" default:"{}" validate:"required"` // Links is the list of links between elements in the graph
}
func (g *Graph) Clear(id string) {
realItems := map[string]GraphItem{}
for k, it := range g.Items {
if k == id {
realinks := []GraphLink{}
for _, link := range g.Links {
if link.Source.ID != id && link.Destination.ID != id {
realinks = append(realinks, link)
}
}
g.Links = realinks
g.Partial = true
} else {
realItems[k] = it
}
}
g.Items = realItems
}
func (wf *Graph) IsProcessing(item GraphItem) bool {
return item.Processing != nil
}
func (wf *Graph) IsCompute(item GraphItem) bool {
return item.Compute != nil
}
func (wf *Graph) IsData(item GraphItem) bool {
return item.Data != nil
}
func (wf *Graph) IsStorage(item GraphItem) bool {
return item.Storage != nil
}
func (wf *Graph) IsWorkflow(item GraphItem) bool {
return item.Workflow != nil
} }
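Graph.Clear removes a single item, drops every link that touches it, and flags the graph as Partial. A minimal, self-contained sketch of that behaviour with simplified stand-in types (not the oc-lib definitions):

package main

import "fmt"

// Simplified stand-ins for the graph types (assumptions for illustration).
type Position struct{ ID string }
type GraphLink struct{ Source, Destination Position }
type Graph struct {
	Partial bool
	Items   map[string]bool // the real type is map[string]GraphItem
	Links   []GraphLink
}

// Clear mirrors the logic added in the diff: remove the item with the given id,
// keep only the links that do not touch it, and mark the graph as partial.
func (g *Graph) Clear(id string) {
	kept := map[string]bool{}
	for k, it := range g.Items {
		if k == id {
			links := []GraphLink{}
			for _, l := range g.Links {
				if l.Source.ID != id && l.Destination.ID != id {
					links = append(links, l)
				}
			}
			g.Links = links
			g.Partial = true
		} else {
			kept[k] = it
		}
	}
	g.Items = kept
}

func main() {
	g := &Graph{
		Items: map[string]bool{"a": true, "b": true},
		Links: []GraphLink{{Source: Position{ID: "a"}, Destination: Position{ID: "b"}}},
	}
	g.Clear("b")
	fmt.Println(len(g.Items), len(g.Links), g.Partial) // 1 0 true
}

The Partial flag matters later in this change set: the workflow accessor refuses to store or update a graph marked partial.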
func (g *Graph) GetAverageTimeRelatedToProcessingActivity(start time.Time, processings []*resources.ProcessingResource, resource resources.ResourceInterface, func (g *Graph) GetAverageTimeRelatedToProcessingActivity(start time.Time, processings []*resources.ProcessingResource, resource resources.ResourceInterface,
@@ -105,71 +145,3 @@ func (g *Graph) GetResource(id string) (tools.DataType, resources.ResourceInterf
} }
return tools.INVALID, nil return tools.INVALID, nil
} }
// GraphItem is a struct that represents an item in a graph
type GraphItem struct {
ID string `bson:"id" json:"id" validate:"required"` // ID is the unique identifier of the item
Width float64 `bson:"width" json:"width" validate:"required"` // Width is the graphical width of the item
Height float64 `bson:"height" json:"height" validate:"required"` // Height is the graphical height of the item
Position Position `bson:"position" json:"position" validate:"required"` // Position is the graphical position of the item
*resources.ItemResource // ItemResource is the resource assigned to the item
}
func (g *GraphItem) GetResource() (tools.DataType, resources.ResourceInterface) {
if g.Data != nil {
return tools.DATA_RESOURCE, g.Data
} else if g.Compute != nil {
return tools.COMPUTE_RESOURCE, g.Compute
} else if g.Workflow != nil {
return tools.WORKFLOW_RESOURCE, g.Workflow
} else if g.Processing != nil {
return tools.PROCESSING_RESOURCE, g.Processing
} else if g.Storage != nil {
return tools.STORAGE_RESOURCE, g.Storage
}
return tools.INVALID, nil
}
// GraphLink is a struct that represents a link between two items in a graph
type GraphLink struct {
Source Position `bson:"source" json:"source" validate:"required"` // Source is the source graphical position of the link
Destination Position `bson:"destination" json:"destination" validate:"required"` // Destination is the destination graphical position of the link
Style *GraphLinkStyle `bson:"style,omitempty" json:"style,omitempty"` // Style is the graphical style of the link
}
// tool function to check whether a link connects a compute item to another resource
func (l *GraphLink) IsComputeLink(g Graph) (bool, string) {
if g.Items == nil {
return false, ""
}
if d, ok := g.Items[l.Source.ID]; ok && d.Compute != nil {
return true, d.Compute.UUID
}
if d, ok := g.Items[l.Destination.ID]; ok && d.Compute != nil {
return true, d.Compute.UUID
}
return false, ""
}
// GraphLinkStyle is a struct that represents the style of a link in a graph
type GraphLinkStyle struct {
Color int64 `bson:"color" json:"color"` // Color is the graphical color of the link (int description of a color, can be transposed to hex)
Stroke float64 `bson:"stroke" json:"stroke"` // Stroke is the graphical stroke of the link
Tension float64 `bson:"tension" json:"tension"` // Tension is the graphical tension of the link
HeadRadius float64 `bson:"head_radius" json:"head_radius"` // graphical pin radius
DashWidth float64 `bson:"dash_width" json:"dash_width"` // DashWidth is the graphical dash width of the link
DashSpace float64 `bson:"dash_space" json:"dash_space"` // DashSpace is the graphical dash space of the link
EndArrow Position `bson:"end_arrow" json:"end_arrow"` // EndArrow is the graphical end arrow of the link
StartArrow Position `bson:"start_arrow" json:"start_arrow"` // StartArrow is the graphical start arrow of the link
ArrowStyle int64 `bson:"arrow_style" json:"arrow_style"` // ArrowStyle is the graphical arrow style of the link (enum available in the UI)
ArrowDirection int64 `bson:"arrow_direction" json:"arrow_direction"` // ArrowDirection is the graphical arrow direction of the link (enum available in the UI)
StartArrowWidth float64 `bson:"start_arrow_width" json:"start_arrow_width"` // StartArrowWidth is the graphical start arrow width of the link
EndArrowWidth float64 `bson:"end_arrow_width" json:"end_arrow_width"` // EndArrowWidth is the graphical end arrow width of the link
}
// Position is a struct that represents a graphical position
type Position struct {
ID string `json:"id" bson:"id"` // ID represents the ItemID (optional)
X float64 `json:"x" bson:"x" validate:"required"` // X is the graphical x position
Y float64 `json:"y" bson:"y" validate:"required"` // Y is the graphical y position
}


@@ -0,0 +1,38 @@
package graph
import (
"cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/tools"
)
// GraphItem is a struct that represents an item in a graph
type GraphItem struct {
ID string `bson:"id" json:"id" validate:"required"` // ID is the unique identifier of the item
Width float64 `bson:"width" json:"width" validate:"required"` // Width is the graphical width of the item
Height float64 `bson:"height" json:"height" validate:"required"` // Height is the graphical height of the item
Position Position `bson:"position" json:"position" validate:"required"` // Position is the graphical position of the item
*resources.ItemResource // ItemResource is the resource assigned to the item
}
func (g *GraphItem) GetResource() (tools.DataType, resources.ResourceInterface) {
if g.Data != nil {
return tools.DATA_RESOURCE, g.Data
} else if g.Compute != nil {
return tools.COMPUTE_RESOURCE, g.Compute
} else if g.Workflow != nil {
return tools.WORKFLOW_RESOURCE, g.Workflow
} else if g.Processing != nil {
return tools.PROCESSING_RESOURCE, g.Processing
} else if g.Storage != nil {
return tools.STORAGE_RESOURCE, g.Storage
}
return tools.INVALID, nil
}
func (g *GraphItem) Clear() {
g.Data = nil
g.Compute = nil
g.Workflow = nil
g.Processing = nil
g.Storage = nil
}
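GraphItem.GetResource resolves which of the five optional resource pointers is set, in a fixed priority order, and returns the matching DataType. A simplified usage-style sketch (booleans stand in for the typed resource pointers of resources.ItemResource):

package main

import "fmt"

// Simplified stand-ins (assumptions): the real item embeds *resources.ItemResource
// with typed pointers; here plain booleans mark which slot is set.
type DataType int

const (
	INVALID DataType = iota
	DATA_RESOURCE
	COMPUTE_RESOURCE
	WORKFLOW_RESOURCE
	PROCESSING_RESOURCE
	STORAGE_RESOURCE
)

type GraphItem struct {
	Data, Compute, Workflow, Processing, Storage bool
}

// GetResourceType mirrors the priority order used by GraphItem.GetResource:
// data, then compute, workflow, processing, storage; anything else is INVALID.
func (g GraphItem) GetResourceType() DataType {
	switch {
	case g.Data:
		return DATA_RESOURCE
	case g.Compute:
		return COMPUTE_RESOURCE
	case g.Workflow:
		return WORKFLOW_RESOURCE
	case g.Processing:
		return PROCESSING_RESOURCE
	case g.Storage:
		return STORAGE_RESOURCE
	}
	return INVALID
}

func main() {
	fmt.Println(GraphItem{Processing: true}.GetResourceType() == PROCESSING_RESOURCE) // true
	fmt.Println(GraphItem{}.GetResourceType() == INVALID)                             // true: no resource slot set
}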


@@ -0,0 +1,56 @@
package graph
import "cloud.o-forge.io/core/oc-lib/models/common/models"
type StorageProcessingGraphLink struct {
Write bool `json:"write" bson:"write"`
Source string `json:"source" bson:"source"`
Destination string `json:"destination" bson:"destination"`
FileName string `json:"filename" bson:"filename"`
}
// GraphLink is a struct that represents a link between two items in a graph
type GraphLink struct {
Source Position `bson:"source" json:"source" validate:"required"` // Source is the source graphical position of the link
Destination Position `bson:"destination" json:"destination" validate:"required"` // Destination is the destination graphical position of the link
Style *GraphLinkStyle `bson:"style,omitempty" json:"style,omitempty"` // Style is the graphical style of the link
StorageLinkInfos []StorageProcessingGraphLink `bson:"storage_link_infos,omitempty" json:"storage_link_infos,omitempty"` // StorageLinkInfo is the storage link info
Env []models.Param `json:"env" bson:"env"`
}
// tool function to check whether a link connects a compute item to another resource
func (l *GraphLink) IsComputeLink(g Graph) (bool, string) {
if g.Items == nil {
return false, ""
}
if d, ok := g.Items[l.Source.ID]; ok && d.Compute != nil {
return true, d.Compute.UUID
}
if d, ok := g.Items[l.Destination.ID]; ok && d.Compute != nil {
return true, d.Compute.UUID
}
return false, ""
}
// GraphLinkStyle is a struct that represents the style of a link in a graph
type GraphLinkStyle struct {
Color int64 `bson:"color" json:"color"` // Color is the graphical color of the link (int description of a color, can be transposed to hex)
Stroke float64 `bson:"stroke" json:"stroke"` // Stroke is the graphical stroke of the link
Tension float64 `bson:"tension" json:"tension"` // Tension is the graphical tension of the link
HeadRadius float64 `bson:"head_radius" json:"head_radius"` // graphical pin radius
DashWidth float64 `bson:"dash_width" json:"dash_width"` // DashWidth is the graphical dash width of the link
DashSpace float64 `bson:"dash_space" json:"dash_space"` // DashSpace is the graphical dash space of the link
EndArrow Position `bson:"end_arrow" json:"end_arrow"` // EndArrow is the graphical end arrow of the link
StartArrow Position `bson:"start_arrow" json:"start_arrow"` // StartArrow is the graphical start arrow of the link
ArrowStyle int64 `bson:"arrow_style" json:"arrow_style"` // ArrowStyle is the graphical arrow style of the link (enum available in the UI)
ArrowDirection int64 `bson:"arrow_direction" json:"arrow_direction"` // ArrowDirection is the graphical arrow direction of the link (enum available in the UI)
StartArrowWidth float64 `bson:"start_arrow_width" json:"start_arrow_width"` // StartArrowWidth is the graphical start arrow width of the link
EndArrowWidth float64 `bson:"end_arrow_width" json:"end_arrow_width"` // EndArrowWidth is the graphical end arrow width of the link
}
// Position is a struct that represents a graphical position
type Position struct {
ID string `json:"id" bson:"id"` // ID represents the ItemID (optional)
X float64 `json:"x" bson:"x" validate:"required"` // X is the graphical x position
Y float64 `json:"y" bson:"y" validate:"required"` // Y is the graphical y position
}
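IsComputeLink answers "does this link touch a compute item?" by looking up both endpoints in the graph's item map and returning the compute's UUID when it does. A minimal sketch with simplified stand-in types (not the oc-lib ones):

package main

import "fmt"

// Simplified stand-ins for illustration.
type Position struct{ ID string }
type Item struct{ ComputeUUID string } // empty string means "not a compute item"
type Graph struct{ Items map[string]Item }
type GraphLink struct{ Source, Destination Position }

// IsComputeLink mirrors the helper in the diff: a link "is a compute link" when
// either endpoint resolves to an item carrying a compute resource.
func (l GraphLink) IsComputeLink(g Graph) (bool, string) {
	if g.Items == nil {
		return false, ""
	}
	if d, ok := g.Items[l.Source.ID]; ok && d.ComputeUUID != "" {
		return true, d.ComputeUUID
	}
	if d, ok := g.Items[l.Destination.ID]; ok && d.ComputeUUID != "" {
		return true, d.ComputeUUID
	}
	return false, ""
}

func main() {
	g := Graph{Items: map[string]Item{"c1": {ComputeUUID: "uuid-42"}, "p1": {}}}
	l := GraphLink{Source: Position{ID: "p1"}, Destination: Position{ID: "c1"}}
	fmt.Println(l.IsComputeLink(g)) // true uuid-42
}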


@@ -2,10 +2,10 @@ package workflow
import ( import (
"errors" "errors"
"fmt"
"time" "time"
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area" "cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
"cloud.o-forge.io/core/oc-lib/models/common"
"cloud.o-forge.io/core/oc-lib/models/common/pricing" "cloud.o-forge.io/core/oc-lib/models/common/pricing"
"cloud.o-forge.io/core/oc-lib/models/peer" "cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/resources" "cloud.o-forge.io/core/oc-lib/models/resources"
@@ -15,24 +15,23 @@ import (
) )
/* /*
* AbstractWorkflow is a struct that represents a workflow for resource or native workflow * Workflow is a struct that represents a workflow
* Warning: there are 2 types of workflows, the resource workflow and the native workflow * it defines the native workflow
* native workflow is the one that you create to schedule an execution
* resource workflow is the one that is created to set our native workflow in catalog
*/ */
type AbstractWorkflow struct { type Workflow struct {
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
resources.ResourceSet resources.ResourceSet
Graph *graph.Graph `bson:"graph,omitempty" json:"graph,omitempty"` // Graph UI & logic representation of the workflow Graph *graph.Graph `bson:"graph,omitempty" json:"graph,omitempty"` // Graph UI & logic representation of the workflow
ScheduleActive bool `json:"schedule_active" bson:"schedule_active"` // ScheduleActive is a flag that indicates if the schedule is active, if not the workflow is not scheduled and no execution or booking will be set ScheduleActive bool `json:"schedule_active" bson:"schedule_active"` // ScheduleActive is a flag that indicates if the schedule is active, if not the workflow is not scheduled and no execution or booking will be set
// Schedule *WorkflowSchedule `bson:"schedule,omitempty" json:"schedule,omitempty"` // Schedule is the schedule of the workflow // Schedule *WorkflowSchedule `bson:"schedule,omitempty" json:"schedule,omitempty"` // Schedule is the schedule of the workflow
Shared []string `json:"shared,omitempty" bson:"shared,omitempty"` // Shared is the ID of the shared workflow Shared []string `json:"shared,omitempty" bson:"shared,omitempty"` // Shared is the ID of the shared workflow // AbstractWorkflow contains the basic fields of a workflow
} }
func (d *Workflow) GetAccessor(request *tools.APIRequest) utils.Accessor { func (d *Workflow) GetAccessor(request *tools.APIRequest) utils.Accessor {
return NewAccessor(request) // Create a new instance of the accessor return NewAccessor(request) // Create a new instance of the accessor
} }
func (w *AbstractWorkflow) GetGraphItems(f func(item graph.GraphItem) bool) (list_datas []graph.GraphItem) { func (w *Workflow) GetGraphItems(f func(item graph.GraphItem) bool) (list_datas []graph.GraphItem) {
for _, item := range w.Graph.Items { for _, item := range w.Graph.Items {
if f(item) { if f(item) {
list_datas = append(list_datas, item) list_datas = append(list_datas, item)
@@ -41,18 +40,7 @@ func (w *AbstractWorkflow) GetGraphItems(f func(item graph.GraphItem) bool) (lis
return return
} }
func (w *AbstractWorkflow) GetResources(f func(item graph.GraphItem) bool) map[string]resources.ResourceInterface { func (w *Workflow) GetPricedItem(f func(item graph.GraphItem) bool, request *tools.APIRequest) map[string]pricing.PricedItemITF {
list_datas := map[string]resources.ResourceInterface{}
for _, item := range w.Graph.Items {
if f(item) {
_, res := item.GetResource()
list_datas[res.GetID()] = res
}
}
return list_datas
}
func (w *AbstractWorkflow) GetPricedItem(f func(item graph.GraphItem) bool, request *tools.APIRequest) map[string]pricing.PricedItemITF {
list_datas := map[string]pricing.PricedItemITF{} list_datas := map[string]pricing.PricedItemITF{}
for _, item := range w.Graph.Items { for _, item := range w.Graph.Items {
if f(item) { if f(item) {
@@ -64,8 +52,13 @@ func (w *AbstractWorkflow) GetPricedItem(f func(item graph.GraphItem) bool, requ
return list_datas return list_datas
} }
func (w *AbstractWorkflow) GetByRelatedProcessing(processingID string, g func(item graph.GraphItem) bool) []resources.ResourceInterface { type Related struct {
storages := []resources.ResourceInterface{} Node resources.ResourceInterface
Links []graph.GraphLink
}
func (w *Workflow) GetByRelatedProcessing(processingID string, g func(item graph.GraphItem) bool) map[string]Related {
related := map[string]Related{}
for _, link := range w.Graph.Links { for _, link := range w.Graph.Links {
nodeID := link.Destination.ID nodeID := link.Destination.ID
var node resources.ResourceInterface var node resources.ResourceInterface
@@ -79,47 +72,16 @@ func (w *AbstractWorkflow) GetByRelatedProcessing(processingID string, g func(it
_, node = item.GetResource() // we are looking for the storage as destination _, node = item.GetResource() // we are looking for the storage as destination
} }
if processingID == nodeID && node != nil { // if the storage is linked to the processing if processingID == nodeID && node != nil { // if the storage is linked to the processing
storages = append(storages, node) if _, ok := related[processingID]; !ok {
related[processingID] = Related{}
}
rel := related[node.GetID()]
rel.Node = node
rel.Links = append(rel.Links, link)
related[processingID] = rel
} }
} }
return storages return related
}
func (wf *AbstractWorkflow) IsProcessing(item graph.GraphItem) bool {
return item.Processing != nil
}
func (wf *AbstractWorkflow) IsCompute(item graph.GraphItem) bool {
return item.Compute != nil
}
func (wf *AbstractWorkflow) IsData(item graph.GraphItem) bool {
return item.Data != nil
}
func (wf *AbstractWorkflow) IsStorage(item graph.GraphItem) bool {
return item.Storage != nil
}
func (wf *AbstractWorkflow) IsWorkflow(item graph.GraphItem) bool {
return item.Workflow != nil
}
/*
* Workflow is a struct that represents a workflow
* it defines the native workflow
*/
type Workflow struct {
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
AbstractWorkflow // AbstractWorkflow contains the basic fields of a workflow
}
func (w *Workflow) getPricedItem(item graph.GraphItem, request *tools.APIRequest) pricing.PricedItemITF {
dt, res := item.GetResource()
if dt == tools.INVALID {
return nil
}
return res.ConvertToPricedResource(dt, request)
} }
func (ao *Workflow) VerifyAuth(request *tools.APIRequest) bool { func (ao *Workflow) VerifyAuth(request *tools.APIRequest) bool {
@@ -129,8 +91,9 @@ func (ao *Workflow) VerifyAuth(request *tools.APIRequest) bool {
shared, code, _ := shallow_collaborative_area.NewAccessor(request).LoadOne(shared) shared, code, _ := shallow_collaborative_area.NewAccessor(request).LoadOne(shared)
if code != 200 || shared == nil { if code != 200 || shared == nil {
isAuthorized = false isAuthorized = false
} else {
isAuthorized = shared.VerifyAuth(request)
} }
isAuthorized = shared.VerifyAuth(request)
} }
} }
return ao.AbstractObject.VerifyAuth(request) || isAuthorized return ao.AbstractObject.VerifyAuth(request) || isAuthorized
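The reworked VerifyAuth only trusts a shared collaborative area's verdict when that area actually loads (code 200); the final result is "owner check OR shared-area check". A compact sketch of that rule, with the accessor load stubbed out (the loop mirrors the fragment visible in this hunk, where each area's verdict overwrites the previous one):

package main

import "fmt"

// verifyAuth sketches the authorisation rule from the diff: the caller is
// authorised if the object's own check passes, or if the shared collaborative
// area both loads (code 200) and authorises the request.
// load and ownCheck stand in for the shallow accessor and AbstractObject logic.
func verifyAuth(shared []string, ownCheck bool,
	load func(id string) (authorised bool, code int)) bool {
	isAuthorized := false
	for _, id := range shared {
		ok, code := load(id)
		if code != 200 {
			isAuthorized = false
		} else {
			isAuthorized = ok
		}
	}
	return ownCheck || isAuthorized
}

func main() {
	load := func(id string) (bool, int) {
		if id == "area-1" {
			return true, 200
		}
		return false, 404
	}
	fmt.Println(verifyAuth([]string{"area-1"}, false, load))  // true: the shared area authorises the request
	fmt.Println(verifyAuth([]string{"missing"}, false, load)) // false: the area did not load, so only the owner check could pass
}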
@@ -165,124 +128,91 @@ func (wfa *Workflow) CheckBooking(caller *tools.HTTPCaller) (bool, error) {
return true, nil return true, nil
} }
func (wf *Workflow) Planify(start time.Time, end *time.Time, request *tools.APIRequest) (float64, map[tools.DataType][]pricing.PricedItemITF, *Workflow, error) { func (wf *Workflow) Planify(start time.Time, end *time.Time, request *tools.APIRequest) (float64, map[tools.DataType]map[string]pricing.PricedItemITF, *Workflow, error) {
processings := []*resources.ProcessingResource{} priceds := map[tools.DataType]map[string]pricing.PricedItemITF{}
priceds := map[tools.DataType][]pricing.PricedItemITF{} ps, priceds, err := plan[*resources.ProcessingResource](tools.PROCESSING_RESOURCE, wf, priceds, request, wf.Graph.IsProcessing,
priceds[tools.PROCESSING_RESOURCE] = []pricing.PricedItemITF{} func(res resources.ResourceInterface, priced pricing.PricedItemITF) (time.Time, float64) {
for _, item := range wf.GetGraphItems(wf.IsProcessing) { return start.Add(time.Duration(wf.Graph.GetAverageTimeProcessingBeforeStart(0, res.GetID(), request)) * time.Second), priced.GetExplicitDurationInS()
dt, realItem := item.GetResource() }, func(started time.Time, duration float64) *time.Time {
if realItem == nil { s := started.Add(time.Duration(duration))
return 0, priceds, nil, errors.New("could not load the processing resource") return &s
} })
priced := realItem.ConvertToPricedResource(dt, request) if err != nil {
timeFromStartS := wf.Graph.GetAverageTimeProcessingBeforeStart(0, realItem.GetID(), request) return 0, priceds, nil, err
started := start.Add(time.Duration(timeFromStartS) * time.Second)
priced.SetLocationStart(started)
priced.SetLocationEnd(started.Add(time.Duration(priced.GetExplicitDurationInS())))
processings = append(processings, realItem.(*resources.ProcessingResource))
priceds[tools.PROCESSING_RESOURCE] = append(priceds[tools.PROCESSING_RESOURCE], priced)
} }
priceds[tools.DATA_RESOURCE] = []pricing.PricedItemITF{} if _, priceds, err = plan[resources.ResourceInterface](tools.DATA_RESOURCE, wf, priceds, request, wf.Graph.IsData,
for _, item := range wf.GetGraphItems(wf.IsData) { func(res resources.ResourceInterface, priced pricing.PricedItemITF) (time.Time, float64) {
dt, realItem := item.GetResource() return start, 0
if realItem == nil { }, func(started time.Time, duration float64) *time.Time {
continue return end
} }); err != nil {
priced := realItem.ConvertToPricedResource(dt, request) return 0, priceds, nil, err
priced.SetLocationStart(start)
priced.SetLocationEnd(*end)
priceds[tools.PROCESSING_RESOURCE] = append(priceds[tools.PROCESSING_RESOURCE], priced)
} }
for _, f := range []func(graph.GraphItem) bool{wf.IsStorage, wf.IsCompute} { for k, f := range map[tools.DataType]func(graph.GraphItem) bool{tools.STORAGE_RESOURCE: wf.Graph.IsStorage, tools.COMPUTE_RESOURCE: wf.Graph.IsCompute} {
for _, item := range wf.GetGraphItems(f) { if _, priceds, err = plan[resources.ResourceInterface](k, wf, priceds, request, f,
dt, r := item.GetResource() func(res resources.ResourceInterface, priced pricing.PricedItemITF) (time.Time, float64) {
if r == nil { nearestStart, longestDuration := wf.Graph.GetAverageTimeRelatedToProcessingActivity(start, ps, res, func(i graph.GraphItem) (r resources.ResourceInterface) {
continue
}
if priceds[dt] == nil {
priceds[dt] = []pricing.PricedItemITF{}
}
priced := r.ConvertToPricedResource(dt, request)
nearestStart, longestDuration := wf.Graph.GetAverageTimeRelatedToProcessingActivity(start, processings, r,
func(i graph.GraphItem) resources.ResourceInterface {
if f(i) { if f(i) {
_, r := i.GetResource() _, r = i.GetResource()
return r
} else {
return nil
} }
return r
}, request) }, request)
started := start.Add(time.Duration(nearestStart) * time.Second) return start.Add(time.Duration(nearestStart) * time.Second), longestDuration
priced.SetLocationStart(started) }, func(started time.Time, duration float64) *time.Time {
if longestDuration >= 0 { s := started.Add(time.Duration(duration))
priced.SetLocationEnd(started.Add(time.Duration(longestDuration))) return &s
} }); err != nil {
priceds[dt] = append(priceds[dt], priced) return 0, priceds, nil, err
} }
} }
longest := wf.getLongestTime(end, priceds, request) longest := common.GetPlannerLongestTime(end, priceds, request)
priceds[tools.WORKFLOW_RESOURCE] = []pricing.PricedItemITF{} if _, priceds, err = plan[resources.ResourceInterface](tools.WORKFLOW_RESOURCE, wf, priceds, request, wf.Graph.IsWorkflow,
for _, item := range wf.GetGraphItems(wf.IsWorkflow) { func(res resources.ResourceInterface, priced pricing.PricedItemITF) (time.Time, float64) {
access := NewAccessor(nil) start := start.Add(time.Duration(common.GetPlannerNearestStart(start, priceds, request)) * time.Second)
_, r := item.GetResource() longest := float64(-1)
if r == nil { r, code, err := res.GetAccessor(request).LoadOne(res.GetID())
return 0, priceds, nil, errors.New("could not load the workflow") if code != 200 || err != nil {
} return start, longest
priced := r.ConvertToPricedResource(tools.WORKFLOW_RESOURCE, request) }
res, code, err := access.LoadOne(r.GetID()) if neoLongest, _, _, err := r.(*Workflow).Planify(start, end, request); err != nil {
if code != 200 || err != nil { return start, longest
return 0, priceds, nil, errors.New("could not load the workflow with id: " + fmt.Sprintf("%v", err.Error())) } else if neoLongest > longest {
} longest = neoLongest
neoLongest := float64(0) }
innerWF := res.(*Workflow) return start.Add(time.Duration(common.GetPlannerNearestStart(start, priceds, request)) * time.Second), longest
neoLongest, _, innerWF, err = innerWF.Planify(start, end, request) }, func(start time.Time, longest float64) *time.Time {
if neoLongest > longest { s := start.Add(time.Duration(longest) * time.Second)
longest = neoLongest return &s
} }); err != nil {
started := start.Add(time.Duration(wf.getNearestStart(start, priceds, request)) * time.Second) return 0, priceds, nil, err
priced.SetLocationStart(started)
durationE := time.Duration(longest)
if durationE < 0 {
continue
}
ended := start.Add(durationE * time.Second)
priced.SetLocationEnd(ended)
priceds[tools.WORKFLOW_RESOURCE] = append(priceds[tools.WORKFLOW_RESOURCE], priced)
} }
return longest, priceds, wf, nil return longest, priceds, wf, nil
} }
func (wf *Workflow) getNearestStart(start time.Time, priceds map[tools.DataType][]pricing.PricedItemITF, request *tools.APIRequest) float64 { func plan[T resources.ResourceInterface](dt tools.DataType, wf *Workflow, priceds map[tools.DataType]map[string]pricing.PricedItemITF, request *tools.APIRequest,
near := float64(10000000000) f func(graph.GraphItem) bool, start func(resources.ResourceInterface, pricing.PricedItemITF) (time.Time, float64), end func(time.Time, float64) *time.Time) ([]T, map[tools.DataType]map[string]pricing.PricedItemITF, error) {
for _, items := range priceds { resources := []T{}
for _, priced := range items { for _, item := range wf.GetGraphItems(f) {
if priced.GetLocationStart() == nil { if priceds[dt] == nil {
continue priceds[dt] = map[string]pricing.PricedItemITF{}
} }
newS := priced.GetLocationStart() dt, realItem := item.GetResource()
if newS.Sub(start).Seconds() < near { if realItem == nil {
near = newS.Sub(start).Seconds() return resources, priceds, errors.New("could not load the processing resource")
}
priced := realItem.ConvertToPricedResource(dt, request)
started, duration := start(realItem, priced)
priced.SetLocationStart(started)
if duration >= 0 {
if e := end(started, duration); e != nil {
priced.SetLocationEnd(*e)
} }
} }
// get the nearest start from start var if e := end(started, priced.GetExplicitDurationInS()); e != nil {
} priced.SetLocationEnd(*e)
return near
}
func (wf *Workflow) getLongestTime(end *time.Time, priceds map[tools.DataType][]pricing.PricedItemITF, request *tools.APIRequest) float64 {
if end == nil {
return -1
}
longestTime := float64(0)
for _, priced := range priceds[tools.PROCESSING_RESOURCE] {
if priced.GetLocationEnd() == nil {
continue
} }
newS := priced.GetLocationEnd() resources = append(resources, realItem.(T))
if longestTime < newS.Sub(*end).Seconds() { priceds[dt][item.ID] = priced
longestTime = newS.Sub(*end).Seconds()
}
// get the nearest start from start var
} }
return longestTime return resources, priceds, nil
} }


@@ -1,6 +1,8 @@
package workflow package workflow
import ( import (
"errors"
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/logs" "cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area" "cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
@@ -47,7 +49,7 @@ func (a *workflowMongoAccessor) DeleteOne(id string) (utils.DBObject, int, error
a.execute(res.(*Workflow), true, false) // up to date the workspace for the workflow a.execute(res.(*Workflow), true, false) // up to date the workspace for the workflow
a.share(res.(*Workflow), true, a.GetCaller()) a.share(res.(*Workflow), true, a.GetCaller())
} }
return res, code, err return a.verifyResource(res), code, err
} }
/* /*
@@ -89,19 +91,27 @@ func (a *workflowMongoAccessor) share(realData *Workflow, delete bool, caller *t
// UpdateOne updates a workflow in the database // UpdateOne updates a workflow in the database
func (a *workflowMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) { func (a *workflowMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
// avoid the update if the schedule is the same // avoid the update if the schedule is the same
set = a.verifyResource(set)
if set.(*Workflow).Graph != nil && set.(*Workflow).Graph.Partial {
return nil, 403, errors.New("you are not allowed to update a partial workflow")
}
res, code, err := utils.GenericUpdateOne(set, id, a, &Workflow{}) res, code, err := utils.GenericUpdateOne(set, id, a, &Workflow{})
if code != 200 { if code != 200 {
return nil, code, err return nil, code, err
} }
workflow := res.(*Workflow) workflow := res.(*Workflow)
a.execute(workflow, false, false) // update the workspace for the workflow a.execute(workflow, false, true) // update the workspace for the workflow
a.share(workflow, false, a.GetCaller()) // share the update to the peers a.share(workflow, false, a.GetCaller()) // share the update to the peers
return res, code, nil return res, code, nil
} }
// StoreOne stores a workflow in the database // StoreOne stores a workflow in the database
func (a *workflowMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) { func (a *workflowMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
data = a.verifyResource(data)
d := data.(*Workflow) d := data.(*Workflow)
if d.Graph != nil && d.Graph.Partial {
return nil, 403, errors.New("you are not allowed to update a partial workflow")
}
res, code, err := utils.GenericStoreOne(d, a) res, code, err := utils.GenericStoreOne(d, a)
if err != nil || code != 200 { if err != nil || code != 200 {
return nil, code, err return nil, code, err
@@ -109,19 +119,25 @@ func (a *workflowMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, i
workflow := res.(*Workflow) workflow := res.(*Workflow)
a.share(workflow, false, a.GetCaller()) // share the creation to the peers a.share(workflow, false, a.GetCaller()) // share the creation to the peers
a.execute(workflow, false, false) // store the workspace for the workflow a.execute(workflow, false, true) // store the workspace for the workflow
return res, code, nil return res, code, nil
} }
// CopyOne copies a workflow in the database // CopyOne copies a workflow in the database
func (a *workflowMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) { func (a *workflowMongoAccessor) CopyOne(data utils.DBObject) (utils.DBObject, int, error) {
wf := data.(*Workflow)
for _, item := range wf.Graph.Items {
_, obj := item.GetResource()
if obj != nil {
obj.ClearEnv()
}
}
return utils.GenericStoreOne(data, a) return utils.GenericStoreOne(data, a)
} }
// execute is a function that executes a workflow // execute is a function that executes a workflow
// it stores the workflow resources in a specific workspace to never have a conflict in UI and logic // it stores the workflow resources in a specific workspace to never have a conflict in UI and logic
func (a *workflowMongoAccessor) execute(workflow *Workflow, delete bool, active bool) { func (a *workflowMongoAccessor) execute(workflow *Workflow, delete bool, active bool) {
filters := &dbs.Filters{ filters := &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by standard workspace name attached to a workflow Or: map[string][]dbs.Filter{ // filter by standard workspace name attached to a workflow
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: workflow.Name + "_workspace"}}, "abstractobject.name": {{Operator: dbs.LIKE.String(), Value: workflow.Name + "_workspace"}},
@@ -173,5 +189,36 @@ func (a *workflowMongoAccessor) LoadAll(isDraft bool) ([]utils.ShallowDBObject,
} }
func (a *workflowMongoAccessor) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) { func (a *workflowMongoAccessor) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) {
return utils.GenericSearch[*Workflow](filters, search, (&Workflow{}).GetObjectFilters(search), func(d utils.DBObject) utils.ShallowDBObject { return d }, isDraft, a) return utils.GenericSearch[*Workflow](filters, search, (&Workflow{}).GetObjectFilters(search), func(d utils.DBObject) utils.ShallowDBObject { return a.verifyResource(d) }, isDraft, a)
}
func (a *workflowMongoAccessor) verifyResource(obj utils.DBObject) utils.DBObject {
wf := obj.(*Workflow)
if wf.Graph == nil {
return wf
}
for _, item := range wf.Graph.Items {
t, resource := item.GetResource()
if resource == nil {
continue
}
var access utils.Accessor
if t == tools.COMPUTE_RESOURCE {
access = resources.NewAccessor[*resources.ComputeResource](t, a.GetRequest(), func() utils.DBObject { return &resources.ComputeResource{} })
} else if t == tools.PROCESSING_RESOURCE {
access = resources.NewAccessor[*resources.ProcessingResource](t, a.GetRequest(), func() utils.DBObject { return &resources.ProcessingResource{} })
} else if t == tools.STORAGE_RESOURCE {
access = resources.NewAccessor[*resources.StorageResource](t, a.GetRequest(), func() utils.DBObject { return &resources.StorageResource{} })
} else if t == tools.WORKFLOW_RESOURCE {
access = resources.NewAccessor[*resources.WorkflowResource](t, a.GetRequest(), func() utils.DBObject { return &resources.WorkflowResource{} })
} else if t == tools.DATA_RESOURCE {
access = resources.NewAccessor[*resources.DataResource](t, a.GetRequest(), func() utils.DBObject { return &resources.DataResource{} })
} else {
wf.Graph.Clear(resource.GetID())
}
if error := utils.VerifyAccess(access, resource.GetID()); error != nil {
wf.Graph.Clear(resource.GetID())
}
}
return wf
} }
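verifyResource walks every graph item, picks an accessor for the item's resource type, and clears any item the caller is not allowed to access; Graph.Clear then marks the graph as Partial. A compact sketch with the access check stubbed out:

package main

import "fmt"

// Simplified stand-ins (assumptions): item ids in a workflow graph and a per-id
// authorisation oracle replacing utils.VerifyAccess plus the typed accessors.
type Graph struct {
	Partial bool
	Items   map[string]bool
}

func (g *Graph) Clear(id string) {
	delete(g.Items, id)
	g.Partial = true
}

// verifyResource mirrors the accessor's new hook: any item that fails the
// access check is removed from the graph, leaving a partial (read-only) view.
func verifyResource(g *Graph, canAccess func(id string) bool) {
	for id := range g.Items {
		if !canAccess(id) {
			g.Clear(id)
		}
	}
}

func main() {
	g := &Graph{Items: map[string]bool{"data-1": true, "proc-1": true}}
	verifyResource(g, func(id string) bool { return id != "data-1" })
	fmt.Println(len(g.Items), g.Partial) // 1 true
}

Because UpdateOne and StoreOne reject graphs flagged Partial, a caller cannot persist a workflow from which resources were hidden by this filter.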


@@ -6,7 +6,7 @@ import (
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/models/booking" "cloud.o-forge.io/core/oc-lib/models/booking"
"cloud.o-forge.io/core/oc-lib/models/common" "cloud.o-forge.io/core/oc-lib/models/common/enum"
"cloud.o-forge.io/core/oc-lib/models/common/pricing" "cloud.o-forge.io/core/oc-lib/models/common/pricing"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
@@ -15,38 +15,41 @@ import (
) )
/* /*
* WorkflowExecutions is a struct that represents a list of workflow executions * WorkflowExecution is a struct that represents a single workflow execution
* Warning: No user can write (del, post, put) a workflow execution, it is only used by the system * Warning: No user can write (del, post, put) a workflow execution, it is only used by the system
* workflows generate their own executions * workflows generate their own executions
*/ */
type WorkflowExecutions struct { type WorkflowExecution struct {
utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name) utils.AbstractObject // AbstractObject contains the basic fields of an object (id, name)
ExecDate time.Time `json:"execution_date,omitempty" bson:"execution_date,omitempty" validate:"required"` // ExecDate is the execution date of the workflow, is required PeerBookByGraph map[string]map[string][]string `json:"peer_book_by_graph,omitempty" bson:"peer_book_by_graph,omitempty"` // BookByResource is a map of the resource id and the list of the booking id
EndDate *time.Time `json:"end_date,omitempty" bson:"end_date,omitempty"` // EndDate is the end date of the workflow ExecutionsID string `json:"executions_id,omitempty" bson:"executions_id,omitempty"`
State common.ScheduledType `json:"state" bson:"state" default:"0"` // State is the state of the workflow ExecDate time.Time `json:"execution_date,omitempty" bson:"execution_date,omitempty" validate:"required"` // ExecDate is the execution date of the workflow, is required
WorkflowID string `json:"workflow_id" bson:"workflow_id,omitempty"` // WorkflowID is the ID of the workflow EndDate *time.Time `json:"end_date,omitempty" bson:"end_date,omitempty"` // EndDate is the end date of the workflow
State enum.BookingStatus `json:"state" bson:"state" default:"0"` // TEMPORARY TODO DEFAULT 1 -> 0 State is the state of the workflow
WorkflowID string `json:"workflow_id" bson:"workflow_id,omitempty"` // WorkflowID is the ID of the workflow
} }
func (r *WorkflowExecutions) StoreDraftDefault() { func (r *WorkflowExecution) StoreDraftDefault() {
r.IsDraft = true r.IsDraft = false // TODO: TEMPORARY
r.State = enum.SCHEDULED
} }
func (r *WorkflowExecutions) CanUpdate(set utils.DBObject) (bool, utils.DBObject) { func (r *WorkflowExecution) CanUpdate(set utils.DBObject) (bool, utils.DBObject) {
if r.State != set.(*WorkflowExecutions).State { if r.State != set.(*WorkflowExecution).State {
return true, &WorkflowExecutions{State: set.(*WorkflowExecutions).State} // only state can be updated return true, &WorkflowExecution{State: set.(*WorkflowExecution).State} // only state can be updated
} }
return r.IsDraft, set // only draft buying can be updated return !r.IsDraft, set // only draft buying can be updated
} }
func (r *WorkflowExecutions) CanDelete() bool { func (r *WorkflowExecution) CanDelete() bool {
return r.IsDraft // only draft bookings can be deleted return r.IsDraft // only draft bookings can be deleted
} }
func (wfa *WorkflowExecutions) Equals(we *WorkflowExecutions) bool { func (wfa *WorkflowExecution) Equals(we *WorkflowExecution) bool {
return wfa.ExecDate.Equal(we.ExecDate) && wfa.WorkflowID == we.WorkflowID return wfa.ExecDate.Equal(we.ExecDate) && wfa.WorkflowID == we.WorkflowID
} }
func (ws *WorkflowExecutions) PurgeDraft(request *tools.APIRequest) error { func (ws *WorkflowExecution) PurgeDraft(request *tools.APIRequest) error {
if ws.EndDate == nil { if ws.EndDate == nil {
// if no end... then Book like a savage // if no end... then Book like a savage
e := ws.ExecDate.Add(time.Hour) e := ws.ExecDate.Add(time.Hour)
@@ -55,7 +58,7 @@ func (ws *WorkflowExecutions) PurgeDraft(request *tools.APIRequest) error {
accessor := ws.GetAccessor(request) accessor := ws.GetAccessor(request)
res, code, err := accessor.Search(&dbs.Filters{ res, code, err := accessor.Search(&dbs.Filters{
And: map[string][]dbs.Filter{ // check if there is a booking on the same compute resource by filtering on the compute_resource_id, the state and the execution date And: map[string][]dbs.Filter{ // check if there is a booking on the same compute resource by filtering on the compute_resource_id, the state and the execution date
"state": {{Operator: dbs.EQUAL.String(), Value: common.DRAFT.EnumIndex()}}, "state": {{Operator: dbs.EQUAL.String(), Value: enum.DRAFT.EnumIndex()}},
"workflow_id": {{Operator: dbs.EQUAL.String(), Value: ws.WorkflowID}}, "workflow_id": {{Operator: dbs.EQUAL.String(), Value: ws.WorkflowID}},
"execution_date": { "execution_date": {
{Operator: dbs.LTE.String(), Value: primitive.NewDateTimeFromTime(*ws.EndDate)}, {Operator: dbs.LTE.String(), Value: primitive.NewDateTimeFromTime(*ws.EndDate)},
@@ -73,53 +76,71 @@ func (ws *WorkflowExecutions) PurgeDraft(request *tools.APIRequest) error {
} }
// tool to transform the argo status to a state // tool to transform the argo status to a state
func (wfa *WorkflowExecutions) ArgoStatusToState(status string) *WorkflowExecutions { func (wfa *WorkflowExecution) ArgoStatusToState(status string) *WorkflowExecution {
status = strings.ToLower(status) status = strings.ToLower(status)
switch status { switch status {
case "succeeded": // Succeeded case "succeeded": // Succeeded
wfa.State = common.SUCCESS wfa.State = enum.SUCCESS
case "pending": // Pending case "pending": // Pending
wfa.State = common.SCHEDULED wfa.State = enum.SCHEDULED
case "running": // Running case "running": // Running
wfa.State = common.STARTED wfa.State = enum.STARTED
default: // Failed default: // Failed
wfa.State = common.FAILURE wfa.State = enum.FAILURE
} }
return wfa return wfa
} }
func (r *WorkflowExecutions) GenerateID() { func (r *WorkflowExecution) GenerateID() {
r.UUID = uuid.New().String() if r.UUID == "" {
r.UUID = uuid.New().String()
}
} }
func (d *WorkflowExecutions) GetName() string { func (d *WorkflowExecution) GetName() string {
return d.UUID + "_" + d.ExecDate.String() return d.UUID + "_" + d.ExecDate.String()
} }
func (d *WorkflowExecutions) GetAccessor(request *tools.APIRequest) utils.Accessor { func (d *WorkflowExecution) GetAccessor(request *tools.APIRequest) utils.Accessor {
return NewAccessor(request) // Create a new instance of the accessor return NewAccessor(request) // Create a new instance of the accessor
} }
func (d *WorkflowExecutions) VerifyAuth(request *tools.APIRequest) bool { func (d *WorkflowExecution) VerifyAuth(request *tools.APIRequest) bool {
return true return true
} }
func (d *WorkflowExecutions) Book(wfID string, priceds map[tools.DataType][]pricing.PricedItemITF) []*booking.Booking { func (d *WorkflowExecution) Book(executionsID string, wfID string, priceds map[tools.DataType]map[string]pricing.PricedItemITF) []*booking.Booking {
booking := d.bookEach(wfID, tools.STORAGE_RESOURCE, priceds[tools.STORAGE_RESOURCE]) booking := d.bookEach(executionsID, wfID, tools.STORAGE_RESOURCE, priceds[tools.STORAGE_RESOURCE])
booking = append(booking, d.bookEach(wfID, tools.PROCESSING_RESOURCE, priceds[tools.PROCESSING_RESOURCE])...) booking = append(booking, d.bookEach(executionsID, wfID, tools.PROCESSING_RESOURCE, priceds[tools.PROCESSING_RESOURCE])...)
booking = append(booking,d.bookEach(executionsID, wfID, tools.COMPUTE_RESOURCE, priceds[tools.COMPUTE_RESOURCE])...)
booking = append(booking,d.bookEach(executionsID, wfID, tools.DATA_RESOURCE, priceds[tools.DATA_RESOURCE])...)
return booking return booking
} }
func (d *WorkflowExecutions) bookEach(wfID string, dt tools.DataType, priceds []pricing.PricedItemITF) []*booking.Booking { func (d *WorkflowExecution) bookEach(executionsID string, wfID string, dt tools.DataType, priceds map[string]pricing.PricedItemITF) []*booking.Booking {
items := []*booking.Booking{} items := []*booking.Booking{}
for _, priced := range priceds { for itemID, priced := range priceds {
if d.PeerBookByGraph == nil {
d.PeerBookByGraph = map[string]map[string][]string{}
}
if d.PeerBookByGraph[priced.GetCreatorID()] == nil {
d.PeerBookByGraph[priced.GetCreatorID()] = map[string][]string{}
}
if d.PeerBookByGraph[priced.GetCreatorID()][itemID] == nil {
d.PeerBookByGraph[priced.GetCreatorID()][itemID] = []string{}
}
start := d.ExecDate start := d.ExecDate
if s := priced.GetLocationStart(); s != nil { if s := priced.GetLocationStart(); s != nil {
start = *s start = *s
} }
end := start.Add(time.Duration(priced.GetExplicitDurationInS()) * time.Second) end := start.Add(time.Duration(priced.GetExplicitDurationInS()) * time.Second)
bookingItem := &booking.Booking{ bookingItem := &booking.Booking{
State: common.DRAFT, AbstractObject: utils.AbstractObject{
UUID: uuid.New().String(),
Name: d.GetName() + "_" + executionsID + "_" + wfID,
},
ExecutionsID: executionsID,
State: enum.SCHEDULED,
ResourceID: priced.GetID(), ResourceID: priced.GetID(),
ResourceType: dt, ResourceType: dt,
DestPeerID: priced.GetCreatorID(), DestPeerID: priced.GetCreatorID(),
@@ -129,6 +150,8 @@ func (d *WorkflowExecutions) bookEach(wfID string, dt tools.DataType, priceds []
ExpectedEndDate: &end, ExpectedEndDate: &end,
} }
items = append(items, bookingItem) items = append(items, bookingItem)
d.PeerBookByGraph[priced.GetCreatorID()][itemID] = append(
d.PeerBookByGraph[priced.GetCreatorID()][itemID], bookingItem.GetID())
} }
return items return items
} }
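bookEach now records, per destination peer and per graph item, the booking ids it generated in the nested PeerBookByGraph map. A minimal sketch of that bookkeeping (the map shape is taken from the diff; the surrounding booking fields are omitted):

package main

import "fmt"

// PeerBookByGraph: destination peer id -> graph item id -> booking ids.
type PeerBookByGraph map[string]map[string][]string

// record sketches the map initialisation done inside bookEach before each
// booking id is appended.
func (p PeerBookByGraph) record(peerID, itemID, bookingID string) {
	if p[peerID] == nil {
		p[peerID] = map[string][]string{}
	}
	p[peerID][itemID] = append(p[peerID][itemID], bookingID)
}

func main() {
	books := PeerBookByGraph{}
	books.record("peer-A", "item-1", "booking-1")
	books.record("peer-A", "item-1", "booking-2")
	books.record("peer-B", "item-2", "booking-3")
	fmt.Println(len(books["peer-A"]["item-1"]), len(books["peer-B"])) // 2 1
}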


@@ -6,18 +6,31 @@ import (
"cloud.o-forge.io/core/oc-lib/dbs" "cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/logs" "cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/models/common" "cloud.o-forge.io/core/oc-lib/models/common/enum"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
) )
type workflowExecutionMongoAccessor struct { type workflowExecutionMongoAccessor struct {
utils.AbstractAccessor utils.AbstractAccessor
shallow bool
}
func newShallowAccessor(request *tools.APIRequest) *workflowExecutionMongoAccessor {
return &workflowExecutionMongoAccessor{
shallow: true,
AbstractAccessor: utils.AbstractAccessor{
Logger: logs.CreateLogger(tools.WORKFLOW_EXECUTION.String()), // Create a logger with the data type
Request: request,
Type: tools.WORKFLOW_EXECUTION,
},
}
} }
func NewAccessor(request *tools.APIRequest) *workflowExecutionMongoAccessor { func NewAccessor(request *tools.APIRequest) *workflowExecutionMongoAccessor {
return &workflowExecutionMongoAccessor{ return &workflowExecutionMongoAccessor{
utils.AbstractAccessor{ shallow: false,
AbstractAccessor: utils.AbstractAccessor{
Logger: logs.CreateLogger(tools.WORKFLOW_EXECUTION.String()), // Create a logger with the data type Logger: logs.CreateLogger(tools.WORKFLOW_EXECUTION.String()), // Create a logger with the data type
Request: request, Request: request,
Type: tools.WORKFLOW_EXECUTION, Type: tools.WORKFLOW_EXECUTION,
@@ -30,7 +43,11 @@ func (wfa *workflowExecutionMongoAccessor) DeleteOne(id string) (utils.DBObject,
} }
func (wfa *workflowExecutionMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) { func (wfa *workflowExecutionMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils.DBObject, int, error) {
return nil, 404, errors.New("not implemented") if set.(*WorkflowExecution).State == 0 {
return nil, 400, errors.New("state is required")
}
realSet := WorkflowExecution{State: set.(*WorkflowExecution).State}
return utils.GenericUpdateOne(&realSet, id, wfa, &WorkflowExecution{})
} }
func (wfa *workflowExecutionMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) { func (wfa *workflowExecutionMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
@@ -42,37 +59,49 @@ func (wfa *workflowExecutionMongoAccessor) CopyOne(data utils.DBObject) (utils.D
} }
func (a *workflowExecutionMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) { func (a *workflowExecutionMongoAccessor) LoadOne(id string) (utils.DBObject, int, error) {
return utils.GenericLoadOne[*WorkflowExecutions](id, func(d utils.DBObject) (utils.DBObject, int, error) { return utils.GenericLoadOne[*WorkflowExecution](id, func(d utils.DBObject) (utils.DBObject, int, error) {
if d.(*WorkflowExecutions).State == common.DRAFT && time.Now().UTC().After(d.(*WorkflowExecutions).ExecDate) { now := time.Now()
utils.GenericDeleteOne(d.GetID(), a) now = now.Add(time.Second * -60)
return nil, 404, errors.New("Not found") if d.(*WorkflowExecution).State == enum.DRAFT && !a.shallow && now.UTC().After(d.(*WorkflowExecution).ExecDate) {
utils.GenericDeleteOne(d.GetID(), newShallowAccessor(a.Request))
return nil, 404, errors.New("not found")
} }
if d.(*WorkflowExecutions).State == common.SCHEDULED && time.Now().UTC().After(d.(*WorkflowExecutions).ExecDate) { if d.(*WorkflowExecution).State == enum.SCHEDULED && !a.shallow && now.UTC().After(d.(*WorkflowExecution).ExecDate) {
d.(*WorkflowExecutions).State = common.FORGOTTEN d.(*WorkflowExecution).State = enum.FORGOTTEN
utils.GenericRawUpdateOne(d, id, a) utils.GenericRawUpdateOne(d, id, newShallowAccessor(a.Request))
} }
return d, 200, nil return d, 200, nil
}, a) }, a)
} }
func (a *workflowExecutionMongoAccessor) LoadAll(isDraft bool) ([]utils.ShallowDBObject, int, error) { func (a *workflowExecutionMongoAccessor) LoadAll(isDraft bool) ([]utils.ShallowDBObject, int, error) {
return utils.GenericLoadAll[*WorkflowExecutions](a.getExec(), isDraft, a) return utils.GenericLoadAll[*WorkflowExecution](a.getExec(), isDraft, a)
} }
func (a *workflowExecutionMongoAccessor) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) { func (a *workflowExecutionMongoAccessor) Search(filters *dbs.Filters, search string, isDraft bool) ([]utils.ShallowDBObject, int, error) {
return utils.GenericSearch[*WorkflowExecutions](filters, search, (&WorkflowExecutions{}).GetObjectFilters(search), a.getExec(), isDraft, a) return utils.GenericSearch[*WorkflowExecution](filters, search, a.GetExecFilters(search), a.getExec(), isDraft, a)
} }
func (a *workflowExecutionMongoAccessor) getExec() func(utils.DBObject) utils.ShallowDBObject { func (a *workflowExecutionMongoAccessor) getExec() func(utils.DBObject) utils.ShallowDBObject {
return func(d utils.DBObject) utils.ShallowDBObject { return func(d utils.DBObject) utils.ShallowDBObject {
if d.(*WorkflowExecutions).State == common.DRAFT && time.Now().UTC().After(d.(*WorkflowExecutions).ExecDate) { now := time.Now()
utils.GenericDeleteOne(d.GetID(), a) now = now.Add(time.Second * -60)
if d.(*WorkflowExecution).State == enum.DRAFT && now.UTC().After(d.(*WorkflowExecution).ExecDate) {
utils.GenericDeleteOne(d.GetID(), newShallowAccessor(a.Request))
return nil return nil
} }
if d.(*WorkflowExecutions).State == common.SCHEDULED && time.Now().UTC().After(d.(*WorkflowExecutions).ExecDate) { if d.(*WorkflowExecution).State == enum.SCHEDULED && now.UTC().After(d.(*WorkflowExecution).ExecDate) {
d.(*WorkflowExecutions).State = common.FORGOTTEN d.(*WorkflowExecution).State = enum.FORGOTTEN
utils.GenericRawUpdateOne(d, d.GetID(), a) utils.GenericRawUpdateOne(d, d.GetID(), newShallowAccessor(a.Request))
return d
} }
return d return d
} }
} }
func (a *workflowExecutionMongoAccessor) GetExecFilters(search string) *dbs.Filters {
return &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
"abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search + "_execution"}},
}}
}
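The execution accessor applies a 60-second grace period before reacting to an overdue execution: a DRAFT past its date is deleted, a SCHEDULED one is marked FORGOTTEN (both through the new "shallow" accessor so the hook does not recurse into itself). A sketch of that decision rule:

package main

import (
	"fmt"
	"time"
)

type state int

const (
	DRAFT state = iota
	SCHEDULED
	FORGOTTEN
	DELETED // sketch-only marker for "GenericDeleteOne was called"
)

// resolve sketches the rule in getExec/LoadOne: only act once the execution
// date is more than 60 seconds in the past, then delete drafts and forget
// scheduled executions; everything else is returned untouched.
func resolve(s state, execDate, now time.Time) state {
	cutoff := now.Add(-60 * time.Second)
	if !cutoff.UTC().After(execDate) {
		return s
	}
	switch s {
	case DRAFT:
		return DELETED
	case SCHEDULED:
		return FORGOTTEN
	}
	return s
}

func main() {
	now := time.Now()
	late := now.Add(-5 * time.Minute)
	fmt.Println(resolve(DRAFT, late, now) == DELETED)       // true
	fmt.Println(resolve(SCHEDULED, late, now) == FORGOTTEN) // true
	fmt.Println(resolve(SCHEDULED, now, now) == SCHEDULED)  // true: still inside the grace period
}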


@@ -4,13 +4,16 @@ import (
"errors" "errors"
"fmt" "fmt"
"strings" "strings"
"sync"
"time" "time"
"cloud.o-forge.io/core/oc-lib/models/common" "cloud.o-forge.io/core/oc-lib/models/booking"
"cloud.o-forge.io/core/oc-lib/models/common/enum"
"cloud.o-forge.io/core/oc-lib/models/peer" "cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/utils" "cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/models/workflow" "cloud.o-forge.io/core/oc-lib/models/workflow"
"cloud.o-forge.io/core/oc-lib/tools" "cloud.o-forge.io/core/oc-lib/tools"
"github.com/google/uuid"
"github.com/robfig/cron" "github.com/robfig/cron"
) )
@@ -20,14 +23,15 @@ import (
*/ */
// it's a transient object, only used for the lifetime of a session; it is not stored in the database
type WorkflowSchedule struct { type WorkflowSchedule struct {
Workflow *workflow.Workflow `json:"workflow,omitempty"` // Workflow is the workflow dependency of the schedule UUID string `json:"id" validate:"required"` // UUID is the schedule identifier, reused as the ExecutionsID of the generated executions
WorkflowExecutions []*WorkflowExecutions `json:"workflow_executions,omitempty"` // WorkflowExecutions is the list of executions of the workflow Workflow *workflow.Workflow `json:"workflow,omitempty"` // Workflow is the workflow dependency of the schedule
Message string `json:"message,omitempty"` // Message is the message of the schedule WorkflowExecution []*WorkflowExecution `json:"workflow_executions,omitempty"` // WorkflowExecution is the list of executions of the workflow
Warning string `json:"warning,omitempty"` // Warning is the warning message of the schedule Message string `json:"message,omitempty"` // Message is the message of the schedule
Start time.Time `json:"start" validate:"required,ltfield=End"` // Start is the start time of the schedule, is required and must be less than the End time Warning string `json:"warning,omitempty"` // Warning is the warning message of the schedule
End *time.Time `json:"end,omitempty"` // End is the end time of the schedule, is required and must be greater than the Start time Start time.Time `json:"start" validate:"required,ltfield=End"` // Start is the start time of the schedule, is required and must be less than the End time
DurationS float64 `json:"duration_s" default:"-1"` // DurationS is the estimated duration of the schedule in seconds End *time.Time `json:"end,omitempty"` // End is the end time of the schedule, is required and must be greater than the Start time
Cron string `json:"cron,omitempty"` // here the cron format : ss mm hh dd MM dw task DurationS float64 `json:"duration_s" default:"-1"` // DurationS is the estimated duration of the schedule in seconds
Cron string `json:"cron,omitempty"` // here the cron format : ss mm hh dd MM dw task
} }
func NewScheduler(start string, end string, durationInS float64, cron string) *WorkflowSchedule { func NewScheduler(start string, end string, durationInS float64, cron string) *WorkflowSchedule {
@@ -36,6 +40,7 @@ func NewScheduler(start string, end string, durationInS float64, cron string) *W
return nil return nil
} }
ws := &WorkflowSchedule{ ws := &WorkflowSchedule{
UUID: uuid.New().String(),
Start: s, Start: s,
DurationS: durationInS, DurationS: durationInS,
Cron: cron, Cron: cron,
@@ -47,72 +52,150 @@ func NewScheduler(start string, end string, durationInS float64, cron string) *W
return ws return ws
} }
func (ws *WorkflowSchedule) CheckBooking(wfID string, request *tools.APIRequest) (bool, *workflow.Workflow, []*WorkflowExecutions, error) { func (ws *WorkflowSchedule) CheckBooking(wfID string, request *tools.APIRequest) (bool, *workflow.Workflow, []*WorkflowExecution, []*booking.Booking, error) {
if request.Caller == nil && request.Caller.URLS == nil && request.Caller.URLS[tools.BOOKING] == nil || request.Caller.URLS[tools.BOOKING][tools.POST] == "" { if request.Caller == nil && request.Caller.URLS == nil && request.Caller.URLS[tools.BOOKING] == nil || request.Caller.URLS[tools.BOOKING][tools.GET] == "" {
return false, nil, []*WorkflowExecutions{}, errors.New("no caller defined") return false, nil, []*WorkflowExecution{}, []*booking.Booking{}, errors.New("no caller defined")
} }
access := workflow.NewAccessor(nil) access := workflow.NewAccessor(request)
res, code, err := access.LoadOne(wfID) res, code, err := access.LoadOne(wfID)
if code != 200 { if code != 200 {
return false, nil, []*WorkflowExecutions{}, errors.New("could not load the workflow with id: " + err.Error()) return false, nil, []*WorkflowExecution{}, []*booking.Booking{}, errors.New("could not load the workflow with id: " + err.Error())
} }
wf := res.(*workflow.Workflow) wf := res.(*workflow.Workflow)
longest, priceds, wf, err := wf.Planify(ws.Start, ws.End, request) longest, priceds, wf, err := wf.Planify(ws.Start, ws.End, request)
if err != nil { if err != nil {
return false, wf, []*WorkflowExecutions{}, err return false, wf, []*WorkflowExecution{}, []*booking.Booking{}, err
} }
ws.DurationS = longest ws.DurationS = longest
ws.Message = "We estimate that the workflow will start at " + ws.Start.String() + " and last " + fmt.Sprintf("%v", ws.DurationS) + "seconds." ws.Message = "We estimate that the workflow will start at " + ws.Start.String() + " and last " + fmt.Sprintf("%v", ws.DurationS) + " seconds."
if ws.End != nil && ws.Start.Add(time.Duration(longest)*time.Second).After(*ws.End) { if ws.End != nil && ws.Start.Add(time.Duration(longest)*time.Second).After(*ws.End) {
ws.Warning = "The workflow may be too long to be executed in the given time frame, we will try to book it anyway\n" ws.Warning = "The workflow may be too long to be executed in the given time frame, we will try to book it anyway\n"
} }
execs, err := ws.getExecutions(wf) execs, err := ws.getExecutions(wf)
if err != nil { if err != nil {
return false, wf, []*WorkflowExecutions{}, err return false, wf, []*WorkflowExecution{}, []*booking.Booking{}, err
} }
bookings := []*booking.Booking{}
for _, exec := range execs { for _, exec := range execs {
bookings := exec.Book(wfID, priceds) bookings = append(bookings, exec.Book(ws.UUID, wfID, priceds)...)
for _, booking := range bookings { }
_, err := (&peer.Peer{}).LaunchPeerExecution(booking.DestPeerID, "",
tools.BOOKING, tools.POSTCHECK, booking.Serialize(booking), request.Caller) errCh := make(chan error, len(bookings))
if err != nil { var m sync.Mutex
return false, wf, execs, err
} for _, b := range bookings {
go getBooking(b, request, wf, execs, bookings, errCh, &m)
}
for i := 0; i < len(bookings); i++ {
if err := <-errCh; err != nil {
return false, wf, execs, bookings, err
} }
} }
return true, wf, execs, nil
return true, wf, execs, bookings, nil
} }
func (ws *WorkflowSchedule) Schedules(wfID string, request *tools.APIRequest) (*workflow.Workflow, []*WorkflowExecutions, error) { func getBooking( b *booking.Booking, request *tools.APIRequest, wf *workflow.Workflow, execs []*WorkflowExecution, bookings []*booking.Booking, errCh chan error, m *sync.Mutex) {
m.Lock()
c, err := getCallerCopy(request, errCh)
m.Unlock() // always release the lock before returning, otherwise an error here would leave it held and block the other goroutines
if err != nil {
errCh <- err
return
}
meth := c.URLS[tools.BOOKING][tools.GET]
meth = strings.ReplaceAll(meth, ":id", b.ResourceID)
meth = strings.ReplaceAll(meth, ":start_date", b.ExpectedStartDate.Format("2006-01-02T15:04:05"))
meth = strings.ReplaceAll(meth, ":end_date", b.ExpectedEndDate.Format("2006-01-02T15:04:05"))
c.URLS[tools.BOOKING][tools.GET] = meth
_, err = (&peer.Peer{}).LaunchPeerExecution(b.DestPeerID, b.ResourceID, tools.BOOKING, tools.GET, nil, &c)
if err != nil {
errCh <- err
return
}
errCh <- nil
}
func getCallerCopy(request *tools.APIRequest, errCh chan error) (tools.HTTPCaller, error) {
var c tools.HTTPCaller
err := request.Caller.DeepCopy(c)
if err != nil {
errCh <- err
return tools.HTTPCaller{}, nil
}
c.URLS = request.Caller.URLS
return c, err
}
+func (ws *WorkflowSchedule) Schedules(wfID string, request *tools.APIRequest) (*WorkflowSchedule, *workflow.Workflow, []*WorkflowExecution, error) {
     if request == nil {
-        return nil, []*WorkflowExecutions{}, errors.New("no request found")
+        return ws, nil, []*WorkflowExecution{}, errors.New("no request found")
     }
     c := request.Caller
     if c == nil || c.URLS == nil || c.URLS[tools.BOOKING] == nil {
-        return nil, []*WorkflowExecutions{}, errors.New("no caller defined")
+        return ws, nil, []*WorkflowExecution{}, errors.New("no caller defined")
     }
     methods := c.URLS[tools.BOOKING]
-    if _, ok := methods[tools.POST]; !ok {
-        return nil, []*WorkflowExecutions{}, errors.New("no path found")
+    if _, ok := methods[tools.GET]; !ok {
+        return ws, nil, []*WorkflowExecution{}, errors.New("no path found")
     }
-    ok, wf, executions, err := ws.CheckBooking(wfID, request)
+    ok, wf, executions, bookings, err := ws.CheckBooking(wfID, request)
+    ws.WorkflowExecution = executions
     if !ok || err != nil {
-        return nil, []*WorkflowExecutions{}, errors.New("could not book the workflow" + fmt.Sprintf("%v", err))
+        return ws, nil, executions, errors.New("could not book the workflow: " + fmt.Sprintf("%v", err))
     }
     ws.Workflow = wf
-    ws.WorkflowExecutions = executions
+
+    // Post the bookings to their destination peers, one goroutine per booking.
+    var errCh = make(chan error, len(bookings))
+    var m sync.Mutex
+    for _, booking := range bookings {
+        go ws.BookExecs(booking, request, errCh, &m)
+    }
+    for i := 0; i < len(bookings); i++ {
+        if err := <-errCh; err != nil {
+            return ws, wf, executions, errors.New("could not launch the peer execution: " + fmt.Sprintf("%v", err))
+        }
+    }
+
+    fmt.Println("Schedules")
     for _, exec := range executions {
         err := exec.PurgeDraft(request)
         if err != nil {
-            return nil, []*WorkflowExecutions{}, errors.New("could not book the workflow" + fmt.Sprintf("%v", err))
+            return ws, nil, []*WorkflowExecution{}, errors.New("purge draft: " + fmt.Sprintf("%v", err))
         }
-        exec.GenerateID()
+        exec.StoreDraftDefault()
+        // Should DELETE the previous execution
         utils.GenericStoreOne(exec, NewAccessor(request))
     }
-    return wf, executions, nil
+    fmt.Println("Schedules")
+    return ws, wf, executions, nil
 }
+
+// BookExecs posts one booking to its destination peer; the result is reported on errCh.
+func (ws *WorkflowSchedule) BookExecs(booking *booking.Booking, request *tools.APIRequest, errCh chan error, m *sync.Mutex) {
+    m.Lock()
+    c, err := getCallerCopy(request)
+    m.Unlock()
+    if err != nil {
+        errCh <- err
+        return
+    }
+    _, err = (&peer.Peer{}).LaunchPeerExecution(booking.DestPeerID, "",
+        tools.BOOKING, tools.POST, booking.Serialize(booking), &c)
+    if err != nil {
+        errCh <- err
+        return
+    }
+    errCh <- nil
+}
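
CheckBooking and Schedules follow the same fan-out/fan-in shape: one goroutine per booking, a buffered error channel sized to the number of bookings, and an early return on the first failure. A minimal, self-contained sketch of that pattern (doBooking and the IDs are illustrative, not part of oc-lib):

package main

import (
    "errors"
    "fmt"
    "math/rand"
)

// doBooking stands in for a single peer call.
func doBooking(id int) error {
    if rand.Intn(4) == 0 {
        return errors.New("peer refused the booking")
    }
    return nil
}

func main() {
    ids := []int{1, 2, 3, 4, 5}
    errCh := make(chan error, len(ids)) // buffered: goroutines never block on send

    for _, id := range ids {
        go func(id int) {
            errCh <- doBooking(id) // nil on success, the error otherwise
        }(id)
    }

    // Fan-in: read exactly one result per goroutine launched.
    for range ids {
        if err := <-errCh; err != nil {
            fmt.Println("booking failed:", err)
            return
        }
    }
    fmt.Println("all bookings succeeded")
}

Because the channel is buffered to the number of goroutines, workers that finish after an early return can still send without blocking, which is what lets the callers above bail out on the first error without leaking goroutines.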
 /*
@@ -127,21 +210,23 @@ VERIFY THAT WE HANDLE DIFFERENCE BETWEEN LOCATION TIME && BOOKING
 * getExecutions is a function that returns the executions of a workflow
 * it returns an array of workflow_execution.WorkflowExecution
 */
-func (ws *WorkflowSchedule) getExecutions(workflow *workflow.Workflow) ([]*WorkflowExecutions, error) {
-    workflows_executions := []*WorkflowExecutions{}
+func (ws *WorkflowSchedule) getExecutions(workflow *workflow.Workflow) ([]*WorkflowExecution, error) {
+    workflows_executions := []*WorkflowExecution{}
     dates, err := ws.getDates()
     if err != nil {
         return workflows_executions, err
     }
     for _, date := range dates {
-        obj := &WorkflowExecutions{
+        obj := &WorkflowExecution{
             AbstractObject: utils.AbstractObject{
-                UUID: uuid.New().String(), // set the uuid of the execution
                 Name: workflow.Name + "_execution_" + date.Start.String(), // set the name of the execution
             },
+            ExecutionsID: ws.UUID,
             ExecDate:     date.Start,       // set the execution date
             EndDate:      date.End,         // set the end date
-            State:        common.DRAFT,     // set the state to 1 (scheduled)
+            State:        enum.DRAFT,       // set the state to 1 (scheduled)
             WorkflowID:   workflow.GetID(), // set the workflow id dependency of the execution
         }
         workflows_executions = append(workflows_executions, obj)
     }
@@ -152,7 +237,7 @@ func (ws *WorkflowSchedule) getDates() ([]Schedule, error) {
     schedule := []Schedule{}
     if len(ws.Cron) > 0 { // if cron is set then end date should be set
         if ws.End == nil {
-            return schedule, errors.New("a cron task should have an end date.")
+            return schedule, errors.New("a cron task should have an end date")
         }
         if ws.DurationS <= 0 {
             ws.DurationS = ws.End.Sub(ws.Start).Seconds()
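
The remainder of getDates is not shown in this hunk; the checks above only require that a cron schedule has an end date and a positive default duration. As a point of reference, here is a hedged sketch of how the occurrences between Start and End could be enumerated, assuming the robfig/cron/v3 parser (oc-lib may rely on a different cron library; expandCron is illustrative, not the actual getDates):

package main

import (
    "fmt"
    "time"

    "github.com/robfig/cron/v3"
)

// expandCron lists every trigger time of a five-field cron expression in (start, end).
func expandCron(expr string, start, end time.Time) ([]time.Time, error) {
    spec, err := cron.ParseStandard(expr)
    if err != nil {
        return nil, err
    }
    dates := []time.Time{}
    for t := spec.Next(start); !t.IsZero() && t.Before(end); t = spec.Next(t) {
        dates = append(dates, t)
    }
    return dates, nil
}

func main() {
    start := time.Date(2025, 6, 2, 0, 0, 0, 0, time.UTC)
    end := start.Add(6 * time.Hour)
    dates, err := expandCron("0 * * * *", start, end) // every hour on the hour
    if err != nil {
        panic(err)
    }
    for _, d := range dates {
        fmt.Println(d) // prints each hourly occurrence after start and before end
    }
}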

View File

@@ -1,8 +1,6 @@
 package workspace

 import (
-    "fmt"
     "cloud.o-forge.io/core/oc-lib/models/collaborative_area/shallow_collaborative_area"
     "cloud.o-forge.io/core/oc-lib/models/resources"
     "cloud.o-forge.io/core/oc-lib/models/utils"
@@ -23,15 +21,12 @@ func (d *Workspace) GetAccessor(request *tools.APIRequest) utils.Accessor {
 }

 func (ao *Workspace) VerifyAuth(request *tools.APIRequest) bool {
-    fmt.Println("Workspace.VerifyAuth", ao.Shared)
     if ao.Shared != "" {
         shared, code, _ := shallow_collaborative_area.NewAccessor(request).LoadOne(ao.Shared)
-        fmt.Println("Workspace.VerifyAuth", shared, code)
         if code != 200 || shared == nil {
             return false
         }
         return shared.VerifyAuth(request)
     }
-    fmt.Println("Workspace.VerifyAuth", ao.AbstractObject.VerifyAuth(request))
     return ao.AbstractObject.VerifyAuth(request)
 }

View File

@@ -2,7 +2,6 @@ package workspace
 import (
     "errors"
-    "fmt"
     "cloud.o-forge.io/core/oc-lib/dbs"
     "cloud.o-forge.io/core/oc-lib/logs"
@@ -73,12 +72,14 @@ func (a *workspaceMongoAccessor) UpdateOne(set utils.DBObject, id string) (utils
 func (a *workspaceMongoAccessor) StoreOne(data utils.DBObject) (utils.DBObject, int, error) {
     filters := &dbs.Filters{
         Or: map[string][]dbs.Filter{
             "abstractobject.name": {{Operator: dbs.LIKE.String(), Value: data.GetName() + "_workspace"}},
+            "abstractobject.creator_id": {{Operator: dbs.EQUAL.String(), Value: a.GetPeerID()}},
         },
     }
+    // filters *dbs.Filters, word string, isDraft bool
     res, _, err := a.Search(filters, "", true) // Search for the workspace
     if err == nil && len(res) > 0 { // If the workspace already exists, return an error
-        return nil, 409, errors.New("A workspace with the same name already exists")
+        return nil, 409, errors.New("a workspace with the same name already exists")
     }
     // reset the resources
     d := data.(*Workspace)
@@ -116,7 +117,6 @@ func (a *workspaceMongoAccessor) Search(filters *dbs.Filters, search string, isD
 This function is used to share the workspace with the peers
 */
 func (a *workspaceMongoAccessor) share(realData *Workspace, method tools.METHOD, caller *tools.HTTPCaller) {
-    fmt.Println("Sharing workspace", realData, caller)
     if realData == nil || realData.Shared == "" || caller == nil || caller.Disabled {
         return
     }

View File

@@ -3,12 +3,13 @@ package tools
 import (
     "encoding/json"
     "errors"
-    "fmt"
     "strings"

     "cloud.o-forge.io/core/oc-lib/config"
     "cloud.o-forge.io/core/oc-lib/dbs/mongo"
+    "cloud.o-forge.io/core/oc-lib/logs"
     beego "github.com/beego/beego/v2/server/web"
+    "github.com/google/uuid"
 )

 type APIRequest struct {
@@ -116,8 +117,8 @@ func (a *API) SubscribeRouter(infos []*beego.ControllerInfo) {
 // CheckRemotePeer checks the state of a remote peer
 func (a *API) CheckRemotePeer(url string) (State, map[string]int) {
     // Check if the database is up
-    caller := NewHTTPCaller(map[DataType]map[METHOD]string{}) // Create a new http caller
     var resp APIStatusResponse
+    caller := NewHTTPCaller(map[DataType]map[METHOD]string{}) // Create a new http caller
     b, err := caller.CallPost(url, "", map[string]interface{}{}) // Call the status endpoint of the peer
     if err != nil {
         return DEAD, map[string]int{} // If the peer is not reachable, return dead
@@ -135,6 +136,9 @@ func (a *API) CheckRemotePeer(url string) (State, map[string]int) {
 // CheckRemoteAPIs checks the state of remote APIs from your proper OC
 func (a *API) CheckRemoteAPIs(apis []DataType) (State, map[string]string, error) {
+    id := uuid.New()
+    l := logs.GetLogger().With().Str("id", id.String()).Logger()
+    l.Debug().Msg("Start checking")
     // Check if the database is up
     new := map[string]string{}
     caller := NewHTTPCaller(map[DataType]map[METHOD]string{}) // Create a new http caller
@@ -143,6 +147,7 @@ func (a *API) CheckRemoteAPIs(apis []DataType) (State, map[string]string, error)
     state := ALIVE
     reachable := false
     for _, api := range apis { // Check the state of each remote API in the list
+        l.Debug().Msg("Checking : " + api.String() + " at " + api.API())
         var resp APIStatusResponse
         b, err := caller.CallGet("http://"+api.API()+":8080", "/oc/version/status") // Call the status endpoint of the remote API (standard OC status endpoint)
         if err != nil {
@@ -150,7 +155,6 @@ func (a *API) CheckRemoteAPIs(apis []DataType) (State, map[string]string, error)
             continue
         }
         json.Unmarshal(b, &resp)
-        fmt.Println(string(b))
         if resp.Data == nil { //
             state = REDUCED_SERVICE // If the response is empty, return reduced service
             continue

View File

@@ -13,7 +13,6 @@ const (
     WORKFLOW
     WORKFLOW_EXECUTION
     WORKSPACE
-    RESOURCE_MODEL
     PEER
     COLLABORATIVE_AREA
     RULE
@@ -21,7 +20,12 @@ const (
     WORKFLOW_HISTORY
     WORKSPACE_HISTORY
     ORDER
-    BUYING_STATUS
+    PURCHASE_RESOURCE
+    ADMIRALTY_SOURCE
+    ADMIRALTY_TARGET
+    ADMIRALTY_SECRET
+    ADMIRALTY_KUBECONFIG
+    ADMIRALTY_NODES
 )

 var NOAPI = ""
@@ -31,6 +35,11 @@ var WORKFLOWAPI = "oc-workflow"
 var WORKSPACEAPI = "oc-workspace"
 var PEERSAPI = "oc-peer"
 var DATACENTERAPI = "oc-datacenter"
+var ADMIRALTY_SOURCEAPI = DATACENTERAPI + "/admiralty/source"
+var ADMIRALTY_TARGETAPI = DATACENTERAPI + "/admiralty/target"
+var ADMIRALTY_SECRETAPI = DATACENTERAPI + "/admiralty/secret"
+var ADMIRALTY_KUBECONFIGAPI = DATACENTERAPI + "/admiralty/kubeconfig"
+var ADMIRALTY_NODESAPI = DATACENTERAPI + "/admiralty/node"

 // Bind the standard API name to the data type
 var DefaultAPI = [...]string{
@@ -43,7 +52,6 @@ var DefaultAPI = [...]string{
     WORKFLOWAPI,
     NOAPI,
     WORKSPACEAPI,
-    CATALOGAPI,
     PEERSAPI,
     SHAREDAPI,
     SHAREDAPI,
@@ -52,6 +60,11 @@ var DefaultAPI = [...]string{
     NOAPI,
     NOAPI,
     NOAPI,
+    ADMIRALTY_SOURCEAPI,
+    ADMIRALTY_TARGETAPI,
+    ADMIRALTY_SECRETAPI,
+    ADMIRALTY_KUBECONFIGAPI,
+    ADMIRALTY_NODESAPI,
 }

 // Bind the standard data name to the data type
@@ -65,7 +78,6 @@ var Str = [...]string{
     "workflow",
     "workflow_execution",
     "workspace",
-    "resource_model",
     "peer",
     "collaborative_area",
     "rule",
@@ -73,7 +85,12 @@ var Str = [...]string{
     "workflow_history",
     "workspace_history",
     "order",
-    "buying_status",
+    "purchase_resource",
+    "admiralty_source",
+    "admiralty_target",
+    "admiralty_secret",
+    "admiralty_kubeconfig",
+    "admiralty_node",
 }

 func FromInt(i int) string {
@@ -92,3 +109,7 @@ func (d DataType) String() string { // String - Returns the string name of the d
 func (d DataType) EnumIndex() int {
     return int(d)
 }
+
+func DataTypeList() []DataType {
+    return []DataType{DATA_RESOURCE, PROCESSING_RESOURCE, STORAGE_RESOURCE, COMPUTE_RESOURCE, WORKFLOW_RESOURCE, WORKFLOW, WORKFLOW_EXECUTION, WORKSPACE, PEER, COLLABORATIVE_AREA, RULE, BOOKING, WORKFLOW_HISTORY, WORKSPACE_HISTORY, ORDER, PURCHASE_RESOURCE, ADMIRALTY_SOURCE, ADMIRALTY_TARGET, ADMIRALTY_SECRET, ADMIRALTY_KUBECONFIG, ADMIRALTY_NODES}
+}
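
DataType, DefaultAPI and Str are positional tables: this change removes RESOURCE_MODEL, CATALOGAPI and "resource_model" and adds the admiralty entries in all three places, because the lookups presumably go through EnumIndex(). A sanity check in the spirit of a unit test could guard that alignment; the test below is illustrative and not part of the repository:

package tools

import "testing"

// Illustrative guard (not in oc-lib): Str and DefaultAPI must stay the same
// length and cover every value returned by DataTypeList(), otherwise a
// DataType would resolve to the wrong name or API.
func TestDataTypeTablesStayAligned(t *testing.T) {
    if len(Str) != len(DefaultAPI) {
        t.Fatalf("Str has %d entries, DefaultAPI has %d: tables are out of sync", len(Str), len(DefaultAPI))
    }
    if n := len(DataTypeList()); n > len(Str) {
        t.Fatalf("DataTypeList() returns %d types but only %d names are defined", n, len(Str))
    }
}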

View File

@@ -3,6 +3,7 @@ package tools
 import (
     "bytes"
     "encoding/json"
+    "fmt"
     "io"
     "net/http"
     "net/url"
@@ -49,8 +50,9 @@ func ToMethod(str string) METHOD {
 var HTTPCallerInstance = &HTTPCaller{} // Singleton instance of the HTTPCaller

 type HTTPCaller struct {
     URLS     map[DataType]map[METHOD]string // Map of the different methods and their urls
     Disabled bool // Disabled flag
+    LastResults map[string]interface{} // Used to store information regarding the last execution of a given method on a given data type
 }

 // NewHTTPCaller creates a new instance of the HTTP Caller
@@ -61,6 +63,16 @@ func NewHTTPCaller(urls map[DataType]map[METHOD]string) *HTTPCaller {
     }
 }

+// DeepCopy creates a copy of the current caller, in order to have parallelized executions without race conditions.
+func (c *HTTPCaller) DeepCopy(dst *HTTPCaller) error {
+    data, err := json.Marshal(c)
+    if err != nil {
+        return err
+    }
+    return json.Unmarshal(data, dst)
+}
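
A short usage sketch of the copy-per-goroutine pattern this method enables: every worker gets its own HTTPCaller, so concurrent calls never write LastResults on a shared instance. The checkAll helper is illustrative (only CallGet, DeepCopy and the standard /oc/version/status path come from the library):

package tools

// checkAll fans out one GET per URL, giving each goroutine a private copy of
// the caller. Illustrative helper, not part of oc-lib.
func checkAll(caller *HTTPCaller, urls []string) []error {
    errCh := make(chan error, len(urls)) // buffered so late goroutines never block
    for _, u := range urls {
        go func(u string) {
            var local HTTPCaller
            if err := caller.DeepCopy(&local); err != nil {
                errCh <- err
                return
            }
            _, err := local.CallGet(u, "/oc/version/status")
            errCh <- err
        }(u)
    }
    errs := []error{}
    for range urls {
        if err := <-errCh; err != nil {
            errs = append(errs, err)
        }
    }
    return errs
}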
 // CallGet calls the GET method on the HTTP server
 func (caller *HTTPCaller) CallGet(url string, subpath string, types ...string) ([]byte, error) {
     req, err := http.NewRequest(http.MethodGet, url+subpath, bytes.NewBuffer([]byte("")))
@@ -76,22 +88,41 @@ func (caller *HTTPCaller) CallGet(url string, subpath string, types ...string) (
         return nil, err
     }
     defer resp.Body.Close()
-    return io.ReadAll(resp.Body)
+    err = caller.StoreResp(resp)
+    if err != nil {
+        return nil, err
+    }
+    return caller.LastResults["body"].([]byte), nil
 }

 // CallDelete calls the DELETE method on the HTTP server
 func (caller *HTTPCaller) CallDelete(url string, subpath string) ([]byte, error) {
-    resp, err := http.NewRequest("DELETE", url+subpath, nil)
-    if err != nil || resp == nil || resp.Body == nil {
+    req, err := http.NewRequest("DELETE", url+subpath, nil)
+    if err != nil {
+        return nil, err
+    }
+    client := &http.Client{}
+    resp, err := client.Do(req)
+    if err != nil || resp == nil || resp.Body == nil {
         return nil, err
     }
     defer resp.Body.Close()
-    return io.ReadAll(resp.Body)
+
+    err = caller.StoreResp(resp)
+    if err != nil {
+        return nil, err
+    }
+    return caller.LastResults["body"].([]byte), nil
 }
 // CallPost calls the POST method on the HTTP server
-func (caller *HTTPCaller) CallPost(url string, subpath string, body map[string]interface{}, types ...string) ([]byte, error) {
-    postBody, _ := json.Marshal(body)
+func (caller *HTTPCaller) CallPost(url string, subpath string, body interface{}, types ...string) ([]byte, error) {
+    postBody, err := json.Marshal(body)
+    if err != nil {
+        return nil, err
+    }
     responseBody := bytes.NewBuffer(postBody)
     contentType := "application/json"
     if len(types) > 0 {
@@ -102,7 +133,12 @@ func (caller *HTTPCaller) CallPost(url string, subpath string, body map[string]i
         return nil, err
     }
     defer resp.Body.Close()
-    return io.ReadAll(resp.Body)
+    err = caller.StoreResp(resp)
+    if err != nil {
+        return nil, err
+    }
+    return caller.LastResults["body"].([]byte), nil
 }

 // CallPut calls the PUT method on the HTTP server
@@ -120,7 +156,12 @@ func (caller *HTTPCaller) CallPut(url string, subpath string, body map[string]in
         return nil, err
     }
     defer resp.Body.Close()
-    return io.ReadAll(resp.Body)
+    err = caller.StoreResp(resp)
+    if err != nil {
+        return nil, err
+    }
+    return caller.LastResults["body"].([]byte), nil
 }

 // CallRaw calls the Raw method on the HTTP server
@@ -140,7 +181,12 @@ func (caller *HTTPCaller) CallRaw(method string, url string, subpath string,
         req.AddCookie(c)
     }
     client := &http.Client{}
-    return client.Do(req)
+    resp, err := client.Do(req)
+    if err != nil {
+        return nil, err
+    }
+    return resp, nil
 }
 // CallForm calls the Form method on the HTTP server
@@ -160,3 +206,17 @@ func (caller *HTTPCaller) CallForm(method string, url string, subpath string,
     client := &http.Client{}
     return client.Do(req)
 }
+
+// StoreResp keeps the header, status code and body of the last response on the caller.
+func (caller *HTTPCaller) StoreResp(resp *http.Response) error {
+    caller.LastResults = make(map[string]interface{})
+    caller.LastResults["header"] = resp.Header
+    caller.LastResults["code"] = resp.StatusCode
+    data, err := io.ReadAll(resp.Body)
+    if err != nil {
+        fmt.Println("Error reading the body of the last request")
+        return err
+    }
+    caller.LastResults["body"] = data
+    return nil
+}
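
Since every Call* method now routes its response through StoreResp, the status code, headers and body of the most recent call stay available on the caller. An illustrative consumer (fetchStatus and the peer URL are placeholders):

package tools

import "fmt"

// fetchStatus shows how the values stored by StoreResp can be read back after
// a call. Illustrative helper, not part of oc-lib.
func fetchStatus(caller *HTTPCaller) error {
    body, err := caller.CallGet("http://oc-peer:8080", "/oc/version/status")
    if err != nil {
        return err
    }
    code := caller.LastResults["code"].(int) // resp.StatusCode stored by StoreResp
    fmt.Printf("HTTP %d, %d bytes, headers: %v\n", code, len(body), caller.LastResults["header"])
    return nil
}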