Compare commits

50 Commits

services_d ... feature/ad
| Author | SHA1 | Date |
|---|---|---|
|  | b3524ccfad |  |
|  | e2d1746396 |  |
|  | 5f70feab59 |  |
|  | fb8d994be3 |  |
|  | 6f7acee2df |  |
|  | 31580f1905 |  |
|  | 04d6001fec |  |
|  | e2ceb6e58d |  |
|  | cd804fbeb5 |  |
|  | 9aefa18ea8 |  |
|  | 27fd603e36 |  |
|  | c31184e2ec |  |
|  | 5d8143c93e |  |
|  | 77a9b0770e |  |
|  | 9a17623cab |  |
|  | 4963284056 |  |
|  | df09585cc9 |  |
|  | aa20edaf25 |  |
|  | 42ee6abcb6 |  |
|  | 08ade1af66 |  |
|  | 73e1747c91 |  |
|  | 32ce70da6e |  |
|  | def56e5822 |  |
|  | 37c561c5fe |  |
|  | 3f533a1bfb |  |
|  | 3fa2cd3336 |  |
|  | da9a7d3a49 |  |
|  | 788a3174ea |  |
|  | 47363566b2 |  |
|  | 697d7a7145 |  |
|  | e4874697bc |  |
|  | 99ae1e6629 |  |
|  | 91a87fbc4d |  |
|  | 34547e8b2f |  |
|  | df6e3d5a46 |  |
|  | a0b1117075 |  |
|  | 80e81820a4 |  |
|  | a1d28f2563 |  |
|  | 4e06971668 |  |
|  | 4c51de03bc |  |
|  | 7d7835919e |  |
|  | ae6c533397 |  |
|  | 0331727bfd |  |
|  | 5c64ed3549 |  |
|  | 92ae269685 |  |
|  | 310395f869 |  |
|  | f388351476 |  |
|  | 42375a60cb |  |
|  | 3e4ac40109 |  |
|  | ae5c1ee879 |  |

.vscode/launch.json (18 lines removed, vendored)
							| @@ -1,18 +0,0 @@ | ||||
| { | ||||
|     // Use IntelliSense to learn about possible attributes. | ||||
|     // Hover to view descriptions of existing attributes. | ||||
|     // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 | ||||
|     "version": "0.2.0", | ||||
|     "configurations": [ | ||||
|         { | ||||
|             "name": "Launch Package", | ||||
|             "type": "go", | ||||
|             "request": "launch", | ||||
|             "mode": "auto", | ||||
|             "program": "${fileDirname}", | ||||
|             "args": ["-e","e3b7772e-dc9f-4bc1-9f5d-533e23e6cd57" ,"-u","http://127.0.0.1:3100","-m" ,"mongodb://127.0.0.1:27017","-d","DC_myDC"], | ||||
|             "env": {"OCMONITOR_LOKIURL":"http://127.0.0.1:3100","OCMONITOR_WORKFLOW":"8d0f1814-b5ca-436c-ba3f-116d99198fd2","KUBERNETES_SERVICE_HOST":"","OCMONITOR_MONGOURL":"mongodb://127.0.0.1:27017","OCMONITOR_DATABASE":"DC_myDC","test_service":"true"} | ||||
|         } | ||||
|     ] | ||||
| } | ||||
|  | ||||
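The removed launch configuration above passes the same settings both as CLI flags (-e, -u, -m, -d) and as OCMONITOR_* environment variables. As a rough illustration only (this is not the loader oc-monitord actually uses, and the struct name is an assumption), reading those variables into a struct could look like:

```go
package conf

import "os"

// EnvSettings mirrors the OCMONITOR_* variables from the removed launch.json.
// Illustrative sketch: field names and the loading mechanism are assumptions.
type EnvSettings struct {
	LokiURL    string
	WorkflowID string
	MongoURL   string
	Database   string
}

// FromEnv reads the monitoring settings from the environment.
func FromEnv() EnvSettings {
	return EnvSettings{
		LokiURL:    os.Getenv("OCMONITOR_LOKIURL"),
		WorkflowID: os.Getenv("OCMONITOR_WORKFLOW"),
		MongoURL:   os.Getenv("OCMONITOR_MONGOURL"),
		Database:   os.Getenv("OCMONITOR_DATABASE"),
	}
}
```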
							
								
								
									
Dockerfile (24 lines changed)
							| @@ -1,12 +1,28 @@ | ||||
| FROM golang:alpine AS builder | ||||
| FROM golang:alpine AS deps | ||||
|  | ||||
| WORKDIR /app | ||||
| COPY go.mod go.sum ./ | ||||
| RUN sed -i '/replace/d' go.mod | ||||
| RUN go mod download -x | ||||
|  | ||||
| #---------------------------------------------------------------------------------------------- | ||||
|  | ||||
| FROM golang:alpine AS builder | ||||
| LABEL maintainer="IRT PFN" | ||||
| ENV DOCKER_ENVIRONMENT=true | ||||
| WORKDIR /app | ||||
|  | ||||
| COPY --from=deps /go/pkg /go/pkg | ||||
| COPY --from=deps /app/go.mod /app/go.sum ./ | ||||
|  | ||||
| COPY . . | ||||
|  | ||||
| RUN go build . | ||||
|  | ||||
| FROM argoproj/argocd:latest | ||||
| FROM scratch | ||||
|  | ||||
| WORKDIR /app | ||||
|  | ||||
| COPY conf/docker_ocmonitord_conf.json /app/conf/ | ||||
| COPY --from=builder /app/oc-monitord . | ||||
| COPY --from=builder /app/oc-monitord . | ||||
|  | ||||
| ENTRYPOINT ["./oc-monitord"] | ||||
							
								
								
									
LICENSE.md (660 lines added, new file)
							| @@ -0,0 +1,660 @@ | ||||
| # GNU AFFERO GENERAL PUBLIC LICENSE | ||||
|  | ||||
| Version 3, 19 November 2007 | ||||
|  | ||||
| Copyright (C) 2007 Free Software Foundation, Inc. | ||||
| <https://fsf.org/> | ||||
|  | ||||
| Everyone is permitted to copy and distribute verbatim copies of this | ||||
| license document, but changing it is not allowed. | ||||
|  | ||||
| ## Preamble | ||||
|  | ||||
| The GNU Affero General Public License is a free, copyleft license for | ||||
| software and other kinds of works, specifically designed to ensure | ||||
| cooperation with the community in the case of network server software. | ||||
|  | ||||
| The licenses for most software and other practical works are designed | ||||
| to take away your freedom to share and change the works. By contrast, | ||||
| our General Public Licenses are intended to guarantee your freedom to | ||||
| share and change all versions of a program--to make sure it remains | ||||
| free software for all its users. | ||||
|  | ||||
| When we speak of free software, we are referring to freedom, not | ||||
| price. Our General Public Licenses are designed to make sure that you | ||||
| have the freedom to distribute copies of free software (and charge for | ||||
| them if you wish), that you receive source code or can get it if you | ||||
| want it, that you can change the software or use pieces of it in new | ||||
| free programs, and that you know you can do these things. | ||||
|  | ||||
| Developers that use our General Public Licenses protect your rights | ||||
| with two steps: (1) assert copyright on the software, and (2) offer | ||||
| you this License which gives you legal permission to copy, distribute | ||||
| and/or modify the software. | ||||
|  | ||||
| A secondary benefit of defending all users' freedom is that | ||||
| improvements made in alternate versions of the program, if they | ||||
| receive widespread use, become available for other developers to | ||||
| incorporate. Many developers of free software are heartened and | ||||
| encouraged by the resulting cooperation. However, in the case of | ||||
| software used on network servers, this result may fail to come about. | ||||
| The GNU General Public License permits making a modified version and | ||||
| letting the public access it on a server without ever releasing its | ||||
| source code to the public. | ||||
|  | ||||
| The GNU Affero General Public License is designed specifically to | ||||
| ensure that, in such cases, the modified source code becomes available | ||||
| to the community. It requires the operator of a network server to | ||||
| provide the source code of the modified version running there to the | ||||
| users of that server. Therefore, public use of a modified version, on | ||||
| a publicly accessible server, gives the public access to the source | ||||
| code of the modified version. | ||||
|  | ||||
| An older license, called the Affero General Public License and | ||||
| published by Affero, was designed to accomplish similar goals. This is | ||||
| a different license, not a version of the Affero GPL, but Affero has | ||||
| released a new version of the Affero GPL which permits relicensing | ||||
| under this license. | ||||
|  | ||||
| The precise terms and conditions for copying, distribution and | ||||
| modification follow. | ||||
|  | ||||
| ## TERMS AND CONDITIONS | ||||
|  | ||||
| ### 0. Definitions. | ||||
|  | ||||
| "This License" refers to version 3 of the GNU Affero General Public | ||||
| License. | ||||
|  | ||||
| "Copyright" also means copyright-like laws that apply to other kinds | ||||
| of works, such as semiconductor masks. | ||||
|  | ||||
| "The Program" refers to any copyrightable work licensed under this | ||||
| License. Each licensee is addressed as "you". "Licensees" and | ||||
| "recipients" may be individuals or organizations. | ||||
|  | ||||
| To "modify" a work means to copy from or adapt all or part of the work | ||||
| in a fashion requiring copyright permission, other than the making of | ||||
| an exact copy. The resulting work is called a "modified version" of | ||||
| the earlier work or a work "based on" the earlier work. | ||||
|  | ||||
| A "covered work" means either the unmodified Program or a work based | ||||
| on the Program. | ||||
|  | ||||
| To "propagate" a work means to do anything with it that, without | ||||
| permission, would make you directly or secondarily liable for | ||||
| infringement under applicable copyright law, except executing it on a | ||||
| computer or modifying a private copy. Propagation includes copying, | ||||
| distribution (with or without modification), making available to the | ||||
| public, and in some countries other activities as well. | ||||
|  | ||||
| To "convey" a work means any kind of propagation that enables other | ||||
| parties to make or receive copies. Mere interaction with a user | ||||
| through a computer network, with no transfer of a copy, is not | ||||
| conveying. | ||||
|  | ||||
| An interactive user interface displays "Appropriate Legal Notices" to | ||||
| the extent that it includes a convenient and prominently visible | ||||
| feature that (1) displays an appropriate copyright notice, and (2) | ||||
| tells the user that there is no warranty for the work (except to the | ||||
| extent that warranties are provided), that licensees may convey the | ||||
| work under this License, and how to view a copy of this License. If | ||||
| the interface presents a list of user commands or options, such as a | ||||
| menu, a prominent item in the list meets this criterion. | ||||
|  | ||||
| ### 1. Source Code. | ||||
|  | ||||
| The "source code" for a work means the preferred form of the work for | ||||
| making modifications to it. "Object code" means any non-source form of | ||||
| a work. | ||||
|  | ||||
| A "Standard Interface" means an interface that either is an official | ||||
| standard defined by a recognized standards body, or, in the case of | ||||
| interfaces specified for a particular programming language, one that | ||||
| is widely used among developers working in that language. | ||||
|  | ||||
| The "System Libraries" of an executable work include anything, other | ||||
| than the work as a whole, that (a) is included in the normal form of | ||||
| packaging a Major Component, but which is not part of that Major | ||||
| Component, and (b) serves only to enable use of the work with that | ||||
| Major Component, or to implement a Standard Interface for which an | ||||
| implementation is available to the public in source code form. A | ||||
| "Major Component", in this context, means a major essential component | ||||
| (kernel, window system, and so on) of the specific operating system | ||||
| (if any) on which the executable work runs, or a compiler used to | ||||
| produce the work, or an object code interpreter used to run it. | ||||
|  | ||||
| The "Corresponding Source" for a work in object code form means all | ||||
| the source code needed to generate, install, and (for an executable | ||||
| work) run the object code and to modify the work, including scripts to | ||||
| control those activities. However, it does not include the work's | ||||
| System Libraries, or general-purpose tools or generally available free | ||||
| programs which are used unmodified in performing those activities but | ||||
| which are not part of the work. For example, Corresponding Source | ||||
| includes interface definition files associated with source files for | ||||
| the work, and the source code for shared libraries and dynamically | ||||
| linked subprograms that the work is specifically designed to require, | ||||
| such as by intimate data communication or control flow between those | ||||
| subprograms and other parts of the work. | ||||
|  | ||||
| The Corresponding Source need not include anything that users can | ||||
| regenerate automatically from other parts of the Corresponding Source. | ||||
|  | ||||
| The Corresponding Source for a work in source code form is that same | ||||
| work. | ||||
|  | ||||
| ### 2. Basic Permissions. | ||||
|  | ||||
| All rights granted under this License are granted for the term of | ||||
| copyright on the Program, and are irrevocable provided the stated | ||||
| conditions are met. This License explicitly affirms your unlimited | ||||
| permission to run the unmodified Program. The output from running a | ||||
| covered work is covered by this License only if the output, given its | ||||
| content, constitutes a covered work. This License acknowledges your | ||||
| rights of fair use or other equivalent, as provided by copyright law. | ||||
|  | ||||
| You may make, run and propagate covered works that you do not convey, | ||||
| without conditions so long as your license otherwise remains in force. | ||||
| You may convey covered works to others for the sole purpose of having | ||||
| them make modifications exclusively for you, or provide you with | ||||
| facilities for running those works, provided that you comply with the | ||||
| terms of this License in conveying all material for which you do not | ||||
| control copyright. Those thus making or running the covered works for | ||||
| you must do so exclusively on your behalf, under your direction and | ||||
| control, on terms that prohibit them from making any copies of your | ||||
| copyrighted material outside their relationship with you. | ||||
|  | ||||
| Conveying under any other circumstances is permitted solely under the | ||||
| conditions stated below. Sublicensing is not allowed; section 10 makes | ||||
| it unnecessary. | ||||
|  | ||||
| ### 3. Protecting Users' Legal Rights From Anti-Circumvention Law. | ||||
|  | ||||
| No covered work shall be deemed part of an effective technological | ||||
| measure under any applicable law fulfilling obligations under article | ||||
| 11 of the WIPO copyright treaty adopted on 20 December 1996, or | ||||
| similar laws prohibiting or restricting circumvention of such | ||||
| measures. | ||||
|  | ||||
| When you convey a covered work, you waive any legal power to forbid | ||||
| circumvention of technological measures to the extent such | ||||
| circumvention is effected by exercising rights under this License with | ||||
| respect to the covered work, and you disclaim any intention to limit | ||||
| operation or modification of the work as a means of enforcing, against | ||||
| the work's users, your or third parties' legal rights to forbid | ||||
| circumvention of technological measures. | ||||
|  | ||||
| ### 4. Conveying Verbatim Copies. | ||||
|  | ||||
| You may convey verbatim copies of the Program's source code as you | ||||
| receive it, in any medium, provided that you conspicuously and | ||||
| appropriately publish on each copy an appropriate copyright notice; | ||||
| keep intact all notices stating that this License and any | ||||
| non-permissive terms added in accord with section 7 apply to the code; | ||||
| keep intact all notices of the absence of any warranty; and give all | ||||
| recipients a copy of this License along with the Program. | ||||
|  | ||||
| You may charge any price or no price for each copy that you convey, | ||||
| and you may offer support or warranty protection for a fee. | ||||
|  | ||||
| ### 5. Conveying Modified Source Versions. | ||||
|  | ||||
| You may convey a work based on the Program, or the modifications to | ||||
| produce it from the Program, in the form of source code under the | ||||
| terms of section 4, provided that you also meet all of these | ||||
| conditions: | ||||
|  | ||||
| -   a) The work must carry prominent notices stating that you modified | ||||
|     it, and giving a relevant date. | ||||
| -   b) The work must carry prominent notices stating that it is | ||||
|     released under this License and any conditions added under | ||||
|     section 7. This requirement modifies the requirement in section 4 | ||||
|     to "keep intact all notices". | ||||
| -   c) You must license the entire work, as a whole, under this | ||||
|     License to anyone who comes into possession of a copy. This | ||||
|     License will therefore apply, along with any applicable section 7 | ||||
|     additional terms, to the whole of the work, and all its parts, | ||||
|     regardless of how they are packaged. This License gives no | ||||
|     permission to license the work in any other way, but it does not | ||||
|     invalidate such permission if you have separately received it. | ||||
| -   d) If the work has interactive user interfaces, each must display | ||||
|     Appropriate Legal Notices; however, if the Program has interactive | ||||
|     interfaces that do not display Appropriate Legal Notices, your | ||||
|     work need not make them do so. | ||||
|  | ||||
| A compilation of a covered work with other separate and independent | ||||
| works, which are not by their nature extensions of the covered work, | ||||
| and which are not combined with it such as to form a larger program, | ||||
| in or on a volume of a storage or distribution medium, is called an | ||||
| "aggregate" if the compilation and its resulting copyright are not | ||||
| used to limit the access or legal rights of the compilation's users | ||||
| beyond what the individual works permit. Inclusion of a covered work | ||||
| in an aggregate does not cause this License to apply to the other | ||||
| parts of the aggregate. | ||||
|  | ||||
| ### 6. Conveying Non-Source Forms. | ||||
|  | ||||
| You may convey a covered work in object code form under the terms of | ||||
| sections 4 and 5, provided that you also convey the machine-readable | ||||
| Corresponding Source under the terms of this License, in one of these | ||||
| ways: | ||||
|  | ||||
| -   a) Convey the object code in, or embodied in, a physical product | ||||
|     (including a physical distribution medium), accompanied by the | ||||
|     Corresponding Source fixed on a durable physical medium | ||||
|     customarily used for software interchange. | ||||
| -   b) Convey the object code in, or embodied in, a physical product | ||||
|     (including a physical distribution medium), accompanied by a | ||||
|     written offer, valid for at least three years and valid for as | ||||
|     long as you offer spare parts or customer support for that product | ||||
|     model, to give anyone who possesses the object code either (1) a | ||||
|     copy of the Corresponding Source for all the software in the | ||||
|     product that is covered by this License, on a durable physical | ||||
|     medium customarily used for software interchange, for a price no | ||||
|     more than your reasonable cost of physically performing this | ||||
|     conveying of source, or (2) access to copy the Corresponding | ||||
|     Source from a network server at no charge. | ||||
| -   c) Convey individual copies of the object code with a copy of the | ||||
|     written offer to provide the Corresponding Source. This | ||||
|     alternative is allowed only occasionally and noncommercially, and | ||||
|     only if you received the object code with such an offer, in accord | ||||
|     with subsection 6b. | ||||
| -   d) Convey the object code by offering access from a designated | ||||
|     place (gratis or for a charge), and offer equivalent access to the | ||||
|     Corresponding Source in the same way through the same place at no | ||||
|     further charge. You need not require recipients to copy the | ||||
|     Corresponding Source along with the object code. If the place to | ||||
|     copy the object code is a network server, the Corresponding Source | ||||
|     may be on a different server (operated by you or a third party) | ||||
|     that supports equivalent copying facilities, provided you maintain | ||||
|     clear directions next to the object code saying where to find the | ||||
|     Corresponding Source. Regardless of what server hosts the | ||||
|     Corresponding Source, you remain obligated to ensure that it is | ||||
|     available for as long as needed to satisfy these requirements. | ||||
| -   e) Convey the object code using peer-to-peer transmission, | ||||
|     provided you inform other peers where the object code and | ||||
|     Corresponding Source of the work are being offered to the general | ||||
|     public at no charge under subsection 6d. | ||||
|  | ||||
| A separable portion of the object code, whose source code is excluded | ||||
| from the Corresponding Source as a System Library, need not be | ||||
| included in conveying the object code work. | ||||
|  | ||||
| A "User Product" is either (1) a "consumer product", which means any | ||||
| tangible personal property which is normally used for personal, | ||||
| family, or household purposes, or (2) anything designed or sold for | ||||
| incorporation into a dwelling. In determining whether a product is a | ||||
| consumer product, doubtful cases shall be resolved in favor of | ||||
| coverage. For a particular product received by a particular user, | ||||
| "normally used" refers to a typical or common use of that class of | ||||
| product, regardless of the status of the particular user or of the way | ||||
| in which the particular user actually uses, or expects or is expected | ||||
| to use, the product. A product is a consumer product regardless of | ||||
| whether the product has substantial commercial, industrial or | ||||
| non-consumer uses, unless such uses represent the only significant | ||||
| mode of use of the product. | ||||
|  | ||||
| "Installation Information" for a User Product means any methods, | ||||
| procedures, authorization keys, or other information required to | ||||
| install and execute modified versions of a covered work in that User | ||||
| Product from a modified version of its Corresponding Source. The | ||||
| information must suffice to ensure that the continued functioning of | ||||
| the modified object code is in no case prevented or interfered with | ||||
| solely because modification has been made. | ||||
|  | ||||
| If you convey an object code work under this section in, or with, or | ||||
| specifically for use in, a User Product, and the conveying occurs as | ||||
| part of a transaction in which the right of possession and use of the | ||||
| User Product is transferred to the recipient in perpetuity or for a | ||||
| fixed term (regardless of how the transaction is characterized), the | ||||
| Corresponding Source conveyed under this section must be accompanied | ||||
| by the Installation Information. But this requirement does not apply | ||||
| if neither you nor any third party retains the ability to install | ||||
| modified object code on the User Product (for example, the work has | ||||
| been installed in ROM). | ||||
|  | ||||
| The requirement to provide Installation Information does not include a | ||||
| requirement to continue to provide support service, warranty, or | ||||
| updates for a work that has been modified or installed by the | ||||
| recipient, or for the User Product in which it has been modified or | ||||
| installed. Access to a network may be denied when the modification | ||||
| itself materially and adversely affects the operation of the network | ||||
| or violates the rules and protocols for communication across the | ||||
| network. | ||||
|  | ||||
| Corresponding Source conveyed, and Installation Information provided, | ||||
| in accord with this section must be in a format that is publicly | ||||
| documented (and with an implementation available to the public in | ||||
| source code form), and must require no special password or key for | ||||
| unpacking, reading or copying. | ||||
|  | ||||
| ### 7. Additional Terms. | ||||
|  | ||||
| "Additional permissions" are terms that supplement the terms of this | ||||
| License by making exceptions from one or more of its conditions. | ||||
| Additional permissions that are applicable to the entire Program shall | ||||
| be treated as though they were included in this License, to the extent | ||||
| that they are valid under applicable law. If additional permissions | ||||
| apply only to part of the Program, that part may be used separately | ||||
| under those permissions, but the entire Program remains governed by | ||||
| this License without regard to the additional permissions. | ||||
|  | ||||
| When you convey a copy of a covered work, you may at your option | ||||
| remove any additional permissions from that copy, or from any part of | ||||
| it. (Additional permissions may be written to require their own | ||||
| removal in certain cases when you modify the work.) You may place | ||||
| additional permissions on material, added by you to a covered work, | ||||
| for which you have or can give appropriate copyright permission. | ||||
|  | ||||
| Notwithstanding any other provision of this License, for material you | ||||
| add to a covered work, you may (if authorized by the copyright holders | ||||
| of that material) supplement the terms of this License with terms: | ||||
|  | ||||
| -   a) Disclaiming warranty or limiting liability differently from the | ||||
|     terms of sections 15 and 16 of this License; or | ||||
| -   b) Requiring preservation of specified reasonable legal notices or | ||||
|     author attributions in that material or in the Appropriate Legal | ||||
|     Notices displayed by works containing it; or | ||||
| -   c) Prohibiting misrepresentation of the origin of that material, | ||||
|     or requiring that modified versions of such material be marked in | ||||
|     reasonable ways as different from the original version; or | ||||
| -   d) Limiting the use for publicity purposes of names of licensors | ||||
|     or authors of the material; or | ||||
| -   e) Declining to grant rights under trademark law for use of some | ||||
|     trade names, trademarks, or service marks; or | ||||
| -   f) Requiring indemnification of licensors and authors of that | ||||
|     material by anyone who conveys the material (or modified versions | ||||
|     of it) with contractual assumptions of liability to the recipient, | ||||
|     for any liability that these contractual assumptions directly | ||||
|     impose on those licensors and authors. | ||||
|  | ||||
| All other non-permissive additional terms are considered "further | ||||
| restrictions" within the meaning of section 10. If the Program as you | ||||
| received it, or any part of it, contains a notice stating that it is | ||||
| governed by this License along with a term that is a further | ||||
| restriction, you may remove that term. If a license document contains | ||||
| a further restriction but permits relicensing or conveying under this | ||||
| License, you may add to a covered work material governed by the terms | ||||
| of that license document, provided that the further restriction does | ||||
| not survive such relicensing or conveying. | ||||
|  | ||||
| If you add terms to a covered work in accord with this section, you | ||||
| must place, in the relevant source files, a statement of the | ||||
| additional terms that apply to those files, or a notice indicating | ||||
| where to find the applicable terms. | ||||
|  | ||||
| Additional terms, permissive or non-permissive, may be stated in the | ||||
| form of a separately written license, or stated as exceptions; the | ||||
| above requirements apply either way. | ||||
|  | ||||
| ### 8. Termination. | ||||
|  | ||||
| You may not propagate or modify a covered work except as expressly | ||||
| provided under this License. Any attempt otherwise to propagate or | ||||
| modify it is void, and will automatically terminate your rights under | ||||
| this License (including any patent licenses granted under the third | ||||
| paragraph of section 11). | ||||
|  | ||||
| However, if you cease all violation of this License, then your license | ||||
| from a particular copyright holder is reinstated (a) provisionally, | ||||
| unless and until the copyright holder explicitly and finally | ||||
| terminates your license, and (b) permanently, if the copyright holder | ||||
| fails to notify you of the violation by some reasonable means prior to | ||||
| 60 days after the cessation. | ||||
|  | ||||
| Moreover, your license from a particular copyright holder is | ||||
| reinstated permanently if the copyright holder notifies you of the | ||||
| violation by some reasonable means, this is the first time you have | ||||
| received notice of violation of this License (for any work) from that | ||||
| copyright holder, and you cure the violation prior to 30 days after | ||||
| your receipt of the notice. | ||||
|  | ||||
| Termination of your rights under this section does not terminate the | ||||
| licenses of parties who have received copies or rights from you under | ||||
| this License. If your rights have been terminated and not permanently | ||||
| reinstated, you do not qualify to receive new licenses for the same | ||||
| material under section 10. | ||||
|  | ||||
| ### 9. Acceptance Not Required for Having Copies. | ||||
|  | ||||
| You are not required to accept this License in order to receive or run | ||||
| a copy of the Program. Ancillary propagation of a covered work | ||||
| occurring solely as a consequence of using peer-to-peer transmission | ||||
| to receive a copy likewise does not require acceptance. However, | ||||
| nothing other than this License grants you permission to propagate or | ||||
| modify any covered work. These actions infringe copyright if you do | ||||
| not accept this License. Therefore, by modifying or propagating a | ||||
| covered work, you indicate your acceptance of this License to do so. | ||||
|  | ||||
| ### 10. Automatic Licensing of Downstream Recipients. | ||||
|  | ||||
| Each time you convey a covered work, the recipient automatically | ||||
| receives a license from the original licensors, to run, modify and | ||||
| propagate that work, subject to this License. You are not responsible | ||||
| for enforcing compliance by third parties with this License. | ||||
|  | ||||
| An "entity transaction" is a transaction transferring control of an | ||||
| organization, or substantially all assets of one, or subdividing an | ||||
| organization, or merging organizations. If propagation of a covered | ||||
| work results from an entity transaction, each party to that | ||||
| transaction who receives a copy of the work also receives whatever | ||||
| licenses to the work the party's predecessor in interest had or could | ||||
| give under the previous paragraph, plus a right to possession of the | ||||
| Corresponding Source of the work from the predecessor in interest, if | ||||
| the predecessor has it or can get it with reasonable efforts. | ||||
|  | ||||
| You may not impose any further restrictions on the exercise of the | ||||
| rights granted or affirmed under this License. For example, you may | ||||
| not impose a license fee, royalty, or other charge for exercise of | ||||
| rights granted under this License, and you may not initiate litigation | ||||
| (including a cross-claim or counterclaim in a lawsuit) alleging that | ||||
| any patent claim is infringed by making, using, selling, offering for | ||||
| sale, or importing the Program or any portion of it. | ||||
|  | ||||
| ### 11. Patents. | ||||
|  | ||||
| A "contributor" is a copyright holder who authorizes use under this | ||||
| License of the Program or a work on which the Program is based. The | ||||
| work thus licensed is called the contributor's "contributor version". | ||||
|  | ||||
| A contributor's "essential patent claims" are all patent claims owned | ||||
| or controlled by the contributor, whether already acquired or | ||||
| hereafter acquired, that would be infringed by some manner, permitted | ||||
| by this License, of making, using, or selling its contributor version, | ||||
| but do not include claims that would be infringed only as a | ||||
| consequence of further modification of the contributor version. For | ||||
| purposes of this definition, "control" includes the right to grant | ||||
| patent sublicenses in a manner consistent with the requirements of | ||||
| this License. | ||||
|  | ||||
| Each contributor grants you a non-exclusive, worldwide, royalty-free | ||||
| patent license under the contributor's essential patent claims, to | ||||
| make, use, sell, offer for sale, import and otherwise run, modify and | ||||
| propagate the contents of its contributor version. | ||||
|  | ||||
| In the following three paragraphs, a "patent license" is any express | ||||
| agreement or commitment, however denominated, not to enforce a patent | ||||
| (such as an express permission to practice a patent or covenant not to | ||||
| sue for patent infringement). To "grant" such a patent license to a | ||||
| party means to make such an agreement or commitment not to enforce a | ||||
| patent against the party. | ||||
|  | ||||
| If you convey a covered work, knowingly relying on a patent license, | ||||
| and the Corresponding Source of the work is not available for anyone | ||||
| to copy, free of charge and under the terms of this License, through a | ||||
| publicly available network server or other readily accessible means, | ||||
| then you must either (1) cause the Corresponding Source to be so | ||||
| available, or (2) arrange to deprive yourself of the benefit of the | ||||
| patent license for this particular work, or (3) arrange, in a manner | ||||
| consistent with the requirements of this License, to extend the patent | ||||
| license to downstream recipients. "Knowingly relying" means you have | ||||
| actual knowledge that, but for the patent license, your conveying the | ||||
| covered work in a country, or your recipient's use of the covered work | ||||
| in a country, would infringe one or more identifiable patents in that | ||||
| country that you have reason to believe are valid. | ||||
|  | ||||
| If, pursuant to or in connection with a single transaction or | ||||
| arrangement, you convey, or propagate by procuring conveyance of, a | ||||
| covered work, and grant a patent license to some of the parties | ||||
| receiving the covered work authorizing them to use, propagate, modify | ||||
| or convey a specific copy of the covered work, then the patent license | ||||
| you grant is automatically extended to all recipients of the covered | ||||
| work and works based on it. | ||||
|  | ||||
| A patent license is "discriminatory" if it does not include within the | ||||
| scope of its coverage, prohibits the exercise of, or is conditioned on | ||||
| the non-exercise of one or more of the rights that are specifically | ||||
| granted under this License. You may not convey a covered work if you | ||||
| are a party to an arrangement with a third party that is in the | ||||
| business of distributing software, under which you make payment to the | ||||
| third party based on the extent of your activity of conveying the | ||||
| work, and under which the third party grants, to any of the parties | ||||
| who would receive the covered work from you, a discriminatory patent | ||||
| license (a) in connection with copies of the covered work conveyed by | ||||
| you (or copies made from those copies), or (b) primarily for and in | ||||
| connection with specific products or compilations that contain the | ||||
| covered work, unless you entered into that arrangement, or that patent | ||||
| license was granted, prior to 28 March 2007. | ||||
|  | ||||
| Nothing in this License shall be construed as excluding or limiting | ||||
| any implied license or other defenses to infringement that may | ||||
| otherwise be available to you under applicable patent law. | ||||
|  | ||||
| ### 12. No Surrender of Others' Freedom. | ||||
|  | ||||
| If conditions are imposed on you (whether by court order, agreement or | ||||
| otherwise) that contradict the conditions of this License, they do not | ||||
| excuse you from the conditions of this License. If you cannot convey a | ||||
| covered work so as to satisfy simultaneously your obligations under | ||||
| this License and any other pertinent obligations, then as a | ||||
| consequence you may not convey it at all. For example, if you agree to | ||||
| terms that obligate you to collect a royalty for further conveying | ||||
| from those to whom you convey the Program, the only way you could | ||||
| satisfy both those terms and this License would be to refrain entirely | ||||
| from conveying the Program. | ||||
|  | ||||
| ### 13. Remote Network Interaction; Use with the GNU General Public License. | ||||
|  | ||||
| Notwithstanding any other provision of this License, if you modify the | ||||
| Program, your modified version must prominently offer all users | ||||
| interacting with it remotely through a computer network (if your | ||||
| version supports such interaction) an opportunity to receive the | ||||
| Corresponding Source of your version by providing access to the | ||||
| Corresponding Source from a network server at no charge, through some | ||||
| standard or customary means of facilitating copying of software. This | ||||
| Corresponding Source shall include the Corresponding Source for any | ||||
| work covered by version 3 of the GNU General Public License that is | ||||
| incorporated pursuant to the following paragraph. | ||||
|  | ||||
| Notwithstanding any other provision of this License, you have | ||||
| permission to link or combine any covered work with a work licensed | ||||
| under version 3 of the GNU General Public License into a single | ||||
| combined work, and to convey the resulting work. The terms of this | ||||
| License will continue to apply to the part which is the covered work, | ||||
| but the work with which it is combined will remain governed by version | ||||
| 3 of the GNU General Public License. | ||||
|  | ||||
| ### 14. Revised Versions of this License. | ||||
|  | ||||
| The Free Software Foundation may publish revised and/or new versions | ||||
| of the GNU Affero General Public License from time to time. Such new | ||||
| versions will be similar in spirit to the present version, but may | ||||
| differ in detail to address new problems or concerns. | ||||
|  | ||||
| Each version is given a distinguishing version number. If the Program | ||||
| specifies that a certain numbered version of the GNU Affero General | ||||
| Public License "or any later version" applies to it, you have the | ||||
| option of following the terms and conditions either of that numbered | ||||
| version or of any later version published by the Free Software | ||||
| Foundation. If the Program does not specify a version number of the | ||||
| GNU Affero General Public License, you may choose any version ever | ||||
| published by the Free Software Foundation. | ||||
|  | ||||
| If the Program specifies that a proxy can decide which future versions | ||||
| of the GNU Affero General Public License can be used, that proxy's | ||||
| public statement of acceptance of a version permanently authorizes you | ||||
| to choose that version for the Program. | ||||
|  | ||||
| Later license versions may give you additional or different | ||||
| permissions. However, no additional obligations are imposed on any | ||||
| author or copyright holder as a result of your choosing to follow a | ||||
| later version. | ||||
|  | ||||
| ### 15. Disclaimer of Warranty. | ||||
|  | ||||
| THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY | ||||
| APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT | ||||
| HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT | ||||
| WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT | ||||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||||
| A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND | ||||
| PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE | ||||
| DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR | ||||
| CORRECTION. | ||||
|  | ||||
| ### 16. Limitation of Liability. | ||||
|  | ||||
| IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING | ||||
| WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR | ||||
| CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, | ||||
| INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES | ||||
| ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT | ||||
| NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR | ||||
| LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM | ||||
| TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER | ||||
| PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. | ||||
|  | ||||
| ### 17. Interpretation of Sections 15 and 16. | ||||
|  | ||||
| If the disclaimer of warranty and limitation of liability provided | ||||
| above cannot be given local legal effect according to their terms, | ||||
| reviewing courts shall apply local law that most closely approximates | ||||
| an absolute waiver of all civil liability in connection with the | ||||
| Program, unless a warranty or assumption of liability accompanies a | ||||
| copy of the Program in return for a fee. | ||||
|  | ||||
| END OF TERMS AND CONDITIONS | ||||
|  | ||||
| ## How to Apply These Terms to Your New Programs | ||||
|  | ||||
| If you develop a new program, and you want it to be of the greatest | ||||
| possible use to the public, the best way to achieve this is to make it | ||||
| free software which everyone can redistribute and change under these | ||||
| terms. | ||||
|  | ||||
| To do so, attach the following notices to the program. It is safest to | ||||
| attach them to the start of each source file to most effectively state | ||||
| the exclusion of warranty; and each file should have at least the | ||||
| "copyright" line and a pointer to where the full notice is found. | ||||
|  | ||||
|         <one line to give the program's name and a brief idea of what it does.> | ||||
|         Copyright (C) <year>  <name of author> | ||||
|  | ||||
|         This program is free software: you can redistribute it and/or modify | ||||
|         it under the terms of the GNU Affero General Public License as | ||||
|         published by the Free Software Foundation, either version 3 of the | ||||
|         License, or (at your option) any later version. | ||||
|  | ||||
|         This program is distributed in the hope that it will be useful, | ||||
|         but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|         MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|         GNU Affero General Public License for more details. | ||||
|  | ||||
|         You should have received a copy of the GNU Affero General Public License | ||||
|         along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
|  | ||||
| Also add information on how to contact you by electronic and paper | ||||
| mail. | ||||
|  | ||||
| If your software can interact with users remotely through a computer | ||||
| network, you should also make sure that it provides a way for users to | ||||
| get its source. For example, if your program is a web application, its | ||||
| interface could display a "Source" link that leads users to an archive | ||||
| of the code. There are many ways you could offer source, and different | ||||
| solutions will be better for different programs; see section 13 for | ||||
| the specific requirements. | ||||
|  | ||||
| You should also get your employer (if you work as a programmer) or | ||||
| school, if any, to sign a "copyright disclaimer" for the program, if | ||||
| necessary. For more information on this, and how to apply and follow | ||||
| the GNU AGPL, see <https://www.gnu.org/licenses/>. | ||||
							
								
								
									
Makefile (25 lines added, new file)
							| @@ -0,0 +1,25 @@ | ||||
| .DEFAULT_GOAL := all | ||||
|  | ||||
| build: clean | ||||
| 	go build . | ||||
|  | ||||
| run: | ||||
| 	./oc-monitord | ||||
|  | ||||
| clean: | ||||
| 	rm -rf oc-monitord | ||||
|  | ||||
| docker: | ||||
| 	DOCKER_BUILDKIT=1 docker build -t oc/oc-monitord:0.0.1 -f Dockerfile . | ||||
| 	docker tag oc/oc-monitord:0.0.1 oc/oc-monitord:latest | ||||
| 	docker tag oc/oc-monitord:0.0.1 oc-monitord:latest | ||||
|  | ||||
| publish-kind: | ||||
| 	kind load docker-image oc/oc-monitord:0.0.1 --name opencloud | ||||
|  | ||||
| publish-registry: | ||||
| 	@echo "TODO" | ||||
|  | ||||
| all: docker publish-kind publish-registry | ||||
|  | ||||
| .PHONY: build run clean docker publish-kind publish-registry | ||||
							
								
								
									
README.md (74 lines changed)
							| @@ -47,82 +47,18 @@ In rules add a new entry : | ||||
|  | ||||
| This command **must return "yes"** | ||||
|  | ||||
| ## Allow services to be joined with reverse proxy | ||||
|  | ||||
| Since the development has been realised in a K3S environment, we will use the lightweight solution provided by **traefik**.  | ||||
| # Notes features/admiralty-docker | ||||
|  | ||||
| We need to install **metallb** to expose our cluster to the exterior and allow packets to reach traefik. | ||||
| - When executing monitord as a container we need to change any URL containing "localhost" to the container's host IP.  | ||||
|  | ||||
| ### Deploy traefik and metallb | ||||
|   We can :  | ||||
|   - declare a new parameter 'HOST_IP' | ||||
|   - decide that no peer can have "http://localhost" as its URL, and use an attribute from the peer object or isMyself() from oc-lib to determine whether a peer is the current host. | ||||
|  | ||||
| - Make sure that helm is installed, else visit : https://helm.sh/docs/intro/install/  | ||||
|  | ||||
| - Add the repositories for traefik and metallb | ||||
| > helm repo add metallb https://metallb.github.io/metallb  | ||||
| > helm repo add traefik https://helm.traefik.io/traefik | ||||
|  | ||||
| >helm repo update | ||||
|  | ||||
| - Create the namespaces for each | ||||
| > kubectl create ns traefik-ingress | ||||
| > kubectl create ns metallb-system  | ||||
|  | ||||
| - Configure the deployment  | ||||
|  | ||||
| ``` | ||||
| cat > traefik-values.yaml <<EOF | ||||
| globalArguments: | ||||
| deployment: | ||||
|   kind: DaemonSet | ||||
| providers: | ||||
|   kubernetesCRD: | ||||
|     enabled: true | ||||
| service: | ||||
|   type: LoadBalancer | ||||
| ingressRoute: | ||||
|   dashboard: | ||||
|     enabled: false | ||||
| EOF | ||||
| ``` | ||||
|  | ||||
| - Launch the installs | ||||
| > helm upgrade --install metallb metallb/metallb --namespace metallb-system | ||||
|  | ||||
| > helm install --namespace=traefik-ingress traefik traefik/traefik --values=./traefik-values.yaml | ||||
|  | ||||
| ### Configure metallb | ||||
|  | ||||
| ``` | ||||
| cat << 'EOF' | kubectl apply -f - | ||||
| apiVersion: metallb.io/v1beta1 | ||||
| kind: IPAddressPool | ||||
| metadata: | ||||
|   name: default-pool | ||||
|   namespace: metallb-system | ||||
| spec: | ||||
|   addresses: | ||||
|   - 192.168.0.200-192.168.0.250 | ||||
| --- | ||||
| apiVersion: metallb.io/v1beta1 | ||||
| kind: L2Advertisement | ||||
| metadata: | ||||
|   name: default | ||||
|   namespace: metallb-system | ||||
| spec: | ||||
|   ipAddressPools: | ||||
|   - default-pool | ||||
| EOF | ||||
| ``` | ||||
|  | ||||
| - Check that the services created in traefik-ingress have an external IP | ||||
|  | ||||
| > kubectl get service -n traefik-ingress -o wide | ||||
|  | ||||
| ## TODO | ||||
|  | ||||
| - [ ] Log the output of each pod:  | ||||
|   - logsPods() function already exists | ||||
|   - need to implement the logic to create each pod's logger and start the monitoring routine | ||||
| - [ ] Allow the front end to know on which IP the services are reachable | ||||
|   - currently doing it by using `kubectl get nodes -o wide` | ||||
|  | ||||
|   | ||||
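The admiralty-docker note in the README diff above proposes replacing "localhost" in peer URLs with the container's host IP, either via a new HOST_IP parameter or via oc-lib's isMyself(). A minimal sketch of that substitution, assuming the hypothetical HOST_IP variable (not the project's actual implementation), might look like:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// rewriteLocalhost swaps localhost/127.0.0.1 for the host IP when HOST_IP is set.
// HOST_IP is the hypothetical parameter suggested in the README note.
func rewriteLocalhost(url string) string {
	hostIP := os.Getenv("HOST_IP")
	if hostIP == "" {
		return url // not running in a container, keep the URL as-is
	}
	url = strings.ReplaceAll(url, "://localhost", "://"+hostIP)
	return strings.ReplaceAll(url, "://127.0.0.1", "://"+hostIP)
}

func main() {
	// Prints e.g. http://192.168.1.10:3100 when HOST_IP=192.168.1.10 is set.
	fmt.Println(rewriteLocalhost("http://localhost:3100"))
}
```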
							
								
								
									
conf/conf.go (16 lines changed)
							| @@ -3,14 +3,22 @@ package conf | ||||
| import "sync" | ||||
|  | ||||
| type Config struct { | ||||
| 	MongoURL	string | ||||
| 	Database	string | ||||
| 	MongoURL    string | ||||
| 	Database    string | ||||
| 	LokiURL     string | ||||
| 	NatsURL		string | ||||
| 	NatsURL     string | ||||
| 	ExecutionID string | ||||
| 	PeerID      string | ||||
| 	Timeout     int | ||||
| 	WorkflowID  string | ||||
| 	Logs 		string | ||||
| 	Logs        string | ||||
| 	Mode        string | ||||
| 	KubeHost    string | ||||
| 	KubePort    string | ||||
| 	KubeCA      string | ||||
| 	KubeCert    string | ||||
| 	KubeData    string | ||||
| 	ArgoHost	string 		// when executed in a container will replace addresses with "localhost" in their url | ||||
| } | ||||
|  | ||||
| var instance *Config | ||||
|   | ||||
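The conf/conf.go hunk stops right after `var instance *Config`; together with the `sync` import this suggests a once-guarded singleton accessor. The rest of the file is not shown, so the following is only an assumed sketch of that pattern, not the actual code:

```go
// Assumed continuation of conf/conf.go: a typical sync.Once-guarded accessor.
var once sync.Once

func GetConfig() *Config {
	once.Do(func() {
		instance = &Config{}
	})
	return instance
}
```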
nginx.conf (30 lines removed)
| @@ -1,30 +0,0 @@ | ||||
| # nginx.conf | ||||
| user  nginx; | ||||
| worker_processes  auto; | ||||
| error_log  /var/log/nginx/error.log warn; | ||||
| pid        /var/run/nginx.pid; | ||||
| events { | ||||
|   worker_connections  1024; | ||||
| } | ||||
| http { | ||||
|   include       /etc/nginx/mime.types; | ||||
|   default_type  application/octet-stream; | ||||
|   log_format  main  '$remote_addr - $remote_user [$time_local] "$request" ' | ||||
|   '$status $body_bytes_sent "$http_referer" ' | ||||
|   '"$http_user_agent" "$http_x_forwarded_for"'; | ||||
|   access_log  /var/log/nginx/access.log  main; | ||||
|   sendfile        on; | ||||
|   #tcp_nopush     on; | ||||
|    | ||||
|   keepalive_timeout  65; | ||||
|   #gzip  on; | ||||
|   #include /etc/nginx/conf.d/*.conf; | ||||
| server { | ||||
|   listen 80; | ||||
|   location / { | ||||
|     root   /usr/share/nginx/html; | ||||
|     index  index.html index.htm; | ||||
|     try_files $uri $uri/ /index.html; | ||||
|   } | ||||
| } | ||||
| } | ||||
| @@ -5,7 +5,7 @@ | ||||
| 		"7c71a15b-bdbc-46d7-9dab-67e369804136", | ||||
| 		"0d565c87-50ae-4a73-843d-f8b2d4047772" | ||||
| 	], | ||||
| 	"datacenters": [ | ||||
| 	"computes": [ | ||||
| 		"7b989e97-c3e7-49d2-a3a7-f959da4870b5" | ||||
| 	], | ||||
| 	"graph": { | ||||
| @@ -183,7 +183,7 @@ | ||||
| 					"y": 0 | ||||
| 				}, | ||||
| 				"itemresource": { | ||||
| 					"datacenter": { | ||||
| 					"compute": { | ||||
| 						"cpus": [ | ||||
| 							{ | ||||
| 								"architecture": "x86", | ||||
| @@ -214,16 +214,16 @@ | ||||
| 						"abstractresource": { | ||||
| 							"abstractobject": { | ||||
| 								"id": "7b989e97-c3e7-49d2-a3a7-f959da4870b5", | ||||
| 								"name": "Mundi datacenter" | ||||
| 								"name": "Mundi compute" | ||||
| 							}, | ||||
| 							"short_description": "Mundi Opencloud Instance", | ||||
| 							"description": "A very long description of what this data is", | ||||
| 							"logo": "https://cloud.o-forge.io/core/deperecated-oc-catalog/src/branch/main/scripts/local_imgs/Mundi datacenter.png", | ||||
| 							"logo": "https://cloud.o-forge.io/core/deperecated-oc-catalog/src/branch/main/scripts/local_imgs/Mundi compute.png", | ||||
| 							"owner": "IRT", | ||||
| 							"source_url": "http://www.google.com", | ||||
| 							"resource_model": { | ||||
| 								"id": "c3983010-1990-4ac0-8533-5389867e4424", | ||||
| 								"resource_type": "datacenter_resource" | ||||
| 								"resource_type": "compute_resource" | ||||
| 							} | ||||
| 						} | ||||
| 					} | ||||
|   | ||||
							
								
								
									
docker_schedulerd.json (5 lines added, new file)
							| @@ -0,0 +1,5 @@ | ||||
| { | ||||
|     "MONGO_URL":"mongodb://mongo:27017/",  | ||||
|     "NATS_URL":"nats://nats:4222", | ||||
|     "MONGO_DATABASE":"DC_myDC" | ||||
| } | ||||
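docker_schedulerd.json is a small connection-settings file. As an illustration only (the struct and loading code below are assumptions, not taken from the repository), it could be decoded with the standard library like this:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// schedulerConf matches the keys in docker_schedulerd.json (illustrative struct).
type schedulerConf struct {
	MongoURL      string `json:"MONGO_URL"`
	NatsURL       string `json:"NATS_URL"`
	MongoDatabase string `json:"MONGO_DATABASE"`
}

func main() {
	raw, err := os.ReadFile("docker_schedulerd.json")
	if err != nil {
		panic(err)
	}
	var c schedulerConf
	if err := json.Unmarshal(raw, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c)
}
```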
							
								
								
									
exemple.yml (32 lines added, new file)
							| @@ -0,0 +1,32 @@ | ||||
| # This template demonstrates a steps template and how to control sequential vs. parallel steps. | ||||
| # In this example, the hello1 completes before the hello2a, and hello2b steps, which run in parallel. | ||||
| apiVersion: argoproj.io/v1alpha1 | ||||
| kind: Workflow | ||||
| metadata: | ||||
|   generateName: steps- | ||||
| spec: | ||||
|   entrypoint: hello-hello-hello | ||||
|   templates: | ||||
|   - name: hello-hello-hello | ||||
|     steps: | ||||
|     - - name: hello1 | ||||
|         template: print-message | ||||
|         arguments: | ||||
|           parameters: [{name: message, value: "hello1"}] | ||||
|     - - name: hello2a | ||||
|         template: print-message | ||||
|         arguments: | ||||
|           parameters: [{name: message, value: "hello2a"}] | ||||
|       - name: hello2b | ||||
|         template: print-message | ||||
|         arguments: | ||||
|           parameters: [{name: message, value: "hello2b"}] | ||||
|  | ||||
|   - name: print-message | ||||
|     inputs: | ||||
|       parameters: | ||||
|       - name: message | ||||
|     container: | ||||
|       image: busybox | ||||
|       command: [echo] | ||||
|       args: ["{{inputs.parameters.message}}"] | ||||
							
								
								
									
go.mod (100 lines changed)
							| @@ -1,47 +1,101 @@ | ||||
| module oc-monitord | ||||
|  | ||||
| go 1.22.0 | ||||
| go 1.23.1 | ||||
|  | ||||
| toolchain go1.23.3 | ||||
|  | ||||
| require ( | ||||
| 	cloud.o-forge.io/core/oc-lib v0.0.0-20240904135449-4f0ab6a3760f | ||||
| 	cloud.o-forge.io/core/oc-lib v0.0.0-20250313155727-88c88cac5bc9 | ||||
| 	github.com/akamensky/argparse v1.4.0 | ||||
| 	github.com/google/uuid v1.6.0 | ||||
| 	github.com/goraz/onion v0.1.3 | ||||
| 	github.com/nats-io/nats-server/v2 v2.10.18 | ||||
| 	github.com/nwtgck/go-fakelish v0.1.3 | ||||
| 	github.com/rs/zerolog v1.33.0 | ||||
| 	github.com/tidwall/gjson v1.17.2 | ||||
| 	github.com/rs/zerolog v1.34.0 | ||||
| 	gopkg.in/yaml.v3 v3.0.1 | ||||
| ) | ||||
|  | ||||
| require ( | ||||
| 	github.com/gabriel-vasile/mimetype v1.4.5 // indirect | ||||
| 	github.com/beego/beego/v2 v2.3.7 // indirect | ||||
| 	github.com/go-playground/validator/v10 v10.26.0 // indirect | ||||
| 	github.com/golang/protobuf v1.5.4 // indirect | ||||
| 	github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect | ||||
| 	github.com/sirupsen/logrus v1.9.3 // indirect | ||||
| 	google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect | ||||
| 	google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect | ||||
| 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect | ||||
| 	google.golang.org/grpc v1.63.0 // indirect | ||||
| ) | ||||
|  | ||||
| require ( | ||||
| 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d | ||||
| 	github.com/argoproj/argo-workflows/v3 v3.6.4 | ||||
| 	github.com/beorn7/perks v1.0.1 // indirect | ||||
| 	github.com/biter777/countries v1.7.5 // indirect | ||||
| 	github.com/cespare/xxhash/v2 v2.3.0 // indirect | ||||
| 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect | ||||
| 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect | ||||
| 	github.com/fxamacker/cbor/v2 v2.7.0 // indirect | ||||
| 	github.com/gabriel-vasile/mimetype v1.4.8 // indirect | ||||
| 	github.com/go-logr/logr v1.4.2 // indirect | ||||
| 	github.com/go-openapi/jsonpointer v0.21.0 // indirect | ||||
| 	github.com/go-openapi/jsonreference v0.20.4 // indirect | ||||
| 	github.com/go-openapi/swag v0.23.0 // indirect | ||||
| 	github.com/go-playground/locales v0.14.1 // indirect | ||||
| 	github.com/go-playground/universal-translator v0.18.1 // indirect | ||||
| 	github.com/go-playground/validator/v10 v10.22.0 // indirect | ||||
| 	github.com/golang/snappy v0.0.4 // indirect | ||||
| 	github.com/google/uuid v1.6.0 // indirect | ||||
| 	github.com/klauspost/compress v1.17.9 // indirect | ||||
| 	github.com/gogo/protobuf v1.3.2 // indirect | ||||
| 	github.com/golang/snappy v1.0.0 // indirect | ||||
| 	github.com/google/gnostic-models v0.6.8 // indirect | ||||
| 	github.com/google/go-cmp v0.7.0 // indirect | ||||
| 	github.com/google/gofuzz v1.2.0 // indirect | ||||
| 	github.com/hashicorp/golang-lru v1.0.2 // indirect | ||||
| 	github.com/josharian/intern v1.0.0 // indirect | ||||
| 	github.com/json-iterator/go v1.1.12 // indirect | ||||
| 	github.com/klauspost/compress v1.18.0 // indirect | ||||
| 	github.com/leodido/go-urn v1.4.0 // indirect | ||||
| 	github.com/mattn/go-colorable v0.1.13 // indirect | ||||
| 	github.com/mailru/easyjson v0.7.7 // indirect | ||||
| 	github.com/mattn/go-colorable v0.1.14 // indirect | ||||
| 	github.com/mattn/go-isatty v0.0.20 // indirect | ||||
| 	github.com/mitchellh/mapstructure v1.5.0 // indirect | ||||
| 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect | ||||
| 	github.com/modern-go/reflect2 v1.0.2 // indirect | ||||
| 	github.com/montanaflynn/stats v0.7.1 // indirect | ||||
| 	github.com/nats-io/nats.go v1.37.0 // indirect | ||||
| 	github.com/nats-io/nkeys v0.4.7 // indirect | ||||
| 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect | ||||
| 	github.com/nats-io/nats.go v1.41.0 // indirect | ||||
| 	github.com/nats-io/nkeys v0.4.10 // indirect | ||||
| 	github.com/nats-io/nuid v1.0.1 // indirect | ||||
| 	github.com/robfig/cron/v3 v3.0.1 // indirect | ||||
| 	github.com/pkg/errors v0.9.1 // indirect | ||||
| 	github.com/prometheus/client_golang v1.22.0 // indirect | ||||
| 	github.com/prometheus/client_model v0.6.1 // indirect | ||||
| 	github.com/prometheus/common v0.63.0 // indirect | ||||
| 	github.com/prometheus/procfs v0.16.0 // indirect | ||||
| 	github.com/robfig/cron v1.2.0 // indirect | ||||
| 	github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 // indirect | ||||
| 	github.com/smartystreets/goconvey v1.6.4 // indirect | ||||
| 	github.com/tidwall/match v1.1.1 // indirect | ||||
| 	github.com/tidwall/pretty v1.2.0 // indirect | ||||
| 	github.com/ugorji/go/codec v1.1.7 // indirect | ||||
| 	github.com/vk496/cron v1.2.0 // indirect | ||||
| 	github.com/x448/float16 v0.8.4 // indirect | ||||
| 	github.com/xdg-go/pbkdf2 v1.0.0 // indirect | ||||
| 	github.com/xdg-go/scram v1.1.2 // indirect | ||||
| 	github.com/xdg-go/stringprep v1.0.4 // indirect | ||||
| 	github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect | ||||
| 	go.mongodb.org/mongo-driver v1.16.1 // indirect | ||||
| 	golang.org/x/crypto v0.26.0 // indirect | ||||
| 	golang.org/x/net v0.28.0 // indirect | ||||
| 	golang.org/x/sync v0.8.0 // indirect | ||||
| 	golang.org/x/sys v0.25.0 // indirect | ||||
| 	golang.org/x/text v0.18.0 // indirect | ||||
| 	go.mongodb.org/mongo-driver v1.17.3 // indirect | ||||
| 	golang.org/x/crypto v0.37.0 // indirect | ||||
| 	golang.org/x/net v0.39.0 // indirect | ||||
| 	golang.org/x/oauth2 v0.25.0 // indirect | ||||
| 	golang.org/x/sync v0.13.0 // indirect | ||||
| 	golang.org/x/sys v0.32.0 // indirect | ||||
| 	golang.org/x/term v0.31.0 // indirect | ||||
| 	golang.org/x/text v0.24.0 // indirect | ||||
| 	golang.org/x/time v0.7.0 // indirect | ||||
| 	google.golang.org/protobuf v1.36.6 // indirect | ||||
| 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect | ||||
| 	gopkg.in/inf.v0 v0.9.1 // indirect | ||||
| 	k8s.io/api v0.32.1 | ||||
| 	k8s.io/apimachinery v0.32.1 | ||||
| 	k8s.io/client-go v0.32.1 | ||||
| 	k8s.io/klog/v2 v2.130.1 // indirect | ||||
| 	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect | ||||
| 	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect | ||||
| 	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect | ||||
| 	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect | ||||
| 	sigs.k8s.io/yaml v1.4.0 // indirect | ||||
| ) | ||||
|   | ||||
go.sum (354 lines changed)
							| @@ -1,51 +1,27 @@ | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240729150539-dc30412f2edc h1:Q70pRG2f6fULpw+BE7ECQO7wk/Wy9VreZPrpuUXJ9yA= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240729150539-dc30412f2edc/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240730072752-250fefd0d85e h1:1rBqh6/cGgCukaRhj0I5Ypb6ydA9/EHUsH/pw+1yJQ4= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240730072752-250fefd0d85e/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240731093413-1ff4cb08d1b8 h1:Rr1EkGsjclzafKC/3ff/tccyh3wJL3fgL0rQyI4AEpQ= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240731093413-1ff4cb08d1b8/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240731094747-2795c924f75d h1:iGOJ/LHHbq2WMb7Klkh3A/f/sot9N9rfPNrNBCqIXCA= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240731094747-2795c924f75d/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240731095057-63236362ca3d h1:uBn7AeWPFCmhN6hknTOUwtGZvlEcTTDUPpDNaBNEutM= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240731095057-63236362ca3d/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240731123052-78bef28a2171 h1:SiTy74sfSeO97umBOnUZNrBl613LM0oqBw8eOOsYv7I= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240731123052-78bef28a2171/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240731142119-51f3a320b3a0 h1:XhEtERYUHnvl226d9CjgyocGRmUgsBjBsMlIA8ugx+0= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240731142119-51f3a320b3a0/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240731145630-ad455e0e3a91 h1:o7hnTWo9VpoYV/+l/+ET8jVF4olagDMpVQ2w19Wp6z0= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240731145630-ad455e0e3a91/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240801090939-0256266107ad h1:UNd4spyJHM8iWrFllTMGEWvunPdPnzd+3rJHerN+DhU= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240801090939-0256266107ad/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240801091141-b0f2cf46eb6b h1:/0uyJi/yf/iwPwc9n6dPAPrjLcm0N8ISfrgkZy/HFIU= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240801091141-b0f2cf46eb6b/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240801091914-dbbda341174e h1:VBF/4TUew2d+p67Nv/W/cdxWTYG4Yn/v3mv97wuKGBE= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240801091914-dbbda341174e/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240801093150-69d53f9d4d63 h1:sRqgTzwVhKHL4OOCOjwTxSYgf/ZjYG/286eaf/DLQXM= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240801093150-69d53f9d4d63/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240801093327-c3e6f04dd89a h1:NLFcAtK2mTi26e+t8wmj04PZTCKKSME+HWMsVT7I/I8= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240801093327-c3e6f04dd89a/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240801145026-b125c9be0152 h1:eV8M9cjPpyzG9wPTj69rWA8+zxWoi0GJqdn/4+C//L4= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240801145026-b125c9be0152/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240805134753-17f62b649523 h1:/gN4169dtvbyi3+oLfHTMe8RlfX+P4VrV+1nfAThS+k= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240805134753-17f62b649523/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240807111844-fe78927e739c h1:mt9c6vvW9sYhcIA5TM7YMHz5ItVYliatfWFPC4Gs/HM= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240807111844-fe78927e739c/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240807131622-6df71bde1d5e h1:LIZ2Mxwd9NQD2B6O07LZ1Zc7l6/eOhY9iOcFyr6DGDQ= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240807131622-6df71bde1d5e/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240807134103-0ec80473ccf7 h1:Q9fFnvEf0XzQvnCZ815wTRQ6zP/efU9RUcKXgcDoCng= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240807134103-0ec80473ccf7/go.mod h1:V5EL+NV2s9P1/BcFm3/icfLeBYVVMLl1Z0F0eecJZGo= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240822081914-4abf59a10d97 h1:6tbeTQvRnD0vDUl+5SLMgAh9ukjGxQ9WKjNcvvxN7cQ= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240822081914-4abf59a10d97/go.mod h1:1hhYh5QWAbYw9cKplQ0ZD9PMgU8t6gPqiYF8sldv1HU= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240826085916-d0e1474f8f34 h1:40XQgwR9HxXSnouY+ZqE/xYCM4qa+U+RLA5GA5JSNyQ= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240826085916-d0e1474f8f34/go.mod h1:1hhYh5QWAbYw9cKplQ0ZD9PMgU8t6gPqiYF8sldv1HU= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240828135227-14d6a5f11c4e h1:/KWO/gIcP5f7T4r00715fNz0Y/Hil6Bj3J1ycuES1Zw= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240828135227-14d6a5f11c4e/go.mod h1:FIJD0taWLJ5pjQLJ6sfE2KlTkvbmk5SMcyrxdjsaVz0= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240904135449-4f0ab6a3760f h1:v9mw3uNg/DJswOvHooMu8/BMedA+vIXbma+8iUwsjUI= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20240904135449-4f0ab6a3760f/go.mod h1:FIJD0taWLJ5pjQLJ6sfE2KlTkvbmk5SMcyrxdjsaVz0= | ||||
| cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= | ||||
| cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20250313155727-88c88cac5bc9 h1:mSFFPwil5Ih+RPBvn88MBerQMtsoHnOuyCZQaf91a34= | ||||
| cloud.o-forge.io/core/oc-lib v0.0.0-20250313155727-88c88cac5bc9/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE= | ||||
| github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= | ||||
| github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= | ||||
| github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= | ||||
| github.com/akamensky/argparse v1.4.0 h1:YGzvsTqCvbEZhL8zZu2AiA5nq805NZh75JNj4ajn1xc= | ||||
| github.com/akamensky/argparse v1.4.0/go.mod h1:S5kwC7IuDcEr5VeXtGPRVZ5o/FdhcMlQz4IZQuw64xA= | ||||
| github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= | ||||
| github.com/argoproj/argo-workflows/v3 v3.6.4 h1:5+Cc1UwaQE5ka3w7R3hxZ1TK3M6VjDEXA5WSQ/IXrxY= | ||||
| github.com/argoproj/argo-workflows/v3 v3.6.4/go.mod h1:2f5zB8CkbNCCO1od+kd1dWkVokqcuyvu+tc+Jwx1MZg= | ||||
| github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= | ||||
| github.com/beego/beego/v2 v2.3.7 h1:z4btKtjU/rfp5BiYHkGD2QPjK9i1E9GH+I7vfhn6Agk= | ||||
| github.com/beego/beego/v2 v2.3.7/go.mod h1:5cqHsOHJIxkq44tBpRvtDe59GuVRVv/9/tyVDxd5ce4= | ||||
| github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= | ||||
| github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= | ||||
| github.com/biter777/countries v1.7.5 h1:MJ+n3+rSxWQdqVJU8eBy9RqcdH6ePPn4PJHocVWUa+Q= | ||||
| github.com/biter777/countries v1.7.5/go.mod h1:1HSpZ526mYqKJcpT5Ti1kcGQ0L0SrXWIaptUWjFfv2E= | ||||
| github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= | ||||
| github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= | ||||
| github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= | ||||
| github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= | ||||
| github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= | ||||
| github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= | ||||
| github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= | ||||
| github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= | ||||
| @@ -55,75 +31,167 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV | ||||
| github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= | ||||
| github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | ||||
| github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | ||||
| github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= | ||||
| github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | ||||
| github.com/elazarl/go-bindata-assetfs v1.0.1 h1:m0kkaHRKEu7tUIUFVwhGGGYClXvyl4RE03qmvRTNfbw= | ||||
| github.com/elazarl/go-bindata-assetfs v1.0.1/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= | ||||
| github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= | ||||
| github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= | ||||
| github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= | ||||
| github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= | ||||
| github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= | ||||
| github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= | ||||
| github.com/etcd-io/etcd v3.3.17+incompatible/go.mod h1:cdZ77EstHBwVtD6iTgzgvogwcjo9m4iOqoijouPJ4bs= | ||||
| github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= | ||||
| github.com/gabriel-vasile/mimetype v1.4.4 h1:QjV6pZ7/XZ7ryI2KuyeEDE8wnh7fHP9YnQy+R0LnH8I= | ||||
| github.com/gabriel-vasile/mimetype v1.4.4/go.mod h1:JwLei5XPtWdGiMFB5Pjle1oEeoSeEuJfJE+TtfvdB/s= | ||||
| github.com/gabriel-vasile/mimetype v1.4.5 h1:J7wGKdGu33ocBOhGy0z653k/lFKLFDPJMG8Gql0kxn4= | ||||
| github.com/gabriel-vasile/mimetype v1.4.5/go.mod h1:ibHel+/kbxn9x2407k1izTA1S81ku1z/DlgOW2QE0M4= | ||||
| github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= | ||||
| github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= | ||||
| github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM= | ||||
| github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= | ||||
| github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= | ||||
| github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= | ||||
| github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= | ||||
| github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= | ||||
| github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= | ||||
| github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= | ||||
| github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= | ||||
| github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= | ||||
| github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= | ||||
| github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= | ||||
| github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= | ||||
| github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= | ||||
| github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= | ||||
| github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= | ||||
| github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= | ||||
| github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4Bx7ia+JlgcnOao= | ||||
| github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= | ||||
| github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k= | ||||
| github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= | ||||
| github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= | ||||
| github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= | ||||
| github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= | ||||
| github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= | ||||
| github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= | ||||
| github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= | ||||
| github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= | ||||
| github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= | ||||
| github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= | ||||
| github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= | ||||
| github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= | ||||
| github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= | ||||
| github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= | ||||
| github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= | ||||
| github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= | ||||
| github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= | ||||
| github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= | ||||
| github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= | ||||
| github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= | ||||
| github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | ||||
| github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= | ||||
| github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= | ||||
| github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= | ||||
| github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= | ||||
| github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= | ||||
| github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= | ||||
| github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= | ||||
| github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= | ||||
| github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= | ||||
| github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= | ||||
| github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= | ||||
| github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= | ||||
| github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= | ||||
| github.com/goraz/onion v0.1.3 h1:KhyvbDA2b70gcz/d5izfwTiOH8SmrvV43AsVzpng3n0= | ||||
| github.com/goraz/onion v0.1.3/go.mod h1:XEmz1XoBz+wxTgWB8NwuvRm4RAu3vKxvrmYtzK+XCuQ= | ||||
| github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= | ||||
| github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= | ||||
| github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= | ||||
| github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= | ||||
| github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= | ||||
| github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= | ||||
| github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= | ||||
| github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= | ||||
| github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= | ||||
| github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= | ||||
| github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= | ||||
| github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= | ||||
| github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= | ||||
| github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= | ||||
| github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= | ||||
| github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= | ||||
| github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= | ||||
| github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= | ||||
| github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= | ||||
| github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= | ||||
| github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= | ||||
| github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= | ||||
| github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= | ||||
| github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= | ||||
| github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= | ||||
| github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= | ||||
| github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= | ||||
| github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= | ||||
| github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= | ||||
| github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= | ||||
| github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= | ||||
| github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= | ||||
| github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= | ||||
| github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= | ||||
| github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= | ||||
| github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= | ||||
| github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= | ||||
| github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= | ||||
| github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= | ||||
| github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= | ||||
| github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= | ||||
| github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= | ||||
| github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= | ||||
| github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= | ||||
| github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= | ||||
| github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= | ||||
| github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= | ||||
| github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= | ||||
| github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= | ||||
| github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= | ||||
| github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= | ||||
| github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= | ||||
| github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= | ||||
| github.com/nats-io/nats-server/v2 v2.10.18 h1:tRdZmBuWKVAFYtayqlBB2BuCHNGAQPvoQIXOKwU3WSM= | ||||
| github.com/nats-io/nats-server/v2 v2.10.18/go.mod h1:97Qyg7YydD8blKlR8yBsUlPlWyZKjA7Bp5cl3MUE9K8= | ||||
| github.com/nats-io/nats.go v1.37.0 h1:07rauXbVnnJvv1gfIyghFEo6lUcYRY0WXc3x7x0vUxE= | ||||
| github.com/nats-io/nats.go v1.37.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8= | ||||
| github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI= | ||||
| github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc= | ||||
| github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= | ||||
| github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= | ||||
| github.com/nats-io/nats.go v1.41.0 h1:PzxEva7fflkd+n87OtQTXqCTyLfIIMFJBpyccHLE2Ko= | ||||
| github.com/nats-io/nats.go v1.41.0/go.mod h1:wV73x0FSI/orHPSYoyMeJB+KajMDoWyXmFaRrrYaaTo= | ||||
| github.com/nats-io/nkeys v0.4.10 h1:glmRrpCmYLHByYcePvnTBEAwawwapjCPMjy2huw20wc= | ||||
| github.com/nats-io/nkeys v0.4.10/go.mod h1:OjRrnIKnWBFl+s4YK5ChQfvHP2fxqZexrKJoVVyWB3U= | ||||
| github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= | ||||
| github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= | ||||
| github.com/nwtgck/go-fakelish v0.1.3 h1:bA8/xa9hQmzppexIhBvdmztcd/PJ4SPuAUTBdMKZ8G4= | ||||
| github.com/nwtgck/go-fakelish v0.1.3/go.mod h1:2HC44/OwVWwOa/g3+P2jUM3FEHQ0ya4gyCSU19PPd3Y= | ||||
| github.com/ogier/pflag v0.0.1/go.mod h1:zkFki7tvTa0tafRvTBIZTvzYyAu6kQhPZFnshFFPE+g= | ||||
| github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= | ||||
| github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= | ||||
| github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= | ||||
| github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= | ||||
| github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= | ||||
| github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= | ||||
| github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= | ||||
| github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= | ||||
| github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= | ||||
| github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | ||||
| github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= | ||||
| github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= | ||||
| github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= | ||||
| github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= | ||||
| github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= | ||||
| github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= | ||||
| github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | ||||
| github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= | ||||
| github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= | ||||
| github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= | ||||
| github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= | ||||
| github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= | ||||
| github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= | ||||
| github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= | ||||
| github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM= | ||||
| github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg= | ||||
| github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= | ||||
| github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= | ||||
| github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= | ||||
| github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= | ||||
| github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= | ||||
| github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= | ||||
| github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= | ||||
| github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= | ||||
| github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= | ||||
| github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 h1:v9ezJDHA1XGxViAUSIoO/Id7Fl63u6d0YmsAm+/p2hs= | ||||
| github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02/go.mod h1:RF16/A3L0xSa0oSERcnhd8Pu3IXSDZSK2gmGIMsttFE= | ||||
| github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= | ||||
| github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= | ||||
| github.com/skarademir/naturalsort v0.0.0-20150715044055-69a5d87bef62/go.mod h1:oIdVclZaltY1Nf7OQUkg1/2jImBJ+ZfKZuDIRSwk3p0= | ||||
| github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= | ||||
| github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= | ||||
| @@ -135,22 +203,21 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU | ||||
| github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= | ||||
| github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= | ||||
| github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= | ||||
| github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= | ||||
| github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= | ||||
| github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= | ||||
| github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= | ||||
| github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= | ||||
| github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= | ||||
| github.com/tidwall/gjson v1.17.2 h1:YlBFFaxZdSXKP8zhqh5CRbk0wL7oCAU3D+JJLU5pE7U= | ||||
| github.com/tidwall/gjson v1.17.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= | ||||
| github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= | ||||
| github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= | ||||
| github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= | ||||
| github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= | ||||
| github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= | ||||
| github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= | ||||
| github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= | ||||
| github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= | ||||
| github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= | ||||
| github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= | ||||
| github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= | ||||
| github.com/vk496/cron v1.2.0 h1:fDxb4qNi6Rmxh3h9snW1sKJ0nHgjpg3fYc0Oq+igbvk= | ||||
| github.com/vk496/cron v1.2.0/go.mod h1:f8lpm+SIXbjvujp8Dix4S2B+GGva/q0yrRPQ8hwTtOc= | ||||
| github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= | ||||
| github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= | ||||
| github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= | ||||
| github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= | ||||
| github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= | ||||
| @@ -158,80 +225,151 @@ github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3k | ||||
| github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= | ||||
| github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= | ||||
| github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= | ||||
| github.com/youmark/pkcs8 v0.0.0-20240424034433-3c2c7870ae76 h1:tBiBTKHnIjovYoLX/TPkcf+OjqqKGQrPtGT3Foz+Pgo= | ||||
| github.com/youmark/pkcs8 v0.0.0-20240424034433-3c2c7870ae76/go.mod h1:SQliXeA7Dhkt//vS29v3zpbEwoa+zb2Cn5xj5uO4K5U= | ||||
| github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM= | ||||
| github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= | ||||
| github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | ||||
| github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | ||||
| github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= | ||||
| go.mongodb.org/mongo-driver v1.16.0 h1:tpRsfBJMROVHKpdGyc1BBEzzjDUWjItxbVSZ8Ls4BQ4= | ||||
| go.mongodb.org/mongo-driver v1.16.0/go.mod h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw= | ||||
| go.mongodb.org/mongo-driver v1.16.1 h1:rIVLL3q0IHM39dvE+z2ulZLp9ENZKThVfuvN/IiN4l8= | ||||
| go.mongodb.org/mongo-driver v1.16.1/go.mod h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw= | ||||
| go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ= | ||||
| go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= | ||||
| golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= | ||||
| golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= | ||||
| golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | ||||
| golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | ||||
| golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | ||||
| golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= | ||||
| golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= | ||||
| golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= | ||||
| golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= | ||||
| golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= | ||||
| golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= | ||||
| golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= | ||||
| golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= | ||||
| golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= | ||||
| golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= | ||||
| golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= | ||||
| golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | ||||
| golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | ||||
| golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= | ||||
| golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | ||||
| golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | ||||
| golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | ||||
| golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | ||||
| golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= | ||||
| golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= | ||||
| golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||
| golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||
| golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= | ||||
| golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= | ||||
| golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= | ||||
| golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= | ||||
| golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= | ||||
| golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= | ||||
| golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= | ||||
| golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= | ||||
| golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= | ||||
| golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= | ||||
| golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= | ||||
| golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= | ||||
| golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= | ||||
| golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= | ||||
| golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= | ||||
| golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= | ||||
| golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= | ||||
| golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= | ||||
| golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= | ||||
| golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= | ||||
| golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||
| golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||
| golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||
| golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||
| golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||
| golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||
| golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||
| golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= | ||||
| golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= | ||||
| golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= | ||||
| golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= | ||||
| golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= | ||||
| golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= | ||||
| golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= | ||||
| golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= | ||||
| golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= | ||||
| golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= | ||||
| golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= | ||||
| golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= | ||||
| golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= | ||||
| golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= | ||||
| golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= | ||||
| golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= | ||||
| golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= | ||||
| golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= | ||||
| golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= | ||||
| golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= | ||||
| golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= | ||||
| golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= | ||||
| golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= | ||||
| golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= | ||||
| golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= | ||||
| golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= | ||||
| golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= | ||||
| golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= | ||||
| golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | ||||
| golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | ||||
| golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= | ||||
| golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= | ||||
| golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= | ||||
| golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= | ||||
| golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | ||||
| golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= | ||||
| golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= | ||||
| golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= | ||||
| golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= | ||||
| golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= | ||||
| golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||
| gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= | ||||
| golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||
| golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||
| golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||
| google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= | ||||
| google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= | ||||
| google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= | ||||
| google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= | ||||
| google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= | ||||
| google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= | ||||
| google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= | ||||
| google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= | ||||
| google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= | ||||
| google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY= | ||||
| google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= | ||||
| google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= | ||||
| google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= | ||||
| google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= | ||||
| google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= | ||||
| google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= | ||||
| google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= | ||||
| google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= | ||||
| google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= | ||||
| google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= | ||||
| gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= | ||||
| gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= | ||||
| gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= | ||||
| gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= | ||||
| gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= | ||||
| gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= | ||||
| gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= | ||||
| gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= | ||||
| gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= | ||||
| gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= | ||||
| gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= | ||||
| gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= | ||||
| gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | ||||
| gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= | ||||
| gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | ||||
| honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= | ||||
| honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= | ||||
| k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= | ||||
| k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= | ||||
| k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= | ||||
| k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= | ||||
| k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= | ||||
| k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= | ||||
| k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= | ||||
| k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= | ||||
| k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= | ||||
| k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= | ||||
| k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= | ||||
| k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= | ||||
| sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= | ||||
| sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= | ||||
| sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= | ||||
| sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= | ||||
| sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= | ||||
| sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= | ||||
|   | ||||
logger/argo_logs.go (new file, 219 lines added)
							| @@ -0,0 +1,219 @@ | ||||
| package logger | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"oc-monitord/tools" | ||||
| 	"oc-monitord/utils" | ||||
| 	"slices" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/rs/zerolog" | ||||
| 	"k8s.io/apimachinery/pkg/watch" | ||||
|  | ||||
| 	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" | ||||
| ) | ||||
|  | ||||
| // An object to monitor the logs generated by a specific pod from a workflow execution | ||||
| type ArgoWatch struct { | ||||
| 	Name      string | ||||
| 	Namespace string | ||||
| 	Status    string | ||||
| 	Conditions | ||||
| 	Created  string | ||||
| 	Started  string | ||||
| 	Duration string | ||||
| 	Progress string | ||||
| 	Logs     []string | ||||
| } | ||||
|  | ||||
| type Conditions struct { | ||||
| 	PodRunning bool | ||||
| 	Completed  bool | ||||
| } | ||||
|  | ||||
| func (a *ArgoWatch) Equals(arg *ArgoWatch) bool { | ||||
| 	if arg == nil { | ||||
| 		return false | ||||
| 	} | ||||
| 	return a.Status == arg.Status && a.Progress == arg.Progress && a.Conditions.PodRunning == arg.Conditions.PodRunning && a.Conditions.Completed == arg.Conditions.Completed | ||||
| } | ||||
|  | ||||
| func NewArgoLogs(name string, namespace string, stepMax int) *ArgoLogs { | ||||
| 	return &ArgoLogs{ | ||||
| 		Name:        "oc-monitor-" + name, | ||||
| 		Namespace:   namespace, | ||||
| 		CreatedDate: time.Now().Format("2006-01-02 15:04:05"), | ||||
| 		StepCount:   0, | ||||
| 		StepMax:     stepMax, | ||||
| 		stop:        false, | ||||
| 		Seen:        []string{}, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // An object to monitor and log the output of an argo submit  | ||||
| type ArgoLogs struct { | ||||
| 	Name        string | ||||
| 	Namespace   string | ||||
| 	CreatedDate string | ||||
| 	StepCount   int | ||||
| 	StepMax     int | ||||
| 	stop        bool | ||||
| 	Started     time.Time | ||||
| 	Seen        []string | ||||
| 	Logs        []string | ||||
| 	IsStreaming bool | ||||
| } | ||||
|  | ||||
| func (a *ArgoLogs) NewWatch() *ArgoWatch { | ||||
| 	return &ArgoWatch{ | ||||
| 		Name:      a.Name, | ||||
| 		Namespace: a.Namespace, | ||||
| 		Status:    "Pending", | ||||
| 		Created:   a.CreatedDate, | ||||
| 		Started:   a.Started.Format("2006-01-02 15:04:05"), | ||||
| 		Conditions: Conditions{ | ||||
| 			PodRunning: a.StepCount > 0 && a.StepCount < a.StepMax, | ||||
| 			Completed:  a.StepCount == a.StepMax, | ||||
| 		}, | ||||
| 		Progress: fmt.Sprintf("%v/%v", a.StepCount, a.StepMax), | ||||
| 		Duration: "0s", | ||||
| 		Logs:     []string{}, | ||||
| 	} | ||||
|  | ||||
| } | ||||
|  | ||||
| func (a *ArgoLogs) StartStepRecording(current_watch *ArgoWatch, logger zerolog.Logger) { | ||||
| 	jsonified, _ := json.Marshal(current_watch) | ||||
| 	logger.Info().Msg(string(jsonified)) | ||||
| 	a.StepCount += 1 | ||||
| 	a.Started = time.Now() | ||||
| } | ||||
|  | ||||
|  | ||||
| type ArgoPodLog struct { | ||||
| 	PodName		string | ||||
| 	Step		string | ||||
| 	Message		string | ||||
| } | ||||
|  | ||||
| func NewArgoPodLog(name string, step string, msg string) ArgoPodLog { | ||||
| 	return ArgoPodLog{ | ||||
| 		PodName: name, | ||||
| 		Step: step, | ||||
| 		Message: msg, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func LogKubernetesArgo(wfName string, executionID string, watcher watch.Interface) { | ||||
| 	var argoWatcher *ArgoWatch | ||||
| 	var pods []string | ||||
| 	var node wfv1.NodeStatus | ||||
|  | ||||
| 	wfl := utils.GetWFLogger("") | ||||
|  | ||||
| 	for event := range (watcher.ResultChan()) { | ||||
| 		wf, ok := event.Object.(*wfv1.Workflow) | ||||
| 		if !ok { | ||||
| 			wfl.Error().Msg("unexpected type") | ||||
| 			continue | ||||
| 		} | ||||
| 		if len(wf.Status.Nodes) == 0 { | ||||
| 			wfl.Debug().Msg("No node status yet")	// The first output of the channel doesn't contain Nodes so we skip it | ||||
| 			continue | ||||
| 		} | ||||
| 		 | ||||
| 		conditions := retrieveCondition(wf)	 | ||||
|  | ||||
| 		// Retrieving the Status for the main node, which is named after the workflow | ||||
| 		if node, ok = wf.Status.Nodes[wfName]; !ok { | ||||
| 			bytified, _ := json.MarshalIndent(wf.Status.Nodes,"","\t")  | ||||
| 			wfl.Fatal().Msg("Could not find the " + wfName + " node in \n" + string(bytified)) | ||||
| 		} | ||||
|  | ||||
| 		now := time.Now() | ||||
| 		start, _ := time.Parse(time.RFC3339, node.StartedAt.String() ) | ||||
| 		duration := now.Sub(start) | ||||
|  | ||||
| 		newWatcher := ArgoWatch{ | ||||
| 			Name: node.Name, | ||||
| 			Namespace: executionID, | ||||
| 			Status: string(node.Phase), | ||||
| 			Created: node.StartedAt.String(), | ||||
| 			Started: node.StartedAt.String(), | ||||
| 			Progress: string(node.Progress), | ||||
| 			Duration: duration.String(), | ||||
| 			Conditions: conditions, | ||||
| 		} | ||||
|  | ||||
| 		if argoWatcher == nil { | ||||
| 			argoWatcher = &newWatcher | ||||
| 		} | ||||
|  | ||||
| 		if !newWatcher.Equals(argoWatcher){ | ||||
| 			jsonified, _ := json.Marshal(newWatcher) | ||||
| 			wfl.Info().Msg(string(jsonified)) | ||||
| 			argoWatcher = &newWatcher | ||||
| 		} | ||||
| 		 | ||||
| 		// No WaitGroup is needed here: the loop itself blocks the main goroutine, | ||||
| 		// since the Argo watch never closes its result channel | ||||
| 		for _, pod := range wf.Status.Nodes{ | ||||
| 			if !slices.Contains(pods,pod.Name){ | ||||
| 				pl := wfl.With().Str("pod",  pod.Name).Logger() | ||||
| 			if wfName == pod.Name { pods = append(pods, pod.Name); continue }	// One of the nodes is the Workflow itself, the others are pods, so don't stream logs for the workflow name | ||||
| 				go logKubernetesPods(executionID, wfName, pod.Name, pl) | ||||
| 				pods = append(pods, pod.Name) | ||||
| 			}  | ||||
| 		} | ||||
|  | ||||
| 		// Stop listening to the chan when the Workflow is completed or something bad happened | ||||
| 		if node.Phase.Completed() { | ||||
| 			wfl.Info().Msg(wfName + " workflow completed") | ||||
| 			break | ||||
| 		} | ||||
| 		if node.Phase.FailedOrError() { | ||||
| 			wfl.Error().Msg(wfName + " has failed, please refer to the logs") | ||||
| 			wfl.Error().Msg(node.Message)	 | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func retrieveCondition(wf *wfv1.Workflow) (c Conditions) { | ||||
| 	for _, cond := range wf.Status.Conditions { | ||||
| 		if cond.Type == "PodRunning" { | ||||
| 			c.PodRunning = cond.Status == "True" | ||||
| 		} | ||||
| 		if cond.Type == "Completed" { | ||||
| 			c.Completed = cond.Status == "True" | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return  | ||||
| 	 | ||||
| } | ||||
|  | ||||
| // Function meant to be run as a goroutine | ||||
| func logKubernetesPods(executionId string, wfName string,podName string, logger zerolog.Logger){ | ||||
| 	k, err := tools.NewKubernetesTool() | ||||
| 	if err != nil { | ||||
| 		logger.Error().Msg("Could not get Kubernetes tools") | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	reader, err := k.GetPodLogger(executionId, wfName, podName) | ||||
| 	if err != nil { | ||||
| 		logger.Error().Msg(err.Error()) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	scanner := bufio.NewScanner(reader) | ||||
| 	for scanner.Scan() { | ||||
| 		log := scanner.Text() | ||||
| 		podLog := NewArgoPodLog(wfName,podName,log) | ||||
| 		jsonified, _ := json.Marshal(podLog) | ||||
| 		logger.Info().Msg(string(jsonified)) | ||||
| 	} | ||||
| } | ||||
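| // Illustrative only: given NewArgoPodLog(wfName, podName, log) above, every scanned line | ||||
| // is emitted as one JSON message shaped like | ||||
| //   {"PodName":"<workflow name>","Step":"<pod name>","Message":"<raw log line>"} | ||||
| // with fields following the ArgoPodLog struct order. | ||||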
144 logger/local_argo_logs.go Normal file
							| @@ -0,0 +1,144 @@ | ||||
| package logger | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"oc-monitord/conf" | ||||
|  | ||||
| 	"strings" | ||||
| 	"sync" | ||||
|  | ||||
| 	"cloud.o-forge.io/core/oc-lib/logs" | ||||
| 	"github.com/rs/zerolog" | ||||
| ) | ||||
|  | ||||
| var logger zerolog.Logger | ||||
| var wfLogger zerolog.Logger | ||||
|  | ||||
|  | ||||
| // Builds an ArgoWatch from the slice of strings that makes up one round of stderr output from the --watch option of argo submit | ||||
| func NewLocalArgoWatch(inputs []string) *ArgoWatch { | ||||
| 	var workflow ArgoWatch | ||||
|  | ||||
| 	for _, input := range inputs { | ||||
| 		line := strings.TrimSpace(input) | ||||
| 		if line == "" { | ||||
| 			continue | ||||
| 		} | ||||
| 		switch { | ||||
| 		case strings.HasPrefix(line, "Name:"): | ||||
| 			workflow.Name = parseValue(line) | ||||
| 		case strings.HasPrefix(line, "Namespace:"): | ||||
| 			workflow.Namespace = parseValue(line) | ||||
| 		case strings.HasPrefix(line, "Status:"): | ||||
| 			workflow.Status = parseValue(line) | ||||
| 		case strings.HasPrefix(line, "PodRunning"): | ||||
| 			workflow.PodRunning = parseBoolValue(line) | ||||
| 		case strings.HasPrefix(line, "Completed"): | ||||
| 			workflow.Completed = parseBoolValue(line) | ||||
| 		case strings.HasPrefix(line, "Created:"): | ||||
| 			workflow.Created = parseValue(line) | ||||
| 		case strings.HasPrefix(line, "Started:"): | ||||
| 			workflow.Started = parseValue(line) | ||||
| 		case strings.HasPrefix(line, "Duration:"): | ||||
| 			workflow.Duration = parseValue(line) | ||||
| 		case strings.HasPrefix(line, "Progress:"): | ||||
| 			workflow.Progress = parseValue(line) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return &workflow | ||||
| } | ||||
|  | ||||
|  | ||||
|  | ||||
| func parseValue(line string) string { | ||||
| 	parts := strings.SplitN(line, ":", 2) | ||||
| 	if len(parts) < 2 { | ||||
| 		return "" | ||||
| 	} | ||||
| 	return strings.TrimSpace(parts[1]) | ||||
| } | ||||
|  | ||||
| func parseBoolValue(line string) bool { | ||||
| 	value := parseValue(line) | ||||
| 	return value == "True" | ||||
| } | ||||
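| // For reference, a hypothetical round of `argo submit --watch` stderr output that | ||||
| // NewLocalArgoWatch above can parse looks roughly like: | ||||
| //   Name:      oc-monitor-my-workflow | ||||
| //   Namespace: my-execution-id | ||||
| //   Status:    Running | ||||
| //   Conditions: | ||||
| //    PodRunning True | ||||
| //    Completed  False | ||||
| //   Created:   Mon Jan 01 10:00:00 +0000 | ||||
| //   Started:   Mon Jan 01 10:00:05 +0000 | ||||
| //   Duration:  10 seconds | ||||
| //   Progress:  1/3 | ||||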
|  | ||||
| func LogLocalWorkflow(wfName string, pipe io.ReadCloser, wg *sync.WaitGroup) { | ||||
| 	logger = logs.GetLogger() | ||||
|  | ||||
| 	logger.Debug().Msg("created wf_logger") | ||||
| 	fmt.Println("created wf_logger") | ||||
| 	wfLogger = logger.With().Str("argo_name", wfName).Str("workflow_id", conf.GetConfig().WorkflowID).Str("workflow_execution_id", conf.GetConfig().ExecutionID).Logger() | ||||
|  | ||||
| 	var current_watch, previous_watch ArgoWatch | ||||
|  | ||||
| 	watch_output := make([]string, 0) | ||||
| 	scanner := bufio.NewScanner(pipe) | ||||
| 	for scanner.Scan() { | ||||
| 		log := scanner.Text() | ||||
| 		watch_output = append(watch_output, log) | ||||
|  | ||||
| 		// Log the progress of the WF | ||||
| 		if strings.HasPrefix(log, "Progress:") { | ||||
|  | ||||
| 			current_watch = *NewLocalArgoWatch(watch_output) | ||||
| 			workflowName := current_watch.Name | ||||
| 			if !current_watch.Equals(&previous_watch) { | ||||
| 				wg.Add(1) | ||||
| 				// checkStatus(current_watch.Status, previous_watch.Status) | ||||
| 				jsonified, err := json.Marshal(current_watch) | ||||
| 				if err != nil { | ||||
| 					logger.Error().Msg("Could not create watch log for " + workflowName) | ||||
| 				} | ||||
| 				wfLogger.Info().Msg(string(jsonified)) | ||||
| 				previous_watch = current_watch | ||||
| 				current_watch = ArgoWatch{} | ||||
| 				wg.Done() | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
|  | ||||
|  | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Debug, no logs sent | ||||
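| // Each line from `argo logs` is expected to look like "<podName>: <message>"; the pod name | ||||
| // prefix is split off below so the matching step name can be attached to the log entry. | ||||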
| func LogLocalPod(wfName string, pipe io.ReadCloser, steps []string, wg *sync.WaitGroup) { | ||||
| 	scanner := bufio.NewScanner(pipe) | ||||
| 	for scanner.Scan() { | ||||
| 		var podLogger zerolog.Logger | ||||
| 		fmt.Println("new line") | ||||
| 		wg.Add(1) | ||||
| 		 | ||||
| 		line := scanner.Text() | ||||
| 		podName := strings.Split(line, ":")[0] | ||||
| 		podLogger = wfLogger.With().Str("step_name", getStepName(podName, steps)).Logger() | ||||
| 		log := strings.Split(line,podName+":")[1] | ||||
| 		podLog := NewArgoPodLog(wfName,podName,log) | ||||
| 		 | ||||
| 		jsonifiedLog, err := json.Marshal(podLog) | ||||
| 		if err != nil { | ||||
| 			podLogger.Fatal().Msg(err.Error()) | ||||
| 		} | ||||
|  | ||||
| 		podLogger.Info().Msg(string(jsonifiedLog)) | ||||
| 		wg.Done() | ||||
| 	} | ||||
|  | ||||
| } | ||||
|  | ||||
| func getStepName(podName string, steps []string) string { | ||||
|  | ||||
| 	for _, step := range(steps) { | ||||
| 		if strings.Contains(podName,step){ | ||||
| 			return step | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return "error" | ||||
| } | ||||
|  | ||||
357 main.go
							| @@ -2,24 +2,31 @@ package main | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"encoding/json" | ||||
| 	"encoding/base64" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"os" | ||||
| 	"os/exec" | ||||
| 	"regexp" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| 	"time" | ||||
|  | ||||
| 	"oc-monitord/conf" | ||||
| 	"oc-monitord/models" | ||||
| 	l "oc-monitord/logger" | ||||
| 	u "oc-monitord/utils" | ||||
| 	"oc-monitord/workflow_builder" | ||||
|  | ||||
| 	oclib "cloud.o-forge.io/core/oc-lib" | ||||
|  | ||||
| 	"cloud.o-forge.io/core/oc-lib/logs" | ||||
| 	"cloud.o-forge.io/core/oc-lib/models/booking" | ||||
| 	"cloud.o-forge.io/core/oc-lib/models/peer" | ||||
| 	"cloud.o-forge.io/core/oc-lib/models/utils" | ||||
| 	"cloud.o-forge.io/core/oc-lib/models/workflow_execution" | ||||
| 	"cloud.o-forge.io/core/oc-lib/tools" | ||||
|  | ||||
| 	tools2 "oc-monitord/tools" | ||||
|  | ||||
| 	"github.com/akamensky/argparse" | ||||
| 	"github.com/google/uuid" | ||||
| @@ -36,9 +43,7 @@ import ( | ||||
|  | ||||
| var logger zerolog.Logger | ||||
| var wf_logger zerolog.Logger | ||||
| var pods_logger zerolog.Logger | ||||
| var parser argparse.Parser | ||||
| var monitorLocal bool | ||||
| var workflowName string | ||||
|  | ||||
| const defaultConfigFile = "/etc/oc/ocmonitord_conf.json" | ||||
| @@ -46,24 +51,10 @@ const localConfigFile = "./conf/local_ocmonitord_conf.json" | ||||
|  | ||||
| func main() { | ||||
|  | ||||
| 	monitorLocal = false | ||||
| 	// Test if monitor is launched outside (with parameters) or in a k8s environment (env variables sets) | ||||
| 	if os.Getenv("KUBERNETES_SERVICE_HOST") == "" { | ||||
| 		// Not in a k8s environment, get conf from parameters | ||||
| 		fmt.Println("Executes outside of k8s") | ||||
| 		parser = *argparse.NewParser("oc-monitord", "Launch the execution of a workflow given as a parameter and sends the produced logs to a loki database") | ||||
| 		loadConfig(false, &parser) | ||||
| 	} else { | ||||
| 		// Executed in a k8s environment | ||||
| 		fmt.Println("Executes inside a k8s") | ||||
| 		monitorLocal = true | ||||
| 		loadConfig(true, nil) | ||||
| 	} | ||||
|  | ||||
| 	logger = logs.CreateLogger("oc-monitord") | ||||
|  | ||||
| 	logger.Debug().Msg("Loki URL : " + conf.GetConfig().LokiURL) | ||||
| 	logger.Debug().Msg("Workflow executed : " + conf.GetConfig().ExecutionID) | ||||
| 	os.Setenv("test_service", "true") // Only for service demo, delete before merging on main | ||||
| 	parser = *argparse.NewParser("oc-monitord", "Launch the execution of a workflow given as a parameter and sends the produced logs to a loki database") | ||||
| 	loadConfig(false, &parser) | ||||
| 	oclib.InitDaemon("oc-monitord") | ||||
|  | ||||
| 	oclib.SetConfig( | ||||
| 		conf.GetConfig().MongoURL, | ||||
| @@ -72,12 +63,15 @@ func main() { | ||||
| 		conf.GetConfig().LokiURL, | ||||
| 		conf.GetConfig().Logs, | ||||
| 	) | ||||
| 	oclib.Init("oc-monitord") | ||||
|  | ||||
| 	wf_id := getWorkflowId(conf.GetConfig().ExecutionID) | ||||
| 	conf.GetConfig().WorkflowID = wf_id | ||||
| 	logger = u.GetLogger() | ||||
|  | ||||
| 	logger.Debug().Msg("Starting construction of yaml argo for workflow :" + wf_id) | ||||
| 	logger.Debug().Msg("Loki URL : " + conf.GetConfig().LokiURL) | ||||
| 	logger.Debug().Msg("Workflow executed : " + conf.GetConfig().ExecutionID) | ||||
| 	exec := u.GetExecution(conf.GetConfig().ExecutionID) | ||||
| 	conf.GetConfig().WorkflowID = exec.WorkflowID | ||||
|  | ||||
| 	logger.Debug().Msg("Starting construction of yaml argo for workflow :" + exec.WorkflowID) | ||||
|  | ||||
| 	if _, err := os.Stat("./argo_workflows/"); os.IsNotExist(err) { | ||||
| 		os.Mkdir("./argo_workflows/", 0755) | ||||
| @@ -87,173 +81,187 @@ func main() { | ||||
| 	// // create argo | ||||
| 	new_wf := workflow_builder.WorflowDB{} | ||||
|  | ||||
| 	err := new_wf.LoadFrom(conf.GetConfig().WorkflowID) | ||||
| 	err := new_wf.LoadFrom(conf.GetConfig().WorkflowID, conf.GetConfig().PeerID) | ||||
| 	if err != nil { | ||||
|  | ||||
| 		logger.Error().Msg("Could not retrieve workflow " + conf.GetConfig().WorkflowID + " from oc-catalog API") | ||||
| 	} | ||||
|  | ||||
| 	argo_file_path, err := new_wf.ExportToArgo(conf.GetConfig().Timeout) | ||||
| 	builder, _, err := new_wf.ExportToArgo(exec.ExecutionsID, conf.GetConfig().Timeout) // Removed stepMax so far, I don't know if we need it anymore | ||||
| 	if err != nil { | ||||
| 		logger.Error().Msg("Could not create the Argo file for " + conf.GetConfig().WorkflowID) | ||||
| 		logger.Error().Msg(err.Error()) | ||||
| 	} | ||||
| 	logger.Debug().Msg("Created :" + argo_file_path) | ||||
|  | ||||
| 	workflowName = getContainerName(argo_file_path) | ||||
|  | ||||
| 	wf_logger = logger.With().Str("argo_name", workflowName).Str("workflow_id", conf.GetConfig().WorkflowID).Str("workflow_execution_id", conf.GetConfig().ExecutionID).Logger() | ||||
| 	wf_logger.Debug().Msg("Testing argo name") | ||||
|  | ||||
| 	executeWorkflow(argo_file_path) | ||||
|  | ||||
| } | ||||
|  | ||||
| // Return the Workflow ID associated to a workflow execution object | ||||
| func getWorkflowId(exec_id string) string { | ||||
|  | ||||
| 	res := oclib.LoadOne(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), exec_id) | ||||
|  | ||||
| 	if res.Code != 200 { | ||||
| 		logger.Error().Msg("Could not retrieve workflow ID from execution ID " + exec_id) | ||||
| 		return "" | ||||
| 	argoFilePath, err := builder.CompleteBuild(exec.ExecutionsID) | ||||
| 	if err != nil { | ||||
| 		logger.Error().Msg(err.Error()) | ||||
| 	} | ||||
|  | ||||
| 	wf_exec := res.ToWorkflowExecution() | ||||
| 	workflowName = getContainerName(argoFilePath) | ||||
|  | ||||
| 	return wf_exec.WorkflowID | ||||
| 	wf_logger := u.GetWFLogger(workflowName) | ||||
| 	wf_logger.Debug().Msg("Testing argo name") | ||||
|  | ||||
| 	if conf.GetConfig().KubeHost == "" { | ||||
| 		// Not in a k8s environment, get conf from parameters | ||||
| 		fmt.Println("Executes outside of k8s") | ||||
| 		executeOutside(argoFilePath, builder.Workflow) | ||||
| 	} else { | ||||
| 		// Executed in a k8s environment | ||||
| 		fmt.Println("Executes inside a k8s") | ||||
| 		// executeInside(exec.GetID(), "argo", argo_file_path, stepMax)  // commenting to use conf.ExecutionID instead of exec.GetID() | ||||
| 		executeInside(conf.GetConfig().ExecutionID, conf.GetConfig().ExecutionID, argoFilePath) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // So far we only log the output from  | ||||
| // So far we only log the output from the Argo watch | ||||
| func executeInside(execID string, ns string, argo_file_path string) { | ||||
| 	t, err := tools2.NewService(conf.GetConfig().Mode) | ||||
| 	if err != nil { | ||||
| 		logger.Error().Msg("Could not create KubernetesTool") | ||||
| 		return | ||||
| 	} | ||||
| 	 | ||||
| 	name, err := t.CreateArgoWorkflow(argo_file_path, ns) | ||||
| 	_ = name  | ||||
| 	if err != nil { | ||||
| 		logger.Error().Msg("Could not create argo workflow : " + err.Error()) | ||||
| 		fmt.Println("CA :" + conf.GetConfig().KubeCA) | ||||
| 		fmt.Println("Cert :" + conf.GetConfig().KubeCert) | ||||
| 		fmt.Println("Data :" + conf.GetConfig().KubeData) | ||||
| 		return | ||||
| 	} else { | ||||
| 		watcher, err := t.GetArgoWatch(execID, workflowName) | ||||
| 		if err != nil { | ||||
| 			logger.Error().Msg("Could not retrieve Watcher : " + err.Error()) | ||||
| 		} | ||||
|  | ||||
| func executeWorkflow(argo_file_path string) { | ||||
| 	// var stdout, stderr, stdout_logs, stderr_logs 	io.ReadCloser | ||||
| 	var stdout, stderr io.ReadCloser | ||||
| 	// var stderr 	io.ReadCloser | ||||
| 		l.LogKubernetesArgo(name, execID, watcher) | ||||
| 		if err != nil { | ||||
| 			logger.Error().Msg("Could not log workflow : " + err.Error()) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| } | ||||
|  | ||||
| func executeOutside(argo_file_path string, workflow workflow_builder.Workflow) { | ||||
| 	var stdoutSubmit, stderrSubmit io.ReadCloser | ||||
| 	var stdoutLogs, stderrLogs io.ReadCloser | ||||
| 	var wg sync.WaitGroup | ||||
| 	var err error | ||||
| 	cmd := exec.Command("argo", "submit", "--watch", "./argo_workflows/"+argo_file_path, "--serviceaccount=argo", "-n", "argo") | ||||
| 	fmt.Println(cmd) | ||||
| 	if stdout, err = cmd.StdoutPipe(); err != nil { | ||||
|  | ||||
| 	logger.Debug().Msg("executing :" + "argo submit --watch " + argo_file_path + " --serviceaccount sa-" + conf.GetConfig().ExecutionID + " -n " + conf.GetConfig().ExecutionID ) | ||||
|  | ||||
| 	cmdSubmit := exec.Command("argo", "submit", "--watch", argo_file_path, "--serviceaccount", "sa-"+conf.GetConfig().ExecutionID, "-n", conf.GetConfig().ExecutionID) | ||||
| 	if stdoutSubmit, err = cmdSubmit.StdoutPipe(); err != nil { | ||||
| 		wf_logger.Error().Msg("Could not retrieve stdoutpipe " + err.Error()) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	if stderr, err = cmd.StderrPipe(); err != nil { | ||||
| 		wf_logger.Error().Msg("Could not retrieve stderrpipe " + err.Error()) | ||||
| 	 | ||||
| 	cmdLogs := exec.Command("argo", "logs", "oc-monitor-"+workflowName, "-n", conf.GetConfig().ExecutionID, "--follow","--no-color") | ||||
| 	if stdoutLogs, err = cmdLogs.StdoutPipe(); err != nil { | ||||
| 		wf_logger.Error().Msg("Could not retrieve stdoutpipe for 'argo logs'" + err.Error()) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	if err := cmd.Start(); err != nil { | ||||
| 		panic(err) | ||||
| 	 | ||||
| 	var steps []string | ||||
| 	for _, template := range workflow.Spec.Templates { | ||||
| 		steps = append(steps, template.Name) | ||||
| 	} | ||||
|  | ||||
| 	var wg sync.WaitGroup | ||||
| 	go logWorkflow(stdout, &wg) | ||||
| 	go l.LogLocalWorkflow(workflowName, stdoutSubmit, &wg) | ||||
| 	go l.LogLocalPod(workflowName, stdoutLogs, steps, &wg) | ||||
|  | ||||
| 	if err := cmd.Wait(); err != nil { | ||||
| 	fmt.Println("Starting argo submit") | ||||
| 	if err := cmdSubmit.Start(); err != nil { | ||||
| 		wf_logger.Error().Msg("Could not start argo submit") | ||||
| 		wf_logger.Error().Msg(err.Error() + bufio.NewScanner(stderrSubmit).Text()) | ||||
| 		updateStatus("fatal", "") | ||||
| 	} | ||||
|  | ||||
| 	time.Sleep(5 * time.Second) | ||||
|  | ||||
| 	fmt.Println("Running argo logs") | ||||
| 	if err := cmdLogs.Run(); err != nil { | ||||
| 		wf_logger.Error().Msg("Could not run '" + strings.Join(cmdLogs.Args, " ") + "'") | ||||
| 		 | ||||
| 		wf_logger.Fatal().Msg(err.Error() + bufio.NewScanner(stderrLogs).Text()) | ||||
|  | ||||
| 	} | ||||
|  | ||||
| 	fmt.Println("Waiting argo submit") | ||||
| 	if err := cmdSubmit.Wait(); err != nil { | ||||
| 		wf_logger.Error().Msg("Could not execute argo submit") | ||||
| 		wf_logger.Error().Msg(err.Error() + bufio.NewScanner(stderr).Text()) | ||||
| 		updateStatus("fatal") | ||||
| 		wf_logger.Error().Msg(err.Error() + bufio.NewScanner(stderrSubmit).Text()) | ||||
| 		updateStatus("fatal", "") | ||||
| 	} | ||||
|  | ||||
| 	wg.Wait() | ||||
| } | ||||
|  | ||||
| // We could improve this function by creating an object with the same attribute as the output | ||||
| // and only send a new log if the current object has different values than the previous | ||||
| func logWorkflow(pipe io.ReadCloser, wg *sync.WaitGroup) { | ||||
| 	var current_watch, previous_watch models.ArgoWatch | ||||
|  | ||||
| 	watch_output := make([]string, 0) | ||||
| 	scanner := bufio.NewScanner(pipe) | ||||
| 	for scanner.Scan() { | ||||
| 		log := scanner.Text() | ||||
| 		watch_output = append(watch_output, log) | ||||
|  | ||||
| 		if strings.HasPrefix(log, "Progress:") { | ||||
|  | ||||
| 			current_watch = *models.NewArgoLogs(watch_output) | ||||
| 			workflowName = current_watch.Name | ||||
| 			if !current_watch.Equals(previous_watch) { | ||||
| 				wg.Add(1) | ||||
| 				checkStatus(current_watch.Status, previous_watch.Status) | ||||
| 				jsonified, err := json.Marshal(current_watch) | ||||
| 				if err != nil { | ||||
| 					logger.Error().Msg("Could not create watch log") | ||||
| 				} | ||||
| 				wf_logger.Info().Msg(string(jsonified)) | ||||
| 				previous_watch = current_watch | ||||
| 				current_watch = models.ArgoWatch{} | ||||
| 				wg.Done() | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Debug, no logs sent | ||||
| func logPods(pipe io.ReadCloser, name string) { | ||||
| 	pods_logger = wf_logger.With().Str("pod_name", name).Logger() | ||||
| 	scanner := bufio.NewScanner(pipe) | ||||
| 	for scanner.Scan() { | ||||
| 		log := scanner.Text() | ||||
| 		pods_logger.Info().Msg(log) | ||||
| 	} | ||||
|  | ||||
| } | ||||
|  | ||||
| func loadConfig(is_k8s bool, parser *argparse.Parser) { | ||||
|  | ||||
| 	var o *onion.Onion | ||||
|  | ||||
| 	o = initOnion(o) | ||||
| 	// These variables can only be retrieved in the onion | ||||
| 	// Variables that don't depend on the environment (from conf file), can be loaded after | ||||
| 	// We can't use underscores in the env variable names because underscore is also the delimiter after the OCMONITOR prefix | ||||
| 	setConf(is_k8s, o, parser) | ||||
|  | ||||
| 	if !IsValidUUID(conf.GetConfig().ExecutionID) { | ||||
| 		logger.Fatal().Msg("Provided ID is not an UUID") | ||||
| 	} | ||||
| 	// if !IsValidUUID(conf.GetConfig().ExecutionID) { | ||||
| 	// 	logger.Fatal().Msg("Provided ID is not an UUID") | ||||
| 	// } | ||||
| } | ||||
|  | ||||
| func setConf(is_k8s bool, o *onion.Onion, parser *argparse.Parser) { | ||||
| 	if is_k8s { | ||||
| 		conf.GetConfig().LokiURL = o.GetStringDefault("lokiurl", "http://127.0.0.1:3100") | ||||
| 		i, err := strconv.Atoi(o.GetString("timeout")) | ||||
| 		if err == nil { | ||||
| 			conf.GetConfig().Timeout = i | ||||
| 		} else { | ||||
| 			logger.Error().Msg("Could not parse timeout, using default value") | ||||
| 		} | ||||
| 		conf.GetConfig().ExecutionID = o.GetString("workflow") | ||||
| 		mongo := o.GetStringDefault("mongourl", "mongodb://127.0.0.1:27017") | ||||
| 		db := o.GetStringDefault("database", "DC_myDC") | ||||
| 	url := parser.String("u", "url", &argparse.Options{Required: true, Default: "http://127.0.0.1:3100", Help: "Url to the Loki database logs will be sent to"}) | ||||
| 	mode := parser.String("M", "mode", &argparse.Options{Required: false, Default: "", Help: "Mode of the execution"}) | ||||
| 	execution := parser.String("e", "execution", &argparse.Options{Required: true, Help: "Execution ID of the workflow to request from oc-catalog API"}) | ||||
| 	peer := parser.String("p", "peer", &argparse.Options{Required: false, Default: "", Help: "Peer ID of the workflow to request from oc-catalog API"}) | ||||
| 	mongo := parser.String("m", "mongo", &argparse.Options{Required: true, Default: "mongodb://127.0.0.1:27017", Help: "URL to reach the MongoDB"}) | ||||
| 	db := parser.String("d", "database", &argparse.Options{Required: true, Default: "DC_myDC", Help: "Name of the database to query in MongoDB"}) | ||||
| 	timeout := parser.Int("t", "timeout", &argparse.Options{Required: false, Default: -1, Help: "Timeout for the execution of the workflow"}) | ||||
|  | ||||
| 		conf.GetConfig().MongoURL = mongo | ||||
| 		conf.GetConfig().Database = db | ||||
| 	} else { | ||||
| 		url := parser.String("u", "url", &argparse.Options{Required: true, Default: "http://127.0.0.1:3100", Help: "Url to the Loki database logs will be sent to"}) | ||||
| 		execution := parser.String("e", "execution", &argparse.Options{Required: true, Help: "Execution ID of the workflow to request from oc-catalog API"}) | ||||
| 		mongo := parser.String("m", "mongo", &argparse.Options{Required: true, Default: "mongodb://127.0.0.1:27017", Help: "URL to reach the MongoDB"}) | ||||
| 		db := parser.String("d", "database", &argparse.Options{Required: true, Default: "DC_myDC", Help: "Name of the database to query in MongoDB"}) | ||||
| 		timeout := parser.Int("t", "timeout", &argparse.Options{Required: false, Default: -1, Help: "Timeout for the execution of the workflow"}) | ||||
| 		err := parser.Parse(os.Args) | ||||
| 		if err != nil { | ||||
| 			fmt.Println(parser.Usage(err)) | ||||
| 			os.Exit(1) | ||||
| 		} | ||||
| 		 | ||||
| 		conf.GetConfig().Logs = "debug" | ||||
| 	ca := parser.String("c", "ca", &argparse.Options{Required: false, Default: "", Help: "CA file for the Kubernetes cluster"}) | ||||
| 	cert := parser.String("C", "cert", &argparse.Options{Required: false, Default: "", Help: "Cert file for the Kubernetes cluster"}) | ||||
| 	data := parser.String("D", "data", &argparse.Options{Required: false, Default: "", Help: "Data file for the Kubernetes cluster"}) | ||||
|  | ||||
| 		conf.GetConfig().LokiURL = *url | ||||
| 		conf.GetConfig().MongoURL = *mongo | ||||
| 		conf.GetConfig().Database = *db | ||||
| 		conf.GetConfig().Timeout = *timeout | ||||
| 		conf.GetConfig().ExecutionID = *execution | ||||
| 	host := parser.String("H", "host", &argparse.Options{Required: false, Default: "", Help: "Host for the Kubernetes cluster"}) | ||||
| 	port := parser.String("P", "port", &argparse.Options{Required: false, Default: "6443", Help: "Port for the Kubernetes cluster"}) | ||||
|  | ||||
| 	// argoHost := parser.String("h", "argoHost", &argparse.Options{Required: false, Default: "", Help: "Host where Argo is running from"}) // can't use -h because its reserved to help | ||||
|  | ||||
| 	err := parser.Parse(os.Args) | ||||
| 	if err != nil { | ||||
| 		fmt.Println(parser.Usage(err)) | ||||
| 		os.Exit(1) | ||||
| 	} | ||||
| 	conf.GetConfig().Logs = "debug" | ||||
| 	conf.GetConfig().LokiURL = *url | ||||
| 	conf.GetConfig().MongoURL = *mongo | ||||
| 	conf.GetConfig().Database = *db | ||||
| 	conf.GetConfig().Timeout = *timeout | ||||
| 	conf.GetConfig().Mode = *mode | ||||
| 	conf.GetConfig().ExecutionID = *execution | ||||
| 	conf.GetConfig().PeerID = *peer | ||||
|  | ||||
| 	conf.GetConfig().KubeHost = *host | ||||
| 	conf.GetConfig().KubePort = *port | ||||
|  | ||||
| 	// conf.GetConfig().ArgoHost = *argoHost | ||||
|  | ||||
| 	decoded, err := base64.StdEncoding.DecodeString(*ca) | ||||
| 	if err == nil { | ||||
| 		conf.GetConfig().KubeCA = string(decoded) | ||||
| 	} | ||||
| 	decoded, err = base64.StdEncoding.DecodeString(*cert) | ||||
| 	if err == nil { | ||||
| 		conf.GetConfig().KubeCert = string(decoded) | ||||
| 	} | ||||
| 	decoded, err = base64.StdEncoding.DecodeString(*data) | ||||
| 	if err == nil { | ||||
| 		conf.GetConfig().KubeData = string(decoded) | ||||
| 	} | ||||
| } | ||||
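|  | ||||
| // Illustrative invocation only (flags as defined above, placeholder values): | ||||
| //   oc-monitord -e <execution-uuid> -u http://127.0.0.1:3100 -m mongodb://127.0.0.1:27017 -d DC_myDC \ | ||||
| //     -M kubernetes -p <peer-id> -H <kube-api-host> -P 6443 -c <base64 CA> -C <base64 cert> -D <base64 key> | ||||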
|  | ||||
|  | ||||
| func initOnion(o *onion.Onion) *onion.Onion { | ||||
| 	logger = logs.CreateLogger("oc-monitord") | ||||
| 	configFile := "" | ||||
| @@ -292,28 +300,47 @@ func getContainerName(argo_file string) string { | ||||
| 	re := regexp.MustCompile(regex) | ||||
|  | ||||
| 	container_name := re.FindString(argo_file) | ||||
|  | ||||
| 	return container_name | ||||
| } | ||||
|  | ||||
| // Uses the ArgoWatch object to update status of the workflow execution object | ||||
| func checkStatus(current string, previous string) { | ||||
| 	if current != previous { | ||||
| 		updateStatus(current) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func updateStatus(status string) { | ||||
| func updateStatus(status string, log string) { | ||||
| 	exec_id := conf.GetConfig().ExecutionID | ||||
|  | ||||
| 	wf_exec := &workflow_execution.WorkflowExecution{} | ||||
| 	wf_exec := &workflow_execution.WorkflowExecution{AbstractObject: utils.AbstractObject{UUID: conf.GetConfig().ExecutionID}} | ||||
| 	wf_exec.ArgoStatusToState(status) | ||||
|  | ||||
| 	serialized := wf_exec.Serialize() | ||||
| 	res := oclib.UpdateOne(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), serialized, exec_id) | ||||
|  | ||||
| 	if res.Code != 200 { | ||||
| 		logger.Error().Msg("Could not update status for workflow execution " + exec_id) | ||||
| 	exec, _, err := workflow_execution.NewAccessor(&tools.APIRequest{ | ||||
| 		PeerID: conf.GetConfig().PeerID, | ||||
| 	}).UpdateOne(wf_exec, exec_id) | ||||
| 	if err != nil { | ||||
| 		logger.Error().Msg("Could not update status for workflow execution " + exec_id + err.Error()) | ||||
| 	} | ||||
| 	splitted := strings.Split(log, "-") | ||||
| 	if len(splitted) > 1 { | ||||
| 		we := exec.(*workflow_execution.WorkflowExecution) | ||||
| 		itemID := splitted[len(splitted)-1] // TODO: in logs found item ID | ||||
| 		caller := &tools.HTTPCaller{ | ||||
| 			URLS: map[tools.DataType]map[tools.METHOD]string{ | ||||
| 				tools.PEER: { | ||||
| 					tools.POST: "/status/", | ||||
| 				}, | ||||
| 				tools.BOOKING: { | ||||
| 					tools.PUT: "http://localhost:8080/booking/:id", | ||||
| 				}, | ||||
| 			}, | ||||
| 		} | ||||
| 		if we.PeerBookByGraph != nil { | ||||
| 			for peerID, val := range we.PeerBookByGraph { | ||||
| 				if val[itemID] == nil { | ||||
| 					continue | ||||
| 				} | ||||
| 				for _, log := range val[itemID] { | ||||
| 					(&peer.Peer{}).LaunchPeerExecution(peerID, log, tools.BOOKING, tools.PUT, &booking.Booking{ | ||||
| 						State: we.State, | ||||
| 					}, caller) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	fmt.Printf("status argo : %s\nstatus db : %s", status, serialized["state"]) | ||||
| } | ||||
|   | ||||
| @@ -1,72 +0,0 @@ | ||||
| package models | ||||
|  | ||||
| import ( | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| type ArgoWatch struct { | ||||
| 	Name      string | ||||
| 	Namespace string | ||||
| 	Status    string | ||||
| 	Conditions | ||||
| 	Created  string | ||||
| 	Started  string | ||||
| 	Duration string | ||||
| 	Progress string | ||||
| } | ||||
|  | ||||
| type Conditions struct { | ||||
| 	PodRunning bool | ||||
| 	Completed  bool | ||||
| } | ||||
|  | ||||
| func (a *ArgoWatch) Equals(arg ArgoWatch) bool { | ||||
| 	return a.Status == arg.Status && a.Progress == arg.Progress && a.Conditions.PodRunning == arg.Conditions.PodRunning && a.Conditions.Completed == arg.Conditions.Completed | ||||
| } | ||||
|  | ||||
| // Take the slice of string that make up one round of stderr outputs from the --watch option in argo submit | ||||
| func NewArgoLogs(inputs []string) *ArgoWatch { | ||||
| 	var workflow ArgoWatch | ||||
|  | ||||
| 	for _, input := range inputs { | ||||
| 		line := strings.TrimSpace(input) | ||||
| 		if line == "" { | ||||
| 			continue | ||||
| 		} | ||||
| 		switch { | ||||
| 		case strings.HasPrefix(line, "Name:"): | ||||
| 			workflow.Name = parseValue(line) | ||||
| 		case strings.HasPrefix(line, "Namespace:"): | ||||
| 			workflow.Namespace = parseValue(line) | ||||
| 		case strings.HasPrefix(line, "Status:"): | ||||
| 			workflow.Status = parseValue(line) | ||||
| 		case strings.HasPrefix(line, "PodRunning"): | ||||
| 			workflow.PodRunning = parseBoolValue(line) | ||||
| 		case strings.HasPrefix(line, "Completed"): | ||||
| 			workflow.Completed = parseBoolValue(line) | ||||
| 		case strings.HasPrefix(line, "Created:"): | ||||
| 			workflow.Created = parseValue(line) | ||||
| 		case strings.HasPrefix(line, "Started:"): | ||||
| 			workflow.Started = parseValue(line) | ||||
| 		case strings.HasPrefix(line, "Duration:"): | ||||
| 			workflow.Duration = parseValue(line) | ||||
| 		case strings.HasPrefix(line, "Progress:"): | ||||
| 			workflow.Progress = parseValue(line) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return &workflow | ||||
| } | ||||
|  | ||||
| func parseValue(line string) string { | ||||
| 	parts := strings.SplitN(line, ":", 2) | ||||
| 	if len(parts) < 2 { | ||||
| 		return "" | ||||
| 	} | ||||
| 	return strings.TrimSpace(parts[1]) | ||||
| } | ||||
|  | ||||
| func parseBoolValue(line string) bool { | ||||
| 	value := parseValue(line) | ||||
| 	return value == "True" | ||||
| } | ||||
| @@ -1,101 +0,0 @@ | ||||
| package models | ||||
|  | ||||
| import "strconv" | ||||
|  | ||||
| // apiVersion: networking.k8s.io/v1 | ||||
| // kind: Ingress | ||||
| // metadata: | ||||
| //   name: example-ingress | ||||
| //   namespace: argo | ||||
| //   annotations: | ||||
| //     traefik.ingress.kubernetes.io/router.entrypoints: web  # Utilisation de l'entrypoint HTTP standard | ||||
| // spec: | ||||
| //   rules: | ||||
| //   - http: | ||||
| //       paths: | ||||
| //       - path: /dtf | ||||
| //         pathType: Prefix | ||||
| //         backend: | ||||
| //           service: | ||||
| //             name: workflow-service-qtjk2 | ||||
| //             port: | ||||
| //               number: 80 | ||||
|  | ||||
| var ingress_manifest = &Manifest{ | ||||
| 	ApiVersion: "networking.k8s.io/v1", | ||||
| 	Kind: "Ingress", | ||||
| 	Metadata: Metadata{ | ||||
| 		GenerateName: "ingress-argo-", | ||||
| 	}, | ||||
| } | ||||
|  | ||||
| type Ingress struct { | ||||
| 	ApiVersion string      `yaml:"apiVersion,omitempty"` | ||||
| 	Kind       string      `yaml:"kind,omitempty"` | ||||
| 	Metadata   Metadata    `yaml:"metadata,omitempty"` | ||||
| 	Spec       IngressSpec `yaml:"spec,omitempty"` | ||||
| } | ||||
|  | ||||
|  | ||||
|  | ||||
| type IngressSpec struct { | ||||
| 	Rules []Rule `yaml:"rules,omitempty"` | ||||
| } | ||||
|  | ||||
| type Rule struct { | ||||
| 	HTTP HTTP `yaml:"http,omitempty"` | ||||
| } | ||||
|  | ||||
| type HTTP struct { | ||||
| 	Paths []Path `yaml:"paths,omitempty"` | ||||
| } | ||||
|  | ||||
| type Path struct { | ||||
| 	Path     string  `yaml:"path,omitempty"` | ||||
| 	PathType string  `yaml:"pathType,omitempty"` | ||||
| 	Backend  Backend `yaml:"backend,omitempty"` | ||||
| } | ||||
|  | ||||
| type Backend struct { | ||||
| 	ServiceName string `yaml:"serviceName,omitempty"` | ||||
| 	ServicePort int64  `yaml:"servicePort,omitempty"` | ||||
| } | ||||
|  | ||||
| func NewIngress(contract map[string]map[string]string, serviceName string) Ingress { | ||||
| 	new_ingr := Ingress{ | ||||
| 		ApiVersion: "networking.k8s.io/v1", | ||||
| 		Kind:       "Ingress", | ||||
| 		Metadata: Metadata{ | ||||
| 			GenerateName: "ingress-argo-", | ||||
| 		}, | ||||
| 		Spec: IngressSpec{ | ||||
| 			Rules: []Rule{ | ||||
| 				{ | ||||
| 					HTTP: HTTP{ | ||||
| 						Paths: []Path{}, | ||||
| 						}, | ||||
| 					}, | ||||
| 				}, | ||||
| 			}, | ||||
| 		} | ||||
|  | ||||
| 	for port_to_reverse_str, translations := range contract{ | ||||
| 		 | ||||
| 		port, _ := strconv.ParseInt(port_to_reverse_str,10,64) | ||||
|  | ||||
| 		port_reverse := Path{ | ||||
| 			Path:     translations["reverse"], | ||||
| 			PathType: "Prefix", | ||||
| 			Backend: Backend{ | ||||
| 				ServiceName: serviceName, | ||||
| 				ServicePort:port, | ||||
| 			}, | ||||
| 		} | ||||
|  | ||||
| 		new_ingr.Spec.Rules[0].HTTP.Paths = append(new_ingr.Spec.Rules[0].HTTP.Paths, port_reverse) | ||||
|  | ||||
| 	} | ||||
|  | ||||
| 	return new_ingr | ||||
| } | ||||
|  | ||||
| @@ -1,12 +0,0 @@ | ||||
| package models | ||||
|  | ||||
| type Manifest struct { | ||||
| 	ApiVersion string   `yaml:"apiVersion,omitempty"` | ||||
| 	Kind       string   `yaml:"kind"` | ||||
| 	Metadata   Metadata `yaml:"metadata,omitempty"` | ||||
| } | ||||
|  | ||||
| type Metadata struct { | ||||
|     Name      		string	`yaml:"name"` | ||||
|     GenerateName	string	`yaml:"generateName"` | ||||
| } | ||||
| @@ -1,35 +1,36 @@ | ||||
| package models | ||||
|  | ||||
|  | ||||
| type ServiceResource struct { | ||||
| 	Action            string `yaml:"action,omitempty"` | ||||
|     SuccessCondition  string `yaml:"successCondition,omitempty"` | ||||
| 	SuccessCondition  string `yaml:"successCondition,omitempty"` | ||||
| 	FailureCondition  string `yaml:"failureCondition,omitempty"` | ||||
| 	SetOwnerReference bool   `yaml:"setOwnerReference,omitempty"` | ||||
| 	Manifest          string `yaml:"manifest,omitempty"` | ||||
| } | ||||
|  | ||||
| type Service struct { | ||||
|     Manifest | ||||
|     Spec       ServiceSpec       `yaml:"spec"` | ||||
| 	APIVersion string      `yaml:"apiVersion"` | ||||
| 	Kind       string      `yaml:"kind"` | ||||
| 	Metadata   Metadata    `yaml:"metadata"` | ||||
| 	Spec       ServiceSpec `yaml:"spec"` | ||||
| } | ||||
|  | ||||
|  | ||||
| type Metadata struct { | ||||
| 	Name string `yaml:"name"` | ||||
| } | ||||
|  | ||||
| // ServiceSpec is the specification of the Kubernetes Service | ||||
| type ServiceSpec struct { | ||||
|     Selector  map[string]string `yaml:"selector,omitempty"` | ||||
|     Ports     []ServicePort     `yaml:"ports"` | ||||
|     ClusterIP string            `yaml:"clusterIP,omitempty"` | ||||
|     Type      string            `yaml:"type,omitempty"` | ||||
| 	Selector  map[string]string `yaml:"selector,omitempty"` | ||||
| 	Ports     []ServicePort     `yaml:"ports"` | ||||
| 	ClusterIP string            `yaml:"clusterIP,omitempty"` | ||||
| 	Type      string            `yaml:"type,omitempty"` | ||||
| } | ||||
|  | ||||
| // ServicePort defines a port for a Kubernetes Service | ||||
| type ServicePort struct { | ||||
|     Name       string `yaml:"name"`                 // Even if empty need to be in the yaml | ||||
|      | ||||
|     Protocol   string `yaml:"protocol,omitempty"` | ||||
|     Port       int64  `yaml:"port"` | ||||
|     TargetPort int64  `yaml:"targetPort,omitempty"` | ||||
|     NodePort   int64  `yaml:"nodePort,omitempty"` | ||||
| } | ||||
| 	Name       string `yaml:"name"` // Must be present in the YAML even when empty | ||||
| 	Protocol   string `yaml:"protocol,omitempty"` | ||||
| 	Port       int    `yaml:"port"` | ||||
| 	TargetPort int    `yaml:"targetPort,omitempty"` | ||||
| } | ||||
|   | ||||
| @@ -1,5 +1,12 @@ | ||||
| package models | ||||
|  | ||||
| import ( | ||||
| 	"strings" | ||||
|  | ||||
| 	"cloud.o-forge.io/core/oc-lib/models/common/models" | ||||
| 	"cloud.o-forge.io/core/oc-lib/models/resources" | ||||
| ) | ||||
|  | ||||
| type Parameter struct { | ||||
| 	Name  string `yaml:"name,omitempty"` | ||||
| 	Value string `yaml:"value,omitempty"` | ||||
| @@ -12,9 +19,28 @@ type Container struct { | ||||
| 	VolumeMounts []VolumeMount `yaml:"volumeMounts,omitempty"` | ||||
| } | ||||
|  | ||||
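| // Reading of the logic below (descriptive note): the mount is added to the container unless a | ||||
| // mount with the same name already exists there; the workflow-level volume list is extended with | ||||
| // it only when none of the container's mounts already appears in that list, and the (possibly | ||||
| // extended) list is returned. | ||||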
| func (c *Container) AddVolumeMount(volumeMount VolumeMount, volumes []VolumeMount) []VolumeMount { | ||||
| 	for _, vm := range c.VolumeMounts { | ||||
| 		if vm.Name == volumeMount.Name { | ||||
| 			return volumes | ||||
| 		} | ||||
| 	} | ||||
| 	c.VolumeMounts = append(c.VolumeMounts, volumeMount) | ||||
| 	for _, vm := range c.VolumeMounts { | ||||
| 		for _, v := range volumes { | ||||
| 			if vm.Name == v.Name { | ||||
| 				return volumes | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	volumes = append(volumes, volumeMount) | ||||
| 	return volumes | ||||
| } | ||||
|  | ||||
| type VolumeMount struct { | ||||
| 	Name      string `yaml:"name"` | ||||
| 	MountPath string `yaml:"mountPath"` | ||||
| 	Name      string                     `yaml:"name"` | ||||
| 	MountPath string                     `yaml:"mountPath"` | ||||
| 	Storage   *resources.StorageResource `yaml:"-"` | ||||
| } | ||||
|  | ||||
| type Task struct { | ||||
| @@ -32,15 +58,95 @@ type Dag struct { | ||||
|  | ||||
| type TemplateMetadata struct { | ||||
| 	Labels map[string]string `yaml:"labels,omitempty"` | ||||
| 	Annotations map[string]string `yaml:"annotations,omitempty"` | ||||
| } | ||||
|  | ||||
| type Secret struct { | ||||
| 	Name string `yaml:"name"` | ||||
| 	Key  string `yaml:"key"` | ||||
| } | ||||
|  | ||||
| type Key struct { | ||||
| 	Key             string  `yaml:"key"` | ||||
| 	Bucket          string  `yaml:"bucket"` | ||||
| 	EndPoint        string  `yaml:"endpoint"` | ||||
| 	Insecure        bool    `yaml:"insecure"` | ||||
| 	AccessKeySecret *Secret `yaml:"accessKeySecret"` | ||||
| 	SecretKeySecret *Secret `yaml:"secretKeySecret"` | ||||
| } | ||||
|  | ||||
| type Artifact struct { | ||||
| 	Name string `yaml:"name"` | ||||
| 	Path string `yaml:"path"` | ||||
| 	S3   *Key   `yaml:"s3,omitempty"` | ||||
| } | ||||
|  | ||||
| type InOut struct { | ||||
| 	Parameters []Parameter `yaml:"parameters"` | ||||
| 	Artifacts  []Artifact  `yaml:"artifacts,omitempty"` | ||||
| } | ||||
|  | ||||
| type Template struct { | ||||
| 	Name   string `yaml:"name"` | ||||
| 	Inputs struct { | ||||
| 		Parameters []Parameter `yaml:"parameters"` | ||||
| 	} `yaml:"inputs,omitempty"` | ||||
| 	Container Container `yaml:"container,omitempty"` | ||||
| 	Dag       Dag       `yaml:"dag,omitempty"` | ||||
| 	Name      string           `yaml:"name"` | ||||
| 	Inputs    InOut            `yaml:"inputs,omitempty"` | ||||
| 	Outputs   InOut            `yaml:"outputs,omitempty"` | ||||
| 	Container Container        `yaml:"container,omitempty"` | ||||
| 	Dag       *Dag             `yaml:"dag,omitempty"` | ||||
| 	Metadata  TemplateMetadata `yaml:"metadata,omitempty"` | ||||
| 	Resource  ServiceResource `yaml:"resource,omitempty"` | ||||
| 	Resource  ServiceResource  `yaml:"resource,omitempty"` | ||||
| } | ||||
|  | ||||
| func (template *Template) CreateContainer(processing *resources.ProcessingResource, dag *Dag) { | ||||
| 	instance := processing.GetSelectedInstance() | ||||
| 	if instance == nil { | ||||
| 		return | ||||
| 	} | ||||
| 	inst := instance.(*resources.ProcessingInstance) | ||||
| 	container := Container{Image: inst.Access.Container.Image} | ||||
| 	if container.Image == "" { | ||||
| 		return | ||||
| 	} | ||||
| 	container.Command = []string{"sh", "-c"} // all is bash | ||||
| 	for _, v := range inst.Env { | ||||
| 		template.Inputs.Parameters = append(template.Inputs.Parameters, Parameter{Name: v.Name}) | ||||
| 	} | ||||
| 	for _, v := range inst.Inputs { | ||||
| 		template.Inputs.Parameters = append(template.Inputs.Parameters, Parameter{Name: v.Name}) | ||||
| 	} | ||||
| 	for _, v := range inst.Inputs { // presumably the instance outputs are intended here | ||||
| 		template.Outputs.Parameters = append(template.Outputs.Parameters, Parameter{Name: v.Name}) | ||||
| 	} | ||||
| 	cmd := strings.ReplaceAll(inst.Access.Container.Command, container.Image, "") | ||||
|  | ||||
| 	for _, a := range strings.Split(cmd, " ") { | ||||
| 		container.Args = append(container.Args, template.ReplacePerEnv(a, inst.Env)) | ||||
| 	} | ||||
| 	for _, a := range strings.Split(inst.Access.Container.Args, " ") { | ||||
| 		container.Args = append(container.Args, template.ReplacePerEnv(a, inst.Env)) | ||||
| 	} | ||||
| 	container.Args = []string{strings.Join(container.Args, " ")} | ||||
|  | ||||
| 	template.Container = container | ||||
| } | ||||
|  | ||||
| func (template *Template) ReplacePerEnv(arg string, envs []models.Param) string { | ||||
| 	for _, v := range envs { | ||||
| 		if strings.Contains(arg, v.Name) { | ||||
| 			value := "{{ inputs.parameters." + v.Name + " }}" | ||||
| 			arg = strings.ReplaceAll(arg, v.Name, value) | ||||
| 			arg = strings.ReplaceAll(arg, "$"+v.Name, value) | ||||
| 			arg = strings.ReplaceAll(arg, "$", "") | ||||
| 		} | ||||
| 	} | ||||
| 	return arg | ||||
| } | ||||
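|  | ||||
| // Example (illustrative): with an env entry named OUTPUT, ReplacePerEnv("python train.py $OUTPUT", envs) | ||||
| // returns "python train.py {{ inputs.parameters.OUTPUT }}": the name is substituted first, then any | ||||
| // leftover "$" characters are stripped. | ||||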
|  | ||||
| // Add the metadata that allows Admiralty to pick up an Argo Workflow that needs to be distributed across clusters | ||||
| // The value of "clustername" is the peerId, which must be replaced by the node's name for this specific execution | ||||
| func (t *Template) AddAdmiraltyAnnotations(peerId string){ | ||||
| 	if t.Metadata.Annotations == nil { | ||||
| 		t.Metadata.Annotations = make(map[string]string) | ||||
| 	} | ||||
| 	t.Metadata.Annotations["multicluster.admiralty.io/elect"] = "" | ||||
| 	t.Metadata.Annotations["multicluster.admiralty.io/clustername"] = peerId | ||||
| } | ||||
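|  | ||||
| // Sketch of the resulting template metadata once rendered to YAML (peerId shown as a placeholder): | ||||
| //   metadata: | ||||
| //     annotations: | ||||
| //       multicluster.admiralty.io/elect: "" | ||||
| //       multicluster.admiralty.io/clustername: <peerId> | ||||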
BIN oc-monitord
Binary file not shown.
							| @@ -4,7 +4,7 @@ | ||||
|     "0d565c87-50ae-4a73-843d-f8b2d4047772", | ||||
|     "2ce0323f-a85d-4b8b-a783-5280f48d634a" | ||||
|   ], | ||||
|   "datacenters": [ | ||||
|   "computes": [ | ||||
|     "7b989e97-c3e7-49d2-a3a7-f959da4870b5" | ||||
|   ], | ||||
|   "graph": { | ||||
| @@ -79,17 +79,17 @@ | ||||
|         "id": "6a7e8860-7c26-4b70-9b3a-1bd27adcdfe1", | ||||
|         "width": 0, | ||||
|         "height": 0, | ||||
|         "datacenter" : { | ||||
|         "compute" : { | ||||
|           "id": "7b989e97-c3e7-49d2-a3a7-f959da4870b5", | ||||
|           "name": "Mundi datacenter", | ||||
|           "name": "Mundi compute", | ||||
|           "short_description": "Mundi Opencloud Instance", | ||||
|           "description": "A very long description of what this data is", | ||||
|           "logo": "https://cloud.o-forge.io/core/deperecated-oc-catalog/src/branch/main/scripts/local_imgs/Mundi datacenter.png", | ||||
|           "logo": "https://cloud.o-forge.io/core/deperecated-oc-catalog/src/branch/main/scripts/local_imgs/Mundi compute.png", | ||||
|           "owner": "IRT", | ||||
|           "source_url": "http://www.google.com", | ||||
|           "resource_model": { | ||||
|             "id": "c3983010-1990-4ac0-8533-5389867e4424", | ||||
|             "resource_type": "datacenter_resource" | ||||
|             "resource_type": "compute_resource" | ||||
|           }, | ||||
|           "cpus": [ | ||||
|             { | ||||
|   | ||||
27 tools/interface.go Normal file
							| @@ -0,0 +1,27 @@ | ||||
| package tools | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"io" | ||||
|  | ||||
| 	"k8s.io/apimachinery/pkg/watch" | ||||
| ) | ||||
|  | ||||
| type Tool interface { | ||||
| 	CreateArgoWorkflow(path string, ns string) (string, error) | ||||
| 	CreateAccessSecret(ns string, login string, password string) (string, error) | ||||
| 	GetArgoWatch(executionId string, wfName string) (watch.Interface, error) | ||||
| 	GetPodLogger(ns string, wfName string, podName string) (io.ReadCloser, error) | ||||
| } | ||||
|  | ||||
| var _service = map[string]func() (Tool, error){ | ||||
| 	"kubernetes": NewKubernetesTool, | ||||
| } | ||||
|  | ||||
| func NewService(name string) (Tool, error) { | ||||
| 	service, ok := _service[name] | ||||
| 	if !ok { | ||||
| 		return nil, errors.New("service not found") | ||||
| 	} | ||||
| 	return service() | ||||
| } | ||||
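|  | ||||
| // Minimal usage sketch (assuming the configuration is already loaded; paths are hypothetical): | ||||
| //   t, err := NewService("kubernetes") // the only implementation registered above | ||||
| //   if err != nil { /* handle unknown service name */ } | ||||
| //   name, err := t.CreateArgoWorkflow("./argo_workflows/wf.yaml", "my-namespace") | ||||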
187 tools/kubernetes.go Normal file
							| @@ -0,0 +1,187 @@ | ||||
| package tools | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"encoding/base64" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"oc-monitord/conf" | ||||
| 	"oc-monitord/utils" | ||||
| 	"os" | ||||
| 	"time" | ||||
|  | ||||
| 	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" | ||||
| 	"github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned" | ||||
| 	"github.com/google/uuid" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/runtime" | ||||
| 	"k8s.io/apimachinery/pkg/runtime/serializer" | ||||
| 	"k8s.io/apimachinery/pkg/watch" | ||||
| 	"k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/client-go/rest" | ||||
| ) | ||||
|  | ||||
| type KubernetesTools struct { | ||||
| 	Set          *kubernetes.Clientset | ||||
| 	VersionedSet *versioned.Clientset | ||||
| } | ||||
|  | ||||
| func NewKubernetesTool() (Tool, error) { | ||||
| 	// Load Kubernetes config (from ~/.kube/config) | ||||
| 	config := &rest.Config{ | ||||
| 		Host: conf.GetConfig().KubeHost + ":" + conf.GetConfig().KubePort, | ||||
| 		TLSClientConfig: rest.TLSClientConfig{ | ||||
| 			CAData:   []byte(conf.GetConfig().KubeCA), | ||||
| 			CertData: []byte(conf.GetConfig().KubeCert), | ||||
| 			KeyData:  []byte(conf.GetConfig().KubeData), | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	// Create clientset | ||||
| 	clientset, err := kubernetes.NewForConfig(config) | ||||
| 	if err != nil { | ||||
| 		return nil, errors.New("Error creating Kubernetes client: " + err.Error()) | ||||
| 	} | ||||
|  | ||||
| 	clientset2, err := versioned.NewForConfig(config) | ||||
| 	if err != nil { | ||||
| 		return nil, errors.New("Error creating Kubernetes versionned client: " + err.Error()) | ||||
| 	} | ||||
|  | ||||
| 	return &KubernetesTools{ | ||||
| 		Set:          clientset, | ||||
| 		VersionedSet: clientset2, | ||||
| 	}, nil | ||||
| } | ||||
|  | ||||
| func (k *KubernetesTools) CreateArgoWorkflow(path string, ns string) (string, error) { | ||||
| 	// Read workflow YAML file | ||||
| 	workflowYAML, err := os.ReadFile(path) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 	// Decode the YAML into a Workflow struct | ||||
| 	scheme := runtime.NewScheme() | ||||
| 	_ = wfv1.AddToScheme(scheme) | ||||
| 	codecs := serializer.NewCodecFactory(scheme) | ||||
| 	decode := codecs.UniversalDeserializer().Decode | ||||
|  | ||||
| 	obj, _, err := decode(workflowYAML, nil, nil) | ||||
| 	if err != nil { | ||||
| 		return "", errors.New("failed to decode YAML: " + err.Error()) | ||||
| 	} | ||||
|  | ||||
| 	workflow, ok := obj.(*wfv1.Workflow) | ||||
| 	if !ok { | ||||
| 		return "", errors.New("decoded object is not a Workflow") | ||||
| 	} | ||||
|  | ||||
| 	// Create the workflow in the "argo" namespace | ||||
| 	createdWf, err := k.VersionedSet.ArgoprojV1alpha1().Workflows(ns).Create(context.TODO(), workflow, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		return "", errors.New("failed to create workflow: " + err.Error()) | ||||
| 	} | ||||
| 	fmt.Printf("workflow %s created in namespace %s\n", createdWf.Name, ns) | ||||
| 	return createdWf.Name, nil | ||||
| } | ||||
|  | ||||
| func (k *KubernetesTools) CreateAccessSecret(ns string, login string, password string) (string, error) { | ||||
| 	// Namespace where the secret will be created | ||||
| 	namespace := "default" | ||||
| 	// Encode the secret data (Kubernetes requires base64-encoded values) | ||||
| 	secretData := map[string][]byte{ | ||||
| 		"access-key": []byte(base64.StdEncoding.EncodeToString([]byte(login))), | ||||
| 		"secret-key": []byte(base64.StdEncoding.EncodeToString([]byte(password))), | ||||
| 	} | ||||
|  | ||||
| 	// Define the Secret object | ||||
| 	name := uuid.New().String() | ||||
| 	secret := &v1.Secret{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:      name, | ||||
| 			Namespace: ns, | ||||
| 		}, | ||||
| 		Type: v1.SecretTypeOpaque, | ||||
| 		Data: secretData, | ||||
| 	} | ||||
| 	// Create the Secret in Kubernetes | ||||
| 	_, err := k.Set.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		return "", errors.New("Error creating secret: " + err.Error()) | ||||
| 	} | ||||
| 	return name, nil | ||||
| } | ||||
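|  | ||||
| // Note: client-go expects raw bytes in Secret.Data and base64-encodes them itself when the object | ||||
| // is serialized, so the explicit StdEncoding above ends up storing double-encoded values; readers | ||||
| // of this secret have to decode twice. | ||||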
|  | ||||
| func (k *KubernetesTools) GetArgoWatch(executionId string, wfName string) (watch.Interface, error){ | ||||
| 	wfl := utils.GetWFLogger("") | ||||
| 	wfl.Debug().Msg("Starting argo watch with argo lib") | ||||
| 	fmt.Println("metadata.name=oc-monitor-"+wfName + "  in namespace : " + executionId) | ||||
| 	options := metav1.ListOptions{FieldSelector: "metadata.name=oc-monitor-"+wfName} | ||||
| 	fmt.Println(options) | ||||
| 	watcher, err := k.VersionedSet.ArgoprojV1alpha1().Workflows(executionId).Watch(context.TODO(), options) | ||||
| 	if err != nil { | ||||
| 		return nil, errors.New("Error executing 'argo watch " + wfName + " -n " + executionId + "' with the ArgoprojV1alpha1 client") | ||||
| 	} | ||||
| 	 | ||||
|  | ||||
| 	return watcher, nil  | ||||
|  | ||||
| } | ||||
|  | ||||
| func (k *KubernetesTools) GetPodLogger(ns string, wfName string, nodeName string) (io.ReadCloser, error) { | ||||
| 	var targetPod v1.Pod | ||||
|  | ||||
| 	pods, err := k.Set.CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{ | ||||
|         LabelSelector: "workflows.argoproj.io/workflow="+wfName, | ||||
|     }) | ||||
|     if err != nil { | ||||
| 		return nil, fmt.Errorf("failed to list pods: " + err.Error()) | ||||
|     } | ||||
|     if len(pods.Items) == 0 { | ||||
| 		return nil, fmt.Errorf("no pods found with label workflows.argoproj.io/workflow=%s", wfName) | ||||
|     } | ||||
|  | ||||
|     for _, pod := range pods.Items { | ||||
| 		if pod.Annotations["workflows.argoproj.io/node-name"] == nodeName { | ||||
| 			targetPod = pod | ||||
| 		} | ||||
| 	} | ||||
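| 	// Descriptive note: if no pod carries a matching node-name annotation, targetPod stays | ||||
| 	// zero-valued and the GetLogs call below fails with an empty pod name. | ||||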
|  | ||||
| 	// The k8s API throws an error if we try getting logs while the containers are not yet initialized, so we repeat the status check here | ||||
| 	k.testPodReady(targetPod, ns) | ||||
| 	 | ||||
| 	// When using kubectl logs for a pod it contacts /api/v1/namespaces/NAMESPACE/pods/oc-monitor-PODNAME/log?container=main, so we pass Container: "main" in the call | ||||
| 	req, err := k.Set.CoreV1().Pods(ns).GetLogs(targetPod.Name, &v1.PodLogOptions{Follow: true, Container: "main"}). Stream(context.Background()) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf(" Error when trying to get logs for " + targetPod.Name + " : " + err.Error()) | ||||
| 	} | ||||
|  | ||||
| 	return req, nil | ||||
| } | ||||
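|  | ||||
| // Usage sketch (hypothetical helper): stream the logs returned by GetPodLogger line by line. | ||||
| // Assumes "bufio" is added to the import block; everything else comes from this file. | ||||
| func examplePrintPodLogs(k *KubernetesTools, ns string, wfName string, nodeName string) error { | ||||
| 	logs, err := k.GetPodLogger(ns, wfName, nodeName) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	defer logs.Close() | ||||
| 	scanner := bufio.NewScanner(logs) | ||||
| 	for scanner.Scan() { | ||||
| 		fmt.Println(scanner.Text()) // one line of the "main" container output | ||||
| 	} | ||||
| 	return scanner.Err() | ||||
| } | ||||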
|  | ||||
| func (k *KubernetesTools) testPodReady(pod v1.Pod, ns string) { | ||||
| 	for { | ||||
| 		pod, err := k.Set.CoreV1().Pods(ns).Get(context.Background(), pod.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			wfl := utils.GetWFLogger("") | ||||
| 			wfl.Error().Msg("Error fetching pod: " + err.Error() + "\n") | ||||
| 			break | ||||
| 		} | ||||
| 	 | ||||
| 		// Return as soon as the pod reports Ready; otherwise wait and poll again | ||||
| 		for _, cond := range pod.Status.Conditions { | ||||
| 			if cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue { | ||||
| 				return | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		time.Sleep(2 * time.Second) // avoid hammering the API | ||||
| 	} | ||||
| } | ||||
| @@ -1,11 +0,0 @@ | ||||
| globalArguments: | ||||
| deployment: | ||||
|   kind: DaemonSet | ||||
| providers: | ||||
|   kubernetesCRD: | ||||
|     enabled: true | ||||
| service: | ||||
|   type: LoadBalancer | ||||
| ingressRoute: | ||||
|   dashboard: | ||||
|     enabled: false | ||||
							
								
								
									
45 utils/utils.go Normal file
							| @@ -0,0 +1,45 @@ | ||||
| package utils | ||||
|  | ||||
| import ( | ||||
| 	"oc-monitord/conf" | ||||
| 	"sync" | ||||
|  | ||||
| 	oclib "cloud.o-forge.io/core/oc-lib" | ||||
| 	"cloud.o-forge.io/core/oc-lib/logs" | ||||
| 	"cloud.o-forge.io/core/oc-lib/models/workflow_execution" | ||||
| 	"github.com/rs/zerolog" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	logger      zerolog.Logger | ||||
| 	wf_logger   zerolog.Logger | ||||
| 	pods_logger zerolog.Logger | ||||
| 	onceLogger  sync.Once | ||||
| 	onceWF      sync.Once | ||||
| ) | ||||
|  | ||||
| func GetExecution(exec_id string) *workflow_execution.WorkflowExecution { | ||||
| 	res := oclib.NewRequest(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), "", conf.GetConfig().PeerID, []string{}, nil).LoadOne(exec_id) | ||||
| 	if res.Code != 200 { | ||||
| 		logger := oclib.GetLogger() | ||||
| 		logger.Error().Msg("Could not retrieve workflow ID from execution ID " + exec_id) | ||||
| 		return nil | ||||
| 	} | ||||
| 	return res.ToWorkflowExecution() | ||||
| } | ||||
|  | ||||
| func GetLogger() zerolog.Logger { | ||||
| 	onceLogger.Do(func(){ | ||||
| 		logger = logs.CreateLogger("oc-monitord") | ||||
| 	}) | ||||
| 	return logger | ||||
| } | ||||
|  | ||||
| func GetWFLogger(workflowName string) zerolog.Logger { | ||||
| 	onceWF.Do(func() { | ||||
| 		// Derive the workflow logger from the base logger (GetLogger initializes it if needed) | ||||
| 		wf_logger = GetLogger().With(). | ||||
| 			Str("argo_name", workflowName). | ||||
| 			Str("workflow_id", conf.GetConfig().WorkflowID). | ||||
| 			Str("workflow_execution_id", conf.GetConfig().ExecutionID). | ||||
| 			Logger() | ||||
| 	}) | ||||
| 	return wf_logger | ||||
| } | ||||
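|  | ||||
| // Usage sketch: the first call binds the workflow fields onto the logger, later calls reuse | ||||
| // the same instance because of onceWF. The name passed below is illustrative. | ||||
| func exampleLoggers() { | ||||
| 	wfl := GetWFLogger("oc-monitor-demo") | ||||
| 	wfl.Info().Msg("monitoring started") | ||||
| 	_ = GetWFLogger("ignored-after-first-call") // returns the logger created above | ||||
| } | ||||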
							
								
								
									
143 workflow_builder/admiralty_setter.go Normal file
							| @@ -0,0 +1,143 @@ | ||||
| package workflow_builder | ||||
|  | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"net/http" | ||||
| 	"slices" | ||||
| 	"time" | ||||
|  | ||||
| 	oclib "cloud.o-forge.io/core/oc-lib" | ||||
| 	"cloud.o-forge.io/core/oc-lib/logs" | ||||
| 	"cloud.o-forge.io/core/oc-lib/models/peer" | ||||
| 	tools "cloud.o-forge.io/core/oc-lib/tools" | ||||
| ) | ||||
|  | ||||
|  | ||||
| type AdmiraltySetter struct { | ||||
| 	Id       string // ID identifying the execution; corresponds to the workflow_executions id | ||||
| 	NodeName string // Name of the Admiralty node created for this execution, retrieved from the peer | ||||
| } | ||||
|  | ||||
| func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID string) error { | ||||
| 	 | ||||
| 	logger = logs.GetLogger() | ||||
|  | ||||
| 	data := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"",localPeerID,nil,nil).LoadOne(remotePeerID) | ||||
| 	if data.Code != 200 { | ||||
| 		logger.Error().Msg("Error while trying to instantiate remote peer " + remotePeerID) | ||||
| 		return fmt.Errorf("%s", data.Err) | ||||
| 	} | ||||
| 	remotePeer := data.ToPeer() | ||||
|  | ||||
| 	data = oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"",localPeerID,nil,nil).LoadOne(localPeerID) | ||||
| 	if data.Code != 200 { | ||||
| 		logger.Error().Msg("Error while trying to instantiate local peer " + remotePeerID) | ||||
| 		return fmt.Errorf(data.Err) | ||||
| 	} | ||||
| 	localPeer := data.ToPeer() | ||||
|  | ||||
| 	caller := tools.NewHTTPCaller( | ||||
| 		map[tools.DataType]map[tools.METHOD]string{ | ||||
| 			tools.ADMIRALTY_SOURCE: map[tools.METHOD]string{ | ||||
| 				tools.POST :"/:id", | ||||
| 			}, | ||||
| 			tools.ADMIRALTY_KUBECONFIG: map[tools.METHOD]string{ | ||||
| 				tools.GET:"/:id", | ||||
| 			}, | ||||
| 			tools.ADMIRALTY_SECRET: map[tools.METHOD]string{ | ||||
| 				tools.POST:"/:id", | ||||
| 			}, | ||||
| 			tools.ADMIRALTY_TARGET: map[tools.METHOD]string{ | ||||
| 				tools.POST:"/:id", | ||||
| 			}, | ||||
| 			tools.ADMIRALTY_NODES: map[tools.METHOD]string{ | ||||
| 				tools.GET:"/:id", | ||||
| 			}, | ||||
| 		}, | ||||
| 	) | ||||
|  | ||||
| 	logger.Info().Msg(" Creating the Admiralty Source on " + remotePeerID + " ns-" + s.Id + "\n\n") | ||||
| 	_ = s.callRemoteExecution(remotePeer, []int{http.StatusCreated, http.StatusConflict},caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil, true) | ||||
| 	logger.Info().Msg(" Retrieving kubeconfig with the secret on " + remotePeerID + " ns-" + s.Id + "\n\n") | ||||
| 	kubeconfig := s.getKubeconfig(remotePeer, caller) | ||||
| 	logger.Info().Msg(" Creating a secret from the kubeconfig " + localPeerID + " ns-" + s.Id + "\n\n") | ||||
| 	_ = s.callRemoteExecution(localPeer, []int{http.StatusCreated}, caller,s.Id, tools.ADMIRALTY_SECRET, tools.POST,kubeconfig, true) | ||||
| 	logger.Info().Msg(" Creating the Admiralty Target on " + localPeerID + " ns-" + s.Id + "\n\n") | ||||
| 	_ = s.callRemoteExecution(localPeer,[]int{http.StatusCreated, http.StatusConflict},caller,s.Id,tools.ADMIRALTY_TARGET,tools.POST, nil, true) | ||||
| 	logger.Info().Msg(" Checking for the creation of the admiralty node on " + localPeerID + " ns-" + s.Id + "\n\n") | ||||
| 	s.checkNodeStatus(localPeer,caller) | ||||
| 	 | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (s *AdmiraltySetter) getKubeconfig(peer *peer.Peer, caller *tools.HTTPCaller) map[string]string { | ||||
| 	var kubedata map[string]string | ||||
| 	_ = s.callRemoteExecution(peer, []int{http.StatusOK}, caller, s.Id, tools.ADMIRALTY_KUBECONFIG, tools.GET, nil, true) | ||||
| 	if caller.LastResults["body"] == nil || len(caller.LastResults["body"].([]byte)) == 0 { | ||||
| 		fmt.Println("Something went wrong when retrieving data from Get call for kubeconfig") | ||||
| 		panic(0) | ||||
| 	} | ||||
| 	err := json.Unmarshal(caller.LastResults["body"].([]byte), &kubedata) | ||||
| 	if err != nil { | ||||
| 		fmt.Println("Something went wrong when unmarshalling data from Get call for kubeconfig") | ||||
| 		panic(0) | ||||
| 	} | ||||
|  | ||||
| 	return kubedata | ||||
| } | ||||
|  | ||||
| func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode []int,caller *tools.HTTPCaller, dataID string, dt tools.DataType, method tools.METHOD, body interface{}, panicCode bool) *peer.PeerExecution { | ||||
| 	resp, err := peer.LaunchPeerExecution(peer.UUID, dataID, dt, method, body, caller) | ||||
| 	if err != nil { | ||||
| 		fmt.Println("Error when executing on peer at", peer.Url) | ||||
| 		fmt.Println(err) | ||||
| 		panic(0) | ||||
| 	} | ||||
|  | ||||
| 	if !slices.Contains(expectedCode, caller.LastResults["code"].(int)) { | ||||
| 		fmt.Println("Didn't receive the expected code :", caller.LastResults["code"], "when expecting", expectedCode) | ||||
| 		if _, ok := caller.LastResults["body"]; ok { | ||||
| 			logger.Info().Msg(string(caller.LastResults["body"].([]byte))) | ||||
| 			// fmt.Println(string(caller.LastResults["body"].([]byte))) | ||||
| 		} | ||||
| 		if panicCode { | ||||
| 			panic(0) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return resp | ||||
| } | ||||
|  | ||||
| func (s *AdmiraltySetter) storeNodeName(caller *tools.HTTPCaller){ | ||||
| 	var data map[string]interface{} | ||||
| 	if resp, ok := caller.LastResults["body"]; ok { | ||||
| 		json.Unmarshal(resp.([]byte), &data) | ||||
| 	} | ||||
|  | ||||
| 	if node, ok := data["node"]; ok { | ||||
| 		metadata := node.(map[string]interface{})["metadata"] | ||||
| 		name := metadata.(map[string]interface{})["name"].(string) | ||||
| 		s.NodeName = name | ||||
| 	} else { | ||||
| 		fmt.Println("Could not retrieve data about the recently created node") | ||||
| 		panic(0) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (s *AdmiraltySetter) checkNodeStatus(localPeer *peer.Peer, caller *tools.HTTPCaller){ | ||||
| 	for i := 0; i < 5; i++ { | ||||
| 		time.Sleep(5 * time.Second) // give kube some time to generate the node | ||||
| 		_ = s.callRemoteExecution(localPeer, []int{http.StatusOK}, caller, s.Id, tools.ADMIRALTY_NODES, tools.GET, nil, false) | ||||
| 		if caller.LastResults["code"] == 200 { | ||||
| 			s.storeNodeName(caller) | ||||
| 			return | ||||
| 		} | ||||
| 		logger.Info().Msg("Could not verify that node is up, attempt " + fmt.Sprint(i+1) + "/5. Retrying...") | ||||
| 	} | ||||
| 	// All retries exhausted: the node was never found | ||||
| 	logger.Error().Msg("Node on " + localPeer.Name + " was never found, panicking !") | ||||
| 	panic(0) | ||||
| } | ||||
| @@ -5,18 +5,18 @@ | ||||
| package workflow_builder | ||||
|  | ||||
| import ( | ||||
| 	"oc-monitord/models" | ||||
| 	"fmt" | ||||
| 	"oc-monitord/conf" | ||||
| 	. "oc-monitord/models" | ||||
| 	tools2 "oc-monitord/tools" | ||||
| 	"os" | ||||
| 	"slices" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"time" | ||||
|  | ||||
| 	oclib "cloud.o-forge.io/core/oc-lib" | ||||
| 	"cloud.o-forge.io/core/oc-lib/models/resource_model" | ||||
| 	"cloud.o-forge.io/core/oc-lib/models/resources/processing" | ||||
| 	"cloud.o-forge.io/core/oc-lib/models/resources/workflow/graph" | ||||
| 	"cloud.o-forge.io/core/oc-lib/logs" | ||||
| 	"cloud.o-forge.io/core/oc-lib/models/common/enum" | ||||
| 	"cloud.o-forge.io/core/oc-lib/models/resources" | ||||
| 	w "cloud.o-forge.io/core/oc-lib/models/workflow" | ||||
| 	"github.com/nwtgck/go-fakelish" | ||||
| 	"github.com/rs/zerolog" | ||||
| @@ -25,257 +25,337 @@ import ( | ||||
|  | ||||
| var logger zerolog.Logger | ||||
|  | ||||
| type ServiceExposure int  | ||||
| const ( | ||||
| 	PAT 	ServiceExposure = iota | ||||
| 	Reverse | ||||
| 	Both | ||||
| ) | ||||
|  | ||||
| type ArgoBuilder struct { | ||||
| 	OriginWorkflow 	w.Workflow | ||||
| 	OriginWorkflow 	*w.Workflow | ||||
| 	Workflow       	Workflow | ||||
| 	Services		[]Service | ||||
| 	Services       	[]*Service | ||||
| 	Timeout        	int | ||||
| 	RemotePeers		[]string | ||||
| } | ||||
|  | ||||
| type Workflow struct { | ||||
| 	Manifest | ||||
| 	Spec ArgoSpec `yaml:"spec,omitempty"` | ||||
| 	ApiVersion string `yaml:"apiVersion"` | ||||
| 	Kind       string `yaml:"kind"` | ||||
| 	Metadata   struct { | ||||
| 		Name string `yaml:"name"` | ||||
| 	} `yaml:"metadata"` | ||||
| 	Spec Spec `yaml:"spec,omitempty"` | ||||
| } | ||||
|  | ||||
| type ArgoSpec struct { | ||||
| 	Entrypoint string                `yaml:"entrypoint"` | ||||
| 	Arguments  []Parameter           `yaml:"arguments,omitempty"` | ||||
| 	Volumes    []VolumeClaimTemplate `yaml:"volumeClaimTemplates,omitempty"` | ||||
| 	Templates  []Template            `yaml:"templates"` | ||||
| 	Timeout    int                   `yaml:"activeDeadlineSeconds,omitempty"` | ||||
| func (b *Workflow) getDag() *Dag { | ||||
| 	for _, t := range b.Spec.Templates { | ||||
| 		if t.Name == "dag" { | ||||
| 			return t.Dag | ||||
| 		} | ||||
| 	} | ||||
| 	b.Spec.Templates = append(b.Spec.Templates, Template{Name: "dag", Dag: &Dag{}}) | ||||
| 	return b.Spec.Templates[len(b.Spec.Templates)-1].Dag | ||||
| } | ||||
|  | ||||
| type Spec struct { | ||||
| 	ServiceAccountName	string					`yaml:"serviceAccountName"` | ||||
| 	Entrypoint 			string                	`yaml:"entrypoint"` | ||||
| 	Arguments  			[]Parameter           	`yaml:"arguments,omitempty"` | ||||
| 	Volumes    			[]VolumeClaimTemplate 	`yaml:"volumeClaimTemplates,omitempty"` | ||||
| 	Templates  			[]Template            	`yaml:"templates"` | ||||
| 	Timeout    			int                   	`yaml:"activeDeadlineSeconds,omitempty"` | ||||
| } | ||||
|  | ||||
|  | ||||
| func (b *ArgoBuilder) CreateDAG() (string, error) { | ||||
| 	 | ||||
| // TODO: found on a processing instance linked to storage | ||||
| // add s3, gcs, azure, etc if needed on a link between processing and storage | ||||
| func (b *ArgoBuilder) CreateDAG(namespace string, write bool) ( int, []string, []string, error) { | ||||
| 	logger = logs.GetLogger() | ||||
| 	fmt.Println("Creating DAG", b.OriginWorkflow.Graph.Items) | ||||
| 	// handle services by checking if there is only one processing with hostname and port | ||||
| 		 | ||||
| 	b.createTemplates() | ||||
| 	b.createDAGstep() | ||||
| 	b.createVolumes() | ||||
| 	firstItems, lastItems, volumes := b.createTemplates(namespace) | ||||
| 	b.createVolumes(volumes) | ||||
|  | ||||
| 	 | ||||
| 	if b.Timeout > 0 { | ||||
| 		b.Workflow.Spec.Timeout = b.Timeout | ||||
| 	} | ||||
| 	b.Workflow.Spec.ServiceAccountName = "sa-"+namespace | ||||
| 	b.Workflow.Spec.Entrypoint = "dag" | ||||
| 	b.Workflow.Manifest.ApiVersion = "argoproj.io/v1alpha1" | ||||
| 	b.Workflow.ApiVersion = "argoproj.io/v1alpha1" | ||||
| 	b.Workflow.Kind = "Workflow" | ||||
| 	random_name := generateWfName() | ||||
| 	b.Workflow.Metadata.Name = "oc-monitor-" + random_name | ||||
| 	logger = oclib.GetLogger() | ||||
| 	yamlified, err := yaml.Marshal(b.Workflow) | ||||
| 	if err != nil { | ||||
| 		logger.Error().Msg("Could not transform object to yaml file") | ||||
| 		return "", err | ||||
| 	if !write { | ||||
| 		return len(b.Workflow.getDag().Tasks), firstItems, lastItems, nil | ||||
| 	} | ||||
|  | ||||
| 	// Give a unique name to each argo file with its timestamp DD:MM:YYYY_hhmmss | ||||
| 	current_timestamp := time.Now().Format("02_01_2006_150405") | ||||
| 	file_name := random_name + "_" + current_timestamp + ".yml" | ||||
| 	workflows_dir := "./argo_workflows/" | ||||
| 	err = os.WriteFile(workflows_dir+file_name, []byte(yamlified), 0660) | ||||
| 	if err != nil { | ||||
| 		logger.Error().Msg("Could not write the yaml file") | ||||
| 		return "", err | ||||
| 	} | ||||
|  | ||||
| 	return file_name, nil | ||||
| } | ||||
|  | ||||
| func (b *ArgoBuilder) createTemplates() { | ||||
| 	for _, comp := range b.getProcessings() { | ||||
| 		var command string | ||||
| 		var args string | ||||
| 		var env string | ||||
| 		var serv models.Service | ||||
|  | ||||
| 		comp_res := comp.Processing | ||||
|  | ||||
| 		command = getStringValue(comp_res.AbstractResource, "command") | ||||
| 		args = getStringValue(comp_res.AbstractResource, "args") | ||||
| 		env = getStringValue(comp_res.AbstractResource, "env") | ||||
|  | ||||
| 		image_name := strings.Split(command, " ")[0]   // TODO : decide where to store the image name, GUI or models.computing.Image | ||||
| 		temp_container := Container{Image: image_name} // TODO : decide where to store the image name, GUI or models.computing.Image | ||||
| 		temp_container.Command = getComputingCommands(command) | ||||
| 		temp_container.Args = getComputingArgs(args, command) | ||||
| 		// Only for dev purpose, | ||||
| 		input_names := getComputingEnvironmentName(strings.Split(env, " ")) | ||||
|  | ||||
| 		var inputs_container []Parameter | ||||
| 		for _, name := range input_names { | ||||
| 			inputs_container = append(inputs_container, Parameter{Name: name}) | ||||
| 		} | ||||
|  | ||||
| 		argo_name := getArgoName(comp_res.GetName(), comp.ID) | ||||
| 		new_temp := Template{Name: argo_name, Container: temp_container} | ||||
| 		new_temp.Inputs.Parameters = inputs_container | ||||
| 		new_temp.Container.VolumeMounts = append(new_temp.Container.VolumeMounts, VolumeMount{Name: "workdir", MountPath: "/mnt/vol"}) // TODO : replace this with a search of the storage / data source name | ||||
| 		 | ||||
| 		if (b.isService(comp.ID)){ | ||||
|  | ||||
| 			serv_type := getServiceExposure(*comp.Processing) | ||||
| 			if serv_type == PAT || serv_type == Both{ | ||||
| 				serv = b.CreateKubeService(comp, NodePort) | ||||
| 				b.addKubeServiceToWorkflow(serv, argo_name, comp.ID) | ||||
| 				new_temp.Metadata.Labels = make(map[string]string) | ||||
| 				new_temp.Metadata.Labels["app"] = serv.Spec.Selector["app"]		// Construct the template for the k8s service and add a link in graph between k8s service and processing | ||||
| 				b.addServiceToArgo(serv) | ||||
| 				ingress := b.CreateIngress(comp,serv) | ||||
| 				b.addIngressToWorfklow(ingress, argo_name, comp.ID) | ||||
|  | ||||
| 			} | ||||
| 			if serv_type == Reverse || serv_type == Both{ | ||||
| 				serv = b.CreateKubeService(comp, ClusterIP) | ||||
| 				// create ingress by passing the service and the processing (or reverse) | ||||
| 				b.addKubeServiceToWorkflow(serv, argo_name, comp.ID) | ||||
| 				new_temp.Metadata.Labels = make(map[string]string) | ||||
| 				new_temp.Metadata.Labels["app"] = serv.Spec.Selector["app"]		// Construct the template for the k8s service and add a link in graph between k8s service and processing | ||||
| 				b.addServiceToArgo(serv) | ||||
| 			} | ||||
|  | ||||
| 			 | ||||
| 			// if err != nil { | ||||
| 			// 	// TODO | ||||
| 			// } | ||||
| 		} | ||||
| 		 | ||||
| 		b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, new_temp) | ||||
|  | ||||
| 	} | ||||
|  | ||||
|  | ||||
| } | ||||
|  | ||||
| func (b *ArgoBuilder) createDAGstep() { | ||||
| 	new_dag := Dag{} | ||||
| 	for _, comp := range b.getProcessings() { | ||||
| 		comp_res := comp.Processing | ||||
| 		env := getStringValue(comp_res.AbstractResource, "env") | ||||
| 		unique_name := getArgoName(comp_res.GetName(), comp.ID) | ||||
| 		step := Task{Name: unique_name, Template: unique_name} | ||||
| 		comp_envs := getComputingEnvironment(strings.Split(env, " ")) | ||||
|  | ||||
| 		for name, value := range comp_envs { | ||||
| 			step.Arguments.Parameters = append(step.Arguments.Parameters, Parameter{Name: name, Value: value}) | ||||
| 		} | ||||
|  | ||||
| 		// retrieves the name (computing.name-computing.ID) | ||||
| 		step.Dependencies = b.getDependency(comp.ID) // Error : we use the component ID instead of the GraphItem ID -> store objects | ||||
| 		new_dag.Tasks = append(new_dag.Tasks, step) | ||||
| 	} | ||||
|  | ||||
| 	for i, _ := range b.Services { | ||||
| 		name := "workflow-service-pod-"+strconv.Itoa(i + 1) | ||||
| 		new_dag.Tasks = append(new_dag.Tasks, Task{Name: name , Template: name}) | ||||
| 	} | ||||
|  | ||||
| 	b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, Template{Name: "dag", Dag: new_dag}) | ||||
| 	 | ||||
| 	 | ||||
| 	return  len(b.Workflow.getDag().Tasks), firstItems, lastItems, nil | ||||
| } | ||||
|  | ||||
| func (b *ArgoBuilder) createVolumes() { | ||||
| 	// For testing purposes we only declare one volume, mounted in each computing | ||||
| 	new_volume := VolumeClaimTemplate{} | ||||
| 	new_volume.Metadata.Name = "workdir" | ||||
| 	new_volume.Spec.AccessModes = []string{"ReadWriteOnce"} | ||||
| 	new_volume.Spec.Resources.Requests.Storage = "1Gi" | ||||
| 	b.Workflow.Spec.Volumes = append(b.Workflow.Spec.Volumes, new_volume) | ||||
| func (b *ArgoBuilder) createTemplates(namespace string) ([]string, []string, []VolumeMount) { | ||||
| 	volumes := []VolumeMount{} | ||||
| 	firstItems := []string{} | ||||
| 	lastItems := []string{} | ||||
| 	items := b.OriginWorkflow.GetGraphItems(b.OriginWorkflow.Graph.IsProcessing) | ||||
| 	fmt.Println("Creating templates", len(items)) | ||||
| 	for _, item := range b.OriginWorkflow.GetGraphItems(b.OriginWorkflow.Graph.IsProcessing) { | ||||
| 		instance := item.Processing.GetSelectedInstance() | ||||
| 		fmt.Println("Creating template for", item.Processing.GetName(), instance) | ||||
| 		if instance == nil || instance.(*resources.ProcessingInstance).Access == nil || instance.(*resources.ProcessingInstance).Access.Container == nil { | ||||
| 			logger.Error().Msg("Not enough configuration setup, template can't be created : " + item.Processing.GetName()) | ||||
| 			return firstItems, lastItems, volumes | ||||
| 		} | ||||
| 		volumes, firstItems, lastItems = b.createArgoTemplates(namespace, | ||||
| 			item.ID, item.Processing, volumes, firstItems, lastItems) | ||||
| 	} | ||||
| 	firstWfTasks := map[string][]string{} | ||||
| 	latestWfTasks := map[string][]string{} | ||||
| 	relatedWfTasks := map[string][]string{} | ||||
| 	for _, wf := range b.OriginWorkflow.Workflows { | ||||
| 		realWorkflow, code, err := w.NewAccessor(nil).LoadOne(wf) | ||||
| 		if code != 200 { | ||||
| 			logger.Error().Msg("Error loading the workflow : " + err.Error()) | ||||
| 			continue | ||||
| 		} | ||||
| 		subBuilder := ArgoBuilder{OriginWorkflow: realWorkflow.(*w.Workflow), Timeout: b.Timeout} | ||||
| 		_, fi, li, err := subBuilder.CreateDAG(namespace, false) | ||||
| 		if err != nil { | ||||
| 			logger.Error().Msg("Error creating the subworkflow : " + err.Error()) | ||||
| 			continue | ||||
| 		} | ||||
| 		firstWfTasks[wf] = fi | ||||
| 		if ok, depsOfIds := subBuilder.isArgoDependancy(wf); ok { // IS BEFORE | ||||
| 			latestWfTasks[wf] = li | ||||
| 			relatedWfTasks[wf] = depsOfIds | ||||
| 		} | ||||
| 		subDag := subBuilder.Workflow.getDag() | ||||
| 		d := b.Workflow.getDag() | ||||
| 		d.Tasks = append(d.Tasks, subDag.Tasks...) // add the tasks of the subworkflow to the main workflow | ||||
| 		b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, subBuilder.Workflow.Spec.Templates...) | ||||
| 		b.Workflow.Spec.Volumes = append(b.Workflow.Spec.Volumes, subBuilder.Workflow.Spec.Volumes...) | ||||
| 		b.Workflow.Spec.Arguments = append(b.Workflow.Spec.Arguments, subBuilder.Workflow.Spec.Arguments...) | ||||
| 		b.Services = append(b.Services, subBuilder.Services...) | ||||
| 	} | ||||
| 	for wfID, depsOfIds := range relatedWfTasks { | ||||
| 		for _, dep := range depsOfIds { | ||||
| 			tasks := b.Workflow.getDag().Tasks | ||||
| 			for i := range tasks { | ||||
| 				task := &tasks[i] // take a pointer so the dependency rewrite is actually kept | ||||
| 				if strings.Contains(task.Name, dep) { | ||||
| 					index := -1 | ||||
| 					for j, depp := range task.Dependencies { | ||||
| 						if strings.Contains(depp, wfID) { | ||||
| 							index = j | ||||
| 							break | ||||
| 						} | ||||
| 					} | ||||
| 					if index != -1 { | ||||
| 						task.Dependencies = append(task.Dependencies[:index], task.Dependencies[index+1:]...) | ||||
| 					} | ||||
| 					task.Dependencies = append(task.Dependencies, latestWfTasks[wfID]...) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	for wfID, fi := range firstWfTasks { | ||||
| 		deps := b.getArgoDependencies(wfID) | ||||
| 		if len(deps) > 0 { | ||||
| 			for _, dep := range fi { | ||||
| 				tasks := b.Workflow.getDag().Tasks | ||||
| 				for i := range tasks { | ||||
| 					task := &tasks[i] | ||||
| 					if strings.Contains(task.Name, dep) { | ||||
| 						task.Dependencies = append(task.Dependencies, deps...) | ||||
| 					} | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	if b.Services != nil { | ||||
| 		dag := b.Workflow.getDag() | ||||
| 		dag.Tasks = append(dag.Tasks, Task{Name: "workflow-service-pod", Template: "workflow-service-pod"}) | ||||
| 		b.addServiceToArgo() | ||||
| 	} | ||||
| 	return firstItems, lastItems, volumes | ||||
| } | ||||
|  | ||||
| func (b *ArgoBuilder) createArgoTemplates(namespace string, | ||||
| 	id string, | ||||
| 	processing *resources.ProcessingResource, | ||||
| 	volumes []VolumeMount, | ||||
| 	firstItems []string, | ||||
| 	lastItems []string) ([]VolumeMount, []string, []string) { | ||||
| 	_, firstItems, lastItems = b.addTaskToArgo(b.Workflow.getDag(), id, processing, firstItems, lastItems) | ||||
| 	template := &Template{Name: getArgoName(processing.GetName(), id)} | ||||
| 	fmt.Println("Creating template for", template.Name) | ||||
| 	isReparted, peerId := b.isProcessingReparted(*processing,id) | ||||
| 	template.CreateContainer(processing, b.Workflow.getDag()) | ||||
| 	if isReparted { | ||||
| 		b.RemotePeers = append(b.RemotePeers, peerId) | ||||
| 		template.AddAdmiraltyAnnotations(peerId) | ||||
| 	} | ||||
| 	// get datacenter from the processing | ||||
| 	if processing.IsService { | ||||
| 		b.CreateService(id, processing) | ||||
| 		template.Metadata.Labels = make(map[string]string) | ||||
| 		template.Metadata.Labels["app"] = "oc-service-" + processing.GetName() // Construct the template for the k8s service and add a link in graph between k8s service and processing | ||||
| 	} | ||||
| 	related := b.OriginWorkflow.GetByRelatedProcessing(id, b.OriginWorkflow.Graph.IsStorage) | ||||
| 	for _, r := range related { | ||||
| 		storage := r.Node.(*resources.StorageResource) | ||||
| 		for _, linkToStorage := range r.Links { | ||||
| 			for _, rw := range linkToStorage.StorageLinkInfos { | ||||
| 				art := Artifact{Path: template.ReplacePerEnv(rw.Source, linkToStorage.Env)} | ||||
| 				if rw.Write { | ||||
| 					art.Name = storage.GetName() + "-" + rw.Destination + "-input-write" | ||||
| 				} else { | ||||
| 					art.Name = storage.GetName() + "-" + rw.Destination + "-input-read" | ||||
| 				} | ||||
| 				if storage.StorageType == enum.S3 { | ||||
| 					art.S3 = &Key{ | ||||
| 						Key:      template.ReplacePerEnv(rw.Destination+"/"+rw.FileName, linkToStorage.Env), | ||||
| 						Insecure: true, // temporary | ||||
| 					} | ||||
| 					sel := storage.GetSelectedInstance() | ||||
| 					if sel != nil { | ||||
| 						if sel.(*resources.StorageResourceInstance).Credentials != nil { | ||||
| 							tool, err := tools2.NewService(conf.GetConfig().Mode) | ||||
| 							if err != nil || tool == nil { | ||||
| 								logger.Error().Msg("Could not create the access secret") | ||||
| 							} else { | ||||
| 								id, err := tool.CreateAccessSecret(namespace, | ||||
| 									sel.(*resources.StorageResourceInstance).Credentials.Login, | ||||
| 									sel.(*resources.StorageResourceInstance).Credentials.Pass) | ||||
| 								if err == nil { | ||||
| 									art.S3.AccessKeySecret = &Secret{ | ||||
| 										Name: id, | ||||
| 										Key:  "access-key", | ||||
| 									} | ||||
| 									art.S3.SecretKeySecret = &Secret{ | ||||
| 										Name: id, | ||||
| 										Key:  "secret-key", | ||||
| 									} | ||||
| 								} | ||||
| 							} | ||||
| 						} | ||||
| 						art.S3.Key = strings.ReplaceAll(art.S3.Key, sel.(*resources.StorageResourceInstance).Source+"/", "") | ||||
| 						art.S3.Key = strings.ReplaceAll(art.S3.Key, sel.(*resources.StorageResourceInstance).Source, "") | ||||
| 						splits := strings.Split(art.S3.EndPoint, "/") | ||||
| 						if len(splits) > 1 { | ||||
| 							art.S3.Bucket = splits[0] | ||||
| 							art.S3.EndPoint = strings.Join(splits[1:], "/") | ||||
| 						} else { | ||||
| 							art.S3.Bucket = splits[0] | ||||
| 						} | ||||
| 					} | ||||
| 				} | ||||
| 				if rw.Write { | ||||
| 					template.Outputs.Artifacts = append(template.Inputs.Artifacts, art) | ||||
| 				} else { | ||||
| 					template.Inputs.Artifacts = append(template.Outputs.Artifacts, art) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 		index := 0 | ||||
| 		if storage.SelectedInstanceIndex != nil && (*storage.SelectedInstanceIndex) >= 0 { | ||||
| 			index = *storage.SelectedInstanceIndex | ||||
| 		} | ||||
| 		s := storage.Instances[index] | ||||
| 		if s.Local { | ||||
| 			volumes = template.Container.AddVolumeMount(VolumeMount{ | ||||
| 				Name:      strings.ReplaceAll(strings.ToLower(storage.GetName()), " ", "-"), | ||||
| 				MountPath: s.Source, | ||||
| 				Storage:   storage, | ||||
| 			}, volumes) | ||||
| 		} | ||||
| 	} | ||||
| 	b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, *template) | ||||
| 	return volumes, firstItems, lastItems | ||||
| } | ||||
|  | ||||
| func (b *ArgoBuilder) addTaskToArgo(dag *Dag, graphItemID string, processing *resources.ProcessingResource, | ||||
| 	firstItems []string, lastItems []string) (*Dag, []string, []string) { | ||||
| 	unique_name := getArgoName(processing.GetName(), graphItemID) | ||||
| 	step := Task{Name: unique_name, Template: unique_name} | ||||
| 	instance := processing.GetSelectedInstance() | ||||
| 	if instance != nil { | ||||
| 		for _, value := range instance.(*resources.ProcessingInstance).Env { | ||||
| 			step.Arguments.Parameters = append(step.Arguments.Parameters, Parameter{ | ||||
| 				Name:  value.Name, | ||||
| 				Value: value.Value, | ||||
| 			}) | ||||
| 		} | ||||
| 		for _, value := range instance.(*resources.ProcessingInstance).Inputs { | ||||
| 			step.Arguments.Parameters = append(step.Arguments.Parameters, Parameter{ | ||||
| 				Name:  value.Name, | ||||
| 				Value: value.Value, | ||||
| 			}) | ||||
| 		} | ||||
| 		for _, value := range instance.(*resources.ProcessingInstance).Outputs { | ||||
| 			step.Arguments.Parameters = append(step.Arguments.Parameters, Parameter{ | ||||
| 				Name:  value.Name, | ||||
| 				Value: value.Value, | ||||
| 			}) | ||||
| 		} | ||||
| 	} | ||||
| 	step.Dependencies = b.getArgoDependencies(graphItemID) | ||||
| 	name := "" | ||||
| 	if b.OriginWorkflow.Graph.Items[graphItemID].Processing != nil { | ||||
| 		name = b.OriginWorkflow.Graph.Items[graphItemID].Processing.GetName() | ||||
| 	} | ||||
| 	if b.OriginWorkflow.Graph.Items[graphItemID].Workflow != nil { | ||||
| 		name = b.OriginWorkflow.Graph.Items[graphItemID].Workflow.GetName() | ||||
| 	} | ||||
| 	if len(step.Dependencies) == 0 && name != "" { | ||||
| 		firstItems = append(firstItems, getArgoName(name, graphItemID)) | ||||
| 	} | ||||
| 	if ok, _ := b.isArgoDependancy(graphItemID); !ok && name != "" { | ||||
| 		lastItems = append(lastItems, getArgoName(name, graphItemID)) | ||||
| 	} | ||||
| 	dag.Tasks = append(dag.Tasks, step) | ||||
| 	return dag, firstItems, lastItems | ||||
| } | ||||
|  | ||||
| func (b *ArgoBuilder) getDependency(current_computing_id string) (dependencies []string) { | ||||
| func (b *ArgoBuilder) createVolumes(volumes []VolumeMount) { // TODO : one think about remote volume but TG | ||||
| 	for _, volume := range volumes { | ||||
| 		index := 0 | ||||
| 		if volume.Storage.SelectedInstanceIndex != nil && (*volume.Storage.SelectedInstanceIndex) >= 0 { | ||||
| 			index = *volume.Storage.SelectedInstanceIndex | ||||
| 		} | ||||
| 		storage := volume.Storage.Instances[index] | ||||
| 		new_volume := VolumeClaimTemplate{} | ||||
| 		new_volume.Metadata.Name = strings.ReplaceAll(strings.ToLower(volume.Name), " ", "-") | ||||
| 		new_volume.Spec.AccessModes = []string{"ReadWriteOnce"} | ||||
| 		new_volume.Spec.Resources.Requests.Storage = fmt.Sprintf("%v", storage.SizeGB) + storage.SizeType.ToArgo() | ||||
| 		b.Workflow.Spec.Volumes = append(b.Workflow.Spec.Volumes, new_volume) | ||||
| 	} | ||||
| } | ||||
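|  | ||||
| // Worked illustration of the naming and sizing above (assumption: SizeType.ToArgo() returns | ||||
| // the Kubernetes quantity suffix, e.g. "Gi"): a mount named "Minio Storage" backed by a | ||||
| // 10 GB instance yields a claim named "minio-storage" requesting "10Gi". | ||||
| func exampleClaimNaming() { | ||||
| 	name := strings.ReplaceAll(strings.ToLower("Minio Storage"), " ", "-") | ||||
| 	request := fmt.Sprintf("%v", 10) + "Gi" | ||||
| 	fmt.Println(name, request) // minio-storage 10Gi | ||||
| } | ||||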
|  | ||||
| func (b *ArgoBuilder) isArgoDependancy(id string) (bool, []string) { | ||||
| 	dependancyOfIDs := []string{} | ||||
| 	isDeps := false | ||||
| 	for _, link := range b.OriginWorkflow.Graph.Links { | ||||
| 		if b.OriginWorkflow.Graph.Items[link.Source.ID].Processing == nil { | ||||
| 		if _, ok := b.OriginWorkflow.Graph.Items[link.Destination.ID]; !ok { | ||||
| 			fmt.Println("Could not find the source of the link", link.Destination.ID) | ||||
| 			continue | ||||
| 		} | ||||
| 		source := b.OriginWorkflow.Graph.Items[link.Destination.ID].Processing | ||||
| 		if id == link.Source.ID && source != nil { | ||||
| 			isDeps = true | ||||
| 			dependancyOfIDs = append(dependancyOfIDs, getArgoName(source.GetName(), link.Destination.ID)) | ||||
| 		} | ||||
| 		sourceWF := b.OriginWorkflow.Graph.Items[link.Destination.ID].Workflow | ||||
| 		if id == link.Source.ID && sourceWF != nil { | ||||
| 			isDeps = true | ||||
| 			dependancyOfIDs = append(dependancyOfIDs, getArgoName(sourceWF.GetName(), link.Destination.ID)) | ||||
| 		} | ||||
| 	} | ||||
| 	return isDeps, dependancyOfIDs | ||||
| } | ||||
|  | ||||
| func (b *ArgoBuilder) getArgoDependencies(id string) (dependencies []string) { | ||||
| 	for _, link := range b.OriginWorkflow.Graph.Links { | ||||
| 		if _, ok := b.OriginWorkflow.Graph.Items[link.Source.ID]; !ok { | ||||
| 			fmt.Println("Could not find the source of the link", link.Source.ID) | ||||
| 			continue | ||||
| 		} | ||||
| 		source := b.OriginWorkflow.Graph.Items[link.Source.ID].Processing | ||||
| 		if current_computing_id == link.Destination.ID && source != nil { | ||||
| 		if id == link.Destination.ID && source != nil { | ||||
| 			dependency_name := getArgoName(source.GetName(), link.Source.ID) | ||||
| 			dependencies = append(dependencies, dependency_name) | ||||
| 			continue | ||||
| 		} | ||||
| 	} | ||||
| 	return | ||||
|  | ||||
| } | ||||
|  | ||||
| func getComputingCommands(user_input string) []string { | ||||
| 	user_input = removeImageName(user_input) | ||||
| 	if len(user_input) == 0 { | ||||
| 		return []string{} | ||||
| 	} | ||||
| 	return strings.Split(user_input, " ") | ||||
| } | ||||
|  | ||||
| func getComputingArgs(user_input string, command string) (list_args []string) { | ||||
| 	if len(user_input) == 0 { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	args := strings.Split(user_input," ") | ||||
|  | ||||
| 	// quickfix that might need improvement | ||||
| 	if strings.Contains(command, "sh -c") { | ||||
| 		list_args = append(list_args, strings.Join(args, " ")) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	list_args = append(list_args, args...) | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // Currently implements code to overcome problems in data structure | ||||
| func getComputingEnvironment(user_input []string) (map_env map[string]string) { | ||||
| 	logger := oclib.GetLogger() | ||||
| 	is_empty := len(user_input) == 0 | ||||
| 	is_empty_string := len(user_input) == 1 && user_input[0] == "" | ||||
|  | ||||
| 	if is_empty || is_empty_string { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	if len(user_input) == 1 { | ||||
| 		user_input = strings.Split(user_input[0], ",") | ||||
| 	} | ||||
|  | ||||
| 	map_env = make(map[string]string, 0) | ||||
|  | ||||
| 	for _, str := range user_input { | ||||
| 		new_pair := strings.Split(str, "=") | ||||
|  | ||||
| 		if len(new_pair) != 2 { | ||||
| 			logger.Error().Msg("Error extracting the environment variable from " + str) | ||||
| 			panic(0) | ||||
| 		} | ||||
|  | ||||
| 		map_env[new_pair[0]] = new_pair[1] | ||||
| 	} | ||||
|  | ||||
| 	return | ||||
| } | ||||
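|  | ||||
| // Worked example of the parsing above (sketch, not called anywhere): a single comma-separated | ||||
| // string and a pre-split slice both yield the same map. | ||||
| func exampleComputingEnvironment() { | ||||
| 	fmt.Println(getComputingEnvironment([]string{"HOST=0.0.0.0,PORT=8080"})) | ||||
| 	fmt.Println(getComputingEnvironment([]string{"HOST=0.0.0.0", "PORT=8080"})) | ||||
| 	// both print: map[HOST:0.0.0.0 PORT:8080] | ||||
| } | ||||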
|  | ||||
| func getComputingEnvironmentName(user_input []string) (list_names []string) { | ||||
| 	env_map := getComputingEnvironment(user_input) | ||||
| 	for name := range env_map { | ||||
| 		list_names = append(list_names, name) | ||||
| 	} | ||||
|  | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func generateWfName() (Name string) { | ||||
| 	Name = fakelish.GenerateFakeWord(5, 8) + "-" + fakelish.GenerateFakeWord(5, 8) | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func getArgoName(raw_name string, component_id string) (formatedName string) { | ||||
| @@ -285,76 +365,102 @@ func getArgoName(raw_name string, component_id string) (formatedName string) { | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func removeImageName(user_input string) string { | ||||
| 	// First command is the name of the container for now | ||||
| 	if len(strings.Split(user_input, " ")) == 1 { | ||||
| 		return "" | ||||
| // Verify if a processing resource is attached to a Compute other than the one hosting | ||||
| // the current Open Cloud instance. If so, return the peer ID to contact | ||||
| func (b *ArgoBuilder) isProcessingReparted(processing resources.ProcessingResource, graphID string) (bool,string) { | ||||
| 	computeAttached := b.retrieveProcessingCompute(graphID) | ||||
| 	if computeAttached == nil { | ||||
| 		logger.Error().Msg("No compute was found attached to processing " + processing.Name + " : " + processing.UUID ) | ||||
| 		panic(0) | ||||
| 	} | ||||
|  | ||||
| 	slice_input := strings.Split(user_input, " ") | ||||
| 	new_slice := slice_input[1:] | ||||
| 	user_input = strings.Join(new_slice, " ") | ||||
| 	 | ||||
| 	// Creates an accessor strictly for the Peer Collection | ||||
| 	req := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"","",nil,nil) | ||||
| 	if req == nil { | ||||
| 		fmt.Println("TODO : handle error when trying to create a request on the Peer Collection") | ||||
| 		return false, "" | ||||
| 	}  | ||||
|  | ||||
| 	return user_input | ||||
| 	res := req.LoadOne(computeAttached.CreatorID) | ||||
| 	if res.Err != "" { | ||||
| 		fmt.Print("TODO : handle error when requesting PeerID") | ||||
| 		fmt.Print(res.Err) | ||||
| 		return false, "" | ||||
| 	} | ||||
| 	 | ||||
| 	peer := *res.ToPeer() | ||||
|  | ||||
| 	isNotReparted, _ := peer.IsMySelf() | ||||
| 	fmt.Println("Result IsMySelf for ", peer.UUID ," : ", isNotReparted) | ||||
| 	 | ||||
| 	return !isNotReparted, peer.UUID | ||||
| } | ||||
|  | ||||
| // Return the graphItem containing a Processing resource, so that we have access to the ID of the graphItem in order to identify it in the links | ||||
| func (b *ArgoBuilder) getProcessings() (list_computings []graph.GraphItem) { | ||||
| 	for _, item := range b.OriginWorkflow.Graph.Items { | ||||
| 		if item.Processing != nil { | ||||
| 			list_computings = append(list_computings, item) | ||||
| func (b *ArgoBuilder) retrieveProcessingCompute(graphID string) *resources.ComputeResource { | ||||
| 	for _, link := range b.OriginWorkflow.Graph.Links { | ||||
| 		// If a link contains the id of the processing | ||||
| 		var oppositeId string | ||||
| 		if link.Source.ID == graphID { | ||||
| 			oppositeId = link.Destination.ID | ||||
| 		} else if link.Destination.ID == graphID { | ||||
| 			oppositeId = link.Source.ID | ||||
| 		} | ||||
| 		fmt.Println("OppositeId : ", oppositeId) | ||||
| 		if oppositeId != "" { | ||||
| 			dt, res := b.OriginWorkflow.Graph.GetResource(oppositeId) | ||||
| 			if dt == oclib.COMPUTE_RESOURCE { | ||||
| 				return res.(*resources.ComputeResource) | ||||
| 			} else { | ||||
| 				continue | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 	} | ||||
| 		 | ||||
| 	return nil  | ||||
| } | ||||
|  | ||||
|  | ||||
| // Execute the last actions once the YAML file for the Argo Workflow is created | ||||
| func (b *ArgoBuilder) CompleteBuild(executionsId string) (string, error) { | ||||
| 	fmt.Println("DEV :: Completing build") | ||||
| 	setter := AdmiraltySetter{Id: executionsId} | ||||
| 	// Setup admiralty for each node | ||||
| 	for _, peer := range b.RemotePeers { | ||||
| 		fmt.Println("DEV :: Launching Admiralty Setup for ", peer) | ||||
| 		setter.InitializeAdmiralty(conf.GetConfig().PeerID,peer) | ||||
| 	} | ||||
|  | ||||
| 	// Update the name of the admiralty node to use  | ||||
| 	for _, template := range b.Workflow.Spec.Templates { | ||||
| 		if len(template.Metadata.Annotations) > 0 { | ||||
| 			if resp, ok := template.Metadata.Annotations["multicluster.admiralty.io/clustername"]; ok { | ||||
| 				fmt.Println(resp) | ||||
| 				template.Metadata.Annotations["multicluster.admiralty.io/clustername"] = "target-" + conf.GetConfig().ExecutionID | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // Pass a GraphItem's UUID and not the ID   | ||||
| func (b *ArgoBuilder) IsProcessing(component_uuid string) bool { | ||||
| 	return slices.Contains(b.OriginWorkflow.Processings, component_uuid) | ||||
| } | ||||
|  | ||||
| func getStringValue(comp resource_model.AbstractResource, key string) string { | ||||
| 	if res := comp.GetModelValue(key); res != nil { | ||||
| 		return res.(string) | ||||
| 	// Generate the YAML file | ||||
| 	random_name := fakelish.GenerateFakeWord(5, 8) + "-" + fakelish.GenerateFakeWord(5, 8) | ||||
| 	b.Workflow.Metadata.Name = "oc-monitor-" + random_name | ||||
| 	logger = oclib.GetLogger() | ||||
| 	yamlified, err := yaml.Marshal(b.Workflow) | ||||
| 	if err != nil { | ||||
| 		logger.Error().Msg("Could not transform object to yaml file") | ||||
| 		return "", err  | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (b *ArgoBuilder) isService(id string) bool{ | ||||
| 	// Give a unique name to each argo file with its timestamp DD:MM:YYYY_hhmmss | ||||
| 	current_timestamp := time.Now().Format("02_01_2006_150405") | ||||
| 	file_name := random_name + "_" + current_timestamp + ".yml" | ||||
| 	workflows_dir := "./argo_workflows/" | ||||
| 	err = os.WriteFile(workflows_dir+file_name, []byte(yamlified), 0660) | ||||
| 	 | ||||
| 	comp := b.OriginWorkflow.Graph.Items[id] | ||||
|  | ||||
| 	if comp.Processing == nil { | ||||
| 		return false | ||||
| 	if err != nil { | ||||
| 		logger.Error().Msg("Could not write the yaml file") | ||||
| 		return "", err | ||||
| 	} | ||||
|  | ||||
| 	_, is_exposed := comp.Processing.ResourceModel.Model["expose"] | ||||
| 	return is_exposed | ||||
| } | ||||
|  | ||||
| func getServiceExposure(service processing.ProcessingResource) ServiceExposure{ | ||||
| 	var exposure_type ServiceExposure | ||||
|  | ||||
| 	contract := getExposeContract(service.ResourceModel.Model["expose"]) | ||||
| 	_, pat := contract["PAT"] | ||||
| 	_, reverse := contract["reverse"] | ||||
| 	 | ||||
| 	if pat && reverse { | ||||
| 		exposure_type=  Both | ||||
| 	} | ||||
| 	if pat { | ||||
| 		exposure_type = PAT | ||||
| 	} | ||||
| 	if reverse{ | ||||
| 		exposure_type = Reverse  | ||||
| 	} | ||||
|  | ||||
| 	return exposure_type | ||||
| 	 | ||||
| } | ||||
|  | ||||
| func (b *ArgoBuilder) CreateIngress(processing processing.ProcessingResource, service Service) Ingress{ | ||||
| 	contract := getExposeContract(processing.ResourceModel.Model["expose"]) | ||||
| 	new_ingress := models.NewIngress(contract,service.Metadata.Name) | ||||
| 	return new_ingress | ||||
| 	return workflows_dir + file_name, nil | ||||
| } | ||||
| @@ -2,214 +2,65 @@ package workflow_builder | ||||
|  | ||||
| import ( | ||||
| 	"oc-monitord/models" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
|  | ||||
| 	"cloud.o-forge.io/core/oc-lib/models/resource_model" | ||||
| 	"cloud.o-forge.io/core/oc-lib/models/resources/workflow/graph" | ||||
| 	"github.com/nwtgck/go-fakelish" | ||||
| 	"go.mongodb.org/mongo-driver/bson" | ||||
| 	"go.mongodb.org/mongo-driver/bson/primitive" | ||||
| 	"cloud.o-forge.io/core/oc-lib/models/resources" | ||||
| 	"gopkg.in/yaml.v3" | ||||
| ) | ||||
|  | ||||
| type ServiceType string  | ||||
|  | ||||
| const ( | ||||
| 	NodePort	ServiceType = "NodePort" | ||||
| 	ClusterIP	ServiceType = "ClusterIP" | ||||
| ) | ||||
|  | ||||
| // TODO : refactor this method or the deserialization process in oc-lib to get rid of the mongo code | ||||
| func getExposeContract(expose resource_model.Model) map[string]map[string]string { | ||||
| 	contract := make(map[string]map[string]string,0) | ||||
| 	 | ||||
| 	mapped_info := bson.M{} | ||||
| 	// var contract PortTranslation | ||||
| 	_ , byt, _ := bson.MarshalValue(expose.Value) | ||||
| 	 | ||||
| 	bson.Unmarshal(byt,&mapped_info) | ||||
|  | ||||
| 	for _,v := range mapped_info { | ||||
| 		port := v.(primitive.M)["Key"].(string) | ||||
| 		// exposed_port := map[string]interface{}{data["Key"] : ""} | ||||
| 		port_translation := v.(primitive.M)["Value"] | ||||
| 		contract[port] = map[string]string{} | ||||
| 		for _,v2 := range port_translation.(primitive.A) { | ||||
| 			if v2.(primitive.M)["Key"] == "reverse" { | ||||
| 				contract[port]["reverse"] = v2.(primitive.M)["Value"].(string) | ||||
| 			} | ||||
| 			if v2.(primitive.M)["Key"] == "PAT" { | ||||
| 				contract[port]["PAT"] = v2.(primitive.M)["Value"].(string) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return contract | ||||
| } | ||||
|  | ||||
|  | ||||
| func (b *ArgoBuilder) CreateKubeService(processing graph.GraphItem, service_type ServiceType) models.Service{ | ||||
| 	 | ||||
| 	// model { | ||||
| 	// 	Type : "dict", | ||||
| 	// 	Value : { | ||||
| 	// 		"80" : { | ||||
| 	// 			"reverse"	: "", | ||||
| 	// 			"PAT"		: "34000"	 | ||||
| 	// 		}, | ||||
| 	// 		"344" : { | ||||
| 	// 			"reverse"	: "", | ||||
| 	// 			"PAT"		: "34400"	 | ||||
| 	// 		} | ||||
| 	// 	}  | ||||
| 	// } | ||||
| 	 | ||||
|  | ||||
| func (b *ArgoBuilder) CreateService(id string, processing *resources.ProcessingResource) { | ||||
| 	new_service := models.Service{ | ||||
| 		Manifest: models.Manifest{ | ||||
| 			ApiVersion: "v1",  | ||||
| 			Kind: "Service",  | ||||
| 			Metadata: models.Metadata{ | ||||
| 				Name: "workflow-service-"+ processing.Processing.Name + "-" + processing.ID , | ||||
| 				}, | ||||
| 			}, | ||||
| 			Spec: models.ServiceSpec{ | ||||
| 				Selector: map[string]string{"app": "service-" + fakelish.GenerateFakeWord(5, 8)}, | ||||
| 				Ports: []models.ServicePort{ | ||||
| 				}, | ||||
| 				Type: string(service_type), | ||||
| 			}, | ||||
| 		APIVersion: "v1", | ||||
| 		Kind:       "Service", | ||||
| 		Metadata: models.Metadata{ | ||||
| 			Name: "workflow-service", | ||||
| 		}, | ||||
| 		Spec: models.ServiceSpec{ | ||||
| 			Selector: map[string]string{"app": "oc-service"}, | ||||
| 			Ports:    []models.ServicePort{}, | ||||
| 			Type:     "NodePort", | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	completeServicePorts(&new_service, processing) | ||||
| 	 | ||||
| 	return new_service | ||||
| 	if processing == nil { | ||||
| 		return | ||||
| 	} | ||||
| 	b.completeServicePorts(&new_service, id, processing) | ||||
| 	b.Services = append(b.Services, &new_service) | ||||
| } | ||||
|  | ||||
| func completeServicePorts(service *models.Service, processing graph.GraphItem) { | ||||
| 	 | ||||
| 	contract := getExposeContract(processing.Processing.ResourceModel.Model["expose"]) | ||||
| 	 | ||||
| func (b *ArgoBuilder) completeServicePorts(service *models.Service, id string, processing *resources.ProcessingResource) { | ||||
| 	instance := processing.GetSelectedInstance() | ||||
| 	if instance != nil && instance.(*resources.ProcessingInstance).Access != nil && instance.(*resources.ProcessingInstance).Access.Container != nil { | ||||
| 		for _, execute := range instance.(*resources.ProcessingInstance).Access.Container.Exposes { | ||||
| 			if execute.PAT != 0 { | ||||
| 				new_port_translation := models.ServicePort{ | ||||
| 					Name:       strings.ToLower(processing.Name) + id, | ||||
| 					Port:       execute.Port, | ||||
| 					TargetPort: execute.PAT, | ||||
| 					Protocol:   "TCP", | ||||
| 				} | ||||
| 				service.Spec.Ports = append(service.Spec.Ports, new_port_translation) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
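|  | ||||
| // Illustration with hypothetical values: a processing named "Demo" (graph id "1234") whose | ||||
| // container exposes Port 8080 with PAT 31080 contributes the entry below; exposes with | ||||
| // PAT == 0 are skipped by the loop above. | ||||
| // | ||||
| //	models.ServicePort{Name: "demo1234", Port: 8080, TargetPort: 31080, Protocol: "TCP"} | ||||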
|  | ||||
| 	for str_port,translation_dict := range contract{ | ||||
| 		 | ||||
| 		port, err := strconv.ParseInt(str_port, 10, 64) | ||||
| func (b *ArgoBuilder) addServiceToArgo() error { | ||||
| 	for _, service := range b.Services { | ||||
| 		service_manifest, err := yaml.Marshal(service) | ||||
| 		if err != nil { | ||||
| 			logger.Error().Msg("Could not convert " + str_port + "to an int") | ||||
| 			return | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		// This condition allows us to create NodePort if PAT is filled or a ClusterIP that only expose port 80 and 443 (for Ingress) | ||||
| 		if _, ok := translation_dict["PAT"]; ok{ | ||||
| 			port_translation, err := strconv.ParseInt(translation_dict["PAT"], 10, 64) | ||||
| 			if err != nil { | ||||
| 				logger.Error().Msg("Could not convert " + translation_dict["PAT"] + "to an int") | ||||
| 				return | ||||
| 			} | ||||
| 			 | ||||
| 			new_port_translation := models.ServicePort{ | ||||
| 				Name: strings.ToLower(processing.Processing.Name) + processing.ID, | ||||
| 				Port:  port_translation-30000, | ||||
| 				TargetPort: port, | ||||
| 				NodePort: port_translation, | ||||
| 				Protocol: "TCP", | ||||
| 			} | ||||
| 			service.Spec.Ports = append(service.Spec.Ports, new_port_translation) | ||||
| 		} else { | ||||
| 			port_spec := []models.ServicePort{ | ||||
| 				models.ServicePort{ | ||||
| 					Name: strings.ToLower(processing.Processing.Name) + processing.ID + "-80", | ||||
| 					Port:  80, | ||||
| 					TargetPort: 80, | ||||
| 					Protocol: "TCP", | ||||
| 		service_template := models.Template{Name: "workflow-service-pod", | ||||
| 			Resource: models.ServiceResource{ | ||||
| 				Action:            "create", | ||||
| 				SuccessCondition:  "status.succeeded > 0", | ||||
| 				FailureCondition:  "status.failed > 3", | ||||
| 				SetOwnerReference: true, | ||||
| 				Manifest:          string(service_manifest), | ||||
| 			}, | ||||
| 			models.ServicePort{ | ||||
| 				Name: strings.ToLower(processing.Processing.Name) + processing.ID + "-443", | ||||
| 				Port:  443, | ||||
| 				TargetPort: 443, | ||||
| 				Protocol: "TCP", | ||||
| 			}, | ||||
| 		 | ||||
| 		} | ||||
| 		service.Spec.Ports = append(service.Spec.Ports,port_spec...) | ||||
|  | ||||
| 		} | ||||
| 		b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, service_template) | ||||
| 	} | ||||
| 	return | ||||
|  | ||||
| } | ||||
|  | ||||
| // The k8s service passed as the parameter only expose one port because it is the result of CreateService() | ||||
| // we check if this port is already exposed by a service in the workflow and we proceed to the creation of a new service OR  | ||||
| // add the port to the list of port exposed by an existing if portAlreadyExposed is false  | ||||
| func (b *ArgoBuilder) addKubeServiceToWorkflow(service models.Service, processing_name string, processing_id string) (label string) { | ||||
| 	if exposed, service_available_port := b.portAlreadyExposed(service.Spec.Ports[0].TargetPort); exposed && service_available_port != nil{ | ||||
| 		// The port you want to expose is already exposed by all the existing services | ||||
| 		service_available_port.Spec.Ports = append(service_available_port.Spec.Ports, service.Spec.Ports...) | ||||
| 		return service_available_port.Spec.Selector["app"] | ||||
| 	}  | ||||
|  | ||||
| 	b.Services = append(b.Services, service) | ||||
| 	return service.Spec.Selector["app"]	 | ||||
| } | ||||
|  | ||||
| func (b *ArgoBuilder) addServiceToArgo(service models.Service) error { | ||||
| 	service_manifest, err := yaml.Marshal(service) | ||||
| 	if err != nil { | ||||
| 		logger.Error().Msg("Could not marshal service manifest") | ||||
| 		return err | ||||
| 	} | ||||
| 	 | ||||
| 	service_template := models.Template{Name: "workflow-service-pod-" + strconv.Itoa(len(b.Services)),  | ||||
| 								Resource: models.ServiceResource{ | ||||
| 									Action: "create",  | ||||
| 									SuccessCondition: "status.succeeded > 0", | ||||
| 									FailureCondition: "status.failed > 3", | ||||
| 									SetOwnerReference: true, | ||||
| 									Manifest: string(service_manifest), | ||||
| 									}, | ||||
| 								} | ||||
| 	b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, service_template) | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
|  | ||||
| func (b *ArgoBuilder) addLabel(name string, id string) { | ||||
| 	argo_name := getArgoName(name,id) | ||||
| 	for _, template := range b.Workflow.Spec.Templates{ | ||||
| 		if template.Name == argo_name{ | ||||
| 			template.Metadata.Labels["app"] = "service-workflow"			 | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (b *ArgoBuilder) portAlreadyExposed(port int64) (exposed bool, service *models.Service ){ | ||||
| 	// For all already existing k8s services, test if the port in parameter is already exposed and returns the first service that doesn't yet expose this port  | ||||
| 	for _, s := range b.Services { | ||||
| 		i := 0 | ||||
| 		port_exposed := false | ||||
| 		for !port_exposed { | ||||
| 			if s.Spec.Ports[i].TargetPort == port { | ||||
| 				port_exposed = true | ||||
| 			} | ||||
| 		} | ||||
| 		if !port_exposed { | ||||
| 			return false, &s | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return true, nil | ||||
| } | ||||
|  | ||||
| // If contract has a reverse value | ||||
| 	// Test if service already exist for the argo service | ||||
| 		// Yes : create ingress, associate it with the existing kube service (name + exposed port on service)  | ||||
| 		// No :  | ||||
| 			// - create template for a service that expose the port of the argo service (pod) | ||||
| 			// - store the name of the service and its port exposed | ||||
| 			// - create template for an ingress : manifest must contain the path, name of the service and port exposed on the service | ||||
| func (b *ArgoBuilder) addIngress(){ | ||||
|  | ||||
| } | ||||
| @@ -13,19 +13,21 @@ type WorflowDB struct { | ||||
| } | ||||
|  | ||||
| // Create the objects from the mxgraphxml stored in the workflow given as a parameter | ||||
| func (w *WorflowDB) LoadFrom(workflow_id string) error { | ||||
| func (w *WorflowDB) LoadFrom(workflow_id string, peerID string) error { | ||||
| 	fmt.Println("Loading workflow from " + workflow_id) | ||||
| 	var err error | ||||
| 	if w.Workflow, err = w.getWorkflow(workflow_id); err != nil { | ||||
| 	if w.Workflow, err = w.getWorkflow(workflow_id, peerID); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Use oclib to retrieve the graph contained in the workflow referenced | ||||
| func (w *WorflowDB) getWorkflow(workflow_id string) (workflow *workflow.Workflow, err error) { | ||||
| func (w *WorflowDB) getWorkflow(workflow_id string, peerID string) (workflow *workflow.Workflow, err error) { | ||||
| 	logger := oclib.GetLogger() | ||||
|  | ||||
| 	lib_data := oclib.LoadOne(oclib.LibDataEnum(oclib.WORKFLOW), workflow_id) | ||||
| 	lib_data := oclib.NewRequest(oclib.LibDataEnum(oclib.WORKFLOW), "", peerID, []string{}, nil).LoadOne(workflow_id) | ||||
| 	fmt.Println("ERR", lib_data.Code, lib_data.Err) | ||||
| 	if lib_data.Code != 200 { | ||||
| 		logger.Error().Msg("Error loading the graph") | ||||
| 		return workflow, errors.New(lib_data.Err) | ||||
| @@ -39,20 +41,20 @@ func (w *WorflowDB) getWorkflow(workflow_id string) (workflow *workflow.Workflow | ||||
| 	return new_wf, nil | ||||
| } | ||||
|  | ||||
| func (w *WorflowDB) ExportToArgo(timeout int) (string, error) { | ||||
| func (w *WorflowDB) ExportToArgo(namespace string, timeout int) (*ArgoBuilder, int, error) { | ||||
| 	logger := oclib.GetLogger() | ||||
|  | ||||
| 	fmt.Println("Exporting to Argo", w.Workflow) | ||||
| 	if len(w.Workflow.Name) == 0 || w.Workflow.Graph == nil { | ||||
| 		return "", fmt.Errorf("can't export a graph that has not been loaded yet") | ||||
| 		return nil, 0, fmt.Errorf("can't export a graph that has not been loaded yet") | ||||
| 	} | ||||
|  | ||||
| 	argo_builder := ArgoBuilder{OriginWorkflow: *w.Workflow, Timeout: timeout} | ||||
| 	filename, err := argo_builder.CreateDAG() | ||||
| 	argoBuilder := ArgoBuilder{OriginWorkflow: w.Workflow, Timeout: timeout} | ||||
| 	stepMax, _, _, err := argoBuilder.CreateDAG(namespace, true) | ||||
| 	if err != nil { | ||||
| 		logger.Error().Msg("Could not create the argo file for " + w.Workflow.Name) | ||||
| 		return "", err | ||||
| 		return nil, 0, err | ||||
| 	} | ||||
| 	return filename, nil | ||||
| 	return &argoBuilder, stepMax, nil | ||||
| } | ||||
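|  | ||||
| // End-to-end usage sketch of the new signatures (identifiers for the namespace, timeout and | ||||
| // execution values are illustrative; error handling trimmed): | ||||
| // | ||||
| //	db := WorflowDB{} | ||||
| //	if err := db.LoadFrom(workflowID, peerID); err != nil { return } | ||||
| //	builder, stepMax, err := db.ExportToArgo(executionNamespace, timeoutSeconds) | ||||
| //	if err != nil { return } | ||||
| //	argoFilePath, _ := builder.CompleteBuild(executionID) | ||||
| //	fmt.Println(stepMax, argoFilePath) | ||||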
|  | ||||
| // TODO implement this function | ||||
|   | ||||
| @@ -4,7 +4,7 @@ import ( | ||||
| 	"testing" | ||||
| ) | ||||
|  | ||||
| func TestGetGraph(t *testing.T){ | ||||
| func TestGetGraph(t *testing.T) { | ||||
| 	w := WorflowDB{} | ||||
| 	w.LoadFrom("test-log") | ||||
| 	w.LoadFrom("test-log", "") | ||||
| } | ||||
|   | ||||
		Reference in New Issue
	
	Block a user