Compare commits
feature/pa ... main (33 commits)

817f3eb468
937116e9c5
bd58016d4b
0bfa8fdad0
54610f2dcc
defdcf4264
bcc024caef
6fce8f3aac
b6dea94196
fba84ac612
1b21c142f1
6c3a20999b
90fa0b8edd
b43cb6d758
d94f9603e8
494ba2f361
0f6213cd14
012c8a83cb
a59a48cc66
e106d7e243
139b249a7c
7a7364fb45
b4d57a8f2f
907880bfd6
09b8046073
90af391a0d
88b2edee2f
88610c3dba
b753523c35
7246dea2b2
8ddb119899
7d78920304
b4cef41db2
Dockerfile (43 changed lines)
@@ -1,25 +1,44 @@
-FROM golang:alpine AS builder
-LABEL maintainer="IRT PFN"
-ENV DOCKER_ENVIRONMENT=true
+ARG KUBERNETES_HOST=${KUBERNETES_HOST:-"127.0.0.1"}
+
+FROM golang:alpine AS deps
+
+ARG MONITORD_IMAGE

 WORKDIR /app

+COPY go.mod go.sum ./
+RUN sed -i '/replace/d' go.mod
+RUN go mod download -x
+
+#----------------------------------------------------------------------------------------------
+
+FROM golang:alpine AS builder
+
+WORKDIR /app
+
+COPY --from=deps /go/pkg /go/pkg
+COPY --from=deps /app/go.mod /app/go.sum ./
+
 COPY . .

-RUN go build .
+RUN go build

-FROM oc-monitord:latest AS monitord
+#----------------------------------------------------------------------------------------------
+
+FROM ${MONITORD_IMAGE:-oc-monitord}:latest AS monitord

-FROM argoproj/argocd:latest
+FROM scratch

-ENV MONITORD_PATH = "./oc-monitord"
+ENV KUBERNETES_SERVICE_HOST=$KUBERNETES_HOST

 WORKDIR /app

-COPY conf/docker_schedulerd.json /etc/oc/schedulerd.json
+COPY docker_schedulerd.json /etc/oc/schedulerd.json

-COPY --from=monitord /app/oc-monitord .
-COPY --from=builder /app/oc-schedulerd .
-COPY conf/docker_schedulerd.json /etc/oc/schedulerd.json
+COPY --from=monitord /app/oc-monitord /usr/bin/oc-monitord
+COPY --from=builder /app/oc-schedulerd /usr/bin/oc-schedulerd

-ENTRYPOINT ["/app/oc-schedulerd"]
+COPY docker_schedulerd.json /etc/oc/schedulerd.json
+
+# COPY argo_workflows .
+
+EXPOSE 8080
+
+ENTRYPOINT ["oc-schedulerd"]
Makefile (new file, 34 lines)

.DEFAULT_GOAL := all

build: clean
	go build .

build-monitord:
	go build -o ../oc-monitord ../oc-monitord

run:
	./oc-schedulerd

clean:
	rm -rf oc-schedulerd

docker:
	DOCKER_BUILDKIT=1 docker build -t oc-schedulerd --build-arg MONITORD_IMAGE=oc-monitord -f Dockerfile . --build-arg=HOST=$(HOST)
	docker tag oc-schedulerd:latest oc/oc-schedulerd:0.0.1

publish-kind:
	kind load docker-image oc/oc-schedulerd:0.0.1 --name opencloud | true

publish-registry:
	@echo "TODO"

docker-deploy:
	docker compose up -d

run-docker: docker publish-kind publish-registry docker-deploy

all: docker publish-kind publish-registry

dev: build-monitord build run

.PHONY: build run clean docker publish-kind publish-registry
README.md (38 changed lines)
@@ -1,18 +1,34 @@
 # oc-scheduler

-OC-Scheduler retrieves the content of submitted workflows and prepare them to be executed.
+oc-schedulerd is a daemon performing two actions at the same time:
+- subscribing to the local NATS instance's custom channels for messages commanding either the scheduling or the removal of an execution.
+- polling oc-catalog for scheduled executions

-## Parsing
+Depending on the environment it is running in, oc-schedulerd will either:
+- execute the oc-monitord binary
+- run an oc-monitord container

-From a workflow's name we retrieve the xml graph associated and parse it in order to create the object representing each componant.
-Each object is linked to another, represented by a links object with the two object IDs has attributes.
+## Parameters

-TODO :
-- [x] Retrieve the user input's for each component.
+oc-schedulerd uses JSON files to load its configuration. The template for this configuration file is below.

-## Organising
+```json
+{
+    "LOKI_URL" : "http://[IP/URL]:3100",
+    "MONGO_URL":"mongodb://[IP/URL]:27017/",
+    "NATS_URL":"nats://[IP/URL]:4222",
+    "MONGO_DATABASE":"",
+    "MONITORD_PATH": "",
+    "KUBERNETES_SERVICE_HOST" : "[IP/URL]",
+    "MONITOR_MODE": "",
+    "KUBE_CA": "",
+    "KUBE_CERT": "",
+    "KUBE_DATA": ""
+}
+```

-TODO :
-- [ ] create an argo file from the graph/worfklow
-- [ ] Create a different entry for each component
-- [ ] execute each element in the right order
+**MONITOR_MODE** should be either "local", "container", or "".
+
+## TODO
+
+- [ ] Implement the discovery of the current mode: local, local in container, or as a container
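For illustration only: a minimal Go sketch of reading a configuration file that follows the template above and branching on MONITOR_MODE. It uses only encoding/json and a hypothetical SchedulerdConf struct; the real daemon loads its settings through the onion/oc-lib loaders shown in conf/conf.go and main.go further down.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// SchedulerdConf mirrors a subset of the keys in the template above (illustrative only).
type SchedulerdConf struct {
	LokiUrl     string `json:"LOKI_URL"`
	MongoUrl    string `json:"MONGO_URL"`
	NatsUrl     string `json:"NATS_URL"`
	MongoDB     string `json:"MONGO_DATABASE"`
	MonitorPath string `json:"MONITORD_PATH"`
	KubeHost    string `json:"KUBERNETES_SERVICE_HOST"`
	MonitorMode string `json:"MONITOR_MODE"`
}

func main() {
	// Example path; the container image ships its config as /etc/oc/schedulerd.json.
	raw, err := os.ReadFile("schedulerd.json")
	if err != nil {
		panic(err)
	}

	var c SchedulerdConf
	if err := json.Unmarshal(raw, &c); err != nil {
		panic(err)
	}

	// The README allows "local", "container" or an empty string.
	switch c.MonitorMode {
	case "container":
		fmt.Println("would start an oc-monitord container via the Docker API on", c.KubeHost)
	case "local":
		fmt.Println("would exec the oc-monitord binary at", c.MonitorPath)
	default:
		fmt.Println("monitor mode is empty or unknown:", c.MonitorMode)
	}
}
```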
conf/conf.go (12 changed lines)
@@ -14,6 +14,12 @@ type Config struct {
 	Logs     string
 	LokiUrl  string
 	NatsUrl  string
+	Mode     string
+	KubeHost string
+	KubePort string
+	KubeCA   string
+	KubeCert string
+	KubeData string
 }

 var instance *Config
@@ -38,12 +44,6 @@ func init() {
 		o = onion.New(l2, l3)
 	}
 	GetConfig().MonitorPath = o.GetStringDefault("MONITORD_PATH", "../oc-monitord/oc-monitord")
-	GetConfig().Logs = o.GetStringDefault("LOG_LEVEL", "info")
-	GetConfig().LokiUrl = o.GetStringDefault("LOKI_URL", "http://127.0.0.1:3100")
-	GetConfig().NatsUrl = o.GetStringDefault("NATS_URL", "http://127.0.0.1:4222")
-	GetConfig().MongoUrl = o.GetStringDefault("MONGO_URL", "mongodb://127.0.0.1:27017")
-	GetConfig().DBName = o.GetStringDefault("MONGO_DATABASE", "DC_myDC")
 }

 func GetConfig() *Config {
(deleted file, 6 lines)

{
    "LOKI_URL" : "http://loki:3100",
    "MONGO_URL":"mongodb://mongo:27017/",
    "MONGO_DATABASE":"DC_myDC",
    "NATS_URL": "nats://nats:4222"
}
daemons/execute_monitor_container.go (new file, 146 lines)

package daemons

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"oc-schedulerd/conf"

	"github.com/rs/zerolog"
)

type ContainerMonitor struct {
	Monitor  LocalMonitor
	KubeCA   string
	KubeCert string
	KubeData string
	KubeHost string
	KubePort string
}

func NewContainerMonitor(UUID string, peerId string, duration int) Executor {
	return &ContainerMonitor{
		Monitor: LocalMonitor{
			ExecutionID: UUID,
			PeerID:      peerId,
			Duration:    duration,
			LokiUrl:     conf.GetConfig().LokiUrl,
			MongoUrl:    conf.GetConfig().MongoUrl,
			DBName:      conf.GetConfig().DBName,
		},
		KubeCA:   conf.GetConfig().KubeCA,
		KubeCert: conf.GetConfig().KubeCert,
		KubeData: conf.GetConfig().KubeData,
		KubeHost: conf.GetConfig().KubeHost,
		KubePort: conf.GetConfig().KubePort,
	}
}

func (cm *ContainerMonitor) PrepareMonitorExec() []string {
	args := []string{
		"-e", cm.Monitor.ExecutionID,
		"-p", cm.Monitor.PeerID,
		"-u", cm.Monitor.LokiUrl,
		"-m", cm.Monitor.MongoUrl,
		"-d", cm.Monitor.DBName,
		"-M", "kubernetes",
		"-H", cm.KubeHost,
		"-P", cm.KubePort,
		"-C", cm.KubeCert,
		"-D", cm.KubeData,
		"-c", cm.KubeCA,
	}

	if cm.Monitor.Duration > 0 {
		args = append(args, "-t", fmt.Sprintf("%d", cm.Monitor.Duration))
	}

	return args
}

// Contact the Docker API at the KubeHost's URL to:
// - check if the image exists
// - create the container
// - start the container
func (cm *ContainerMonitor) LaunchMonitor(args []string, l zerolog.Logger) {
	var containerID string
	imageName := "oc-monitord"
	url := "http://" + cm.KubeHost + ":2375"

	resp, err := http.Get(url + "/images/" + imageName + "/json")
	if err != nil {
		l.Fatal().Msg("Error when contacting the docker API on " + url + ": " + err.Error())
	}

	if resp.StatusCode != http.StatusOK {
		d, _ := io.ReadAll(resp.Body)
		l.Fatal().Msg("Couldn't find the oc-monitord image : " + string(d))
	}

	dataCreation := map[string]interface{}{"Image": imageName, "Cmd": args}
	byteData, err := json.Marshal(dataCreation)
	if err != nil {
		l.Fatal().Msg("Error when creating the request body : " + err.Error())
	}

	r, _ := http.NewRequest("POST", url+"/containers/create", bytes.NewBuffer(byteData))
	r.Header.Add("Content-Type", "application/json")
	resp, err = http.DefaultClient.Do(r)
	if err != nil {
		l.Fatal().Msg("Error when contacting the docker API on " + url + ": " + err.Error())
	}

	if resp.StatusCode == 201 {
		var d map[string]interface{}

		b, err := io.ReadAll(resp.Body)
		if err != nil {
			l.Fatal().Msg(err.Error())
		}

		err = json.Unmarshal(b, &d)
		if err != nil {
			l.Fatal().Msg(err.Error())
		}

		containerID = d["Id"].(string)
	} else {
		d, _ := io.ReadAll(resp.Body)
		l.Fatal().Msg("Error when creating the container on " + url + "\n " + string(d))
	}

	networkName := "oc"

	dataNetwork, _ := json.Marshal(map[string]string{"Container": containerID})
	r, _ = http.NewRequest("POST", url+"/networks/"+networkName+"/connect", bytes.NewBuffer(dataNetwork))
	r.Header.Add("Content-Type", "application/json")
	resp, err = http.DefaultClient.Do(r)
	if err != nil {
		l.Fatal().Msg("Error when contacting the docker API on " + url + ": " + err.Error())
	}
	if resp.StatusCode != 200 {
		d, _ := io.ReadAll(resp.Body)
		l.Error().Msg("Error when adding container to the network : " + string(d))
		return
	}

	resp, err = http.Post(url+"/containers/"+containerID+"/start", "", nil)
	if err != nil {
		l.Fatal().Msg("Error when contacting the docker API on " + url + ": " + err.Error())
	}

	if resp.StatusCode >= 300 {
		d, _ := io.ReadAll(resp.Body)
		l.Fatal().Msg("Error when starting the container on " + url + "\n " + string(d))
	}

	l.Info().Msg("Started container " + containerID)
	// we can add logging with GET /containers/id/logs?stdout=true&follow=true

	// logExecution(stdoutMonitord, l)
}
(deleted file, 67 lines)

package daemons

// type manifestValues struct{
// 	ARGO_FILE string
// 	LOKI_URL string
// 	CONTAINER_NAME string
// }

// manifest, err := em.CreateManifest(booking, argo_file_path)
// if err != nil {
// 	logger.Logger.Error().Msg("Could not create manifest " + err.Error())
// }

// // launch a pod that contains oc-monitor, give it the name of the workflow
// cmd := exec.Command("kubectl","apply","-f", "manifests/"+manifest)
// output , err := cmd.CombinedOutput()
// if err != nil {
// 	logger.Logger.Error().Msg("failed to create new pod for " + booking.Workflow + " :" + err.Error())
// 	return
// }

// func (*ExecutionManager) CreateManifest(booking models.Booking, argo_file string) (manifest_filepath string, err error) {
// 	var filled_template bytes.Buffer
// 	manifest_file, err := os.ReadFile("conf/monitor_pod_template.yml")
// 	if err != nil {
// 		logger.Logger.Error().Msg("Could not open the k8s template file for " + booking.Workflow)
// 		return "", err
// 	}
// 	container_name := getContainerName(argo_file)
// 	tmpl, err := template.New("manifest_template").Parse(string(manifest_file))
// 	if err != nil {
// 		logger.Logger.Error().Msg(err.Error())
// 		return "", err
// 	}
// 	manifest_data := manifestValues{
// 		ARGO_FILE: argo_file,
// 		LOKI_URL: conf.GetConfig().Logs,
// 		CONTAINER_NAME: container_name,
// 	}
// 	err = tmpl.Execute(&filled_template, manifest_data)
// 	if err != nil {
// 		logger.Logger.Error().Msg("Could not complete manifest template for " + booking.Workflow)
// 		return "", err }
// 	manifest_filepath = booking.Workflow + "_manifest.yaml"
// 	err = os.WriteFile("manifests/" + manifest_filepath, filled_template.Bytes(),0644)
// 	if err != nil {
// 		logger.Logger.Error().Msg("Could not write the YAML file for " + booking.Workflow + "'s manifest")
// 		return "", err
// 	}
// 	return manifest_filepath, nil
// }

// func getContainerName(argo_file string) string {
// 	regex := "([a-zA-Z]+-[a-zA-Z]+)"
// 	re := regexp.MustCompile(regex)
// 	container_name := re.FindString(argo_file)
// 	return container_name
// }
@@ -9,39 +9,65 @@ import (
 )

 type LocalMonitor struct {
-	LokiURL     string
-	KubeURL     string
 	ExecutionID string
 	PeerID      string
 	Duration    int
-	Logger      zerolog.Logger
+	LokiUrl     string
+	MongoUrl    string
+	DBName      string
 }

-func (lm *LocalMonitor) LaunchLocalMonitor() {
-	if lm.LokiURL == "" || lm.KubeURL == "" || lm.ExecutionID == "" {
-		lm.Logger.Error().Msg("Missing parameter in LocalMonitor")
-	}
-
-	// For dev purposes, in prod KubeURL must be a kube API's URL
-	if lm.KubeURL != "localhost" {
-		lm.execRemoteKube()
-	} else {
-		lm.execLocalKube()
-	}
-}
-
-func (lm *LocalMonitor) execLocalKube() {
-	args := []string{"-e", lm.ExecutionID, "-p", lm.PeerID, "-u", lm.LokiURL, "-m", conf.GetConfig().MongoUrl, "-d", conf.GetConfig().DBName}
+func NewLocalMonitor(UUID string, peerId string, duration int) Executor {
+	return &LocalMonitor{
+		ExecutionID: UUID,
+		PeerID:      peerId,
+		Duration:    duration,
+		LokiUrl:     conf.GetConfig().LokiUrl,
+		MongoUrl:    conf.GetConfig().MongoUrl,
+		DBName:      conf.GetConfig().DBName,
+	}
+}
+
+// func (lm *LocalMonitor) LaunchLocalMonitor() {
+// 	if lm.ExecutionID == "" {
+// 		lm.Logger.Error().Msg("Missing parameter in LocalMonitor")
+// 	}
+// }
+
+func (lm *LocalMonitor) PrepareMonitorExec() []string {
+	args := []string{
+		"-e", lm.ExecutionID,
+		"-p", lm.PeerID,
+		"-u", lm.LokiUrl,
+		"-m", lm.MongoUrl,
+		"-d", lm.DBName,
+	}

 	if lm.Duration > 0 {
 		args = append(args, "-t", fmt.Sprintf("%d", lm.Duration))
 	}
-	cmd := exec.Command(conf.GetConfig().MonitorPath, args...)
-	fmt.Printf("Command : %v\n", cmd)
-	err := cmd.Start()
-	if err != nil {
-		lm.Logger.Error().Msg("Could not start oc-monitor for " + lm.ExecutionID + " : " + err.Error())
-	}
+
+	return args
+}
+
+func (lm *LocalMonitor) LaunchMonitor(args []string, l zerolog.Logger) {
+	cmd := exec.Command(conf.GetConfig().MonitorPath, args...)
+	fmt.Printf("Command : %v\n", cmd)
+
+	stdoutMonitord, err := cmd.StdoutPipe()
+	if err != nil {
+		l.Error().Msg("Could not retrieve stdout pipe for execution of oc-monitord: " + err.Error())
+	}
+
+	err = cmd.Start()
+	if err != nil {
+		l.Error().Msg("Could not start oc-monitor for " + lm.ExecutionID + " : " + err.Error())
+	}
+
+	logExecution(stdoutMonitord, l)
 }
-
-// TODO : implement this
-func (lm *LocalMonitor) execRemoteKube() {}
@@ -3,60 +3,81 @@ package daemons
 import (
 	"fmt"
 	"oc-schedulerd/conf"
-	"os"
 	"time"

 	oclib "cloud.o-forge.io/core/oc-lib"
 	workflow_execution "cloud.o-forge.io/core/oc-lib/models/workflow_execution"
 )

-var Bookings = ScheduledBooking{Bookings: []*workflow_execution.WorkflowExecutions{}}
+var Executions = ScheduledExecution{Execs: map[string]workflow_execution.WorkflowExecution{}}

 type ExecutionManager struct{}

-// Loop every second on the booking's list and move the booking that must start to a new list
+// Loop every second on the Execution's list and move the Execution that must start to a new list
 // that will be looped over to start them
 func (em *ExecutionManager) RetrieveNextExecutions() {
 	logger := oclib.GetLogger()
 	for {
-		fmt.Println("Checking for bookings", len(Bookings.Bookings))
-		Bookings.Mu.Lock()
-		if len(Bookings.Bookings) > 0 {
-			bookings := Bookings.Bookings
-			for i := len(bookings) - 1; i >= 0; i-- {
-				if bookings[i].ExecDate.Before(time.Now().UTC()) {
-					logger.Info().Msg("Will execute " + bookings[i].UUID + " soon")
-					go em.executeBooking(bookings[i])
-					Bookings.Bookings = append(bookings[:i], bookings[i+1:]...)
+		fmt.Println("Checking for executions", len(Executions.Execs))
+		Executions.Mu.Lock()
+		if len(Executions.Execs) > 0 {
+			executions := Executions.Execs
+			for execId, exec := range executions {
+				if exec.ExecDate.Before(time.Now().UTC()) {
+					logger.Info().Msg("Will execute " + execId + " soon")
+					go em.executeExecution(&exec)
+					delete(executions, execId)
 				}
 			}
 		}
-		Bookings.Mu.Unlock()
+		Executions.Mu.Unlock()
 		time.Sleep(time.Second)
 	}
 }

-func (em *ExecutionManager) executeBooking(booking *workflow_execution.WorkflowExecutions) {
+func (em *ExecutionManager) executeExecution(execution *workflow_execution.WorkflowExecution) {
 	// start execution
 	// create the yaml that describes the pod : filename, path/url to Loki
-	exec_method := os.Getenv("MONITOR_METHOD")
+	var executor Executor
+	// exec_method := os.Getenv("MONITOR_METHOD")
 	logger := oclib.GetLogger()
-	if exec_method == "k8s" {
-		logger.Error().Msg("TODO : executing oc-monitor in a k8s")
-	} else {
-		logger.Debug().Msg("Executing oc-monitor localy")
-		duration := 0
-		if booking.EndDate != nil {
-			duration = int(booking.EndDate.Sub(booking.ExecDate).Seconds())
-		}
-		monitor := LocalMonitor{
-			Logger:      logger,
-			Duration:    duration,
-			LokiURL:     conf.GetConfig().LokiUrl,
-			KubeURL:     "localhost",
-			ExecutionID: booking.UUID,
-			PeerID:      booking.CreatorID,
-		}
-		monitor.LaunchLocalMonitor()
+
+	duration := 0
+	if execution.EndDate != nil {
+		duration = int(execution.EndDate.Sub(execution.ExecDate).Seconds())
 	}
+
+	if conf.GetConfig().Mode == "local" {
+		executor = NewLocalMonitor(execution.UUID, execution.CreatorID, duration)
+	}
+	if conf.GetConfig().Mode == "container" {
+		executor = NewContainerMonitor(execution.UUID, execution.CreatorID, duration)
+	}
+
+	if executor == nil {
+		logger.Fatal().Msg("Could not create executor")
+	}
+
+	args := executor.PrepareMonitorExec()
+	executor.LaunchMonitor(args, logger)
+
+	// if exec_method == "k8s" {
+	// 	logger.Error().Msg("TODO : executing oc-monitor in a k8s")
+	// } else {
+	// 	logger.Debug().Msg("Executing oc-monitor localy")
+	// 	duration := 0
+	// 	if Execution.EndDate != nil {
+	// 		duration = int(Execution.EndDate.Sub(Execution.ExecDate).Seconds())
+	// 	}
+	// 	monitor := LocalMonitor{
+	// 		Logger: logger,
+	// 		Duration: duration,
+	// 		ExecutionID: Execution.ExecutionsID,
+	// 		PeerID: Execution.CreatorID,
+	// 		LokiUrl: conf.GetConfig().LokiUrl,
+	// 	}
+	// 	monitor.LaunchLocalMonitor()
+	// }
 }
daemons/interface.go (new file, 21 lines)

package daemons

import (
	"bufio"
	"io"

	"github.com/rs/zerolog"
)

type Executor interface {
	PrepareMonitorExec() []string
	LaunchMonitor(args []string, l zerolog.Logger)
}

func logExecution(reader io.ReadCloser, l zerolog.Logger) {
	scanner := bufio.NewScanner(reader)
	for scanner.Scan() {
		output := scanner.Text()
		l.Debug().Msg(output)
	}
}
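A self-contained sketch of how this two-step contract is consumed: build the oc-monitord argument list, then launch it. The dummyExecutor type below is hypothetical and exists only to show the call order; the real implementations are LocalMonitor and ContainerMonitor, driven by ExecutionManager.executeExecution.

```go
package main

import (
	"fmt"

	"github.com/rs/zerolog"
)

// Executor mirrors the interface defined in daemons/interface.go.
type Executor interface {
	PrepareMonitorExec() []string
	LaunchMonitor(args []string, l zerolog.Logger)
}

// dummyExecutor is a hypothetical implementation used only for illustration.
type dummyExecutor struct{ executionID string }

// PrepareMonitorExec builds the argument list without launching anything.
func (d *dummyExecutor) PrepareMonitorExec() []string {
	return []string{"-e", d.executionID}
}

// LaunchMonitor is where a real implementation would exec the binary or start a container.
func (d *dummyExecutor) LaunchMonitor(args []string, l zerolog.Logger) {
	l.Info().Msg(fmt.Sprintf("would launch oc-monitord with args %v", args))
}

func main() {
	logger := zerolog.New(zerolog.NewConsoleWriter())
	var e Executor = &dummyExecutor{executionID: "1234"}
	// Same call order as the execution manager: prepare the args, then launch.
	e.LaunchMonitor(e.PrepareMonitorExec(), logger)
}
```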
@@ -3,7 +3,6 @@ package daemons
 import (
 	"encoding/json"
 	"fmt"
-	"oc-schedulerd/conf"
 	"sync"
 	"time"

@@ -17,40 +16,33 @@ import (
 	"go.mongodb.org/mongo-driver/bson/primitive"
 )

-type ScheduledBooking struct {
-	Bookings []*workflow_execution.WorkflowExecutions
-	Mu       sync.Mutex
+type ScheduledExecution struct {
+	Execs map[string]workflow_execution.WorkflowExecution
+	Mu    sync.Mutex
 }

-func (sb *ScheduledBooking) DeleteSchedules(workflow_id string) {
-	toNotDelete := []*workflow_execution.WorkflowExecutions{}
-	for _, b := range sb.Bookings {
-		if b.WorkflowID != workflow_id {
-			toNotDelete = append(toNotDelete, b)
-		}
-	}
-	Bookings.Mu.Lock()
-	defer Bookings.Mu.Unlock()
-	sb.Bookings = toNotDelete
+func (sb *ScheduledExecution) DeleteSchedules(uuid string) {
+	Executions.Mu.Lock()
+	defer Executions.Mu.Unlock()
+	delete(sb.Execs, uuid)
 }

-func (sb *ScheduledBooking) AddSchedules(new_bookings []*workflow_execution.WorkflowExecutions, logger zerolog.Logger) {
-	Bookings.Mu.Lock()
-	defer Bookings.Mu.Unlock()
-	for _, exec := range new_bookings {
+func (sb *ScheduledExecution) AddSchedules(new_executions []*workflow_execution.WorkflowExecution, logger zerolog.Logger) {
+	Executions.Mu.Lock()
+	defer Executions.Mu.Unlock()
+	for _, exec := range new_executions {
 		fmt.Println("Adding "+exec.UUID, !sb.execIsSet(exec))
 		if !sb.execIsSet(exec) {
-			sb.Bookings = append(sb.Bookings, exec)
+			sb.Execs[exec.UUID] = *exec
 		}
 	}
 }

-func (sb *ScheduledBooking) execIsSet(exec *workflow_execution.WorkflowExecutions) bool {
-	for _, b := range sb.Bookings {
-		if b.Equals(exec) {
-			return true
-		}
-	}
+func (sb *ScheduledExecution) execIsSet(exec *workflow_execution.WorkflowExecution) bool {
+	if _, ok := sb.Execs[exec.UUID]; ok {
+		return true
+	}
 	return false
 }
@@ -66,7 +58,7 @@ type ScheduleManager struct {
 // on workflows' scheduling. Messages must contain
 // workflow execution ID, to allow retrieval of execution infos
 func (s *ScheduleManager) ListenNATS() {
-	nc, err := nats.Connect(conf.GetConfig().NatsUrl)
+	nc, err := nats.Connect(oclib.GetConfig().NATSUrl)
 	if err != nil {
 		s.Logger.Error().Msg("Could not connect to NATS")
 		return
@@ -102,7 +94,7 @@ func (s *ScheduleManager) listenForChange(nc *nats.Conn, chanName string, delete
 		}
 		fmt.Println("Catching " + str + " workflow... " + map_mess["id"])
 		if delete {
-			Bookings.DeleteSchedules(map_mess["id"])
+			Executions.DeleteSchedules(map_mess["id"])
 		} else {
 			s.getNextScheduledWorkflows(1)
 		}
@@ -112,14 +104,14 @@ func (s *ScheduleManager) listenForChange(nc *nats.Conn, chanName string, delete
 // Used at launch of the component to retrieve the next scheduled workflows
 // and then every X minutes in case some workflows were scheduled before launch
 func (s *ScheduleManager) SchedulePolling() {
-	var sleep_time float64 = 1
+	var sleep_time float64 = 20
 	for {
 		s.getNextScheduledWorkflows(1)
-		s.Logger.Info().Msg("Current list of schedules -------> " + fmt.Sprintf("%v", len(Bookings.Bookings)))
-		time.Sleep(time.Minute * time.Duration(sleep_time))
+		s.Logger.Info().Msg("Current list of schedules -------> " + fmt.Sprintf("%v", len(Executions.Execs)))
+		time.Sleep(time.Second * time.Duration(sleep_time))
 	}
 }
-func (s *ScheduleManager) getExecution(from time.Time, to time.Time) (exec_list []*workflow_execution.WorkflowExecutions, err error) {
+func (s *ScheduleManager) getExecution(from time.Time, to time.Time) (exec_list []*workflow_execution.WorkflowExecution, err error) {
 	fmt.Printf("Getting workflows execution from %s to %s \n", from.String(), to.String())
 	f := dbs.Filters{
 		And: map[string][]dbs.Filter{
@@ -133,7 +125,7 @@ func (s *ScheduleManager) getExecution(from time.Time, to time.Time) (exec_list
 		return
 	}
 	for _, exec := range res.Data {
-		exec_list = append(exec_list, exec.(*workflow_execution.WorkflowExecutions))
+		exec_list = append(exec_list, exec.(*workflow_execution.WorkflowExecution))
 	}
 	fmt.Println("Found "+fmt.Sprintf("%v", len(exec_list))+" workflows", res)
 	return
@@ -151,6 +143,6 @@ func (s *ScheduleManager) getNextScheduledWorkflows(minutes float64) {
 	); err != nil {
 		s.Logger.Error().Msg("Could not retrieve next schedules")
 	} else {
-		Bookings.AddSchedules(next_wf_exec, s.Logger)
+		Executions.AddSchedules(next_wf_exec, s.Logger)
 	}
 }
(deleted file, 36 lines)

version: '3.4'

services:
  nats:
    image: 'nats:latest'
    container_name: nats
    ports:
      - 4222:4222
    command:
      - "--debug"
    networks:
      - catalog
  loki:
    image: 'grafana/loki'
    container_name: loki
    ports :
      - "3100:3100"
    networks:
      - catalog
  grafana:
    image: 'grafana/grafana'
    container_name: grafana
    ports:
      - '3000:3000'
    networks:
      - catalog
    volumes:
      - ./conf/grafana_data_source.yml:/etc/grafana/provisioning/datasources/datasource.yml
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=pfnirt # Change this to anything but admin to not have a password change page at startup
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_DISABLE_INITIAL_ADMIN_PASSWORD_CHANGE=true

networks:
  catalog:
    external: true
@@ -2,15 +2,20 @@ version: '3.4'

 services:
   oc-schedulerd:
+    env_file:
+      - path: ./env.env
+        required: false
     environment:
       - MONGO_DATABASE=DC_myDC
- KUBE_CA=${KUBE_CA:-LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrVENDQVRlZ0F3SUJBZ0lJWUxWNkFPQkdrU1F3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOekl6TVRFeU1ETTJNQjRYRFRJME1EZ3dPREV3TVRNMU5sb1hEVEkxTURndwpPREV3TVRNMU5sb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJGQ2Q1MFdPeWdlQ2syQzcKV2FrOWY4MVAvSkJieVRIajRWOXBsTEo0ck5HeHFtSjJOb2xROFYxdUx5RjBtOTQ2Nkc0RmRDQ2dqaXFVSk92Swp3NVRPNnd5alNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCVFJkOFI5cXVWK2pjeUVmL0ovT1hQSzMyS09XekFLQmdncWhrak9QUVFEQWdOSUFEQkYKQWlFQTArbThqTDBJVldvUTZ0dnB4cFo4NVlMalF1SmpwdXM0aDdnSXRxS3NmUVVDSUI2M2ZNdzFBMm5OVWU1TgpIUGZOcEQwSEtwcVN0Wnk4djIyVzliYlJUNklZCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlRENDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUzTWpNeE1USXdNell3SGhjTk1qUXdPREE0TVRBeE16VTJXaGNOTXpRd09EQTJNVEF4TXpVMgpXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUzTWpNeE1USXdNell3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFRc3hXWk9pbnIrcVp4TmFEQjVGMGsvTDF5cE01VHAxOFRaeU92ektJazQKRTFsZWVqUm9STW0zNmhPeVljbnN3d3JoNnhSUnBpMW5RdGhyMzg0S0Z6MlBvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVTBYZkVmYXJsZm8zTWhIL3lmemx6Cnl0OWlqbHN3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQUxJL2dNYnNMT3MvUUpJa3U2WHVpRVMwTEE2cEJHMXgKcnBlTnpGdlZOekZsQWlFQW1wdjBubjZqN3M0MVI0QzFNMEpSL0djNE53MHdldlFmZWdEVGF1R2p3cFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K}
- KUBE_TOKEN=${KUBE_TOKEN:-LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSU5ZS1BFb1dhd1NKUzJlRW5oWmlYMk5VZlY1ZlhKV2krSVNnV09TNFE5VTlvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFVUozblJZN0tCNEtUWUx0WnFUMS96VS84a0Z2Sk1lUGhYMm1Vc25pczBiR3FZblkyaVZEeApYVzR2SVhTYjNqcm9iZ1YwSUtDT0twUWs2OHJEbE03ckRBPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=}
     image: 'oc-schedulerd:latest'
     ports:
       - 9001:8080
     container_name: oc-schedulerd
     networks:
-      - catalog
+      - oc

 networks:
-  catalog:
+  oc:
     external: true
docker_schedulerd.json (new file, 11 lines)

{
"LOKI_URL" : "http://loki:3100",
"MONGO_URL":"mongodb://mongo:27017/",
"NATS_URL":"nats://nats:4222",
"MONGO_DATABASE":"DC_myDC",
"MONITORD_PATH": "oc-monitord",
"MODE": "kubernetes",
"KUBE_CA": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlRENDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTkRNMk56UTNPRGt3SGhjTk1qVXdOREF6TVRBd05qSTVXaGNOTXpVd05EQXhNVEF3TmpJNQpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTkRNMk56UTNPRGt3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFUL1NDWEMycjFTWGdza0FvTGJKSEtIem4zQXYva2t0ZElpSk42WlBsWVEKY3p0dXV5K3JBMHJ5VUlkZnIyK3VCRS9VN0NjSlhPL004QVdyODFwVklzVmdvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVVFHOVBQQ0g0c1lMbFkvQk5CdnN5CklEam1PK0l3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQUtJeFc4NERQTW1URXVVN0Z3ek44SFB6ZHdldWh6U20KVzNYMU9tczFSQVNRQWlFQXI4UTJZSGtNQndSOThhcWtTa2JqU1dhejg0OEY2VkZLWjFacXpNbDFZaTg9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K",
"KUBE_CERT": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrVENDQVRlZ0F3SUJBZ0lJZWFxQUp2bHhmYzh3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOelF6TmpjME56ZzVNQjRYRFRJMU1EUXdNekV3TURZeU9Wb1hEVEkyTURRdwpNekV3TURZeU9Wb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJJelpGSlJUVHJmYXlNNFoKTjlRclN4MC9wbDdoZGdvWFM5bGEydmFFRkhlYVFaalRML2NZd1dMUnhoOWVOa01SRDZjTk4reWZkSXE2aWo1SQo5RTlENGdLalNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCVFFzUkZXUlNweDV0RGZnZDh1UTdweUw0ZERMVEFLQmdncWhrak9QUVFEQWdOSUFEQkYKQWlFQStXZTlBVXJRUm5pWjVCUERELzJwWjA3TzFQWWFIc01ycTZZcVB4VlV5cGdDSUhrRE8rcVlMYUhkUEhXZgpWUGszNXJmejM0Qk4xN2VyaEVxRjF0U0c1MWFqCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUzTkRNMk56UTNPRGt3SGhjTk1qVXdOREF6TVRBd05qSTVXaGNOTXpVd05EQXhNVEF3TmpJNQpXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUzTkRNMk56UTNPRGt3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFUNDF1NXIzM0JyenZ3ZXZaWHM2TEg3T1k4NGhOOGRrODdnTlhaUndBdWkKdXJBaU45TFdYcmYxeFoyaXp5d0FiVGk1ZVc2Q1hIMjhDdEVSWUlrcjNoTXdvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVTBMRVJWa1VxY2ViUTM0SGZMa082CmNpK0hReTB3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUloQUpLWGZLdXBzdklONEtQVW50c1lPNXhiaGhSQmhSYlIKN3JyeWs2VHpZMU5JQWlBVktKWis3UUxzeGFyQktORnI3eTVYYlNGanI3Y1gyQmhOYy9wdnFLcWtFUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K",
"KUBE_DATA": "LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUVJd01wVjdzMHc2S0VTQ2FBWDhvSVZPUHloa2U0Q3duNWZQZnhOaUYyM3JvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFak5rVWxGTk90OXJJemhrMzFDdExIVCttWHVGMkNoZEwyVnJhOW9RVWQ1cEJtTk12OXhqQgpZdEhHSDE0MlF4RVBwdzAzN0o5MGlycUtQa2owVDBQaUFnPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo="
}
go.mod (104 changed lines)
@@ -1,103 +1,55 @@
 module oc-schedulerd

-go 1.22.0
+go 1.23.0

-toolchain go1.22.5
+toolchain go1.24.0

 require (
+	cloud.o-forge.io/core/oc-lib v0.0.0-20250624102227-e600fedcab06
 	github.com/beego/beego v1.12.12
-	github.com/beego/beego/v2 v2.3.1
 	github.com/goraz/onion v0.1.3
-	github.com/nats-io/nats.go v1.37.0
-	github.com/nwtgck/go-fakelish v0.1.3
-	github.com/rs/zerolog v1.33.0
-	github.com/tidwall/gjson v1.17.1
-	gopkg.in/yaml.v3 v3.0.1
-	k8s.io/client-go v0.30.3
+	github.com/nats-io/nats.go v1.42.0
+	github.com/rs/zerolog v1.34.0
+	go.mongodb.org/mongo-driver v1.17.3
 )

 require (
-	cloud.o-forge.io/core/oc-lib v0.0.0-20250205160221-88b7cfe2fd0f // indirect
-	github.com/Klathmon/StructToMap v0.0.0-20140724123129-3d0229e2dce7 // indirect
-	github.com/antihax/optional v1.0.0 // indirect
-	github.com/aws/aws-sdk-go v1.36.29 // indirect
+	github.com/beego/beego/v2 v2.3.8 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/biter777/countries v1.7.5 // indirect
-	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
-	github.com/gabriel-vasile/mimetype v1.4.6 // indirect
-	github.com/go-logr/logr v1.4.1 // indirect
-	github.com/go-openapi/jsonpointer v0.19.6 // indirect
-	github.com/go-openapi/jsonreference v0.20.2 // indirect
-	github.com/go-openapi/swag v0.22.3 // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/gabriel-vasile/mimetype v1.4.9 // indirect
 	github.com/go-playground/locales v0.14.1 // indirect
 	github.com/go-playground/universal-translator v0.18.1 // indirect
-	github.com/go-playground/validator/v10 v10.22.1 // indirect
-	github.com/go-stack/stack v1.8.0 // indirect
-	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/golang/snappy v0.0.4 // indirect
-	github.com/google/gnostic-models v0.6.8 // indirect
-	github.com/google/gofuzz v1.2.0 // indirect
+	github.com/go-playground/validator/v10 v10.26.0 // indirect
+	github.com/golang/snappy v1.0.0 // indirect
 	github.com/google/uuid v1.6.0 // indirect
-	github.com/hashicorp/golang-lru v0.5.4 // indirect
-	github.com/imdario/mergo v0.3.8 // indirect
-	github.com/jmespath/go-jmespath v0.4.0 // indirect
-	github.com/josharian/intern v1.0.0 // indirect
-	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.17.11 // indirect
+	github.com/hashicorp/golang-lru v1.0.2 // indirect
+	github.com/klauspost/compress v1.18.0 // indirect
+	github.com/kr/text v0.2.0 // indirect
 	github.com/leodido/go-urn v1.4.0 // indirect
-	github.com/mailru/easyjson v0.7.7 // indirect
-	github.com/marcinwyszynski/geopoint v0.0.0-20140302213024-cf2a6f750c5b // indirect
-	github.com/mattn/go-colorable v0.1.13 // indirect
+	github.com/mattn/go-colorable v0.1.14 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
-	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/montanaflynn/stats v0.7.1 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
-	github.com/nats-io/jwt v0.3.2 // indirect
-	github.com/nats-io/nkeys v0.4.7 // indirect
+	github.com/nats-io/nkeys v0.4.11 // indirect
 	github.com/nats-io/nuid v1.0.1 // indirect
-	github.com/pkg/errors v0.9.1 // indirect
-	github.com/prometheus/client_golang v1.19.0 // indirect
-	github.com/prometheus/client_model v0.5.0 // indirect
-	github.com/prometheus/common v0.48.0 // indirect
-	github.com/prometheus/procfs v0.12.0 // indirect
+	github.com/prometheus/client_golang v1.22.0 // indirect
+	github.com/prometheus/client_model v0.6.2 // indirect
+	github.com/prometheus/common v0.64.0 // indirect
+	github.com/prometheus/procfs v0.16.1 // indirect
 	github.com/robfig/cron v1.2.0 // indirect
-	github.com/robfig/cron/v3 v3.0.1 // indirect
-	github.com/shiena/ansicolor v0.0.0-20200904210342-c7312218db18 // indirect
-	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/tidwall/match v1.1.1 // indirect
-	github.com/tidwall/pretty v1.2.0 // indirect
-	github.com/ugorji/go/codec v1.1.7 // indirect
-	github.com/vk496/cron v1.2.0 // indirect
+	github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 // indirect
 	github.com/xdg-go/pbkdf2 v1.0.0 // indirect
 	github.com/xdg-go/scram v1.1.2 // indirect
 	github.com/xdg-go/stringprep v1.0.4 // indirect
-	github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c // indirect
-	github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc // indirect
 	github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
-	go.mongodb.org/mongo-driver v1.17.1 // indirect
-	golang.org/x/crypto v0.28.0 // indirect
-	golang.org/x/net v0.30.0 // indirect
-	golang.org/x/oauth2 v0.16.0 // indirect
-	golang.org/x/sync v0.8.0 // indirect
-	golang.org/x/sys v0.26.0 // indirect
-	golang.org/x/term v0.25.0 // indirect
-	golang.org/x/text v0.19.0 // indirect
-	golang.org/x/time v0.3.0 // indirect
-	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/protobuf v1.35.1 // indirect
-	gopkg.in/inf.v0 v0.9.1 // indirect
-	gopkg.in/yaml.v2 v2.4.0 // indirect
-	k8s.io/api v0.30.3 // indirect
-	k8s.io/apimachinery v0.30.3 // indirect
-	k8s.io/klog/v2 v2.120.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
-	k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
-	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
-	sigs.k8s.io/yaml v1.3.0 // indirect
+	golang.org/x/crypto v0.38.0 // indirect
+	golang.org/x/net v0.40.0 // indirect
+	golang.org/x/sync v0.14.0 // indirect
+	golang.org/x/sys v0.33.0 // indirect
+	golang.org/x/text v0.25.0 // indirect
+	google.golang.org/protobuf v1.36.6 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
(deleted file, 59 lines)

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Note: the example only works with the code within the same release/branch.
package k8s

import (
	"flag"
	"path/filepath"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
	//
	// Uncomment to load all auth plugins
	// _ "k8s.io/client-go/plugin/pkg/client/auth"
	//
	// Or uncomment to load specific auth plugins
	// _ "k8s.io/client-go/plugin/pkg/client/auth/azure"
	// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
	// _ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
)

func NewK8SClient() *kubernetes.Clientset {
	var kubeconfig *string
	if home := homedir.HomeDir(); home != "" {
		kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
	} else {
		kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
	}
	flag.Parse()

	// use the current context in kubeconfig
	config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		panic(err.Error())
	}

	// create the clientset
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err.Error())
	}

	return clientset
}
main.go (51 changed lines)
@@ -1,23 +1,57 @@
 package main

 import (
-	"fmt"
-
 	"oc-schedulerd/conf"
 	"oc-schedulerd/daemons"
+	"os"

 	oclib "cloud.o-forge.io/core/oc-lib"
 )

 func main() {
 	oclib.InitDaemon("oc-schedulerd")
-	oclib.SetConfig(
-		conf.GetConfig().MongoUrl,
-		conf.GetConfig().DBName,
-		conf.GetConfig().NatsUrl,
-		conf.GetConfig().LokiUrl,
-		conf.GetConfig().Logs,
+	l := oclib.GetLogger()
+	o := oclib.GetConfLoader()
+
+	c := oclib.SetConfig(
+		o.GetStringDefault("MONGO_URL", "mongodb://127.0.0.1:27017"),
+		o.GetStringDefault("MONGO_DATABASE", "DC_myDC"),
+		o.GetStringDefault("NATS_URL", "nats://localhost:4222"),
+		o.GetStringDefault("LOKI_URL", ""),
+		o.GetStringDefault("LOG_LEVEL", "info"),
 	)

+	conf.GetConfig().DBName = c.MongoDatabase
+	conf.GetConfig().MongoUrl = c.MongoUrl
+	conf.GetConfig().NatsUrl = c.NATSUrl
+	conf.GetConfig().LokiUrl = c.LokiUrl
+	conf.GetConfig().Mode = o.GetStringDefault("MODE", "")
+
+	if conf.GetConfig().Mode == "container" {
+		conf.GetConfig().KubeHost = o.GetStringDefault("KUBERNETES_SERVICE_HOST", os.Getenv("KUBERNETES_SERVICE_HOST"))
+		conf.GetConfig().KubePort = o.GetStringDefault("KUBERNETES_SERVICE_PORT", "6443")
+
+		conf.GetConfig().KubeCA = o.GetStringDefault("KUBE_CA", os.Getenv("KUBE_CA"))
+		conf.GetConfig().KubeCert = o.GetStringDefault("KUBE_CERT", os.Getenv("KUBE_CERT"))
+		conf.GetConfig().KubeData = o.GetStringDefault("KUBE_DATA", os.Getenv("KUBE_DATA"))
+	}
+
+	// Test if the oc-monitord binary is reachable
+	// For local executions
+	if _, err := os.Stat("../oc-monitord/oc-monitord"); err == nil {
+		conf.GetConfig().MonitorPath = "../oc-monitord/oc-monitord"
+	}
+	// For container executions
+	if _, err := os.Stat("/usr/bin/oc-monitord"); conf.GetConfig().MonitorPath == "" && err == nil {
+		conf.GetConfig().MonitorPath = "/usr/bin/oc-monitord"
+	}
+
+	if conf.GetConfig().MonitorPath == "" {
+		l.Fatal().Msg("Could not find oc-monitord binary")
+	}
+
+	l.Info().Msg("oc-monitord binary at " + conf.GetConfig().MonitorPath)
+
 	sch_mngr := daemons.ScheduleManager{Logger: oclib.GetLogger()}
 	exe_mngr := daemons.ExecutionManager{}

@@ -26,5 +60,4 @@ func main() {

 	exe_mngr.RetrieveNextExecutions()

-	fmt.Print("stop")
 }
oc-schedulerd (binary file not shown)
schedulerd.json (new file, 12 lines)

{
"LOKI_URL" : "http://172.16.0.181:3100",
"MONGO_URL":"mongodb://172.16.0.181:27017/",
"NATS_URL":"nats://172.16.0.181:4222",
"MONGO_DATABASE":"DC_myDC",
"MONITORD_PATH": "../oc-monitord/oc-monitord",
"KUBERNETES_SERVICE_HOST" : "172.16.0.181",
"MODE": "container",
"KUBE_CA": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlRENDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTkRNMk56UTNPRGt3SGhjTk1qVXdOREF6TVRBd05qSTVXaGNOTXpVd05EQXhNVEF3TmpJNQpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTkRNMk56UTNPRGt3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFUL1NDWEMycjFTWGdza0FvTGJKSEtIem4zQXYva2t0ZElpSk42WlBsWVEKY3p0dXV5K3JBMHJ5VUlkZnIyK3VCRS9VN0NjSlhPL004QVdyODFwVklzVmdvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVVFHOVBQQ0g0c1lMbFkvQk5CdnN5CklEam1PK0l3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQUtJeFc4NERQTW1URXVVN0Z3ek44SFB6ZHdldWh6U20KVzNYMU9tczFSQVNRQWlFQXI4UTJZSGtNQndSOThhcWtTa2JqU1dhejg0OEY2VkZLWjFacXpNbDFZaTg9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K",
"KUBE_CERT": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrVENDQVRlZ0F3SUJBZ0lJZWFxQUp2bHhmYzh3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOelF6TmpjME56ZzVNQjRYRFRJMU1EUXdNekV3TURZeU9Wb1hEVEkyTURRdwpNekV3TURZeU9Wb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJJelpGSlJUVHJmYXlNNFoKTjlRclN4MC9wbDdoZGdvWFM5bGEydmFFRkhlYVFaalRML2NZd1dMUnhoOWVOa01SRDZjTk4reWZkSXE2aWo1SQo5RTlENGdLalNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCVFFzUkZXUlNweDV0RGZnZDh1UTdweUw0ZERMVEFLQmdncWhrak9QUVFEQWdOSUFEQkYKQWlFQStXZTlBVXJRUm5pWjVCUERELzJwWjA3TzFQWWFIc01ycTZZcVB4VlV5cGdDSUhrRE8rcVlMYUhkUEhXZgpWUGszNXJmejM0Qk4xN2VyaEVxRjF0U0c1MWFqCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUzTkRNMk56UTNPRGt3SGhjTk1qVXdOREF6TVRBd05qSTVXaGNOTXpVd05EQXhNVEF3TmpJNQpXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUzTkRNMk56UTNPRGt3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFUNDF1NXIzM0JyenZ3ZXZaWHM2TEg3T1k4NGhOOGRrODdnTlhaUndBdWkKdXJBaU45TFdYcmYxeFoyaXp5d0FiVGk1ZVc2Q1hIMjhDdEVSWUlrcjNoTXdvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVTBMRVJWa1VxY2ViUTM0SGZMa082CmNpK0hReTB3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUloQUpLWGZLdXBzdklONEtQVW50c1lPNXhiaGhSQmhSYlIKN3JyeWs2VHpZMU5JQWlBVktKWis3UUxzeGFyQktORnI3eTVYYlNGanI3Y1gyQmhOYy9wdnFLcWtFUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K",
"KUBE_DATA": "LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUVJd01wVjdzMHc2S0VTQ2FBWDhvSVZPUHloa2U0Q3duNWZQZnhOaUYyM3JvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFak5rVWxGTk90OXJJemhrMzFDdExIVCttWHVGMkNoZEwyVnJhOW9RVWQ1cEJtTk12OXhqQgpZdEhHSDE0MlF4RVBwdzAzN0o5MGlycUtQa2owVDBQaUFnPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo="
}
(deleted binary file, 665 B, not shown)
(deleted binary file, 628 B, not shown)
(deleted file, 60 lines)

<!-- HTML for static distribution bundle build -->
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="UTF-8">
    <title>Swagger UI</title>
    <link rel="stylesheet" type="text/css" href="./swagger-ui.css" />
    <link rel="icon" type="image/png" href="./favicon-32x32.png" sizes="32x32" />
    <link rel="icon" type="image/png" href="./favicon-16x16.png" sizes="16x16" />
    <style>
      html
      {
        box-sizing: border-box;
        overflow: -moz-scrollbars-vertical;
        overflow-y: scroll;
      }

      *,
      *:before,
      *:after
      {
        box-sizing: inherit;
      }

      body
      {
        margin:0;
        background: #fafafa;
      }
    </style>
  </head>

  <body>
    <div id="swagger-ui"></div>

    <script src="./swagger-ui-bundle.js" charset="UTF-8"> </script>
    <script src="./swagger-ui-standalone-preset.js" charset="UTF-8"> </script>
    <script>
    window.onload = function() {
      // Begin Swagger UI call region
      const ui = SwaggerUIBundle({
        url: "https://petstore.swagger.io/v2/swagger.json",
        dom_id: '#swagger-ui',
        deepLinking: true,
        presets: [
          SwaggerUIBundle.presets.apis,
          SwaggerUIStandalonePreset
        ],
        plugins: [
          SwaggerUIBundle.plugins.DownloadUrl
        ],
        layout: "StandaloneLayout"
      });
      // End Swagger UI call region

      window.ui = ui;
    };
  </script>
  </body>
</html>
(deleted file, 79 lines)

<!doctype html>
<html lang="en-US">
<head>
    <title>Swagger UI: OAuth2 Redirect</title>
</head>
<body>
<script>
    'use strict';
    function run () {
        var oauth2 = window.opener.swaggerUIRedirectOauth2;
        var sentState = oauth2.state;
        var redirectUrl = oauth2.redirectUrl;
        var isValid, qp, arr;

        if (/code|token|error/.test(window.location.hash)) {
            qp = window.location.hash.substring(1);
        } else {
            qp = location.search.substring(1);
        }

        arr = qp.split("&");
        arr.forEach(function (v,i,_arr) { _arr[i] = '"' + v.replace('=', '":"') + '"';});
        qp = qp ? JSON.parse('{' + arr.join() + '}',
                function (key, value) {
                    return key === "" ? value : decodeURIComponent(value);
                }
        ) : {};

        isValid = qp.state === sentState;

        if ((
            oauth2.auth.schema.get("flow") === "accessCode" ||
            oauth2.auth.schema.get("flow") === "authorizationCode" ||
            oauth2.auth.schema.get("flow") === "authorization_code"
        ) && !oauth2.auth.code) {
            if (!isValid) {
                oauth2.errCb({
                    authId: oauth2.auth.name,
                    source: "auth",
                    level: "warning",
                    message: "Authorization may be unsafe, passed state was changed in server Passed state wasn't returned from auth server"
                });
            }

            if (qp.code) {
                delete oauth2.state;
                oauth2.auth.code = qp.code;
                oauth2.callback({auth: oauth2.auth, redirectUrl: redirectUrl});
            } else {
                let oauthErrorMsg;
                if (qp.error) {
                    oauthErrorMsg = "["+qp.error+"]: " +
                        (qp.error_description ? qp.error_description+ ". " : "no accessCode received from the server. ") +
                        (qp.error_uri ? "More info: "+qp.error_uri : "");
                }

                oauth2.errCb({
                    authId: oauth2.auth.name,
                    source: "auth",
                    level: "error",
                    message: oauthErrorMsg || "[Authorization failed]: no accessCode received from the server"
                });
            }
        } else {
            oauth2.callback({auth: oauth2.auth, token: qp, isValid: isValid, redirectUrl: redirectUrl});
        }
        window.close();
    }

    if (document.readyState !== 'loading') {
        run();
    } else {
        document.addEventListener('DOMContentLoaded', function () {
            run();
        });
    }
</script>
</body>
</html>
File diff suppressed because one or more lines are too long (10 files).