Datacenter no longer handles booking but is fully in charge of Kube & MinIO allocation via NATS
This commit is contained in:
@@ -1,601 +0,0 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"oc-datacenter/conf"
|
||||
"oc-datacenter/infrastructure"
|
||||
"oc-datacenter/models"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
oclib "cloud.o-forge.io/core/oc-lib"
|
||||
|
||||
beego "github.com/beego/beego/v2/server/web"
|
||||
jwt "github.com/golang-jwt/jwt/v5"
|
||||
"gopkg.in/yaml.v2"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
)
|
||||
|
||||
// KubeInfo carries the connection material for a Kubernetes API server:
// its URL plus the CA, client certificate and client key.
// All fields are pointers so that an absent value can be distinguished
// from an empty string.
type KubeInfo struct {
	Url      *string // API server URL
	KubeCA   *string // cluster certificate authority
	KubeCert *string // client certificate
	KubeKey  *string // client private key
}
|
||||
|
||||
// RemoteKubeconfig is the request body received when creating a secret
// from a remote cluster's kubeconfig. Data is a pointer so a missing
// field can be detected after JSON unmarshalling.
type RemoteKubeconfig struct {
	Data *string
}
|
||||
|
||||
// KubeUser mirrors one entry of the "users" list of a kubeconfig,
// keeping only the user name and its bearer token.
type KubeUser struct {
	Name string
	User struct {
		Token string
	}
}
|
||||
|
||||
// KubeconfigToken is a YAML mapping of a token-authenticated kubeconfig
// file (clusters, contexts and users sections), used when decoding a
// kubeconfig stored in a Kubernetes secret.
type KubeconfigToken struct {
	ApiVersion     string `yaml:"apiVersion"`
	Kind           string `yaml:"kind"`
	Preferences    string `yaml:"preferences"`
	CurrentContext string `yaml:"current-context"`
	// Clusters lists the reachable API servers with their CA material.
	Clusters []struct {
		Cluster struct {
			CA     string `yaml:"certificate-authority-data"`
			Server string `yaml:"server"`
		} `yaml:"cluster"`
		Name string `yaml:"name"`
	} `yaml:"clusters"`
	// Contexts binds a cluster to a user under a context name.
	Contexts []struct {
		Context struct {
			Cluster string `yaml:"cluster"`
			User    string `yaml:"user"`
		} `yaml:"context"`
		Name string `yaml:"name"`
	} `yaml:"contexts"`
	// Users holds the token credentials for each named user.
	Users []struct {
		Name string `yaml:"name"`
		User struct {
			Token string `yaml:"token"`
		} `yaml:"user"`
	} `yaml:"users"`
}
|
||||
|
||||
// Operations about the admiralty objects of the datacenter.
// AdmiraltyController exposes the REST endpoints that manage Admiralty
// sources, targets, secrets and nodes for workflow executions.
type AdmiraltyController struct {
	beego.Controller
}
|
||||
|
||||
// @Title GetAllTargets
|
||||
// @Description find all Admiralty Target
|
||||
// @Success 200
|
||||
// @router /targets [get]
|
||||
func (c *AdmiraltyController) GetAllTargets() {
|
||||
serv, err := infrastructure.NewService()
|
||||
if err != nil {
|
||||
// change code to 500
|
||||
HandleControllerErrors(c.Controller, 500, &err, nil)
|
||||
// c.Ctx.Output.SetStatus(500)
|
||||
// c.ServeJSON()
|
||||
// c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
return
|
||||
}
|
||||
|
||||
res, err := serv.GetTargets(c.Ctx.Request.Context())
|
||||
c.Data["json"] = res
|
||||
c.ServeJSON()
|
||||
}
|
||||
|
||||
// @Title GetOneTarget
|
||||
// @Description find one Admiralty Target
|
||||
// @Param id path string true "the name of the target to get"
|
||||
// @Success 200
|
||||
// @router /targets/:execution [get]
|
||||
func (c *AdmiraltyController) GetOneTarget() {
|
||||
id := c.Ctx.Input.Param(":execution")
|
||||
serv, err := infrastructure.NewService()
|
||||
if err != nil {
|
||||
// change code to 500
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.ServeJSON()
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
return
|
||||
}
|
||||
|
||||
res, err := serv.GetTargets(c.Ctx.Request.Context())
|
||||
if err != nil {
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.ServeJSON()
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
return
|
||||
}
|
||||
id = "target-" + id
|
||||
found := slices.Contains(res, id)
|
||||
if !found {
|
||||
c.Ctx.Output.SetStatus(404)
|
||||
c.ServeJSON()
|
||||
}
|
||||
|
||||
c.Data["json"] = id
|
||||
c.ServeJSON()
|
||||
}
|
||||
|
||||
// @Title DeleteAdmiraltySession
|
||||
// @Description find one Admiralty Target
|
||||
// @Param execution path string true "the name of the target to get"
|
||||
// @Success 200
|
||||
// @router /targets/:execution [delete]
|
||||
func (c *AdmiraltyController) DeleteAdmiraltySession() {
|
||||
id := c.Ctx.Input.Param(":execution")
|
||||
serv, err := infrastructure.NewService()
|
||||
if err != nil {
|
||||
// change code to 500
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.ServeJSON()
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
return
|
||||
}
|
||||
|
||||
err = serv.DeleteNamespace(c.Ctx.Request.Context(), id)
|
||||
if err != nil {
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.ServeJSON()
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
return
|
||||
}
|
||||
|
||||
c.Data["json"] = id
|
||||
c.ServeJSON()
|
||||
}
|
||||
|
||||
// @Title CreateAdmiraltySource
|
||||
// @Description Create an Admiralty Source on remote cluster
|
||||
// @Param execution path string true "execution id of the workflow"
|
||||
// @Success 201
|
||||
// @router /source/:execution [post]
|
||||
func (c *AdmiraltyController) CreateAdmiraltySource() {
|
||||
|
||||
execution := c.Ctx.Input.Param(":execution")
|
||||
fmt.Println("execution :: ", execution)
|
||||
fmt.Println("input :: ", c.Ctx.Input)
|
||||
serv, err := infrastructure.NewKubernetesService()
|
||||
if err != nil {
|
||||
// change code to 500
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.ServeJSON()
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
res, err := serv.CreateAdmiraltySource(c.Ctx.Request.Context(), execution)
|
||||
if err != nil {
|
||||
if apierrors.IsAlreadyExists(err) {
|
||||
c.Ctx.Output.SetStatus(409)
|
||||
c.Data["json"] = map[string]string{"info": "A source already exists for this namespace : " + execution}
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
// change code to 500
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
// TODO : Return a description of the created resource
|
||||
var respData map[string]interface{}
|
||||
err = json.Unmarshal(res, &respData)
|
||||
if err != nil {
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.ServeJSON()
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
return
|
||||
}
|
||||
c.Ctx.Output.SetStatus(201)
|
||||
c.Data["json"] = respData
|
||||
c.ServeJSON()
|
||||
|
||||
}
|
||||
|
||||
// @Title CreateAdmiraltyTarget
|
||||
// @Description Create an Admiralty Target in the namespace associated to the executionID
|
||||
// @Param execution path string true "execution id of the workflow"
|
||||
// @Param peer path string true "peerId of the peer the target points to"
|
||||
// @Success 201
|
||||
// @router /target/:execution/:peer [post]
|
||||
func (c *AdmiraltyController) CreateAdmiraltyTarget() {
|
||||
var data map[string]interface{}
|
||||
|
||||
execution := c.Ctx.Input.Param(":execution")
|
||||
peerId := c.Ctx.Input.Param(":peer")
|
||||
|
||||
if execution == "" || peerId == "" {
|
||||
c.Ctx.Output.SetStatus(400)
|
||||
c.Data["json"] = map[string]string{"error": "parameters can be empty " + "execution: " + execution + " peer: " + peerId}
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
serv, err := infrastructure.NewService()
|
||||
if err != nil {
|
||||
// change code to 500
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := serv.CreateAdmiraltyTarget(c.Ctx.Request.Context(), execution, peerId)
|
||||
if err != nil {
|
||||
// change code to 500
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
if resp == nil {
|
||||
fmt.Println("Error while trying to create Admiralty target")
|
||||
fmt.Println(resp)
|
||||
fmt.Println(err)
|
||||
c.Ctx.Output.SetStatus(401)
|
||||
c.Data["json"] = map[string]string{"error": "Could not perform the action"}
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
err = json.Unmarshal(resp, &data)
|
||||
if err != nil {
|
||||
// change code to 500
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.ServeJSON()
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
return
|
||||
}
|
||||
c.Ctx.Output.SetStatus(201)
|
||||
c.Data["json"] = data
|
||||
c.ServeJSON()
|
||||
|
||||
}
|
||||
|
||||
// @Title GetKubeSecret
|
||||
// @Description Retrieve the secret created from a Kubeconfig that will be associated to an Admiralty Target
|
||||
// @Param execution path string true "execution id of the workflow"
|
||||
// @Param peer path string true "UUID of the peer to which the resource is linked"
|
||||
// @Success 200
|
||||
// @router /secret/:execution/:peer [get]
|
||||
func (c *AdmiraltyController) GetKubeSecret() {
|
||||
var data map[string]interface{}
|
||||
|
||||
execution := c.Ctx.Input.Param(":execution")
|
||||
peerId := c.Ctx.Input.Param(":peer")
|
||||
|
||||
serv, err := infrastructure.NewService()
|
||||
if err != nil {
|
||||
// change code to 500
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := serv.GetKubeconfigSecret(c.Ctx.Request.Context(), execution, peerId)
|
||||
if err != nil {
|
||||
// change code to 500
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
if resp == nil {
|
||||
c.Ctx.Output.SetStatus(404)
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
err = json.Unmarshal(resp, &data)
|
||||
if err != nil {
|
||||
// change code to 500
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.ServeJSON()
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
return
|
||||
}
|
||||
|
||||
c.Data["json"] = data
|
||||
c.ServeJSON()
|
||||
}
|
||||
|
||||
// @Title CreateKubeSecret
|
||||
// @Description Creat a secret from a Kubeconfig that will be associated to an Admiralty Target
|
||||
|
||||
// @Param execution path string true "execution id of the workflow"
|
||||
// @Param peer path string true "UUID of the peer to which the resource is linked"
|
||||
// @Param kubeconfig body controllers.RemoteKubeconfig true "Kubeconfig to use when creating secret"
|
||||
// @Success 201
|
||||
// @router /secret/:execution/:peer [post]
|
||||
func (c *AdmiraltyController) CreateKubeSecret() {
|
||||
var kubeconfig RemoteKubeconfig
|
||||
var respData map[string]interface{}
|
||||
|
||||
data := c.Ctx.Input.CopyBody(100000)
|
||||
|
||||
err := json.Unmarshal(data, &kubeconfig)
|
||||
if err != nil {
|
||||
fmt.Println("Error when retrieving the data for kubeconfig from request")
|
||||
fmt.Println(err)
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
execution := c.Ctx.Input.Param(":execution")
|
||||
peerId := c.Ctx.Input.Param(":peer")
|
||||
|
||||
serv, err := infrastructure.NewService()
|
||||
if err != nil {
|
||||
// change code to 500
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := serv.CreateKubeconfigSecret(c.Ctx.Request.Context(), *kubeconfig.Data, execution, peerId)
|
||||
if err != nil {
|
||||
// change code to 500
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
err = json.Unmarshal(resp, &respData)
|
||||
c.Ctx.Output.SetStatus(201)
|
||||
c.Data["json"] = respData
|
||||
c.ServeJSON()
|
||||
|
||||
}
|
||||
|
||||
// @name GetAdmiraltyNodes
|
||||
// @description Allows user to test if an admiralty connection has already been established : Target and valid Secret set up on the local host and Source set up on remote host
|
||||
// @Param execution path string true "execution id of the workflow"
|
||||
// @Param peer path string true "UUID of the peer to which the resource is linked"
|
||||
// @Success 200
|
||||
// @router /node/:execution/:peer [get]
|
||||
func (c *AdmiraltyController) GetNodeReady() {
|
||||
var secret v1.Secret
|
||||
execution := c.Ctx.Input.Param(":execution")
|
||||
peerId := c.Ctx.Input.Param(":peer")
|
||||
|
||||
serv, err := infrastructure.NewService()
|
||||
if err != nil {
|
||||
// change code to 500
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
node, err := serv.GetOneNode(c.Ctx.Request.Context(), execution, peerId)
|
||||
if err != nil {
|
||||
// change code to 500
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
if node == nil {
|
||||
c.Ctx.Output.SetStatus(404)
|
||||
c.Data["json"] = map[string]string{
|
||||
"node": "the node for " + execution + " can't be found, make sure both target and source resources are set up on local and remote hosts",
|
||||
}
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := serv.GetKubeconfigSecret(c.Ctx.Request.Context(), execution, peerId)
|
||||
if err != nil {
|
||||
// change code to 500
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
if resp == nil {
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.Data["json"] = map[string]string{"error": "Nodes was up but the secret can't be found"}
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
// Extract JWT token RS265 encoded
|
||||
var editedKubeconfig map[string]interface{}
|
||||
json.Unmarshal(resp, &secret)
|
||||
byteEditedKubeconfig := secret.Data["config"]
|
||||
err = yaml.Unmarshal(byteEditedKubeconfig, &editedKubeconfig)
|
||||
// err = json.Unmarshal(byteEditedKubeconfig,&editedKubeconfig)
|
||||
if err != nil {
|
||||
fmt.Println("Error while retrieving the kubeconfig from secret-", execution)
|
||||
fmt.Println(err)
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.Data["json"] = err
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
token, err := retrieveTokenFromKonfig(editedKubeconfig)
|
||||
if err != nil {
|
||||
fmt.Println("Error while trying to retrieve token for kubeconfing")
|
||||
fmt.Println(err)
|
||||
HandleControllerErrors(c.Controller, 500, &err, nil)
|
||||
}
|
||||
|
||||
// Decode token
|
||||
isExpired, err := isTokenExpired(token)
|
||||
if err != nil {
|
||||
fmt.Println("Error veryfing token's expiration")
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.Data["json"] = err
|
||||
c.ServeJSON()
|
||||
}
|
||||
|
||||
if *isExpired {
|
||||
c.Data["json"] = map[string]interface{}{
|
||||
"token": "token in the secret is expired and must be regenerated",
|
||||
"node": node,
|
||||
}
|
||||
c.Ctx.Output.SetStatus(410)
|
||||
c.ServeJSON()
|
||||
}
|
||||
|
||||
c.Data["json"] = map[string]interface{}{"node": node, "token": true}
|
||||
c.ServeJSON()
|
||||
|
||||
}
|
||||
|
||||
func retrieveTokenFromKonfig(editedKubeconfig map[string]interface{}) (string, error) {
|
||||
var kubeUsers []KubeUser
|
||||
b, err := yaml.Marshal(editedKubeconfig["users"])
|
||||
if err != nil {
|
||||
fmt.Println("Error while retrieving the users attribute from the Kubeconfig")
|
||||
fmt.Println(err)
|
||||
return "", err
|
||||
}
|
||||
err = yaml.Unmarshal(b, &kubeUsers)
|
||||
if err != nil {
|
||||
fmt.Println("Error while unmarshalling users attribute from kubeconfig")
|
||||
fmt.Println(err)
|
||||
return "", nil
|
||||
}
|
||||
fmt.Println(kubeUsers)
|
||||
token := kubeUsers[0].User.Token
|
||||
|
||||
return token, nil
|
||||
}
|
||||
|
||||
func isTokenExpired(token string) (*bool, error) {
|
||||
logger := oclib.GetLogger()
|
||||
|
||||
t, _, err := new(jwt.Parser).ParseUnverified(token, jwt.MapClaims{})
|
||||
if err != nil {
|
||||
fmt.Println("couldn't decode token")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
expiration, err := t.Claims.GetExpirationTime()
|
||||
if err != nil {
|
||||
fmt.Println("Error while checking token's expiration time")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logger.Debug().Msg("Expiration date : " + expiration.UTC().Format("2006-01-02T15:04:05"))
|
||||
logger.Debug().Msg(fmt.Sprint("Now : ", time.Now().Unix()))
|
||||
logger.Debug().Msg(fmt.Sprint("Token : ", expiration.Unix()))
|
||||
|
||||
expired := expiration.Unix() < time.Now().Unix()
|
||||
|
||||
return &expired, nil
|
||||
}
|
||||
|
||||
// @name Get Admiralty Kubeconfig
|
||||
// @description Retrieve a kubeconfig from the host with the token to authenticate as the SA from the namespace identified with execution id
|
||||
|
||||
// @Param execution path string true "execution id of the workflow"
|
||||
// @Success 200
|
||||
// @router /kubeconfig/:execution [get]
|
||||
func (c *AdmiraltyController) GetAdmiraltyKubeconfig() {
|
||||
|
||||
execution := c.Ctx.Input.Param(":execution")
|
||||
|
||||
serv, err := infrastructure.NewService()
|
||||
if err != nil {
|
||||
// change code to 500
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
generatedToken, err := serv.GenerateToken(c.Ctx.Request.Context(), execution, 3600)
|
||||
if err != nil {
|
||||
fmt.Println("Couldn't generate a token for ns-", execution)
|
||||
fmt.Println(err)
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
kubeconfig, err := NewHostKubeWithToken(generatedToken)
|
||||
if err != nil {
|
||||
fmt.Println("Could not retrieve the Kubeconfig edited with token")
|
||||
fmt.Println(err)
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
c.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
b, err := json.Marshal(kubeconfig)
|
||||
if err != nil {
|
||||
fmt.Println("Error while marshalling kubeconfig")
|
||||
c.Ctx.Output.SetStatus(500)
|
||||
c.Data["json"] = map[string]string{"error": err.Error()}
|
||||
c.ServeJSON()
|
||||
}
|
||||
|
||||
encodedKubeconfig := base64.StdEncoding.EncodeToString(b)
|
||||
c.Data["json"] = map[string]string{
|
||||
"data": encodedKubeconfig,
|
||||
}
|
||||
json.NewEncoder(c.Ctx.ResponseWriter)
|
||||
c.ServeJSON()
|
||||
}
|
||||
|
||||
func NewHostKubeWithToken(token string) (*models.KubeConfigValue, error) {
|
||||
if len(token) == 0 {
|
||||
return nil, fmt.Errorf("you didn't provide a token to be inserted in the Kubeconfig")
|
||||
}
|
||||
|
||||
encodedCA := base64.StdEncoding.EncodeToString([]byte(conf.GetConfig().KubeCA))
|
||||
|
||||
hostKube := models.KubeConfigValue{
|
||||
APIVersion: "v1",
|
||||
CurrentContext: "default",
|
||||
Kind: "Config",
|
||||
Preferences: struct{}{},
|
||||
Clusters: []models.KubeconfigNamedCluster{
|
||||
{
|
||||
Name: "default",
|
||||
Cluster: models.KubeconfigCluster{
|
||||
Server: "https://" + conf.GetConfig().KubeHost + ":6443",
|
||||
CertificateAuthorityData: encodedCA,
|
||||
},
|
||||
},
|
||||
},
|
||||
Contexts: []models.KubeconfigNamedContext{
|
||||
{
|
||||
Name: "default",
|
||||
Context: models.KubeconfigContext{
|
||||
Cluster: "default",
|
||||
User: "default",
|
||||
},
|
||||
},
|
||||
},
|
||||
Users: []models.KubeconfigUser{
|
||||
{
|
||||
Name: "default",
|
||||
User: models.KubeconfigUserKeyPair{
|
||||
Token: token,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return &hostKube, nil
|
||||
}
|
||||
@@ -1,500 +0,0 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"oc-datacenter/infrastructure"
|
||||
"oc-datacenter/infrastructure/monitor"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
oclib "cloud.o-forge.io/core/oc-lib"
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
"cloud.o-forge.io/core/oc-lib/models/booking"
|
||||
"cloud.o-forge.io/core/oc-lib/models/utils"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
beego "github.com/beego/beego/v2/server/web"
|
||||
"github.com/gorilla/websocket"
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
)
|
||||
|
||||
// Operations about workspace.
// BookingController exposes the REST endpoints that search, create,
// update and extend bookings for datacenter resources.
type BookingController struct {
	beego.Controller
}
|
||||
|
||||
// BookingExample is an exported sample booking value — presumably used by
// generated API documentation as a payload example; verify against the
// swagger tooling before removing.
var BookingExample booking.Booking
|
||||
|
||||
// @Title Search
|
||||
// @Description search bookings by execution
|
||||
// @Param id path string true "id execution"
|
||||
// @Param is_draft query string false "draft wished"
|
||||
// @Success 200 {workspace} models.workspace
|
||||
// @router /search/execution/:id [get]
|
||||
func (o *BookingController) ExecutionSearch() {
|
||||
/*
|
||||
* This is a sample of how to use the search function
|
||||
* The search function is used to search for data in the database
|
||||
* The search function takes in a filter and a data type
|
||||
* The filter is a struct that contains the search parameters
|
||||
* The data type is an enum that specifies the type of data to search for
|
||||
* The search function returns a list of data that matches the filter
|
||||
* The data is then returned as a json object
|
||||
*/
|
||||
// store and return Id or post with UUID
|
||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
id := o.Ctx.Input.Param(":id")
|
||||
isDraft := o.Ctx.Input.Query("is_draft")
|
||||
f := dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{ // filter by name if no filters are provided
|
||||
"execution_id": {{Operator: dbs.EQUAL.String(), Value: id}},
|
||||
},
|
||||
}
|
||||
o.Data["json"] = oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), user, peerID, groups, nil).Search(&f, "", isDraft == "true")
|
||||
o.ServeJSON()
|
||||
}
|
||||
|
||||
// @Title Search
|
||||
// @Description search bookings
|
||||
// @Param start_date path string true "the word search you want to get"
|
||||
// @Param end_date path string true "the word search you want to get"
|
||||
// @Param is_draft query string false "draft wished"
|
||||
// @Success 200 {workspace} models.workspace
|
||||
// @router /search/:start_date/:end_date [get]
|
||||
func (o *BookingController) Search() {
|
||||
/*
|
||||
* This is a sample of how to use the search function
|
||||
* The search function is used to search for data in the database
|
||||
* The search function takes in a filter and a data type
|
||||
* The filter is a struct that contains the search parameters
|
||||
* The data type is an enum that specifies the type of data to search for
|
||||
* The search function returns a list of data that matches the filter
|
||||
* The data is then returned as a json object
|
||||
*/
|
||||
// store and return Id or post with UUID
|
||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
start_date, _ := time.Parse("2006-01-02", o.Ctx.Input.Param(":start_date"))
|
||||
end_date, _ := time.Parse("2006-01-02", o.Ctx.Input.Param(":end_date"))
|
||||
isDraft := o.Ctx.Input.Query("is_draft")
|
||||
sd := primitive.NewDateTimeFromTime(start_date)
|
||||
ed := primitive.NewDateTimeFromTime(end_date)
|
||||
f := dbs.Filters{
|
||||
And: map[string][]dbs.Filter{
|
||||
"execution_date": {{Operator: "gte", Value: sd}, {Operator: "lte", Value: ed}},
|
||||
},
|
||||
}
|
||||
o.Data["json"] = oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), user, peerID, groups, nil).Search(&f, "", isDraft == "true")
|
||||
o.ServeJSON()
|
||||
}
|
||||
|
||||
// @Title GetAll
|
||||
// @Description find booking by id
|
||||
// @Param is_draft query string false "draft wished"
|
||||
// @Success 200 {booking} models.booking
|
||||
// @router / [get]
|
||||
func (o *BookingController) GetAll() {
|
||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
isDraft := o.Ctx.Input.Query("is_draft")
|
||||
o.Data["json"] = oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), user, peerID, groups, nil).LoadAll(isDraft == "true")
|
||||
o.ServeJSON()
|
||||
}
|
||||
|
||||
// @Title Get
|
||||
// @Description find booking by id
|
||||
// @Param id path string true "the id you want to get"
|
||||
// @Success 200 {booking} models.booking
|
||||
// @router /:id [get]
|
||||
func (o *BookingController) Get() {
|
||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
id := o.Ctx.Input.Param(":id")
|
||||
o.Data["json"] = oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), user, peerID, groups, nil).LoadOne(id)
|
||||
o.ServeJSON()
|
||||
}
|
||||
|
||||
// upgrader promotes incoming HTTP requests to WebSocket connections for
// the log-streaming endpoint. CheckOrigin accepts every origin — review
// whether that is acceptable for production deployments.
var upgrader = websocket.Upgrader{
	CheckOrigin: func(r *http.Request) bool { return true }, // allow all origins
}
|
||||
|
||||
// @Title Log
// @Description find booking by id
// @Param id path string true "the id you want to get"
// @Success 200 {booking} models.booking
// @router /:id [get]
// NOTE(review): this @router pattern duplicates the one on Get above —
// confirm which handler beego actually binds to "/:id [get]".
func (o *BookingController) Log() {
	// user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
	id := o.Ctx.Input.Param(":id")
	// Upgrade the HTTP connection to a WebSocket for live streaming.
	conn, err := upgrader.Upgrade(o.Ctx.ResponseWriter, o.Ctx.Request, nil)
	if err != nil {
		o.Ctx.WriteString("WebSocket upgrade failed: " + err.Error())
		return
	}
	defer conn.Close()
	monitors, err := monitor.NewMonitorService()
	if err != nil {
		// NOTE(review): the connection has already been hijacked by the
		// upgrade here; writing through o.Ctx may be a no-op — verify.
		o.Ctx.WriteString("Monitor service unavailable: " + err.Error())
		return
	}
	// Register a cancellable stream context keyed by the booking id and
	// push monitor output to the socket once per second; Stream blocks
	// for the lifetime of the connection.
	ctx := monitor.StreamRegistry.Register(id)
	monitors.Stream(ctx, id, 1*time.Second, conn)
}
|
||||
|
||||
// @Title Update
|
||||
// @Description create computes
|
||||
// @Param id path string true "the compute id you want to get"
|
||||
// @Param body body models.compute true "The compute content"
|
||||
// @Success 200 {compute} models.compute
|
||||
// @router /:id [put]
|
||||
func (o *BookingController) Put() {
|
||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
// store and return Id or post with UUID
|
||||
var res map[string]interface{}
|
||||
id := o.Ctx.Input.Param(":id")
|
||||
book := oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), user, peerID, groups, nil).LoadOne(id)
|
||||
if book.Code != 200 {
|
||||
o.Data["json"] = map[string]interface{}{
|
||||
"data": nil,
|
||||
"code": book.Code,
|
||||
"error": book.Err,
|
||||
}
|
||||
o.ServeJSON()
|
||||
return
|
||||
}
|
||||
booking := book.Data.(*booking.Booking)
|
||||
if time.Now().After(booking.ExpectedStartDate) {
|
||||
o.Data["json"] = oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), user, peerID, groups, nil).UpdateOne(res, id)
|
||||
} else {
|
||||
o.Data["json"] = map[string]interface{}{
|
||||
"data": nil,
|
||||
"code": 409,
|
||||
"error": "booking is not already started",
|
||||
}
|
||||
}
|
||||
o.ServeJSON()
|
||||
}
|
||||
|
||||
// @Title Check
|
||||
// @Description check booking
|
||||
// @Param id path string "id of the datacenter"
|
||||
// @Param start_date path string "the booking start date" format "2006-01-02T15:04:05"
|
||||
// @Param end_date path string "the booking end date" format "2006-01-02T15:04:05"
|
||||
// @Param is_draft query string false "draft wished"
|
||||
// @Success 200 {object} models.object
|
||||
// @router /check/:id/:start_date/:end_date [get]
|
||||
func (o *BookingController) Check() {
|
||||
/*
|
||||
* This function is used to check if a booking is available for a specific datacenter.
|
||||
* It takes the following parameters:
|
||||
* - id: the id of the datacenter
|
||||
* - start_date: the start date of the booking/search/execution/:id
|
||||
* - end_date: the end date of the booking
|
||||
*/
|
||||
id := o.Ctx.Input.Param(":id")
|
||||
date, err := time.Parse("2006-01-02T15:04:05", o.Ctx.Input.Param(":start_date"))
|
||||
date2, err2 := time.Parse("2006-01-02T15:04:05", o.Ctx.Input.Param(":end_date"))
|
||||
if err != nil || err2 != nil {
|
||||
o.Data["json"] = map[string]interface{}{
|
||||
"data": map[string]interface{}{
|
||||
"is_available": false,
|
||||
},
|
||||
"code": 400,
|
||||
"error": errors.New("invalid date format"),
|
||||
}
|
||||
} else {
|
||||
bks := &booking.Booking{} // create a new booking object
|
||||
code := 200
|
||||
err := ""
|
||||
if isAvailable, err2 := bks.Check(id, date, &date2, 1); !isAvailable {
|
||||
code = 409
|
||||
err = "booking not available"
|
||||
if err2 != nil {
|
||||
err += " - " + err2.Error()
|
||||
}
|
||||
}
|
||||
o.Data["json"] = map[string]interface{}{
|
||||
"data": map[string]interface{}{
|
||||
"is_available": true,
|
||||
},
|
||||
"code": code,
|
||||
"error": err,
|
||||
}
|
||||
}
|
||||
o.ServeJSON()
|
||||
}
|
||||
|
||||
// @Title Post.
// @Description create booking
// @Param booking body string true "the booking you want to post"
// @Param is_draft query string false "draft wished"
// @Success 200 {object} models.object
// @router / [post]
func (o *BookingController) Post() {
	// Creates a booking from the JSON body: validates the referenced
	// compute resource (if any), replaces all previous bookings for the
	// same workflow/resource pair, stores the new one, schedules a monitor
	// stream for compute bookings, and creates the execution namespace.
	logger := oclib.GetLogger()
	logger.Info().Msg("Received a Booking")
	var resp booking.Booking
	user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
	// Body is capped at 10 MB.
	err := json.Unmarshal(o.Ctx.Input.CopyBody(10000000), &resp)
	if err != nil {
		o.Data["json"] = map[string]interface{}{
			"data":  nil,
			"code":  422,
			"error": err,
		}
		o.ServeJSON()
		return
	}

	if resp.ResourceType == tools.COMPUTE_RESOURCE {
		// later should check... health for any such as docker...
		// Verify that the referenced compute resource exists before booking it.
		res := oclib.NewRequest(oclib.LibDataEnum(oclib.COMPUTE_RESOURCE), user, peerID, groups, nil).LoadOne(resp.ResourceID)
		if res.Err != "" {
			o.Data["json"] = map[string]interface{}{
				"data":  nil,
				"code":  res.Code,
				"error": res.Err,
			}
			o.ServeJSON()
			return
		}
		/*serv, err := infrastructure.NewServiceByType(res.ToComputeResource().Infrastructure.String())
		if err != nil {
			o.Data["json"] = map[string]interface{}{
				"data": nil,
				"code": 500,
				"error": err,
			}
			o.ServeJSON()
			return
		}
		if err := serv.CheckHealth(); err != nil {
			o.Data["json"] = map[string]interface{}{
				"data": nil,
				"code": 500,
				"error": err,
			}
			o.ServeJSON()
			return
		}*/
	}
	// delete all previous bookings
	isDraft := o.Ctx.Input.Query("is_draft")
	// Find every existing booking for the same workflow/resource pair so
	// the new booking fully replaces them.
	res := oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), user, peerID, groups, nil).Search(&dbs.Filters{And: map[string][]dbs.Filter{
		"workflow_id": {{Operator: dbs.EQUAL.String(), Value: resp.WorkflowID}},
		"resource_id": {{Operator: dbs.EQUAL.String(), Value: resp.ResourceID}},
	}}, "", isDraft == "true")
	if res.Code != 200 {
		o.Data["json"] = map[string]interface{}{
			"data":  nil,
			"code":  res.Code,
			"error": res.Err,
		}
		o.ServeJSON()
		return
	}
	for _, b := range res.Data { // delete all previous bookings
		oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), user, peerID, groups, nil).DeleteOne(b.GetID())
	}
	b := oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), user, peerID, groups, nil).StoreOne(resp.Serialize(&resp))
	if b.Code != 200 {
		o.Data["json"] = map[string]interface{}{
			"data":  nil,
			"code":  b.Code,
			"error": b.Err,
		}
		o.ServeJSON()
		return
	}
	if b.Data.(*booking.Booking).ResourceType == tools.COMPUTE_RESOURCE {
		// Monitor-service failure is deliberately non-fatal: the booking
		// is already stored, monitoring is best-effort.
		monitors, monErr := monitor.NewMonitorService()
		if monErr == nil {
			// Fire-and-forget goroutine: sleep until the booking's expected
			// start, then begin streaming monitor data for its execution.
			// NOTE(review): nothing cancels this goroutine if the booking is
			// later deleted — confirm intended lifetime.
			go func() {
				time.Sleep(time.Until(b.Data.(*booking.Booking).ExpectedStartDate))
				ctx := monitor.StreamRegistry.Register(b.Data.(*booking.Booking).ExecutionsID)
				monitors.Stream(ctx, b.Data.GetID(), 1*time.Second, nil)
			}()
		}
	}

	logger.Info().Msg("Creating new namespace : " + resp.ExecutionsID)
	// Namespace creation failure is logged but does not fail the request.
	if err := o.createNamespace(resp.ExecutionsID); err != nil {
		logger.Debug().Msg("Error when creating a namespace")
		fmt.Println(err.Error())
	}

	o.Data["json"] = map[string]interface{}{
		"data":  b.Data,
		"code":  200,
		"error": "",
	}
	o.ServeJSON()
}
|
||||
|
||||
// @Title ExtendForNamespace
|
||||
// @Description ExtendForNamespace booking
|
||||
// @Param namespace path string "targetted namespace"
|
||||
// @Param resource_id path string "resource id"
|
||||
// @Param end_date path string "the booking end date" format "2006-01-02T15:04:05"
|
||||
// @Param is_draft query string false "draft wished"
|
||||
// @Success 200 {object} models.object
|
||||
// @router /extend/:resource_id/from_namespace/:namespace/to/:duration [post]
|
||||
func (o *BookingController) ExtendForNamespace() {
|
||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
resourceID := o.Ctx.Input.Param(":resource_id")
|
||||
namespace := o.Ctx.Input.Param(":namespace")
|
||||
duration, err := strconv.Atoi(o.Ctx.Input.Param(":duration"))
|
||||
if err != nil {
|
||||
o.Data["json"] = map[string]interface{}{
|
||||
"data": map[string]interface{}{
|
||||
"is_available": false,
|
||||
},
|
||||
"code": 400,
|
||||
"error": errors.New("invalid date format"),
|
||||
}
|
||||
} else {
|
||||
o.extend(duration, resourceID, "executions_id", namespace, user, peerID, groups)
|
||||
}
|
||||
}
|
||||
|
||||
// @Title ExtendForExecution
|
||||
// @Description ExtendForExecution booking
|
||||
// @Param namespace path string "targetted namespace"
|
||||
// @Param resource_id path string "resource id"
|
||||
// @Param end_date path string "the booking end date" format "2006-01-02T15:04:05"
|
||||
// @Param is_draft query string false "draft wished"
|
||||
// @Success 200 {object} models.object
|
||||
// @router /extend/:resource_id/from_execution/:execution_id/to/:duration [post]
|
||||
func (o *BookingController) ExtendForExecution() {
|
||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
resourceID := o.Ctx.Input.Param(":resource_id")
|
||||
executionID := o.Ctx.Input.Param(":execution_id")
|
||||
duration, err := strconv.Atoi(o.Ctx.Input.Param(":duration"))
|
||||
if err != nil {
|
||||
o.Data["json"] = map[string]interface{}{
|
||||
"data": map[string]interface{}{
|
||||
"is_available": false,
|
||||
},
|
||||
"code": 400,
|
||||
"error": errors.New("invalid date format"),
|
||||
}
|
||||
} else {
|
||||
o.extend(duration, resourceID, "execution_id", executionID, user, peerID, groups)
|
||||
}
|
||||
}
|
||||
|
||||
func (o *BookingController) extend(duration int, resourceID string, key string, namespace string, user string, peerID string, groups []string) {
|
||||
/*
|
||||
* This function is used to check if a booking is available for a specific datacenter.
|
||||
* It takes the following parameters:
|
||||
* - id: the id of the datacenter
|
||||
* - start_date: the start date of the booking/search/execution/:id
|
||||
* - end_date: the end date of the booking
|
||||
*/
|
||||
|
||||
req := oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), user, peerID, groups, nil)
|
||||
res := req.Search(&dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{
|
||||
"expected_end_date": {
|
||||
{Operator: dbs.GTE.String(), Value: time.Now()},
|
||||
{Operator: dbs.EQUAL.String(), Value: bson.TypeNull},
|
||||
},
|
||||
},
|
||||
And: map[string][]dbs.Filter{
|
||||
"resource_id": {{Operator: dbs.EQUAL.String(), Value: resourceID}},
|
||||
key: {{Operator: dbs.EQUAL.String(), Value: namespace}},
|
||||
"expected_start_date": {{Operator: dbs.GTE.String(), Value: time.Now()}},
|
||||
},
|
||||
}, "", false)
|
||||
if res.Err == "" && len(res.Data) > 0 {
|
||||
var id string
|
||||
datas := []utils.DBObject{}
|
||||
for _, b := range res.Data {
|
||||
book := b.(*booking.Booking)
|
||||
if book.ExpectedEndDate != nil {
|
||||
bb := book.ExpectedEndDate.Add(time.Duration(duration) * time.Second)
|
||||
if isAvailable, err := (&booking.Booking{}).Check(id, (*book.ExpectedEndDate).Add(1*time.Second), &bb, 1); isAvailable && err == nil {
|
||||
book.ExpectedEndDate = &bb
|
||||
result := req.UpdateOne(book.Serialize(book), book.GetID())
|
||||
datas = append(datas, result.Data)
|
||||
}
|
||||
}
|
||||
}
|
||||
o.Data["json"] = map[string]interface{}{
|
||||
"data": datas,
|
||||
"code": 200,
|
||||
"error": "",
|
||||
}
|
||||
o.ServeJSON()
|
||||
return
|
||||
} else if res.Err == "" {
|
||||
o.Data["json"] = map[string]interface{}{
|
||||
"data": map[string]interface{}{
|
||||
"is_available": false,
|
||||
},
|
||||
"code": 400,
|
||||
"error": errors.New("can't find any booking to extend"),
|
||||
}
|
||||
} else {
|
||||
o.Data["json"] = res
|
||||
}
|
||||
o.ServeJSON()
|
||||
}
|
||||
|
||||
func (o *BookingController) createNamespace(ns string) error {
|
||||
/*
|
||||
* This function is used to create a namespace.
|
||||
* It takes the following parameters:
|
||||
* - ns: the namespace you want to create
|
||||
*/
|
||||
logger := oclib.GetLogger()
|
||||
|
||||
serv, err := infrastructure.NewService()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
ok, err := serv.GetNamespace(o.Ctx.Request.Context(), ns)
|
||||
if ok != nil && err == nil {
|
||||
logger.Debug().Msg("A namespace with name " + ns + " already exists")
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = serv.CreateNamespace(o.Ctx.Request.Context(), ns)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = serv.CreateServiceAccount(o.Ctx.Request.Context(), ns)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
role := "argo-role"
|
||||
err = serv.CreateRole(o.Ctx.Request.Context(), ns, role,
|
||||
[][]string{
|
||||
{"coordination.k8s.io"},
|
||||
{""},
|
||||
{""}},
|
||||
[][]string{
|
||||
{"leases"},
|
||||
{"secrets"},
|
||||
{"pods"}},
|
||||
[][]string{
|
||||
{"get", "create", "update"},
|
||||
{"get"},
|
||||
{"patch"}})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return serv.CreateRoleBinding(o.Ctx.Request.Context(), ns, "argo-role-binding", role)
|
||||
}
|
||||
@@ -1,9 +1,14 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"oc-datacenter/infrastructure/monitor"
|
||||
"time"
|
||||
|
||||
oclib "cloud.o-forge.io/core/oc-lib"
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
beego "github.com/beego/beego/v2/server/web"
|
||||
"github.com/gorilla/websocket"
|
||||
)
|
||||
|
||||
// Operations about workspace
|
||||
@@ -19,12 +24,12 @@ type DatacenterController struct {
|
||||
func (o *DatacenterController) GetAll() {
|
||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
isDraft := o.Ctx.Input.Query("is_draft")
|
||||
storages := oclib.NewRequest(oclib.LibDataEnum(oclib.STORAGE_RESOURCE), user, peerID, groups, nil).Search(&dbs.Filters{
|
||||
storages := oclib.NewRequest(oclib.LibDataEnum(oclib.LIVE_STORAGE), user, peerID, groups, nil).Search(&dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{
|
||||
"abstractinstanciatedresource.abstractresource.abstractobject.creator_id": {{Operator: dbs.EQUAL.String(), Value: peerID}},
|
||||
},
|
||||
}, "", isDraft == "true")
|
||||
computes := oclib.NewRequest(oclib.LibDataEnum(oclib.COMPUTE_RESOURCE), user, peerID, groups, nil).Search(&dbs.Filters{
|
||||
computes := oclib.NewRequest(oclib.LibDataEnum(oclib.LIVE_DATACENTER), user, peerID, groups, nil).Search(&dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{
|
||||
"abstractinstanciatedresource.abstractresource.abstractobject.creator_id": {{Operator: dbs.EQUAL.String(), Value: peerID}},
|
||||
},
|
||||
@@ -47,14 +52,14 @@ func (o *DatacenterController) Get() {
|
||||
user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
isDraft := o.Ctx.Input.Query("is_draft")
|
||||
id := o.Ctx.Input.Param(":id")
|
||||
storages := oclib.NewRequest(oclib.LibDataEnum(oclib.STORAGE_RESOURCE), user, peerID, groups, nil).Search(&dbs.Filters{
|
||||
storages := oclib.NewRequest(oclib.LibDataEnum(oclib.LIVE_STORAGE), user, peerID, groups, nil).Search(&dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{
|
||||
"abstractinstanciatedresource.abstractresource.abstractobject.id": {{Operator: dbs.EQUAL.String(), Value: id}},
|
||||
"abstractinstanciatedresource.abstractresource.abstractobject.creator_id": {{Operator: dbs.EQUAL.String(), Value: peerID}},
|
||||
},
|
||||
}, "", isDraft == "true")
|
||||
if len(storages.Data) == 0 {
|
||||
computes := oclib.NewRequest(oclib.LibDataEnum(oclib.COMPUTE_RESOURCE), user, peerID, groups, nil).Search(&dbs.Filters{
|
||||
computes := oclib.NewRequest(oclib.LibDataEnum(oclib.LIVE_DATACENTER), user, peerID, groups, nil).Search(&dbs.Filters{
|
||||
Or: map[string][]dbs.Filter{
|
||||
"abstractinstanciatedresource.abstractresource.abstractobject.id": {{Operator: dbs.EQUAL.String(), Value: id}},
|
||||
"abstractinstanciatedresource.abstractresource.abstractobject.creator_id": {{Operator: dbs.EQUAL.String(), Value: peerID}},
|
||||
@@ -83,3 +88,30 @@ func (o *DatacenterController) Get() {
|
||||
}
|
||||
o.ServeJSON()
|
||||
}
|
||||
|
||||
var upgrader = websocket.Upgrader{
|
||||
CheckOrigin: func(r *http.Request) bool { return true }, // allow all origins
|
||||
}
|
||||
|
||||
// @Title Log
|
||||
// @Description find booking by id
|
||||
// @Param id path string true "the id you want to get"
|
||||
// @Success 200 {booking} models.booking
|
||||
// @router /:id [get]
|
||||
func (o *DatacenterController) Log() {
|
||||
// user, peerID, groups := oclib.ExtractTokenInfo(*o.Ctx.Request)
|
||||
id := o.Ctx.Input.Param(":id")
|
||||
conn, err := upgrader.Upgrade(o.Ctx.ResponseWriter, o.Ctx.Request, nil)
|
||||
if err != nil {
|
||||
o.Ctx.WriteString("WebSocket upgrade failed: " + err.Error())
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
monitors, err := monitor.NewMonitorService()
|
||||
if err != nil {
|
||||
o.Ctx.WriteString("Monitor service unavailable: " + err.Error())
|
||||
return
|
||||
}
|
||||
ctx := monitor.StreamRegistry.Register(id)
|
||||
monitors.Stream(ctx, id, 1*time.Second, conn)
|
||||
}
|
||||
|
||||
@@ -1,130 +0,0 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"oc-datacenter/infrastructure"
|
||||
|
||||
oclib "cloud.o-forge.io/core/oc-lib"
|
||||
"cloud.o-forge.io/core/oc-lib/models/live"
|
||||
beego "github.com/beego/beego/v2/server/web"
|
||||
)
|
||||
|
||||
type MinioController struct {
|
||||
beego.Controller
|
||||
}
|
||||
|
||||
|
||||
// @Title CreateServiceAccounnt
|
||||
// @Description Add a new ServiceAccount to a Minio server using its ID and an execution ID
|
||||
// @Success 200
|
||||
// @Param executions path string true "The executionsID of the execution"
|
||||
// @Param minioId path string true "The ID of the Minio you want to reach"
|
||||
// @router /serviceaccount/:minioId/:executions [post]
|
||||
func (m *MinioController) CreateServiceAccount() {
|
||||
_, peerID, _ := oclib.ExtractTokenInfo(*m.Ctx.Request)
|
||||
// This part is solely for dev purposes and should be removed once test on
|
||||
|
||||
|
||||
executionsId := m.Ctx.Input.Param(":executions")
|
||||
minioId := m.Ctx.Input.Param(":minioId")
|
||||
|
||||
// retrieve the live storage with the minioId
|
||||
s := oclib.NewRequest(oclib.LibDataEnum(oclib.STORAGE_RESOURCE), "", "", []string{}, nil).LoadOne(minioId)
|
||||
if s.Err != "" {
|
||||
m.Ctx.Output.SetStatus(400)
|
||||
m.Data["json"] = map[string]interface{}{"error": " Could not load the storage resource with id " + minioId + ": " + s.Err}
|
||||
m.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
live := findLiveStorage(minioId, peerID)
|
||||
if live == nil {
|
||||
m.Ctx.Output.SetStatus(404)
|
||||
m.Data["json"] = map[string]interface{}{"error":"could not find the Minio instance " + s.Err}
|
||||
m.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
url := live.Source
|
||||
service := infrastructure.NewMinioService(url)
|
||||
|
||||
// call the method ctrating the svcacc
|
||||
err := service.CreateClient()
|
||||
if err != nil {
|
||||
m.Ctx.Output.SetStatus(500)
|
||||
m.Data["json"] = map[string]interface{}{"error":"could not create the client for " + minioId + " : " + err.Error()}
|
||||
m.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
access, secret, err := service.CreateCredentials(executionsId)
|
||||
if err != nil {
|
||||
m.Ctx.Output.SetStatus(500)
|
||||
m.Data["json"] = map[string]interface{}{"error":"could not create the service account for " + minioId + " : " + err.Error()}
|
||||
m.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
err = service.CreateBucket(executionsId)
|
||||
if err != nil {
|
||||
m.Ctx.Output.SetStatus(500)
|
||||
m.Data["json"] = map[string]interface{}{"error":"could not create the service account for " + minioId + " : " + err.Error()}
|
||||
m.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
// test if the namespace exists
|
||||
k, err := infrastructure.NewService()
|
||||
if err != nil {
|
||||
m.Ctx.Output.SetStatus(500)
|
||||
m.Data["json"] = map[string]string{"error": err.Error()}
|
||||
m.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
ns, err := k.GetNamespace(m.Ctx.Request.Context(), executionsId)
|
||||
if ns == nil {
|
||||
m.Ctx.Output.SetStatus(403)
|
||||
m.Data["json"] = map[string]string{"error":"Could not find the namespace corresponding to executionsID " + executionsId}
|
||||
m.ServeJSON()
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
m.Ctx.Output.SetStatus(500)
|
||||
m.Data["json"] = map[string]string{"error": "Error when trying to check if namespace " + executionsId + " exists : " + err.Error()}
|
||||
m.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
// store the credentials in the namespace
|
||||
err = k.CreateSecret(m.Ctx.Request.Context(), minioId, executionsId, access, secret)
|
||||
if err != nil {
|
||||
m.Ctx.Output.SetStatus(500)
|
||||
m.Data["json"] = map[string]string{"error": "Error when storing Minio serviceAccount credentials in namespace " + executionsId + " exists : " + err.Error()}
|
||||
m.ServeJSON()
|
||||
return
|
||||
}
|
||||
|
||||
m.Data["json"] = map[string]string{"success": "created secret " + executionsId + "-secret-sa in namespace ns-" + executionsId}
|
||||
m.ServeJSON()
|
||||
}
|
||||
|
||||
func findLiveStorage(storageId string, peerId string) *live.LiveStorage {
|
||||
res := oclib.NewRequest(oclib.LibDataEnum(oclib.LIVE_STORAGE),"",peerId,[]string{},nil).LoadAll(false)
|
||||
if res.Err != "" {
|
||||
l := oclib.GetLogger()
|
||||
l.Error().Msg(res.Err)
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, dbo := range res.Data {
|
||||
r := oclib.NewRequest(oclib.LibDataEnum(oclib.LIVE_STORAGE),"","",[]string{},nil).LoadOne(dbo.GetID())
|
||||
l := r.ToLiveStorage()
|
||||
for _, id := range l.ResourcesID {
|
||||
if id == storageId {
|
||||
return l
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -2,9 +2,10 @@ package controllers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"oc-datacenter/infrastructure"
|
||||
"oc-datacenter/conf"
|
||||
"strconv"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
beego "github.com/beego/beego/v2/server/web"
|
||||
)
|
||||
|
||||
@@ -31,7 +32,8 @@ func (o *SessionController) GetToken() {
|
||||
return
|
||||
}
|
||||
|
||||
serv, err := infrastructure.NewService()
|
||||
serv, err := tools.NewKubernetesService(conf.GetConfig().KubeHost+":"+conf.GetConfig().KubePort,
|
||||
conf.GetConfig().KubeCA, conf.GetConfig().KubeCert, conf.GetConfig().KubeData)
|
||||
if err != nil {
|
||||
// change code to 500
|
||||
o.Ctx.Output.SetStatus(500)
|
||||
|
||||
50
go.mod
50
go.mod
@@ -1,20 +1,20 @@
|
||||
module oc-datacenter
|
||||
|
||||
go 1.24.6
|
||||
go 1.25.0
|
||||
|
||||
require (
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260212123952-403913d8cf13
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260224130821-ce8ef70516f7
|
||||
github.com/beego/beego/v2 v2.3.8
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674
|
||||
github.com/minio/madmin-go/v4 v4.1.1
|
||||
github.com/minio/minio-go/v7 v7.0.94
|
||||
github.com/necmettindev/randomstring v0.1.0
|
||||
go.mongodb.org/mongo-driver v1.17.4
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
k8s.io/api v0.32.3
|
||||
k8s.io/apimachinery v0.32.3
|
||||
k8s.io/client-go v0.32.3
|
||||
k8s.io/api v0.35.1
|
||||
k8s.io/apimachinery v0.35.1
|
||||
k8s.io/client-go v0.35.1
|
||||
)
|
||||
|
||||
require (
|
||||
@@ -25,11 +25,11 @@ require (
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/ebitengine/purego v0.8.4 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.9 // indirect
|
||||
github.com/go-ini/ini v1.67.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
@@ -42,7 +42,7 @@ require (
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/golang/snappy v1.0.0 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/gnostic-models v0.7.0 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
@@ -63,7 +63,7 @@ require (
|
||||
github.com/minio/md5-simd v1.1.2 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
|
||||
github.com/montanaflynn/stats v0.7.1 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/nats-io/nats.go v1.43.0 // indirect
|
||||
@@ -95,22 +95,26 @@ require (
|
||||
github.com/xdg-go/stringprep v1.0.4 // indirect
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
golang.org/x/crypto v0.40.0 // indirect
|
||||
golang.org/x/net v0.42.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/crypto v0.44.0 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.16.0 // indirect
|
||||
golang.org/x/sys v0.34.0 // indirect
|
||||
golang.org/x/term v0.33.0 // indirect
|
||||
golang.org/x/text v0.27.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
google.golang.org/protobuf v1.36.8 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
|
||||
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
|
||||
sigs.k8s.io/yaml v1.6.0 // indirect
|
||||
)
|
||||
|
||||
56
go.sum
56
go.sum
@@ -12,6 +12,12 @@ cloud.o-forge.io/core/oc-lib v0.0.0-20260204083845-d9f646aac28b h1:/TkmuO5ERpHJC
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260204083845-d9f646aac28b/go.mod h1:T0UCxRd8w+qCVVC0NEyDiWIGC5ADwEbQ7hFcvftd4Ks=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260212123952-403913d8cf13 h1:DNIPQ7C+7wjbj5RUx29wLxuIe/wiSOcuUMlLRIv6Fvs=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260212123952-403913d8cf13/go.mod h1:jmyBwmsac/4V7XPL347qawF60JsBCDmNAMfn/ySXKYo=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260224093610-a9ebad78f3a8 h1:xoC5PAz1469QxrNm8rrsq5+BtwshEt+L2Nhf90MrqrM=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260224093610-a9ebad78f3a8/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260224120019-0f6aa1fe7881 h1:1JUGErc+3Runda7iapS5sieH+yFqWrGp+ljv7Kly+hc=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260224120019-0f6aa1fe7881/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260224130821-ce8ef70516f7 h1:p9uJjMY+QkE4neA+xRmIRtAm9us94EKZqgajDdLOd0Y=
|
||||
cloud.o-forge.io/core/oc-lib v0.0.0-20260224130821-ce8ef70516f7/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/beego/beego/v2 v2.3.8 h1:wplhB1pF4TxR+2SS4PUej8eDoH4xGfxuHfS7wAk9VBc=
|
||||
github.com/beego/beego/v2 v2.3.8/go.mod h1:8vl9+RrXqvodrl9C8yivX1e6le6deCK6RWeq8R7gTTg=
|
||||
@@ -38,16 +44,22 @@ github.com/elazarl/go-bindata-assetfs v1.0.1 h1:m0kkaHRKEu7tUIUFVwhGGGYClXvyl4RE
|
||||
github.com/elazarl/go-bindata-assetfs v1.0.1/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/etcd-io/etcd v3.3.17+incompatible/go.mod h1:cdZ77EstHBwVtD6iTgzgvogwcjo9m4iOqoijouPJ4bs=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
|
||||
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
|
||||
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
|
||||
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
|
||||
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
|
||||
@@ -83,6 +95,8 @@ github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
|
||||
github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
|
||||
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
@@ -99,6 +113,8 @@ github.com/goraz/onion v0.1.3 h1:KhyvbDA2b70gcz/d5izfwTiOH8SmrvV43AsVzpng3n0=
|
||||
github.com/goraz/onion v0.1.3/go.mod h1:XEmz1XoBz+wxTgWB8NwuvRm4RAu3vKxvrmYtzK+XCuQ=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
|
||||
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
@@ -158,6 +174,8 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
|
||||
github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
@@ -249,6 +267,10 @@ github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo
|
||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw=
|
||||
go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
|
||||
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
|
||||
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
@@ -257,6 +279,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
|
||||
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
|
||||
golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
|
||||
golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
@@ -269,6 +293,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
|
||||
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -278,6 +304,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -295,16 +323,22 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
|
||||
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
|
||||
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
|
||||
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -321,11 +355,15 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
|
||||
google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
@@ -336,19 +374,37 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
|
||||
k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
|
||||
k8s.io/api v0.35.1 h1:0PO/1FhlK/EQNVK5+txc4FuhQibV25VLSdLMmGpDE/Q=
|
||||
k8s.io/api v0.35.1/go.mod h1:28uR9xlXWml9eT0uaGo6y71xK86JBELShLy4wR1XtxM=
|
||||
k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
|
||||
k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
|
||||
k8s.io/apimachinery v0.35.1 h1:yxO6gV555P1YV0SANtnTjXYfiivaTPvCTKX6w6qdDsU=
|
||||
k8s.io/apimachinery v0.35.1/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
|
||||
k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU=
|
||||
k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY=
|
||||
k8s.io/client-go v0.35.1 h1:+eSfZHwuo/I19PaSxqumjqZ9l5XiTEKbIaJ+j1wLcLM=
|
||||
k8s.io/client-go v0.35.1/go.mod h1:1p1KxDt3a0ruRfc/pG4qT/3oHmUj1AhSHEcxNSGg+OA=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
|
||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
|
||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
|
||||
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
|
||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
|
||||
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
|
||||
|
||||
348
infrastructure/admiralty.go
Normal file
348
infrastructure/admiralty.go
Normal file
@@ -0,0 +1,348 @@
|
||||
package infrastructure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"oc-datacenter/conf"
|
||||
"oc-datacenter/models"
|
||||
|
||||
oclib "cloud.o-forge.io/core/oc-lib"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// kubeconfigChannels holds channels waiting for kubeconfig delivery (keyed by executionID).
// Entries are removed by InitializeAsTarget (via a deferred Delete) once a pairing completes.
var kubeconfigChannels sync.Map
|
||||
|
||||
// KubeconfigEvent is the NATS payload used to transfer the kubeconfig from the
// source peer to the target peer.
type KubeconfigEvent struct {
	// DestPeerID is the peer that must consume the kubeconfig (the target/scheduler side).
	DestPeerID string `json:"dest_peer_id"`
	// ExecutionsID identifies the execution; it is also used as the Kubernetes namespace name.
	ExecutionsID string `json:"executions_id"`
	// Kubeconfig is the base64-encoded kubeconfig generated by the source peer.
	Kubeconfig string `json:"kubeconfig"`
	// SourcePeerID is the peer that generated the kubeconfig (the compute/source side).
	SourcePeerID string `json:"source_peer_id"`
	// OriginID is the peer that initiated the provisioning request.
	// The PB_CONSIDERS response is routed back to this peer.
	OriginID string `json:"origin_id"`
}
|
||||
|
||||
// admiraltyConsidersPayload is the PB_CONSIDERS payload emitted after admiralty provisioning.
type admiraltyConsidersPayload struct {
	// OriginID is the peer the response is routed back to.
	OriginID string `json:"origin_id"`
	// ExecutionsID identifies the execution the provisioning belongs to.
	ExecutionsID string `json:"executions_id"`
	// Secret carries the base64-encoded kubeconfig on success; empty on failure.
	Secret string `json:"secret,omitempty"`
	// Error is nil on success, otherwise the provisioning error message.
	Error *string `json:"error,omitempty"`
}
|
||||
|
||||
// emitAdmiraltyConsiders publishes a PB_CONSIDERS back to OriginID with the result
|
||||
// of the admiralty provisioning. secret is the base64-encoded kubeconfig; err is nil on success.
|
||||
func emitAdmiraltyConsiders(executionsID, originID, secret string, provErr error) {
|
||||
var errStr *string
|
||||
if provErr != nil {
|
||||
s := provErr.Error()
|
||||
errStr = &s
|
||||
}
|
||||
payload, _ := json.Marshal(admiraltyConsidersPayload{
|
||||
OriginID: originID,
|
||||
ExecutionsID: executionsID,
|
||||
Secret: secret,
|
||||
Error: errStr,
|
||||
})
|
||||
b, _ := json.Marshal(&tools.PropalgationMessage{
|
||||
DataType: tools.COMPUTE_RESOURCE.EnumIndex(),
|
||||
Action: tools.PB_CONSIDERS,
|
||||
Payload: payload,
|
||||
})
|
||||
go tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
|
||||
FromApp: "oc-datacenter",
|
||||
Datatype: -1,
|
||||
Method: int(tools.PROPALGATION_EVENT),
|
||||
Payload: b,
|
||||
})
|
||||
}
|
||||
|
||||
// AdmiraltySetter carries the execution context for an admiralty pairing between
// a source (compute) cluster and a target (scheduler) cluster.
type AdmiraltySetter struct {
	ExecutionsID string // execution ID, used as the Kubernetes namespace
	NodeName     string // name of the virtual node created by Admiralty on the target cluster
}
|
||||
|
||||
func NewAdmiraltySetter(execIDS string) *AdmiraltySetter {
|
||||
return &AdmiraltySetter{
|
||||
ExecutionsID: execIDS,
|
||||
}
|
||||
}
|
||||
|
||||
// InitializeAsSource is called on the peer that acts as the SOURCE cluster (compute provider).
|
||||
// It creates the AdmiraltySource resource, generates a kubeconfig for the target peer,
|
||||
// and publishes it on NATS so the target peer can complete its side of the setup.
|
||||
func (s *AdmiraltySetter) InitializeAsSource(ctx context.Context, localPeerID string, destPeerID string, originID string) {
|
||||
logger := oclib.GetLogger()
|
||||
|
||||
serv, err := tools.NewKubernetesService(conf.GetConfig().KubeHost+":"+conf.GetConfig().KubePort,
|
||||
conf.GetConfig().KubeCA, conf.GetConfig().KubeCert, conf.GetConfig().KubeData)
|
||||
if err != nil {
|
||||
logger.Error().Msg("InitializeAsSource: failed to create service: " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// Create the AdmiraltySource resource on this cluster (inlined from CreateAdmiraltySource controller)
|
||||
logger.Info().Msg("Creating AdmiraltySource ns-" + s.ExecutionsID)
|
||||
_, err = serv.CreateAdmiraltySource(ctx, s.ExecutionsID)
|
||||
if err != nil && !apierrors.IsAlreadyExists(err) {
|
||||
logger.Error().Msg("InitializeAsSource: failed to create source: " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// Generate a service-account token for the namespace (inlined from GetAdmiraltyKubeconfig controller)
|
||||
token, err := serv.GenerateToken(ctx, s.ExecutionsID, 3600)
|
||||
if err != nil {
|
||||
logger.Error().Msg("InitializeAsSource: failed to generate token for ns-" + s.ExecutionsID + ": " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
kubeconfig, err := buildHostKubeWithToken(token)
|
||||
if err != nil {
|
||||
logger.Error().Msg("InitializeAsSource: " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
b, err := json.Marshal(kubeconfig)
|
||||
if err != nil {
|
||||
logger.Error().Msg("InitializeAsSource: failed to marshal kubeconfig: " + err.Error())
|
||||
return
|
||||
}
|
||||
encodedKubeconfig := base64.StdEncoding.EncodeToString(b)
|
||||
kube := KubeconfigEvent{
|
||||
ExecutionsID: s.ExecutionsID,
|
||||
Kubeconfig: encodedKubeconfig,
|
||||
SourcePeerID: localPeerID,
|
||||
DestPeerID: destPeerID,
|
||||
OriginID: originID,
|
||||
}
|
||||
if destPeerID == localPeerID {
|
||||
s.InitializeAsTarget(ctx, kube)
|
||||
return
|
||||
}
|
||||
// Publish the kubeconfig on NATS so the target peer can proceed
|
||||
payload, err := json.Marshal(kube)
|
||||
if err != nil {
|
||||
logger.Error().Msg("InitializeAsSource: failed to marshal kubeconfig event: " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if b, err := json.Marshal(&tools.PropalgationMessage{
|
||||
DataType: -1,
|
||||
Action: tools.PB_ADMIRALTY_CONFIG,
|
||||
Payload: payload,
|
||||
}); err == nil {
|
||||
go tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
|
||||
FromApp: "oc-datacenter",
|
||||
Datatype: -1,
|
||||
User: "",
|
||||
Method: int(tools.PROPALGATION_EVENT),
|
||||
Payload: b,
|
||||
})
|
||||
}
|
||||
logger.Info().Msg("InitializeAsSource: kubeconfig published for ns-" + s.ExecutionsID)
|
||||
}
|
||||
|
||||
// InitializeAsTarget is called on the peer that acts as the TARGET cluster (scheduler).
|
||||
// It waits for the kubeconfig published by the source peer via NATS, then creates
|
||||
// the Secret, AdmiraltyTarget, and polls until the virtual node appears.
|
||||
// kubeconfigCh must be obtained from RegisterKubeconfigWaiter before this goroutine starts.
|
||||
func (s *AdmiraltySetter) InitializeAsTarget(ctx context.Context, kubeconfigObj KubeconfigEvent) {
|
||||
logger := oclib.GetLogger()
|
||||
defer kubeconfigChannels.Delete(s.ExecutionsID)
|
||||
|
||||
logger.Info().Msg("InitializeAsTarget: waiting for kubeconfig from source peer ns-" + s.ExecutionsID)
|
||||
kubeconfigData := kubeconfigObj.Kubeconfig
|
||||
|
||||
serv, err := tools.NewKubernetesService(conf.GetConfig().KubeHost+":"+conf.GetConfig().KubePort,
|
||||
conf.GetConfig().KubeCA, conf.GetConfig().KubeCert, conf.GetConfig().KubeData)
|
||||
if err != nil {
|
||||
logger.Error().Msg("InitializeAsTarget: failed to create service: " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// 1. Create the namespace
|
||||
logger.Info().Msg("InitializeAsTarget: creating Namespace " + s.ExecutionsID)
|
||||
if err := serv.CreateNamespace(ctx, s.ExecutionsID); err != nil && !apierrors.IsAlreadyExists(err) {
|
||||
logger.Error().Msg("InitializeAsTarget: failed to create namespace: " + err.Error())
|
||||
emitAdmiraltyConsiders(s.ExecutionsID, kubeconfigObj.OriginID, "", err)
|
||||
return
|
||||
}
|
||||
|
||||
// 2. Create the ServiceAccount sa-{executionID}
|
||||
logger.Info().Msg("InitializeAsTarget: creating ServiceAccount sa-" + s.ExecutionsID)
|
||||
if err := serv.CreateServiceAccount(ctx, s.ExecutionsID); err != nil && !apierrors.IsAlreadyExists(err) {
|
||||
logger.Error().Msg("InitializeAsTarget: failed to create service account: " + err.Error())
|
||||
emitAdmiraltyConsiders(s.ExecutionsID, kubeconfigObj.OriginID, "", err)
|
||||
return
|
||||
}
|
||||
|
||||
// 3. Create the Role
|
||||
roleName := "role-" + s.ExecutionsID
|
||||
logger.Info().Msg("InitializeAsTarget: creating Role " + roleName)
|
||||
if err := serv.CreateRole(ctx, s.ExecutionsID, roleName,
|
||||
[][]string{
|
||||
{"coordination.k8s.io"},
|
||||
{""},
|
||||
{""}},
|
||||
[][]string{
|
||||
{"leases"},
|
||||
{"secrets"},
|
||||
{"pods"}},
|
||||
[][]string{
|
||||
{"get", "create", "update"},
|
||||
{"get"},
|
||||
{"patch"}},
|
||||
); err != nil && !apierrors.IsAlreadyExists(err) {
|
||||
logger.Error().Msg("InitializeAsTarget: failed to create role: " + err.Error())
|
||||
emitAdmiraltyConsiders(s.ExecutionsID, kubeconfigObj.OriginID, "", err)
|
||||
return
|
||||
}
|
||||
|
||||
// 4. Create the RoleBinding
|
||||
rbName := "rb-" + s.ExecutionsID
|
||||
logger.Info().Msg("InitializeAsTarget: creating RoleBinding " + rbName)
|
||||
if err := serv.CreateRoleBinding(ctx, s.ExecutionsID, rbName, roleName); err != nil && !apierrors.IsAlreadyExists(err) {
|
||||
logger.Error().Msg("InitializeAsTarget: failed to create role binding: " + err.Error())
|
||||
emitAdmiraltyConsiders(s.ExecutionsID, kubeconfigObj.OriginID, "", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Create the Secret from the source peer's kubeconfig (inlined from CreateKubeSecret controller)
|
||||
logger.Info().Msg("InitializeAsTarget: creating Secret ns-" + s.ExecutionsID)
|
||||
if _, err := serv.CreateKubeconfigSecret(ctx, kubeconfigData, s.ExecutionsID, kubeconfigObj.SourcePeerID); err != nil {
|
||||
logger.Error().Msg("InitializeAsTarget: failed to create kubeconfig secret: " + err.Error())
|
||||
emitAdmiraltyConsiders(s.ExecutionsID, kubeconfigObj.OriginID, "", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Create the AdmiraltyTarget resource (inlined from CreateAdmiraltyTarget controller)
|
||||
logger.Info().Msg("InitializeAsTarget: creating AdmiraltyTarget ns-" + s.ExecutionsID)
|
||||
resp, err := serv.CreateAdmiraltyTarget(ctx, s.ExecutionsID, kubeconfigObj.SourcePeerID)
|
||||
if err != nil || resp == nil {
|
||||
logger.Error().Msg(fmt.Sprintf("InitializeAsTarget: failed to create admiralty target: %v", err))
|
||||
if err == nil {
|
||||
err = fmt.Errorf("CreateAdmiraltyTarget returned nil response")
|
||||
}
|
||||
emitAdmiraltyConsiders(s.ExecutionsID, kubeconfigObj.OriginID, "", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Poll until the virtual node appears (inlined from GetNodeReady controller)
|
||||
logger.Info().Msg("InitializeAsTarget: waiting for virtual node ns-" + s.ExecutionsID)
|
||||
s.waitForNode(ctx, serv, kubeconfigObj.SourcePeerID)
|
||||
emitAdmiraltyConsiders(s.ExecutionsID, kubeconfigObj.OriginID, kubeconfigData, nil)
|
||||
}
|
||||
|
||||
// waitForNode polls GetOneNode until the Admiralty virtual node appears on this cluster.
|
||||
func (s *AdmiraltySetter) waitForNode(ctx context.Context, serv *tools.KubernetesService, sourcePeerID string) {
|
||||
logger := oclib.GetLogger()
|
||||
for i := range 5 {
|
||||
time.Sleep(10 * time.Second)
|
||||
node, err := serv.GetOneNode(ctx, s.ExecutionsID, sourcePeerID)
|
||||
if err == nil && node != nil {
|
||||
s.NodeName = node.Name
|
||||
logger.Info().Msg("waitForNode: node ready: " + s.NodeName)
|
||||
return
|
||||
}
|
||||
if i == 4 {
|
||||
logger.Error().Msg("waitForNode: node never appeared for ns-" + s.ExecutionsID)
|
||||
return
|
||||
}
|
||||
logger.Info().Msg("waitForNode: node not ready yet, retrying...")
|
||||
}
|
||||
}
|
||||
|
||||
// TeardownAsTarget destroys all Admiralty resources created by InitializeAsTarget on the
|
||||
// target (scheduler) cluster: the AdmiraltyTarget CRD, the ServiceAccount, the Role,
|
||||
// the RoleBinding, and the namespace (namespace deletion cascades the rest).
|
||||
func (s *AdmiraltySetter) TeardownAsTarget(ctx context.Context, originID string) {
|
||||
logger := oclib.GetLogger()
|
||||
serv, err := tools.NewKubernetesService(conf.GetConfig().KubeHost+":"+conf.GetConfig().KubePort,
|
||||
conf.GetConfig().KubeCA, conf.GetConfig().KubeCert, conf.GetConfig().KubeData)
|
||||
if err != nil {
|
||||
logger.Error().Msg("TeardownAsTarget: failed to create k8s service: " + err.Error())
|
||||
return
|
||||
}
|
||||
if err := serv.DeleteNamespace(ctx, s.ExecutionsID, func() {
|
||||
logger.Info().Msg("TeardownAsTarget: namespace " + s.ExecutionsID + " deleted")
|
||||
}); err != nil {
|
||||
logger.Error().Msg("TeardownAsTarget: " + err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// TeardownAsSource destroys all Admiralty resources created by InitializeAsSource on the
|
||||
// source (compute) cluster: the AdmiraltySource CRD, the ServiceAccount, and the namespace.
|
||||
// The namespace deletion cascades the Role and RoleBinding.
|
||||
func (s *AdmiraltySetter) TeardownAsSource(ctx context.Context) {
|
||||
logger := oclib.GetLogger()
|
||||
host := conf.GetConfig().KubeHost + ":" + conf.GetConfig().KubePort
|
||||
ca := conf.GetConfig().KubeCA
|
||||
cert := conf.GetConfig().KubeCert
|
||||
data := conf.GetConfig().KubeData
|
||||
|
||||
// Delete the AdmiraltySource CRD via dynamic client
|
||||
gvrSources := schema.GroupVersionResource{
|
||||
Group: "multicluster.admiralty.io", Version: "v1alpha1", Resource: "sources",
|
||||
}
|
||||
if dyn, err := tools.NewDynamicClient(host, ca, cert, data); err != nil {
|
||||
logger.Error().Msg("TeardownAsSource: failed to create dynamic client: " + err.Error())
|
||||
} else if err := dyn.Resource(gvrSources).Namespace(s.ExecutionsID).Delete(
|
||||
ctx, "source-"+s.ExecutionsID, metav1.DeleteOptions{},
|
||||
); err != nil {
|
||||
logger.Error().Msg("TeardownAsSource: failed to delete AdmiraltySource: " + err.Error())
|
||||
}
|
||||
|
||||
// Delete the namespace (cascades SA, Role, RoleBinding)
|
||||
serv, err := tools.NewKubernetesService(host, ca, cert, data)
|
||||
if err != nil {
|
||||
logger.Error().Msg("TeardownAsSource: failed to create k8s service: " + err.Error())
|
||||
return
|
||||
}
|
||||
if err := serv.Set.CoreV1().Namespaces().Delete(ctx, s.ExecutionsID, metav1.DeleteOptions{}); err != nil {
|
||||
logger.Error().Msg("TeardownAsSource: failed to delete namespace: " + err.Error())
|
||||
return
|
||||
}
|
||||
logger.Info().Msg("TeardownAsSource: namespace " + s.ExecutionsID + " deleted")
|
||||
}
|
||||
|
||||
// buildHostKubeWithToken builds a kubeconfig pointing to this peer's cluster,
|
||||
// authenticated with the provided service-account token.
|
||||
func buildHostKubeWithToken(token string) (*models.KubeConfigValue, error) {
|
||||
if len(token) == 0 {
|
||||
return nil, fmt.Errorf("buildHostKubeWithToken: empty token")
|
||||
}
|
||||
encodedCA := base64.StdEncoding.EncodeToString([]byte(conf.GetConfig().KubeCA))
|
||||
return &models.KubeConfigValue{
|
||||
APIVersion: "v1",
|
||||
CurrentContext: "default",
|
||||
Kind: "Config",
|
||||
Preferences: struct{}{},
|
||||
Clusters: []models.KubeconfigNamedCluster{{
|
||||
Name: "default",
|
||||
Cluster: models.KubeconfigCluster{
|
||||
Server: "https://" + conf.GetConfig().KubeHost + ":6443",
|
||||
CertificateAuthorityData: encodedCA,
|
||||
},
|
||||
}},
|
||||
Contexts: []models.KubeconfigNamedContext{{
|
||||
Name: "default",
|
||||
Context: models.KubeconfigContext{Cluster: "default", User: "default"},
|
||||
}},
|
||||
Users: []models.KubeconfigUser{{
|
||||
Name: "default",
|
||||
User: models.KubeconfigUserKeyPair{Token: token},
|
||||
}},
|
||||
}, nil
|
||||
}
|
||||
244
infrastructure/booking_watchdog.go
Normal file
244
infrastructure/booking_watchdog.go
Normal file
@@ -0,0 +1,244 @@
|
||||
package infrastructure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"oc-datacenter/infrastructure/minio"
|
||||
|
||||
oclib "cloud.o-forge.io/core/oc-lib"
|
||||
"cloud.o-forge.io/core/oc-lib/dbs"
|
||||
bookingmodel "cloud.o-forge.io/core/oc-lib/models/booking"
|
||||
"cloud.o-forge.io/core/oc-lib/models/common/enum"
|
||||
"cloud.o-forge.io/core/oc-lib/models/workflow_execution"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
)
|
||||
|
||||
// processedBookings tracks booking IDs whose start-expiry has already been handled.
// Resets on restart; teardown methods are idempotent so duplicate runs are safe.
var processedBookings sync.Map

// processedEndBookings tracks execution IDs whose end-of-execution Admiralty
// target cleanup has already been triggered in this process lifetime
// (keyed by ExecutionsID — see teardownAdmiraltyTarget).
var processedEndBookings sync.Map

// closingStates is the set of terminal booking states after which infra must be torn down.
var closingStates = map[enum.BookingStatus]bool{
	enum.FAILURE:   true,
	enum.SUCCESS:   true,
	enum.FORGOTTEN: true,
	enum.CANCELLED: true,
}
|
||||
|
||||
// WatchBookings starts a passive loop that ticks every minute, scans bookings whose
|
||||
// ExpectedStartDate + 1 min has passed, transitions them to terminal states when needed,
|
||||
// and tears down the associated Kubernetes / Minio infrastructure.
|
||||
// Must be launched in a goroutine from main.
|
||||
func WatchBookings() {
|
||||
logger := oclib.GetLogger()
|
||||
logger.Info().Msg("BookingWatchdog: started")
|
||||
ticker := time.NewTicker(time.Minute)
|
||||
defer ticker.Stop()
|
||||
for range ticker.C {
|
||||
if err := scanExpiredBookings(); err != nil {
|
||||
logger.Error().Msg("BookingWatchdog: " + err.Error())
|
||||
}
|
||||
if err := scanEndedExec(); err != nil {
|
||||
logger.Error().Msg("BookingWatchdog: " + err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// scanExpiredBookings queries all bookings whose start deadline has passed and
|
||||
// dispatches each one to processExpiredBooking.
|
||||
func scanExpiredBookings() error {
|
||||
myself, err := oclib.GetMySelf()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not resolve local peer: %w", err)
|
||||
}
|
||||
peerID := myself.GetID()
|
||||
|
||||
deadline := time.Now().Add(-time.Minute)
|
||||
res := oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), "", peerID, []string{}, nil).
|
||||
Search(&dbs.Filters{
|
||||
And: map[string][]dbs.Filter{
|
||||
"expected_start_date": {{
|
||||
Operator: dbs.LTE.String(),
|
||||
Value: primitive.NewDateTimeFromTime(deadline),
|
||||
}},
|
||||
},
|
||||
}, "", false)
|
||||
|
||||
if res.Err != "" {
|
||||
return fmt.Errorf("booking search failed: %s", res.Err)
|
||||
}
|
||||
|
||||
for _, dbo := range res.Data {
|
||||
b, ok := dbo.(*bookingmodel.Booking)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
go processExpiredBooking(b, peerID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// processExpiredBooking transitions the booking to a terminal state when applicable,
// then tears down infrastructure based on the resource type:
//   - LIVE_DATACENTER / COMPUTE_RESOURCE → Admiralty (as source) + Minio (as target)
//   - LIVE_STORAGE / STORAGE_RESOURCE → Minio (as source)
func processExpiredBooking(b *bookingmodel.Booking, peerID string) {
	logger := oclib.GetLogger()
	ctx := context.Background()

	// Skip bookings already handled during this process lifetime.
	if _, done := processedBookings.Load(b.GetID()); done {
		return
	}

	// Transition non-terminal bookings.
	if !closingStates[b.State] {
		var newState enum.BookingStatus
		switch b.State {
		case enum.DRAFT, enum.DELAYED:
			// DRAFT: never launched; DELAYED: was SCHEDULED but start never arrived.
			newState = enum.FORGOTTEN
		case enum.SCHEDULED:
			// Passed its start date without ever being launched.
			newState = enum.FAILURE
		case enum.STARTED:
			// A running booking is never auto-closed by the watchdog.
			return
		default:
			// Unknown non-terminal state: leave the booking untouched.
			return
		}

		upd := oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), "", peerID, []string{}, nil).
			UpdateOne(map[string]any{"state": newState.EnumIndex()}, b.GetID())
		if upd.Err != "" {
			logger.Error().Msgf("BookingWatchdog: failed to update booking %s: %s", b.GetID(), upd.Err)
			return
		}
		b.State = newState
		logger.Info().Msgf("BookingWatchdog: booking %s (exec=%s, type=%s) → %s",
			b.GetID(), b.ExecutionsID, b.ResourceType, b.State)
	}

	// Mark as handled before triggering async teardown (avoids double-trigger on next tick).
	processedBookings.Store(b.GetID(), struct{}{})

	// Tear down infrastructure according to resource type.
	switch b.ResourceType {
	case tools.LIVE_DATACENTER, tools.COMPUTE_RESOURCE:
		logger.Info().Msgf("BookingWatchdog: tearing down compute infra exec=%s", b.ExecutionsID)
		go NewAdmiraltySetter(b.ExecutionsID).TeardownAsSource(ctx) // i'm the compute units.
		go teardownMinioForComputeBooking(ctx, b, peerID)

	case tools.LIVE_STORAGE, tools.STORAGE_RESOURCE:
		logger.Info().Msgf("BookingWatchdog: tearing down storage infra exec=%s", b.ExecutionsID)
		go teardownMinioSourceBooking(ctx, b, peerID)
	}
}
|
||||
|
||||
// scanEndedExec queries workflow executions whose state index is greater than 2
// and triggers Admiralty target-side teardown for each.
// NOTE(review): the original header described a booking query on ExpectedEndDate
// feeding TeardownAsSource, but the code filters executions on state and calls
// teardownAdmiraltyTarget — this comment follows the code; confirm the intended filter.
func scanEndedExec() error {
	myself, err := oclib.GetMySelf()
	if err != nil {
		return fmt.Errorf("could not resolve local peer: %w", err)
	}
	peerID := myself.GetID()
	res := oclib.NewRequest(oclib.LibDataEnum(oclib.WORKFLOW_EXECUTION), "", peerID, []string{}, nil).
		Search(&dbs.Filters{
			And: map[string][]dbs.Filter{
				// Executions past the active state range (index > 2).
				"state": {{
					Operator: dbs.GT.String(),
					Value:    2,
				}},
			},
		}, "", false)

	if res.Err != "" {
		return fmt.Errorf("ended-booking search failed: %s", res.Err)
	}

	for _, dbo := range res.Data {
		b, ok := dbo.(*workflow_execution.WorkflowExecution)
		if !ok {
			continue
		}
		go teardownAdmiraltyTarget(b)
	}
	return nil
}
|
||||
|
||||
// teardownAdmiraltySource triggers TeardownAsSource for the compute-side namespace
|
||||
// of an execution whose expected end date has passed.
|
||||
func teardownAdmiraltyTarget(b *workflow_execution.WorkflowExecution) {
|
||||
logger := oclib.GetLogger()
|
||||
|
||||
// Each executionsID is processed at most once per process lifetime.
|
||||
if _, done := processedEndBookings.Load(b.ExecutionsID); done {
|
||||
return
|
||||
}
|
||||
processedEndBookings.Store(b.ExecutionsID, struct{}{})
|
||||
|
||||
logger.Info().Msgf("BookingWatchdog: tearing down Admiralty source exec=%s (booking=%s)",
|
||||
b.ExecutionsID, b.GetID())
|
||||
if p, err := oclib.GetMySelf(); err == nil {
|
||||
NewAdmiraltySetter(b.ExecutionsID).TeardownAsTarget(context.Background(), p.GetID())
|
||||
}
|
||||
}
|
||||
|
||||
// teardownMinioForComputeBooking finds the LIVE_STORAGE bookings belonging to the same
|
||||
// execution and triggers Minio-as-target teardown for each (K8s secret + configmap).
|
||||
// The Minio-as-source side is handled separately by the storage booking's own watchdog pass.
|
||||
func teardownMinioForComputeBooking(ctx context.Context, computeBooking *bookingmodel.Booking, localPeerID string) {
|
||||
logger := oclib.GetLogger()
|
||||
|
||||
res := oclib.NewRequest(oclib.LibDataEnum(oclib.BOOKING), "", localPeerID, []string{}, nil).
|
||||
Search(&dbs.Filters{
|
||||
And: map[string][]dbs.Filter{
|
||||
"executions_id": {{Operator: dbs.EQUAL.String(), Value: computeBooking.ExecutionsID}},
|
||||
"resource_type": {{Operator: dbs.EQUAL.String(), Value: tools.LIVE_STORAGE.EnumIndex()}},
|
||||
},
|
||||
}, "", false)
|
||||
|
||||
if res.Err != "" || len(res.Data) == 0 {
|
||||
logger.Warn().Msgf("BookingWatchdog: no storage booking found for exec=%s", computeBooking.ExecutionsID)
|
||||
return
|
||||
}
|
||||
|
||||
for _, dbo := range res.Data {
|
||||
sb, ok := dbo.(*bookingmodel.Booking)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
event := minio.MinioDeleteEvent{
|
||||
ExecutionsID: computeBooking.ExecutionsID,
|
||||
MinioID: sb.ResourceID,
|
||||
SourcePeerID: sb.DestPeerID, // peer hosting Minio
|
||||
DestPeerID: localPeerID, // this peer (compute/target)
|
||||
OriginID: "",
|
||||
}
|
||||
minio.NewMinioSetter(computeBooking.ExecutionsID, sb.ResourceID).TeardownAsTarget(ctx, event)
|
||||
}
|
||||
}
|
||||
|
||||
// teardownMinioSourceBooking triggers Minio-as-source teardown for a storage booking:
|
||||
// revokes the scoped service account and removes the execution bucket on this Minio host.
|
||||
func teardownMinioSourceBooking(ctx context.Context, b *bookingmodel.Booking, localPeerID string) {
|
||||
event := minio.MinioDeleteEvent{
|
||||
ExecutionsID: b.ExecutionsID,
|
||||
MinioID: b.ResourceID,
|
||||
SourcePeerID: localPeerID, // this peer IS the Minio host
|
||||
DestPeerID: b.DestPeerID,
|
||||
OriginID: "",
|
||||
}
|
||||
minio.NewMinioSetter(b.ExecutionsID, b.ResourceID).TeardownAsSource(ctx, event)
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
package infrastructure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"oc-datacenter/conf"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// Infrastructure abstracts the cluster operations the datacenter service needs
// (namespace/SA/RBAC management, Admiralty pairing, secrets, node lookup).
// Implementations are registered in _service (currently only "kubernetes").
type Infrastructure interface {
	// CreateNamespace creates the namespace ns.
	CreateNamespace(ctx context.Context, ns string) error
	// DeleteNamespace deletes the namespace ns.
	DeleteNamespace(ctx context.Context, ns string) error
	// GenerateToken returns a service-account token for ns valid for duration
	// (presumably seconds — confirm against the implementation).
	GenerateToken(ctx context.Context, ns string, duration int) (string, error)
	// CreateServiceAccount creates the service account associated with ns.
	CreateServiceAccount(ctx context.Context, ns string) error
	// CreateRoleBinding binds role to roleBinding in ns.
	CreateRoleBinding(ctx context.Context, ns string, roleBinding string, role string) error
	// CreateRole creates role in ns with the given API groups, resources, and verbs
	// (parallel slices, one entry per rule).
	CreateRole(ctx context.Context, ns string, role string, groups [][]string, resources [][]string, verbs [][]string) error
	// GetTargets lists the Admiralty targets known to the cluster.
	GetTargets(ctx context.Context) ([]string, error)
	// CreateAdmiraltySource creates the Admiralty Source resource for executionId.
	CreateAdmiraltySource(context context.Context, executionId string) ([]byte, error)
	// CreateKubeconfigSecret stores kubeconfig as a secret scoped to executionId/peerId.
	CreateKubeconfigSecret(context context.Context, kubeconfig string, executionId string, peerId string) ([]byte, error)
	// GetKubeconfigSecret retrieves the kubeconfig secret for executionId/peerId.
	GetKubeconfigSecret(context context.Context, executionId string, peerId string) ([]byte, error)
	// CreateAdmiraltyTarget creates the Admiralty Target resource for executionId/peerId.
	CreateAdmiraltyTarget(context context.Context, executionId string, peerId string) ([]byte, error)
	// GetOneNode returns the virtual node for executionID/peerId, if present.
	GetOneNode(context context.Context, executionID string, peerId string) (*v1.Node, error)
	// GetNamespace returns the namespace object for executionID.
	GetNamespace(context context.Context, executionID string) (*v1.Namespace, error)
	// CreateSecret stores Minio access/secret credentials for minioId in executionID.
	CreateSecret(context context.Context, minioId string, executionID string, access string, secret string) error
	// CheckHealth reports whether the backing cluster is reachable.
	CheckHealth() error
}
|
||||
|
||||
// _service maps a configuration mode string to the constructor of the
// matching Infrastructure implementation.
var _service = map[string]func() (Infrastructure, error){
	"kubernetes": NewKubernetesService,
}
|
||||
|
||||
func NewServiceByType(t string) (Infrastructure, error) {
|
||||
service, ok := _service[t]
|
||||
if !ok {
|
||||
return nil, errors.New("service not found")
|
||||
}
|
||||
return service()
|
||||
}
|
||||
|
||||
func NewService() (Infrastructure, error) {
|
||||
service, ok := _service[conf.GetConfig().Mode]
|
||||
if !ok {
|
||||
return nil, errors.New("service not found")
|
||||
}
|
||||
return service()
|
||||
}
|
||||
@@ -1,616 +0,0 @@
|
||||
package infrastructure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"oc-datacenter/conf"
|
||||
"oc-datacenter/infrastructure/monitor"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
oclib "cloud.o-forge.io/core/oc-lib"
|
||||
authv1 "k8s.io/api/authentication/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
apply "k8s.io/client-go/applyconfigurations/core/v1"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
// GroupVersionResources for the Admiralty CRDs manipulated through the
// dynamic client.
var gvrSources = schema.GroupVersionResource{Group: "multicluster.admiralty.io", Version: "v1alpha1", Resource: "sources"}
var gvrTargets = schema.GroupVersionResource{Group: "multicluster.admiralty.io", Version: "v1alpha1", Resource: "targets"}

// KubernetesService implements Infrastructure against a Kubernetes cluster.
type KubernetesService struct {
	// Set is the typed clientset used for all core/RBAC API calls.
	Set *kubernetes.Clientset
}
|
||||
|
||||
func NewDynamicClient() (*dynamic.DynamicClient, error) {
|
||||
config := &rest.Config{
|
||||
Host: conf.GetConfig().KubeHost + ":" + conf.GetConfig().KubePort,
|
||||
TLSClientConfig: rest.TLSClientConfig{
|
||||
CAData: []byte(conf.GetConfig().KubeCA),
|
||||
CertData: []byte(conf.GetConfig().KubeCert),
|
||||
KeyData: []byte(conf.GetConfig().KubeData),
|
||||
},
|
||||
}
|
||||
|
||||
dynamicClient, err := dynamic.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, errors.New("Error creating Dynamic client: " + err.Error())
|
||||
}
|
||||
if dynamicClient == nil {
|
||||
return nil, errors.New("Error creating Dynamic client: dynamicClient is nil")
|
||||
}
|
||||
|
||||
return dynamicClient, nil
|
||||
}
|
||||
|
||||
func NewKubernetesService() (Infrastructure, error) {
|
||||
config := &rest.Config{
|
||||
Host: conf.GetConfig().KubeHost + ":" + conf.GetConfig().KubePort,
|
||||
TLSClientConfig: rest.TLSClientConfig{
|
||||
CAData: []byte(conf.GetConfig().KubeCA),
|
||||
CertData: []byte(conf.GetConfig().KubeCert),
|
||||
KeyData: []byte(conf.GetConfig().KubeData),
|
||||
},
|
||||
}
|
||||
|
||||
// Create clientset
|
||||
clientset, err := kubernetes.NewForConfig(config)
|
||||
fmt.Println("NewForConfig", clientset, err)
|
||||
if err != nil {
|
||||
return nil, errors.New("Error creating Kubernetes client: " + err.Error())
|
||||
}
|
||||
if clientset == nil {
|
||||
return nil, errors.New("Error creating Kubernetes client: clientset is nil")
|
||||
}
|
||||
|
||||
return &KubernetesService{
|
||||
Set: clientset,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func NewRemoteKubernetesService(url string, ca string, cert string, key string) (Infrastructure, error) {
|
||||
decodedCa, _ := base64.StdEncoding.DecodeString(ca)
|
||||
decodedCert, _ := base64.StdEncoding.DecodeString(cert)
|
||||
decodedKey, _ := base64.StdEncoding.DecodeString(key)
|
||||
|
||||
config := &rest.Config{
|
||||
Host: url + ":6443",
|
||||
TLSClientConfig: rest.TLSClientConfig{
|
||||
CAData: decodedCa,
|
||||
CertData: decodedCert,
|
||||
KeyData: decodedKey,
|
||||
},
|
||||
}
|
||||
// Create clientset
|
||||
clientset, err := kubernetes.NewForConfig(config)
|
||||
fmt.Println("NewForConfig", clientset, err)
|
||||
if err != nil {
|
||||
return nil, errors.New("Error creating Kubernetes client: " + err.Error())
|
||||
}
|
||||
if clientset == nil {
|
||||
return nil, errors.New("Error creating Kubernetes client: clientset is nil")
|
||||
}
|
||||
|
||||
return &KubernetesService{
|
||||
Set: clientset,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (k *KubernetesService) CreateNamespace(ctx context.Context, ns string) error {
|
||||
// Define the namespace
|
||||
fmt.Println("ExecutionID in CreateNamespace() : ", ns)
|
||||
namespace := &v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: ns,
|
||||
Labels: map[string]string{
|
||||
"multicluster-scheduler": "enabled",
|
||||
},
|
||||
},
|
||||
}
|
||||
// Create the namespace
|
||||
fmt.Println("Creating namespace...", k.Set)
|
||||
if _, err := k.Set.CoreV1().Namespaces().Create(ctx, namespace, metav1.CreateOptions{}); err != nil {
|
||||
return errors.New("Error creating namespace: " + err.Error())
|
||||
}
|
||||
fmt.Println("Namespace created successfully!")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *KubernetesService) CreateServiceAccount(ctx context.Context, ns string) error {
|
||||
// Create the ServiceAccount object
|
||||
serviceAccount := &v1.ServiceAccount{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "sa-" + ns,
|
||||
Namespace: ns,
|
||||
},
|
||||
}
|
||||
// Create the ServiceAccount in the specified namespace
|
||||
_, err := k.Set.CoreV1().ServiceAccounts(ns).Create(ctx, serviceAccount, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return errors.New("Failed to create ServiceAccount: " + err.Error())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *KubernetesService) CreateRole(ctx context.Context, ns string, role string, groups [][]string, resources [][]string, verbs [][]string) error {
|
||||
// Create the Role object
|
||||
if len(groups) != len(resources) || len(resources) != len(verbs) {
|
||||
return errors.New("Invalid input: groups, resources, and verbs must have the same length")
|
||||
}
|
||||
rules := []rbacv1.PolicyRule{}
|
||||
for i, group := range groups {
|
||||
rules = append(rules, rbacv1.PolicyRule{
|
||||
APIGroups: group,
|
||||
Resources: resources[i],
|
||||
Verbs: verbs[i],
|
||||
})
|
||||
}
|
||||
r := &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: role,
|
||||
Namespace: ns,
|
||||
},
|
||||
Rules: rules,
|
||||
}
|
||||
// Create the Role in the specified namespace
|
||||
_, err := k.Set.RbacV1().Roles(ns).Create(ctx, r, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return errors.New("Failed to create Role: " + err.Error())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *KubernetesService) CreateRoleBinding(ctx context.Context, ns string, roleBinding string, role string) error {
|
||||
// Create the RoleBinding object
|
||||
rb := &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleBinding,
|
||||
Namespace: ns,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Name: "sa-" + ns,
|
||||
Namespace: ns,
|
||||
},
|
||||
},
|
||||
RoleRef: rbacv1.RoleRef{
|
||||
Kind: "Role",
|
||||
Name: role,
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
},
|
||||
}
|
||||
// Create the RoleBinding in the specified namespace
|
||||
_, err := k.Set.RbacV1().RoleBindings(ns).Create(ctx, rb, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return errors.New("Failed to create RoleBinding: " + err.Error())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *KubernetesService) DeleteNamespace(ctx context.Context, ns string) error {
|
||||
targetGVR := schema.GroupVersionResource{
|
||||
Group: "multicluster.admiralty.io",
|
||||
Version: "v1alpha1",
|
||||
Resource: "targets",
|
||||
}
|
||||
|
||||
// Delete the Target
|
||||
dyn, err := NewDynamicClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dyn.Resource(targetGVR).Namespace(ns).Delete(context.TODO(), "target-"+ns, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = k.Set.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), "sa-"+ns, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Delete the namespace
|
||||
if err := k.Set.CoreV1().Namespaces().Delete(ctx, ns, metav1.DeleteOptions{}); err != nil {
|
||||
return errors.New("Error deleting namespace: " + err.Error())
|
||||
}
|
||||
monitor.StreamRegistry.Cancel(ns)
|
||||
fmt.Println("Namespace deleted successfully!")
|
||||
return nil
|
||||
}
|
||||
|
||||
// GenerateToken returns a token for the service account named sa-`ns` in the
// namespace `ns`, valid for `duration` seconds.
func (k *KubernetesService) GenerateToken(ctx context.Context, ns string, duration int) (string, error) {
	// TokenRequest with the caller-supplied validity. (The previous comments
	// claimed "1 hour" — the validity is whatever `duration` says.)
	d := int64(duration)
	tokenRequest := &authv1.TokenRequest{
		Spec: authv1.TokenRequestSpec{
			ExpirationSeconds: &d,
		},
	}
	// Ask the API server to mint the token for "sa-"+ns.
	token, err := k.Set.CoreV1().
		ServiceAccounts(ns).
		CreateToken(ctx, "sa-"+ns, tokenRequest, metav1.CreateOptions{})
	if err != nil {
		return "", errors.New("Failed to create token for ServiceAccount: " + err.Error())
	}
	return token.Status.Token, nil
}
|
||||
|
||||
// Needs refactoring :
|
||||
// - Retrieving the metada (in a method that Unmarshall the part of the json in a metadata object)
|
||||
func (k *KubernetesService) GetTargets(ctx context.Context) ([]string, error) {
|
||||
|
||||
var listTargets []string
|
||||
resp, err := getCDRapiKube(*k.Set, ctx, "/apis/multicluster.admiralty.io/v1alpha1/targets")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fmt.Println(string(resp))
|
||||
var targetDict map[string]interface{}
|
||||
err = json.Unmarshal(resp, &targetDict)
|
||||
if err != nil {
|
||||
fmt.Println("TODO: handle the error when unmarshalling k8s API response")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
b, _ := json.MarshalIndent(targetDict, "", " ")
|
||||
fmt.Println(string(b))
|
||||
|
||||
data := targetDict["items"].([]interface{})
|
||||
|
||||
for _, item := range data {
|
||||
var metadata metav1.ObjectMeta
|
||||
item := item.(map[string]interface{})
|
||||
byteMetada, err := json.Marshal(item["metadata"])
|
||||
|
||||
if err != nil {
|
||||
fmt.Println("Error while Marshalling metadata field")
|
||||
return nil, err
|
||||
}
|
||||
err = json.Unmarshal(byteMetada, &metadata)
|
||||
if err != nil {
|
||||
fmt.Println("Error while Unmarshalling metadata field to the library object")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
listTargets = append(listTargets, metadata.Name)
|
||||
}
|
||||
|
||||
return listTargets, nil
|
||||
|
||||
}
|
||||
|
||||
// Admiralty Target allows a cluster to deploy pods to a remote cluster.
//
// The remote cluster must:
//
//   - have declared a Source resource
//
//   - have declared the same namespace as the one where the pods are created in the local cluster
//
//   - have declared a serviceAccount with sufficient permission to create pods
func (k *KubernetesService) CreateAdmiraltyTarget(context context.Context, executionId string, peerId string) ([]byte, error) {
	// The Target references a kubeconfig secret created by
	// CreateKubeconfigSecret; bail out if that secret does not exist yet.
	exists, err := k.GetKubeconfigSecret(context, executionId, peerId)
	if err != nil {
		fmt.Println("Error verifying kube-secret before creating target")
		return nil, err
	}

	if exists == nil {
		fmt.Println("Target needs to be binded to a secret in namespace ", executionId)
		// NOTE(review): (nil, nil) is ambiguous for callers — consider a
		// dedicated error type. Kept as-is to preserve the current contract.
		return nil, nil // Maybe we could create a wrapper for errors and add more info to have
	}

	targetName := "target-" + oclib.GetConcatenatedName(peerId, executionId)
	target := map[string]interface{}{
		"apiVersion": "multicluster.admiralty.io/v1alpha1",
		"kind":       "Target",
		"metadata": map[string]interface{}{
			"name":      targetName,
			"namespace": executionId,
		},
		// spec.kubeconfigSecret names the secret applied by CreateKubeconfigSecret.
		"spec": map[string]interface{}{
			"kubeconfigSecret": map[string]string{
				"name": "kube-secret-" + oclib.GetConcatenatedName(peerId, executionId),
			},
		},
	}

	res, err := dynamicClientApply(executionId, targetName, gvrTargets, context, target)
	if err != nil {
		return nil, errors.New("Error when trying to apply Target definition :" + err.Error())
	}

	return res, nil

}
|
||||
|
||||
// Admiralty Source allows a cluster to receive pods from a remote cluster
|
||||
//
|
||||
// The source must be associated to a serviceAccount, which will execute the pods locally.
|
||||
// This serviceAccount must have sufficient permission to create and patch pods
|
||||
//
|
||||
// This method is temporary to implement the use of Admiralty, but must be edited
|
||||
// to rather contact the oc-datacenter from the remote cluster to create the source
|
||||
// locally and retrieve the token for the serviceAccount
|
||||
func (k *KubernetesService) CreateAdmiraltySource(context context.Context, executionId string) ([]byte, error) {
|
||||
|
||||
source := map[string]interface{}{
|
||||
"apiVersion": "multicluster.admiralty.io/v1alpha1",
|
||||
"kind": "Source",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "source-" + executionId,
|
||||
"namespace": executionId,
|
||||
},
|
||||
"spec": map[string]interface{}{
|
||||
"serviceAccountName": "sa-" + executionId,
|
||||
},
|
||||
}
|
||||
|
||||
res, err := dynamicClientApply(executionId, "source-"+executionId, gvrSources, context, source)
|
||||
if err != nil {
|
||||
return nil, errors.New("Error when trying to apply Source definition :" + err.Error())
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Create a secret from a kubeconfing. Use it to create the secret binded to an Admiralty
|
||||
// target, which must contain the serviceAccount's token value
|
||||
func (k *KubernetesService) CreateKubeconfigSecret(context context.Context, kubeconfig string, executionId string, peerId string) ([]byte, error) {
|
||||
config, err := base64.StdEncoding.DecodeString(kubeconfig)
|
||||
// config, err := base64.RawStdEncoding.DecodeString(kubeconfig)
|
||||
if err != nil {
|
||||
fmt.Println("Error while encoding kubeconfig")
|
||||
fmt.Println(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
secretApplyConfig := apply.Secret("kube-secret-"+oclib.GetConcatenatedName(peerId, executionId),
|
||||
executionId).
|
||||
WithData(map[string][]byte{
|
||||
"config": config,
|
||||
},
|
||||
)
|
||||
|
||||
// exists, err := k.GetKubeconfigSecret(context,executionId)
|
||||
// if err != nil {
|
||||
// fmt.Println("Error verifying if kube secret exists in namespace ", executionId)
|
||||
// return nil, err
|
||||
// }
|
||||
// if exists != nil {
|
||||
// fmt.Println("kube-secret already exists in namespace", executionId)
|
||||
// fmt.Println("Overriding existing kube-secret with a newer resource")
|
||||
// // TODO : implement DeleteKubeConfigSecret(executionID)
|
||||
// deleted, err := k.DeleteKubeConfigSecret(executionId)
|
||||
// _ = deleted
|
||||
// _ = err
|
||||
// }
|
||||
|
||||
resp, err := k.Set.CoreV1().
|
||||
Secrets(executionId).
|
||||
Apply(context,
|
||||
secretApplyConfig,
|
||||
metav1.ApplyOptions{
|
||||
FieldManager: "admiralty-manager",
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
fmt.Println("Error while trying to contact API to get secret kube-secret-" + executionId)
|
||||
fmt.Println(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
fmt.Println("Couldn't marshal resp from : ", data)
|
||||
fmt.Println(err)
|
||||
return nil, err
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (k *KubernetesService) GetKubeconfigSecret(context context.Context, executionId string, peerId string) ([]byte, error) {
|
||||
resp, err := k.Set.CoreV1().
|
||||
Secrets(executionId).
|
||||
Get(context, "kube-secret-"+oclib.GetConcatenatedName(peerId, executionId), metav1.GetOptions{})
|
||||
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
fmt.Println("kube-secret not found for execution", executionId)
|
||||
return nil, nil
|
||||
}
|
||||
fmt.Println("Error while trying to contact API to get secret kube-secret-" + executionId)
|
||||
fmt.Println(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data, err := json.Marshal(resp)
|
||||
|
||||
if err != nil {
|
||||
fmt.Println("Couldn't marshal resp from : ", data)
|
||||
fmt.Println(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// DeleteKubeConfigSecret is an unimplemented stub: it always returns an empty
// byte slice and nil. TODO: delete the "kube-secret-…" secret for the given
// execution (referenced by a commented-out call in CreateKubeconfigSecret's
// history).
func (k *KubernetesService) DeleteKubeConfigSecret(executionID string) ([]byte, error) {

	return []byte{}, nil
}
|
||||
|
||||
func (k *KubernetesService) GetNamespace(context context.Context, executionID string) (*v1.Namespace, error) {
|
||||
resp, err := k.Set.CoreV1().Namespaces().Get(context, executionID, metav1.GetOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
logger := oclib.GetLogger()
|
||||
logger.Error().Msg("An error occured when trying to get namespace " + executionID + " : " + err.Error())
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func getCDRapiKube(client kubernetes.Clientset, ctx context.Context, path string) ([]byte, error) {
|
||||
resp, err := client.RESTClient().Get().
|
||||
AbsPath(path).
|
||||
DoRaw(ctx) // from https://stackoverflow.com/questions/60764908/how-to-access-kubernetes-crd-using-client-go
|
||||
|
||||
if err != nil {
|
||||
fmt.Println("Error from k8s API when getting "+path+" : ", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func dynamicClientApply(executionId string, resourceName string, resourceDefinition schema.GroupVersionResource, ctx context.Context, object map[string]interface{}) ([]byte, error) {
|
||||
cli, err := NewDynamicClient()
|
||||
if err != nil {
|
||||
return nil, errors.New("Could not retrieve dynamic client when creating Admiralty Source : " + err.Error())
|
||||
}
|
||||
|
||||
res, err := cli.Resource(resourceDefinition).
|
||||
Namespace(executionId).
|
||||
Apply(ctx,
|
||||
resourceName,
|
||||
&unstructured.Unstructured{Object: object},
|
||||
metav1.ApplyOptions{
|
||||
FieldManager: "kubectl-client-side-apply",
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
o, err := json.Marshal(object)
|
||||
fmt.Println("Error from k8s API when applying "+fmt.Sprint(string(o))+" to "+gvrSources.String()+" : ", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We can add more info to the log with the content of resp if not nil
|
||||
resByte, err := json.Marshal(res)
|
||||
if err != nil {
|
||||
// fmt.Println("Error trying to create a Source on remote cluster : ", err , " : ", res)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resByte, nil
|
||||
|
||||
}
|
||||
|
||||
func (k *KubernetesService) CheckHealth() error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Check API server connectivity
|
||||
_, err := k.Set.ServerVersion()
|
||||
if err != nil {
|
||||
return fmt.Errorf("API server unreachable: %v", err)
|
||||
}
|
||||
|
||||
// Check nodes status
|
||||
nodes, err := k.Set.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list nodes: %v", err)
|
||||
}
|
||||
for _, node := range nodes.Items {
|
||||
for _, condition := range node.Status.Conditions {
|
||||
if condition.Type == "Ready" && condition.Status != "True" {
|
||||
return fmt.Errorf("node %s not ready", node.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Optional: Check if all pods in kube-system are running
|
||||
pods, err := k.Set.CoreV1().Pods("kube-system").List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list pods: %v", err)
|
||||
}
|
||||
for _, pod := range pods.Items {
|
||||
if pod.Status.Phase != "Running" && pod.Status.Phase != "Succeeded" {
|
||||
return fmt.Errorf("pod %s in namespace kube-system is %s", pod.Name, pod.Status.Phase)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Returns the Kubernetes' Node object corresponding to the executionID if it exists on this host
|
||||
//
|
||||
// The node is created when an admiralty Target (on host) can connect to an admiralty Source (on remote)
|
||||
func (k *KubernetesService) GetOneNode(context context.Context, executionID string, peerId string) (*v1.Node, error) {
|
||||
concatenatedName := oclib.GetConcatenatedName(peerId, executionID)
|
||||
|
||||
res, err := k.Set.CoreV1().
|
||||
Nodes().
|
||||
List(
|
||||
context,
|
||||
metav1.ListOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
fmt.Println("Error getting the list of nodes from k8s API")
|
||||
fmt.Println(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, node := range res.Items {
|
||||
if isNode := strings.Contains(node.Name, "admiralty-"+executionID+"-target-"+concatenatedName+"-"); isNode {
|
||||
return &node, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (k *KubernetesService) CreateSecret(context context.Context, minioId string, executionID string, access string, secret string) error {
|
||||
|
||||
data := map[string][]byte{
|
||||
"access-key": []byte(access),
|
||||
"secret-key": []byte(secret),
|
||||
}
|
||||
|
||||
s := v1.Secret{
|
||||
Type: v1.SecretTypeOpaque,
|
||||
Data: data,
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: minioId + "-secret-s3",
|
||||
},
|
||||
}
|
||||
|
||||
_, err := k.Set.CoreV1().Secrets(executionID).Create(context, &s, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
logger := oclib.GetLogger()
|
||||
logger.Error().Msg("An error happened when creating the secret holding minio credentials in namespace " + executionID + " : " + err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getConcatenatedName returns "<peerId>-<seg0>-<seg1>" where seg0/seg1 are the
// first two dash-separated segments of namespace. This keeps Kubernetes
// resource names unique, under 63 characters, and traceable to a peer.
//
// Fix: the original indexed s[:2]/s[1] unconditionally and panicked on a
// namespace with fewer than two dash-separated segments; such namespaces are
// now used as-is.
func getConcatenatedName(peerId string, namespace string) string {
	parts := strings.SplitN(namespace, "-", 3)
	if len(parts) < 2 {
		return peerId + "-" + namespace
	}
	return peerId + "-" + parts[0] + "-" + parts[1]
}
|
||||
@@ -1,126 +0,0 @@
|
||||
package infrastructure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"oc-datacenter/conf"
|
||||
|
||||
oclib "cloud.o-forge.io/core/oc-lib"
|
||||
"github.com/minio/madmin-go/v4"
|
||||
"github.com/minio/minio-go/v7"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
|
||||
"github.com/necmettindev/randomstring"
|
||||
)
|
||||
|
||||
// MinioService wraps a Minio deployment reachable at Url, authenticated with
// the root credentials from configuration. MinioAdminClient is nil until
// CreateClient is called.
type MinioService struct{
	Url string          // host:port of the Minio endpoint
	RootKey string      // root access key (from conf)
	RootSecret string   // root secret key (from conf)
	MinioAdminClient *madmin.AdminClient // admin API client, set by CreateClient
}

// StatementEntry is one statement of an S3 IAM-style policy document.
type StatementEntry struct {
	Effect string `json:"Effect"`
	Action []string `json:"Action"`
	Resource string `json:"Resource"`
}

// PolicyDocument is an S3 IAM-style policy, marshalled to JSON and attached
// to a scoped service account.
type PolicyDocument struct {
	Version string `json:"Version"`
	Statement []StatementEntry `json:"Statement"`
}
|
||||
|
||||
|
||||
func NewMinioService(url string) *MinioService {
|
||||
return &MinioService{
|
||||
Url: url,
|
||||
RootKey: conf.GetConfig().MinioRootKey,
|
||||
RootSecret: conf.GetConfig().MinioRootSecret,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MinioService) CreateClient() error {
|
||||
cred := credentials.NewStaticV4(m.RootKey,m.RootSecret,"")
|
||||
cli, err := madmin.NewWithOptions(m.Url, &madmin.Options{Creds: cred, Secure: false}) // Maybe in the future we should use the secure option ?
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.MinioAdminClient = cli
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MinioService) CreateCredentials(executionId string) (string,string,error){
|
||||
|
||||
policy := PolicyDocument{
|
||||
Version: "2012-10-17",
|
||||
Statement: []StatementEntry{
|
||||
{
|
||||
Effect: "Allow",
|
||||
Action: []string{"s3:GetObject", "s3:PutObject"},
|
||||
Resource: "arn:aws:s3:::"+executionId+"/*",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
p, err := json.Marshal(policy)
|
||||
if err != nil {
|
||||
return "","",err
|
||||
}
|
||||
|
||||
randAccess, randSecret := getRandomCreds()
|
||||
|
||||
req := madmin.AddServiceAccountReq{
|
||||
Policy: p,
|
||||
TargetUser: m.RootKey,
|
||||
AccessKey: randAccess,
|
||||
SecretKey: randSecret,
|
||||
}
|
||||
|
||||
res, err := m.MinioAdminClient.AddServiceAccount(context.Background(), req)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
|
||||
return res.AccessKey, res.SecretKey, nil
|
||||
|
||||
}
|
||||
|
||||
// getRandomCreds returns a random (accessKey, secretKey) pair of lengths
// 20 and 40 respectively.
//
// NOTE(review): errors from GenerateString are silently discarded, and the
// randomstring package is not documented here as cryptographically secure —
// consider crypto/rand for credential material. TODO confirm.
func getRandomCreds() (string, string){
	opts := randomstring.GenerationOptions{
		Length: 20,
	}

	a, _ := randomstring.GenerateString(opts)

	opts.Length = 40
	s, _ := randomstring.GenerateString(opts)

	return a,s

}
|
||||
|
||||
func (m *MinioService) CreateBucket(executionId string) error {
|
||||
|
||||
l := oclib.GetLogger()
|
||||
cred := credentials.NewStaticV4(m.RootKey,m.RootSecret,"")
|
||||
client, err := minio.New(m.Url, &minio.Options{
|
||||
Creds: cred,
|
||||
Secure: false,
|
||||
})
|
||||
if err != nil {
|
||||
l.Error().Msg("Error when creating the minio client for the data plane")
|
||||
return err
|
||||
}
|
||||
|
||||
err = client.MakeBucket(context.Background(), executionId, minio.MakeBucketOptions{})
|
||||
if err != nil {
|
||||
l.Error().Msg("Error when creating the bucket for namespace " + executionId)
|
||||
return err
|
||||
}
|
||||
|
||||
l.Info().Msg("Created the bucket " + executionId + " on " + m.Url + " minio")
|
||||
return nil
|
||||
}
|
||||
219
infrastructure/minio/minio.go
Normal file
219
infrastructure/minio/minio.go
Normal file
@@ -0,0 +1,219 @@
|
||||
package minio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"oc-datacenter/conf"
|
||||
|
||||
oclib "cloud.o-forge.io/core/oc-lib"
|
||||
"github.com/minio/madmin-go/v4"
|
||||
"github.com/minio/minio-go/v7"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
|
||||
"github.com/necmettindev/randomstring"
|
||||
)
|
||||
|
||||
// MinioService wraps a Minio deployment reachable at Url, authenticated with
// the root credentials from configuration. MinioAdminClient is nil until
// CreateClient is called.
type MinioService struct {
	Url              string // host:port of the Minio endpoint
	RootKey          string // root access key (from conf)
	RootSecret       string // root secret key (from conf)
	MinioAdminClient *madmin.AdminClient // admin API client, set by CreateClient
}

// StatementEntry is one statement of an S3 IAM-style policy document.
type StatementEntry struct {
	Effect   string   `json:"Effect"`
	Action   []string `json:"Action"`
	Resource string   `json:"Resource"`
}

// PolicyDocument is an S3 IAM-style policy, marshalled to JSON and attached
// to a scoped service account.
type PolicyDocument struct {
	Version   string           `json:"Version"`
	Statement []StatementEntry `json:"Statement"`
}
|
||||
|
||||
func NewMinioService(url string) *MinioService {
|
||||
return &MinioService{
|
||||
Url: url,
|
||||
RootKey: conf.GetConfig().MinioRootKey,
|
||||
RootSecret: conf.GetConfig().MinioRootSecret,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MinioService) CreateClient() error {
|
||||
cred := credentials.NewStaticV4(m.RootKey, m.RootSecret, "")
|
||||
cli, err := madmin.NewWithOptions(m.Url, &madmin.Options{Creds: cred, Secure: false}) // Maybe in the future we should use the secure option ?
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.MinioAdminClient = cli
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MinioService) CreateCredentials(executionId string) (string, string, error) {
|
||||
|
||||
policy := PolicyDocument{
|
||||
Version: "2012-10-17",
|
||||
Statement: []StatementEntry{
|
||||
{
|
||||
Effect: "Allow",
|
||||
Action: []string{"s3:GetObject", "s3:PutObject"},
|
||||
Resource: "arn:aws:s3:::" + executionId + "/*",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
p, err := json.Marshal(policy)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
randAccess, randSecret := getRandomCreds()
|
||||
|
||||
req := madmin.AddServiceAccountReq{
|
||||
Policy: p,
|
||||
TargetUser: m.RootKey,
|
||||
AccessKey: randAccess,
|
||||
SecretKey: randSecret,
|
||||
}
|
||||
|
||||
res, err := m.MinioAdminClient.AddServiceAccount(context.Background(), req)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
return res.AccessKey, res.SecretKey, nil
|
||||
|
||||
}
|
||||
|
||||
// getRandomCreds returns a random (accessKey, secretKey) pair of lengths
// 20 and 40 respectively.
//
// NOTE(review): errors from GenerateString are silently discarded, and the
// randomstring package is not documented here as cryptographically secure —
// consider crypto/rand for credential material. TODO confirm.
func getRandomCreds() (string, string) {
	opts := randomstring.GenerationOptions{
		Length: 20,
	}

	a, _ := randomstring.GenerateString(opts)

	opts.Length = 40
	s, _ := randomstring.GenerateString(opts)

	return a, s

}
|
||||
func (m *MinioService) CreateMinioConfigMap(minioID string, executionId string, url string) error {
|
||||
config, err := rest.InClusterConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
clientset, err := kubernetes.NewForConfig(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: minioID + "artifact-repository",
|
||||
Namespace: executionId,
|
||||
},
|
||||
Data: map[string]string{
|
||||
minioID + "s3-local": fmt.Sprintf(`
|
||||
s3:
|
||||
bucket: %s
|
||||
endpoint: %s
|
||||
insecure: true
|
||||
accessKeySecret:
|
||||
name: %s-secret-s3
|
||||
key: accesskey
|
||||
secretKeySecret:
|
||||
name: %s-secret-s3
|
||||
key: secretkey
|
||||
`, minioID+"-"+executionId, url, minioID, minioID),
|
||||
},
|
||||
}
|
||||
|
||||
existing, err := clientset.CoreV1().
|
||||
ConfigMaps(executionId).
|
||||
Get(context.Background(), minioID+"artifact-repository", metav1.GetOptions{})
|
||||
|
||||
if err == nil {
|
||||
// Update
|
||||
existing.Data = configMap.Data
|
||||
_, err = clientset.CoreV1().
|
||||
ConfigMaps(executionId).
|
||||
Update(context.Background(), existing, metav1.UpdateOptions{})
|
||||
} else {
|
||||
// Create
|
||||
_, err = clientset.CoreV1().
|
||||
ConfigMaps(executionId).
|
||||
Create(context.Background(), configMap, metav1.CreateOptions{})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MinioService) CreateBucket(minioID string, executionId string) error {
|
||||
l := oclib.GetLogger()
|
||||
cred := credentials.NewStaticV4(m.RootKey, m.RootSecret, "")
|
||||
client, err := minio.New(m.Url, &minio.Options{
|
||||
Creds: cred,
|
||||
Secure: false,
|
||||
})
|
||||
if err != nil {
|
||||
l.Error().Msg("Error when creating the minio client for the data plane")
|
||||
return err
|
||||
}
|
||||
|
||||
err = client.MakeBucket(context.Background(), minioID+"-"+executionId, minio.MakeBucketOptions{})
|
||||
if err != nil {
|
||||
l.Error().Msg("Error when creating the bucket for namespace " + executionId)
|
||||
return err
|
||||
}
|
||||
|
||||
l.Info().Msg("Created the bucket " + minioID + "-" + executionId + " on " + m.Url + " minio")
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteCredentials revokes a scoped Minio service account by its access key.
|
||||
func (m *MinioService) DeleteCredentials(accessKey string) error {
|
||||
if err := m.MinioAdminClient.DeleteServiceAccount(context.Background(), accessKey); err != nil {
|
||||
return fmt.Errorf("DeleteCredentials: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteBucket removes the execution bucket from Minio.
|
||||
func (m *MinioService) DeleteBucket(minioID, executionId string) error {
|
||||
l := oclib.GetLogger()
|
||||
cred := credentials.NewStaticV4(m.RootKey, m.RootSecret, "")
|
||||
client, err := minio.New(m.Url, &minio.Options{Creds: cred, Secure: false})
|
||||
if err != nil {
|
||||
l.Error().Msg("Error when creating minio client for bucket deletion")
|
||||
return err
|
||||
}
|
||||
bucketName := minioID + "-" + executionId
|
||||
if err := client.RemoveBucket(context.Background(), bucketName); err != nil {
|
||||
l.Error().Msg("Error when deleting bucket " + bucketName)
|
||||
return err
|
||||
}
|
||||
l.Info().Msg("Deleted bucket " + bucketName + " on " + m.Url)
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteMinioConfigMap removes the artifact-repository ConfigMap from the execution namespace.
|
||||
func (m *MinioService) DeleteMinioConfigMap(minioID, executionId string) error {
|
||||
cfg, err := rest.InClusterConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clientset, err := kubernetes.NewForConfig(cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return clientset.CoreV1().ConfigMaps(executionId).Delete(
|
||||
context.Background(), minioID+"artifact-repository", metav1.DeleteOptions{},
|
||||
)
|
||||
}
|
||||
297
infrastructure/minio/minio_setter.go
Normal file
297
infrastructure/minio/minio_setter.go
Normal file
@@ -0,0 +1,297 @@
|
||||
package minio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"slices"
|
||||
|
||||
"oc-datacenter/conf"
|
||||
|
||||
oclib "cloud.o-forge.io/core/oc-lib"
|
||||
"cloud.o-forge.io/core/oc-lib/models/live"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// MinioCredentialEvent is the NATS payload used to transfer Minio credentials between peers.
//
// Two-phase protocol over PROPALGATION_EVENT (Action = PB_MINIO_CONFIG):
//   - Phase 1 – role assignment (Access == ""):
//     oc-discovery routes this to the SOURCE peer (Minio host) → InitializeAsSource.
//   - Phase 2 – credential delivery (Access != ""):
//     oc-discovery routes this to the TARGET peer (compute host) → InitializeAsTarget.
type MinioCredentialEvent struct {
	ExecutionsID string `json:"executions_id"`  // execution this credential set belongs to
	MinioID      string `json:"minio_id"`       // ID of the Minio storage resource being provisioned
	Access       string `json:"access"`         // service-account access key; empty in Phase 1, set in Phase 2
	Secret       string `json:"secret"`         // service-account secret key paired with Access
	SourcePeerID string `json:"source_peer_id"` // peer hosting the Minio instance
	DestPeerID   string `json:"dest_peer_id"`   // peer running the compute workload
	URL          string `json:"url"`            // Minio endpoint URL, used by the target to build its ConfigMap
	// OriginID is the peer that initiated the provisioning request.
	// The PB_CONSIDERS response is routed back to this peer.
	OriginID string `json:"origin_id"`
}
|
||||
|
||||
// minioConsidersPayload is the PB_CONSIDERS payload emitted after minio provisioning.
type minioConsidersPayload struct {
	OriginID     string  `json:"origin_id"`        // peer that initiated the request; the response is routed back to it
	ExecutionsID string  `json:"executions_id"`    // execution the provisioning result refers to
	Secret       string  `json:"secret,omitempty"` // provisioned secret key; empty when provisioning failed
	Error        *string `json:"error,omitempty"`  // nil on success, otherwise the provisioning error message
}
|
||||
|
||||
// emitConsiders publishes a PB_CONSIDERS back to OriginID with the result of
// the minio provisioning. secret is the provisioned credential; err is nil on success.
//
// The publish is fire-and-forget: it runs in its own goroutine and delivery
// failures are never reported to the caller. Both json.Marshal errors are
// deliberately ignored — the payloads contain only plain string/pointer
// fields, so marshalling cannot fail in practice.
func emitConsiders(executionsID, originID, secret string, provErr error) {
	// Convert the error to *string so that "no error" serialises as an
	// absent field rather than an empty string.
	var errStr *string
	if provErr != nil {
		s := provErr.Error()
		errStr = &s
	}
	payload, _ := json.Marshal(minioConsidersPayload{
		OriginID:     originID,
		ExecutionsID: executionsID,
		Secret:       secret,
		Error:        errStr,
	})
	// Wrap the payload in a PropalgationMessage so oc-discovery can route it
	// to OriginID.
	b, _ := json.Marshal(&tools.PropalgationMessage{
		DataType: tools.STORAGE_RESOURCE.EnumIndex(),
		Action:   tools.PB_CONSIDERS,
		Payload:  payload,
	})
	go tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
		FromApp:  "oc-datacenter",
		Datatype: -1,
		Method:   int(tools.PROPALGATION_EVENT),
		Payload:  b,
	})
}
|
||||
|
||||
// MinioSetter carries the execution context for a Minio credential provisioning.
// A zero MinioSetter is not useful; construct one with NewMinioSetter.
type MinioSetter struct {
	ExecutionsID string // used as both the bucket name and the K8s namespace suffix
	MinioID      string // ID of the Minio storage resource
}
|
||||
|
||||
func NewMinioSetter(execID, minioID string) *MinioSetter {
|
||||
return &MinioSetter{ExecutionsID: execID, MinioID: minioID}
|
||||
}
|
||||
|
||||
// InitializeAsSource is called on the peer that hosts the Minio instance.
|
||||
//
|
||||
// It:
|
||||
// 1. Looks up the live-storage endpoint URL for MinioID.
|
||||
// 2. Creates a scoped service account (access + secret limited to the execution bucket).
|
||||
// 3. Creates the execution bucket.
|
||||
// 4. If source and dest are the same peer, calls InitializeAsTarget directly.
|
||||
// Otherwise, publishes a MinioCredentialEvent via NATS (Phase 2) so that
|
||||
// oc-discovery can route the credentials to the compute peer.
|
||||
func (m *MinioSetter) InitializeAsSource(ctx context.Context, localPeerID, destPeerID, originID string) {
|
||||
logger := oclib.GetLogger()
|
||||
|
||||
url, err := m.loadMinioURL(localPeerID)
|
||||
if err != nil {
|
||||
logger.Error().Msg("MinioSetter.InitializeAsSource: " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
service := NewMinioService(url)
|
||||
if err := service.CreateClient(); err != nil {
|
||||
logger.Error().Msg("MinioSetter.InitializeAsSource: failed to create admin client: " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
access, secret, err := service.CreateCredentials(m.ExecutionsID)
|
||||
if err != nil {
|
||||
logger.Error().Msg("MinioSetter.InitializeAsSource: failed to create service account: " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if err := service.CreateBucket(m.MinioID, m.ExecutionsID); err != nil {
|
||||
logger.Error().Msg("MinioSetter.InitializeAsSource: failed to create bucket: " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
logger.Info().Msg("MinioSetter.InitializeAsSource: bucket and service account ready for " + m.ExecutionsID)
|
||||
|
||||
event := MinioCredentialEvent{
|
||||
ExecutionsID: m.ExecutionsID,
|
||||
MinioID: m.MinioID,
|
||||
Access: access,
|
||||
Secret: secret,
|
||||
SourcePeerID: localPeerID,
|
||||
DestPeerID: destPeerID,
|
||||
OriginID: originID,
|
||||
}
|
||||
|
||||
if destPeerID == localPeerID {
|
||||
// Same peer: store the secret locally without going through NATS.
|
||||
m.InitializeAsTarget(ctx, event)
|
||||
return
|
||||
}
|
||||
|
||||
// Cross-peer: publish credentials (Phase 2) so oc-discovery routes them to the compute peer.
|
||||
payload, err := json.Marshal(event)
|
||||
if err != nil {
|
||||
logger.Error().Msg("MinioSetter.InitializeAsSource: failed to marshal credential event: " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if b, err := json.Marshal(&tools.PropalgationMessage{
|
||||
DataType: -1,
|
||||
Action: tools.PB_MINIO_CONFIG,
|
||||
Payload: payload,
|
||||
}); err == nil {
|
||||
go tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
|
||||
FromApp: "oc-datacenter",
|
||||
Datatype: -1,
|
||||
User: "",
|
||||
Method: int(tools.PROPALGATION_EVENT),
|
||||
Payload: b,
|
||||
})
|
||||
logger.Info().Msg("MinioSetter.InitializeAsSource: credentials published via NATS for " + m.ExecutionsID)
|
||||
}
|
||||
}
|
||||
|
||||
// InitializeAsTarget is called on the peer that runs the compute workload.
|
||||
//
|
||||
// It stores the Minio credentials received from the source peer (via NATS or directly)
|
||||
// as a Kubernetes secret inside the execution namespace, making them available to pods.
|
||||
func (m *MinioSetter) InitializeAsTarget(ctx context.Context, event MinioCredentialEvent) {
|
||||
logger := oclib.GetLogger()
|
||||
|
||||
k, err := tools.NewKubernetesService(
|
||||
conf.GetConfig().KubeHost+":"+conf.GetConfig().KubePort,
|
||||
conf.GetConfig().KubeCA, conf.GetConfig().KubeCert, conf.GetConfig().KubeData,
|
||||
)
|
||||
if err != nil {
|
||||
logger.Error().Msg("MinioSetter.InitializeAsTarget: failed to create k8s service: " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if err := k.CreateSecret(ctx, event.MinioID, event.ExecutionsID, event.Access, event.Secret); err != nil {
|
||||
logger.Error().Msg("MinioSetter.InitializeAsTarget: failed to create k8s secret: " + err.Error())
|
||||
emitConsiders(event.ExecutionsID, event.OriginID, "", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := NewMinioService(event.URL).CreateMinioConfigMap(event.MinioID, event.ExecutionsID, event.URL); err == nil {
|
||||
logger.Error().Msg("MinioSetter.InitializeAsTarget: failed to create config map: " + err.Error())
|
||||
emitConsiders(event.ExecutionsID, event.OriginID, "", err)
|
||||
return
|
||||
}
|
||||
|
||||
logger.Info().Msg("MinioSetter.InitializeAsTarget: Minio credentials stored in namespace " + event.ExecutionsID)
|
||||
emitConsiders(event.ExecutionsID, event.OriginID, event.Secret, nil)
|
||||
}
|
||||
|
||||
// MinioDeleteEvent is the NATS payload used to tear down Minio resources.
// It mirrors MinioCredentialEvent but carries the access key for revocation.
type MinioDeleteEvent struct {
	ExecutionsID string `json:"executions_id"`  // execution whose Minio resources are being torn down
	MinioID      string `json:"minio_id"`       // ID of the Minio storage resource
	Access       string `json:"access"`         // service account access key to revoke on the Minio host
	SourcePeerID string `json:"source_peer_id"` // peer hosting the Minio instance
	DestPeerID   string `json:"dest_peer_id"`   // peer running the compute workload
	OriginID     string `json:"origin_id"`      // peer that initiated the teardown request
}
|
||||
|
||||
// TeardownAsTarget is called on the peer that runs the compute workload.
// It reads the stored access key from the K8s secret, then removes both the secret
// and the artifact-repository ConfigMap from the execution namespace.
// For same-peer deployments it calls TeardownAsSource directly.
//
// NOTE(review): the original doc also claimed a MinioDeleteEvent is published
// via NATS (PB_DELETE) for the cross-peer case, but no publish is visible in
// this function — confirm the PB_DELETE publish lives in the caller.
func (m *MinioSetter) TeardownAsTarget(ctx context.Context, event MinioDeleteEvent) {
	logger := oclib.GetLogger()

	// Build a Kubernetes client from the datacenter configuration.
	k, err := tools.NewKubernetesService(
		conf.GetConfig().KubeHost+":"+conf.GetConfig().KubePort,
		conf.GetConfig().KubeCA, conf.GetConfig().KubeCert, conf.GetConfig().KubeData,
	)
	if err != nil {
		logger.Error().Msg("MinioSetter.TeardownAsTarget: failed to create k8s service: " + err.Error())
		emitConsiders(event.ExecutionsID, event.OriginID, "", err)
		return
	}

	// Read the access key from the K8s secret before deleting it, so the
	// same-peer path below can pass it to TeardownAsSource for revocation.
	// Assumes the secret is named "<MinioID>-secret-s3" (matches the
	// artifact-repository template) and stores the key under "access-key" —
	// TODO confirm the data key matches what CreateSecret writes.
	accessKey := event.Access
	if accessKey == "" {
		if secret, err := k.Set.CoreV1().Secrets(event.ExecutionsID).Get(
			ctx, event.MinioID+"-secret-s3", metav1.GetOptions{},
		); err == nil {
			accessKey = string(secret.Data["access-key"])
		}
	}

	// Delete K8s credentials secret. Failures are logged but teardown
	// continues, so partial cleanup still removes as much as possible.
	if err := k.Set.CoreV1().Secrets(event.ExecutionsID).Delete(
		ctx, event.MinioID+"-secret-s3", metav1.DeleteOptions{},
	); err != nil {
		logger.Error().Msg("MinioSetter.TeardownAsTarget: failed to delete secret: " + err.Error())
	}

	// Delete artifact-repository ConfigMap. The endpoint URL is irrelevant
	// for deletion, hence the empty-string MinioService.
	if err := NewMinioService("").DeleteMinioConfigMap(event.MinioID, event.ExecutionsID); err != nil {
		logger.Error().Msg("MinioSetter.TeardownAsTarget: failed to delete configmap: " + err.Error())
	}

	logger.Info().Msg("MinioSetter.TeardownAsTarget: K8s resources removed for " + event.ExecutionsID)

	// For same-peer deployments the source cleanup runs directly here so the
	// caller (REMOVE_EXECUTION handler) doesn't have to distinguish roles.
	if event.SourcePeerID == event.DestPeerID {
		event.Access = accessKey
		m.TeardownAsSource(ctx, event)
	}
}
|
||||
|
||||
// TeardownAsSource is called on the peer that hosts the Minio instance.
|
||||
// It revokes the scoped service account and removes the execution bucket.
|
||||
func (m *MinioSetter) TeardownAsSource(ctx context.Context, event MinioDeleteEvent) {
|
||||
logger := oclib.GetLogger()
|
||||
|
||||
url, err := m.loadMinioURL(event.SourcePeerID)
|
||||
if err != nil {
|
||||
logger.Error().Msg("MinioSetter.TeardownAsSource: " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
svc := NewMinioService(url)
|
||||
if err := svc.CreateClient(); err != nil {
|
||||
logger.Error().Msg("MinioSetter.TeardownAsSource: failed to create admin client: " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if event.Access != "" {
|
||||
if err := svc.DeleteCredentials(event.Access); err != nil {
|
||||
logger.Error().Msg("MinioSetter.TeardownAsSource: failed to delete service account: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
if err := svc.DeleteBucket(event.MinioID, event.ExecutionsID); err != nil {
|
||||
logger.Error().Msg("MinioSetter.TeardownAsSource: failed to delete bucket: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info().Msg("MinioSetter.TeardownAsSource: Minio resources removed for " + event.ExecutionsID)
|
||||
}
|
||||
|
||||
// loadMinioURL searches through all live storages accessible by peerID to find
|
||||
// the one that references MinioID, and returns its endpoint URL.
|
||||
func (m *MinioSetter) loadMinioURL(peerID string) (string, error) {
|
||||
res := oclib.NewRequest(oclib.LibDataEnum(oclib.LIVE_STORAGE), "", peerID, []string{}, nil).LoadAll(false)
|
||||
if res.Err != "" {
|
||||
return "", fmt.Errorf("loadMinioURL: failed to load live storages: %s", res.Err)
|
||||
}
|
||||
for _, dbo := range res.Data {
|
||||
l := dbo.(*live.LiveStorage)
|
||||
if slices.Contains(l.ResourcesID, m.MinioID) {
|
||||
return l.Source, nil
|
||||
}
|
||||
|
||||
}
|
||||
return "", fmt.Errorf("loadMinioURL: no live storage found for minio ID %s", m.MinioID)
|
||||
}
|
||||
100
infrastructure/monitor/PROMETHEUS_ANALYSIS.md
Normal file
100
infrastructure/monitor/PROMETHEUS_ANALYSIS.md
Normal file
@@ -0,0 +1,100 @@
|
||||
# Analyse de `infrastructure/prometheus.go`
|
||||
|
||||
## Ce que fait le fichier
|
||||
|
||||
Ce fichier implémente un service de monitoring qui interroge une instance **Prometheus** pour collecter des métriques de conteneurs Kubernetes associés à une réservation (Booking).
|
||||
|
||||
### Structures de données
|
||||
|
||||
| Struct | Role |
|
||||
|---|---|
|
||||
| `MetricsSnapshot` | Snapshot de métriques associé à une origine (source). **Note : cette struct locale est déclarée mais jamais utilisée** — le code utilise en réalité `models.MetricsSnapshot` de oc-lib. |
|
||||
| `Metric` | Paire nom/valeur d'une métrique. **Même remarque** — le code utilise `models.Metric`. |
|
||||
| `PrometheusResponse` | Mapping de la réponse JSON de l'API Prometheus `/api/v1/query`. |
|
||||
|
||||
### Métriques collectées (`queriesMetrics`)
|
||||
|
||||
| # | Requête PromQL | Mesure |
|
||||
|---|---|---|
|
||||
| 1 | `rate(container_cpu_usage_seconds_total{namespace}[1m]) * 100` | Utilisation CPU (%) |
|
||||
| 2 | `container_memory_usage_bytes{namespace}` | Mémoire utilisée (bytes) |
|
||||
| 3 | `container_fs_usage_bytes / container_fs_limit_bytes * 100` | Utilisation disque (%) |
|
||||
| 4 | `DCGM_FI_DEV_GPU_UTIL{namespace}` | Utilisation GPU (NVIDIA DCGM) |
|
||||
| 5 | `rate(container_fs_reads_bytes_total[1m])` | Débit lecture disque (bytes/s) |
|
||||
| 6 | `rate(container_fs_writes_bytes_total[1m])` | Débit écriture disque (bytes/s) |
|
||||
| 7 | `rate(container_network_receive_bytes_total[1m])` | Bande passante réseau entrante (bytes/s) |
|
||||
| 8 | `rate(container_network_transmit_bytes_total[1m])` | Bande passante réseau sortante (bytes/s) |
|
||||
| 9 | `rate(http_requests_total[1m])` | Requêtes HTTP/s |
|
||||
| 10 | `rate(http_requests_total{status=~"5.."}[1m]) / rate(http_requests_total[1m]) * 100` | Taux d'erreur HTTP 5xx (%) |
|
||||
|
||||
Métriques commentées (non actives) : `system_load_average`, `system_network_latency_ms`, `app_mean_time_to_repair_seconds`, `app_mean_time_between_failure_seconds`.
|
||||
|
||||
### Méthodes
|
||||
|
||||
#### `queryPrometheus(promURL, expr, namespace) Metric`
|
||||
- Construit une requête GET vers `/api/v1/query` de Prometheus.
|
||||
- Injecte le namespace dans l'expression PromQL via `fmt.Sprintf`.
|
||||
- Parse la réponse JSON et extrait la première valeur du premier résultat.
|
||||
- Retourne `-1` si aucun résultat.
|
||||
|
||||
#### `Call(book, user, peerID, groups) (Booking, map[string]MetricsSnapshot)`
|
||||
- Charge la ressource de calcul (`ComputeResource`) liée au booking.
|
||||
- Pour chaque instance de la ressource, cherche le `LiveDatacenter` correspondant.
|
||||
- Lance en **goroutine** (parallèle) l'exécution de toutes les requêtes PromQL pour chaque datacenter ayant un `MonitorPath`.
|
||||
- Attend toutes les goroutines (`sync.WaitGroup`), puis retourne les métriques groupées par instance.
|
||||
|
||||
#### `Stream(bookingID, interval, user, peerID, groups, websocket)`
|
||||
- Boucle de monitoring en continu jusqu'à `ExpectedEndDate` du booking ou signal de kill.
|
||||
- À chaque tick (`interval`), appelle `Call()` dans une goroutine.
|
||||
- Envoie les métriques en temps réel via **WebSocket**.
|
||||
- Accumule les métriques en mémoire et les persiste dans le booking tous les `max` (100) cycles.
|
||||
- Supporte un mécanisme de kill via la variable globale `Kill`.
|
||||
|
||||
---
|
||||
|
||||
## Problèmes et points d'attention
|
||||
|
||||
### Bugs potentiels
|
||||
|
||||
1. **Race condition dans `Stream`** — Les variables `mets`, `bookIDS`, `book` sont partagées entre la boucle principale et les goroutines lancées à chaque tick, **sans synchronisation** (pas de mutex). Si `interval` est court, plusieurs goroutines peuvent écrire simultanément dans `mets` et `bookIDS`.
|
||||
|
||||
2. **Race condition sur `Kill`** — La variable globale `Kill` est lue dans la boucle sans verrouiller `LockKill`. Le mutex n'est utilisé que pour l'écriture.
|
||||
|
||||
3. **Structs locales inutilisées** — `MetricsSnapshot` et `Metric` (lignes 22-31) sont déclarées localement mais le code utilise `models.MetricsSnapshot` et `models.Metric`. Code mort à nettoyer.
|
||||
|
||||
4. **Requête PromQL avec double placeholder** — La requête filesystem (ligne 47) contient deux `%s` mais `queryPrometheus` ne fait qu'un seul `fmt.Sprintf(expr, namespace)`. Cela provoque un **`%!s(MISSING)`** dans la requête. Il faut passer le namespace deux fois ou réécrire la fonction.
|
||||
|
||||
5. **Pas de timeout HTTP** — `http.Get()` utilise le client par défaut sans timeout. Un Prometheus lent peut bloquer indéfiniment.
|
||||
|
||||
6. **Pas de gestion d'erreur sur `WriteJSON`** — Si le WebSocket est fermé côté client, l'écriture échoue silencieusement.
|
||||
|
||||
### Améliorations possibles
|
||||
|
||||
#### Fiabilité
|
||||
- **Ajouter un `context.Context`** à `queryPrometheus` et `Call` pour supporter les timeouts et l'annulation.
|
||||
- **Utiliser un `http.Client` avec timeout** au lieu de `http.Get`.
|
||||
- **Protéger les accès concurrents** dans `Stream` avec un `sync.Mutex` sur `mets`/`bookIDS`.
|
||||
- **Remplacer la variable globale `Kill`** par un `context.WithCancel` ou un channel, plus idiomatique en Go.
|
||||
|
||||
#### Métriques supplémentaires envisageables
|
||||
- `container_cpu_cfs_throttled_seconds_total` — Throttling CPU (le container est bridé).
|
||||
- `kube_pod_container_status_restarts_total` — Nombre de restarts (instabilité).
|
||||
- `container_memory_working_set_bytes` — Mémoire réelle utilisée (exclut le cache, plus précis que `memory_usage_bytes`).
|
||||
- `kube_pod_status_phase` — Phase du pod (Running, Pending, Failed...).
|
||||
- `container_oom_events_total` ou `kube_pod_container_status_last_terminated_reason` — Détection des OOM kills.
|
||||
- `kubelet_volume_stats_used_bytes` / `kubelet_volume_stats_capacity_bytes` — Utilisation des PVC.
|
||||
- `DCGM_FI_DEV_MEM_COPY_UTIL` — Utilisation mémoire GPU.
|
||||
- `DCGM_FI_DEV_GPU_TEMP` — Température GPU.
|
||||
- `node_cpu_seconds_total` / `node_memory_MemAvailable_bytes` — Métriques au niveau du noeud (vue globale).
|
||||
|
||||
#### Architecture
|
||||
- **Range queries** (`/api/v1/query_range`) — Actuellement seul l'instant query est utilisé. Pour le streaming sur une période, `query_range` permettrait de récupérer des séries temporelles complètes et de calculer des moyennes/percentiles.
|
||||
- **Labels dans les résultats** — Actuellement seule la première série est lue (`Result[0]`). On perd l'information si plusieurs pods/containers matchent. Agréger ou renvoyer toutes les séries.
|
||||
- **Noms de métriques lisibles** — Mapper les expressions PromQL vers des noms humains (`cpu_usage_percent`, `memory_bytes`, etc.) au lieu de stocker l'expression brute comme nom.
|
||||
- **Health check Prometheus** — Ajouter une méthode pour vérifier que Prometheus est accessible (`/-/healthy`).
|
||||
|
||||
---
|
||||
|
||||
## Résumé
|
||||
|
||||
Le fichier est **fonctionnel** pour un cas d'usage basique (collecte one-shot + streaming WebSocket), mais présente des **race conditions** dans `Stream`, un **bug sur la requête filesystem** (double `%s`), et du **code mort**. Les améliorations prioritaires sont la correction des accès concurrents et l'ajout de timeouts HTTP.
|
||||
158
infrastructure/nats.go
Normal file
158
infrastructure/nats.go
Normal file
@@ -0,0 +1,158 @@
|
||||
package infrastructure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"oc-datacenter/infrastructure/minio"
|
||||
"sync"
|
||||
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
)
|
||||
|
||||
// roleWaiters maps executionID → channel expecting the role-assignment message from OC discovery.
// NOTE(review): no reads or writes of roleWaiters are visible in this file's
// handlers — confirm it is still used elsewhere, otherwise remove it.
var roleWaiters sync.Map

// ArgoKubeEvent carries the peer-routing metadata for a resource provisioning event.
//
// The ARGO_KUBE_EVENT handler dispatches on Type: STORAGE_RESOURCE selects the
// Minio credential-provisioning path (MinioID is then expected to be set);
// any other Type selects the Admiralty kubeconfig-provisioning path.
type ArgoKubeEvent struct {
	ExecutionsID string         `json:"executions_id"`      // execution the provisioning task belongs to
	DestPeerID   string         `json:"dest_peer_id"`       // peer that will run the compute workload
	Type         tools.DataType `json:"data_type"`          // STORAGE_RESOURCE → Minio path; otherwise Admiralty
	SourcePeerID string         `json:"source_peer_id"`     // peer hosting the resource being provisioned
	MinioID      string         `json:"minio_id,omitempty"` // Minio storage resource ID (empty for Admiralty events)
	// OriginID is the peer that initiated the request; the PB_CONSIDERS
	// response is routed back to this peer once provisioning completes.
	OriginID string `json:"origin_id,omitempty"`
}
|
||||
|
||||
// ListenNATS starts all NATS subscriptions for the infrastructure layer.
// Must be launched in a goroutine from main (ListenNats blocks).
func ListenNATS() {
	tools.NewNATSCaller().ListenNats(map[tools.NATSMethod]func(tools.NATSResponse){
		// ─── ARGO_KUBE_EVENT ────────────────────────────────────────────────────────
		// Triggered by oc-discovery to notify this peer of a provisioning task.
		// Dispatches to Admiralty or Minio based on the event's Type field
		// (STORAGE_RESOURCE → Minio; anything else → Admiralty).
		tools.ARGO_KUBE_EVENT: func(resp tools.NATSResponse) {
			argo := &ArgoKubeEvent{}
			if err := json.Unmarshal(resp.Payload, argo); err != nil {
				// Malformed payload: dropped silently (no reply channel here).
				return
			}

			if argo.Type == tools.STORAGE_RESOURCE {
				// ── Minio credential provisioning ──────────────────────────────
				setter := minio.NewMinioSetter(argo.ExecutionsID, argo.MinioID)
				if argo.SourcePeerID == argo.DestPeerID {
					// Same peer: source creates credentials and immediately stores them.
					go setter.InitializeAsSource(context.Background(), argo.SourcePeerID, argo.DestPeerID, argo.OriginID)
				} else {
					// Different peers: publish Phase-1 PB_MINIO_CONFIG (Access == "")
					// so oc-discovery routes the role-assignment to the Minio host.
					phase1 := minio.MinioCredentialEvent{
						ExecutionsID: argo.ExecutionsID,
						MinioID:      argo.MinioID,
						SourcePeerID: argo.SourcePeerID,
						DestPeerID:   argo.DestPeerID,
						OriginID:     argo.OriginID,
					}
					// Marshal errors are silently dropped: both payloads are
					// plain structs, so failure is not expected in practice.
					if b, err := json.Marshal(phase1); err == nil {
						if b2, err := json.Marshal(&tools.PropalgationMessage{
							Payload: b,
							Action:  tools.PB_MINIO_CONFIG,
						}); err == nil {
							go tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
								FromApp:  "oc-datacenter",
								Datatype: -1,
								User:     resp.User,
								Method:   int(tools.PROPALGATION_EVENT),
								Payload:  b2,
							})
						}
					}
				}
			} else {
				// ── Admiralty kubeconfig provisioning (existing behaviour) ──────
				if argo.SourcePeerID == argo.DestPeerID {
					go NewAdmiraltySetter(argo.ExecutionsID).InitializeAsSource(
						context.Background(), argo.SourcePeerID, argo.DestPeerID, argo.OriginID)
				} else if b, err := json.Marshal(argo); err == nil {
					if b2, err := json.Marshal(&tools.PropalgationMessage{
						Payload: b,
						Action:  tools.PB_ADMIRALTY_CONFIG,
					}); err == nil {
						go tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
							FromApp:  "oc-datacenter",
							Datatype: -1,
							User:     resp.User,
							Method:   int(tools.PROPALGATION_EVENT),
							Payload:  b2,
						})
					}
				}
			}
		},

		// ─── PROPALGATION_EVENT ─────────────────────────────────────────────────────
		// Routes messages forwarded by oc-discovery to the right handler.
		// NOTE(review): unlike the ARGO_KUBE_EVENT branches, the Admiralty and
		// Minio phase handlers below run synchronously on the NATS callback
		// (only PB_DELETE spawns goroutines) — confirm this blocking is intended.
		tools.PROPALGATION_EVENT: func(resp tools.NATSResponse) {
			// Only messages relayed by oc-discovery are trusted on this subject.
			if resp.FromApp != "oc-discovery" {
				return
			}
			var prop tools.PropalgationMessage
			if err := json.Unmarshal(resp.Payload, &prop); err != nil {
				return
			}
			switch prop.Action {

			// ── Admiralty ──────────────────────────────────────────────────────
			case tools.PB_ADMIRALTY_CONFIG:
				kubeconfigEvent := KubeconfigEvent{}
				if err := json.Unmarshal(prop.Payload, &kubeconfigEvent); err == nil {
					if kubeconfigEvent.Kubeconfig != "" {
						// Phase 2: kubeconfig present → this peer is the TARGET (scheduler).
						NewAdmiraltySetter(kubeconfigEvent.ExecutionsID).InitializeAsTarget(
							context.Background(), kubeconfigEvent)
					} else {
						// Phase 1: no kubeconfig → this peer is the SOURCE (compute).
						NewAdmiraltySetter(kubeconfigEvent.ExecutionsID).InitializeAsSource(
							context.Background(), kubeconfigEvent.SourcePeerID, kubeconfigEvent.DestPeerID, kubeconfigEvent.OriginID)
					}
				}

			// ── Minio ──────────────────────────────────────────────────────────
			case tools.PB_MINIO_CONFIG:
				minioEvent := minio.MinioCredentialEvent{}
				if err := json.Unmarshal(prop.Payload, &minioEvent); err == nil {
					if minioEvent.Access != "" {
						// Phase 2: credentials present → this peer is the TARGET (compute).
						minio.NewMinioSetter(minioEvent.ExecutionsID, minioEvent.MinioID).InitializeAsTarget(
							context.Background(), minioEvent)
					} else {
						// Phase 1: no credentials → this peer is the SOURCE (Minio host).
						minio.NewMinioSetter(minioEvent.ExecutionsID, minioEvent.MinioID).InitializeAsSource(
							context.Background(), minioEvent.SourcePeerID, minioEvent.DestPeerID, minioEvent.OriginID)
					}
				}

			// ── Deletion (routed by oc-discovery to the source peer) ───────────
			case tools.PB_DELETE:
				argo := &ArgoKubeEvent{}
				if err := json.Unmarshal(prop.Payload, argo); err != nil || argo.ExecutionsID == "" {
					return
				}
				if argo.Type == tools.STORAGE_RESOURCE {
					// Minio source teardown: revoke credentials + delete bucket.
					// The payload is re-unmarshalled as a MinioDeleteEvent to
					// pick up the Access key field ArgoKubeEvent lacks.
					deleteEvent := minio.MinioDeleteEvent{}
					if err := json.Unmarshal(prop.Payload, &deleteEvent); err == nil {
						go minio.NewMinioSetter(deleteEvent.ExecutionsID, deleteEvent.MinioID).
							TeardownAsSource(context.Background(), deleteEvent)
					}
				} else {
					// Admiralty source teardown: delete AdmiraltySource + namespace.
					go NewAdmiraltySetter(argo.ExecutionsID).TeardownAsSource(context.Background())
				}
			}
		},
	})
}
|
||||
5
main.go
5
main.go
@@ -3,6 +3,7 @@ package main
|
||||
import (
|
||||
"encoding/base64"
|
||||
"oc-datacenter/conf"
|
||||
"oc-datacenter/infrastructure"
|
||||
_ "oc-datacenter/routers"
|
||||
"os"
|
||||
|
||||
@@ -36,5 +37,9 @@ func main() {
|
||||
conf.GetConfig().MinioRootKey = o.GetStringDefault("MINIO_ADMIN_ACCESS", "")
|
||||
conf.GetConfig().MinioRootSecret = o.GetStringDefault("MINIO_ADMIN_SECRET", "")
|
||||
oclib.InitAPI(appname)
|
||||
|
||||
go infrastructure.ListenNATS()
|
||||
go infrastructure.WatchBookings()
|
||||
|
||||
beego.Run()
|
||||
}
|
||||
|
||||
@@ -7,24 +7,6 @@ import (
|
||||
|
||||
func init() {
|
||||
|
||||
beego.GlobalControllerRouter["oc-datacenter/controllers:AdmiraltyController"] = append(beego.GlobalControllerRouter["oc-datacenter/controllers:AdmiraltyController"],
|
||||
beego.ControllerComments{
|
||||
Method: "GetAdmiraltyKubeconfig",
|
||||
Router: `/kubeconfig/:execution`,
|
||||
AllowHTTPMethods: []string{"get"},
|
||||
MethodParams: param.Make(),
|
||||
Filters: nil,
|
||||
Params: nil})
|
||||
|
||||
beego.GlobalControllerRouter["oc-datacenter/controllers:AdmiraltyController"] = append(beego.GlobalControllerRouter["oc-datacenter/controllers:AdmiraltyController"],
|
||||
beego.ControllerComments{
|
||||
Method: "GetNodeReady",
|
||||
Router: `/node/:execution/:peer`,
|
||||
AllowHTTPMethods: []string{"get"},
|
||||
MethodParams: param.Make(),
|
||||
Filters: nil,
|
||||
Params: nil})
|
||||
|
||||
beego.GlobalControllerRouter["oc-datacenter/controllers:AdmiraltyController"] = append(beego.GlobalControllerRouter["oc-datacenter/controllers:AdmiraltyController"],
|
||||
beego.ControllerComments{
|
||||
Method: "GetKubeSecret",
|
||||
@@ -34,33 +16,6 @@ func init() {
|
||||
Filters: nil,
|
||||
Params: nil})
|
||||
|
||||
beego.GlobalControllerRouter["oc-datacenter/controllers:AdmiraltyController"] = append(beego.GlobalControllerRouter["oc-datacenter/controllers:AdmiraltyController"],
|
||||
beego.ControllerComments{
|
||||
Method: "CreateKubeSecret",
|
||||
Router: `/secret/:execution/:peer`,
|
||||
AllowHTTPMethods: []string{"post"},
|
||||
MethodParams: param.Make(),
|
||||
Filters: nil,
|
||||
Params: nil})
|
||||
|
||||
beego.GlobalControllerRouter["oc-datacenter/controllers:AdmiraltyController"] = append(beego.GlobalControllerRouter["oc-datacenter/controllers:AdmiraltyController"],
|
||||
beego.ControllerComments{
|
||||
Method: "CreateAdmiraltySource",
|
||||
Router: `/source/:execution`,
|
||||
AllowHTTPMethods: []string{"post"},
|
||||
MethodParams: param.Make(),
|
||||
Filters: nil,
|
||||
Params: nil})
|
||||
|
||||
beego.GlobalControllerRouter["oc-datacenter/controllers:AdmiraltyController"] = append(beego.GlobalControllerRouter["oc-datacenter/controllers:AdmiraltyController"],
|
||||
beego.ControllerComments{
|
||||
Method: "CreateAdmiraltyTarget",
|
||||
Router: `/target/:execution/:peer`,
|
||||
AllowHTTPMethods: []string{"post"},
|
||||
MethodParams: param.Make(),
|
||||
Filters: nil,
|
||||
Params: nil})
|
||||
|
||||
beego.GlobalControllerRouter["oc-datacenter/controllers:AdmiraltyController"] = append(beego.GlobalControllerRouter["oc-datacenter/controllers:AdmiraltyController"],
|
||||
beego.ControllerComments{
|
||||
Method: "GetAllTargets",
|
||||
|
||||
@@ -24,26 +24,11 @@ func init() {
|
||||
&controllers.SessionController{},
|
||||
),
|
||||
),
|
||||
beego.NSNamespace("/booking",
|
||||
beego.NSInclude(
|
||||
&controllers.BookingController{},
|
||||
),
|
||||
),
|
||||
beego.NSNamespace("/version",
|
||||
beego.NSInclude(
|
||||
&controllers.VersionController{},
|
||||
),
|
||||
),
|
||||
beego.NSNamespace("/admiralty",
|
||||
beego.NSInclude(
|
||||
&controllers.AdmiraltyController{},
|
||||
),
|
||||
),
|
||||
beego.NSNamespace("/minio",
|
||||
beego.NSInclude(
|
||||
&controllers.MinioController{},
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
beego.AddNamespace(ns)
|
||||
|
||||
Reference in New Issue
Block a user