package minio

import (
	"context"
	"encoding/json"
	"fmt"
	"slices"

	"oc-datacenter/conf"

	oclib "cloud.o-forge.io/core/oc-lib"
	"cloud.o-forge.io/core/oc-lib/models/live"
	"cloud.o-forge.io/core/oc-lib/tools"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// MinioCredentialEvent is the NATS payload used to transfer Minio credentials between peers.
//
// Two-phase protocol over PROPALGATION_EVENT (Action = PB_MINIO_CONFIG):
//   - Phase 1 – role assignment (Access == ""):
//     oc-discovery routes this to the SOURCE peer (Minio host) → InitializeAsSource.
//   - Phase 2 – credential delivery (Access != ""):
//     oc-discovery routes this to the TARGET peer (compute host) → InitializeAsTarget.
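//
// Illustrative payloads (all IDs and values below are invented for the example):
//
//	Phase 1: {"executions_id":"exec-42","minio_id":"minio-1","access":"","source_peer_id":"peer-src","dest_peer_id":"peer-dst","origin_id":"peer-origin"}
//	Phase 2: same fields, with "access", "secret" and "url" filled in by the source peer.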
type MinioCredentialEvent struct {
	ExecutionsID string `json:"executions_id"`
	MinioID      string `json:"minio_id"`
	Access       string `json:"access"`
	Secret       string `json:"secret"`
	SourcePeerID string `json:"source_peer_id"`
	DestPeerID   string `json:"dest_peer_id"`
	URL          string `json:"url"`
	// OriginID is the peer that initiated the provisioning request.
	// The PB_CONSIDERS response is routed back to this peer.
	OriginID string `json:"origin_id"`
}

// minioConsidersPayload is the PB_CONSIDERS payload emitted after minio provisioning.
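// On success only "secret" is set; on failure "error" carries the message, e.g.
// {"origin_id":"peer-origin","executions_id":"exec-42","error":"..."} (values illustrative).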
type minioConsidersPayload struct {
	OriginID     string  `json:"origin_id"`
	ExecutionsID string  `json:"executions_id"`
	Secret       string  `json:"secret,omitempty"`
	Error        *string `json:"error,omitempty"`
}

// emitConsiders publishes a PB_CONSIDERS back to OriginID with the result of
// the minio provisioning. secret is the provisioned credential; provErr is nil on success.
func emitConsiders(executionsID, originID, secret string, provErr error) {
	var errStr *string
	if provErr != nil {
		s := provErr.Error()
		errStr = &s
	}
	payload, _ := json.Marshal(minioConsidersPayload{
		OriginID:     originID,
		ExecutionsID: executionsID,
		Secret:       secret,
		Error:        errStr,
	})
	b, _ := json.Marshal(&tools.PropalgationMessage{
		DataType: tools.STORAGE_RESOURCE.EnumIndex(),
		Action:   tools.PB_CONSIDERS,
		Payload:  payload,
	})
	go tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
		FromApp:  "oc-datacenter",
		Datatype: -1,
		Method:   int(tools.PROPALGATION_EVENT),
		Payload:  b,
	})
}

// MinioSetter carries the execution context for a Minio credential provisioning.
type MinioSetter struct {
	ExecutionsID string // used as both the bucket name and the K8s namespace suffix
	MinioID      string // ID of the Minio storage resource
}

// NewMinioSetter returns a MinioSetter bound to the given execution and Minio resource IDs.
func NewMinioSetter(execID, minioID string) *MinioSetter {
	return &MinioSetter{ExecutionsID: execID, MinioID: minioID}
}

// InitializeAsSource is called on the peer that hosts the Minio instance.
//
// It:
//  1. Looks up the live-storage endpoint URL for MinioID.
//  2. Creates a scoped service account (access + secret limited to the execution bucket).
//  3. Creates the execution bucket.
//  4. If source and dest are the same peer, calls InitializeAsTarget directly.
//     Otherwise, publishes a MinioCredentialEvent via NATS (Phase 2) so that
//     oc-discovery can route the credentials to the compute peer.
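//
// Illustrative call on the source (Minio-hosting) peer (IDs invented):
//
//	NewMinioSetter("exec-42", "minio-1").InitializeAsSource(ctx, localPeerID, destPeerID, originID)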
func (m *MinioSetter) InitializeAsSource(ctx context.Context, localPeerID, destPeerID, originID string) {
	logger := oclib.GetLogger()

	url, err := m.loadMinioURL(localPeerID)
	if err != nil {
		logger.Error().Msg("MinioSetter.InitializeAsSource: " + err.Error())
		return
	}

	service := NewMinioService(url)
	if err := service.CreateClient(); err != nil {
		logger.Error().Msg("MinioSetter.InitializeAsSource: failed to create admin client: " + err.Error())
		return
	}

	access, secret, err := service.CreateCredentials(m.ExecutionsID)
	if err != nil {
		logger.Error().Msg("MinioSetter.InitializeAsSource: failed to create service account: " + err.Error())
		return
	}

	if err := service.CreateBucket(m.MinioID, m.ExecutionsID); err != nil {
		logger.Error().Msg("MinioSetter.InitializeAsSource: failed to create bucket: " + err.Error())
		return
	}

	logger.Info().Msg("MinioSetter.InitializeAsSource: bucket and service account ready for " + m.ExecutionsID)

	event := MinioCredentialEvent{
		ExecutionsID: m.ExecutionsID,
		MinioID:      m.MinioID,
		Access:       access,
		Secret:       secret,
		SourcePeerID: localPeerID,
		DestPeerID:   destPeerID,
		URL:          url, // Minio endpoint; InitializeAsTarget needs it to build the artifact-repository ConfigMap
		OriginID:     originID,
	}

	if destPeerID == localPeerID {
		// Same peer: store the secret locally without going through NATS.
		m.InitializeAsTarget(ctx, event)
		return
	}

	// Cross-peer: publish credentials (Phase 2) so oc-discovery routes them to the compute peer.
	payload, err := json.Marshal(event)
	if err != nil {
		logger.Error().Msg("MinioSetter.InitializeAsSource: failed to marshal credential event: " + err.Error())
		return
	}

	if b, err := json.Marshal(&tools.PropalgationMessage{
		DataType: -1,
		Action:   tools.PB_MINIO_CONFIG,
		Payload:  payload,
	}); err == nil {
		go tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
			FromApp:  "oc-datacenter",
			Datatype: -1,
			User:     "",
			Method:   int(tools.PROPALGATION_EVENT),
			Payload:  b,
		})
		logger.Info().Msg("MinioSetter.InitializeAsSource: credentials published via NATS for " + m.ExecutionsID)
	}
}

// InitializeAsTarget is called on the peer that runs the compute workload.
//
// It stores the Minio credentials received from the source peer (via NATS or directly)
// as a Kubernetes secret inside the execution namespace, making them available to pods.
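// The secret and artifact-repository ConfigMap created here are the objects that
// TeardownAsTarget later removes from the same namespace.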
func (m *MinioSetter) InitializeAsTarget(ctx context.Context, event MinioCredentialEvent) {
	logger := oclib.GetLogger()

	k, err := tools.NewKubernetesService(
		conf.GetConfig().KubeHost+":"+conf.GetConfig().KubePort,
		conf.GetConfig().KubeCA, conf.GetConfig().KubeCert, conf.GetConfig().KubeData,
	)
	if err != nil {
		logger.Error().Msg("MinioSetter.InitializeAsTarget: failed to create k8s service: " + err.Error())
		return
	}

	if err := k.CreateSecret(ctx, event.MinioID, event.ExecutionsID, event.Access, event.Secret); err != nil {
		logger.Error().Msg("MinioSetter.InitializeAsTarget: failed to create k8s secret: " + err.Error())
		emitConsiders(event.ExecutionsID, event.OriginID, "", err)
		return
	}

	if err := NewMinioService(event.URL).CreateMinioConfigMap(event.MinioID, event.ExecutionsID, event.URL); err != nil {
		logger.Error().Msg("MinioSetter.InitializeAsTarget: failed to create config map: " + err.Error())
		emitConsiders(event.ExecutionsID, event.OriginID, "", err)
		return
	}

	logger.Info().Msg("MinioSetter.InitializeAsTarget: Minio credentials stored in namespace " + event.ExecutionsID)
	emitConsiders(event.ExecutionsID, event.OriginID, event.Secret, nil)
}

// MinioDeleteEvent is the NATS payload used to tear down Minio resources.
// It mirrors MinioCredentialEvent but carries the access key for revocation.
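// Illustrative payload (values invented):
//
//	{"executions_id":"exec-42","minio_id":"minio-1","access":"AKxxxx","source_peer_id":"peer-src","dest_peer_id":"peer-dst","origin_id":"peer-origin"}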
type MinioDeleteEvent struct {
	ExecutionsID string `json:"executions_id"`
	MinioID      string `json:"minio_id"`
	Access       string `json:"access"` // service account access key to revoke on the Minio host
	SourcePeerID string `json:"source_peer_id"`
	DestPeerID   string `json:"dest_peer_id"`
	OriginID     string `json:"origin_id"`
}

// TeardownAsTarget is called on the peer that runs the compute workload.
// It reads the stored access key from the K8s secret, then removes both the secret
// and the artifact-repository ConfigMap from the execution namespace.
// For same-peer deployments it calls TeardownAsSource directly; otherwise it
// publishes a MinioDeleteEvent via NATS (PB_DELETE) so oc-discovery routes it to
// the Minio host peer.
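// The credentials secret is addressed by the name "<minio_id>-secret-s3"
// (e.g. a MinioID of "minio-1" maps to "minio-1-secret-s3"; the ID here is illustrative).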
func (m *MinioSetter) TeardownAsTarget(ctx context.Context, event MinioDeleteEvent) {
	logger := oclib.GetLogger()

	k, err := tools.NewKubernetesService(
		conf.GetConfig().KubeHost+":"+conf.GetConfig().KubePort,
		conf.GetConfig().KubeCA, conf.GetConfig().KubeCert, conf.GetConfig().KubeData,
	)
	if err != nil {
		logger.Error().Msg("MinioSetter.TeardownAsTarget: failed to create k8s service: " + err.Error())
		emitConsiders(event.ExecutionsID, event.OriginID, "", err)
		return
	}

	// Read the access key from the K8s secret before deleting it.
	accessKey := event.Access
	if accessKey == "" {
		if secret, err := k.Set.CoreV1().Secrets(event.ExecutionsID).Get(
			ctx, event.MinioID+"-secret-s3", metav1.GetOptions{},
		); err == nil {
			accessKey = string(secret.Data["access-key"])
		}
	}

	// Delete K8s credentials secret.
	if err := k.Set.CoreV1().Secrets(event.ExecutionsID).Delete(
		ctx, event.MinioID+"-secret-s3", metav1.DeleteOptions{},
	); err != nil {
		logger.Error().Msg("MinioSetter.TeardownAsTarget: failed to delete secret: " + err.Error())
	}

	// Delete artifact-repository ConfigMap.
	if err := NewMinioService("").DeleteMinioConfigMap(event.MinioID, event.ExecutionsID); err != nil {
		logger.Error().Msg("MinioSetter.TeardownAsTarget: failed to delete configmap: " + err.Error())
	}

	logger.Info().Msg("MinioSetter.TeardownAsTarget: K8s resources removed for " + event.ExecutionsID)

	// For same-peer deployments the source cleanup runs directly here so the
	// caller (REMOVE_EXECUTION handler) doesn't have to distinguish roles.
	if event.SourcePeerID == event.DestPeerID {
		event.Access = accessKey
		m.TeardownAsSource(ctx, event)
	}
}

// TeardownAsSource is called on the peer that hosts the Minio instance.
// It revokes the scoped service account and removes the execution bucket.
func (m *MinioSetter) TeardownAsSource(ctx context.Context, event MinioDeleteEvent) {
	logger := oclib.GetLogger()

	url, err := m.loadMinioURL(event.SourcePeerID)
	if err != nil {
		logger.Error().Msg("MinioSetter.TeardownAsSource: " + err.Error())
		return
	}

	svc := NewMinioService(url)
	if err := svc.CreateClient(); err != nil {
		logger.Error().Msg("MinioSetter.TeardownAsSource: failed to create admin client: " + err.Error())
		return
	}

	if event.Access != "" {
		if err := svc.DeleteCredentials(event.Access); err != nil {
			logger.Error().Msg("MinioSetter.TeardownAsSource: failed to delete service account: " + err.Error())
		}
	}

	if err := svc.DeleteBucket(event.MinioID, event.ExecutionsID); err != nil {
		logger.Error().Msg("MinioSetter.TeardownAsSource: failed to delete bucket: " + err.Error())
	}

	logger.Info().Msg("MinioSetter.TeardownAsSource: Minio resources removed for " + event.ExecutionsID)
}

// loadMinioURL searches through all live storages accessible by peerID to find
// the one that references MinioID, and returns its endpoint URL.
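// For example (values illustrative), a LiveStorage whose ResourcesID contains m.MinioID
// and whose Source is "http://minio.example.org:9000" resolves to that URL.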
func (m *MinioSetter) loadMinioURL(peerID string) (string, error) {
	res := oclib.NewRequest(oclib.LibDataEnum(oclib.LIVE_STORAGE), "", peerID, []string{}, nil).LoadAll(false)
	if res.Err != "" {
		return "", fmt.Errorf("loadMinioURL: failed to load live storages: %s", res.Err)
	}
	for _, dbo := range res.Data {
		l, ok := dbo.(*live.LiveStorage)
		if !ok {
			// Skip unexpected types instead of panicking on the assertion.
			continue
		}
		if slices.Contains(l.ResourcesID, m.MinioID) {
			return l.Source, nil
		}
	}
	return "", fmt.Errorf("loadMinioURL: no live storage found for minio ID %s", m.MinioID)
}