Added the methods that write the appropriate Argo annotations when a storage of type S3 is used as an artifact repository, with hardcoded credentials for now.
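In short: when a processing step is linked to a storage of type S3, the builder now decorates the step's Argo template with an S3 artifact annotation and points it at a Kubernetes secret holding the (for now hardcoded) credentials. As a rough sketch of the result — field names as they appear in this diff, all values hypothetical — a read link ends up producing something like:

    art := Artifact{
        Name: "my-storage-data-csv-input-read", // sanitized storage name + file name
        Path: "/inputs/data.csv",               // where the file materializes in the pod
        S3: &Key{
            Insecure: true,                       // temporary, per this diff
            Bucket:   "oc-bucket",                // hardcoded default for now
            EndPoint: "minio.example.local:9000", // the selected instance's Source
            Key:      "inputs/data.csv",          // object key, endpoint prefix stripped
            AccessKeySecret: &Secret{Name: "oc-secret-id", Key: "access-key"},
            SecretKeySecret: &Secret{Name: "oc-secret-id", Key: "secret-key"},
        },
    }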
@@ -18,7 +18,6 @@ import (
 	"cloud.o-forge.io/core/oc-lib/logs"
 	"cloud.o-forge.io/core/oc-lib/models/common/enum"
 	"cloud.o-forge.io/core/oc-lib/models/resources"
-	"cloud.o-forge.io/core/oc-lib/models/utils"
 	w "cloud.o-forge.io/core/oc-lib/models/workflow"
 	"cloud.o-forge.io/core/oc-lib/models/workflow/graph"
 	"github.com/nwtgck/go-fakelish"
@@ -195,23 +194,32 @@ func (b *ArgoBuilder) createArgoTemplates(namespace string,
 		template.Metadata.Labels["app"] = "oc-service-" + processing.GetName() // Construct the template for the k8s service and add a link in graph between k8s service and processing
 	}
+	volumes = b.addStorageAnnotations(id, template, namespace, volumes)
+	b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, *template)
+	return volumes, firstItems, lastItems
+}
+
+func (b *ArgoBuilder) addStorageAnnotations(id string, template *Template, namespace string, volumes []VolumeMount) []VolumeMount {
 	related := b.OriginWorkflow.GetByRelatedProcessing(id, b.OriginWorkflow.Graph.IsStorage) // Retrieve all of the storage nodes linked to the processing for which we create the template
 	for _, r := range related {
 		storage := r.Node.(*resources.StorageResource)
 		for _, linkToStorage := range r.Links {
 			for _, rw := range linkToStorage.StorageLinkInfos {
 				var art Artifact
-				artifactBaseName := strings.Join(strings.Split(storage.GetName(), " "), "-") + "-" + strings.Replace(rw.FileName,".","-",-1) // Parameter/Artifact name must consist of alpha-numeric characters, '_' or '-'
+				artifactBaseName := strings.Join(strings.Split(storage.GetName(), " "), "-") + "-" + strings.Replace(rw.FileName, ".", "-", -1) // Parameter/Artifact name must consist of alpha-numeric characters, '_' or '-'
 				if rw.Write {
 					art = Artifact{Path: template.ReplacePerEnv(rw.Source, linkToStorage.Env)} // When writing to the S3 storage, the Path element is the path to the file in the pod
 					art.Name = artifactBaseName + "-input-write"
 				} else {
-					art = Artifact{Path: template.ReplacePerEnv(rw.Destination + "/" + rw.FileName, linkToStorage.Env)} // When reading from the S3 storage, the Path element should be the destination of the file in the pod
+					art = Artifact{Path: template.ReplacePerEnv(rw.Destination+"/"+rw.FileName, linkToStorage.Env)} // When reading from the S3 storage, the Path element should be the destination of the file in the pod
 					art.Name = artifactBaseName + "-input-read"
 				}
+
+				if storage.StorageType == enum.S3 {
+					b.addS3annotations(&art, template, rw, linkToStorage, storage, namespace)
+				}
+
 				if rw.Write {
 					template.Outputs.Artifacts = append(template.Outputs.Artifacts, art)
 				} else {
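As an aside on the artifactBaseName line above: Argo requires parameter/artifact names to consist of alphanumeric characters, '_' or '-', so spaces in the storage name and dots in the file name are mapped to dashes. A minimal standalone sketch with hypothetical values:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        storageName := "my storage" // hypothetical storage name
        fileName := "data.csv"      // hypothetical file name
        base := strings.Join(strings.Split(storageName, " "), "-") + "-" + strings.Replace(fileName, ".", "-", -1)
        fmt.Println(base + "-input-read") // my-storage-data-csv-input-read
    }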
@@ -232,11 +240,11 @@ func (b *ArgoBuilder) createArgoTemplates(namespace string,
 				}, volumes)
 			}
 		}
-	b.Workflow.Spec.Templates = append(b.Workflow.Spec.Templates, *template)
-	return volumes, firstItems, lastItems
-}
+	return volumes
+}
+
+func (b *ArgoBuilder) addS3annotations(art *Artifact, template *Template, rw graph.StorageProcessingGraphLink, linkToStorage graph.GraphLink, storage *resources.StorageResource, namespace string) {
+	art.S3 = &Key{
+		// Key: template.ReplacePerEnv(rw.Destination+"/"+rw.FileName, linkToStorage.Env),
+		Insecure: true, // temporary
@@ -251,48 +259,57 @@ func (b *ArgoBuilder) addS3annotations(art *Artifact, template *Template, rw gra
 	// v0.2 : test if the storage.Source exists in the configMap and quit if not
 	// v1 : v0.2 + if it doesn't exist, edit/create the configMap with the response from the API call
 	if sel != nil {
-		b.addAuthInformation(sel, namespace, art)
+		b.addAuthInformation(storage, namespace, art)
 		art.S3.Bucket = "oc-bucket" // DEFAULT : will need to update this to create a unique bucket
 		art.S3.EndPoint = sel.(*resources.StorageResourceInstance).Source
 	}
 }
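One note on addS3annotations: the bare type assertion sel.(*resources.StorageResourceInstance) panics if the selected instance is ever some other DBObject implementation. A hedged, comma-ok variant (not part of this commit) would be:

    if inst, ok := sel.(*resources.StorageResourceInstance); ok {
        art.S3.EndPoint = inst.Source
    }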
-func (*ArgoBuilder) addAuthInformation(sel utils.DBObject, namespace string, art *Artifact) {
-	if sel.(*resources.StorageResourceInstance).Credentials != nil {
-		tool, err := tools2.NewService(conf.GetConfig().Mode)
-		if err != nil || tool == nil {
-			logger.Error().Msg("Could not create the access secret")
-		} else {
-			id, err := tool.CreateAccessSecret(namespace,
-				sel.(*resources.StorageResourceInstance).Credentials.Login,
-				sel.(*resources.StorageResourceInstance).Credentials.Pass)
-			if err == nil {
-				art.S3.AccessKeySecret = &Secret{
-					Name: id,
-					Key:  "access-key",
-				}
-				art.S3.SecretKeySecret = &Secret{
-					Name: id,
-					Key:  "secret-key",
-				}
-			}
+func (b *ArgoBuilder) SetupS3Credentials(storage *resources.StorageResource, namespace string, tool tools2.Tool) (string, error) {
+	s := tool.GetS3Secret(storage.UUID, namespace)
+	// var s *v1.Secret
+	if s == nil {
+		id, err := tool.CreateAccessSecret(
+			"hF9wRGog75JuMdshWeEZ",
+			"OwXXJkVQyb5l1aVPdOegKOtDJGoP1dJYeo8O7mDW",
+			storage.UUID,
+			namespace,
+		)
+		if err != nil {
+			l := oclib.GetLogger()
+			l.Fatal().Msg("Error when creating the secret holding credentials for S3 access in " + namespace + " : " + err.Error())
+		}
+		return id, nil
+	}
+	// DELETE THIS AFTER FEATURE IMPLEMENTATION
+	// CODE TO TEST THE BUCKET WITH SAME CREDENTIAL
+	// s.Name = "toto"
+	return s.Name, nil
+}
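SetupS3Credentials is a get-or-create: reuse the per-storage secret if it already exists in the namespace, otherwise create one from the hardcoded key pair flagged in the commit message. The tool surface it leans on — inferred from the calls in this diff, the actual oc-lib signatures may differ — is roughly:

    // Hypothetical interface inferred from usage; v1 is assumed to be
    // k8s.io/api/core/v1 (per the commented-out "var s *v1.Secret" above).
    type S3SecretTool interface {
        GetS3Secret(storageID string, namespace string) *v1.Secret // nil when no secret exists yet
        CreateAccessSecret(accessKey string, secretKey string, storageID string, namespace string) (string, error)
    }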
-			art.S3.AccessKeySecret = &Secret{
-				Name: "argo-minio-secret",
-				Key:  "accessKeySecret",
-			}
+func (b *ArgoBuilder) addAuthInformation(storage *resources.StorageResource, namespace string, art *Artifact) {
+	sel := storage.GetSelectedInstance()
+	tool, err := tools2.NewService(conf.GetConfig().Mode)
+	if err != nil || tool == nil {
+		logger.Fatal().Msg("Could not create the access secret :" + err.Error())
+	}
-			art.S3.SecretKeySecret = &Secret{
-				Name: "argo-minio-secret",
-				Key:  "secretKeySecret",
-			}
+	// DELETE THIS AFTER FEATURE IMPLEMENTATION
+	// CODE TO TEST THE BUCKET WITH SAME CREDENTIAL
+	secretName, err := b.SetupS3Credentials(storage, namespace, tool) // this method's return should be updated once we have decided how to retrieve credentials
+
+	if err == nil {
+		art.S3.AccessKeySecret = &Secret{
+			Name: secretName,
+			Key:  "access-key",
+		}
+		art.S3.SecretKeySecret = &Secret{
+			Name: secretName,
+			Key:  "secret-key",
+		}
+	}
+
+	art.S3.Key = strings.ReplaceAll(art.S3.Key, sel.(*resources.StorageResourceInstance).Source+"/", "")
+	art.S3.Key = strings.ReplaceAll(art.S3.Key, sel.(*resources.StorageResourceInstance).Source, "")
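The two ReplaceAll calls at the end rewrite the artifact key from an endpoint-qualified URL into a bucket-relative object key; the second call only matters when the key is not followed by a slash after the Source. A runnable sketch with hypothetical values:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        source := "https://minio.example.com" // hypothetical instance Source
        key := source + "/inputs/data.csv"    // hypothetical artifact key before rewriting
        key = strings.ReplaceAll(key, source+"/", "")
        key = strings.ReplaceAll(key, source, "") // no-op here; catches keys without a trailing slash
        fmt.Println(key)                          // inputs/data.csv
    }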
@@ -305,6 +322,8 @@ func (*ArgoBuilder) addAuthInformation(sel utils.DBObject, namespace string, art
 	}
 }
+
+
 func (b *ArgoBuilder) addTaskToArgo(dag *Dag, graphItemID string, processing *resources.ProcessingResource,
 	firstItems []string, lastItems []string) (*Dag, []string, []string) {
 	unique_name := getArgoName(processing.GetName(), graphItemID)