Compare commits

6 commits: 3892692a07 ... ansible

| Author | SHA1 | Date |
|---|---|---|
| | 2ede262abe | |
| | 140bd63559 | |
| | 90cc774341 | |
| | db10baf460 | |
| | 53fca60178 | |
| | 8b53c2e70e | |

ansible/.gitignore (new file, +6, vendored)
@@ -0,0 +1,6 @@
create_kvm/
alpr_with_argo.yml
*.qcow*
OpenPGP*
my_hosts.yaml
Admiraltyworker_kubeconfig/*
ansible/Admiralty/README.md (new file, +95)
@@ -0,0 +1,95 @@
# README

## Ansible Playbooks for Admiralty Worker Setup with Argo Workflows

These Ansible playbooks help configure an existing Kubernetes (K8s) cluster as an Admiralty worker for Argo Workflows. The process consists of two main steps:

1. **Setting up a worker node**: This playbook prepares the worker cluster and generates the necessary kubeconfig.
2. **Adding the worker to the source cluster**: This playbook registers the worker cluster with the source Kubernetes cluster.

---

## Prerequisites

- Ansible installed on the control machine.
- Kubernetes cluster(s) with `kubectl` and the `kubernetes.core` collection installed.
- Necessary permissions to create ServiceAccounts, Roles, RoleBindings, Secrets, and Custom Resources.
- `jq` installed on worker nodes.

---

## Playbook 1: Setting Up a Worker Node

This playbook configures a Kubernetes cluster to become an Admiralty worker for Argo Workflows.

### Variables (Pass through `--extra-vars`)

| Variable | Description |
|----------|-------------|
| `user_prompt` | The user running the Ansible playbook |
| `namespace_prompt` | Kubernetes namespace where resources are created |
| `source_prompt` | The name of the source cluster |

### Actions Performed

1. Installs required dependencies (`python3`, `python3-yaml`, `python3-kubernetes`, `jq`).
2. Creates a service account for the source cluster.
3. Grants patch permissions for pods to the `argo-role`.
4. Adds the service account to `argo-rolebinding`.
5. Creates a token for the service account.
6. Creates a `Source` resource for Admiralty.
7. Retrieves the worker cluster's kubeconfig and modifies it.
8. Stores the kubeconfig locally.
9. Displays the command needed to register this worker in the source cluster.

### Running the Playbook

```sh
ansible-playbook setup_worker.yml -i <WORKER_HOST_IP>, --extra-vars "user_prompt=<YOUR_USER> namespace_prompt=<NAMESPACE> source_prompt=<SOURCE_NAME>"
```

---

## Playbook 2: Adding Worker to Source Cluster

This playbook registers the configured worker cluster as an Admiralty target in the source Kubernetes cluster.

### Variables (Pass through `--extra-vars`)

| Variable | Description |
|----------|-------------|
| `user_prompt` | The user running the Ansible playbook |
| `target_name` | The name of the worker cluster in the source setup |
| `target_ip` | IP of the worker cluster |
| `namespace_source` | Namespace where the target is registered |
| `serviceaccount_prompt` | The service account used in the worker |

### Actions Performed

1. Retrieves the stored kubeconfig from the worker setup.
2. Creates a ServiceAccount in the target namespace.
3. Stores the kubeconfig in a Kubernetes Secret.
4. Creates an Admiralty `Target` resource in the source cluster.

### Running the Playbook

```sh
ansible-playbook add_admiralty_target.yml -i <SOURCE_HOST_IP>, --extra-vars "user_prompt=<YOUR_USER> target_name=<TARGET_NAME_IN_KUBE> target_ip=<WORKER_IP> namespace_source=<NAMESPACE> serviceaccount_prompt=<SERVICE_ACCOUNT_NAME>"
```

# Post Playbook

Don't forget to grant patch rights to the `serviceAccount` on the control node:

```bash
kubectl patch role argo-role -n argo --type='json' -p '[{"op": "add", "path": "/rules/-", "value": {"apiGroups":[""],"resources":["pods"],"verbs":["patch"]}}]'
```

Add the name of the `serviceAccount` in the following command:

```bash
kubectl patch rolebinding argo-binding -n argo --type='json' -p '[{"op": "add", "path": "/subjects/-", "value": {"kind": "ServiceAccount", "name": "<NAME OF THE SERVICE ACCOUNT>", "namespace": "argo"}}]'
```

Maybe we could add a play/playbook to sync the roles and role bindings between all nodes.
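The README's closing note suggests a play to sync the roles and role bindings across nodes. A minimal sketch of what such a play might look like, reusing the role/rolebinding names, the `user_prompt`/`namespace_prompt`/`service_account_name` variables, and the `kubectl patch` commands that already appear in this change set (the play itself is hypothetical and not part of this commit):

```yaml
# Hypothetical sync play: re-apply the pod "patch" rule and the service-account
# subject on every node, mirroring the kubectl patch commands used elsewhere here.
- name: Sync argo role and rolebinding on all nodes
  hosts: all:!localhost
  user: "{{ user_prompt }}"
  tasks:
    - name: Ensure argo-role allows patching pods
      ansible.builtin.command: >
        kubectl patch role argo-role -n {{ namespace_prompt }} --type='json'
        -p '[{"op": "add", "path": "/rules/-", "value": {"apiGroups":[""],"resources":["pods"],"verbs":["patch"]}}]'
      register: role_patch
      changed_when: "'patched' in role_patch.stdout"

    - name: Ensure the service account is bound to argo-role-binding
      ansible.builtin.command: >
        kubectl patch rolebinding argo-role-binding -n {{ namespace_prompt }} --type='json'
        -p '[{"op": "add", "path": "/subjects/-", "value": {"kind": "ServiceAccount", "name": "{{ service_account_name }}", "namespace": "{{ namespace_prompt }}"}}]'
      register: binding_patch
      changed_when: "'patched' in binding_patch.stdout"
```

Note that a JSON-patch `add` appends a duplicate entry on every run, so a real sync play would need an idempotency check before patching.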
							
								
								
									
ansible/Admiralty/add_admiralty_target.yml (new file, +49)
@@ -0,0 +1,49 @@
- name: Setup an existing k8s cluster to become an admiralty worker for Argo Workflows
  hosts: all:!localhost
  user: "{{ user_prompt }}"
  vars:
    - service_account_name: "{{ serviceaccount_prompt }}"
    - namespace: "{{ namespace_source }}"

  tasks:

    - name: Store kubeconfig value
      ansible.builtin.set_fact:
        kubeconfig: "{{ lookup('file','worker_kubeconfig/{{ target_ip }}_kubeconfig.json') | trim }}"

    - name: Create the serviceAccount that will execute in the target
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: v1
          kind: ServiceAccount
          metadata:
            name: '{{ service_account_name }}'
            namespace: '{{ namespace }}'

    - name: Create the token to authenticate the source
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: v1
          kind: Secret
          type: Opaque
          metadata:
            name: admiralty-secret-{{ target_name }}
            namespace: "{{ namespace_source }}"
          data:
            config: "{{ kubeconfig | tojson | b64encode }}"

    - name: Create the target resource
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: multicluster.admiralty.io/v1alpha1
          kind: Target
          metadata:
            name: target-{{ target_name }}
            namespace: '{{ namespace_source }}'
          spec:
            kubeconfigSecret:
              name: admiralty-secret-{{ target_name }}
ansible/Admiralty/ansible.cfg (new file, +2)
@@ -0,0 +1,2 @@
[defaults]
result_format=default
ansible/Admiralty/deploy_admiralty.yml (new file, +75)
@@ -0,0 +1,75 @@
- name: Install Helm
  hosts: all:!localhost
  user: "{{ user_prompt }}"
  become: true
  # become_method: su
  vars:
    arch_mapping:  # Map Ansible architecture ({{ ansible_architecture }}) names to Docker's architecture names
      x86_64: amd64
      aarch64: arm64

  tasks:
    - name: Check if Helm exists
      ansible.builtin.command:
        cmd: which helm
      register: result_which
      failed_when: result_which.rc not in [ 0, 1 ]

    - name: Install helm
      when: result_which.rc == 1
      block:
        - name: Download helm from source
          ansible.builtin.get_url:
            url: https://get.helm.sh/helm-v3.15.0-linux-amd64.tar.gz
            dest: ./

        - name: Unpack helm
          ansible.builtin.unarchive:
            remote_src: true
            src: helm-v3.15.0-linux-amd64.tar.gz
            dest: ./

        - name: Copy helm to path
          ansible.builtin.command:
            cmd: mv linux-amd64/helm /usr/local/bin/helm

- name: Install admiralty
  hosts: all:!localhost
  user: "{{ user_prompt }}"

  tasks:
    - name: Install required python libraries
      become: true
      # become_method: su
      package:
        name:
          - python3
          - python3-yaml
        state: present

    - name: Add jetstack repo
      ansible.builtin.shell:
        cmd: |
          helm repo add jetstack https://charts.jetstack.io && \
          helm repo update

    - name: Install cert-manager
      kubernetes.core.helm:
        chart_ref: jetstack/cert-manager
        release_name: cert-manager
        context: default
        namespace: cert-manager
        create_namespace: true
        wait: true
        set_values:
          - value: installCRDs=true

    - name: Install admiralty
      kubernetes.core.helm:
        name: admiralty
        chart_ref: oci://public.ecr.aws/admiralty/admiralty
        namespace: admiralty
        create_namespace: true
        chart_version: 0.16.0
        wait: true
ansible/Admiralty/notes_admiralty.md (new file, +21)
@@ -0,0 +1,21 @@
Target
---
- Create a service account
- Create a token for the service account (the SA on the control node == the SA on the target; it carries the SA name + token used to reach the target)
- Create a kubeconfig file with the token and the IP address (one reachable by the controller / public) and retrieve it so it can be handed to the controller
- Create a Source resource on the target: it declares who is going to contact us
- Give the controller's SA the same roles/rights as the "argo" SA
    - In the authorization rules, add the Patch verb on the pods resource
    - Add the controller SA to the rolebinding

Controller
---
- Create the serviceAccount with the same name as on the target
- Retrieve the target's kubeconfig
- Create a secret from the target kubeconfig
- Create the Target resource and associate the secret with it

Schema
---
When a resource tagged for Admiralty runs on the controller, the controller reaches out to the targets, authenticating with the secret, and creates pods there using the shared service account.
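As a concrete illustration of the "Schema" note above, these are the annotations that mark an Argo Workflows template for Admiralty delegation, in the same shape as the templates in ansible/Admiralty/weather_test_admiralty.yml further down (the template name and cluster name here are placeholders):

```yaml
# Placeholder template: only the two multicluster.admiralty.io annotations matter here.
- name: example-step
  container:
    image: busybox
    command: ["/bin/sh", "-c"]
    args: ["echo hello from a delegated pod"]
  metadata:
    annotations:
      multicluster.admiralty.io/elect: ""                  # let Admiralty schedule this pod on a Target
      multicluster.admiralty.io/clustername: target-dc02   # name of the Target resource to use
```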
							
								
								
									
ansible/Admiralty/old/admiralty_inventory.yml (new file, +8)
@@ -0,0 +1,8 @@
myhosts:
  hosts:
    control:
      ansible_host: 172.16.0.184
    dc01: #oc-dev
      ansible_host: 172.16.0.187
    dc02:
      ansible_host:
ansible/Admiralty/old/create_secrets.yml (new file, +115)
@@ -0,0 +1,115 @@
- name: Create secret from Workload
  hosts: "{{ host_prompt }}"
  user: "{{ user_prompt }}"
  vars:
    secret_exists: false
    control_ip: 192.168.122.70
    user_prompt: admrescue

  tasks:
    - name: Can management cluster be reached
      ansible.builtin.command:
        cmd: ping -c 5 "{{ control_ip }}"

    - name: Install needed packages
      become: true
      ansible.builtin.package:
        name:
          - jq
          - python3-yaml
          - python3-kubernetes
        state: present

    - name: Get the list of existing secrets
      kubernetes.core.k8s_info:
        api_version: v1
        kind: Secret
        name: "{{ inventory_hostname | lower }}"
        namespace: default
      register: list_secrets
      failed_when: false

    - name: Create token
      ansible.builtin.command:
        cmd: kubectl create token admiralty-control
      register: cd_token

    - name: Retrieve config
      ansible.builtin.command:
        cmd: kubectl config view --minify --raw --output json
      register: config_info

    - name: Display config
      ansible.builtin.shell:
        cmd: |
          echo  > config_info.json

    - name: Edit the config json with jq
      ansible.builtin.shell:
        cmd: |
          CD_TOKEN="{{ cd_token.stdout }}" && \
          CD_IP="{{ control_ip }}" && \
          kubectl config view --minify --raw --output json | jq '.users[0].user={token:"'$CD_TOKEN'"} | .clusters[0].cluster.server="https://'$CD_IP':6443"'
      register: edited_config
      # failed_when: edited_config.skipped == true

    - name: Set fact for secret
      set_fact:
        secret: "{{ edited_config.stdout }}"
        cacheable: true

    - name: Create the source for controller
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: multicluster.admiralty.io/v1alpha1
          kind: Source
          metadata:
            name: admiralty-control
            namespace: default
          spec:
            serviceAccountName: admiralty-control

- name: Create secret from Workload
  hosts: "{{ control_host }}"
  user: "{{ user_prompt }}"
  gather_facts: true
  vars:
    secret: "{{ hostvars[host_prompt]['secret'] }}"
    user_prompt: admrescue

  tasks:

    - name: Get the list of existing secrets
      kubernetes.core.k8s_info:
        api_version: v1
        kind: Secret
        name: "{{ host_prompt | lower }}-secret"
        namespace: default
      register: list_secrets
      failed_when: false

    - name: Test whether secret exists
      failed_when: secret == ''
      debug:
        msg: "Secret '{{ secret }}' "

    - name: Create secret with new config
      ansible.builtin.command:
        cmd: kubectl create secret generic "{{ host_prompt | lower }}"-secret --from-literal=config='{{ secret }}'
      when: list_secrets.resources | length == 0

    - name: Create target for the workload cluster
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: multicluster.admiralty.io/v1alpha1
          kind: Target
          metadata:
            name: '{{ host_prompt | lower }}'
            namespace: default
          spec:
            kubeconfigSecret:
              name: "{{ host_prompt | lower }}-secret"
ansible/Admiralty/sequence_diagram.puml (new file, +33)
@@ -0,0 +1,33 @@
@startuml

actor User
participant "Ansible Playbook" as Playbook
participant "Target Node" as K8s
participant "Control Node" as ControlNode

User -> Playbook: Start Playbook Execution
Playbook -> Playbook: Save Target IP
Playbook -> K8s: Install Required Packages
Playbook -> K8s: Create Service Account
Playbook -> K8s: Patch Role argo-role (Add pod patch permission)
Playbook -> K8s: Patch RoleBinding argo-binding (Add service account)
Playbook -> K8s: Create Token for Service Account
Playbook -> K8s: Create Source Resource
Playbook -> K8s: Retrieve Current Kubeconfig
Playbook -> K8s: Convert Kubeconfig to JSON
Playbook -> User: Display Worker Kubeconfig
Playbook -> Playbook: Save Temporary Kubeconfig File
Playbook -> Playbook: Modify Kubeconfig JSON (Replace user token, set server IP)
Playbook -> User: Save Updated Kubeconfig File
Playbook -> User: Display Instructions for Adding Target

User -> Playbook: Start Additional Playbook Execution
Playbook -> Playbook: Store Kubeconfig Value
Playbook -> User: Display Kubeconfig
Playbook -> ControlNode: Copy Kubeconfig
Playbook -> ControlNode: Create Service Account on Target
Playbook -> ControlNode: Create Authentication Token for Source
Playbook -> ControlNode: Create Target Resource

@enduml
ansible/Admiralty/setup_admiralty_target.yml (new file, +110)
@@ -0,0 +1,110 @@
- name: Setup an existing k8s cluster to become an admiralty worker for Argo Workflows
  hosts: all:!localhost
  user: "{{ user_prompt }}"
  # Pass these through --extra-vars
  vars:
    - namespace: "{{ namespace_prompt }}"
    - source_name: "{{ source_prompt }}"
    - service_account_name: "admiralty-{{ source_prompt }}"
  environment:
    KUBECONFIG: /home/{{ user_prompt }}/.kube/config

  tasks:
    - name: Save target IP
      set_fact:
        target_ip: "{{ ansible_host }}"

    - name: Install the appropriate packages
      become: true
      become_method: sudo
      package:
        name:
          - python3
          - python3-yaml
          - python3-kubernetes
          - jq
        state: present

    # We need to provide the source name on the command line through --extra-vars
    - name: Create a service account for the source
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: v1
          kind: ServiceAccount
          metadata:
            name: '{{ service_account_name }}'
            namespace: '{{ namespace }}'

    - name: Add patch permission for pods to argo-role
      command: >
        kubectl patch role argo-role -n {{ namespace }} --type='json'
        -p '[{"op": "add", "path": "/rules/-", "value": {"apiGroups":[""],"resources":["pods"],"verbs":["patch"]}}]'
      register: patch_result
      changed_when: "'patched' in patch_result.stdout"

    - name: Add service account to argo-rolebinding
      ansible.builtin.command: >
        kubectl patch rolebinding argo-role-binding -n {{ namespace }} --type='json'
        -p '[{"op": "add", "path": "/subjects/-", "value": {"kind": "ServiceAccount", "name": "{{ service_account_name }}", "namespace": "{{ namespace }}"}}]'
      register: patch_result
      changed_when: "'patched' in patch_result.stdout"

    - name: Create a token for the created service account
      ansible.builtin.command:
        cmd: |
          kubectl create token '{{ service_account_name }}' -n {{ namespace }}
      register: token_source

    - name: Create the source resource
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: multicluster.admiralty.io/v1alpha1
          kind: Source
          metadata:
            name: source-{{ source_name }}
            namespace: '{{ namespace }}'
          spec:
            serviceAccountName: "{{ service_account_name }}"

    - name: Retrieve the current kubeconfig as json
      ansible.builtin.shell:
        cmd: |
          kubectl config view --minify --raw --output json
      register: worker_kubeconfig

    - name: Convert kubeconfig to JSON
      set_fact:
        kubeconfig_json: "{{ worker_kubeconfig.stdout | trim | from_json }}"

    - name: View worker kubeconfig
      ansible.builtin.debug:
        msg: '{{ kubeconfig_json }}'

    - name: Temporary kubeconfig file
      ansible.builtin.copy:
        content: "{{ kubeconfig_json }}"
        dest: "{{ target_ip }}_kubeconfig.json"

    - name: Modify kubeconfig JSON
      ansible.builtin.shell:
        cmd: |
          jq '.users[0].user={token:"'{{ token_source.stdout }}'"} | .clusters[0].cluster.server="https://'{{ target_ip }}':6443"' {{ target_ip }}_kubeconfig.json
      register: kubeconfig_json

    - name: Save updated kubeconfig
      ansible.builtin.copy:
        content: "{{ kubeconfig_json.stdout | trim | from_json | to_nice_json }}"
        dest: ./worker_kubeconfig/{{ target_ip }}_kubeconfig.json
      delegate_to: localhost

    - name: Display information for the creation of the target on the source host
      ansible.builtin.debug:
        msg: >
          - To add this host as a target in an Admiralty network use the following command line:
          - ansible-playbook add_admiralty_target.yml -i <SOURCE HOST IP>, --extra-vars "user_prompt=<YOUR USER> target_name=<TARGET NAME IN KUBE> target_ip={{ ansible_host }} namespace_source={{ namespace }} serviceaccount_prompt={{ service_account_name }}"
          - Don't forget to give {{ service_account_name }} the appropriate role in namespace {{ namespace }}
ansible/Admiralty/setup_minio_argo_admiralty.yml (new file, +121)
@@ -0,0 +1,121 @@
- name: Setup MinIO resources for argo workflows/admiralty
  hosts: all:!localhost
  user: "{{ user_prompt }}"
  gather_facts: true
  become_method: sudo
  vars:
    - argo_namespace: "argo"
    - uuid: "{{ uuid_prompt }}"
  tasks:

  - name: Install necessary packages
    become: true
    package:
      name:
        - python3-kubernetes
      state: present

  - name: Create destination directory
    file:
      path: $HOME/minio-binaries
      state: directory
      mode: '0755'

  - name: Install mc
    ansible.builtin.get_url:
      url: "https://dl.min.io/client/mc/release/linux-amd64/mc"
      dest: $HOME/minio-binaries/
      mode: +x
      headers:
        Content-Type: "application/json"

  - name: Add mc to path
    ansible.builtin.shell:
      cmd: |
        grep -qxF 'export PATH=$PATH:$HOME/minio-binaries' $HOME/.bashrc || echo 'export PATH=$PATH:$HOME/minio-binaries' >> $HOME/.bashrc

  - name: Test bashrc
    ansible.builtin.shell:
      cmd: |
        tail -n 5 $HOME/.bashrc

  - name: Retrieve root user
    ansible.builtin.shell:
      cmd: |
        kubectl get secrets argo-artifacts -o jsonpath="{.data.rootUser}" | base64 -d -
    register: user

  - name: Retrieve root password
    ansible.builtin.shell:
      cmd: |
        kubectl get secret argo-artifacts --namespace default -o jsonpath="{.data.rootPassword}" | base64 -d -
    register: password

  - name: Set up MinIO host in mc
    ansible.builtin.shell:
      cmd: |
        $HOME/minio-binaries/mc alias set my-minio http://127.0.0.1:9000 '{{ user.stdout }}' '{{ password.stdout }}'

  - name: Create oc-bucket
    ansible.builtin.shell:
      cmd: |
        $HOME/minio-binaries/mc mb oc-bucket

  - name: Run mc admin accesskey create command
    command: $HOME/minio-binaries/mc admin accesskey create --json my-minio
    register: minio_output
    changed_when: false  # Avoid marking the task as changed every time

  - name: Parse JSON output
    set_fact:
      access_key: "{{ minio_output.stdout | from_json | json_query('accessKey') }}"
      secret_key: "{{ minio_output.stdout | from_json | json_query('secretKey') }}"

  - name: Retrieve cluster IP for minio API
    ansible.builtin.shell:
      cmd: |
        kubectl get service argo-artifacts -o jsonpath="{.spec.clusterIP}"
    register: minio_cluster_ip

  - name: Create the minio secret in argo namespace
    kubernetes.core.k8s:
      state: present
      namespace: '{{ argo_namespace }}'
      name: "{{ uuid }}-argo-artifact-secret"
      definition:
        apiVersion: v1
        kind: Secret
        type: Opaque
        stringData:
          access-key: '{{ access_key }}'
          secret-key: '{{ secret_key }}'

  - name: Create the artifact-repositories ConfigMap in argo namespace
    kubernetes.core.k8s:
      state: present
      namespace: '{{ argo_namespace }}'
      definition:
        apiVersion: v1
        kind: ConfigMap
        metadata:
          name: artifact-repositories
        data:
          oc-s3-artifact-repository: |
            s3:
              bucket: oc-bucket
              endpoint: {{ minio_cluster_ip.stdout }}:9000
              insecure: true
              accessKeySecret:
                name: "{{ uuid }}-argo-artifact-secret"
                key: access-key
              secretKeySecret:
                name: "{{ uuid }}-argo-artifact-secret"
                key: secret-key

    # ansible.builtin.shell:
    #   cmd: |
    #     kubectl create secret -n '{{ argo_namespace }}' generic argo-artifact-secret \
    #     --from-literal=access-key='{{ access_key }}' \
    #     --from-literal=secret-key='{{ secret_key }}'
ansible/Admiralty/weather_test_admiralty.yml (new file, +149)
@@ -0,0 +1,149 @@
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: harvesting-
  labels:
    example: 'true'
    workflows.argoproj.io/creator: 0d47b046-a09e-4bed-b10a-ec26783d4fe7
    workflows.argoproj.io/creator-email: pierre.bayle.at.irt-stexupery.com
    workflows.argoproj.io/creator-preferred-username: pbayle
spec:
  templates:
    - name: busybox
      inputs:
        parameters:
          - name: model
          - name: output-dir
          - name: output-file
          - name: clustername
      outputs:
        parameters:
          - name: outfile
            value: '{{inputs.parameters.output-file}}.tgz'
        artifacts:
          - name: outputs
            path: '{{inputs.parameters.output-dir}}/{{inputs.parameters.output-file}}.tgz'
            s3:
              key: '{{workflow.name}}/{{inputs.parameters.output-file}}.tgz'
      container:
        image: busybox
        command: ["/bin/sh", "-c"]
        args:
          - |
            echo "Creating tarball for model: {{inputs.parameters.model}}";
            mkdir -p {{inputs.parameters.output-dir}};
            echo $(ping 8.8.8.8 -c 4) > $(date +%Y-%m-%d__%H-%M-%S)_{{inputs.parameters.output-file}}.txt
            tar -czf {{inputs.parameters.output-dir}}/{{inputs.parameters.output-file}}.tgz *_{{inputs.parameters.output-file}}.txt;
      metadata:
        annotations:
          multicluster.admiralty.io/elect: ""
          multicluster.admiralty.io/clustername: "{{inputs.parameters.clustername}}"

    - name: weather-container
      inputs:
        parameters:
          - name: output-dir
          - name: output-file
          - name: clustername
      outputs:
        parameters:
          - name: outfile
            value: '{{inputs.parameters.output-file}}.tgz'
        artifacts:
          - name: outputs
            path: '{{inputs.parameters.output-dir}}/{{inputs.parameters.output-file}}.tgz'
            s3:
              insecure: true
              key: '{{workflow.name}}/{{inputs.parameters.output-file}}'
      container:
        name: weather-container
        image: pierrebirt/weather_container:latest
        #imagePullPolicy: IfNotPresent
        env:
          - name: API_KEY
            valueFrom:
              secretKeyRef:
                name: cnes-secrets
                key: weather-api
        args:
          - '--key'
          - "$(API_KEY)"
          - '--dir'
          - '{{inputs.parameters.output-dir}}'
          - '--file'
          - '{{inputs.parameters.output-file}}'
      metadata:
        annotations:
          multicluster.admiralty.io/elect: ""
          multicluster.admiralty.io/clustername: "{{inputs.parameters.clustername}}"

    - name: bucket-reader
      inputs:
        parameters:
          - name: bucket-path
          - name: logs-path
        artifacts:
          - name: retrieved-logs
            path: '{{inputs.parameters.logs-path}}'
            s3:
              key: '{{inputs.parameters.bucket-path}}'
      outputs:
        artifacts:
          - name: logs_for_test
            path: /tmp/empty_log_for_test.log
            s3:
              key: '{{workflow.name}}/log_test.log'
      container:
        image: busybox
        command: ["/bin/sh", "-c"]
        args:
          - |
            tar -xvf '{{inputs.parameters.logs-path}}'
            ls -la
            cat *.txt
            touch /tmp/empty_log_for_test.log

    - name: harvesting-test
      inputs: {}
      outputs: {}
      metadata: {}
      dag:
        tasks:
          - name: busybox-dc02
            template: busybox
            arguments:
              parameters:
                - name: model
                  value: era-pressure-levels
                - name: output-dir
                  value: /app/data/output
                - name: output-file
                  value: fake_logs
                - name: clustername
                  value: target-dc02
          - name: weather-container-dc03
            template: weather-container
            arguments:
              parameters:
                - name: output-dir
                  value: /app/results
                - name: output-file
                  value: weather_results_23_01
                - name: clustername
                  value: target-dc03
          - name: bucket-reader
            template: bucket-reader
            dependencies: [busybox-dc02, weather-container-dc03]
            arguments:
              parameters:
                - name: bucket-path
                  value: '{{workflow.name}}/fake_logs.tgz'
                - name: logs-path
                  value: /tmp/logs.tgz

  entrypoint: harvesting-test
  serviceAccountName: argo-agregateur-workflow-controller
  artifactRepositoryRef: # https://argo-workflows.readthedocs.io/en/latest/fields/#s3artifactrepository
    key: admiralty-s3-artifact-repository  # Choose the artifact repository with the public IP/url
@@ -0,0 +1,32 @@
{
    "apiVersion": "v1",
    "clusters": [
        {
            "cluster": {
                "certificate-authority-data": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTXpneE5EVTNNekl3SGhjTk1qVXdNVEk1TVRBeE5UTXlXaGNOTXpVd01USTNNVEF4TlRNeQpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTXpneE5EVTNNekl3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFSWHFiRHBmcUtwWVAzaTFObVpCdEZ3RzNCZCtOY0RwenJKS01qOWFETlUKTUVYZmpRM3VrbzVISDVHdTFzNDRZY0p6Y29rVEFmb090QVhWS1pNMUs3YWVvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVWM5MW5TYi9kaU1pbHVqR3RENjFRClc0djVKVmN3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUlnV05uSzlsU1lDY044VEFFODcwUnNOMEgwWFR6UndMNlAKOEF4Q0xwa3pDYkFDSVFDRW1LSkhveXFZRW5iZWZFU3VOYkthTHdtRkMrTE5lUHloOWxQUmhCVHdsQT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K",
                "server": "https://172.16.0.181:6443"
            },
            "name": "default"
        }
    ],
    "contexts": [
        {
            "context": {
                "cluster": "default",
                "user": "default"
            },
            "name": "default"
        }
    ],
    "current-context": "default",
    "kind": "Config",
    "preferences": {},
    "users": [
        {
            "name": "default",
            "user": {
                "token": "eyJhbGciOiJSUzI1NiIsImtpZCI6Ik5nT1p0NVVMUVllYko1MVhLdVIyMW01MzJjY25NdTluZ3VNQ1RmMnNTUHcifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrM3MiXSwiZXhwIjoxNzM4Njg1NzM2LCJpYXQiOjE3Mzg2ODIxMzYsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwianRpIjoiNTNkNzU4YmMtMGUwMC00YTU5LTgzZTUtYjkyYjZmODg2NWE2Iiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJhcmdvIiwic2VydmljZWFjY291bnQiOnsibmFtZSI6ImFkbWlyYWx0eS1jb250cm9sIiwidWlkIjoiMWQ1NmEzMzktMTM0MC00NDY0LTg3OGYtMmIxY2ZiZDU1ZGJhIn19LCJuYmYiOjE3Mzg2ODIxMzYsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDphcmdvOmFkbWlyYWx0eS1jb250cm9sIn0.WMqmDvp8WZHEiupJewo2BplD0xu6yWhlgZkG4q_PpVCbHKd7cKYWnpTi_Ojmabvvw-VC5sZFZAaxZUnqdZNGf_RMrJ5pJ9B5cYtD_gsa7AGhrSz03nd5zPKvujT7-gzWmfHTpZOvWky00A2ykKLflibhJgft4FmFMxQ6rR3MWmtqeAo82wevF47ggdOiJz3kksFJPfEpk1bflumbUCk-fv76k6EljPEcFijsRur-CI4uuXdmTKb7G2TDmTMcFs9X4eGbBO2ZYOAVEw_Xafru6D-V8hWBTm-NWQiyyhdxlVdQg7BNnXJ_26GsJg4ql4Rg-Q-tXB5nGvd68g2MnGTWwg"
            }
        }
    ]
}
@@ -0,0 +1,32 @@
 | 
				
			|||||||
 | 
					{
 | 
				
			||||||
 | 
					    "apiVersion": "v1",
 | 
				
			||||||
 | 
					    "clusters": [
 | 
				
			||||||
 | 
					        {
 | 
				
			||||||
 | 
					            "cluster": {
 | 
				
			||||||
 | 
					                "certificate-authority-data": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTXpnd09ESTVNRFF3SGhjTk1qVXdNVEk0TVRZME9ESTBXaGNOTXpVd01USTJNVFkwT0RJMApXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTXpnd09ESTVNRFF3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFSdUV0Y2lRS3VaZUpEV214TlJBUzM3TlFib3czSkpxMWJQSjdsdTN2eEgKR2czS1hGdFVHZWNGUjQzL1Rjd0pmanQ3WFpsVm9PUldtOFozYWp3OEJPS0ZvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVTB3NG1uSlUrbkU3SnpxOHExRWdWCmFUNU1mMmd3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUloQU9JTUtsZHk0Y044a3JmVnQyUFpLQi80eXhpOGRzM0wKaHR0b2ZrSEZtRnlsQWlCMWUraE5BamVUdVNCQjBDLzZvQnA2c21xUDBOaytrdGFtOW9EM3pvSSs0Zz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K",
                "server": "https://172.16.0.184:6443"
            },
            "name": "default"
        }
    ],
    "contexts": [
        {
            "context": {
                "cluster": "default",
                "user": "default"
            },
            "name": "default"
        }
    ],
    "current-context": "default",
    "kind": "Config",
    "preferences": {},
    "users": [
        {
            "name": "default",
            "user": {
					                "token": "eyJhbGciOiJSUzI1NiIsImtpZCI6InUzaGF0T1RuSkdHck1sbURrQm0waDdDeDFSS3pxZ3FVQ25aX1VrOEkzdFkifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrM3MiXSwiZXhwIjoxNzM4Njg1NzM2LCJpYXQiOjE3Mzg2ODIxMzYsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwianRpIjoiZDFmNzQ2NmQtN2MyOS00MGNkLTg1ZTgtMjZmMzFkYWU5Nzg4Iiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJhcmdvIiwic2VydmljZWFjY291bnQiOnsibmFtZSI6ImFkbWlyYWx0eS1jb250cm9sIiwidWlkIjoiNTc0Y2E1OTQtY2IxZi00N2FiLTkxZGEtMDI0NDEwNjhjZjQwIn19LCJuYmYiOjE3Mzg2ODIxMzYsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDphcmdvOmFkbWlyYWx0eS1jb250cm9sIn0.ZJvTJawg73k5SEOG6357iYq_-w-7V4BqciURYJao_dtP_zDpcXyZ1Xw-sxNKITgLjByTkGaCJRjDtR2QdZumKtb8cl6ayv0UZMHHnFft4gtQi-ttjj69rQ5RTNA3dviPaQOQgWNAwPkUPryAM0Sjsd5pRWzXXe-NVpWQZ6ooNZeRBHyjT1Km1JoprB7i55vRJEbBnoK0laJUtHCNmLoxK5kJYQqeAtA-_ugdSJbnyTFQAG14vonZSyLWAQR-Hzw9QiqIkSEW1-fcvrrZbrVUZsl_i7tkrXSSY9EYwjrZlqIu79uToEa1oWvulGFEN6u6YGUydj9nXQJX_eDpaWvuOA"
            }
        }
    ]
}

@@ -0,0 +1,32 @@
{
    "apiVersion": "v1",
    "clusters": [
        {
            "cluster": {
					                "certificate-authority-data": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTXpnd09ESTVNRFF3SGhjTk1qVXdNVEk0TVRZME9ESTBXaGNOTXpVd01USTJNVFkwT0RJMApXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTXpnd09ESTVNRFF3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFSdUV0Y2lRS3VaZUpEV214TlJBUzM3TlFib3czSkpxMWJQSjdsdTN2eEgKR2czS1hGdFVHZWNGUjQzL1Rjd0pmanQ3WFpsVm9PUldtOFozYWp3OEJPS0ZvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVTB3NG1uSlUrbkU3SnpxOHExRWdWCmFUNU1mMmd3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUloQU9JTUtsZHk0Y044a3JmVnQyUFpLQi80eXhpOGRzM0wKaHR0b2ZrSEZtRnlsQWlCMWUraE5BamVUdVNCQjBDLzZvQnA2c21xUDBOaytrdGFtOW9EM3pvSSs0Zz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K",
                "server": "https://172.16.0.185:6443"
            },
            "name": "default"
        }
    ],
    "contexts": [
        {
            "context": {
                "cluster": "default",
                "user": "default"
            },
            "name": "default"
        }
    ],
    "current-context": "default",
    "kind": "Config",
    "preferences": {},
    "users": [
        {
            "name": "default",
            "user": {
					                "token": "eyJhbGciOiJSUzI1NiIsImtpZCI6InUzaGF0T1RuSkdHck1sbURrQm0waDdDeDFSS3pxZ3FVQ25aX1VrOEkzdFkifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrM3MiXSwiZXhwIjoxNzM4Njg1NzM2LCJpYXQiOjE3Mzg2ODIxMzYsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwianRpIjoiODdlYjVkYTYtYWNlMi00YzFhLTg1YjctYWY1NDI2MjA1ZWY1Iiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJhcmdvIiwic2VydmljZWFjY291bnQiOnsibmFtZSI6ImFkbWlyYWx0eS1jb250cm9sIiwidWlkIjoiZjFjNjViNDQtYmZmMC00Y2NlLTk4ZGQtMTU0YTFiYTk0YTU2In19LCJuYmYiOjE3Mzg2ODIxMzYsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDphcmdvOmFkbWlyYWx0eS1jb250cm9sIn0.SkpDamOWdyvTUk8MIMDMhuKD8qvJPpX-tjXPWX9XsfpMyjcB02kI-Cn9b8w1TnYpGJ_u3qyLzO7RlXOgSHtm7TKHOCoYudj4jNwRWqIcThxzAeTm53nlZirUU0E0eJU8cnWHGO3McAGOgkStpfVwHaTQHq2oMZ6jayQU_HuButGEvpFt2FMFEwY9pOjabYHPPOkY9ruswzNhGBRShxWxfOgCWIt8UmbrryrNeNd_kZlB0_vahuQkAskeJZd3f_hp7qnSyLd-YZa5hUrruLJBPQZRw2sPrZe0ukvdpuz7MCfE-CQzUDn6i3G6FCKzYfd-gHFIYNUowS0APHLcC-yWSQ"
            }
        }
    ]
}

@@ -0,0 +1,32 @@
{
    "apiVersion": "v1",
    "clusters": [
        {
            "cluster": {
					                "certificate-authority-data": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkakNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTlRJM05EVTROVE13SGhjTk1qVXdOekUzTURrMU1EVXpXaGNOTXpVd056RTFNRGsxTURVegpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTlRJM05EVTROVE13V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFUTzJzVWE4MTVDTmVxWUNPdCthREoreG5hWHRZNng3R096a0c1U1U0TEEKRE1talExRVQwZi96OG9oVU55L1JneUt0bmtqb2JnZVJhOExTdDAwc3NrMDNvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVW1zeUVyWkQvbmxtNVJReUUwR0NICk1FWlU0ZWd3Q2dZSUtvWkl6ajBFQXdJRFJ3QXdSQUlnVXJsR3ZGZy9FVzhXdU1Nc3JmZkZTTHdmYm1saFI5MDYKYjdHaWhUNHdFRzBDSUVsb2FvWGdwNnM5c055eE1iSUwxKzNlVUtFc0k2Y2dDdldFVEZmRWtQTUIKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=",
                "server": "https://172.16.0.191:6443"
            },
            "name": "default"
        }
    ],
    "contexts": [
        {
            "context": {
                "cluster": "default",
                "user": "default"
            },
            "name": "default"
        }
    ],
    "current-context": "default",
    "kind": "Config",
    "preferences": {},
    "users": [
        {
            "name": "default",
            "user": {
					                "token": "eyJhbGciOiJSUzI1NiIsImtpZCI6IlZCTkEyUVJKeE9XblNpeUI1QUlMdWtLZmVpbGQ1LUpRTExvNWhkVjlEV2MifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrM3MiXSwiZXhwIjoxNzUzMTk0MjMxLCJpYXQiOjE3NTMxOTA2MzEsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwianRpIjoiYTMyOTY0OTktNzhiZS00MzE0LTkyYjctMDQ1NTBkY2JjMGUyIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJ0ZXN0LWFkbWlyYWx0eS1hbnNpYmxlIiwic2VydmljZWFjY291bnQiOnsibmFtZSI6ImFkbWlyYWx0eS1zb3VyY2UiLCJ1aWQiOiI4YmJhMTA3Mi0wYjZiLTQwYjUtYWI4Mi04OWQ1MTkyOGIwOTIifX0sIm5iZiI6MTc1MzE5MDYzMSwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50OnRlc3QtYWRtaXJhbHR5LWFuc2libGU6YWRtaXJhbHR5LXNvdXJjZSJ9.A0UJLoui_SX4dCgUZIo4kprZ3kb2WBkigvyy1e55qQMFZxRoAed6ZvR95XbHYNUoiHR-HZE04QO0QcOnFaaQDTA6fS9HHtjfPKAoqbXrpShyoHNciiQnhkwYvtEpG4bvDf0JMB9qbWGMrBoouHwx-JoQG0JeoQq-idMGiDeHhqVc86-Uy_angvRoAZGF5xmYgMPcw5-vZPGfgk1mHYx5vXNofCcmF4OqMvQaWyYmH82L5SYAYLTV39Z1aCKkDGGHt5y9dVJ0udA4E5Cx3gO2cLLLWxf8n7uFSUx8sHgFtZOGgXwN8DIrTe3Y95p09f3H7nTxjnmQ-Nce2hofLC2_ng"
            }
        }
    ]
}

ansible/Admiralty/worker_kubeconfig/target01_kubeconfig.json (32 lines, new file)
@@ -0,0 +1,32 @@
{
    "apiVersion": "v1",
    "clusters": [
        {
            "cluster": {
					                "certificate-authority-data": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlRENDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTXpnNU1Ua3pPRFF3SGhjTk1qVXdNakEzTURrd09UUTBXaGNOTXpVd01qQTFNRGt3T1RRMApXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTXpnNU1Ua3pPRFF3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFTYWsySHRMQVFUclVBSUF3ckUraDBJZ0QyS2dUcWxkNmorQlczcXRUSmcKOW9GR2FRb1lnUERvaGJtT29ueHRTeDlCSlc3elkrZEM2T3J5ekhkYzUzOGRvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVXd5UE1iOFAwaC9IR2szZ0dianozClFvOVVoQ293Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQUlBVE1ETGFpeWlwaUNuQjF1QWtYMkxiRXdrYk93QlcKb1U2eDluZnRMTThQQWlFQTUza0hZYU05ZVZVdThld3REa0M3TEs3RTlkSGczQ3pSNlBxSHJjUHJTeDA9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K",
                "server": "https://target01:6443"
            },
            "name": "default"
        }
    ],
    "contexts": [
        {
            "context": {
                "cluster": "default",
                "user": "default"
            },
            "name": "default"
        }
    ],
    "current-context": "default",
    "kind": "Config",
    "preferences": {},
    "users": [
        {
            "name": "default",
            "user": {
					                "token": "eyJhbGciOiJSUzI1NiIsImtpZCI6IlJwbHhUQ2ppREt3SmtHTWs4Z2cwdXBuWGtjTUluMVB0dFdGbUhEUVY2Y2MifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrM3MiXSwiZXhwIjoxNzQwMDUzODAyLCJpYXQiOjE3NDAwNTAyMDIsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwianRpIjoiMThkNzdjMzctZjgyNC00MGVmLWExMDUtMzcxMzJkNjUxNzgzIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJhcmdvIiwic2VydmljZWFjY291bnQiOnsibmFtZSI6ImFkbWlyYWx0eS1jb250cm9sIiwidWlkIjoiNmExM2M4YTgtZmE0NC00NmJlLWI3ZWItYTQ0OWY3ZTMwZGM1In19LCJuYmYiOjE3NDAwNTAyMDIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDphcmdvOmFkbWlyYWx0eS1jb250cm9sIn0.DtKkkCEWLPp-9bmSbrqxvxO2kXfOW2cHlmxs5xPzTtn3DcNZ-yfUxJHxEv9Hz6-h732iljRKiWx3SrEN2ZjGq555xoOHV202NkyUqU3EWmBwmVQgvUKOZSn1tesAfI7fQp7sERa7oKz7ZZNHJ7x-nw0YBoxYa4ECRPkJKDR3uEyRsyFMaZJELi-wIUSZkeGxNR7PdQWoYPoJipnwXoyAFbT42r-pSR7nqzy0-Lx1il82klkZshPEj_CqycqJg1djoNoe4ekS7En1iljz03YqOqm1sFSOdvDRS8VGM_6Zm6e3PVwXQZVBgFy_ET1RqtxPsLyYmaPoIfPMq2xeRLoGIg"
            }
        }
    ]
}

ansible/Argo/README.md (70 lines, new file)
@@ -0,0 +1,70 @@
# Prerequisites

Ensure that you have the following installed on your local machine:
- Ansible
- SSH access to the target host
- Required dependencies for Kubernetes

The playbooks prompt for two things:
1. The username used to connect to the host via SSH.
2. The root password for privilege escalation.

- You can use a user with `NOPASSWD` sudo permissions and skip `--ask-become-pass`.
- You can copy your SSH key to the remote user with `ssh-copy-id` and skip `--ask-pass` (see the sketch below).

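A minimal sketch of both shortcuts, assuming the remote user is `<YOUR_USER>` and the host is `HOST_NAME` (both placeholders):

```sh
# Copy your public key so --ask-pass is no longer needed
ssh-copy-id <YOUR_USER>@HOST_NAME

# On the remote host: allow passwordless sudo so --ask-become-pass can be dropped
echo '<YOUR_USER> ALL=(ALL) NOPASSWD: ALL' | sudo tee /etc/sudoers.d/<YOUR_USER>
```
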
# Deployment Instructions

## Deploying K3s
Replace `HOST_NAME` in `my_hosts.yaml` with the IP address or hostname of the target machine (or pass the IP directly to `-i` as in the command below), then run:

```sh
ansible-playbook -i <YOUR_HOST_IP>, deploy_k3s.yml --extra-vars "user_prompt=YOUR_USER" --ask-pass --ask-become-pass
```

This playbook:
- Updates package repositories.
- Installs necessary dependencies.
- Ensures the user has `sudo` privileges.
- Downloads and installs K3s.
- Configures permissions for Kubernetes operations.
- Enables auto-completion for `kubectl`.
- Reboots the machine to apply changes.
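
After the reboot you can do a quick sanity check from the target host (assuming the kubeconfig was copied to `~/.kube/config` as described above):

```sh
kubectl get nodes
kubectl get pods -A
```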

## Deploying Argo Workflows
Replace `HOST_NAME` in `my_hosts.yaml` with the IP address or hostname of the target machine (or pass the IP directly to `-i` as in the command below), then run:

```sh
ansible-playbook -i <YOUR_HOST_IP>, deploy_argo.yml --extra-vars "user_prompt=<YOUR_USER>" --ask-pass --ask-become-pass
```

This playbook:
- Ensures the `argo` namespace exists in Kubernetes.
- Deploys Argo Workflows using the official manifest.
- Waits for the `argo-server` pod to be running.
- Patches the deployment for first-time connection issues.
- Applies a service configuration to expose Argo Workflows via NodePort (verification sketch below).
- Installs the Argo CLI.
- Enables CLI autocompletion.
- Configures `kubectl` for Argo access.
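
A minimal way to check the exposed UI, assuming the NodePort value from `argo-service.yml` (32746) is unchanged and the node IP is reachable from your machine:

```sh
kubectl -n argo get svc argo-server
# then open https://<YOUR_HOST_IP>:32746 in a browser (the server runs with --auth-mode=server)
```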

# Additional Notes
- The service account used by default is `argo:default`, which may not have sufficient permissions. Use `argo:argo` instead:
  ```sh
  argo submit -f workflow.yaml --serviceaccount=argo
  ```
- The Argo CLI is installed in `/usr/local/bin/argo`.
- The Kubernetes configuration file is copied to `~/.kube/config`.

# Troubleshooting
- If the deployment fails due to permissions, ensure the user has `sudo` privileges.
- Check the status of Argo pods using:
  ```sh
  kubectl get pods -n argo
  ```
- If Argo Workflows is not accessible, verify that the NodePort service is correctly configured.

# References
- [K3s Official Documentation](https://k3s.io/)
- [Argo Workflows Documentation](https://argoproj.github.io/argo-workflows/)

ansible/Argo/argo-service.yml (14 lines, new file)
@@ -0,0 +1,14 @@
# Needed by deploy_argo.yml to change argo-server to a NodePort service
apiVersion: v1
kind: Service
metadata:
  name: argo-server
  namespace: argo
spec:
  type: NodePort
  selector:
    app: argo-server
  ports:
    - port: 2746
      targetPort: 2746
      nodePort: 32746

ansible/Argo/deploy_argo.yml (95 lines, new file)
@@ -0,0 +1,95 @@
# ansible-playbook -i my_hosts.yaml deploy_argo.yml --ask-pass --ask-become-pass

# Need to think about which serviceaccount will be used to launch the workflow; by default
# argo:default is used but it doesn't have enough rights, so use argo:argo instead,
# e.g. '$ argo submit -f .... --serviceaccount=argo'


- name: Install Argo
  hosts: all
  user: "{{ user_prompt }}"
  vars:
    ARGO_VERSION: "3.5.2"
  environment:
    KUBECONFIG: /home/{{ user_prompt }}/.kube/config

  tasks:
  - name: Create argo namespace
    kubernetes.core.k8s:
      state: present
      definition:
        apiVersion: v1
        kind: Namespace
        metadata:
          labels:
            kubernetes.io/metadata.name: argo
          name: argo

  - name: Check whether argo-server is already running
    ansible.builtin.shell:
      cmd: |
        kubectl get -n argo pods | grep -q argo-server
    register: argo_server_pod
    failed_when: argo_server_pod.rc not in [ 0, 1 ]

  - name: Install argo services
    ansible.builtin.shell:
      cmd: |
        kubectl apply -n argo -f https://github.com/argoproj/argo-workflows/releases/download/v{{ ARGO_VERSION }}/install.yaml
    when: argo_server_pod.rc == 1

  - name: Wait for the argo-server pod to be Running
    ansible.builtin.shell:
      cmd: |
        argo_server_name=$(kubectl get -n argo pods | grep argo-server | cut -d ' ' -f 1)
        kubectl get -n argo pods $argo_server_name --output=jsonpath='{.status.phase}'
    register: pod_status
    retries: 30
    delay: 10
    until: pod_status.stdout == "Running"

  - name: Patch first connection bug
    ansible.builtin.shell: |
      kubectl patch deployment \
      argo-server \
      --namespace argo \
      --type='json' \
      -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/args", "value": [
      "server",
      "--auth-mode=server"
      ]}]'

  - name: Copy the service configuration file to the host
    ansible.builtin.copy:
      src: argo-service.yml
      dest: $HOME
      mode: "0755"

  - name: Apply the conf file to make the service a NodePort type
    ansible.builtin.shell:
      cmd: |
        kubectl apply -f argo-service.yml

  - name: Download argo CLI
    become: true
    ansible.builtin.uri:
      url: "https://github.com/argoproj/argo-workflows/releases/download/v{{ ARGO_VERSION }}/argo-linux-amd64.gz"
      method: GET
      dest: /var
      status_code: 200
      headers:
        Content-Type: "application/json"

  - name: Install argo CLI
    become: true
    ansible.builtin.shell:
      cmd: |
        gunzip argo-linux-amd64.gz
        chmod +x argo-linux-amd64
        mv ./argo-linux-amd64 /usr/local/bin/argo
    args:
      chdir: /var

  - name: Enable argo CLI autocomplete
    ansible.builtin.shell:
      cmd: |
        grep 'argo completion bash' $HOME/.bashrc || echo 'source <(argo completion bash)' >> $HOME/.bashrc

ansible/Argo/deploy_k3s.yml (116 lines, new file)
@@ -0,0 +1,116 @@
- name: Install k3s
  hosts: all:!localhost
  user: "{{ user_prompt }}"
  gather_facts: true

  tasks:
    - name: Update apt
      become: true
      # become_method: su
      ansible.builtin.shell:
        cmd:
          apt update -y

    - name: Install necessary packages
      become: true
      # become_method: su
      package:
        name:
          - sudo
          - curl
          - grep
          - expect
          - adduser
        state: present

    - name: Test if the current user is a sudoer
      ansible.builtin.shell:
        cmd:
          groups {{ ansible_user_id }} | grep -q 'sudo'
      register: sudoer
      failed_when: sudoer.rc not in [ 0, 1 ]

    - name: Add user to sudoers
      become: true
      # become_method: su
      user:
        name: "{{ ansible_user_id }}"
        append: true
        groups: sudo
      when: sudoer.rc == 1

    - name: Reset ssh connection to allow user changes to affect ansible user
      ansible.builtin.meta:
        reset_connection
      when: sudoer.rc == 1

    - name: Wait for the SSH connection to be available again
      wait_for:
        port: 22
        delay: 10
        timeout: 120
      when: sudoer.rc == 1

    - name: Download k3s
      ansible.builtin.uri:
        url: "https://get.k3s.io"
        method: GET
        dest: ./install_k3s.sh
        status_code: 200
        headers:
          Content-Type: "application/json"

    - name: Install k3s
      become: true
      # become_method: su
      ansible.builtin.shell:
        cmd: sh install_k3s.sh

    - name: Add k3s group
      become: true
      # become_method: su
      group:
        name: k3s
        state: present

    - name: Add user to k3s group
      become: true
      # become_method: su
      user:
        name: "{{ ansible_user_id }}"
        append: true
        groups: k3s

    - name: Ensure .kube directory exists
      ansible.builtin.file:
        path: ~/.kube
        state: directory
        mode: '0700'

    - name: Copy kubeconfig file
      become: true
      ansible.builtin.copy:
        src: /etc/rancher/k3s/k3s.yaml
        dest: /home/{{ user_prompt }}/.kube/config
        remote_src: true
        mode: '0600'
        owner: "{{ ansible_user_id }}"
        group: "{{ ansible_user_gid }}"

    - name: Set KUBECONFIG environment variable in .bashrc
      ansible.builtin.lineinfile:
        path: ~/.bashrc
        line: 'export KUBECONFIG=$HOME/.kube/config'

    - name: Ensure kubectl autocompletion is enabled
      ansible.builtin.lineinfile:
        path: ~/.bashrc
        line: 'source <(kubectl completion bash)'

    - name: Unconditionally reboot the machine with all defaults
      become: true
      # become_method: su
      ansible.builtin.reboot:

ansible/MISC/deploy_admiralty_environment.yml (59 lines, new file)
@@ -0,0 +1,59 @@
- name: Deploy VMs based on a local Debian image
  hosts: localhost
  gather_facts: true
  become: true
  vars:
    # debian_image: "/var/lib/libvirt/images"
    # vm: "{{ item }}"
    ssh_pub_key: "/home/pierre/.ssh/id_rsa.pub"
    root: root
    os: https://cloud.debian.org/images/cloud/bullseye/latest/debian-11-generic-amd64.qcow2
    checksum: ""
    xml_template: debian_template
    machines:
      - name: control_test
        ip: 192.168.122.80
      # - name: DC01_test
      #   ip: 192.168.122.81
      # - name: DC02_test
      #   ip: 192.168.122.82

  tasks:
    - name: Check that an OS image was provided
      ansible.builtin.fail:
        msg: You did not provide an image to build from
      when:
        os == ""

    - name: Check whether the XML template is present
      ansible.builtin.stat:
        path: "create_kvm/templates/{{ xml_template }}.xml.j2"
      register: xml_present

    - name: Fail when the XML template is not present
      ansible.builtin.fail:
        msg: You did not provide a valid xml template
      when: not (xml_present.stat.exists)

    - name: KVM Provision role
      ansible.builtin.include_role:
        name: create_kvm
      vars:
        # libvirt_pool_dir: "{{ pool_dir }}"
        os_image: "{{ os }}"
        template_file: "{{ xml_template }}.xml.j2"
        vm_name: "{{ item.name }}"
        ssh_key: "{{ ssh_pub_key }}"
        root_pwd: "{{ root }}"
      loop:
        "{{ machines }}"

    - name: Set up the wanted IP
      ansible.builtin.include_tasks:
        file: setup_vm_ip.yml
      loop:
        "{{ machines }}"


# for control,dc01,dc02
# 192.168.122.70 + 1
# /var/lib/libvirt/images/debian11-2-1-clone.qcow2

ansible/MISC/deploy_mosquitto.yml (32 lines, new file)
@@ -0,0 +1,32 @@
- name: Install Mosquitto
  hosts: "{{ host_prompt }}"
  user: "{{ user_prompt }}"

  tasks:
    - name: Install packages
      become: true
      ansible.builtin.package:
        name:
          - mosquitto
          - mosquitto-clients
        state: present

    - name: Allow anonymous access in mosquitto conf
      become: true
      ansible.builtin.lineinfile:
        path: /etc/mosquitto/conf.d/mosquitto.conf
        line: allow_anonymous true
        create: true

    - name: Configure the mosquitto listener
      become: true
      ansible.builtin.lineinfile:
        path: /etc/mosquitto/conf.d/mosquitto.conf
        line: listener 1883 0.0.0.0

    - name: Restart mosquitto
      become: true
      ansible.builtin.service:
        name: mosquitto
        state: restarted

ansible/MISC/setup_vm_ip.yml (15 lines, new file)
@@ -0,0 +1,15 @@

- name: Retrieve network info
  ansible.builtin.command:
    cmd: virsh domifaddr "{{ item.name }}"
  register: output_domifaddr

- name: Extract the VM's current IP
  vars:
    pattern: '(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
  ansible.builtin.set_fact:
    current_ip: "{{ output_domifaddr.stdout | regex_search(pattern, '\\1') }}"

- name: Show IP
  ansible.builtin.debug:
    msg: "{{ current_ip.0 }}"

ansible/Minio/README.md (111 lines, new file)
@@ -0,0 +1,111 @@
# MinIO

## Deploy MinIO

This playbook installs MinIO on a Kubernetes cluster using Helm and retrieves the necessary credentials and access information.

### Variables
| Variable | Description |
|----------|-------------|
| `user_prompt` | SSH user to execute commands |
| `host_name_prompt` | Hostname of the target machine |
| `memory_req` | Memory allocation for MinIO (`2Gi` by default) |
| `storage_req` | Storage allocation for MinIO (`20Gi` by default) |

### Steps Executed
1. Install necessary Python libraries.
2. Check if Helm is installed and install it if not present.
3. Add and update the MinIO Helm repository.
4. Deploy MinIO using Helm if it is not already running.
5. Retrieve the MinIO credentials (root user and password).
6. Retrieve the MinIO UI console external IP and API internal IP.
7. Display login credentials and connection details.

### Running the Playbook
```sh
ansible-playbook -i inventory deploy_minio.yml --extra-vars "user_prompt=your-user host_name_prompt=your-host"
```

## Setting up MinIO access

/!\ This part can be automated with this **[ansible playbook](https://github.com/pi-B/ansible-oc/blob/main/setup_minio_admiralty.yml)**, which is designed to create resources in an Argo-Workflows/Admiralty combo.

/!\ If you still want to set up the host manually **and** aim to use Admiralty, give the resources a **unique name** and be sure to make this uniqueness accessible (in an environment variable, in a conf file...).

- With the output of the last tasks, create a secret in the argo namespace to give access to the MinIO API. We need to use the `create` verb because `apply` creates a non-functioning secret.

```bash
kubectl create secret -n <name of your argo namespace> generic argo-artifact-secret \
    --from-literal=access-key=<your access key> \
    --from-literal=secret-key=<your secret key>
```

- Create a ConfigMap, which will be used by Argo to create the S3 artifact; its content must match the previously created secret.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  # If you want to use this config map by default, name it "artifact-repositories".
  name: artifact-repositories
  # annotations:
  #   # v3.0 and after - if you want to use a specific key, put that key into this annotation.
  #   workflows.argoproj.io/default-artifact-repository: oc-s3-artifact-repository
data:
  oc-s3-artifact-repository: |
    s3:
      bucket: oc-bucket
      endpoint: [ cluster IP retrieved with kubectl get service argo-artifacts -o jsonpath="{.spec.clusterIP}" ]:9000
      insecure: true
      accessKeySecret:
        name: argo-artifact-secret
        key: access-key
      secretKeySecret:
        name: argo-artifact-secret
        key: secret-key

```
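
The `endpoint` placeholder above has to be filled in by hand; a minimal sketch of retrieving the cluster IP and applying the ConfigMap (assuming it is saved as `artifact-repositories.yaml`, a hypothetical file name):

```sh
kubectl get service argo-artifacts -o jsonpath="{.spec.clusterIP}"   # cluster IP to put in front of :9000
kubectl apply -n <name of your argo namespace> -f artifact-repositories.yaml
```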

## Ansible Playbook setup MinIO

### Purpose
This playbook sets up MinIO to work with Argo Workflows, including creating the required buckets and secrets.

### Variables
| Variable | Description |
|----------|-------------|
| `user_prompt` | SSH user to execute commands |
| `uuid_prompt` | Unique identifier for the Argo secret |
| `argo_namespace` | Kubernetes namespace for Argo (`argo` by default) |

### Steps Executed
1. Install necessary dependencies.
2. Download and configure the MinIO Client (`mc`).
3. Retrieve MinIO credentials (root user and password).
4. Configure `mc` to connect to MinIO.
5. Create a new S3 bucket (`oc-bucket`).
6. Generate a new access key and secret key for MinIO.
7. Retrieve the MinIO API cluster IP.
8. Create a Kubernetes Secret to store MinIO credentials.
9. Create a Kubernetes ConfigMap for the MinIO artifact repository configuration.

### Running the Playbook
```sh
ansible-playbook -i inventory setup_minio_resources.yml --extra-vars "user_prompt=your-user uuid_prompt=unique-id"
```

---

## Expected Output
Upon successful execution, you should see:
- MinIO deployed and accessible.
- MinIO UI console credentials displayed.
- The MinIO bucket (`oc-bucket`) created.
- Secrets and ConfigMaps properly configured in Kubernetes.

For any issues, check the Ansible logs and validate configurations manually using:
```sh
kubectl get pods -n default
kubectl get secrets -n argo
kubectl get configmaps -n argo
```

ansible/Minio/deploy_minio.yml (134 lines, new file)
@@ -0,0 +1,134 @@
- name: Deploy MinIO
  hosts: all:!localhost
  user: "{{ user_prompt }}"
  vars:
    host_name: "{{ host_name_prompt }}"
    memory_req: "2Gi"
    storage_req: "20Gi"
  environment:
    KUBECONFIG: /home/{{ user_prompt }}/.kube/config

  tasks:
    - name: Install the ansible package (provides the Python YAML library)
      become: true
      ansible.builtin.package:
        name: ansible
        state: present

    - name: Check if Helm does exist
      ansible.builtin.command:
        cmd: which helm
      register: result_which
      failed_when: result_which.rc not in [ 0, 1 ]

    - name: Install helm
      when: result_which.rc == 1
      block:
        - name: Download helm from source
          ansible.builtin.get_url:
            url: https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
            dest: ./get_helm.sh
            mode: 0700

        - name: Launch helm install script
          become: true
          ansible.builtin.shell:
            cmd: |
              ./get_helm.sh

    - name: Test if the MinIO repo is already installed
      ansible.builtin.shell:
        cmd: helm repo list | grep 'https://charts.min.io/'
      register: minio_charts
      failed_when: minio_charts.rc not in [0,1]

    - name: Add helm repo MinIO
      kubernetes.core.helm_repository:
        repo_url: https://charts.min.io/
        repo_state: present
        repo_name: minio
      when: minio_charts.rc == 1

    - name: Update helm repo
      ansible.builtin.command:
        cmd: |
          helm repo update
      when: minio_charts.rc == 1

    - name: Test if argo-artifacts is already running
      ansible.builtin.shell:
        helm list | grep -w "argo-artifacts" | wc -l
      register: argo_artifact_deployed
      failed_when: argo_artifact_deployed.rc not in [ 0, 1 ]

    - name: Initialize MinIO
      when: argo_artifact_deployed.stdout == "0"
      kubernetes.core.helm:
        name: argo-artifacts
        chart_ref: minio/minio
        release_namespace: default
        values:
          service:
            type: LoadBalancer
          fullnameOverride: argo-artifacts
          resources:
            requests:
              memory: "{{ memory_req }}"
          replicas: 2
          volumeClaimTemplates:
            spec:
              resources:
                requests: "{{ storage_req }}"
          consoleService:
            type: LoadBalancer
            # port: 9001
        state: present

    - name: Retrieve root user
      ansible.builtin.shell:
        cmd: |
          kubectl get secret argo-artifacts --namespace default -o jsonpath="{.data.rootUser}"
      register: user_encoded

    - name: Decode root user
      ansible.builtin.shell:
        cmd: |
          echo {{ user_encoded.stdout }} | base64 -d
      register: user

    - name: Retrieve root password
      ansible.builtin.shell:
        cmd: |
          kubectl get secret argo-artifacts --namespace default -o jsonpath="{.data.rootPassword}"
      register: password_encoded

    - name: Decode root password
      ansible.builtin.shell:
        cmd: |
          echo {{ password_encoded.stdout }} | base64 -d
      register: password

    - name: Retrieve console ip
      ansible.builtin.shell:
        cmd: |
          kubectl get service argo-artifacts-console -o jsonpath="{.status.loadBalancer.ingress[0].ip}"
      register: ip_console

    - name: Retrieve API internal ip
      ansible.builtin.shell:
        cmd: |
          kubectl get service argo-artifacts -o jsonpath="{.spec.clusterIP}"
      register: ip_api

    - name: Display info
      debug:
        msg:
          "
          MinIO UI console info
            external IP GUI : {{ ip_console.stdout }}
            user : {{ user.stdout }}
            password : {{ password.stdout }}

          IP API : {{ ip_api.stdout }}
          "

ansible/Minio/secret-cnes.yaml (8 lines, new file)
@@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
  name: cnes-secrets
type: Opaque
stringData:
  weather-api: 1d2b4ad68a4375388e64f5353d33186c
  era-5: 3e8457b6-f5eb-4405-a09c-78403a14c4d1

ansible/Minio/setup_minio_oc_bucket.yml (142 lines, new file)
@@ -0,0 +1,142 @@
- name: Setup MinIO oc-bucket for Argo artifacts
  hosts: all:!localhost
  user: "{{ user_prompt }}"
  gather_facts: true
  become_method: sudo
  vars:
    - argo_namespace: argo
    - MC_PATH: $HOME/minio-binaries
    - MINIO_NAME: my-minio
    - UUID: "{{ uuid_prompt }}"
  environment:
    - KUBECONFIG: /home/{{ user_prompt }}/.kube/config
  tasks:

  - name: Install necessary packages
    become: true
    package:
      name:
        - python3-kubernetes
        - python3-jmespath
      state: present

  - name: Create destination directory
    file:
      path: $HOME/minio-binaries
      state: directory
      mode: '0755'

  - name: Install mc
    ansible.builtin.get_url:
      url: "https://dl.min.io/client/mc/release/linux-amd64/mc"
      dest: $HOME/minio-binaries/mc
      mode: +x
      headers:
        Content-Type: "application/json"

  - name: Add mc to path
    ansible.builtin.lineinfile:
      path: $HOME/.bashrc
      line: export PATH=$PATH:$HOME/minio-binaries

  - name: Is mc already set up for the local minio
    ansible.builtin.shell:
      cmd: |
        "{{ MC_PATH }}"/mc admin info {{ MINIO_NAME }}
    register: minio_info
    failed_when: minio_info.rc not in [0,1]

  - name: Retrieve root user
    ansible.builtin.shell:
      cmd: |
        kubectl get secrets argo-artifacts -o jsonpath="{.data.rootUser}" | base64 -d -
    register: user
    when: minio_info.rc == 1

  - name: Retrieve root password
    ansible.builtin.shell:
      cmd: |
        kubectl get secret argo-artifacts --namespace default -o jsonpath="{.data.rootPassword}" | base64 -d -
    register: password
    when: minio_info.rc == 1

  - name: Set up MinIO host in mc
    ansible.builtin.shell:
      cmd: |
        "{{ MC_PATH }}"/mc alias set {{ MINIO_NAME }} http://127.0.0.1:9000 '{{ user.stdout }}' '{{ password.stdout }}'
    failed_when: user.stdout == "" or password.stdout == ""
    when: minio_info.rc == 1

  - name: Does oc-bucket already exist
    ansible.builtin.shell:
      cmd: |
        "{{ MC_PATH }}"/mc ls my-minio | grep -q oc-bucket
    register: bucket_exists
    failed_when: bucket_exists.rc not in [0,1]

  - name: Create oc-bucket
    ansible.builtin.shell:
      cmd: |
        "{{ MC_PATH }}"/mc mb {{ MINIO_NAME }}/oc-bucket
    when: bucket_exists.rc == 1

  - name: Run mc admin accesskey create command
    ansible.builtin.shell:
      cmd: |
        {{ MC_PATH }}/mc admin accesskey create --json {{ MINIO_NAME }}
    register: minio_output
    changed_when: false  # Avoid marking the task as changed every time

  - name: Parse JSON output
    set_fact:
      access_key: "{{ minio_output.stdout | from_json | json_query('accessKey') }}"
      secret_key: "{{ minio_output.stdout | from_json | json_query('secretKey') }}"

  - name: Retrieve cluster IP for minio API
    ansible.builtin.shell:
      cmd: |
        kubectl get service argo-artifacts -o jsonpath="{.spec.clusterIP}"
    register: minio_cluster_ip

  - name: Create the minio secret in argo namespace
    kubernetes.core.k8s:
      state: present
      namespace: '{{ argo_namespace }}'
      name: "{{ UUID }}-argo-artifact-secret"
      definition:
        apiVersion: v1
        kind: Secret
        type: Opaque
        stringData:
          access-key: '{{ access_key }}'
          secret-key: '{{ secret_key }}'

  - name: Create the artifact-repositories ConfigMap in argo namespace
    kubernetes.core.k8s:
      state: present
      namespace: '{{ argo_namespace }}'
      definition:
        apiVersion: v1
        kind: ConfigMap
        metadata:
          name: artifact-repositories
        data:
          oc-s3-artifact-repository: |
            s3:
              bucket: oc-bucket
              endpoint: {{ minio_cluster_ip.stdout }}:9000
              insecure: true
              accessKeySecret:
                name: "{{ UUID }}-argo-artifact-secret"
                key: access-key
              secretKeySecret:
                name: "{{ UUID }}-argo-artifact-secret"
                key: secret-key

    # ansible.builtin.shell:
    #   cmd: |
    #     kubectl create secret -n '{{ argo_namespace }}' generic argo-artifact-secret \
    #     --from-literal=access-key='{{ access_key }}' \
    #     --from-literal=secret-key='{{ secret_key }}'
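A plausible way to run this playbook, following the invocation convention used for the other playbooks in this repository (the inventory path and the variable values are assumptions, adjust them to your setup):

```
ansible-playbook -i ../my_hosts.yaml setup_minio_oc_bucket.yml \
  --extra-vars "user_prompt=<YOUR_USER> uuid_prompt=<UUID>" \
  --ask-pass --ask-become-pass
```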
							
								
								
									
ansible/README.md (86 lines, Normal file)
@@ -0,0 +1,86 @@
Login: admrescue/admrescue

# Requirement

**Ansible** (+ pip):

If you don't have `pip` yet:

```
curl https://bootstrap.pypa.io/get-pip.py -o /tmp/get-pip.py
python3 /tmp/get-pip.py --user
```

```
python3 -m pip install --user ansible
pip install -r requirements.txt
```

**Ansible collections**:

```
ansible-galaxy collection install kubernetes.core
```


# Mosquitto

`sudo apt update && sudo apt install -y mosquitto mosquitto-clients`

You need to add a conf file in `/etc/mosquitto/conf.d/mosquitto.conf` containing:

```
allow_anonymous true
listener 1883 0.0.0.0
```

`sudo systemctl restart mosquitto`

Launch the mosquitto client to receive messages on the machine that hosts the mosquitto server: `sudo mosquitto_sub -h 127.0.0.1 -t argo/alpr`

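To check that the broker is reachable from other machines, you can publish a test message from any host with `mosquitto-clients` installed; the subscriber started above should print it (the broker IP below is a placeholder, not a value from this repository):

```
mosquitto_pub -h <BROKER_IP> -t argo/alpr -m "test message"
```
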
# Argo

## Execute/submit a workflow

```
argo submit PATH_TO_YAML --watch --serviceaccount=argo -n argo
```

# Troubleshoot

## k3s fails to bind to local ports

On certain distros you might already have another mini k8s installed. A sign of this is k3s installing and starting, but never being stable and restarting non-stop.

You should check whether the ports used by k3s are already bound:

> sudo netstat -tuln | grep -E '6443|10250'

If those ports are already in use, you should identify which services run behind them, stop them and preferably uninstall them.
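To identify which process is holding a port, `ss` or `lsof` can be used (shown here for the same k3s ports; both tools are assumed to be installed):

```
sudo ss -tulpn | grep -E '6443|10250'
sudo lsof -i :6443
```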

We have already encountered an instance of `Ubuntu Server` with minikube already installed.

### Remove minikube

```bash
sudo systemctl stop snap.microk8s.daemon-kubelite
sudo systemctl disable snap.microk8s.daemon-kubelite
sudo systemctl restart k3s
```

## Use local container images

We have encountered difficulties declaring container images that correspond to local images (stored in docker.io/library/).

We used a Docker Hub repository to pull our customized image. For this we need to create a secret holding the login information of a Docker account that has access to this repository, which we then link to the serviceAccount running the workflow:

Create the secret in the argo namespace

```
kubectl create secret docker-registry regcred   --docker-username=[DOCKER HUB USERNAME]   --docker-password=[DOCKER HUB PASSWORD] -n argo
```
Patch the `argo` serviceAccount to use the secret when pulling images

```
kubectl patch serviceaccount argo -n argo   -p '{"imagePullSecrets": [{"name": "regcred"}]}'
```
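As a sketch of how a workflow can then use such an image (the image name and template are placeholders, not taken from this repository): the workflow runs under the patched `argo` serviceAccount, whose `imagePullSecrets` allows pulling from the private Docker Hub repository.

```
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: custom-image-
  namespace: argo
spec:
  serviceAccountName: argo          # carries the regcred imagePullSecret
  entrypoint: main
  templates:
    - name: main
      container:
        image: docker.io/[DOCKER HUB USERNAME]/[IMAGE]:latest
        command: ["echo", "hello from the custom image"]
```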
							
								
								
									
ansible/ansible.cfg (3 lines, Normal file)
@@ -0,0 +1,3 @@
[defaults]
stdout_callback = yaml
stderr_callback = yaml
ansible/notes.md (154 lines, Normal file)
@@ -0,0 +1,154 @@
Login: admrescue/admrescue

# Deploy VM with ansible

TODO : check with yves or benjamin how to create a qcow2 image with azerty layout and ssh ready

# Deploy k3s

Two passwords are asked for via the prompt:
- First, the password of the user you are connecting to on the host via ssh
- Second, the root password


`ansible-playbook -i my_hosts.yaml deploy_k3s.yml --extra-vars " user_prompt=<YOUR_USER>"  --ask-pass --ask-become-pass`

# Deploy Argo


The password to provide is the one of the user you are connecting to on the host via ssh.

`ansible-playbook -i my_hosts.yaml deploy_argo.yml --extra-vars " user_prompt=<YOUR_USER>" --ask-pass --ask-become-pass`

# Deploy Admiralty

Install the kubernetes.core collection (`ansible-galaxy collection install kubernetes.core`) so that ansible is able to use some kubectl tooling.

## Install and prepare Admiralty

This play prepares your machine to use Admiralty in kubernetes. It installs helm, cert-manager and admiralty, then configures your clusters to be an admiralty source or target.

/!\ TODO : declare the list of targets and sources in a play's vars

`ansible-playbook -i my_hosts.yaml deploy_admiralty.yml --extra-vars "host_prompt=HOSTNAME user_prompt=<YOUR_USER>"  --ask-pass --ask-become-pass`

## Share kubeconfig for the control cluster

`ansible-playbook -i ../my_hosts.yaml create_secrets.yml --extra-vars "host_prompt=WORKLOAD_HOST user_prompt=<YOUR_USER> control_host=CONTROL_HOST" --ask-pass --ask-become-pass`

# MinIO

- Limit the Memory
- Limit the replicas
- Limit volumeClaimTemplates.spec.resources.requests
- Add a LoadBalancer for the WebUI
- Corrected commands:
> kubectl get secret argo-artifacts --namespace default -o jsonpath="{.data.rootUser}" | base64 --decode

> kubectl get secret argo-artifacts --namespace default -o jsonpath="{.data.rootPassword}" | base64 --decode

- With the output of the last tasks, create a secret in the argo namespace to give access to the minio API

```
apiVersion: v1
kind: Secret
metadata:
  name: argo-minio-secret
type: Opaque
data:
  accessKeySecret: [base64 ENCODED VALUE]
  secretKeySecret: [base64 ENCODED VALUE]
```
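The two values must be base64 encoded, for example (the plain-text values are whatever the access-key creation returned; `-n` avoids encoding a trailing newline):

```
echo -n '[ACCESS KEY VALUE]' | base64
echo -n '[SECRET KEY VALUE]' | base64
```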

- Create a ConfigMap, which Argo will use to configure the S3 artifact repository; its content can match the one from the previously created secret

```
apiVersion: v1
kind: ConfigMap
metadata:
  # If you want to use this config map by default, name it "artifact-repositories". Otherwise, you can provide a reference to a
  # different config map in `artifactRepositoryRef.configMap`.
  name: artifact-repositories
  # annotations:
  #   # v3.0 and after - if you want to use a specific key, put that key into this annotation.
  #   workflows.argoproj.io/default-artifact-repository: oc-s3-artifact-repository
data:
  oc-s3-artifact-repository: |
    s3:
      bucket: oc-bucket
      endpoint: [ retrieve the cluster IP with kubectl get service argo-artifacts -o jsonpath="{.spec.clusterIP}" ]:9000
      insecure: true
      accessKeySecret:
        name: argo-minio-secret
        key: accessKeySecret
      secretKeySecret:
        name: argo-minio-secret
        key: secretKeySecret

```
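A workflow can then select this repository explicitly through `artifactRepositoryRef` instead of relying on the default; a minimal sketch (image, template and artifact names are placeholders, only the ConfigMap name and key come from the ConfigMap above):

```
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: use-oc-bucket-
  namespace: argo
spec:
  entrypoint: main
  serviceAccountName: argo
  # Select the repository declared in the artifact-repositories ConfigMap
  artifactRepositoryRef:
    configMap: artifact-repositories
    key: oc-s3-artifact-repository
  templates:
    - name: main
      container:
        image: alpine:3.19
        command: ["sh", "-c", "echo hello > /tmp/out.txt"]
      outputs:
        artifacts:
          - name: result
            path: /tmp/out.txt
```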

# Use custom container image: local registry



# Mosquitto

`sudo apt update && sudo apt install -y mosquitto mosquitto-clients`

You need to add a conf file in `/etc/mosquitto/conf.d/mosquitto.conf` containing:

```
allow_anonymous true
listener 1883 0.0.0.0
```

`sudo systemctl restart mosquitto`

Launch the mosquitto client to receive messages on the machine that hosts the mosquitto server: `sudo mosquitto_sub -h 127.0.0.1 -t argo/alpr`

# Argo

## Execute/submit a workflow

```
argo submit PATH_TO_YAML --watch --serviceaccount=argo -n argo
```

# Troubleshoot

## k3s fails to bind to local ports

On certain distros you might already have another mini k8s installed. A sign of this is k3s installing and starting, but never being stable and restarting non-stop.

You should check whether the ports used by k3s are already bound:

> sudo netstat -tuln | grep -E '6443|10250'

If those ports are already in use, you should identify which services run behind them, stop them and preferably uninstall them.

We have already encountered an instance of `Ubuntu Server` with minikube already installed.

### Remove minikube

```bash
sudo systemctl stop snap.microk8s.daemon-kubelite
sudo systemctl disable snap.microk8s.daemon-kubelite
sudo systemctl restart k3s
```

## Use local container images

We have encountered difficulties declaring container images that correspond to local images (stored in docker.io/library/).

We used a Docker Hub repository to pull our customized image. For this we need to create a secret holding the login information of a Docker account that has access to this repository, which we then link to the serviceAccount running the workflow:

Create the secret in the argo namespace

```
kubectl create secret docker-registry regcred   --docker-username=[DOCKER HUB USERNAME]   --docker-password=[DOCKER HUB PASSWORD] -n argo
```
Patch the `argo` serviceAccount to use the secret when pulling images

```
kubectl patch serviceaccount argo -n argo   -p '{"imagePullSecrets": [{"name": "regcred"}]}'
```
ansible/requirements.txt (36 lines, Normal file)
@@ -0,0 +1,36 @@
ansible-compat==24.6.0
ansible-core==2.17.0
ansible-creator==24.5.0
ansible-lint==24.5.0
attrs==23.2.0
black==24.4.2
bracex==2.4
cffi==1.16.0
click==8.1.7
cryptography==42.0.7
filelock==3.14.0
importlib_metadata==7.1.0
Jinja2==3.1.4
jmespath==1.0.1
jsonschema==4.22.0
jsonschema-specifications==2023.12.1
markdown-it-py==3.0.0
MarkupSafe==2.1.5
mdurl==0.1.2
mypy-extensions==1.0.0
packaging==24.0
pathspec==0.12.1
platformdirs==4.2.2
pycparser==2.22
Pygments==2.18.0
PyYAML==6.0.1
referencing==0.35.1
resolvelib==1.0.1
rich==13.7.1
rpds-py==0.18.1
ruamel.yaml==0.18.6
ruamel.yaml.clib==0.2.8
subprocess-tee==0.4.1
wcmatch==8.5.2
yamllint==1.35.1
zipp==3.19.0
@@ -1,6 +1,5 @@
 #!/bin/bash
 
-
 REPOS=(
     "oc-auth"
     "oc-catalog"
@@ -19,7 +18,7 @@ REPOS=(
 clone_repo() {
     local repo_url="https://cloud.o-forge.io/core/$1.git"
     local repo_name=$(basename "$repo_url" .git)
-    local branch=$2
+    local branche=$2
     echo "Processing repository: $repo_name"
 
     if [ ! -d "$repo_name" ]; then
@@ -30,18 +29,17 @@ clone_repo() {
             exit 1
         fi
     fi
+    echo "Check in $branche & pull"
+    ls
     echo "Repository '$repo_name' already exists. Pulling latest changes..."
-    cd "$repo_name" && git pull origin $branch && cd ..
+    cd "$repo_name" && git checkout $branche && git pull
+    cd ..
 }
 
 cd ..
 # Iterate through each repository in the list
-branch = "main"
-if [ -n "$1" ]; then
-    branch = $1
-fi
 for repo in "${REPOS[@]}"; do
-    clone_repo $repo $branch
+    clone_repo $repo ${1:-main}
 done
 
 echo "All repositories processed successfully."
docker/db/datas/peer.json (1 line, Normal file)
@@ -0,0 +1 @@
					[{"_id":"c0cece97-7730-4c2a-8c20-a30944564106","failed_execution":null,"abstractobject":{"update_date":{"$date":"2025-03-27T09:13:13.230Z"},"access_mode":0,"id":"c0cece97-7730-4c2a-8c20-a30944564106","name":"local","is_draft":false,"creation_date":{"$date":"2025-03-27T09:13:13.230Z"}},"url":"http://localhost:8000","wallet_address":"my-wallet","public_key":"-----BEGIN RSA PUBLIC KEY-----\nMIICCgKCAgEAw2pdG6wMtuLcP0+k1LFvIb0DQo/oHW2uNJaEJK74plXqp4ztz2dR\nb+RQHFLeLuqk4i/zc3b4K3fKPXSlwnVPJCwzPrnyT8jYGOZVlWlETiV9xeJhu6s/\nBh6g1PWz75XjjwV50iv/CEiLNBT23f/3J44wrQzygqNQCiQSALdxWLAEl4l5kHSa\n9oMyV70/Uql94/ayMARZsHgp9ZvqQKbkZPw6yzVMfCBxQozlNlo315OHevudhnhp\nDRjN5I7zWmqYt6rbXJJC7Y3Izdvzn7QI88RqjSRST5I/7Kz3ndCqrOnI+OQUE5NT\nREyQebphvQfTDTKlRPXkdyktdK2DH28Zj6ZF3yjQvN35Q4zhOzlq77dO5IhhopI7\nct8dZH1T1nYkvdyCA/EVMtQsASmBOitH0Y0ACoXQK5Kb6nm/TcM/9ZSJUNiEMuy5\ngBZ3YKE9oa4cpTpPXwcA+S/cU7HPNnQAsvD3iJi8GTW9uJs84pn4/WhpQqmXd4rv\nhKWECCN3fHy01fUs/U0PaSj2jDY/kQVeXoikNMzPUjdZd9m816TIBh3v3aVXCH/0\niTHHAxctvDgMRb2fpvRJ/wwnYjFG9RpamVFDMvC9NffuYzWAA9IRIY4cqgerfHrV\nZ2HHiPTDDvDAIsvImXZc/h7mXN6m3RCQ4Qywy993wd9gUdgg/qnynHcCAwEAAQ==\n-----END RSA PUBLIC KEY-----\n","state":1}]
docker/kube.exemple.env (4 lines, Normal file)
@@ -0,0 +1,4 @@
					KUBERNETES_SERVICE_HOST=192.168.47.20
					KUBE_CA="LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTWpNeE1USXdNell3SGhjTk1qUXdPREE0TVRBeE16VTJXaGNOTXpRd09EQTJNVEF4TXpVMgpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTWpNeE1USXdNell3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFTVlk3ZHZhNEdYTVdkMy9jMlhLN3JLYjlnWXgyNSthaEE0NmkyNVBkSFAKRktQL2UxSVMyWVF0dzNYZW1TTUQxaStZdzJSaVppNUQrSVZUamNtNHdhcnFvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVWtlUVJpNFJiODduME5yRnZaWjZHClc2SU55NnN3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUlnRXA5ck04WmdNclRZSHYxZjNzOW5DZXZZeWVVa3lZUk4KWjUzazdoaytJS1FDSVFDbk05TnVGKzlTakIzNDFacGZ5ays2NEpWdkpSM3BhcmVaejdMd2lhNm9kdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"
					KUBE_CERT="LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrVENDQVRlZ0F3SUJBZ0lJWUxWNkFPQkdrU1F3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOekl6TVRFeU1ETTJNQjRYRFRJME1EZ3dPREV3TVRNMU5sb1hEVEkxTURndwpPREV3TVRNMU5sb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJGQ2Q1MFdPeWdlQ2syQzcKV2FrOWY4MVAvSkJieVRIajRWOXBsTEo0ck5HeHFtSjJOb2xROFYxdUx5RjBtOTQ2Nkc0RmRDQ2dqaXFVSk92Swp3NVRPNnd5alNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCVFJkOFI5cXVWK2pjeUVmL0ovT1hQSzMyS09XekFLQmdncWhrak9QUVFEQWdOSUFEQkYKQWlFQTArbThqTDBJVldvUTZ0dnB4cFo4NVlMalF1SmpwdXM0aDdnSXRxS3NmUVVDSUI2M2ZNdzFBMm5OVWU1TgpIUGZOcEQwSEtwcVN0Wnk4djIyVzliYlJUNklZCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlRENDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUzTWpNeE1USXdNell3SGhjTk1qUXdPREE0TVRBeE16VTJXaGNOTXpRd09EQTJNVEF4TXpVMgpXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUzTWpNeE1USXdNell3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFRc3hXWk9pbnIrcVp4TmFEQjVGMGsvTDF5cE01VHAxOFRaeU92ektJazQKRTFsZWVqUm9STW0zNmhPeVljbnN3d3JoNnhSUnBpMW5RdGhyMzg0S0Z6MlBvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVTBYZkVmYXJsZm8zTWhIL3lmemx6Cnl0OWlqbHN3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQUxJL2dNYnNMT3MvUUpJa3U2WHVpRVMwTEE2cEJHMXgKcnBlTnpGdlZOekZsQWlFQW1wdjBubjZqN3M0MVI0QzFNMEpSL0djNE53MHdldlFmZWdEVGF1R2p3cFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"
					KUBE_DATA="LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSU5ZS1BFb1dhd1NKUzJlRW5oWmlYMk5VZlY1ZlhKV2krSVNnV09TNFE5VTlvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFVUozblJZN0tCNEtUWUx0WnFUMS96VS84a0Z2Sk1lUGhYMm1Vc25pczBiR3FZblkyaVZEeApYVzR2SVhTYjNqcm9iZ1YwSUtDT0twUWs2OHJEbE03ckRBPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo="
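These `KUBE_*` values look like the base64 `certificate-authority-data`, `client-certificate-data` and `client-key-data` fields of a kubeconfig. Assuming a k3s cluster, comparable values could be extracted like this (an assumption about how this example file was produced, not something stated in the repository):

```
export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
kubectl config view --raw -o jsonpath='{.clusters[0].cluster.certificate-authority-data}'   # KUBE_CA
kubectl config view --raw -o jsonpath='{.users[0].user.client-certificate-data}'            # KUBE_CERT
kubectl config view --raw -o jsonpath='{.users[0].user.client-key-data}'                    # KUBE_DATA
```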
							
								
								
									
docker/start-demo.sh (36 lines, Executable file)
@@ -0,0 +1,36 @@
#!/bin/bash
KUBERNETES_ENV_FILE=$(realpath ${1:-"./kube.exemple.env"})
HOST=${2:-"http://localhost:8000"}
docker network create oc | true

docker compose down
cd ./tools && docker compose  -f ./docker-compose.dev.yml up --force-recreate -d
docker compose  -f ./docker-compose.traefik.yml up --force-recreate -d  && cd ..

cd ./db && ./add.sh && cd ..

cd ../..

REPOS=(
    "oc-auth"
    "oc-catalog"
    "oc-datacenter"
    "oc-monitord"
    "oc-peer"
    "oc-shared"
    "oc-scheduler"
    "oc-schedulerd"
    "oc-workflow"
    "oc-workspace"
    "oc-front"
)
for i in "${REPOS[@]}"
do
   echo "Building $i"
   docker kill $i | true
   docker rm $i | true
   cd ./$i
   cp $KUBERNETES_ENV_FILE ./env.env
   docker build . -t $i --build-arg=HOST=$HOST && docker compose up -d
   cd ..
done
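A usage sketch for this script (both arguments are optional and default to the values shown; it assumes the script is run from the `docker/` directory and that the `oc-*` repositories are checked out two levels up, as the `cd ../..` above implies):

```
cd docker
./start-demo.sh ./kube.exemple.env http://localhost:8000
```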
@@ -1,9 +1,11 @@
 #!/bin/bash
+export KUBERNETES_ENV_FILE=$(realpath ${KUBERNETES_ENV_FILE:-"./kube.exemple.env"})
+export HOST=${HOST:-"http://localhost:8000"}
 docker network create oc | true
 
 docker compose down
-cd ./tools && docker compose  -f ./docker-compose.dev.yml up --force-recreate -d && cd ..
-cd ./tools && docker compose  -f ./docker-compose.traefik.yml up --force-recreate -d  && cd ..
+cd ./tools && docker compose  -f ./docker-compose.dev.yml up --force-recreate -d
+docker compose  -f ./docker-compose.traefik.yml up --force-recreate -d  && cd ..
 
 cd ../..
 
@@ -26,6 +28,7 @@ do
    docker kill $i | true
    docker rm $i | true
    cd ./$i
-   docker build . -t $i && docker compose up -d
+   cp $KUBERNETES_ENV_FILE ./env.env
+   make run-docker
    cd ..
 done
@@ -22,6 +22,8 @@ do
    docker rm $i | true
 done
 
+docker volume rm tools_oc-data
+
 cd ../..
 
 REPOS=(
@@ -69,6 +69,27 @@ services:
       - GF_SECURITY_ADMIN_PASSWORD=pfnirt                   # Change this to anything but admin to not have a password change page at startup
       - GF_SECURITY_ADMIN_USER=admin
       - GF_SECURITY_DISABLE_INITIAL_ADMIN_PASSWORD_CHANGE=true
+  hydra:
+      container_name: hydra
+      image: oryd/hydra:v2.2.0
+      environment:
+        SECRETS_SYSTEM: oc-auth-got-secret
+        LOG_LEAK_SENSITIVE_VALUES: true
+        # OAUTH2_TOKEN_HOOK_URL: http://oc-auth:8080/oc/claims
+        URLS_SELF_ISSUER: http://hydra:4444
+        URLS_SELF_PUBLIC: http://hydra:4444
+        WEBFINGER_OIDC_DISCOVERY_SUPPORTED_SCOPES: profile,email,phone,roles
+        WEBFINGER_OIDC_DISCOVERY_SUPPORTED_CLAIMS: name,family_name,given_name,nickname,email,phone_number
+        DSN: memory
+      command: serve all --dev
+      networks:
+        - oc
+      ports:
+        - "4444:4444"
+        - "4445:4445"
+      deploy:
+        restart_policy:
+          condition: on-failure
   hydra-client: 
         image: oryd/hydra:v2.2.0
         container_name: hydra-client
@@ -106,27 +127,6 @@ services:
             interval: 10s
             timeout: 10s
             retries: 10
-  hydra:
-      container_name: hydra
-      image: oryd/hydra:v2.2.0
-      environment:
-        SECRETS_SYSTEM: oc-auth-got-secret
-        LOG_LEAK_SENSITIVE_VALUES: true
-        # OAUTH2_TOKEN_HOOK_URL: http://oc-auth:8080/oc/claims
-        URLS_SELF_ISSUER: http://hydra:4444
-        URLS_SELF_PUBLIC: http://hydra:4444
-        WEBFINGER_OIDC_DISCOVERY_SUPPORTED_SCOPES: profile,email,phone,roles
-        WEBFINGER_OIDC_DISCOVERY_SUPPORTED_CLAIMS: name,family_name,given_name,nickname,email,phone_number
-        DSN: memory
-      command: serve all --dev
-      networks:
-        - oc
-      ports:
-        - "4444:4444"
-        - "4445:4445"
-      deploy:
-        restart_policy:
-          condition: on-failure
   ldap:
     image: pgarrett/ldap-alpine
     container_name: ldap
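This change only moves the `hydra` service so it is declared before `hydra-client`. With ports 4444/4445 still published, a quick way to check that Hydra is up from the host is its OIDC discovery endpoint (assuming the compose stack runs on the local machine):

```
curl http://localhost:4444/.well-known/openid-configuration
```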
@@ -10,9 +10,9 @@ services:
     command:
       - "--api.insecure=true"
       - "--providers.docker=true"
-      - "--entrypoints.web.address=:80"
+      - "--entrypoints.web.address=:8000"
     ports:
-      - "80:80"  # Expose Traefik on port 80
+      - "8000:8000"  # Expose Traefik on port 8000
     volumes:
       - /var/run/docker.sock:/var/run/docker.sock
 
env.env (4 lines, Normal file)
@@ -0,0 +1,4 @@
					KUBERNETES_SERVICE_HOST=192.168.1.169
					KUBE_CA="LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTWpNeE1USXdNell3SGhjTk1qUXdPREE0TVRBeE16VTJXaGNOTXpRd09EQTJNVEF4TXpVMgpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTWpNeE1USXdNell3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFTVlk3ZHZhNEdYTVdkMy9jMlhLN3JLYjlnWXgyNSthaEE0NmkyNVBkSFAKRktQL2UxSVMyWVF0dzNYZW1TTUQxaStZdzJSaVppNUQrSVZUamNtNHdhcnFvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVWtlUVJpNFJiODduME5yRnZaWjZHClc2SU55NnN3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUlnRXA5ck04WmdNclRZSHYxZjNzOW5DZXZZeWVVa3lZUk4KWjUzazdoaytJS1FDSVFDbk05TnVGKzlTakIzNDFacGZ5ays2NEpWdkpSM3BhcmVaejdMd2lhNm9kdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"
					KUBE_CERT="LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrVENDQVRlZ0F3SUJBZ0lJWUxWNkFPQkdrU1F3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOekl6TVRFeU1ETTJNQjRYRFRJME1EZ3dPREV3TVRNMU5sb1hEVEkxTURndwpPREV3TVRNMU5sb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJGQ2Q1MFdPeWdlQ2syQzcKV2FrOWY4MVAvSkJieVRIajRWOXBsTEo0ck5HeHFtSjJOb2xROFYxdUx5RjBtOTQ2Nkc0RmRDQ2dqaXFVSk92Swp3NVRPNnd5alNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCVFJkOFI5cXVWK2pjeUVmL0ovT1hQSzMyS09XekFLQmdncWhrak9QUVFEQWdOSUFEQkYKQWlFQTArbThqTDBJVldvUTZ0dnB4cFo4NVlMalF1SmpwdXM0aDdnSXRxS3NmUVVDSUI2M2ZNdzFBMm5OVWU1TgpIUGZOcEQwSEtwcVN0Wnk4djIyVzliYlJUNklZCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlRENDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUzTWpNeE1USXdNell3SGhjTk1qUXdPREE0TVRBeE16VTJXaGNOTXpRd09EQTJNVEF4TXpVMgpXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUzTWpNeE1USXdNell3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFRc3hXWk9pbnIrcVp4TmFEQjVGMGsvTDF5cE01VHAxOFRaeU92ektJazQKRTFsZWVqUm9STW0zNmhPeVljbnN3d3JoNnhSUnBpMW5RdGhyMzg0S0Z6MlBvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVTBYZkVmYXJsZm8zTWhIL3lmemx6Cnl0OWlqbHN3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQUxJL2dNYnNMT3MvUUpJa3U2WHVpRVMwTEE2cEJHMXgKcnBlTnpGdlZOekZsQWlFQW1wdjBubjZqN3M0MVI0QzFNMEpSL0djNE53MHdldlFmZWdEVGF1R2p3cFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"
					KUBE_DATA="LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSU5ZS1BFb1dhd1NKUzJlRW5oWmlYMk5VZlY1ZlhKV2krSVNnV09TNFE5VTlvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFVUozblJZN0tCNEtUWUx0WnFUMS96VS84a0Z2Sk1lUGhYMm1Vc25pczBiR3FZblkyaVZEeApYVzR2SVhTYjNqcm9iZ1YwSUtDT0twUWs2OHJEbE03ckRBPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo="