--- # Playbook to configure Google as an identity provider and set up cluster roles (refactored) - name: Configure OpenShift OAuth and RBAC hosts: localhost connection: local gather_facts: false vars_files: - ../secrets.yml pre_tasks: - name: Ensure required collections are available ansible.builtin.debug: msg: "Ensuring kubernetes.core collection is available in execution environment" - name: Verify cluster variables are set ansible.builtin.assert: that: - cluster_region is defined - cluster_name is defined fail_msg: "Cluster region and name must be defined in inventory" - name: Check if this is a hub cluster ansible.builtin.set_fact: is_hub_cluster: "{{ vars[cluster_region][cluster_name]['is_hub'] | default(false) }}" - name: Retrieve kubeadmin password for the cluster ansible.builtin.set_fact: kubeadmin_password: "{{ vars[cluster_region][cluster_name]['kubeadmin_password'] | default('') }}" roles: - role: kubernetes_auth tasks: - name: Create container-mom-system namespace kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Namespace metadata: name: container-mom-system kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" when: is_hub_cluster | bool - name: Create cert-manager namespace kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Namespace metadata: name: cert-manager kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Create cloudflare-secret in cert-manager namespace kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Secret metadata: name: cloudflare-secret namespace: cert-manager type: Opaque stringData: api-token: "{{ global.cloudflare }}" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # Enable the RouteExternalCertificate feature - name: Enable RouteExternalCertificate feature gate kubernetes.core.k8s: state: present definition: apiVersion: config.openshift.io/v1 kind: FeatureGate metadata: name: cluster spec: featureSet: CustomNoUpgrade customNoUpgrade: enabled: - RouteExternalCertificate kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" register: feature_gate_result - name: Wait for feature gate to be applied (60 seconds) ansible.builtin.pause: seconds: 60 when: feature_gate_result.changed | bool # Hub cluster specific tasks - name: Setup hub cluster registry credentials when: is_hub_cluster | bool block: - name: Extract Docker registry credentials from vault ansible.builtin.set_fact: registry_password: "{{ global.registry_token }}" registry_server: "ghcr.io" registry_username: "pfeifferj" registry_email: "hi@josie.lol" - name: Create proper Docker config JSON ansible.builtin.set_fact: auth_string: "{{ registry_username }}:{{ registry_password }}" docker_config_json: "{{ {'auths': {'https://' + registry_server: {'username': registry_username, 'password': registry_password, 'email': registry_email, 'auth': (registry_username + ':' + registry_password) | b64encode }}} | to_json }}" - name: Create container-mom registry secret kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Secret metadata: name: container-mom-registry namespace: container-mom-system type: kubernetes.io/dockerconfigjson stringData: .dockerconfigjson: "{{ docker_config_json }}" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Create container-mom MongoDB secret (production) kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Secret metadata: name: container-mom-mongodb namespace: container-mom-system type: Opaque stringData: uri: "{{ global.prod.mongodb_uri | default('') }}" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # Create portal namespace and secrets - name: Create container-mom-portal namespace kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Namespace metadata: name: container-mom-portal labels: app.kubernetes.io/name: container-mom-portal app.kubernetes.io/part-of: container-mom kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Create registry secret in portal namespace kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Secret metadata: name: container-mom-registry namespace: container-mom-portal type: kubernetes.io/dockerconfigjson stringData: .dockerconfigjson: "{{ docker_config_json }}" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # Create landing page namespace and secrets - name: Create container-mom-landing namespace kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Namespace metadata: name: container-mom-landing labels: app.kubernetes.io/name: container-mom-landing app.kubernetes.io/part-of: container-mom kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Create registry secret in landing namespace kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Secret metadata: name: container-mom-registry namespace: container-mom-landing type: kubernetes.io/dockerconfigjson stringData: .dockerconfigjson: "{{ docker_config_json }}" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # Setup test environment - name: Create container-mom-test namespace kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Namespace metadata: name: container-mom-system-test kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Create container-mom registry secret in test namespace kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Secret metadata: name: container-mom-registry namespace: container-mom-system-test type: kubernetes.io/dockerconfigjson stringData: .dockerconfigjson: "{{ docker_config_json }}" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Create container-mom MongoDB secret (test) kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Secret metadata: name: container-mom-mongodb namespace: container-mom-system-test type: Opaque stringData: uri: "{{ global.test.mongodb_uri | default('') }}" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # Setup test portal namespace - name: Create container-mom-portal-test namespace kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Namespace metadata: name: container-mom-portal-test labels: app.kubernetes.io/name: container-mom-portal app.kubernetes.io/part-of: container-mom environment: test kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Create registry secret in portal test namespace kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Secret metadata: name: container-mom-registry namespace: container-mom-portal-test type: kubernetes.io/dockerconfigjson stringData: .dockerconfigjson: "{{ docker_config_json }}" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # Setup test landing namespace - name: Create container-mom-landing-test namespace kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Namespace metadata: name: container-mom-landing-test labels: app.kubernetes.io/name: container-mom-landing app.kubernetes.io/part-of: container-mom environment: test kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Create registry secret in landing test namespace kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Secret metadata: name: container-mom-registry namespace: container-mom-landing-test type: kubernetes.io/dockerconfigjson stringData: .dockerconfigjson: "{{ docker_config_json }}" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Get list of regions from vault ansible.builtin.set_fact: vault_regions: "{{ lookup('dict', vars) | selectattr('key', 'match', '^[a-z]{3}$') | map(attribute='key') | list }}" - name: Initialize empty managed_clusters list ansible.builtin.set_fact: managed_clusters: [] - name: Loop through each region include_tasks: tasks/process_region.yml loop: "{{ vault_regions }}" loop_control: loop_var: current_region - name: Create comprehensive cluster access secrets for all managed clusters kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Secret metadata: name: "{{ item.cluster }}-{{ item.region }}-cluster-access" namespace: container-mom-system labels: container-mom.io/cluster-type: "managed" container-mom.io/region: "{{ item.region }}" type: Opaque stringData: password: "{{ vars[item.region][item.cluster].kubeadmin_password }}" username: "kubeadmin" cluster_name: "{{ item.cluster }}" region: "{{ item.region }}" api_url: "https://api.{{ item.cluster }}.{{ item.region }}.container.mom:6443" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" loop: "{{ managed_clusters | default([]) }}" # Gitea Installation and Configuration - name: Set up Gitea Git Server when: is_hub_cluster | bool block: - name: Set default OpenShift storage class ansible.builtin.set_fact: openshift_default_storage_class: "{{ vars[cluster_region][cluster_name]['storage_class'] | default('gitea-local-storage') }}" - name: Delete existing PersistentVolumes if they exist kubernetes.core.k8s: state: absent api_version: v1 kind: PersistentVolume name: "{{ item }}" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" ignore_errors: true loop: - gitea-local-pv-data - gitea-local-pv-postgresql - gitea-local-pv-postgresql-ha-0 - gitea-local-pv-postgresql-ha-1 - gitea-local-pv-postgresql-ha-2 - gitea-local-pv-redis-0 - gitea-local-pv-redis-1 - gitea-local-pv-redis-2 - name: Create local storage class for Gitea kubernetes.core.k8s: state: present definition: apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: gitea-local-storage annotations: storageclass.kubernetes.io/is-default-class: "true" provisioner: kubernetes.io/no-provisioner volumeBindingMode: WaitForFirstConsumer reclaimPolicy: Delete kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Create local PersistentVolumes for Gitea kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: PersistentVolume metadata: name: gitea-local-pv-data spec: capacity: storage: 10Gi accessModes: - ReadWriteOnce persistentVolumeReclaimPolicy: Delete storageClassName: gitea-local-storage local: path: /var/gitea-data nodeAffinity: required: nodeSelectorTerms: - matchExpressions: - key: node-role.kubernetes.io/worker operator: Exists kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Create local PersistentVolume for PostgreSQL kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: PersistentVolume metadata: name: gitea-local-pv-postgresql spec: capacity: storage: 8Gi accessModes: - ReadWriteOnce persistentVolumeReclaimPolicy: Delete storageClassName: gitea-local-storage local: path: /var/gitea-postgresql nodeAffinity: required: nodeSelectorTerms: - matchExpressions: - key: node-role.kubernetes.io/worker operator: Exists kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Create local PersistentVolumes for PostgreSQL HA nodes kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: PersistentVolume metadata: name: "gitea-local-pv-postgresql-ha-{{ item }}" spec: capacity: storage: 8Gi accessModes: - ReadWriteOnce persistentVolumeReclaimPolicy: Delete storageClassName: gitea-local-storage local: path: "/var/gitea-postgresql-ha-{{ item }}" nodeAffinity: required: nodeSelectorTerms: - matchExpressions: - key: node-role.kubernetes.io/worker operator: Exists kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" loop: [0, 1, 2] - name: Create local PersistentVolumes for Redis Cluster nodes kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: PersistentVolume metadata: name: "gitea-local-pv-redis-{{ item }}" spec: capacity: storage: 8Gi accessModes: - ReadWriteOnce persistentVolumeReclaimPolicy: Delete storageClassName: gitea-local-storage local: path: "/var/gitea-redis-{{ item }}" nodeAffinity: required: nodeSelectorTerms: - matchExpressions: - key: node-role.kubernetes.io/worker operator: Exists kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" loop: [0, 1, 2] - name: Create container-mom-git namespace kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Namespace metadata: name: container-mom-git labels: app.kubernetes.io/name: gitea app.kubernetes.io/part-of: container-mom kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Create ServiceAccount for storage job kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: ServiceAccount metadata: name: gitea namespace: container-mom-git kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Add SCC for privileged pods kubernetes.core.k8s: state: present definition: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: gitea-privileged roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:openshift:scc:privileged subjects: - kind: ServiceAccount name: gitea namespace: container-mom-git kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Delete existing Job if it exists kubernetes.core.k8s: state: absent api_version: batch/v1 kind: Job name: gitea-storage-init namespace: container-mom-git kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" ignore_errors: true - name: Create Job to initialize local storage directories kubernetes.core.k8s: state: present definition: apiVersion: batch/v1 kind: Job metadata: name: gitea-storage-init namespace: container-mom-git spec: ttlSecondsAfterFinished: 100 template: spec: # Use a more generic node selector that will work with any worker node nodeSelector: node-role.kubernetes.io/worker: "" containers: - name: init-local-dirs image: registry.redhat.io/openshift4/ose-cli:latest command: - /bin/sh - -c - | mkdir -p /host/var/gitea-data /host/var/gitea-postgresql chmod 777 /host/var/gitea-data /host/var/gitea-postgresql echo "Storage directories created and permissions set" securityContext: privileged: true volumeMounts: - name: host mountPath: /host volumes: - name: host hostPath: path: / restartPolicy: Never serviceAccountName: gitea kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Wait for job to complete kubernetes.core.k8s_info: api_version: batch/v1 kind: Job name: gitea-storage-init namespace: container-mom-git kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" register: job_status until: job_status.resources[0].status.succeeded is defined and job_status.resources[0].status.succeeded > 0 retries: 15 delay: 10 - name: Get Gitea admin credentials from vault ansible.builtin.set_fact: gitea_admin_username: "{{ global.gitea.admin_username | default('gitea_admin') }}" gitea_admin_password: "{{ global.gitea.admin_password | default('gitea_admin') }}" gitea_admin_email: "{{ global.gitea.admin_email | default('gitea@container.mom') }}" gitea_db_password: "{{ global.gitea.db_password | default('gitea') }}" - name: Install Gitea using Helm kubernetes.core.helm: name: gitea chart_ref: gitea chart_repo_url: https://dl.gitea.io/charts/ release_namespace: container-mom-git wait: true wait_timeout: 600s # Increased timeout to 10 minutes (added 's' unit for seconds) values: gitea: admin: username: "{{ gitea_admin_username }}" password: "{{ gitea_admin_password }}" email: "{{ gitea_admin_email }}" passwordMode: keepUpdated config: server: ROOT_URL: "https://git.container.mom" DOMAIN: "git.container.mom" SSH_DOMAIN: "git.container.mom" PROTOCOL: https HTTP_PORT: 3000 SSH_PORT: 22 SSH_LISTEN_PORT: 2222 primary: persistence: enabled: true size: 10Gi storageClass: "{{ openshift_default_storage_class }}" podSecurityContext: fsGroup: null seccompProfile: type: RuntimeDefault containerSecurityContext: runAsNonRoot: true allowPrivilegeEscalation: false seccompProfile: type: RuntimeDefault capabilities: drop: - ALL postgresql: enabled: true global: postgresql: auth: username: gitea password: "{{ gitea_db_password }}" database: gitea primary: persistence: enabled: true size: 8Gi storageClass: "{{ openshift_default_storage_class }}" postgresql-ha: enabled: false redis-cluster: enabled: false ingress: enabled: true annotations: route.openshift.io/termination: edge route.openshift.io/insecureEdgeTerminationPolicy: Redirect hosts: - host: "git.container.mom" paths: - path: / pathType: Prefix image: repository: gitea/gitea tag: "1.21.2" pullPolicy: IfNotPresent rootless: true persistence: enabled: true size: 10Gi storageClass: "{{ openshift_default_storage_class }}" resources: requests: cpu: 200m memory: 256Mi limits: cpu: 1 memory: 1Gi serviceAccount: create: true name: gitea initContainers: resources: requests: cpu: 100m memory: 128Mi limits: cpu: 500m memory: 256Mi securityContext: runAsNonRoot: true seccompProfile: type: RuntimeDefault capabilities: drop: - ALL securityContext: null persistence: enabled: true size: 10Gi storageClass: "{{ openshift_default_storage_class }}" resources: requests: cpu: 200m memory: 256Mi limits: cpu: 1 memory: 1Gi serviceAccount: create: true name: gitea initContainers: resources: requests: cpu: 100m memory: 128Mi limits: cpu: 500m memory: 256Mi securityContext: runAsNonRoot: true seccompProfile: type: RuntimeDefault capabilities: drop: - ALL securityContext: null kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # OAuth setup - name: Generate random OAuth client secret ansible.builtin.set_fact: oauth_client_secret: "{{ lookup('password', '/dev/null chars=ascii_letters,digits length=32') }}" - name: Create OpenShift OAuth client kubernetes.core.k8s: state: present definition: apiVersion: oauth.openshift.io/v1 kind: OAuthClient metadata: name: "{{ cluster_name }}-oauth-client" secret: "{{ oauth_client_secret }}" redirectURIs: - "https://argocd.apps.{{ cluster_name }}.{{ cluster_region }}.container.mom/api/dex/callback" grantMethod: auto kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Store OAuth client details as facts ansible.builtin.set_fact: argocd_client_id: "{{ cluster_name }}-oauth-client" argocd_client_secret: "{{ oauth_client_secret }}" # Google identity provider setup - name: Create Google OAuth secret kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Secret metadata: name: google-secret namespace: openshift-config type: Opaque stringData: clientSecret: "{{ vars[cluster_region][cluster_name]['google_client_secret'] | default('') }}" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Configure OAuth for Google identity provider kubernetes.core.k8s: state: present definition: apiVersion: config.openshift.io/v1 kind: OAuth metadata: name: cluster spec: identityProviders: - name: googleidp mappingMethod: claim type: Google google: clientID: "{{ vars[cluster_region][cluster_name]['google_client_id'] | default('') }}" clientSecret: name: google-secret hostedDomain: "{{ google_hosted_domain | default(omit) }}" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # RBAC setup - name: Create ClusterRoleBinding for cluster-admin role for specific user kubernetes.core.k8s: state: present definition: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: google-cluster-admin roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cluster-admin subjects: - apiGroup: rbac.authorization.k8s.io kind: User name: pfeifferj@archlinux.ch kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Create ClusterRoleBinding for cluster-viewer role for container.mom domain kubernetes.core.k8s: state: present definition: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: google-cluster-viewer-container-mom roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: view subjects: - apiGroup: rbac.authorization.k8s.io kind: Group name: container.mom kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # Deploy ArgoCD - name: Deploy latest version of argocd kubernetes.core.helm: name: argocd release_namespace: argocd create_namespace: true chart_ref: "https://github.com/argoproj/argo-helm/releases/download/argo-cd-7.8.11/argo-cd-7.8.11.tgz" values: openshift: enabled: true redis: enabled: true server: route: enabled: true hostname: argocd.apps.{{ cluster_name }}.{{ cluster_region }}.container.mom dex: enabled: true config: | connectors: - type: openshift id: openshift name: OpenShift config: clientID: "{{ argocd_client_id }}" clientSecret: "{{ argocd_client_secret }}" redirectURI: "https://argocd.apps.{{ cluster_name }}.{{ cluster_region }}.container.mom/api/dex/callback" configs: cm: oidc.config: | name: openshift issuer: {{ openshift_cluster_api }}/oauth2/default clientID: "{{ argocd_client_id }}" clientSecret: "{{ argocd_client_secret }}" redirectURI: "https://argocd.apps.{{ cluster_name }}.{{ cluster_region }}.container.mom/api/dex/callback" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # Deploy Workload ArgoCD - name: Set up Workload ArgoCD when: is_hub_cluster | bool block: - name: Create wkl-argocd namespace kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Namespace metadata: name: wkl-argocd labels: app.kubernetes.io/name: wkl-argocd app.kubernetes.io/part-of: container-mom kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Deploy workload ArgoCD instance kubernetes.core.helm: name: wkl-argocd release_namespace: wkl-argocd create_namespace: true chart_ref: "https://github.com/argoproj/argo-helm/releases/download/argo-cd-7.8.11/argo-cd-7.8.11.tgz" wait: true values: nameOverride: "wkl-argocd" fullnameOverride: "wkl-argocd" global: deploymentStrategy: type: RollingUpdate openshift: enabled: true redis: enabled: true server: route: enabled: true hostname: wkl-argocd.apps.{{ cluster_name }}.{{ cluster_region }}.container.mom annotations: route.openshift.io/termination: edge dex: enabled: true config: | connectors: - type: openshift id: openshift name: OpenShift config: clientID: "{{ argocd_client_id }}" clientSecret: "{{ argocd_client_secret }}" redirectURI: "https://wkl-argocd.apps.{{ cluster_name }}.{{ cluster_region }}.container.mom/api/dex/callback" rbac: policy.csv: | g, container.mom, role:admin configs: cm: application.instanceLabelKey: "wkl-argocd.argoproj.io/instance" oidc.config: | name: openshift issuer: https://api.{{ cluster_name }}.{{ cluster_region }}.container.mom:6443/oauth2/default clientID: "{{ argocd_client_id }}" clientSecret: "{{ argocd_client_secret }}" redirectURI: "https://wkl-argocd.apps.{{ cluster_name }}.{{ cluster_region }}.container.mom/api/dex/callback" resource.exclusions: | - apiGroups: - internal.open-cluster-management.io kinds: - ManagedClusterInfo clusters: - "*" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # Create ArgoCD specific certificate for workload ArgoCD - name: Include cert_management role for workload ArgoCD certificate include_role: name: cert_management vars: certificate_name: "wkl-argocd-cert" certificate_namespace: "wkl-argocd" certificate_secret_name: "wkl-argocd-tls" certificate_common_name: "wkl-argocd.apps.{{ cluster_name }}.{{ cluster_region }}.container.mom" certificate_dns_names: - "wkl-argocd.apps.{{ cluster_name }}.{{ cluster_region }}.container.mom" certificate_issuer: "letsencrypt-prod" certificate_issuer_kind: "ClusterIssuer" certificate_wait_retries: 30 certificate_wait_delay: 10 role_name: "wkl-argocd-tls-reader" rolebinding_name: "wkl-argocd-tls-reader-binding" router_service_account: "router" router_namespace: "openshift-ingress" create_route_role: true # Get workload ArgoCD route info - name: Get workload ArgoCD route kubernetes.core.k8s_info: api_version: route.openshift.io/v1 kind: Route name: wkl-argocd-server namespace: wkl-argocd kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" register: wkl_argocd_route ignore_errors: true # Check if workload ArgoCD TLS secret exists - name: Check if workload ArgoCD TLS secret exists kubernetes.core.k8s_info: api_version: v1 kind: Secret name: "wkl-argocd-tls" namespace: "wkl-argocd" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" register: wkl_argocd_tls_secret ignore_errors: true # Update workload ArgoCD route to use external certificate - name: Update workload ArgoCD route to use external certificate kubernetes.core.k8s: state: present definition: apiVersion: route.openshift.io/v1 kind: Route metadata: name: wkl-argocd-server namespace: wkl-argocd annotations: haproxy.router.openshift.io/timeout: 60s route.openshift.io/termination: edge spec: host: "wkl-argocd.apps.{{ cluster_name }}.{{ cluster_region }}.container.mom" port: targetPort: "{{ wkl_argocd_route.resources[0].spec.port.targetPort }}" tls: termination: edge insecureEdgeTerminationPolicy: None externalCertificate: name: "wkl-argocd-tls" to: kind: Service name: wkl-argocd-server weight: 100 wildcardPolicy: None kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" when: - wkl_argocd_route.resources is defined - wkl_argocd_route.resources | length > 0 - wkl_argocd_tls_secret.resources is defined - wkl_argocd_tls_secret.resources | length > 0 # workload ArgoCD cluster-admin permissions - name: Create a ClusterRoleBinding to grant workload ArgoCD needed privileges kubernetes.core.k8s: state: present definition: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: wkl-argocd-admin roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: admin subjects: - kind: ServiceAccount name: wkl-argocd-application-controller namespace: wkl-argocd kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # Configure workload ArgoCD to deploy to managed clusters - name: Get list of managed cluster secrets kubernetes.core.k8s_info: api_version: v1 kind: Secret namespace: container-mom-system label_selectors: - container-mom.io/cluster-type=managed kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" register: managed_cluster_secrets ignore_errors: true - name: Create ArgoCD cluster secrets for each managed cluster kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Secret metadata: name: "managed-cluster-{{ item.metadata.name }}" namespace: wkl-argocd labels: argocd.argoproj.io/secret-type: cluster type: Opaque stringData: name: "{{ item.data.cluster_name | b64decode }}-{{ item.data.region | b64decode }}" server: "{{ item.data.api_url | b64decode }}" config: | { "bearerToken": "{{ lookup('kubernetes.core.k8s_jwt_token', namespace='wkl-argocd', service_account='wkl-argocd-application-controller', kubeconfig=k8s_auth_params.kubeconfig, validate_certs=k8s_auth_params.validate_certs) }}", "tlsClientConfig": { "insecure": false } } username: "{{ item.data.username | b64decode }}" password: "{{ item.data.password | b64decode }}" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" loop: "{{ managed_cluster_secrets.resources | default([]) }}" when: managed_cluster_secrets.resources is defined # Create a ServiceAccount token for workload ArgoCD to access the managed clusters - name: Create ServiceAccount token for workload ArgoCD kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Secret metadata: name: wkl-argocd-token namespace: wkl-argocd annotations: kubernetes.io/service-account.name: wkl-argocd-application-controller type: kubernetes.io/service-account-token kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # Grant workload ArgoCD cluster-admin access to managed clusters - name: Create cluster-admin ClusterRoleBinding for workload ArgoCD on all clusters kubernetes.core.k8s: state: present definition: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: wkl-argocd-cluster-admin roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cluster-admin subjects: - kind: ServiceAccount name: wkl-argocd-application-controller namespace: wkl-argocd kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # Create GitHub repository credentials secret for workload ArgoCD - name: Create GitHub repo credentials secret for workload ArgoCD kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Secret metadata: name: wkl-argocd-github-repo namespace: wkl-argocd labels: argocd.argoproj.io/secret-type: repository type: Opaque stringData: type: git url: https://github.com/pfeifferj/container-mom-go.git username: "pfeifferj" password: "{{ global.repo_token | default('') }}" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # Create Gitea repository credentials secret for workload ArgoCD - name: Create Gitea repo credentials secret for workload ArgoCD kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Secret metadata: name: wkl-argocd-gitea-repo namespace: wkl-argocd labels: argocd.argoproj.io/secret-type: repository type: Opaque stringData: type: git url: "https://git.container-mom.apps.{{ cluster_name }}.{{ cluster_region }}.container.mom" username: "{{ gitea_admin_username }}" password: "{{ gitea_admin_password }}" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # Add privileged SCC to default service account in argocd namespace - name: Grant privileged SCC to default service account in argocd namespace kubernetes.core.k8s: state: present definition: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: argocd-default-privileged roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:openshift:scc:privileged subjects: - kind: ServiceAccount name: default namespace: argocd kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # Create GitHub repository credentials secret for ArgoCD - name: Create GitHub repo credentials secret for ArgoCD kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Secret metadata: name: argocd-github-repo namespace: argocd labels: argocd.argoproj.io/secret-type: repository type: Opaque stringData: type: git url: https://github.com/pfeifferj/container-mom-go.git username: "pfeifferj" password: "{{ global.repo_token | default('') }}" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # Create Gitea repository credentials secret for ArgoCD - name: Create Gitea repo credentials secret for ArgoCD kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Secret metadata: name: argocd-gitea-repo namespace: argocd labels: argocd.argoproj.io/secret-type: repository type: Opaque stringData: type: git url: "https://git.container-mom.apps.{{ cluster_name }}.{{ cluster_region }}.container.mom" username: "{{ gitea_admin_username }}" password: "{{ gitea_admin_password }}" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" when: is_hub_cluster | bool # Deploy Cert Manager - name: Deploy cert-manager kubernetes.core.helm: name: cert-manager release_namespace: cert-manager create_namespace: true chart_ref: "https://charts.jetstack.io/charts/cert-manager-v1.17.1.tgz" wait: true values: installCRDs: true prometheus: enabled: false webhook: timeoutSeconds: 10 kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Create ClusterIssuer for Let's Encrypt kubernetes.core.k8s: state: present definition: apiVersion: cert-manager.io/v1 kind: ClusterIssuer metadata: name: letsencrypt-prod spec: acme: server: https://acme-v02.api.letsencrypt.org/directory email: admin@container.mom privateKeySecretRef: name: letsencrypt-prod solvers: - dns01: cloudflare: email: admin@container.mom apiTokenSecretRef: name: cloudflare-secret key: api-token kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" - name: Ensure container-mom-system namespace exists kubernetes.core.k8s: state: present definition: apiVersion: v1 kind: Namespace metadata: name: container-mom-system kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # Prepare certificate variables - name: Set certificate DNS names ansible.builtin.set_fact: base_domain: "{{ dns.fqdn | regex_replace('^.*?\\.(.*)$', '\\1') }}" cert_dns_names: - "*.{{ dns.fqdn | regex_replace('^.*?\\.(.*)$', '\\1') }}" - "{{ dns.fqdn | regex_replace('^.*?\\.(.*)$', '\\1') }}" - "*.apps.{{ cluster_name }}.{{ cluster_region }}.{{ dns.fqdn | regex_replace('^.*?\\.(.*)$', '\\1') }}" - name: Add custom domains to certificate DNS names (excluding redundant domains) ansible.builtin.set_fact: cert_dns_names: "{{ cert_dns_names + [item + '.' + base_domain] }}" loop: "{{ dns.custom_domains | default([]) }}" when: - item != '@' # Skip domains that would be covered by wildcards we already have - not (item + '.' + base_domain == dns.fqdn) - not (item | regex_search('^[^.]+$') and ('*.' + base_domain) in cert_dns_names) # Create wildcard certificate using role - name: Include cert_management role for wildcard certificate include_role: name: cert_management vars: certificate_name: "{{ cluster_name }}-{{ cluster_region }}-wildcard" certificate_namespace: "container-mom-system" certificate_secret_name: "{{ cluster_name }}-{{ cluster_region }}-tls" certificate_common_name: "*.{{ base_domain }}" certificate_dns_names: "{{ cert_dns_names }}" certificate_issuer: "letsencrypt-prod" certificate_issuer_kind: "ClusterIssuer" certificate_wait_retries: 60 certificate_wait_delay: 10 role_name: "{{ cluster_name }}-{{ cluster_region }}-wildcard-reader" rolebinding_name: "{{ cluster_name }}-{{ cluster_region }}-wildcard-reader-binding" router_service_account: "router" router_namespace: "openshift-ingress" create_route_role: true # Get ArgoCD route info - name: Get ArgoCD route kubernetes.core.k8s_info: api_version: route.openshift.io/v1 kind: Route name: argocd-server namespace: argocd kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" register: argocd_route ignore_errors: true # Create ArgoCD specific certificate using role - name: Include cert_management role for ArgoCD certificate include_role: name: cert_management vars: certificate_name: "argocd-cert" certificate_namespace: "argocd" certificate_secret_name: "argocd-tls" certificate_common_name: "argocd.apps.{{ cluster_name }}.{{ cluster_region }}.container.mom" certificate_dns_names: - "argocd.apps.{{ cluster_name }}.{{ cluster_region }}.container.mom" certificate_issuer: "letsencrypt-prod" certificate_issuer_kind: "ClusterIssuer" certificate_wait_retries: 30 certificate_wait_delay: 10 role_name: "argocd-tls-reader" rolebinding_name: "argocd-tls-reader-binding" router_service_account: "router" router_namespace: "openshift-ingress" create_route_role: true # Check if ArgoCD TLS secret exists - name: Check if ArgoCD TLS secret exists kubernetes.core.k8s_info: api_version: v1 kind: Secret name: "argocd-tls" namespace: "argocd" kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" register: argocd_tls_secret ignore_errors: true # Update ArgoCD route to use external certificate - name: Update ArgoCD route to use external certificate kubernetes.core.k8s: state: present definition: apiVersion: route.openshift.io/v1 kind: Route metadata: name: argocd-server namespace: argocd annotations: haproxy.router.openshift.io/timeout: 60s route.openshift.io/termination: edge spec: host: "argocd.apps.{{ cluster_name }}.{{ cluster_region }}.container.mom" port: targetPort: "{{ argocd_route.resources[0].spec.port.targetPort }}" tls: termination: edge insecureEdgeTerminationPolicy: None externalCertificate: name: "argocd-tls" to: kind: Service name: argocd-server weight: 100 wildcardPolicy: None kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" when: - argocd_route.resources is defined - argocd_route.resources | length > 0 - argocd_tls_secret.resources is defined - argocd_tls_secret.resources | length > 0 register: argocd_route_update # ArgoCD cluster-admin permissions - name: Create a ClusterRoleBinding to grant ArgoCD cluster-admin privileges kubernetes.core.k8s: state: present definition: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: argocd-cluster-admin roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cluster-admin subjects: - kind: ServiceAccount name: argocd-application-controller namespace: argocd kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # Configure app-of-apps - name: Configure the Application of Applications (app-of-apps) kubernetes.core.k8s: state: present apply: true definition: apiVersion: argoproj.io/v1alpha1 kind: Application metadata: name: app-of-apps namespace: argocd spec: destination: namespace: argocd server: https://kubernetes.default.svc project: default source: path: "manifests/00-argocd-app-of-apps-chart" repoURL: https://github.com/pfeifferj/container-mom-go.git targetRevision: main syncPolicy: automated: prune: false selfHeal: true kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" # Hub-specific final configuration - name: Create container-mom AppProject for hub cluster kubernetes.core.k8s: state: present definition: apiVersion: argoproj.io/v1alpha1 kind: AppProject metadata: name: container-mom namespace: argocd spec: description: Container Mom Applications sourceRepos: - '*' destinations: - namespace: '*' server: '*' clusterResourceWhitelist: - group: '*' kind: '*' namespaceResourceWhitelist: - group: '*' kind: '*' kubeconfig: "{{ k8s_auth_params.kubeconfig }}" validate_certs: "{{ k8s_auth_params.validate_certs }}" when: is_hub_cluster | bool post_tasks: - name: Cleanup temporary files ansible.builtin.file: path: "{{ kubeconfig_cleanup_dir }}" state: absent when: kubeconfig_cleanup_required | default(false) ignore_errors: true