---
# Warning: This playbook is not idempotent and will naively do a re-installation from scratch!

- name: Start a container for hosting the bootstrap image
  containers.podman.podman_container:
    name: rhcos_image_cache
    image: registry.access.redhat.com/ubi9/httpd-24
    recreate: true
    ports:
      - "{{ ocp_rhcos_image_cache_port }}:{{ ocp_rhcos_image_cache_port }}/tcp"
    volumes:
      # :Z relabels the volume for SELinux so httpd inside the container can read it
      - "{{ ocp_rhcos_image_cache_path }}:/var/www/html:Z"
  tags:
    - cache

- name: Get the checksum of the uncompressed bootstrap image
  ansible.builtin.slurp:
    src: "{{ ocp_rhcos_image_cache_path }}/rhcos-latest.qcow2.sha256"
  register: bootstrap_checksum

- name: Get the URL of the bootstrap image
  ansible.builtin.set_fact:
    # `trim` strips the trailing newline that checksum files conventionally carry,
    # which would otherwise end up inside the URL query string
    bootstrap_os_image: "http://{{ ansible_default_ipv4.address }}:{{ ocp_rhcos_image_cache_port }}/rhcos-latest.qcow2.gz?sha256={{ bootstrap_checksum.content | b64decode | trim }}"

- name: Instantiate the install-config.yaml
  ansible.builtin.template:
    src: install-config.yaml.j2
    dest: "{{ ocp_cluster_artifacts }}/install-config.yaml"
  tags:
    - config

# The installer consumes (and deletes) the copy inside ocp_cluster_artifacts,
# so keep a spare one level up for later reference
- name: Write a copy to the upper directory
  ansible.builtin.template:
    src: install-config.yaml.j2
    dest: "{{ ocp_cluster_artifacts }}/../install-config.yaml"
  tags:
    - config

- name: Render the installation manifests
  ansible.builtin.command: "openshift-baremetal-install create manifests --dir {{ ocp_cluster_artifacts }}"
  tags:
    - config

- name: "Copy additional MachineConfig manifests to {{ ocp_cluster_artifacts }}/openshift"
  ansible.builtin.template:
    src: "{{ item }}"
    dest: "{{ ocp_cluster_artifacts }}/openshift/{{ item }}"
  loop:
    - 99-worker-multipath.yaml
    - 99-master-multipath.yaml
  tags:
    - config

- name: Render the ignition files
  ansible.builtin.command: "openshift-baremetal-install create ignition-configs --dir {{ ocp_cluster_artifacts }}"
  tags:
    - config
    - ignition

# https://access.redhat.com/solutions/6169152
# password hash was created via `ansible -m debug -a msg="{{ 'redhat' | password_hash('sha512') }}" localhost`
# NOTE(review): this hash (password "redhat") is committed to VCS — acceptable only for lab use
- name: Set a password for the `core` user
  tags:
    - config
    - ignition
  ansible.builtin.include_tasks:
    file: patch_ignition.yaml
  vars:
    ignition_file: "{{ item }}"
    core_password_hash: "$6$htrbEble5xvsiUae$qySjmlmemEbML4QfEuOODOR4.FOUkG1ZihW4MLViRD/pGSNx3KC0/5slPXvO9ZrjijSJGZP8vTje7Kcb24uQf0"
    passwd_entry:
      passwd:
        users:
          - name: core
            password_hash: "{{ core_password_hash }}"
  loop:
    - master.ign
    - worker.ign

- name: Install the cluster (asynchronously)
  tags:
    - install
  block:
    - name: Trigger the cluster installation
      ansible.builtin.command: "openshift-baremetal-install create cluster --dir {{ ocp_cluster_artifacts }} --log-level debug"
      async: 6000  # bootstrap (20min timeout) + cluster (45min timeout) + buffer
      poll: 0
      register: install_status

    - name: Inform user of the process
      ansible.builtin.debug:
        msg:
          - "You may run `tail -f {{ ocp_cluster_artifacts }}/.openshift_install.log` to follow the installation process in another terminal."
          - "In addition, you can SSH into the bootstrap node via `ssh -o UserKnownHostsFile=/dev/null core@{{ ocp_bootstrap_external_static_ip }}`"

    - name: Sleep to grant the installer time to get started
      ansible.builtin.wait_for:
        timeout: 60

    - name: Check if the OCP bootstrap is finished
      ansible.builtin.command: "openshift-baremetal-install wait-for bootstrap-complete --dir {{ ocp_cluster_artifacts }} --log-level debug"
      register: bootstrap_status
      retries: 60
      delay: 60
      until: bootstrap_status.rc == 0

    - name: Set the `provisioningInterface` in metal3
      kubernetes.core.k8s_json_patch:
        kubeconfig: /auth/kubeconfig
        kind: Provisioning
        name: provisioning-configuration
        api_version: metal3.io/v1alpha1
        patch:
          - op: add
            path: /spec/provisioningInterface
            value: pxe  # TODO: infer from inventory, hardcoding is evil
      delegate_to: localhost
      register: patch_status
      retries: 60
      delay: 5
      # Our assumption: if the operation was successful, then `patch_status` will not have the `.error` member
      until: not (patch_status.error is defined)

    - name: Wait for the cluster installation to complete
      ansible.builtin.command: "openshift-baremetal-install wait-for install-complete --dir {{ ocp_cluster_artifacts }}"
      register: install_progress
      retries: 60
      delay: 60
      until: install_progress.rc == 0
  rescue:
    # https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_async.html
    # When running with poll: 0, Ansible will not automatically clean up the async job
    # cache file. You will need to manually clean this up with the async_status module
    # with mode: cleanup.
    - name: Clean-up the resources of the asynchronous task
      ansible.builtin.async_status:
        jid: "{{ install_status.ansible_job_id }}"
        mode: cleanup

    - name: Inform the user of the failure
      ansible.builtin.fail:
        msg:
          - The cluster installation failed but the playbook has no insight on the particular reason.
          - It is possible that the installation itself is progressing nicely but we ran into a (hardcoded) timeout.
          - "Feel free to dig a bit deeper to decide whether to retry the installation (potentially with a fix :)) or debug first."
  always:
    - name: Print `patch_status`
      ansible.builtin.debug:
        var: patch_status
      when: patch_status is defined

- name: Label the nodes when labels are defined
  ansible.builtin.command: "oc --kubeconfig /auth/kubeconfig label node {{ item.0 }} {{ item.1 }}"
  loop: "{{ ocp_workers | product(labels) | list }}"
  when: labels is defined
  tags:
    - install

- name: Tell the user how to proceed with the installation
  ansible.builtin.debug:
    msg:
      - "The installation is complete. You can find the initial cluster credentials under {{ ocp_cluster_artifacts }}/auth"
      - Run the following command to proceed with the configuration of Secrets & ArgoCD.
      - "export ANSIBLE_NAVIGATOR_EXECUTION_ENVIRONMENT_VOLUME_MOUNTS={{ ocp_cluster_artifacts }}/auth/:/auth;ansible-navigator run playbooks/10_bootstrap_k8s.yaml -m stdout -i {{ inventory_dir }}"