---
# Warning: This playbook is not idempotent and will naively do a re-installation from scratch!

# Prerequisites tasks
- name: Check if openshift-baremetal-install exists
  ansible.builtin.command: which openshift-baremetal-install
  register: install_binary_check
  ignore_errors: true
  tags:
    - prereq

- name: Download OKD installation program
  when: install_binary_check.rc != 0
  block:
    - name: Create temporary download directory
      ansible.builtin.tempfile:
        state: directory
        suffix: okd_install
      register: temp_dir

    - name: Download OKD installer
      ansible.builtin.get_url:
        url: "https://github.com/okd-project/okd/releases/download/4.17.0-okd-scos.0/openshift-install-linux-4.17.0-okd-scos.0.tar.gz"
        dest: "{{ temp_dir.path }}/openshift-install-linux.tar.gz"
        mode: '0644'

    - name: Extract OKD installer
      ansible.builtin.unarchive:
        src: "{{ temp_dir.path }}/openshift-install-linux.tar.gz"
        dest: "{{ temp_dir.path }}"
        remote_src: true

    # The upstream tarball ships the binary as `openshift-install`; we install it
    # under the `openshift-baremetal-install` name that the rest of this playbook invokes.
    - name: Move openshift-install binary to /usr/local/bin
      ansible.builtin.copy:
        src: "{{ temp_dir.path }}/openshift-install"
        dest: "/usr/local/bin/openshift-baremetal-install"
        mode: '0755'
        remote_src: true
      become: true

    - name: Cleanup temporary directory
      ansible.builtin.file:
        path: "{{ temp_dir.path }}"
        state: absent
  tags:
    - prereq

- name: Check if oc binary exists
  ansible.builtin.command: which oc
  register: oc_binary_check
  ignore_errors: true
  tags:
    - prereq

- name: Install OpenShift CLI (oc)
  when: oc_binary_check.rc != 0
  block:
    - name: Create temporary download directory
      ansible.builtin.tempfile:
        state: directory
        suffix: oc_client
      register: oc_temp_dir

    - name: Download OpenShift CLI
      ansible.builtin.get_url:
        url: "https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/linux/oc.tar.gz"
        dest: "{{ oc_temp_dir.path }}/oc.tar.gz"
        mode: '0644'

    - name: Extract OpenShift CLI
      ansible.builtin.unarchive:
        src: "{{ oc_temp_dir.path }}/oc.tar.gz"
        dest: "{{ oc_temp_dir.path }}"
        remote_src: true

    - name: Move oc binary to /usr/local/bin
      ansible.builtin.copy:
        src: "{{ oc_temp_dir.path }}/oc"
        dest: "/usr/local/bin/oc"
        mode: '0755'
        remote_src: true
      become: true

    - name: Cleanup temporary directory
      ansible.builtin.file:
        path: "{{ oc_temp_dir.path }}"
        state: absent
  tags:
    - prereq

- name: Create a directory for cluster artifacts
  ansible.builtin.file:
    path: "{{ okd_cluster_artifacts }}"
    state: directory
    mode: '0755'
  tags:
    - config

- name: Instantiate the install-config.yaml
  ansible.builtin.template:
    src: install-config.yaml.j2
    dest: "{{ okd_cluster_artifacts }}/install-config.yaml"
  tags:
    - config

# The installer consumes (and deletes) install-config.yaml from the artifacts
# directory; the extra copy one level up preserves it for later inspection/reruns.
- name: Write a copy to the upper directory
  ansible.builtin.template:
    src: install-config.yaml.j2
    dest: "{{ okd_cluster_artifacts }}/../install-config.yaml"
  tags:
    - config

- name: Render the installation manifests
  ansible.builtin.command: "openshift-baremetal-install create manifests --dir {{ okd_cluster_artifacts }}"
  tags:
    - config

- name: Ensure control plane nodes are not schedulable
  ansible.builtin.replace:
    path: "{{ okd_cluster_artifacts }}/manifests/cluster-scheduler-02-config.yml"
    regexp: 'mastersSchedulable: true'
    replace: 'mastersSchedulable: false'
  tags:
    - config

- name: "Copy additional MachineConfig manifests to {{ okd_cluster_artifacts }}/openshift"
  ansible.builtin.template:
    src: "{{ item }}"
    dest: "{{ okd_cluster_artifacts }}/openshift/{{ item }}"
  loop:
    - 99-worker-multipath.yaml
    - 99-master-multipath.yaml
  tags:
    - config

- name: Render the ignition files
  ansible.builtin.command: "openshift-baremetal-install create ignition-configs --dir {{ okd_cluster_artifacts }}"
  tags:
    - config
    - ignition

# https://access.redhat.com/solutions/6169152
# password hash was created via `ansible -m debug -a msg="{{ 'redhat' | password_hash('sha512') }}" localhost`
# NOTE(review): this hash corresponds to a well-known default password and is committed
# to VCS — consider moving it to an (Vault-encrypted) inventory variable.
- name: Set a password for the `core` user
  tags:
    - config
    - ignition
  ansible.builtin.include_tasks:
    file: patch_ignition.yaml
  vars:
    ignition_file: "{{ item }}"
    core_password_hash: "$6$htrbEble5xvsiUae$qySjmlmemEbML4QfEuOODOR4.FOUkG1ZihW4MLViRD/pGSNx3KC0/5slPXvO9ZrjijSJGZP8vTje7Kcb24uQf0"
    passwd_entry:
      passwd:
        users:
          - name: core
            password_hash: "{{ core_password_hash }}"
  loop:
    - master.ign
    - worker.ign

- name: Install the cluster (asynchronously)
  tags:
    - install
  block:
    - name: Trigger the cluster installation
      ansible.builtin.command: "openshift-baremetal-install create cluster --dir {{ okd_cluster_artifacts }} --log-level debug"
      async: 6000  # bootstrap (20min timeout) + cluster (45min timeout) + buffer
      poll: 0
      register: install_status

    - name: Inform user of the process
      ansible.builtin.debug:
        msg:
          - "You may run `tail -f {{ okd_cluster_artifacts }}/.openshift_install.log` to follow the installation process in another terminal."
          - "In addition, you can SSH into the bootstrap node via `ssh -o UserKnownHostsFile=/dev/null core@{{ okd_bootstrap_external_static_ip }}`"

    - name: Sleep to grant the installer time to get started
      ansible.builtin.wait_for:
        timeout: 60

    - name: Check if the OCP bootstrap is finished
      ansible.builtin.command: "openshift-baremetal-install wait-for bootstrap-complete --dir {{ okd_cluster_artifacts }} --log-level debug"
      register: bootstrap_status
      retries: 60
      delay: 60
      until: bootstrap_status.rc == 0

    - name: Set the `provisioningInterface` in metal3
      kubernetes.core.k8s_json_patch:
        kubeconfig: /auth/kubeconfig
        kind: Provisioning
        name: provisioning-configuration
        api_version: metal3.io/v1alpha1
        patch:
          - op: add
            path: /spec/provisioningInterface
            value: pxe  # TODO: infer from inventory, hardcoding is evil
      delegate_to: localhost
      register: patch_status
      retries: 60
      delay: 5
      # Our assumption: if the operation was successful, then `patch_status` will not have the `.error` member
      until: not (patch_status.error is defined)

    - name: Wait for the cluster installation to complete
      ansible.builtin.command: "openshift-baremetal-install wait-for install-complete --dir {{ okd_cluster_artifacts }}"
      register: install_progress
      retries: 60
      delay: 60
      until: install_progress.rc == 0
  rescue:
    - name: Clean-up the resources of the asynchronous task
      # https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_async.html
      # When running with poll: 0, Ansible will not automatically clean up the async job
      # cache file. You will need to manually clean this up with the async_status module
      # with mode: cleanup.
      ansible.builtin.async_status:
        jid: "{{ install_status.ansible_job_id }}"
        mode: cleanup

    - name: Inform the user of the failure
      ansible.builtin.fail:
        msg:
          - The cluster installation failed but the playbook has no insight on the particular reason.
          - It is possible that the installation itself is progressing nicely but we ran into a (hardcoded) timeout.
          - "Feel free to dig a bit deeper to decide whether to retry the installation (potentially with a fix :)) or debug first."
  always:
    - name: Print `patch_status`
      ansible.builtin.debug:
        var: patch_status
      when: patch_status is defined

- name: Label the nodes when labels are defined
  ansible.builtin.command: "oc --kubeconfig /auth/kubeconfig label node {{ item.0 }} {{ item.1 }}"
  loop: "{{ okd_workers | product(labels) | list }}"
  when: labels is defined
  tags:
    - install

- name: Tell the user how to proceed with the installation
  ansible.builtin.debug:
    msg:
      - "The installation is complete. You can find the initial cluster credentials under {{ okd_cluster_artifacts }}/auth"
      - Run the following command to proceed with the configuration of Secrets & ArgoCD.
      - "export ANSIBLE_NAVIGATOR_EXECUTION_ENVIRONMENT_VOLUME_MOUNTS={{ okd_cluster_artifacts }}/auth/:/auth;ansible-navigator run playbooks/10_bootstrap_k8s.yaml -m stdout -i {{ inventory_dir }}"

# NOTE(review): this task runs last in file order, i.e. AFTER the manifests were
# consumed by `create ignition-configs` / `create cluster` — editing the manifest
# at this point presumably has no effect on the already-installed cluster. Verify
# the intended tag-based invocation order (it shares the `config` tag with the
# earlier task that sets mastersSchedulable to false).
- name: Make masters schedulable when hybrid mode is enabled
  ansible.builtin.replace:
    path: "{{ okd_cluster_artifacts }}/manifests/cluster-scheduler-02-config.yml"
    regexp: 'mastersSchedulable: false'
    replace: 'mastersSchedulable: true'
  when: okd_make_masters_schedulable | default(false)
  tags:
    - config