#!/bin/bash
# Quick demo script for Karpenter IBM Cloud Provider
# This script automates the demo for easier execution

set -e

# Set kubeconfig (export KUBECONFIG beforehand to point at a different cluster)
export KUBECONFIG="${KUBECONFIG:-/home/josie/development/karpenter-ibm/kubeconfig}"

# Colors for output
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Function to print colored headers
print_header() {
    echo -e "\n${BLUE}=== $1 ===${NC}\n"
}

# Function to print info
print_info() {
    echo -e "${GREEN}$1${NC}"
}

# Function to wait with message
wait_with_message() {
    local seconds=$1
    local message=$2
    echo -e "${YELLOW}${message} (${seconds}s)...${NC}"
    sleep $seconds
}

# Main demo
clear
print_header "Karpenter IBM Cloud Provider Demo"
print_info "Demonstrating automatic node provisioning on IBM Cloud VPC"
echo "Date: $(date)"
echo ""

# Initial state
print_header "Current Cluster State"
kubectl get nodes
echo ""
kubectl get pods -n karpenter | grep karpenter || echo "No Karpenter pods found"

# Apply manifests
print_header "Creating Karpenter Resources"
print_info "Applying IBMNodeClass..."
kubectl apply -f asciinema-demo/manifests/01-nodeclass.yaml
kubectl get ibmnodeclass demo-vpc-nodeclass
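
# For reference, the applied IBMNodeClass looks roughly like the sketch below.
# The API group/version and field names are assumptions for illustration; the
# authoritative spec lives in asciinema-demo/manifests/01-nodeclass.yaml.
#
#   apiVersion: karpenter.ibm.sh/v1alpha1   # assumed group/version
#   kind: IBMNodeClass
#   metadata:
#     name: demo-vpc-nodeclass
#   spec:
#     region: us-south        # IBM Cloud region (illustrative)
#     vpc: <vpc-id>           # target VPC for provisioned instances
#     image: <image-id>       # boot image for new nodes
#     subnet: <subnet-id>     # subnet the instances attach to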

echo ""
print_info "Applying NodePool..."
kubectl apply -f asciinema-demo/manifests/02-nodepool.yaml
kubectl get nodepool demo-nodepool
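
# For reference, a minimal sketch of the NodePool being applied, using the
# standard Karpenter v1 schema; the nodeClassRef group is an assumption that
# should match the IBMNodeClass CRD (see 02-nodepool.yaml for the real spec).
#
#   apiVersion: karpenter.sh/v1
#   kind: NodePool
#   metadata:
#     name: demo-nodepool
#   spec:
#     template:
#       spec:
#         nodeClassRef:
#           group: karpenter.ibm.sh   # assumed API group
#           kind: IBMNodeClass
#           name: demo-vpc-nodeclass
#     disruption:
#       consolidationPolicy: WhenEmptyOrUnderutilized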

wait_with_message 5 "Resources created"

# Scale up
print_header "Triggering Scale-Up"
print_info "Deploying nginx workload..."
kubectl apply -f asciinema-demo/manifests/03-scale-up-workload-simple.yaml
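
# The workload is essentially a Deployment like the sketch below; the per-pod
# CPU request is what creates scheduling pressure once we scale the replica
# count (values are illustrative, not the shipped manifest):
#
#   apiVersion: apps/v1
#   kind: Deployment
#   metadata:
#     name: nginx-demo
#   spec:
#     replicas: 1
#     selector:
#       matchLabels:
#         app: nginx-demo
#     template:
#       metadata:
#         labels:
#           app: nginx-demo
#       spec:
#         containers:
#         - name: nginx
#           image: nginx:stable
#           resources:
#             requests:
#               cpu: "1"      # sized so 8 replicas exceed one node's capacity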

print_info "Current cluster capacity:"
kubectl get nodes -o wide
echo ""
print_info "Scaling to 8 replicas to exceed current node capacity..."
kubectl scale deployment nginx-demo --replicas=8

wait_with_message 15 "Pods are pending, checking scheduling..."
kubectl get pods -l app=nginx-demo -o wide

print_info "Pending pods should trigger node provisioning:"
kubectl get pods -l app=nginx-demo | grep Pending || echo "All pods scheduled on existing nodes"

wait_with_message 30 "Waiting for NodeClaims to be created..."

print_info "NodeClaims created:"
kubectl get nodeclaims

# If no NodeClaims, the existing node has enough capacity - add more load
if ! kubectl get nodeclaims --no-headers 2>/dev/null | grep -q .; then
    print_info "No NodeClaims yet - adding resource-intensive job to force provisioning..."
    kubectl apply -f asciinema-demo/manifests/04-resource-hungry-job.yaml
    wait_with_message 15 "Waiting for resource-intensive job to schedule..."
    kubectl get pods -l app=resource-job
    
    print_info "Checking for NodeClaims again:"
    kubectl get nodeclaims
fi
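
# The fallback job above is a batch Job with deliberately large resource
# requests so it cannot fit on the existing nodes. A hedged sketch of its
# shape (illustrative values, not the shipped 04-resource-hungry-job.yaml):
#
#   apiVersion: batch/v1
#   kind: Job
#   metadata:
#     name: resource-intensive-job
#   spec:
#     template:
#       metadata:
#         labels:
#           app: resource-job
#       spec:
#         restartPolicy: Never
#         containers:
#         - name: worker
#           image: busybox
#           command: ["sleep", "600"]
#           resources:
#             requests:
#               cpu: "4"        # large enough to force a new node
#               memory: 8Gi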

wait_with_message 180 "Waiting for nodes to join cluster..."

print_header "Check NodeClaims again:"
print_info "All NodeClaims in cluster:"
kubectl get NodeLcaims
echo ""

# List provisioned VPC instances via the IBM Cloud CLI (assumes the CLI and
# its vpc-infrastructure plugin are installed and a region is targeted)
print_info "IBM Cloud VPC instances:"
ibmcloud is instances || echo "ibmcloud CLI not available - skipping instance list"

wait_with_message 60 "Waiting for nodes for another 30s"

print_info "All nodes in cluster:"
kubectl get nodes -o wide
echo ""
print_info "Karpenter-provisioned nodes:"
kubectl get nodes -l karpenter.sh/nodepool=demo-nodepool || echo "No Karpenter nodes found"
echo ""
print_info "NodeClaims status:"
kubectl get nodeclaims
echo ""
print_info "All workload pods:"
kubectl get pods -l app=nginx-demo
kubectl get pods -l app=resource-job || echo "No resource job pods"

# Skip additional workload section if already applied
if ! kubectl get job resource-intensive-job >/dev/null 2>&1; then
    print_header "Adding Resource-Intensive Workload"
    kubectl apply -f asciinema-demo/manifests/04-resource-hungry-job.yaml
    wait_with_message 30 "Job pods scheduling"
    kubectl get pods -l app=resource-job
fi

# Scale down
print_header "Triggering Scale-Down"
print_info "Deleting workloads..."
kubectl delete deployment nginx-demo --wait=false
kubectl delete job resource-intensive-job --wait=false || echo "Job already deleted or not found"

print_info "Nodes before consolidation:"
kubectl get nodes -o wide

wait_with_message 60 "Waiting for node consolidation to begin..."

print_info "Checking intermediate cleanup state:"
kubectl get nodes -o wide
echo ""
print_info "NodeClaims status:"
kubectl get nodeclaims || echo "No NodeClaims found"

wait_with_message 60 "Waiting for garbage collection to complete..."

print_header "Scale-Down Complete"
print_info "Final nodes in cluster:"
kubectl get nodes -o wide
echo ""
print_info "Remaining Karpenter nodes:"
kubectl get nodes -l karpenter.sh/nodepool=demo-nodepool || echo "No Karpenter nodes remaining"
echo ""
print_info "Final NodeClaims status:"
kubectl get nodeclaims || echo "All NodeClaims cleaned up"

# Events
print_header "Recent Karpenter Events"
kubectl get events -n karpenter --sort-by='.lastTimestamp' | grep -i karpenter | tail -10 || echo "No recent events"

# Cleanup
print_header "Cleaning Up"
kubectl delete nodepool demo-nodepool --wait=false
kubectl delete ibmnodeclass demo-vpc-nodeclass --wait=false
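
# Optionally block until Karpenter finishes draining and deleting its nodes.
# A simple polling loop (uncomment to use; the 10s interval is illustrative):
# while kubectl get nodeclaims --no-headers 2>/dev/null | grep -q .; do
#     echo "Waiting for NodeClaims to be removed..."
#     sleep 10
# done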

print_header "Demo Complete!"