#!/usr/bin/env bash
# Delete a daylily ephemeral ParallelCluster and its associated resources.
set -euo pipefail

# Start from a clean slate: ignore any region/cluster_name values that may
# have leaked in from the calling environment.
unset region cluster_name

# Resolve the daylily ephemeral-cluster resources directory and print it.
# Resolution order:
#   1. $DAYLILY_EC_RESOURCES_DIR environment override
#   2. the installed daylily-ec CLI ("resources-dir" subcommand)
#   3. dev fallback: the repo checkout this script lives in
# Returns 1 (printing nothing) when none of the above applies.
resolve_daylily_res_dir() {
    if [[ -n "${DAYLILY_EC_RESOURCES_DIR:-}" ]]; then
        echo "${DAYLILY_EC_RESOURCES_DIR}"
        return 0
    fi
    if command -v daylily-ec >/dev/null 2>&1; then
        daylily-ec resources-dir
        return 0
    fi
    # Dev fallback: this script lives in <repo>/bin, so the repo root is one
    # directory up; sanity-check the expected layout before trusting it.
    local this_dir checkout
    this_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
    checkout="$(cd "${this_dir}/.." && pwd)"
    if [[ -d "${checkout}/config" && -d "${checkout}/bin" ]]; then
        echo "${checkout}"
        return 0
    fi
    return 1
}

# Abort the script with exit 1 unless command "$1" is available on PATH.
require_cmd() {
    command -v "$1" >/dev/null 2>&1 && return 0
    echo "Error: required command '$1' not found in PATH" >&2
    exit 1
}

# ---------------------------------------------------------------------------
# Flag parsing  (backward-compatible: falls back to interactive prompts)
# ---------------------------------------------------------------------------
# Print CLI usage to stdout (quoted delimiter: no expansion inside).
usage() {
    cat <<'USAGE'
Usage: daylily-delete-ephemeral-cluster [options]

Options:
  --region REGION          AWS region where the cluster is located
  --cluster-name NAME      AWS ParallelCluster cluster name
  --profile PROFILE        AWS profile to use (defaults to AWS_PROFILE)
  --yes                    Skip the FSx deletion confirmation prompt
  -h, --help               Show this help message and exit

When flags are omitted the script falls back to interactive prompts.
USAGE
}

flag_region=""
flag_cluster_name=""
flag_profile=""
flag_yes=0

while [[ $# -gt 0 ]]; do
    case "$1" in
        --region|--cluster-name|--profile)
            # Value-taking options: fail with a clear message when the value
            # is missing, instead of tripping `set -u` on the absent "$2"
            # (e.g. a bare trailing `--region`).
            if [[ $# -lt 2 ]]; then
                echo "Error: option '$1' requires an argument" >&2
                usage
                exit 1
            fi
            case "$1" in
                --region)       flag_region="$2" ;;
                --cluster-name) flag_cluster_name="$2" ;;
                --profile)      flag_profile="$2" ;;
            esac
            shift 2
            ;;
        --yes)     flag_yes=1; shift ;;
        -h|--help) usage; exit 0 ;;
        *)         echo "Unknown option: $1" >&2; usage; exit 1 ;;
    esac
done

# Prompt repeatedly until the user enters a non-empty line; print the answer.
# Arguments:
#   $1 - prompt text (shown by `read -p` when stdin is a terminal)
prompt_non_empty() {
    local msg="$1"
    local answer=""
    until [[ -n "$answer" ]]; do
        read -r -p "$msg" answer
    done
    echo "$answer"
}

# Delete an AWS ParallelCluster cluster and poll until the deletion
# finishes (or fails).
# Arguments:
#   $1 - cluster name
#   $2 - AWS region
# Exits 1 if the cluster ends up in DELETE_FAILED.
delete_cluster() {
    local name="$1"
    local reg="$2"

    echo "Deleting cluster '$name' in region '$reg'..."
    pcluster delete-cluster -n "$name" --region "$reg"

    echo "Monitoring cluster deletion status..."
    while true; do
        local desc status
        # describe-cluster fails once the stack is gone -> deletion done.
        if ! desc="$(pcluster describe-cluster -n "$name" --region "$reg" 2>/dev/null)"; then
            echo "Cluster deletion completed successfully."
            break
        fi
        # Parse clusterStatus from the JSON description: pass the program via
        # -c and feed $desc on stdin. The previous form (`python3 -` with the
        # program in a heredoc, plus a stray `<<<"$desc"` null-command
        # redirection) consumed stdin for the program itself, so json.load()
        # hit EOF, status was always empty, and the loop declared success
        # immediately even while deletion was still in progress.
        status="$(python3 -c '
import json, sys
try:
    d = json.load(sys.stdin)
except Exception:
    sys.exit(0)
print(d.get("clusterStatus", ""))
' <<<"$desc")"
        if [[ -z "$status" ]]; then
            echo "Cluster deletion completed successfully."
            break
        elif [[ "$status" == "DELETE_FAILED" ]]; then
            echo "Error: Cluster deletion failed."
            exit 1
        else
            echo "Current status: $status"
            sleep 15
        fi
    done
}

# Best-effort removal of the cluster heartbeat stack: EventBridge schedule,
# Lambda function, and SNS topic. Failures are deliberately swallowed — the
# resources may never have been created, or may already be gone.
# Arguments:
#   $1 - cluster name
#   $2 - AWS region
#   $3 - AWS profile
teardown_heartbeat_best_effort() {
    local name="$1"
    local reg="$2"
    local prof="$3"

    echo "Tearing down heartbeat notification infrastructure (best-effort)..."
    local account_id
    account_id="$(aws sts get-caller-identity --profile "$prof" --region "$reg" --query Account --output text 2>/dev/null || true)"
    if [[ -z "$account_id" ]]; then
        echo "Warning: unable to resolve AWS account id; skipping heartbeat teardown." >&2
        return 0
    fi

    # All three resources share the same name stem.
    local stem="daylily-${name}-heartbeat"
    local topic_arn="arn:aws:sns:${reg}:${account_id}:${stem}"
    # EventBridge Scheduler names cap at 64 characters.
    # NOTE(review): the Lambda name is not truncated the same way — confirm
    # this mirrors how the creation path names the function.
    local schedule_name="${stem:0:64}"
    local lambda_name="$stem"

    aws scheduler delete-schedule \
        --profile "$prof" --region "$reg" \
        --group-name default --name "$schedule_name" >/dev/null 2>&1 || true
    aws lambda delete-function \
        --profile "$prof" --region "$reg" \
        --function-name "$lambda_name" >/dev/null 2>&1 || true
    aws sns delete-topic \
        --profile "$prof" --region "$reg" \
        --topic-arn "$topic_arn" >/dev/null 2>&1 || true
}

require_cmd aws
require_cmd pcluster
require_cmd python3

# Resolve the AWS profile: --profile flag wins, then AWS_PROFILE.
aws_profile="${flag_profile:-${AWS_PROFILE:-}}"
if [[ -z "$aws_profile" ]]; then
    echo "Error: AWS profile not specified. Set AWS_PROFILE or pass --profile." >&2
    exit 1
fi
export AWS_PROFILE="$aws_profile"

# Region and cluster name: flags win; otherwise prompt interactively.
if [[ -n "$flag_region" ]]; then
    region="$flag_region"
else
    region="$(prompt_non_empty "Enter the AWS region where the cluster is located: ")"
fi

if [[ -n "$flag_cluster_name" ]]; then
    cluster_name="$flag_cluster_name"
else
    cluster_name="$(prompt_non_empty "Enter the AWS ParallelCluster cluster name: ")"
fi

if [[ -z "$region" || -z "$cluster_name" ]]; then
    echo "Error: region and cluster name are required." >&2
    exit 1
fi

if ! pcluster describe-cluster -n "$cluster_name" --region "$region" >/dev/null 2>&1; then
    echo "ERROR: Cluster ($cluster_name) does not exist in region ($region)" >&2
    exit 1
fi


# Find FSx filesystems tagged as belonging to this cluster, so the operator
# gets a chance to export data before deletion tears them down.
fsx_associations=$(aws fsx describe-file-systems \
    --profile "$aws_profile" \
    --region "$region" \
    --query "FileSystems[?contains(Tags[?Key=='parallelcluster:cluster-name'].Value | [0], '$cluster_name')].FileSystemId" \
    --output text)

if [[ -z "$fsx_associations" ]]; then
    echo "No FSx filesystems associated with the cluster."
else
    echo "~~WARNING~~ "
    echo "   FSx filesystems are still associated with the cluster:"
    echo "$fsx_associations"
    echo ""
    echo "If you wish to export FSX data back to S3, please do so via the FSX console, or you may run the following command:"
    if RES_DIR="$(resolve_daylily_res_dir 2>/dev/null)"; then
        echo "  ${RES_DIR}/bin/daylily-export-fsx-to-s3"
    else
        echo "  daylily-export-fsx-to-s3"
    fi
    echo ""
    sleep 2
    echo "If you wish to proceed with deleting this cluster, the FSX filesystem will be deleted or preserved given the parameters set during creation."
    echo ""
    if [[ "$flag_yes" == "1" ]]; then
        echo "Skipping confirmation (--yes flag provided)."
    else
        # -r: don't let backslashes in the typed answer be interpreted
        # (SC2162); the comparison below must see the literal input.
        read -r -p "Type 'please delete' to proceed with cluster deletion: " confirmation
        if [[ "$confirmation" != "please delete" ]]; then
            echo "Aborting cluster deletion."
            exit 1
        fi
    fi
fi

teardown_heartbeat_best_effort "$cluster_name" "$region" "$aws_profile"


delete_cluster "$cluster_name" "$region"

echo "Deletion of $cluster_name is complete."
