#!/usr/bin/env bash
# setup-ollama.sh — Install Ollama and pull the inference model (Phase 2)
# Non-interactive; suitable for server environments.
#
# Usage: setup-ollama.sh [MODEL]
#   MODEL - model to pull (default: phi4)
# Exits non-zero if the Ollama daemon fails to come up.
set -euo pipefail

MODEL="${1:-phi4}"
readonly API_URL="http://localhost:11434/api/tags"

# Idempotent install: skip the installer if ollama is already on PATH.
if command -v ollama >/dev/null 2>&1; then
    echo "Ollama already installed; skipping install."
else
    echo "Installing Ollama..."
    curl -fsSL https://ollama.com/install.sh | sh
fi

# The installer starts the service asynchronously (systemd), so the API may
# not be reachable immediately. Wait for it BEFORE pulling — otherwise
# 'ollama pull' can race the daemon startup and fail.
echo "Waiting for Ollama to become ready..."
ready=0
for ((i = 1; i <= 30; i++)); do
    if curl -sf "$API_URL" > /dev/null; then
        ready=1
        break
    fi
    sleep 1
done

if (( ready == 0 )); then
    echo "ERROR: Ollama did not start. Check 'systemctl status ollama'." >&2
    exit 1
fi

echo "Pulling model: $MODEL ..."
ollama pull "$MODEL"

echo "Ollama is up. Model $MODEL ready."
