#!/bin/bash
# FoundationsAI Setup and Start Script
# This script helps configure and start the FoundationsAI MCP server
# It checks for dependencies, prompts for model downloads, and provides a user-friendly interface
# Color codes
CYAN='\033[0;36m'
YELLOW='\033[1;33m'
GREEN='\033[0;32m'
RED='\033[0;31m'
WHITE='\033[1;37m'
NC='\033[0m' # No Color

# Function to prompt user for model download
download_model() {
    echo ""
    echo -e "${YELLOW}Recommended models:${NC}"
    echo -e "${WHITE}1. llama3.3:70b (Production - <1s response time on RTX 5090, highest quality)${NC}"
    echo -e "${WHITE}2. llama3.1:8b (Development - 1-2s response time, good quality)${NC}"
    echo -e "${WHITE}3. llama3.2:3b (Fast - 1-2s response time, lower quality)${NC}"
    echo ""
    read -p "Which model to download? (1/2/3 or skip): " modelChoice

    case $modelChoice in
        1)
            echo -e "${YELLOW}Downloading llama3.3:70b model... (this may take 30-60 minutes, ~40GB)${NC}"
            ollama pull llama3.3:70b
            echo -e "${GREEN}Model downloaded! Update LLM_MODEL=llama3.3:70b in .env${NC}"
            echo -e "${CYAN}Note: Achieves <1s response time on RTX 5090 (Blackwell)${NC}"
            ;;
        2)
            echo -e "${YELLOW}Downloading llama3.1:8b model... (this may take 5-10 minutes)${NC}"
            ollama pull llama3.1:8b
            echo -e "${GREEN}Model downloaded! Update LLM_MODEL=llama3.1:8b in .env${NC}"
            ;;
        3)
            echo -e "${YELLOW}Downloading llama3.2:3b model... (this may take 3-5 minutes)${NC}"
            ollama pull llama3.2:3b
            echo -e "${GREEN}Model downloaded! Update LLM_MODEL=llama3.2:3b in .env${NC}"
            ;;
        *)
            echo -e "${YELLOW}Skipping model download. You can download later with: ollama pull llama3.3:70b${NC}"
            ;;
    esac
}
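
# Optional helper (a sketch, not wired into the menu above): update LLM_MODEL
# in .env automatically instead of editing it by hand. Assumes .env uses plain
# KEY=value lines and already contains an LLM_MODEL entry.
# Usage: set_env_model "llama3.1:8b"
set_env_model() {
    local model="$1"
    if [ -f ".env" ] && grep -q '^LLM_MODEL=' .env; then
        # -i.bak keeps a backup and works with both GNU and BSD sed
        sed -i.bak "s|^LLM_MODEL=.*|LLM_MODEL=${model}|" .env
        echo -e "${GREEN}Updated LLM_MODEL=${model} in .env${NC}"
    else
        echo -e "${YELLOW}Could not update .env automatically; set LLM_MODEL=${model} manually${NC}"
    fi
}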

echo -e "${CYAN}==================================${NC}"
echo -e "${CYAN}FoundationsAI MCP Setup & Start${NC}"
echo -e "${CYAN}==================================${NC}"
echo ""

# Check Python version
echo -e "${YELLOW}Checking Python version...${NC}"
if ! command -v python3 &> /dev/null; then
    echo -e "${RED}python3 not found! Install Python 3 before running this script.${NC}"
    exit 1
fi
pythonVersion=$(python3 --version 2>&1)
echo -e "${GREEN}Found: $pythonVersion${NC}"

# Check if virtual environment exists
if [ ! -d "venv" ]; then
    echo -e "${YELLOW}Creating virtual environment (isolated from system packages)...${NC}"
    python3.11 -m venv venv --clear 2>/dev/null || python3 -m venv venv --clear
    echo -e "${GREEN}Virtual environment created!${NC}"
else
    echo -e "${GREEN}Virtual environment already exists${NC}"
fi
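
# Guard: make sure the venv actually materialized before sourcing it
# (on Debian/Ubuntu, `python3 -m venv` fails if python3-venv is not installed)
if [ ! -f "venv/bin/activate" ]; then
    echo -e "${RED}venv/bin/activate not found. Virtual environment creation failed.${NC}"
    echo -e "${YELLOW}On Debian/Ubuntu you may need: sudo apt install python3-venv${NC}"
    exit 1
fi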

# Activate virtual environment
echo -e "${YELLOW}Activating virtual environment...${NC}"
source venv/bin/activate

# Install dependencies
echo -e "${YELLOW}Installing dependencies from requirements.txt...${NC}"
echo -e "${CYAN}This may take several minutes on first run. Installation logs:${NC}"
echo ""
pip install -r requirements.txt --no-cache-dir --progress-bar on
install_status=$?
echo ""
if [ $install_status -eq 0 ]; then
    echo -e "${GREEN}✓ All dependencies installed successfully!${NC}"
else
    echo -e "${RED}✗ Some dependencies failed to install. Check logs above.${NC}"
    exit 1
fi
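
# Optional extra sanity check: `pip check` verifies that installed packages
# have compatible dependency versions (cheap, and catches partial installs)
pip check || echo -e "${YELLOW}pip check reported dependency conflicts (see above)${NC}"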

# Check for .env file
if [ ! -f ".env" ]; then
    echo ""
    echo -e "${RED}WARNING: .env file not found!${NC}"
    read -p "Create .env from template? (y/n): " createEnv

    if [ "$createEnv" = "y" ]; then
        cp .env.example .env
        echo -e "${GREEN}.env file created from template${NC}"
        echo -e "${YELLOW}IMPORTANT: Edit .env file with your configuration!${NC}"

        read -p "Open .env in editor now? (y/n): " editNow
        if [ "$editNow" = "y" ]; then
            ${EDITOR:-nano} .env
        fi
    fi
else
    echo -e "${GREEN}.env file found${NC}"
fi
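
# Sanity check (a sketch; assumes .env defines LLM_MODEL as a plain KEY=value
# line, as the model download messages above suggest)
if [ -f ".env" ] && ! grep -q '^LLM_MODEL=' .env; then
    echo -e "${YELLOW}Note: LLM_MODEL is not set in .env; add it after downloading a model${NC}"
fi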

# Check Ollama
echo ""
echo -e "${YELLOW}Checking Ollama installation...${NC}"
if command -v ollama &> /dev/null; then
    ollamaVersion=$(ollama --version 2>&1)
    echo -e "${GREEN}Ollama found: $ollamaVersion${NC}"

    # Check if Ollama server is running, if not start it
    echo -e "${YELLOW}Checking Ollama server status...${NC}"
    if ! ollama list &> /dev/null; then
        echo -e "${YELLOW}Ollama server not running. Starting...${NC}"
        ollama serve > ollama.log 2>&1 &
        OLLAMA_PID=$!
        echo -e "${CYAN}Ollama server started in background (PID: $OLLAMA_PID)${NC}"
        echo -e "${CYAN}Logs are being written to: ollama.log${NC}"

        # Wait for Ollama server to be ready
        echo -e "${YELLOW}Waiting for Ollama server to start...${NC}"
        ollama_ready=false
        for i in {1..30}; do
            if ollama list &> /dev/null; then
                ollama_ready=true
                echo -e "${GREEN}✓ Ollama server is ready!${NC}"
                break
            fi
            sleep 1
            echo -n "."
        done
        echo ""
        if [ "$ollama_ready" = false ]; then
            echo -e "${RED}✗ Ollama server did not respond within 30 seconds. Check ollama.log for details.${NC}"
        fi
    else
        echo -e "${GREEN}Ollama server is already running${NC}"
    fi
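
    # Alternative readiness probe (a sketch): Ollama also serves an HTTP API,
    # by default on localhost:11434, so curl can stand in for `ollama list`:
    #   curl -sf http://localhost:11434/api/tags > /dev/null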

    # Check if llama model is installed
    echo -e "${YELLOW}Checking for llama models...${NC}"
    models=$(ollama list 2>&1)
    if echo "$models" | grep -qE "llama3\.3:70b"; then
        echo -e "${GREEN}Llama3.3:70b model is installed (Production - RTX 5090 optimized)${NC}"
    elif echo "$models" | grep -qE "llama3\.3|llama3\.2|llama3\.1|llama3"; then
        echo -e "${GREEN}Llama model is installed${NC}"
        echo -e "${YELLOW}Note: llama3.3:70b is recommended for production (sub-second on RTX 5090)${NC}"
    else
        echo -e "${RED}No llama models found!${NC}"
        download_model
    fi
else
    echo -e "${RED}Ollama not found!${NC}"
    echo -e "${YELLOW}Ollama is required to run FoundationsAI${NC}"
    echo ""
    read -p "Would you like to install Ollama now? (y/n): " installOllama

    if [ "$installOllama" = "y" ]; then
        echo -e "${YELLOW}Installing Ollama... This may take a few minutes.${NC}"
        echo ""

        # Install Ollama (run in foreground to wait for completion)
        if curl -fsSL https://ollama.com/install.sh | sh; then
            echo -e "${GREEN}✓ Ollama installed successfully!${NC}"

            # Now start Ollama server in background
            echo -e "${YELLOW}Starting Ollama server...${NC}"
            ollama serve > ollama.log 2>&1 &
            OLLAMA_PID=$!
            echo -e "${CYAN}Ollama server is running in background (PID: $OLLAMA_PID)${NC}"
            echo -e "${CYAN}Logs are being written to: ollama.log${NC}"

            # Wait for Ollama server to be ready (check if it responds)
            echo -e "${YELLOW}Waiting for Ollama server to start...${NC}"
            for i in {1..30}; do
                if ollama list &> /dev/null; then
                    echo -e "${GREEN}✓ Ollama server is ready!${NC}"
                    break
                fi
                sleep 1
                echo -n "."
            done
            echo ""

            # Verify Ollama is responsive
            if ollama list &> /dev/null; then
                # Now prompt to download a model
                download_model
            else
                echo -e "${RED}✗ Ollama server failed to start properly. Check ollama.log for details.${NC}"
            fi
        else
            echo -e "${RED}✗ Ollama installation failed. Check your internet connection and try again.${NC}"
            echo -e "${YELLOW}You can also install manually from https://ollama.ai${NC}"
        fi
    else
        echo -e "${YELLOW}Skipping Ollama installation.${NC}"
        echo -e "${YELLOW}Please install Ollama manually from https://ollama.ai${NC}"
        echo -e "${RED}FoundationsAI requires Ollama to function.${NC}"
    fi
fi

# Check GPU/CUDA
echo ""
echo -e "${YELLOW}Checking GPU and CUDA setup...${NC}"
if command -v nvidia-smi &> /dev/null; then
    gpuInfo=$(nvidia-smi --query-gpu=name,memory.total --format=csv,noheader 2>&1 | head -n 1)
    echo -e "${GREEN}GPU detected: $gpuInfo${NC}"

    # Check CUDA
    if command -v nvcc &> /dev/null; then
        cudaVersion=$(nvcc --version | grep "release" | awk '{print $5}' | cut -d',' -f1)
        echo -e "${GREEN}CUDA version: $cudaVersion${NC}"
    else
        echo -e "${YELLOW}WARNING: nvcc not found. CUDA toolkit may not be installed.${NC}"
    fi

    # Suggest running GPU diagnostics
    echo -e "${YELLOW}Tip: Run 'python3 check_gpu.py' to verify GPU configuration${NC}"
else
    echo -e "${YELLOW}WARNING: nvidia-smi not found. GPU may not be available.${NC}"
    echo -e "${YELLOW}Models will run on CPU (much slower)${NC}"
fi
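
# Optional deeper check (a sketch, assuming PyTorch is among the dependencies
# in requirements.txt): ask the installed torch whether it can see CUDA
if python -c "import torch" &> /dev/null; then
    echo -e "${CYAN}PyTorch reports CUDA available: $(python -c 'import torch; print(torch.cuda.is_available())')${NC}"
fi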

# Check MongoDB
echo ""
echo -e "${YELLOW}Checking MongoDB connection...${NC}"
read -p "Is MongoDB running and configured? (y/n): " mongoCheck
if [ "$mongoCheck" != "y" ]; then
    echo -e "${YELLOW}WARNING: MongoDB is required for conversation history${NC}"
    echo -e "${YELLOW}Set ENABLE_CONVERSATION_HISTORY=false in .env to disable${NC}"
fi
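
# Optional automated probe (a sketch; assumes MongoDB on the default
# mongodb://localhost:27017): if mongosh is on PATH, ping the server directly
if command -v mongosh &> /dev/null; then
    if mongosh --quiet --eval "db.runCommand({ ping: 1 })" &> /dev/null; then
        echo -e "${GREEN}MongoDB responded to ping on the default port${NC}"
    else
        echo -e "${YELLOW}mongosh found, but MongoDB did not respond on localhost:27017${NC}"
    fi
fi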

# Start options
echo ""
echo -e "${CYAN}==================================${NC}"
echo -e "${CYAN}Ready to start FoundationsAI!${NC}"
echo -e "${CYAN}==================================${NC}"
echo ""
echo -e "${YELLOW}Choose startup mode:${NC}"
echo -e "${WHITE}1. Start API server (LLMAPI.py)${NC}"
echo -e "${WHITE}2. Interactive CLI (model.py)${NC}"
echo -e "${WHITE}3. Exit${NC}"
echo ""

read -p "Enter choice (1-3): " choice

case $choice in
    1)
        echo ""
        echo -e "${GREEN}Starting FoundationsAI API server...${NC}"
        echo -e "${CYAN}API will be available at: http://localhost:7654${NC}"
        echo -e "${YELLOW}Press Ctrl+C to stop${NC}"
        echo ""
        python LLMAPI.py
        ;;
    2)
        echo ""
        echo -e "${GREEN}Starting interactive CLI...${NC}"
        echo ""
        python model.py
        ;;
    3)
        echo -e "${YELLOW}Exiting...${NC}"
        exit 0
        ;;
    *)
        echo -e "${RED}Invalid choice${NC}"
        exit 1
        ;;
esac
