# Hugging Face Space — Docker build file (FastAPI + llama-cpp-python)
# Base image: Debian-based Python 3.11 (glibc, so 'manylinux' wheels install cleanly).
FROM python:3.11

# All subsequent paths are relative to /app.
WORKDIR /app

# Build tools for any packages that still need compiling, plus libgl1
# (a common runtime dependency of imaging libraries).
# Clear the apt cache in the same layer to keep the image small.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    cmake \
    libgl1 \
    && rm -rf /var/lib/apt/lists/*

# Install llama-cpp-python from the prebuilt CPU (AVX2) wheel index so the
# image builds quickly on CPU-only hosts such as Hugging Face Spaces:
#   1. No source compilation (fast install).
#   2. Pulls a 'manylinux' wheel (avoids 'musl'-related install failures).
# NOTE(review): this index assumes the build/run host supports AVX2 — confirm.
RUN pip install --upgrade pip && \
    pip install llama-cpp-python==0.2.90 \
    --extra-index-url https://jllllll.github.io/llama-cpp-python-cuBLAS-wheels/AVX2/cpu \
    --prefer-binary \
    --no-cache-dir

# Install the remaining dependencies before copying the app code so this
# layer is cached independently of source-code changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application code.
COPY . .

# Spaces runs the container as a non-root user; open permissions so the app
# can write under /app. (777 is deliberately broad — tighten if possible.)
RUN chmod -R 777 /app

# Hugging Face Spaces routes HTTP traffic to port 7860.
EXPOSE 7860

# Launch the FastAPI app with uvicorn.
CMD ["uvicorn", "api.main:app", "--host", "0.0.0.0", "--port", "7860"]