# syntax=docker/dockerfile:1
# Base image: Python 3.11 (Debian-based; provides apt and glibc manylinux support)
FROM python:3.11

# All subsequent relative paths resolve under /app (created automatically)
WORKDIR /app

# Build/runtime OS packages:
#   build-essential, cmake — toolchain in case a source build is ever needed
#   libgl1               — shared library commonly required by CV packages
# DEBIAN_FRONTEND=noninteractive is set inline (not via ENV) so package
# configuration prompts can never hang the build, without polluting the
# runtime environment. apt lists are removed in the same layer to keep it small.
RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
    apt-get install -y --no-install-recommends \
    build-essential \
    cmake \
    libgl1 \
    && rm -rf /var/lib/apt/lists/*
# === Prebuilt wheel install for llama-cpp-python ===
# The extra index (jllllll .../AVX2/cpu) hosts prebuilt CPU wheels suited to
# generic hosted servers (e.g. Hugging Face Spaces):
#   1. No on-the-fly compilation (fast install).
#   2. Serves 'manylinux' wheels, avoiding 'musl'-related install crashes.
# --prefer-binary keeps pip from falling back to a source build;
# --no-cache-dir (on both pip invocations) keeps the pip cache out of the layer.
RUN pip install --no-cache-dir --upgrade pip && \
    pip install llama-cpp-python==0.2.90 \
    --extra-index-url https://jllllll.github.io/llama-cpp-python-cuBLAS-wheels/AVX2/cpu \
    --prefer-binary \
    --no-cache-dir
# Dependency manifest is copied on its own so this layer stays cached
# until requirements.txt itself changes (source edits don't reinstall deps).
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt
# Create an unprivileged runtime user. UID 1000 matches the Hugging Face
# Spaces convention (the port 7860 below suggests that target).
RUN useradd -m -u 1000 appuser

# Copy application code already owned by the runtime user.
# This replaces the previous `chmod -R 777 /app` (world-writable files are a
# security risk and the extra RUN layer doubled the copied data's size).
COPY --chown=appuser:appuser . .

# Drop root for the remainder of the image and at runtime
USER appuser

# Documentation only (does not publish); the service listens on 7860
EXPOSE 7860

# Exec-form CMD: uvicorn runs as PID 1 and receives SIGTERM on `docker stop`
CMD ["uvicorn", "api.main:app", "--host", "0.0.0.0", "--port", "7860"]