# Use a slim Debian-based image with Python 3.10
FROM python:3.10-slim
# Set working directory
WORKDIR /app
# Install system dependencies
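# (ffmpeg, libsm6 and libxext6 are common runtime dependencies for image/video libraries
# such as OpenCV; git is needed for the pip install from GitHub further down)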
RUN apt-get update && apt-get install -y \
ffmpeg \
libsm6 \
libxext6 \
git \
&& rm -rf /var/lib/apt/lists/*
# Install UV for faster dependency installation
RUN pip install uv
# Copy requirements and install
COPY requirements.txt .
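# --system installs into the image's global Python environment rather than a virtualenv,
# which is the usual choice inside a container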
RUN uv pip install --system -r requirements.txt
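# Install optimum-intel with the OpenVINO/diffusers extras from source; the extra index
# pulls CPU-only PyTorch wheels so no CUDA libraries end up in the image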
RUN pip install -q \
"optimum-intel[openvino,diffusers]@git+https://github.com/huggingface/optimum-intel.git" \
"transformers>=4.33" \
--extra-index-url https://download.pytorch.org/whl/cpu
# Create cache directories with write permissions
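# (world-writable so the caches stay usable if the container runs as a non-root user,
# as is typically the case on Hugging Face Spaces)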
RUN mkdir -p /app/cache/huggingface /app/cache/openvino /app/matplotlib_cache /app/openvino_cache \
&& chmod -R 777 /app/cache /app/matplotlib_cache /app/openvino_cache
# Set environment variables for cache directories
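# (redirects Hugging Face, matplotlib and OpenVINO writes away from $HOME into the
# writable directories created above)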
ENV HF_HOME=/app/cache/huggingface
ENV MPLCONFIGDIR=/app/matplotlib_cache
ENV OPENVINO_TELEMETRY_DIR=/app/openvino_cache
# Pre-download base SDXL model
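# Downloading at build time bakes the model weights into the image, so container start-up
# does not have to fetch them; the ov_config CACHE_DIR also lets OpenVINO reuse its
# compiled model cache across runs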
RUN python -c "from optimum.intel.openvino import OVStableDiffusionPipeline; \
OVStableDiffusionPipeline.from_pretrained('rupeshs/hyper-sd-sdxl-1-step-openvino-int8', ov_config={'CACHE_DIR': '/app/cache/openvino'})"
# Copy application code
COPY app.py .
# Expose port (7860 by default, configurable via the PORT env variable)
ENV PORT=7860
EXPOSE $PORT
# Command to run the Flask app
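# (app.py is assumed to read the PORT variable and bind to 0.0.0.0 so the server is
# reachable from outside the container)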
CMD ["python", "app.py"]