FROM intel/deep-learning-essentials:2025.2.0-0-devel-ubuntu24.04 AS vllm-base

# Remove the Intel graphics apt source list shipped with the base image.
RUN rm /etc/apt/sources.list.d/intel-graphics.list

# System packages needed to build and run vLLM in this image.
RUN apt-get update -y && \
    apt-get install -y --no-install-recommends --fix-missing \
    curl \
    ffmpeg \
    git \
    gpg \
    libsndfile1 \
    libsm6 \
    libxext6 \
    libgl1 \
    lsb-release \
    numactl \
    python3 \
    python3-dev \
    python3-pip \
    python3-venv \
    wget \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/{apt,dpkg,cache,log}

WORKDIR /workspace
RUN git clone https://github.com/vllm-project/vllm.git
WORKDIR /workspace/vllm

# Set up a wrapper shell that activates the Python virtual environment;
# it is used as the SHELL for the remaining build steps.
RUN { \
    echo '#!/bin/bash' ; \
    echo 'source /workspace/venv/bin/activate' ; \
    echo 'if [[ "${1}" != "" ]]; then bash -c "${@}"; else bash -i; fi' ; \
    } > /workspace/shell ; \
    chmod +x /workspace/shell

SHELL [ "/workspace/shell" ]

# Virtual environment activated by the wrapper shell; --system-site-packages
# keeps the system Python packages from the base image visible.
RUN python3 -m venv --system-site-packages /workspace/venv

#COPY requirements/xpu.txt /workspace/vllm/requirements/xpu.txt
#COPY requirements/common.txt /workspace/vllm/requirements/common.txt

# Install the XPU-specific Python requirements into the virtual environment.
#RUN --mount=type=cache,target=/root/.cache/pip \
RUN pip install --no-cache-dir \
    -r requirements/xpu.txt

ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib/"

#ARG GIT_REPO_CHECK=0
#RUN --mount=type=bind,source=.git,target=.git \
RUN bash tools/check_repo.sh

# Build vLLM from source for Intel XPU devices; worker processes use 'spawn' instead of 'fork'.
ENV VLLM_TARGET_DEVICE=xpu
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn

# --mount=type=bind,source=.git,target=.git \
#RUN --mount=type=cache,target=/root/.cache/pip \
RUN python3 setup.py install

CMD ["/bin/bash"]
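# Second stage: adds the extra dependencies needed for the OpenAI-compatible API server.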
FROM vllm-base AS vllm-openai

SHELL [ "/workspace/shell" ]

# Install additional dependencies for the OpenAI API server.
#RUN --mount=type=cache,target=/root/.cache/pip \
RUN pip install accelerate hf_transfer pytest 'modelscope!=1.15.0'

ENV VLLM_USAGE_SOURCE=production-docker-image
ENV TRITON_XPU_PROFILE=1
# Install development dependencies (for testing).
RUN python3 -m pip install -e tests/vllm_test_utils

SHELL [ "/bin/bash", "-c" ]

# RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | \
#     gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
# RUN echo "deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy unified" | \
#     tee /etc/apt/sources.list.d/intel-gpu-jammy.list

# RUN apt-get update \
#     && apt-get install -y \
#     libze-intel-gpu1 \
#     libze1 \
#     intel-opencl-icd \
#     clinfo \
#     && apt-get clean \
#     && rm -rf /var/lib/apt/lists/{apt,dpkg,cache,log}

#ENTRYPOINT ["python3", "-m", "vllm.entrypoints.openai.api_server"]
ENTRYPOINT ["/bin/bash"]
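
# A minimal usage sketch, assuming this file is the Dockerfile in the build context
# (the image tag and GPU device flag below are illustrative, not part of this file):
#   docker build --target vllm-openai -t vllm-xpu .
#   docker run -it --rm --device /dev/dri vllm-xpu
# Inside the container, activate the virtual environment and start the server:
#   source /workspace/venv/bin/activate
#   python3 -m vllm.entrypoints.openai.api_server --model <model-name>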