
Commit 0e814df

devops : remove clblast + LLAMA_CUDA -> GGML_CUDA (ggml-org#8139)

ggml-ci

1 parent: a95631e

12 files changed: +19, -103 lines
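The key change for downstream users: every LLAMA_* backend build flag is renamed to GGML_*, presumably to match the ggml library where the backends live, and the CLBlast packaging is dropped. A before/after sketch of the switch, using only the Make and CMake flags that appear in the diffs below (build paths and targets are illustrative):

    # before this commit
    make -j LLAMA_CUDA=1
    cmake -B build -DLLAMA_VULKAN=1

    # after this commit
    make -j GGML_CUDA=1
    cmake -B build -DGGML_VULKAN=1

Note that application-level flags such as LLAMA_CURL keep the LLAMA_ prefix; only the compute-backend toggles move to GGML_.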

.devops/full-cuda.Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ COPY . .
 # Set nvcc architecture
 ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
 # Enable CUDA
-ENV LLAMA_CUDA=1
+ENV GGML_CUDA=1
 # Enable cURL
 ENV LLAMA_CURL=1
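Since GGML_CUDA=1 is set via ENV, CUDA support is baked into any image built from this file; a minimal usage sketch (the tag is illustrative, and CUDA_DOCKER_ARCH is presumably declared as an ARG earlier in the Dockerfile, given the ${CUDA_DOCKER_ARCH} reference above):

    docker build -t local/llama.cpp:full-cuda -f .devops/full-cuda.Dockerfile .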

.devops/full-rocm.Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -36,7 +36,7 @@ COPY . .
 # Set nvcc architecture
 ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
 # Enable ROCm
-ENV LLAMA_HIPBLAS=1
+ENV GGML_HIPBLAS=1
 ENV CC=/opt/rocm/llvm/bin/clang
 ENV CXX=/opt/rocm/llvm/bin/clang++
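The same rename applies to native (non-Docker) ROCm builds; a sketch assuming the Make path honors GGML_HIPBLAS exactly as this Dockerfile's ENV does, with the ROCm clang toolchain taken from the ENV lines above:

    CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++ \
        make -j GGML_HIPBLAS=1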

.devops/llama-cli-cuda.Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@ COPY . .
 # Set nvcc architecture
 ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
 # Enable CUDA
-ENV LLAMA_CUDA=1
+ENV GGML_CUDA=1

 RUN make -j$(nproc) llama-cli

.devops/llama-cli-intel.Dockerfile

Lines changed: 5 additions & 5 deletions
@@ -2,19 +2,19 @@ ARG ONEAPI_VERSION=2024.1.1-devel-ubuntu22.04

 FROM intel/oneapi-basekit:$ONEAPI_VERSION as build

-ARG LLAMA_SYCL_F16=OFF
+ARG GGML_SYCL_F16=OFF
 RUN apt-get update && \
     apt-get install -y git

 WORKDIR /app

 COPY . .

-RUN if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \
-    echo "LLAMA_SYCL_F16 is set" && \
-    export OPT_SYCL_F16="-DLLAMA_SYCL_F16=ON"; \
+RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
+    echo "GGML_SYCL_F16 is set" && \
+    export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
     fi && \
-    cmake -B build -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \
+    cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \
     cmake --build build --config Release --target llama-cli

 FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime
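Here the renamed toggle is a Docker build argument rather than an ENV, so FP16 SYCL kernels are opted into at image build time; a sketch of passing the new ARG (image tag illustrative):

    docker build -t local/llama.cpp:cli-intel \
        --build-arg GGML_SYCL_F16=ON \
        -f .devops/llama-cli-intel.Dockerfile .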

.devops/llama-cli-rocm.Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -36,7 +36,7 @@ COPY . .
 # Set nvcc architecture
 ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
 # Enable ROCm
-ENV LLAMA_HIPBLAS=1
+ENV GGML_HIPBLAS=1
 ENV CC=/opt/rocm/llvm/bin/clang
 ENV CXX=/opt/rocm/llvm/bin/clang++

.devops/llama-cli-vulkan.Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key
 # Build it
 WORKDIR /app
 COPY . .
-RUN cmake -B build -DLLAMA_VULKAN=1 && \
+RUN cmake -B build -DGGML_VULKAN=1 && \
     cmake --build build --config Release --target llama-cli

 # Clean up

.devops/llama-cpp-clblast.srpm.spec

Lines changed: 0 additions & 84 deletions
This file was deleted: with CLBlast support removed from the project, its RPM packaging spec no longer has anything to build.

.devops/llama-cpp-cuda.srpm.spec

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@ CPU inference for Meta's Lllama2 models using default options.
 %setup -n llama.cpp-master

 %build
-make -j LLAMA_CUDA=1
+make -j GGML_CUDA=1

 %install
 mkdir -p %{buildroot}%{_bindir}/
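To exercise the updated spec, a standard rpmbuild invocation should suffice; a hedged sketch, assuming the llama.cpp-master source tarball expected by the %setup line is available to rpmbuild:

    rpmbuild -ba .devops/llama-cpp-cuda.srpm.spec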

.devops/llama-server-cuda.Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@ COPY . .
 # Set nvcc architecture
 ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
 # Enable CUDA
-ENV LLAMA_CUDA=1
+ENV GGML_CUDA=1
 # Enable cURL
 ENV LLAMA_CURL=1

.devops/llama-server-intel.Dockerfile

Lines changed: 5 additions & 5 deletions
@@ -2,19 +2,19 @@ ARG ONEAPI_VERSION=2024.1.1-devel-ubuntu22.04

 FROM intel/oneapi-basekit:$ONEAPI_VERSION as build

-ARG LLAMA_SYCL_F16=OFF
+ARG GGML_SYCL_F16=OFF
 RUN apt-get update && \
     apt-get install -y git libcurl4-openssl-dev

 WORKDIR /app

 COPY . .

-RUN if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \
-    echo "LLAMA_SYCL_F16 is set" && \
-    export OPT_SYCL_F16="-DLLAMA_SYCL_F16=ON"; \
+RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
+    echo "GGML_SYCL_F16 is set" && \
+    export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
     fi && \
-    cmake -B build -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON ${OPT_SYCL_F16} && \
+    cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON ${OPT_SYCL_F16} && \
     cmake --build build --config Release --target llama-server

 FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime

.devops/llama-server-rocm.Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -36,7 +36,7 @@ COPY . .
 # Set nvcc architecture
 ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
 # Enable ROCm
-ENV LLAMA_HIPBLAS=1
+ENV GGML_HIPBLAS=1
 ENV CC=/opt/rocm/llvm/bin/clang
 ENV CXX=/opt/rocm/llvm/bin/clang++

.devops/llama-server-vulkan.Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key
 # Build it
 WORKDIR /app
 COPY . .
-RUN cmake -B build -DLLAMA_VULKAN=1 -DLLAMA_CURL=1 && \
+RUN cmake -B build -DGGML_VULKAN=1 -DLLAMA_CURL=1 && \
    cmake --build build --config Release --target llama-server

 # Clean up
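A migration caveat worth checking in downstream CI: a stale LLAMA_CUDA or LLAMA_VULKAN flag is no longer recognized, so builds may silently fall back to CPU-only binaries rather than fail. A quick way to spot leftovers in a checkout (a sketch, not exhaustive):

    grep -rn 'LLAMA_CUDA\|LLAMA_HIPBLAS\|LLAMA_SYCL\|LLAMA_VULKAN' .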
