
Commit 044995e

HanClinto authored and Neo Zhang committed
Removes multiple newlines at the end of files that are breaking the editorconfig step of CI. (ggml-org#8258)
1 parent 6b695b5 commit 044995e
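
For context, the editorconfig step of CI flags files that end with more than one trailing newline, which is why every hunk below deletes only blank lines at end-of-file. As a rough sketch of the kind of cleanup applied here (a hypothetical helper for illustration only, not a script from this commit or from llama.cpp's CI):

```python
#!/usr/bin/env python3
# Hypothetical helper (illustrative only, not part of this commit):
# rewrite each file passed on the command line so it ends with exactly
# one newline, the condition an insert_final_newline editorconfig rule checks.
import sys

def trim_trailing_newlines(path: str) -> None:
    with open(path, "r", encoding="utf-8") as f:
        text = f.read()  # universal newline mode maps line endings to "\n"
    if not text:
        return  # leave empty files alone
    fixed = text.rstrip("\n") + "\n"  # collapse a run of final newlines to one
    if fixed != text:
        with open(path, "w", encoding="utf-8", newline="\n") as f:
            f.write(fixed)

if __name__ == "__main__":
    for path in sys.argv[1:]:
        trim_trailing_newlines(path)
```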

22 files changed, +0 -24 lines changed


.github/ISSUE_TEMPLATE/config.yml

Lines changed: 0 additions & 2 deletions
@@ -9,5 +9,3 @@ contact_links:
   - name: Want to contribute?
     url: https://github.com/ggerganov/llama.cpp/wiki/contribute
     about: Head to the contribution guide page of the wiki for areas you can help with
-
-

common/common.h

Lines changed: 0 additions & 1 deletion
@@ -459,4 +459,3 @@ void yaml_dump_string_multiline(FILE * stream, const char * prop_name, const cha
 void yaml_dump_non_result_info(
     FILE * stream, const gpt_params & params, const llama_context * lctx,
     const std::string & timestamp, const std::vector<int> & prompt_tokens, const char * model_desc);
-

examples/embedding/README.md

Lines changed: 0 additions & 1 deletion
@@ -58,4 +58,3 @@ The above command will output space-separated float values.
 ```powershell
 embedding.exe -p 'Castle<#sep#>Stronghold<#sep#>Dog<#sep#>Cat' --embd-separator '<#sep#>' --embd-normalize 2 --embd-output-format '' -m './path/to/model.gguf' --n-gpu-layers 99 --log-disable 2>/dev/null
 ```
-

examples/infill/infill.cpp

Lines changed: 0 additions & 1 deletion
@@ -659,4 +659,3 @@ int main(int argc, char ** argv) {
 
     return 0;
 }
-

examples/lookup/README.md

Lines changed: 0 additions & 1 deletion
@@ -10,4 +10,3 @@ More info:
 
 https://github.com/ggerganov/llama.cpp/pull/4484
 https://github.com/ggerganov/llama.cpp/issues/4226
-

examples/main-cmake-pkg/.gitignore

Lines changed: 0 additions & 1 deletion
@@ -48,4 +48,3 @@
 build*/
 out/
 tmp/
-

examples/main-cmake-pkg/CMakeLists.txt

Lines changed: 0 additions & 1 deletion
@@ -30,4 +30,3 @@ target_include_directories(${TARGET} PRIVATE ${_common_path})
 install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
-

examples/server-embd.py

Lines changed: 0 additions & 1 deletion
@@ -31,4 +31,3 @@ async def main():
             embedding2 = np.array(result[j])
             similarity = np.dot(embedding1, embedding2) / (np.linalg.norm(embedding1) * np.linalg.norm(embedding2))
             print(f"Similarity between {i} and {j}: {similarity:.2f}")
-

examples/server/tests/features/passkey.feature

Lines changed: 0 additions & 1 deletion
@@ -52,4 +52,3 @@ Feature: Passkey / Self-extend with context shift
       #| TheBloke/Llama-2-7B-GGUF | llama-2-7b.Q2_K.gguf | 4096 | 3 | 16384 | 512 | 4 | 512 | 500 | 300 | 1234 | 5 | 1234 |
       #| TheBloke/Mixtral-8x7B-v0.1-GGUF | mixtral-8x7b-v0.1.Q2_K.gguf | 32768 | 2 | 16384 | 512 | 4 | 512 | 500 | 100 | 0987 | 5 | 0
       # 987 |
-

examples/server/themes/buttons-top/index.html

Lines changed: 0 additions & 1 deletion
@@ -1054,4 +1054,3 @@ <h1>llama.cpp</h1>
 </body>
 
 </html>
-

examples/server/themes/wild/index.html

Lines changed: 0 additions & 1 deletion
@@ -1058,4 +1058,3 @@
 </body>
 
 </html>
-

examples/sycl/run-llama2.sh

Lines changed: 0 additions & 1 deletion
@@ -34,4 +34,3 @@ fi
 
 #use multiple GPUs with same max compute units
 #ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m models/llama-2-7b.Q4_0.gguf -p "${INPUT2}" -n 400 -e -ngl 33 -s 0
-

examples/sycl/win-build-sycl.bat

Lines changed: 0 additions & 1 deletion
@@ -31,4 +31,3 @@ exit /B 0
 :ERROR
 echo comomand error: %errorlevel%
 exit /B %errorlevel%
-

examples/sycl/win-run-llama2.bat

Lines changed: 0 additions & 2 deletions
@@ -7,5 +7,3 @@ set INPUT2="Building a website can be done in 10 simple steps:\nStep 1:"
 
 
 .\build\bin\llama-cli.exe -m models\llama-2-7b.Q4_0.gguf -p %INPUT2% -n 400 -e -ngl 33 -s 0
-
-

ggml/include/ggml-metal.h

Lines changed: 0 additions & 1 deletion
@@ -63,4 +63,3 @@ GGML_API void ggml_backend_metal_capture_next_compute(ggml_backend_t backend);
 #ifdef __cplusplus
 }
 #endif
-

ggml/src/ggml-cuda/cpy.cu

Lines changed: 0 additions & 1 deletion
@@ -487,4 +487,3 @@ void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1) {
         GGML_ASSERT(false);
     }
 }
-

ggml/src/ggml-metal.metal

Lines changed: 0 additions & 1 deletion
@@ -6537,4 +6537,3 @@ template [[host_name("kernel_mul_mv_id_iq3_s_f32")]] kernel kernel_mul_mv_id_t
 template [[host_name("kernel_mul_mv_id_iq2_s_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq2_s_f32_impl>>;
 template [[host_name("kernel_mul_mv_id_iq4_nl_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_nl_f32_impl>>;
 template [[host_name("kernel_mul_mv_id_iq4_xs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_xs_f32_impl>>;
-

ggml/src/ggml-quants.h

Lines changed: 0 additions & 1 deletion
@@ -130,4 +130,3 @@ void iq3xs_free_impl(int grid_size);
 #ifdef __cplusplus
 }
 #endif
-

ggml/src/ggml-vulkan-shaders.hpp

Lines changed: 0 additions & 1 deletion
@@ -144954,4 +144954,3 @@ unsigned char sum_rows_f32_data[] = {
 
 };
 const uint64_t sum_rows_f32_len = 2112;
-

scripts/pod-llama.sh

Lines changed: 0 additions & 1 deletion
@@ -210,4 +210,3 @@ fi
 # more benches
 #GGML_CUDA=1 make -j && ./llama-batched-bench ./models/codellama-7b/ggml-model-q4_k.gguf 4096 1 99 1 512,3200 128,128,800 1
 #GGML_CUDA=1 make -j && ./llama-batched-bench ./models/codellama-13b/ggml-model-q4_k.gguf 4096 1 99 1 512,3200 128,128,800 1
-

src/unicode-data.cpp

Lines changed: 0 additions & 1 deletion
@@ -7030,4 +7030,3 @@ const std::vector<range_nfd> unicode_ranges_nfd = { // start, last, nfd
     {0x02FA1C, 0x02FA1C, 0x009F3B},
     {0x02FA1D, 0x02FA1D, 0x02A600},
 };
-

tests/test-rope.cpp

Lines changed: 0 additions & 1 deletion
@@ -218,4 +218,3 @@ int main(int /*argc*/, const char ** /*argv*/) {
 
     return 0;
 }
-

0 commit comments
