
Commit ea47119

Merge branch 'master' into gg/bpe-preprocess
2 parents: 77cbb79 + bc4bba3


78 files changed: +4558 -457 lines

.flake8

Lines changed: 14 additions & 1 deletion
@@ -1,4 +1,17 @@
 [flake8]
 max-line-length = 125
 ignore = E203,E211,E221,E225,E231,E241,E251,E261,E266,E501,E701,E704,W503
-exclude = examples/*,examples/*/**,*/**/__init__.py,scripts/gen-unicode-data.py,tests/test-tokenizer-0.py
+exclude =
+    # Do not traverse examples
+    examples,
+    # Do not include package initializers
+    __init__.py,
+    # No need to traverse our git directory
+    .git,
+    # There's no value in checking cache directories
+    __pycache__,
+    # No need to include the build path
+    build,
+    # This contains builds that we don't want to check
+    dist  # This is generated with `python build .` for package releases
+    # max-complexity = 10

.github/workflows/bench.yml

Lines changed: 13 additions & 1 deletion
@@ -52,7 +52,19 @@ jobs:
       ftype: q4_0
       pr_comment_enabled: "true"

-    if: ${{ github.event.inputs.gpu-series == 'Standard_NC4as_T4_v3' || github.event.schedule || github.event.pull_request || github.head_ref == 'master' || github.ref_name == 'master' || github.event.push.ref == 'refs/heads/master' }}
+    if: |
+      inputs.gpu-series == 'Standard_NC4as_T4_v3'
+      || (
+        github.event_name == 'schedule'
+        && github.ref_name == 'master'
+        && github.repository_owner == 'ggerganov'
+      )
+      || github.event_name == 'pull_request_target'
+      || (
+        github.event_name == 'push'
+        && github.event.ref == 'refs/heads/master'
+        && github.repository_owner == 'ggerganov'
+      )

     steps:
       - name: Clone
         id: checkout

CMakeLists.txt

Lines changed: 11 additions & 1 deletion
@@ -103,6 +103,8 @@ set(LLAMA_CUDA_KQUANTS_ITER "2" CACHE STRING "llama: iters./thread per block for
 set(LLAMA_CUDA_PEER_MAX_BATCH_SIZE "128" CACHE STRING
     "llama: max. batch size for using peer access")
 option(LLAMA_CUDA_NO_PEER_COPY "llama: do not use peer to peer copies" OFF)
+option(LLAMA_CUDA_NO_VMM "llama: do not try to use CUDA VMM" OFF)
+
 option(LLAMA_CURL "llama: use libcurl to download model from an URL" OFF)
 option(LLAMA_HIPBLAS "llama: use hipBLAS" OFF)
 option(LLAMA_HIP_UMA "llama: use HIP unified memory architecture" OFF)

@@ -403,12 +405,16 @@ if (LLAMA_CUDA)
     list(APPEND GGML_SOURCES_CUDA "ggml-cuda.cu")

     add_compile_definitions(GGML_USE_CUDA)
+    add_compile_definitions(GGML_CUDA_USE_GRAPHS)
     if (LLAMA_CUDA_FORCE_DMMV)
         add_compile_definitions(GGML_CUDA_FORCE_DMMV)
     endif()
     if (LLAMA_CUDA_FORCE_MMQ)
         add_compile_definitions(GGML_CUDA_FORCE_MMQ)
     endif()
+    if (LLAMA_CUDA_NO_VMM)
+        add_compile_definitions(GGML_CUDA_NO_VMM)
+    endif()
     add_compile_definitions(GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X})
     add_compile_definitions(GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y})
     if (DEFINED LLAMA_CUDA_DMMV_Y)

@@ -434,7 +440,11 @@ if (LLAMA_CUDA)
         set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt)
     endif()

-    set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cuda_driver)
+    if (LLAMA_CUDA_NO_VMM)
+        # No VMM requested, no need to link directly with the cuda driver lib (libcuda.so)
+    else()
+        set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cuda_driver) # required by cuDeviceGetAttribute(), cuMemGetAllocationGranularity(...), ...
+    endif()

     if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
         # 52 == lowest CUDA 12 standard

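For orientation, the new `LLAMA_CUDA_NO_VMM` option only surfaces as a `GGML_CUDA_NO_VMM` compile definition. The sketch below shows the usual pattern for consuming such a flag on the CUDA side, and why the default build links `CUDA::cuda_driver`; the function name and structure are illustrative assumptions, not the actual ggml-cuda code.

```cpp
// Illustrative sketch only (not the actual ggml-cuda implementation):
// how a GGML_CUDA_NO_VMM compile definition is typically consumed.
#include <cuda.h>

static bool device_supports_vmm(int device) {
#if defined(GGML_CUDA_NO_VMM)
    (void) device;
    return false; // the build explicitly opted out of CUDA virtual memory management
#else
    // Driver API query -- this is the kind of call that requires linking CUDA::cuda_driver.
    int supported = 0;
    cuDeviceGetAttribute(&supported, CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED, device);
    return supported != 0;
#endif
}
```
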
Makefile

Lines changed: 1 addition & 1 deletion
@@ -433,7 +433,7 @@ ifdef LLAMA_CUDA
 else
 	CUDA_PATH ?= /usr/local/cuda
 endif
-MK_CPPFLAGS += -DGGML_USE_CUDA -I$(CUDA_PATH)/include -I$(CUDA_PATH)/targets/$(UNAME_M)-linux/include
+MK_CPPFLAGS += -DGGML_USE_CUDA -I$(CUDA_PATH)/include -I$(CUDA_PATH)/targets/$(UNAME_M)-linux/include -DGGML_CUDA_USE_GRAPHS
 MK_LDFLAGS += -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L$(CUDA_PATH)/lib64 -L/usr/lib64 -L$(CUDA_PATH)/targets/$(UNAME_M)-linux/lib -L/usr/lib/wsl/lib
 OBJS += ggml-cuda.o
 OBJS += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/*.cu))

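Both the CMake and Makefile changes turn on the `GGML_CUDA_USE_GRAPHS` define. As background, the sketch below shows the generic CUDA-graphs pattern such a flag usually gates (record a stream of launches once, then replay them with a single call); it is a hedged illustration, not the actual ggml-cuda code, and `enqueue_work` is a placeholder for whatever enqueues the kernels.

```cpp
// Generic CUDA-graphs pattern behind a GGML_CUDA_USE_GRAPHS-style flag.
// Sketch only; error checking omitted, requires CUDA 11.4+ for cudaGraphInstantiateWithFlags.
#include <cuda_runtime.h>

void run_with_optional_graph(cudaStream_t stream, void (*enqueue_work)(cudaStream_t)) {
#ifdef GGML_CUDA_USE_GRAPHS
    cudaGraph_t graph;
    cudaGraphExec_t graph_exec;

    // Record all work submitted to `stream` into a graph instead of executing it.
    cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
    enqueue_work(stream);
    cudaStreamEndCapture(stream, &graph);

    // Instantiate once, then launch the whole captured sequence with a single call.
    cudaGraphInstantiateWithFlags(&graph_exec, graph, 0);
    cudaGraphLaunch(graph_exec, stream);

    cudaGraphExecDestroy(graph_exec);
    cudaGraphDestroy(graph);
#else
    // Without graph support, launch the kernels directly.
    enqueue_work(stream);
#endif
    cudaStreamSynchronize(stream);
}
```
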
README.md

Lines changed: 22 additions & 53 deletions
@@ -20,7 +20,8 @@ Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others)

 ### Hot topics

-- **BPE pre-tokenization support has been added: https://github.com/ggerganov/llama.cpp/pull/6920**
+- **Initial Flash-Attention support: https://github.com/ggerganov/llama.cpp/pull/5021**
+- BPE pre-tokenization support has been added: https://github.com/ggerganov/llama.cpp/pull/6920
 - MoE memory layout has been updated - reconvert models for `mmap` support and regenerate `imatrix` https://github.com/ggerganov/llama.cpp/pull/6387
 - Model sharding instructions using `gguf-split` https://github.com/ggerganov/llama.cpp/discussions/6404
 - Fix major bug in Metal batched inference https://github.com/ggerganov/llama.cpp/pull/6225

@@ -139,7 +140,6 @@ Typically finetunes of the base models below are supported as well.
 - [x] [MobileVLM 1.7B/3B models](https://huggingface.co/models?search=mobileVLM)
 - [x] [Yi-VL](https://huggingface.co/models?search=Yi-VL)
 - [x] [Mini CPM](https://huggingface.co/models?search=MiniCPM)
-- [x] [Moondream](https://huggingface.co/vikhyatk/moondream2)

 **HTTP server**


@@ -712,6 +712,8 @@ Building the program with BLAS support may lead to some performance improvements

 To obtain the official LLaMA 2 weights please see the <a href="#obtaining-and-using-the-facebook-llama-2-model">Obtaining and using the Facebook LLaMA 2 model</a> section. There is also a large selection of pre-quantized `gguf` models available on Hugging Face.

+Note: `convert.py` does not support LLaMA 3, you can use `convert-hf-to-gguf.py` with LLaMA 3 downloaded from Hugging Face.
+
 ```bash
 # obtain the official LLaMA model weights and place them in ./models
 ls ./models

@@ -933,25 +935,35 @@ If your issue is with model generation quality, then please at least scan the fo

 ### Android

-#### Building the Project using Android NDK
-You can easily run `llama.cpp` on Android device with [termux](https://termux.dev/).
+#### Build on Android using Termux
+[Termux](https://github.com/termux/termux-app#installation) is a method to execute `llama.cpp` on an Android device (no root required).
+```
+apt update && apt upgrade -y
+apt install git make cmake
+```

-First, install the essential packages for termux:
+It's recommended to move your model inside the `~/` directory for best performance:
 ```
-pkg install clang wget git cmake
+cd storage/downloads
+mv model.gguf ~/
 ```
-Second, obtain the [Android NDK](https://developer.android.com/ndk) and then build with CMake:

-You can execute the following commands on your computer to avoid downloading the NDK to your mobile. Of course, you can also do this in Termux.
+[Get the code](https://github.com/ggerganov/llama.cpp#get-the-code) & [follow the Linux build instructions](https://github.com/ggerganov/llama.cpp#build) to build `llama.cpp`.

+#### Building the Project using Android NDK
+Obtain the [Android NDK](https://developer.android.com/ndk) and then build with CMake.
+
+Execute the following commands on your computer to avoid downloading the NDK to your mobile. Alternatively, you can also do this in Termux:
 ```
 $ mkdir build-android
 $ cd build-android
 $ export NDK=<your_ndk_directory>
 $ cmake -DCMAKE_TOOLCHAIN_FILE=$NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-23 -DCMAKE_C_FLAGS=-march=armv8.4a+dotprod ..
 $ make
 ```
-Install [termux](https://termux.dev/) on your device and run `termux-setup-storage` to get access to your SD card.
+
+Install [termux](https://github.com/termux/termux-app#installation) on your device and run `termux-setup-storage` to get access to your SD card (if Android 11+ then run the command twice).
+
 Finally, copy these built `llama` binaries and the model file to your device storage. Because the file permissions in the Android sdcard cannot be changed, you can copy the executable files to the `/data/data/com.termux/files/home/bin` path, and then execute the following commands in Termux to add executable permission:

 (Assumed that you have pushed the built executable files to the /sdcard/llama.cpp/bin path using `adb push`)

@@ -973,53 +985,10 @@ $cd /data/data/com.termux/files/home/bin
 $./main -m ../model/llama-2-7b-chat.Q4_K_M.gguf -n 128 -cml
 ```

-Here is a demo of an interactive session running on Pixel 5 phone:
+Here's a demo of an interactive session running on Pixel 5 phone:

 https://user-images.githubusercontent.com/271616/225014776-1d567049-ad71-4ef2-b050-55b0b3b9274c.mp4

-#### Building the Project using Termux (F-Droid)
-Termux from F-Droid offers an alternative route to execute the project on an Android device. This method empowers you to construct the project right from within the terminal, negating the requirement for a rooted device or SD Card.
-
-Outlined below are the directives for installing the project using OpenBLAS and CLBlast. This combination is specifically designed to deliver peak performance on recent devices that feature a GPU.
-
-If you opt to utilize OpenBLAS, you'll need to install the corresponding package.
-```
-apt install libopenblas
-```
-
-Subsequently, if you decide to incorporate CLBlast, you'll first need to install the requisite OpenCL packages:
-```
-apt install ocl-icd opencl-headers opencl-clhpp clinfo
-```
-
-In order to compile CLBlast, you'll need to first clone the respective Git repository, which can be found at this URL: https://github.com/CNugteren/CLBlast. Alongside this, clone this repository into your home directory. Once this is done, navigate to the CLBlast folder and execute the commands detailed below:
-```
-cmake .
-make
-cp libclblast.so* $PREFIX/lib
-cp ./include/clblast.h ../llama.cpp
-```
-
-Following the previous steps, navigate to the LlamaCpp directory. To compile it with OpenBLAS and CLBlast, execute the command provided below:
-```
-cp /data/data/com.termux/files/usr/include/openblas/cblas.h .
-cp /data/data/com.termux/files/usr/include/openblas/openblas_config.h .
-make LLAMA_CLBLAST=1 //(sometimes you need to run this command twice)
-```
-
-Upon completion of the aforementioned steps, you will have successfully compiled the project. To run it using CLBlast, a slight adjustment is required: a command must be issued to direct the operations towards your device's physical GPU, rather than the virtual one. The necessary command is detailed below:
-```
-GGML_OPENCL_PLATFORM=0
-GGML_OPENCL_DEVICE=0
-export LD_LIBRARY_PATH=/vendor/lib64:$LD_LIBRARY_PATH
-```
-
-(Note: some Android devices, like the Zenfone 8, need the following command instead - "export LD_LIBRARY_PATH=/system/vendor/lib64:$LD_LIBRARY_PATH". Source: https://www.reddit.com/r/termux/comments/kc3ynp/opencl_working_in_termux_more_in_comments/ )
-
-For easy and swift re-execution, consider documenting this final part in a .sh script file. This will enable you to rerun the process with minimal hassle.
-
-Place your desired model into the `~/llama.cpp/models/` directory and execute the `./main (...)` script.
-
 ### Docker

 #### Prerequisites

ci/run.sh

Lines changed: 6 additions & 5 deletions
@@ -160,9 +160,8 @@ function gg_run_test_scripts_debug {

     set -e

-    # TODO: too slow, run on dedicated node
-    #(cd ./examples/gguf-split && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
-    #(cd ./examples/quantize && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
+    (cd ./examples/gguf-split && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
+    (cd ./examples/quantize && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log

     set +e
 }

@@ -695,8 +694,10 @@ test $ret -eq 0 && gg_run ctest_release
 if [ -z ${GG_BUILD_LOW_PERF} ]; then
     test $ret -eq 0 && gg_run embd_bge_small

-    test $ret -eq 0 && gg_run test_scripts_debug
-    test $ret -eq 0 && gg_run test_scripts_release
+    if [ -z ${GG_BUILD_CLOUD} ] || [ ${GG_BUILD_EXTRA_TESTS_0} ]; then
+        test $ret -eq 0 && gg_run test_scripts_debug
+        test $ret -eq 0 && gg_run test_scripts_release
+    fi

     if [ -z ${GG_BUILD_VRAM_GB} ] || [ ${GG_BUILD_VRAM_GB} -ge 8 ]; then
         if [ -z ${GG_BUILD_CUDA} ]; then

common/common.cpp

Lines changed: 14 additions & 7 deletions
@@ -1,4 +1,6 @@
 #include "common.h"
+// Change JSON_ASSERT from assert() to GGML_ASSERT:
+#define JSON_ASSERT GGML_ASSERT
 #include "json.hpp"
 #include "json-schema-to-grammar.h"
 #include "llama.h"

@@ -76,7 +78,7 @@ int32_t get_num_physical_cores() {
     // enumerate the set of thread siblings, num entries is num cores
     std::unordered_set<std::string> siblings;
     for (uint32_t cpu=0; cpu < UINT32_MAX; ++cpu) {
-        std::ifstream thread_siblings("/sys/devices/system/cpu"
+        std::ifstream thread_siblings("/sys/devices/system/cpu/cpu"
             + std::to_string(cpu) + "/topology/thread_siblings");
         if (!thread_siblings.is_open()) {
            break; // no more cpus

@@ -911,6 +913,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
        params.instruct = true;
        return true;
    }
+    if (arg == "-cnv" || arg == "--conversation") {
+        params.conversation = true;
+        return true;
+    }
    if (arg == "-cml" || arg == "--chatml") {
        params.chatml = true;
        return true;

@@ -1417,6 +1423,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
    printf(" --version show version and build info\n");
    printf(" -i, --interactive run in interactive mode\n");
    printf(" --interactive-first run in interactive mode and wait for input right away\n");
+    printf(" -cnv, --conversation run in conversation mode (does not print special tokens and suffix/prefix)\n");
    printf(" -ins, --instruct run in instruction mode (use with Alpaca models)\n");
    printf(" -cml, --chatml run in chatml mode (use with ChatML-compatible models)\n");
    printf(" --multiline-input allows you to write or paste multiple lines without ending each in '\\'\n");

@@ -1964,18 +1971,18 @@ static bool llama_download_file(const std::string & url, const std::string & pat
        try {
            metadata_in >> metadata;
            fprintf(stderr, "%s: previous metadata file found %s: %s\n", __func__, metadata_path.c_str(), metadata.dump().c_str());
-            if (metadata.contains("url") && metadata["url"].is_string()) {
-                auto previous_url = metadata["url"].get<std::string>();
+            if (metadata.contains("url") && metadata.at("url").is_string()) {
+                auto previous_url = metadata.at("url").get<std::string>();
                if (previous_url != url) {
                    fprintf(stderr, "%s: Model URL mismatch: %s != %s\n", __func__, url.c_str(), previous_url.c_str());
                    return false;
                }
            }
-            if (metadata.contains("etag") && metadata["etag"].is_string()) {
-                etag = metadata["etag"];
+            if (metadata.contains("etag") && metadata.at("etag").is_string()) {
+                etag = metadata.at("etag");
            }
-            if (metadata.contains("lastModified") && metadata["lastModified"].is_string()) {
-                last_modified = metadata["lastModified"];
+            if (metadata.contains("lastModified") && metadata.at("lastModified").is_string()) {
+                last_modified = metadata.at("lastModified");
            }
        } catch (const nlohmann::json::exception & e) {
            fprintf(stderr, "%s: error reading metadata file %s: %s\n", __func__, metadata_path.c_str(), e.what());

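Two of the `common.cpp` changes are easy to miss in the diff: the `JSON_ASSERT` define must appear before `json.hpp` is included so the library's internal assertions route through `GGML_ASSERT`, and the switch from `operator[]` to `.at()` uses checked access. A minimal sketch of the `.at()` versus `operator[]` difference, assuming standard nlohmann::json semantics (illustrative, not part of the commit):

```cpp
// Illustrative only: checked vs. unchecked key access in nlohmann::json.
#include <iostream>
#include <string>
#include <nlohmann/json.hpp> // vendored as "json.hpp" in llama.cpp

int main() {
    nlohmann::json metadata = {{"url", "https://example.com/model.gguf"}};

    // .at() throws json::out_of_range for a missing key and never mutates the object.
    try {
        std::string etag = metadata.at("etag").get<std::string>();
    } catch (const nlohmann::json::out_of_range & e) {
        std::cerr << "missing key: " << e.what() << "\n";
    }

    // operator[] on a non-const object silently inserts a null value for the key.
    auto etag = metadata["etag"];
    std::cout << metadata.dump() << "\n"; // {"etag":null,"url":"..."}
    return 0;
}
```
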
common/common.h

Lines changed: 1 addition & 0 deletions
@@ -140,6 +140,7 @@ struct gpt_params {
     bool random_prompt = false; // do not randomize prompt if none provided
     bool use_color = false; // use color to distinguish generations and inputs
     bool interactive = false; // interactive mode
+    bool conversation = false; // conversation mode (does not print special tokens and suffix/prefix)
     bool chatml = false; // chatml mode (used for models trained on chatml syntax)
     bool prompt_cache_all = false; // save user input and generations to prompt cache
     bool prompt_cache_ro = false; // open the prompt cache read-only and do not update it

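A rough sketch of how a front end might consume the new `conversation` flag, matching the help text added above (hypothetical caller code; the actual `main.cpp` handling is not part of this excerpt):

```cpp
// Hypothetical sketch (not from this commit): suppressing special tokens
// when gpt_params::conversation is enabled.
#include <cstdio>
#include <string>
#include "common.h"

static void emit_piece(const gpt_params & params, const std::string & piece, bool is_special) {
    if (params.conversation && is_special) {
        return; // conversation mode: do not echo special tokens to the user
    }
    fputs(piece.c_str(), stdout);
    fflush(stdout);
}
```
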
common/json-schema-to-grammar.h

Lines changed: 4 additions & 0 deletions
@@ -1,4 +1,8 @@
 #pragma once
+
+#include "ggml.h"
+// Change JSON_ASSERT from assert() to GGML_ASSERT:
+#define JSON_ASSERT GGML_ASSERT
 #include "json.hpp"

 std::string json_schema_to_grammar(const nlohmann::ordered_json& schema);

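For context, `json_schema_to_grammar()` is the helper whose declaration gains the `ggml.h`/`JSON_ASSERT` prelude above; in llama.cpp it converts a JSON schema into a grammar string that can constrain sampling. A minimal usage sketch (illustrative; error handling omitted):

```cpp
// Illustrative usage of the declared helper (not part of the commit).
#include <iostream>
#include <string>
#include "json-schema-to-grammar.h"

int main() {
    // A tiny JSON schema: an object with one required string field.
    const auto schema = nlohmann::ordered_json::parse(R"({
        "type": "object",
        "properties": { "name": { "type": "string" } },
        "required": ["name"]
    })");

    // Produce a grammar that restricts generations to objects matching the schema.
    const std::string grammar = json_schema_to_grammar(schema);
    std::cout << grammar << std::endl;
    return 0;
}
```
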
common/sampling.cpp

Lines changed: 5 additions & 0 deletions
@@ -35,6 +35,8 @@ struct llama_sampling_context * llama_sampling_init(const struct llama_sampling_

     result->prev.resize(params.n_prev);

+    result->n_considered = 0;
+
     llama_sampling_set_rng_seed(result, params.seed);

     return result;

@@ -64,6 +66,7 @@ void llama_sampling_reset(llama_sampling_context * ctx) {

     std::fill(ctx->prev.begin(), ctx->prev.end(), 0);
     ctx->cur.clear();
+    ctx->n_considered = 0;
 }

 void llama_sampling_set_rng_seed(struct llama_sampling_context * ctx, uint32_t seed) {

@@ -253,6 +256,8 @@ static llama_token llama_sampling_sample_impl(
         }
     }

+    ctx_sampling->n_considered = cur_p.size;
+
     return id;
 }

common/sampling.h

Lines changed: 1 addition & 0 deletions
@@ -81,6 +81,7 @@ struct llama_sampling_context {
     // TODO: replace with ring-buffer
     std::vector<llama_token> prev;
     std::vector<llama_token_data> cur;
+    size_t n_considered;

     std::mt19937 rng;
 };

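Together with the `sampling.cpp` hunks above, the new `n_considered` field records how many candidate tokens were still in play when the last token was drawn. A hedged caller-side sketch (the exact `llama_sampling_sample`/`llama_sampling_accept` signatures are assumed from the surrounding API, not shown in this diff):

```cpp
// Hypothetical caller-side sketch (not part of the commit): reporting how many
// candidates the sampling chain kept for the last sampled token.
#include <cstdio>
#include "sampling.h"

static llama_token sample_and_report(llama_sampling_context * ctx_sampling,
                                     llama_context * ctx_main,
                                     int idx) {
    const llama_token id = llama_sampling_sample(ctx_sampling, ctx_main, /*ctx_cfg=*/nullptr, idx);
    llama_sampling_accept(ctx_sampling, ctx_main, id, /*apply_grammar=*/true);

    // n_considered is filled in by llama_sampling_sample_impl() with cur_p.size.
    fprintf(stderr, "token %d sampled from %zu considered candidates\n",
            (int) id, ctx_sampling->n_considered);
    return id;
}
```
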
convert-hf-to-gguf-update.py

File mode changed: 100644 → 100755

Lines changed: 16 additions & 0 deletions

@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
 # This script downloads the tokenizer models of the specified models from Huggingface and
 # generates the get_vocab_base_pre() function for convert-hf-to-gguf.py
 #

@@ -64,6 +66,10 @@ class TOKENIZER_TYPE(IntEnum):
     {"name": "starcoder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/bigcode/starcoder2-3b", },
     {"name": "gpt-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/openai-community/gpt2", },
     {"name": "refact", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/smallcloudai/Refact-1_6-base", },
+    {"name": "command-r", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/CohereForAI/c4ai-command-r-v01", },
+    {"name": "qwen2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Qwen/Qwen1.5-7B", },
+    {"name": "olmo", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/allenai/OLMo-1.7-7B-hf", },
+    {"name": "dbrx", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/databricks/dbrx-base", },
 ]

 # make directory "models/tokenizers" if it doesn't exist

@@ -104,6 +110,14 @@ def download_file_with_auth(url, token, save_path):
     save_path = f"models/tokenizers/{name}/tokenizer.json"
     download_file_with_auth(url, token, save_path)

+    # if downloaded file is less than 1KB, we likely need to download an LFS instead
+    if os.path.getsize(save_path) < 1024:
+        # remove the file
+        os.remove(save_path)
+        url = f"{repo}/resolve/main/tokenizer.json"
+        save_path = f"models/tokenizers/{name}/tokenizer.json"
+        download_file_with_auth(url, token, save_path)
+
     if tokt == TOKENIZER_TYPE.SPM:
         url = f"{repo}/resolve/main/tokenizer.model"
         save_path = f"models/tokenizers/{name}/tokenizer.model"

@@ -139,6 +153,8 @@ def download_file_with_auth(url, token, save_path):
 # print the "pre_tokenizer" content from the tokenizer.json
 with open(f"models/tokenizers/{name}/tokenizer.json", "r", encoding="utf-8") as f:
     cfg = json.load(f)
+    normalizer = cfg["normalizer"]
+    logger.info("normalizer: " + json.dumps(normalizer, indent=4))
     pre_tokenizer = cfg["pre_tokenizer"]
     logger.info("pre_tokenizer: " + json.dumps(pre_tokenizer, indent=4))