
Commit 9b84ae1

examples : add GBNF validator program (#5948)
* Revising GBNF validator program to be much simpler.
* Changing from streams to using cstdio
* Adding final newline character.
1 parent 4399f13 commit 9b84ae1
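A quick way to try the new example (the file names below are illustrative, not part of the commit): build the gbnf-validator target added to the Makefile in this change, then pass a grammar file and an input file, matching the usage string in gbnf-validator.cpp.

    make gbnf-validator
    ./gbnf-validator my-grammar.gbnf my-input.txt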

File tree

5 files changed: +171, -20 lines changed


Makefile

Lines changed: 4 additions & 0 deletions

@@ -867,6 +867,10 @@ passkey: examples/passkey/passkey.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

+gbnf-validator: examples/gbnf-validator/gbnf-validator.cpp ggml.o llama.o $(COMMON_DEPS) grammar-parser.o $(OBJS)
+	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+
 ifeq ($(UNAME_S),Darwin)
 swift: examples/batched.swift
 	(cd examples/batched.swift; make build)
examples/gbnf-validator/CMakeLists.txt

Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+set(TARGET gbnf-validator)
+add_executable(${TARGET} gbnf-validator.cpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE common grammar-parser llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_11)
examples/gbnf-validator/gbnf-validator.cpp

Lines changed: 132 additions & 0 deletions

@@ -0,0 +1,132 @@
+#define LLAMA_API_INTERNAL
+
+#include "grammar-parser.h"
+#include "ggml.h"
+#include "llama.h"
+#include "unicode.h"
+
+#include <cstdio>
+#include <cstdlib>
+#include <string>
+#include <vector>
+
+static bool llama_sample_grammar_string(struct llama_grammar * grammar, const std::string & input_str, size_t & error_pos, std::string & error_msg) {
+    auto decoded = decode_utf8(input_str, {});
+    const auto & code_points = decoded.first;
+
+    size_t pos = 0;
+    for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
+        auto prev_stacks = grammar->stacks;
+        grammar->stacks = llama_grammar_accept(grammar->rules, grammar->stacks, *it);
+        if (grammar->stacks.empty()) {
+            error_pos = pos;
+            error_msg = "Unexpected character '" + unicode_cpt_to_utf8(*it) + "'";
+            grammar->stacks = prev_stacks;
+            return false;
+        }
+        ++pos;
+    }
+
+    for (const auto & stack : grammar->stacks) {
+        if (stack.empty()) {
+            return true;
+        }
+    }
+
+    error_pos = pos;
+    error_msg = "Unexpected end of input";
+    return false;
+}
+
+static void print_error_message(const std::string & input_str, size_t error_pos, const std::string & error_msg) {
+    fprintf(stdout, "Input string is invalid according to the grammar.\n");
+    fprintf(stdout, "Error: %s at position %zu\n", error_msg.c_str(), error_pos);
+    fprintf(stdout, "\n");
+    fprintf(stdout, "Input string:\n");
+    fprintf(stdout, "%s", input_str.substr(0, error_pos).c_str());
+    if (error_pos < input_str.size()) {
+        fprintf(stdout, "\033[1;31m%c", input_str[error_pos]);
+        if (error_pos+1 < input_str.size()) {
+            fprintf(stdout, "\033[0;31m%s", input_str.substr(error_pos+1).c_str());
+        }
+        fprintf(stdout, "\033[0m\n");
+    }
+}
+
+int main(int argc, char** argv) {
+    if (argc != 3) {
+        fprintf(stdout, "Usage: %s <grammar_filename> <input_filename>\n", argv[0]);
+        return 1;
+    }
+
+    const std::string grammar_filename = argv[1];
+    const std::string input_filename = argv[2];
+
+    // Read the GBNF grammar file
+    FILE* grammar_file = fopen(grammar_filename.c_str(), "r");
+    if (!grammar_file) {
+        fprintf(stdout, "Failed to open grammar file: %s\n", grammar_filename.c_str());
+        return 1;
+    }
+
+    fseek(grammar_file, 0, SEEK_END);
+    size_t grammar_size = ftell(grammar_file);
+    fseek(grammar_file, 0, SEEK_SET);
+
+    std::string grammar_str(grammar_size, ' ');
+    fread(&grammar_str[0], 1, grammar_size, grammar_file);
+    fclose(grammar_file);
+
+    // Parse the GBNF grammar
+    auto parsed_grammar = grammar_parser::parse(grammar_str.c_str());
+
+    // will be empty (default) if there are parse errors
+    if (parsed_grammar.rules.empty()) {
+        fprintf(stdout, "%s: failed to parse grammar\n", __func__);
+        return 1;
+    }
+
+    // Ensure that there is a "root" node.
+    if (parsed_grammar.symbol_ids.find("root") == parsed_grammar.symbol_ids.end()) {
+        fprintf(stdout, "%s: grammar does not contain a 'root' symbol\n", __func__);
+        return 1;
+    }
+
+    std::vector<const llama_grammar_element *> grammar_rules(parsed_grammar.c_rules());
+
+    // Create the LLAMA grammar
+    auto grammar = llama_grammar_init(
+        grammar_rules.data(),
+        grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));
+
+    // Read the input file
+    FILE* input_file = fopen(input_filename.c_str(), "r");
+    if (!input_file) {
+        fprintf(stdout, "Failed to open input file: %s\n", input_filename.c_str());
+        return 1;
+    }
+
+    fseek(input_file, 0, SEEK_END);
+    size_t input_size = ftell(input_file);
+    fseek(input_file, 0, SEEK_SET);
+
+    std::string input_str(input_size, ' ');
+    fread(&input_str[0], 1, input_size, input_file);
+    fclose(input_file);
+
+    // Validate the input string against the grammar
+    size_t error_pos;
+    std::string error_msg;
+    bool is_valid = llama_sample_grammar_string(grammar, input_str, error_pos, error_msg);
+
+    if (is_valid) {
+        fprintf(stdout, "Input string is valid according to the grammar.\n");
+    } else {
+        print_error_message(input_str, error_pos, error_msg);
+    }
+
+    // Clean up
+    llama_grammar_free(grammar);
+
+    return 0;
+}
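As an illustration of what the validator checks, a tiny grammar and input pair might look like the following. Both files are hypothetical examples, not shipped with the commit; the grammar allows an optional trailing newline because the program feeds the entire input file, including any final newline, to the grammar.

    # example.gbnf (hypothetical)
    root ::= ("yes" | "no") "\n"?

    # input.txt (hypothetical)
    yes

With such a pair, running gbnf-validator example.gbnf input.txt should report "Input string is valid according to the grammar."; an input such as "maybe" would instead go through print_error_message, which highlights the first character the grammar cannot accept.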

llama.cpp

Lines changed: 2 additions & 20 deletions

@@ -11621,28 +11621,10 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
 // grammar - internal
 //

-struct llama_partial_utf8 {
-    uint32_t value;  // bit value so far (unshifted)
-    int n_remain;    // num bytes remaining; -1 indicates invalid sequence
-};
-
-struct llama_grammar {
-    const std::vector<std::vector<llama_grammar_element>> rules;
-    std::vector<std::vector<const llama_grammar_element *>> stacks;
-
-    // buffer for partially generated UTF-8 sequence from accepted tokens
-    llama_partial_utf8 partial_utf8;
-};
-
-struct llama_grammar_candidate {
-    size_t index;
-    const uint32_t * code_points;
-    llama_partial_utf8 partial_utf8;
-};

 // Decodes a UTF-8 string which may end in an incomplete sequence. Adds a terminating 0 for use as
 // pointer. If an invalid sequence is encountered, returns `llama_partial_utf8.n_remain == -1`.
-static std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
+std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
         const std::string & src,
         llama_partial_utf8 partial_start) {
     static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 3, 4 };

@@ -11844,7 +11826,7 @@ static void llama_grammar_advance_stack(
 // be positioned at a character range (see `llama_grammar_advance_stack`), and
 // produces the N possible stacks if the given char is accepted at those
 // positions
-static std::vector<std::vector<const llama_grammar_element *>> llama_grammar_accept(
+std::vector<std::vector<const llama_grammar_element *>> llama_grammar_accept(
         const std::vector<std::vector<llama_grammar_element>> & rules,
         const std::vector<std::vector<const llama_grammar_element *>> & stacks,
         const uint32_t chr) {

llama.h

Lines changed: 28 additions & 0 deletions

@@ -1007,10 +1007,38 @@ extern "C" {

 struct ggml_tensor;

+struct llama_partial_utf8 {
+    uint32_t value;  // bit value so far (unshifted)
+    int n_remain;    // num bytes remaining; -1 indicates invalid sequence
+};
+
+struct llama_grammar {
+    const std::vector<std::vector<llama_grammar_element>> rules;
+    std::vector<std::vector<const llama_grammar_element *>> stacks;
+
+    // buffer for partially generated UTF-8 sequence from accepted tokens
+    llama_partial_utf8 partial_utf8;
+};
+
+struct llama_grammar_candidate {
+    size_t index;
+    const uint32_t * code_points;
+    llama_partial_utf8 partial_utf8;
+};
+
 const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
     struct llama_context * ctx
 );

+std::vector<std::vector<const llama_grammar_element *>> llama_grammar_accept(
+        const std::vector<std::vector<llama_grammar_element>> & rules,
+        const std::vector<std::vector<const llama_grammar_element *>> & stacks,
+        const uint32_t chr);
+
+std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
+        const std::string & src,
+        llama_partial_utf8 partial_start);
+
 #endif // LLAMA_API_INTERNAL

 #endif // LLAMA_H
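The declarations added to llama.h above sit behind LLAMA_API_INTERNAL, so they are only visible to translation units that define that macro before including the header, as gbnf-validator.cpp does. A minimal C++ sketch of that usage pattern follows; the helper name prefix_accepted and its surrounding setup are assumptions for illustration, not part of the commit.

// Sketch only: assumes a struct llama_grammar was already built elsewhere,
// e.g. via grammar_parser::parse + llama_grammar_init as in the example program above.
#define LLAMA_API_INTERNAL
#include "llama.h"

#include <string>

// Returns true if every code point of `text` can be consumed by the grammar
// without exhausting all parse stacks.
static bool prefix_accepted(const struct llama_grammar * grammar, const std::string & text) {
    auto decoded = decode_utf8(text, {});   // exposed by the header change above
    const auto & cpts = decoded.first;      // code points plus a terminating 0
    auto stacks = grammar->stacks;          // work on a copy; leave the grammar untouched
    for (auto it = cpts.begin(), end = cpts.end() - 1; it != end; ++it) {
        stacks = llama_grammar_accept(grammar->rules, stacks, *it);
        if (stacks.empty()) {
            return false;                   // no parse stack accepts this character
        }
    }
    return true;
}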
