Skip to content

Commit 359cbe3

Browse files
authored
ggml-quants, llama : removed excess checks (#7274)
1 parent e18bc6a commit 359cbe3

File tree

3 files changed: +4 additions, −8 deletions

common/common.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2553,7 +2553,7 @@ void dump_string_yaml_multiline(FILE * stream, const char * prop_name, const cha
     size_t pos_start = 0;
     size_t pos_found = 0;

-    if (!data_str.empty() && (std::isspace(data_str[0]) || std::isspace(data_str.back()))) {
+    if (std::isspace(data_str[0]) || std::isspace(data_str.back())) {
         data_str = std::regex_replace(data_str, std::regex("\n"), "\\n");
         data_str = std::regex_replace(data_str, std::regex("\""), "\\\"");
         data_str = std::regex_replace(data_str, std::regex(R"(\\[^n"])"), R"(\$&)");

ggml-quants.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1986,7 +1986,7 @@ static void quantize_row_q3_K_impl(const float * restrict x, block_q3_K * restri

     for (int j = 0; j < QK_K/16; ++j) {
         if (quant_weights) {
-            const float * qw = quant_weights ? quant_weights + QK_K * i + 16*j : NULL;
+            const float * qw = quant_weights + QK_K * i + 16*j;
             for (int l = 0; l < 16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j+l]*x[16*j+l]);
         } else {
             for (int l = 0; l < 16; ++l) weight[l] = x[16*j+l]*x[16*j+l];

llama.cpp

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -13904,9 +13904,7 @@ llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_

     // Sample the next word X using top-k sampling
     llama_sample_top_k(nullptr, candidates, int(k), 1);
-    if (ctx) {
-        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
-    }
+    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
     llama_token X = llama_sample_token(ctx, candidates);
     t_start_sample_us = ggml_time_us();

@@ -13920,9 +13918,7 @@ llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_

     // Update mu using the learning rate and error
     *mu = *mu - eta * e;

-    if (ctx) {
-        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
-    }
+    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
     return X;
 }

0 commit comments

Comments (0)