
Commit 9fa007c

fix microbatch output counting; add attention_type context parameter
1 parent 139cc62 · commit 9fa007c

File tree

4 files changed · +36 -10 lines changed

common/common.cpp

Lines changed: 12 additions & 1 deletion

@@ -472,6 +472,14 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         else { invalid_param = true; }
         return true;
     }
+    if (arg == "--attention") {
+        CHECK_ARG
+        std::string value(argv[i]);
+        /**/ if (value == "causal")     { params.attention_type = LLAMA_ATTENTION_TYPE_CAUSAL; }
+        else if (value == "non-causal") { params.attention_type = LLAMA_ATTENTION_TYPE_NON_CAUSAL; }
+        else { invalid_param = true; }
+        return true;
+    }
     if (arg == "--defrag-thold" || arg == "-dt") {
         CHECK_ARG
         params.defrag_thold = std::stof(argv[i]);

@@ -1454,8 +1462,10 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
         "For schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead" });

     options.push_back({ "embedding" });
-    options.push_back({ "embedding", "       --pooling {none,mean,cls}",
+    options.push_back({ "embedding", "       --pooling {none,mean,cls,last}",
         "pooling type for embeddings, use model default if unspecified" });
+    options.push_back({ "embedding", "       --attention {causal,non-causal}",
+        "attention type for embeddings, use model default if unspecified" });

     options.push_back({ "context hacking" });
     options.push_back({ "*", "       --rope-scaling {none,linear,yarn}",

@@ -2144,6 +2154,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
     cparams.yarn_beta_slow    = params.yarn_beta_slow;
     cparams.yarn_orig_ctx     = params.yarn_orig_ctx;
     cparams.pooling_type      = params.pooling_type;
+    cparams.attention_type    = params.attention_type;
     cparams.defrag_thold      = params.defrag_thold;
     cparams.cb_eval           = params.cb_eval;
     cparams.cb_eval_user_data = params.cb_eval_user_data;
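For a quick check of the new flag, an invocation through the embedding example might look like this (binary name varies by build, e.g. embedding or llama-embedding; model path and prompt are placeholders):

    ./llama-embedding -m model.gguf --pooling mean --attention non-causal -p "text to embed"

Any value other than causal or non-causal sets invalid_param, mirroring the neighboring --pooling parser.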

common/common.h

Lines changed: 1 addition & 0 deletions

@@ -99,6 +99,7 @@ struct gpt_params {
     enum llama_split_mode        split_mode        = LLAMA_SPLIT_MODE_LAYER; // how to split the model across GPUs
     enum llama_rope_scaling_type rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED;
     enum llama_pooling_type      pooling_type      = LLAMA_POOLING_TYPE_UNSPECIFIED; // pooling type for embeddings
+    enum llama_attention_type    attention_type    = LLAMA_ATTENTION_TYPE_UNSPECIFIED; // attention type for embeddings

     // // sampling parameters
     struct llama_sampling_params sparams;

include/llama.h

Lines changed: 7 additions & 0 deletions

@@ -179,6 +179,12 @@ extern "C" {
         LLAMA_POOLING_TYPE_LAST = 3,
     };

+    enum llama_attention_type {
+        LLAMA_ATTENTION_TYPE_UNSPECIFIED = -1,
+        LLAMA_ATTENTION_TYPE_CAUSAL      = 0,
+        LLAMA_ATTENTION_TYPE_NON_CAUSAL  = 1,
+    };
+
     enum llama_split_mode {
         LLAMA_SPLIT_MODE_NONE  = 0, // single GPU
         LLAMA_SPLIT_MODE_LAYER = 1, // split layers and KV across GPUs

@@ -296,6 +302,7 @@ extern "C" {

         enum llama_rope_scaling_type rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`
         enum llama_pooling_type      pooling_type;      // whether to pool (sum) embedding results by sequence id
+        enum llama_attention_type    attention_type;    // attention type to use for embeddings

         // ref: https://github.com/ggerganov/llama.cpp/pull/2054
         float rope_freq_base; // RoPE base frequency, 0 = from model
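A minimal sketch of how a caller could set the new field through the public API (assumes model is an already-loaded llama_model *; error handling omitted):

    // request pooled embeddings with the causal mask explicitly disabled
    llama_context_params cparams = llama_context_default_params();
    cparams.embeddings     = true;
    cparams.pooling_type   = LLAMA_POOLING_TYPE_MEAN;
    cparams.attention_type = LLAMA_ATTENTION_TYPE_NON_CAUSAL; // override the model default
    llama_context * ctx = llama_new_context_with_model(model, cparams);

Leaving attention_type at LLAMA_ATTENTION_TYPE_UNSPECIFIED keeps the old behavior: the causal/non-causal choice is taken from the model's hyperparameters.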

src/llama.cpp

Lines changed: 16 additions & 9 deletions

@@ -12715,7 +12715,7 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
         }
     }

-    if (cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) {
+    if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) {
         const int64_t n_tokens = batch.n_tokens;

         GGML_ASSERT(lctx.inp_mean);

@@ -12747,7 +12747,7 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
         }
     }

-    if (cparams.pooling_type == LLAMA_POOLING_TYPE_CLS) {
+    if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_CLS) {
         const int64_t n_tokens = batch.n_tokens;

         GGML_ASSERT(lctx.inp_cls);

@@ -12768,7 +12768,7 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
         }
     }

-    if (cparams.pooling_type == LLAMA_POOLING_TYPE_LAST) {
+    if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_LAST) {
         const int64_t n_tokens = batch.n_tokens;

         GGML_ASSERT(lctx.inp_cls);

@@ -12990,14 +12990,15 @@ static int llama_decode_internal(
     std::vector<llama_seq_id *> seq_id_arr;
     std::vector<std::vector<llama_seq_id>> seq_id;

+    // this indicates we are doing pooled embedding, so we ignore batch.logits and output all tokens
+    const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE;
+
     // count outputs
-    if (cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE) {
-        n_outputs = n_tokens_all;
-    } else if (batch_all.logits) {
+    if (batch_all.logits && !embd_pooled) {
         for (uint32_t i = 0; i < n_tokens_all; ++i) {
             n_outputs += batch_all.logits[i] != 0;
         }
-    } else if (lctx.logits_all) {
+    } else if (lctx.logits_all || embd_pooled) {
         n_outputs = n_tokens_all;
     } else {
         // keep last output only

@@ -13043,7 +13044,7 @@ static int llama_decode_internal(
     {
         int32_t n_outputs_new = 0;

-        if (u_batch.logits) {
+        if (u_batch.logits && !embd_pooled) {
             for (uint32_t i = 0; i < n_tokens; i++) {
                 n_outputs_new += u_batch.logits[i] != 0;
             }
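This is the microbatch fix from the commit title: the pooled-embedding case previously short-circuited only the whole-batch count, while the per-microbatch count over u_batch.logits still honored the per-token flags, so the two counts could disagree whenever a batch carried a logits array. Hoisting embd_pooled lets both sites share one rule, roughly (a condensed sketch with names simplified from the surrounding code):

    // pooled embeddings ignore the per-token logits flags: every token is an output
    if (logits != NULL && !embd_pooled) {
        for (uint32_t i = 0; i < n_tokens; ++i) { n_outputs += logits[i] != 0; }
    } else if (logits_all || embd_pooled) {
        n_outputs = n_tokens;
    } else {
        n_outputs = 1; // keep last output only
    }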
@@ -17202,6 +17203,7 @@ struct llama_context_params llama_context_default_params() {
         /*.n_threads_batch   =*/ GGML_DEFAULT_N_THREADS,
         /*.rope_scaling_type =*/ LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED,
         /*.pooling_type      =*/ LLAMA_POOLING_TYPE_UNSPECIFIED,
+        /*.attention_type    =*/ LLAMA_ATTENTION_TYPE_UNSPECIFIED,
         /*.rope_freq_base    =*/ 0.0f,
         /*.rope_freq_scale   =*/ 0.0f,
         /*.yarn_ext_factor   =*/ -1.0f,

@@ -17448,7 +17450,6 @@ struct llama_context * llama_new_context_with_model(
     }

     cparams.yarn_attn_factor *= hparams.rope_attn_factor;
-    cparams.causal_attn = hparams.causal_attn;

     if (cparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) {
         if (hparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) {

@@ -17458,6 +17459,12 @@ struct llama_context * llama_new_context_with_model(
         }
     }

+    if (params.attention_type == LLAMA_ATTENTION_TYPE_UNSPECIFIED) {
+        cparams.causal_attn = hparams.causal_attn;
+    } else {
+        cparams.causal_attn = params.attention_type == LLAMA_ATTENTION_TYPE_CAUSAL;
+    }
+
     if (params.seed == LLAMA_DEFAULT_SEED) {
         params.seed = time(NULL);
     }
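Net effect of the last two hunks: cparams.causal_attn now follows the same resolve-the-default pattern as pooling_type a few lines earlier, taking the model hyperparameter only when the caller left attention_type unspecified. The logic is equivalent to this one-liner (illustrative only, not what the commit writes):

    cparams.causal_attn = params.attention_type == LLAMA_ATTENTION_TYPE_UNSPECIFIED
        ? hparams.causal_attn
        : params.attention_type == LLAMA_ATTENTION_TYPE_CAUSAL;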
