
Commit c54b2bc

Revert "Revert "llama : dynamic temperature sampling (ggml-org#4972)""
This reverts commit 8c7db9a.
1 parent 302f586 commit c54b2bc

File tree

4 files changed: +88 -1 lines changed


common/sampling.cpp

Lines changed: 11 additions & 1 deletion
@@ -129,6 +129,8 @@ static void sampler_queue(
     const int n_vocab = llama_n_vocab(llama_get_model(ctx_main));
 
     const float   temp              = params.temp;
+    const float   dynatemp_range    = params.dynatemp_range;
+    const float   dynatemp_exponent = params.dynatemp_exponent;
     const int32_t top_k             = params.top_k <= 0 ? n_vocab : params.top_k;
     const float   top_p             = params.top_p;
     const float   min_p             = params.min_p;
@@ -143,7 +145,15 @@ static void sampler_queue(
             case 'y': llama_sample_typical (ctx_main, &cur_p, typical_p, min_keep); break;
             case 'p': llama_sample_top_p   (ctx_main, &cur_p, top_p,     min_keep); break;
             case 'm': llama_sample_min_p   (ctx_main, &cur_p, min_p,     min_keep); break;
-            case 't': llama_sample_temp    (ctx_main, &cur_p, temp); break;
+            case 't':
+                if (dynatemp_range > 0) {
+                    float dynatemp_min = std::max(0.0f, temp - dynatemp_range);
+                    float dynatemp_max = std::max(0.0f, temp + dynatemp_range);
+                    llama_sample_entropy(ctx_main, &cur_p, dynatemp_min, dynatemp_max, dynatemp_exponent);
+                } else {
+                    llama_sample_temp(ctx_main, &cur_p, temp);
+                }
+                break;
             default : break;
         }
     }
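
For reference, the dispatch above turns the single temp setting into a symmetric window around it before calling the entropy sampler. A minimal standalone sketch of that mapping (illustrative only; the function name is made up and not part of the patch):

#include <algorithm>

// Mirrors how sampler_queue derives the window handed to llama_sample_entropy:
// [temp - dynatemp_range, temp + dynatemp_range], clamped at 0.
// With temp = 0.8f and dynatemp_range = 0.5f this gives [0.3f, 1.3f].
static void dynatemp_window(float temp, float dynatemp_range, float & min_temp, float & max_temp) {
    min_temp = std::max(0.0f, temp - dynatemp_range);
    max_temp = std::max(0.0f, temp + dynatemp_range);
}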

common/sampling.h

Lines changed: 2 additions & 0 deletions
@@ -18,6 +18,8 @@ typedef struct llama_sampling_params {
     float   tfs_z             = 1.00f; // 1.0 = disabled
     float   typical_p         = 1.00f; // 1.0 = disabled
     float   temp              = 0.80f; // <= 0.0 to sample greedily, 0.0 to not output probabilities
+    float   dynatemp_range    = 0.00f; // 0.0 = disabled
+    float   dynatemp_exponent = 1.00f; // controls how entropy maps to temperature in dynamic temperature sampler
     int32_t penalty_last_n    = 64;    // last n tokens to penalize (0 = disable penalty, -1 = context size)
     float   penalty_repeat    = 1.10f; // 1.0 = disabled
     float   penalty_freq      = 0.00f; // 0.0 = disabled
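
As a usage sketch (not part of this commit), enabling dynamic temperature from application code amounts to setting the two new fields; with temp = 0.80 and dynatemp_range = 0.40 the effective temperature then varies between 0.40 and 1.20 depending on the entropy of the candidate distribution:

llama_sampling_params sparams;
sparams.temp              = 0.80f;
sparams.dynatemp_range    = 0.40f; // effective temperature spans [0.40, 1.20]
sparams.dynatemp_exponent = 1.00f; // 1.0 = linear mapping from normalized entropy to temperature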

llama.cpp

Lines changed: 67 additions & 0 deletions
@@ -8708,6 +8708,73 @@ void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * c
     }
 }
 
+void llama_sample_entropy(struct llama_context * ctx, llama_token_data_array * candidates_p, float min_temp, float max_temp, float exponent_val) {
+    const int64_t t_start_sample_us = ggml_time_us();
+
+    // no need to do anything if there is only one (or zero) candidates
+    if(candidates_p->size <= 1) {
+        return;
+    }
+
+    // Calculate maximum possible entropy
+    float max_entropy = -logf(1.0f / candidates_p->size);
+
+    llama_sample_softmax(nullptr, candidates_p);
+
+    // Calculate entropy of the softmax probabilities
+    float entropy = 0.0f;
+    for (size_t i = 0; i < candidates_p->size; ++i) {
+        float prob = candidates_p->data[i].p;
+        if (prob > 0.0f) { // Ensure no log(0)
+            entropy -= prob * logf(prob);
+        }
+    }
+
+    // Normalize the entropy (max_entropy cannot be 0 here because we checked candidates_p->size != 1 above)
+    float normalized_entropy = entropy / max_entropy;
+
+    // Map the normalized entropy to the desired temperature range using the power function
+    float dyn_temp = min_temp + (max_temp - min_temp) * powf(normalized_entropy, exponent_val);
+
+#ifdef DEBUG
+    LLAMA_LOG_INFO("Your text maxtemp value is: %f\n", max_temp);
+    LLAMA_LOG_INFO("Entropy: %f\n", entropy);
+    LLAMA_LOG_INFO("Max Possible Entropy: %f\n", max_entropy);
+    LLAMA_LOG_INFO("Normalized Entropy: %f\n", normalized_entropy);
+    LLAMA_LOG_INFO("Exponent: %f\n", exponent_val);
+    LLAMA_LOG_INFO("Dynamic Temperature (dyn_temp): %f\n", dyn_temp);
+#endif
+
+    // Apply the dynamically calculated temperature scaling
+    for (size_t i = 0; i < candidates_p->size; ++i) {
+        candidates_p->data[i].logit /= dyn_temp;
+    }
+
+    // Re-compute softmax probabilities after scaling logits with dynamic temperature
+    double max_l_double = candidates_p->data[0].logit;
+    double cum_sum_double = 0.0;
+    for (size_t i = 0; i < candidates_p->size; ++i) {
+        double p = exp(candidates_p->data[i].logit - max_l_double);
+        candidates_p->data[i].p = p; // Store the scaled probability
+        cum_sum_double += p;
+    }
+    for (size_t i = 0; i < candidates_p->size; ++i) {
+        candidates_p->data[i].p /= cum_sum_double; // Re-normalize the probabilities
+    }
+
+#ifdef DEBUG
+    // Print the updated top 25 probabilities after temperature scaling
+    LLAMA_LOG_INFO("\nUpdated Top 25 Probabilities After Dynamic Temperature Scaling (in percentages):\n");
+    for (size_t i = 0; i < 25 && i < candidates_p->size; ++i) {
+        LLAMA_LOG_INFO("Token %zu: %f%%\n", i + 1, candidates_p->data[i].p * 100.0f);
+    }
+#endif
+
+    if (ctx) {
+        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
+    }
+}
+
 void llama_sample_temp(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
     const int64_t t_start_sample_us = ggml_time_us();
 
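
The heart of llama_sample_entropy is the mapping from normalized Shannon entropy to a temperature in [min_temp, max_temp]. A self-contained sketch of just that calculation on a plain probability vector (illustrative only; names and types here are not from llama.cpp):

#include <cmath>
#include <vector>

// dyn_temp = min_temp + (max_temp - min_temp) * pow(H / H_max, exponent),
// where H is the Shannon entropy of probs and H_max = log(N) is the entropy
// of the uniform distribution over N candidates.
static float dynamic_temperature(const std::vector<float> & probs,
                                 float min_temp, float max_temp, float exponent) {
    if (probs.size() <= 1) {
        return min_temp;
    }
    float entropy = 0.0f;
    for (float p : probs) {
        if (p > 0.0f) {
            entropy -= p * logf(p);
        }
    }
    const float max_entropy = logf((float) probs.size());
    const float normalized  = entropy / max_entropy;
    return min_temp + (max_temp - min_temp) * powf(normalized, exponent);
}

// A peaked (confident) distribution has low entropy and yields a temperature
// near min_temp; a near-uniform (uncertain) one approaches max_temp.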

llama.h

Lines changed: 8 additions & 0 deletions
@@ -778,6 +778,14 @@ extern "C" {
                          float   p,
                         size_t   min_keep);
 
+    /// @details Dynamic temperature implementation described in the paper https://arxiv.org/abs/2309.02772.
+    LLAMA_API void llama_sample_entropy(
+              struct llama_context * ctx,
+            llama_token_data_array * candidates_p,
+                             float   min_temp,
+                             float   max_temp,
+                             float   exponent_val);
+
     LLAMA_API void llama_sample_temp(
               struct llama_context * ctx,
             llama_token_data_array * candidates,
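
A usage sketch of the new API from a typical sampling step (assumes the llama.cpp C API of this period, a valid ctx and model, and that a decode call has already produced logits; the helper name is made up and not part of the commit):

#include <vector>

#include "llama.h"

// Hypothetical helper: samples one token using entropy-based dynamic temperature
// in place of a fixed llama_sample_temp call.
static llama_token sample_with_dynatemp(llama_context * ctx, const llama_model * model) {
    const int n_vocab = llama_n_vocab(model);
    const float * logits = llama_get_logits(ctx); // logits of the last evaluated token

    std::vector<llama_token_data> candidates;
    candidates.reserve(n_vocab);
    for (llama_token id = 0; id < n_vocab; ++id) {
        candidates.push_back(llama_token_data{ id, logits[id], 0.0f });
    }
    llama_token_data_array cur_p = { candidates.data(), candidates.size(), false };

    // temp = 0.8 with dynatemp_range = 0.5 corresponds to min_temp = 0.3, max_temp = 1.3
    llama_sample_entropy(ctx, &cur_p, /*min_temp=*/0.3f, /*max_temp=*/1.3f, /*exponent_val=*/1.0f);

    return llama_sample_token(ctx, &cur_p);
}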
