
Commit 01eed46

Merge branch 'master' into ik/more_metal_optimizations

2 parents 74df0de + 571083f

File tree

2 files changed: +29 −2 lines changed


examples/server/server.cpp

Lines changed: 12 additions & 2 deletions
@@ -1379,7 +1379,13 @@ int main(int argc, char **argv)
                 }
             }
 
-            const json data = format_final_response(llama, llama.generated_text, llama.generated_token_probs);
+            auto probs = llama.generated_token_probs;
+            if (llama.params.n_probs > 0 && llama.stopped_word) {
+                const std::vector<llama_token> stop_word_toks = llama_tokenize(llama.ctx, llama.stopping_word, false);
+                probs = std::vector<completion_token_output>(llama.generated_token_probs.begin(), llama.generated_token_probs.end() - stop_word_toks.size());
+            }
+
+            const json data = format_final_response(llama, llama.generated_text, probs);
 
             llama_print_timings(llama.ctx);
 
@@ -1456,7 +1462,11 @@ int main(int argc, char **argv)
 
                 if (!llama.has_next_token) {
                     // Generation is done, send extra information.
-                    const json data = format_final_response(llama, "", llama.generated_token_probs);
+                    const json data = format_final_response(
+                        llama,
+                        "",
+                        std::vector<completion_token_output>(llama.generated_token_probs.begin(), llama.generated_token_probs.begin() + sent_token_probs_index)
+                    );
 
                     const std::string str =
                         "data: " +

ggml-cuda.cu

Lines changed: 17 additions & 0 deletions
@@ -81,12 +81,29 @@
 #if defined(GGML_USE_HIPBLAS)
 #define __CUDA_ARCH__ 1300
 
+#ifndef __has_builtin
+    #define __has_builtin(x) 0
+#endif
+
 typedef int8_t int8x4_t __attribute__((ext_vector_type(4)));
 static __device__ __forceinline__ int __vsubss4(const int a, const int b) {
     const int8x4_t va = reinterpret_cast<const int8x4_t&>(a);
     const int8x4_t vb = reinterpret_cast<const int8x4_t&>(b);
+#if __has_builtin(__builtin_elementwise_sub_sat)
     const int8x4_t c = __builtin_elementwise_sub_sat(va, vb);
     return reinterpret_cast<const int&>(c);
+#else
+    int8x4_t c;
+    int16_t tmp;
+#pragma unroll
+    for (int i = 0; i < 4; i++) {
+        tmp = va[i] - vb[i];
+        if(tmp > std::numeric_limits<int8_t>::max()) tmp = std::numeric_limits<int8_t>::max();
+        if(tmp < std::numeric_limits<int8_t>::min()) tmp = std::numeric_limits<int8_t>::min();
+        c[i] = tmp;
+    }
+    return reinterpret_cast<int&>(c);
+#endif // __has_builtin(__builtin_elementwise_sub_sat)
 }
 
 static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) {
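The ggml-cuda.cu hunk hardens the HIP build: __vsubss4 (saturating per-byte subtraction of two packed int8x4 values, matching CUDA's intrinsic of the same name) previously assumed Clang's __builtin_elementwise_sub_sat, which older ROCm compilers lack. The patch defines __has_builtin(x) as 0 when the macro is missing and falls back to a scalar loop that widens each lane to int16_t, clamps to the int8_t range, and narrows back. A host-side sketch of the same saturating semantics (my illustration; the real function is __device__ code):

#include <cstdint>
#include <cstring>
#include <limits>

// Host-side model of __vsubss4: subtract four packed signed bytes with
// saturation, like the device fallback above (widen, clamp, narrow).
static int vsubss4_scalar(int a, int b) {
    int8_t va[4], vb[4], vc[4];
    std::memcpy(va, &a, sizeof va);
    std::memcpy(vb, &b, sizeof vb);
    for (int i = 0; i < 4; ++i) {
        int16_t tmp = int16_t(va[i]) - int16_t(vb[i]);
        if (tmp > std::numeric_limits<int8_t>::max()) tmp = std::numeric_limits<int8_t>::max();
        if (tmp < std::numeric_limits<int8_t>::min()) tmp = std::numeric_limits<int8_t>::min();
        vc[i] = int8_t(tmp);
    }
    int c;
    std::memcpy(&c, vc, sizeof c);
    return c;
}

int main() {
    const int a = int(0x80808080u); // four lanes of -128
    const int b = 0x01010101;       // four lanes of +1
    // -128 - 1 would wrap to +127 in plain 8-bit arithmetic;
    // saturation pins every lane at -128 instead.
    return vsubss4_scalar(a, b) == int(0x80808080u) ? 0 : 1;
}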
