
Commit e8a7fd4

metal : support FA without mask + add asserts (#7278)
* ggml : fa without mask + add asserts

ggml-ci

* metal : support non-contiguous KV

ggml-ci
1 parent a5e3fde commit e8a7fd4

5 files changed: +85 -75 lines

ggml-metal.m

Lines changed: 38 additions & 31 deletions

@@ -2512,13 +2512,14 @@ static enum ggml_status ggml_metal_graph_compute(
                 } break;
             case GGML_OP_FLASH_ATTN_EXT:
                 {
-                    GGML_ASSERT(ne00 % 4 == 0);
+                    GGML_ASSERT(ne00 % 4  == 0);
+                    GGML_ASSERT(ne11 % 32 == 0);
+
                     GGML_ASSERT(src0->type == GGML_TYPE_F32);

-                    struct ggml_tensor * src3 = gf->nodes[i]->src[3];
+                    GGML_ASSERT(ggml_are_same_shape (src1, src2));

-                    GGML_ASSERT(ggml_are_same_shape(src1, src2));
-                    GGML_ASSERT(src3);
+                    struct ggml_tensor * src3 = gf->nodes[i]->src[3];

                     size_t offs_src3 = 0;

@@ -2528,6 +2529,11 @@ static enum ggml_status ggml_metal_graph_compute(
                     GGML_ASSERT(!src3 || src3->ne[1] >= GGML_PAD(src0->ne[1], 8) &&
                             "the Flash-Attention Metal kernel requires the mask to be padded to 8 and at least n_queries big");

+                    const uint64_t nb20 = src2 ? src2->nb[0] : 0; GGML_UNUSED(nb20);
+                    const uint64_t nb21 = src2 ? src2->nb[1] : 0;
+                    const uint64_t nb22 = src2 ? src2->nb[2] : 0;
+                    const uint64_t nb23 = src2 ? src2->nb[3] : 0;
+
                     const int64_t ne30 = src3 ? src3->ne[0] : 0; GGML_UNUSED(ne30);
                   //const int64_t ne31 = src3 ? src3->ne[1] : 0;
                     const int64_t ne32 = src3 ? src3->ne[2] : 0; GGML_UNUSED(ne32);
@@ -2590,34 +2596,35 @@ static enum ggml_status ggml_metal_graph_compute(
                     [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                     [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                     [encoder setBuffer:id_src2 offset:offs_src2 atIndex:2];
-                    [encoder setBuffer:id_src3 offset:offs_src3 atIndex:3];
+                    if (id_src3) {
+                        [encoder setBuffer:id_src3 offset:offs_src3 atIndex:3];
+                    } else {
+                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:3];
+                    }
                     [encoder setBuffer:id_dst offset:offs_dst atIndex:4];
-                    [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:5];
-                    [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:6];
-                    [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:7];
-                    [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:8];
-                    [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:9];
-                    [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:10];
-                    [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:11];
-                    [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:12];
-                    [encoder setBytes:&ne10 length:sizeof( int64_t) atIndex:13];
-                    [encoder setBytes:&ne11 length:sizeof( int64_t) atIndex:14];
-                    [encoder setBytes:&ne12 length:sizeof( int64_t) atIndex:15];
-                    [encoder setBytes:&ne13 length:sizeof( int64_t) atIndex:16];
-                    [encoder setBytes:&nb10 length:sizeof(uint64_t) atIndex:17];
-                    [encoder setBytes:&nb11 length:sizeof(uint64_t) atIndex:18];
-                    [encoder setBytes:&nb12 length:sizeof(uint64_t) atIndex:19];
-                    [encoder setBytes:&nb13 length:sizeof(uint64_t) atIndex:20];
-                    [encoder setBytes:&nb31 length:sizeof(uint64_t) atIndex:21];
-                    [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:22];
-                    [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:23];
-                    [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:24];
-                    [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:25];
-                    [encoder setBytes:&scale length:sizeof( float) atIndex:26];
-                    [encoder setBytes:&max_bias length:sizeof( float) atIndex:27];
-                    [encoder setBytes:&m0 length:sizeof(m0) atIndex:28];
-                    [encoder setBytes:&m1 length:sizeof(m1) atIndex:29];
-                    [encoder setBytes:&n_head_log2 length:sizeof(n_head_log2) atIndex:30];
+                    [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:5];
+                    [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:6];
+                    [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:7];
+                    [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:8];
+                    [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:9];
+                    [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:10];
+                    [encoder setBytes:&ne11 length:sizeof( int64_t) atIndex:11];
+                    [encoder setBytes:&ne12 length:sizeof( int64_t) atIndex:12];
+                    [encoder setBytes:&ne13 length:sizeof( int64_t) atIndex:13];
+                    [encoder setBytes:&nb11 length:sizeof(uint64_t) atIndex:14];
+                    [encoder setBytes:&nb12 length:sizeof(uint64_t) atIndex:15];
+                    [encoder setBytes:&nb13 length:sizeof(uint64_t) atIndex:16];
+                    [encoder setBytes:&nb21 length:sizeof(uint64_t) atIndex:17];
+                    [encoder setBytes:&nb22 length:sizeof(uint64_t) atIndex:18];
+                    [encoder setBytes:&nb23 length:sizeof(uint64_t) atIndex:19];
+                    [encoder setBytes:&nb31 length:sizeof(uint64_t) atIndex:20];
+                    [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:21];
+                    [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:22];
+                    [encoder setBytes:&scale length:sizeof( float) atIndex:23];
+                    [encoder setBytes:&max_bias length:sizeof( float) atIndex:24];
+                    [encoder setBytes:&m0 length:sizeof(m0) atIndex:25];
+                    [encoder setBytes:&m1 length:sizeof(m1) atIndex:26];
+                    [encoder setBytes:&n_head_log2 length:sizeof(n_head_log2) atIndex:27];

                     if (!use_vec_kernel) {
                         // half8x8 kernel
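
Note on the two host-side changes above: the V strides (nb21..nb23) are now passed to the kernel explicitly rather than assumed equal to the K strides, and when no mask tensor exists the q buffer is re-bound at index 3 so that slot is never left unbound. For context, a hedged sketch of how ggml byte strides address tensor elements; the helper name is local to this note, not part of the commit. This per-dimension addressing is what makes separate V strides sufficient for a non-contiguous KV cache:

#include "ggml.h"

// In ggml, nb[0..3] are byte strides, so an element of a possibly
// non-contiguous tensor is addressed like this. Passing V's own strides
// (nb21/nb22/nb23) lets the kernel read V even when it is a view.
static inline void * ggml_elem_addr_sketch(const struct ggml_tensor * t,
                                           int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
    return (char *) t->data + i0*t->nb[0] + i1*t->nb[1] + i2*t->nb[2] + i3*t->nb[3];
}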

ggml-metal.metal

Lines changed: 20 additions & 33 deletions

@@ -2049,27 +2049,24 @@ typedef void (flash_attn_ext_f16_t)(
         device const char * v,
         device const char * mask,
         device float * dst,
-        constant int64_t & ne00,
         constant int64_t & ne01,
         constant int64_t & ne02,
         constant int64_t & ne03,
-        constant uint64_t & nb00,
         constant uint64_t & nb01,
         constant uint64_t & nb02,
         constant uint64_t & nb03,
-        constant int64_t & ne10,
         constant int64_t & ne11,
         constant int64_t & ne12,
         constant int64_t & ne13,
-        constant uint64_t & nb10,
         constant uint64_t & nb11,
         constant uint64_t & nb12,
         constant uint64_t & nb13,
+        constant uint64_t & nb21,
+        constant uint64_t & nb22,
+        constant uint64_t & nb23,
         constant uint64_t & nb31,
-        constant int64_t & ne0,
         constant int64_t & ne1,
         constant int64_t & ne2,
-        constant int64_t & ne3,
         constant float & scale,
         constant float & max_bias,
         constant float & m0,
@@ -2090,27 +2087,24 @@ kernel void kernel_flash_attn_ext_f16(
         device const char * v,
         device const char * mask,
         device float * dst,
-        constant int64_t & ne00,
         constant int64_t & ne01,
         constant int64_t & ne02,
         constant int64_t & ne03,
-        constant uint64_t & nb00,
         constant uint64_t & nb01,
         constant uint64_t & nb02,
         constant uint64_t & nb03,
-        constant int64_t & ne10,
         constant int64_t & ne11,
         constant int64_t & ne12,
         constant int64_t & ne13,
-        constant uint64_t & nb10,
         constant uint64_t & nb11,
         constant uint64_t & nb12,
         constant uint64_t & nb13,
+        constant uint64_t & nb21,
+        constant uint64_t & nb22,
+        constant uint64_t & nb23,
         constant uint64_t & nb31,
-        constant int64_t & ne0,
         constant int64_t & ne1,
         constant int64_t & ne2,
-        constant int64_t & ne3,
         constant float & scale,
         constant float & max_bias,
         constant float & m0,
@@ -2180,10 +2174,6 @@ kernel void kernel_flash_attn_ext_f16(
     const short ne22 = ne12;
     const short ne23 = ne13;

-    const uint nb21 = nb11;
-    const uint nb22 = nb12;
-    const uint nb23 = nb13;
-
     // broadcast
     const short rk2 = ne02/ne12;
     const short rk3 = ne03/ne13;
@@ -2247,11 +2237,16 @@ kernel void kernel_flash_attn_ext_f16(
                     simdgroup_multiply_accumulate(mqk, mq[i], mk, mqk);
                 }

-                // mqk = mqk*scale + mask*slope
-                simdgroup_half8x8 mm;
-                simdgroup_load(mm, mp + ic + 8*cc, nb31/sizeof(half), 0, false);
-                simdgroup_multiply(mm, mslope, mm);
-                simdgroup_multiply_accumulate(mqk, mqk, mscale, mm);
+                if (mask != q) {
+                    // mqk = mqk*scale + mask*slope
+                    simdgroup_half8x8 mm;
+                    simdgroup_load(mm, mp + ic + 8*cc, nb31/sizeof(half), 0, false);
+                    simdgroup_multiply(mm, mslope, mm);
+                    simdgroup_multiply_accumulate(mqk, mqk, mscale, mm);
+                } else {
+                    // mqk = mqk*scale
+                    simdgroup_multiply(mqk, mscale, mqk);
+                }

                 simdgroup_store(mqk, ss + 8*cc, TF, 0, false);
             }
@@ -2425,27 +2420,24 @@ kernel void kernel_flash_attn_ext_vec_f16(
         device const char * v,
         device const char * mask,
         device float * dst,
-        constant int64_t & ne00,
         constant int64_t & ne01,
         constant int64_t & ne02,
         constant int64_t & ne03,
-        constant uint64_t & nb00,
         constant uint64_t & nb01,
         constant uint64_t & nb02,
         constant uint64_t & nb03,
-        constant int64_t & ne10,
         constant int64_t & ne11,
         constant int64_t & ne12,
         constant int64_t & ne13,
-        constant uint64_t & nb10,
         constant uint64_t & nb11,
         constant uint64_t & nb12,
         constant uint64_t & nb13,
+        constant uint64_t & nb21,
+        constant uint64_t & nb22,
+        constant uint64_t & nb23,
         constant uint64_t & nb31,
-        constant int64_t & ne0,
         constant int64_t & ne1,
         constant int64_t & ne2,
-        constant int64_t & ne3,
         constant float & scale,
         constant float & max_bias,
         constant float & m0,
@@ -2521,10 +2513,6 @@ kernel void kernel_flash_attn_ext_vec_f16(
     const short ne22 = ne12;
     const short ne23 = ne13;

-    const uint nb21 = nb11;
-    const uint nb22 = nb12;
-    const uint nb23 = nb13;
-
     // broadcast
     const short rk2 = ne02/ne12;
     const short rk3 = ne03/ne13;
@@ -2589,8 +2577,7 @@ kernel void kernel_flash_attn_ext_vec_f16(

             // mqk = mqk*scale + mask*slope
             if (tiisg == 0) {
-                float4 mm = (float4) mp4[ic/4 + cc];
-                mqk = mqk*scale + mm*slope;
+                mqk = mqk*scale + ((mask != q) ? ((float4) mp4[ic/4 + cc])*slope : (float4) 0.0f);

                 ss4[cc] = mqk;
             }
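
The kernel-side counterpart of the host change: Metal provides no null buffer here, so an absent mask is signalled by binding q at the mask slot, and both kernels test mask != q. A plain-C analogue of this sentinel pattern (illustration only; the function below is hypothetical, and the real Metal code operates on half data):

// When the optional input is absent, the host binds an already-bound buffer
// (q) at the mask slot; the kernel detects absence by pointer comparison.
static float masked_score_sketch(const float * q, const float * mask,
                                 float score, float scale, float slope, int i) {
    if (mask != q) {
        return score*scale + slope*mask[i]; // a real mask buffer was bound
    }
    return score*scale;                     // mask aliases q: no mask provided
}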

ggml.c

Lines changed: 10 additions & 0 deletions

@@ -2824,6 +2824,16 @@ bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
         (t0->ne[3] == t1->ne[3] );
 }

+bool ggml_are_same_stride(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
+    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
+
+    return
+        (t0->nb[0] == t1->nb[0] ) &&
+        (t0->nb[1] == t1->nb[1] ) &&
+        (t0->nb[2] == t1->nb[2] ) &&
+        (t0->nb[3] == t1->nb[3] );
+}
+
 // check if t1 can be represented as a repeatition of t0
 static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
     static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
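
A hedged usage sketch for the new ggml_are_same_stride helper (the demo function, context size, and tensor shapes are local to this example, not part of the commit):

#include "ggml.h"

// Two freshly allocated tensors of equal shape also have equal byte strides;
// a permuted or sliced view of one of them would fail the stride check.
static void same_stride_demo(void) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ 1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 4);
    struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 4);

    GGML_ASSERT(ggml_are_same_shape (a, b)); // compares ne[0..3]
    GGML_ASSERT(ggml_are_same_stride(a, b)); // compares nb[0..3]

    ggml_free(ctx);
}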

ggml.h

Lines changed: 2 additions & 1 deletion

@@ -766,7 +766,8 @@ extern "C" {
     GGML_API bool ggml_is_3d (const struct ggml_tensor * tensor);
     GGML_API int ggml_n_dims (const struct ggml_tensor * tensor); // returns 1 for scalars

-    GGML_API bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1);
+    GGML_API bool ggml_are_same_shape (const struct ggml_tensor * t0, const struct ggml_tensor * t1);
+    GGML_API bool ggml_are_same_stride(const struct ggml_tensor * t0, const struct ggml_tensor * t1);

     // use this to compute the memory overhead of a tensor
     GGML_API size_t ggml_tensor_overhead(void);

tests/test-backend-ops.cpp

Lines changed: 15 additions & 10 deletions

@@ -1487,25 +1487,27 @@ struct test_flash_attn_ext : public test_case {
     const int64_t kv; // kv size
     const int64_t nb; // batch size

+    const bool mask; // use mask
+
     const float max_bias; // ALiBi

     std::string vars() override {
-        return VARS_TO_STR5(hs, nh, kv, nb, max_bias);
+        return VARS_TO_STR6(hs, nh, kv, nb, mask, max_bias);
     }

     double max_nmse_err() override {
         return 5e-4;
     }

-    test_flash_attn_ext(int64_t hs = 128, int64_t nh = 32, int64_t kv = 96, int64_t nb = 8, float max_bias = 0.0f)
-        : hs(hs), nh(nh), kv(kv), nb(nb), max_bias(max_bias) {}
+    test_flash_attn_ext(int64_t hs = 128, int64_t nh = 32, int64_t kv = 96, int64_t nb = 8, bool mask = true, float max_bias = 0.0f)
+        : hs(hs), nh(nh), kv(kv), nb(nb), mask(mask), max_bias(max_bias) {}

     ggml_tensor * build_graph(ggml_context * ctx) override {
         ggml_tensor * q = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, hs, nb, nh, 1);
         ggml_tensor * k = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, hs, kv, nh, 1);
         ggml_tensor * v = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, hs, kv, nh, 1);
-        ggml_tensor * mask = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kv, GGML_PAD(nb, GGML_KQ_MASK_PAD), 1, 1);
-        ggml_tensor * out = ggml_flash_attn_ext(ctx, q, k, v, mask, 1.0f/sqrtf(hs), max_bias);
+        ggml_tensor * m = mask ? ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kv, GGML_PAD(nb, GGML_KQ_MASK_PAD), 1, 1) : nullptr;
+        ggml_tensor * out = ggml_flash_attn_ext(ctx, q, k, v, m, 1.0f/sqrtf(hs), max_bias);
         return out;
     }
 };
@@ -2175,11 +2177,14 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op_name) {
     test_cases.emplace_back(new test_leaky_relu());

     for (int hs : { 64, 80, 128, 256, }) {
-        for (float max_bias : {0.0f, 8.0f}) {
-            for (int nh : { 32, }) {
-                for (int kv : { 512, 1024, }) {
-                    for (int nb : { 1, 2, 4, 8, }) {
-                        test_cases.emplace_back(new test_flash_attn_ext(hs, nh, kv, nb, max_bias));
+        for (bool mask : { true, false } ) {
+            for (float max_bias : { 0.0f, 8.0f }) {
+                if (!mask && max_bias > 0.0f) continue;
+                for (int nh : { 32, }) {
+                    for (int kv : { 512, 1024, }) {
+                        for (int nb : { 1, 2, 4, 8, }) {
+                            test_cases.emplace_back(new test_flash_attn_ext(hs, nh, kv, nb, mask, max_bias));
+                        }
                     }
                 }
             }
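
For reference, a hedged sketch of the graph the new mask=false cases exercise, written as a standalone function (the function name and the caller-provided context are assumptions of this example). Note the test skips mask=false combined with max_bias > 0, since ALiBi still requires a mask:

#include <math.h>
#include "ggml.h"

// Build a flash-attention node without a mask; passing NULL for the mask is
// what this commit enables on the Metal backend.
static struct ggml_tensor * build_fa_no_mask(struct ggml_context * ctx,
                                             int64_t hs, int64_t nh, int64_t kv, int64_t nb) {
    struct ggml_tensor * q = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, hs, nb, nh, 1);
    struct ggml_tensor * k = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, hs, kv, nh, 1);
    struct ggml_tensor * v = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, hs, kv, nh, 1);

    return ggml_flash_attn_ext(ctx, q, k, v, /*mask =*/ NULL, 1.0f/sqrtf(hs), /*max_bias =*/ 0.0f);
}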
