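| | +############################## |
| | +# |
| | +# Batched inserts of 500000 rows in a single transaction, first |
| | +# rolled back and then committed, followed by insert ... select |
| | +# into a second table and delete of all rows from both tables. |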
| 1 | +create table t1 (a int primary key) engine=ndb; |
| 2 | +create table t2 (a int primary key) engine=ndb; |
| 3 | +create procedure do_insert(count int) |
| 4 | +begin |
| 5 | +set @x = 0; |
| 6 | +repeat |
| 7 | +insert into t1 values |
| 8 | +(@x+0), (@x+1), (@x+2), (@x+3), |
| 9 | +(@x+4), (@x+5), (@x+6), (@x+7), |
| 10 | +(@x+8), (@x+9), (@x+10), (@x+11), |
| 11 | +(@x+12), (@x+13), (@x+14), (@x+15), |
| 12 | +(@x+16), (@x+17), (@x+18), (@x+19), |
| 13 | +(@x+20), (@x+21), (@x+22), (@x+23), |
| 14 | +(@x+24), (@x+25), (@x+26), (@x+27), |
| 15 | +(@x+28), (@x+29), (@x+30), (@x+31); |
| 16 | +set @x = @x + 32; |
| 17 | +until @x >= count |
| 18 | +end repeat; |
| 19 | +end % |
| 20 | +begin; |
| 21 | +call do_insert(500000); |
| 22 | +rollback; |
| 23 | +begin; |
| 24 | +call do_insert(500000); |
| 25 | +commit; |
| 26 | +begin; |
| 27 | +insert into t2 select * from t1; |
| 28 | +rollback; |
| 29 | +begin; |
| 30 | +insert into t2 select * from t1; |
| 31 | +commit; |
| 32 | +begin; |
| 33 | +delete from t2; |
| 34 | +commit; |
| 35 | +begin; |
| 36 | +delete from t1; |
| 37 | +commit; |
| 38 | +drop procedure do_insert; |
| 39 | +drop table t1,t2; |
| 40 | +############################## |
| 41 | +# |
| 42 | +# Bug#34189965 |
| 43 | +# DbAcc Validate(WITH_DEBUG) of Unique Key locks has |
| 44 | +# exponential overhead. |
| 45 | +# |
| 46 | +# In order to guarantee read consistency when using a unique index, |
| 47 | +# shared read locks are set (and held) on the rows. When e.g. a |
| 48 | +# join query joins in the *same* row multiple times, each access |
| 49 | +# results in additional locks being set. Such locks are all added |
| 50 | +# to the same 'parallel queue' in the ACC block, which can end up |
| 51 | +# with thousands of locks in the same parallel queue. |
| 52 | +# |
| 53 | +# When debug-compiled, ACC validates the consistency of the queues, |
| 54 | +# iterating the entire queue. The overhead of that may become so |
| 55 | +# huge that the block thread scheduler starts complaining about |
| 56 | +# being 'stuck in thread', 'overslept' and 'sleeploop 10!'. |
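| | +# |
| | +# As a rough illustration, using the numbers from the test below: |
| | +# t holds 10 rows, all with i=1, and t100000 is filled by a 5-way |
| | +# self join of t on i, giving 10^5 = 100000 rows that all carry |
| | +# the value 1. The BKA join on t.uq=t1.i thus looks up the *same* |
| | +# row (uq=1) 100000 times, adding a shared lock to that row's |
| | +# parallel queue for each access. If the whole queue is validated |
| | +# each time a lock is added, the validation work grows roughly |
| | +# quadratically with the queue length, which is the overhead |
| | +# described above. |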
| 57 | +create table t ( |
| 58 | +pk int primary key, |
| 59 | +uq int, |
| 60 | +i int, |
| 61 | +unique key (uq), |
| 62 | +key(i) |
| 63 | +) engine = ndbcluster; |
| 64 | +create table t100000 ( |
| 65 | +i int |
| 66 | +) engine = ndbcluster; |
| 67 | +insert into t values |
| 68 | +(0,0,1), (1,1,1), (2,2,1), (3,3,1), (4,4,1), |
| 69 | +(5,5,1), (6,6,1), (7,7,1), (8,8,1), (9,9,1); |
| 70 | +insert into t100000 |
| 71 | +select t1.i from |
| 72 | +t as t1 |
| 73 | +straight_join t as t2 on t2.i=t1.i |
| 74 | +straight_join t as t3 on t3.i=t2.i |
| 75 | +straight_join t as t4 on t4.i=t3.i |
| 76 | +straight_join t as t5 on t5.i=t4.i; |
| 77 | +set ndb_join_pushdown=off; |
| 78 | +select /*+ BKA(t) */ count(*) from |
| 79 | +t100000 as t1 |
| 80 | +straight_join t on t.uq=t1.i; |
| 81 | +count(*) |
| 82 | +100000 |
| 83 | +set ndb_join_pushdown=default; |
| 84 | +drop table t; |
| 85 | +drop table t100000; |