23
23
import io .netty .util .internal .logging .InternalLoggerFactory ;
24
24
import org .jctools .queues .MessagePassingQueue ;
25
25
26
+ import java .util .ArrayDeque ;
27
+ import java .util .Queue ;
26
28
import java .util .concurrent .atomic .AtomicIntegerFieldUpdater ;
27
29
30
+ import static io .netty .util .internal .PlatformDependent .newMpscQueue ;
28
31
import static java .lang .Math .max ;
29
32
import static java .lang .Math .min ;
30
33
@@ -50,6 +53,7 @@ public String toString() {
50
53
private static final int DEFAULT_MAX_CAPACITY_PER_THREAD ;
51
54
private static final int RATIO ;
52
55
private static final int DEFAULT_QUEUE_CHUNK_SIZE_PER_THREAD ;
56
+ private static final boolean BLOCKING_POOL ;
53
57
54
58
static {
55
59
// In the future, we might have different maxCapacity for different object types.
@@ -69,15 +73,19 @@ public String toString() {
69
73
// bursts.
70
74
RATIO = max (0 , SystemPropertyUtil .getInt ("io.netty.recycler.ratio" , 8 ));
71
75
76
+ BLOCKING_POOL = SystemPropertyUtil .getBoolean ("io.netty.recycler.blocking" , false );
77
+
72
78
if (logger .isDebugEnabled ()) {
73
79
if (DEFAULT_MAX_CAPACITY_PER_THREAD == 0 ) {
74
80
logger .debug ("-Dio.netty.recycler.maxCapacityPerThread: disabled" );
75
81
logger .debug ("-Dio.netty.recycler.ratio: disabled" );
76
82
logger .debug ("-Dio.netty.recycler.chunkSize: disabled" );
83
+ logger .debug ("-Dio.netty.recycler.blocking: disabled" );
77
84
} else {
78
85
logger .debug ("-Dio.netty.recycler.maxCapacityPerThread: {}" , DEFAULT_MAX_CAPACITY_PER_THREAD );
79
86
logger .debug ("-Dio.netty.recycler.ratio: {}" , RATIO );
80
87
logger .debug ("-Dio.netty.recycler.chunkSize: {}" , DEFAULT_QUEUE_CHUNK_SIZE_PER_THREAD );
88
+ logger .debug ("-Dio.netty.recycler.blocking: {}" , BLOCKING_POOL );
81
89
}
82
90
}
83
91
}
@@ -252,9 +260,11 @@ private static final class LocalPool<T> {
252
260
@SuppressWarnings("unchecked")
LocalPool(int maxCapacity, int ratioInterval, int chunkSize) {
    this.ratioInterval = ratioInterval;
    // Select the handle queue implementation up front: the synchronized
    // BlockingMessageQueue when debugging via -Dio.netty.recycler.blocking,
    // otherwise the usual lock-free MPSC queue.
    pooledHandles = BLOCKING_POOL
            ? new BlockingMessageQueue<DefaultHandle<T>>(maxCapacity)
            : (MessagePassingQueue<DefaultHandle<T>>) newMpscQueue(chunkSize, maxCapacity);
    // Begin at the full interval so the very first handle offered gets recycled.
    ratioCounter = ratioInterval;
}
260
270
@@ -279,4 +289,112 @@ DefaultHandle<T> newHandle() {
279
289
return null ;
280
290
}
281
291
}
292
+
293
+ /**
294
+ * This is an implementation of {@link MessagePassingQueue}, similar to what might be returned from
295
+ * {@link PlatformDependent#newMpscQueue(int)}, but intended to be used for debugging purpose.
296
+ * The implementation relies on synchronised monitor locks for thread-safety.
297
+ * The {@code drain} and {@code fill} bulk operations are not supported by this implementation.
298
+ */
299
+ private static final class BlockingMessageQueue <T > implements MessagePassingQueue <T > {
300
+ private final Queue <T > deque ;
301
+ private final int maxCapacity ;
302
+
303
+ BlockingMessageQueue (int maxCapacity ) {
304
+ this .maxCapacity = maxCapacity ;
305
+ // This message passing queue is backed by an ArrayDeque instance,
306
+ // made thread-safe by synchronising on `this` BlockingMessageQueue instance.
307
+ // Why ArrayDeque?
308
+ // We use ArrayDeque instead of LinkedList or LinkedBlockingQueue because it's more space efficient.
309
+ // We use ArrayDeque instead of ArrayList because we need the queue APIs.
310
+ // We use ArrayDeque instead of ConcurrentLinkedQueue because CLQ is unbounded and has O(n) size().
311
+ // We use ArrayDeque instead of ArrayBlockingQueue because ABQ allocates its max capacity up-front,
312
+ // and these queues will usually have large capacities, in potentially great numbers (one per thread),
313
+ // but often only have comparatively few items in them.
314
+ deque = new ArrayDeque <T >();
315
+ }
316
+
317
+ @ Override
318
+ public synchronized boolean offer (T e ) {
319
+ if (deque .size () == maxCapacity ) {
320
+ return false ;
321
+ }
322
+ return deque .offer (e );
323
+ }
324
+
325
+ @ Override
326
+ public synchronized T poll () {
327
+ return deque .poll ();
328
+ }
329
+
330
+ @ Override
331
+ public synchronized T peek () {
332
+ return deque .peek ();
333
+ }
334
+
335
+ @ Override
336
+ public synchronized int size () {
337
+ return deque .size ();
338
+ }
339
+
340
+ @ Override
341
+ public synchronized void clear () {
342
+ deque .clear ();
343
+ }
344
+
345
+ @ Override
346
+ public synchronized boolean isEmpty () {
347
+ return deque .isEmpty ();
348
+ }
349
+
350
+ @ Override
351
+ public int capacity () {
352
+ return maxCapacity ;
353
+ }
354
+
355
+ @ Override
356
+ public boolean relaxedOffer (T e ) {
357
+ return offer (e );
358
+ }
359
+
360
+ @ Override
361
+ public T relaxedPoll () {
362
+ return poll ();
363
+ }
364
+
365
+ @ Override
366
+ public T relaxedPeek () {
367
+ return peek ();
368
+ }
369
+
370
+ @ Override
371
+ public int drain (Consumer <T > c , int limit ) {
372
+ throw new UnsupportedOperationException ();
373
+ }
374
+
375
+ @ Override
376
+ public int fill (Supplier <T > s , int limit ) {
377
+ throw new UnsupportedOperationException ();
378
+ }
379
+
380
+ @ Override
381
+ public int drain (Consumer <T > c ) {
382
+ throw new UnsupportedOperationException ();
383
+ }
384
+
385
+ @ Override
386
+ public int fill (Supplier <T > s ) {
387
+ throw new UnsupportedOperationException ();
388
+ }
389
+
390
+ @ Override
391
+ public void drain (Consumer <T > c , WaitStrategy wait , ExitCondition exit ) {
392
+ throw new UnsupportedOperationException ();
393
+ }
394
+
395
+ @ Override
396
+ public void fill (Supplier <T > s , WaitStrategy wait , ExitCondition exit ) {
397
+ throw new UnsupportedOperationException ();
398
+ }
399
+ }
282
400
}
0 commit comments