@@ -16,12 +16,10 @@

 package org.springframework.kafka.listener;

-import java.time.Duration;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -37,6 +35,7 @@

 import org.springframework.kafka.KafkaException;
 import org.springframework.kafka.KafkaException.Level;
+import org.springframework.kafka.support.KafkaUtils;
 import org.springframework.lang.Nullable;
 import org.springframework.util.backoff.BackOff;

@@ -50,6 +49,7 @@
 *
 * @author Gary Russell
 * @author Francois Rosiere
+ * @author Wang Zhiyang
 * @since 2.8
 *
 */
@@ -120,7 +120,7 @@ public void setReclassifyOnExceptionChange(boolean reclassifyOnExceptionChange)
 	@Override
 	protected void notRetryable(Stream<Class<? extends Exception>> notRetryable) {
 		if (this.fallbackBatchHandler instanceof ExceptionClassifier handler) {
-			notRetryable.forEach(ex -> handler.addNotRetryableExceptions(ex));
+			notRetryable.forEach(handler::addNotRetryableExceptions);
 		}
 	}

@@ -178,7 +178,6 @@ protected <K, V> ConsumerRecords<K, V> handle(Exception thrownException, Consume
 				else {
 					return String.format("Record not found in batch, index %d out of bounds (0, %d); "
 							+ "re-seeking batch", index, data.count() - 1);
-
 				}
 			});
 			fallback(thrownException, data, consumer, container, invokeListener);
@@ -201,11 +200,9 @@ private int findIndex(ConsumerRecords<?, ?> data, ConsumerRecord<?, ?> record) {
 			return -1;
 		}
 		int i = 0;
-		Iterator<?> iterator = data.iterator();
-		while (iterator.hasNext()) {
-			ConsumerRecord<?, ?> candidate = (ConsumerRecord<?, ?>) iterator.next();
-			if (candidate.topic().equals(record.topic()) && candidate.partition() == record.partition()
-					&& candidate.offset() == record.offset()) {
+		for (ConsumerRecord<?, ?> datum : data) {
+			if (datum.topic().equals(record.topic()) && datum.partition() == record.partition()
+					&& datum.offset() == record.offset()) {
 				break;
 			}
 			i++;
@@ -220,29 +217,25 @@ private <K, V> ConsumerRecords<K, V> seekOrRecover(Exception thrownException, @N
 		if (data == null) {
 			return ConsumerRecords.empty();
 		}
-		Iterator<?> iterator = data.iterator();
-		List<ConsumerRecord<?, ?>> toCommit = new ArrayList<>();
 		List<ConsumerRecord<?, ?>> remaining = new ArrayList<>();
 		int index = indexArg;
-		while (iterator.hasNext()) {
-			ConsumerRecord<?, ?> record = (ConsumerRecord<?, ?>) iterator.next();
+		Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
+		for (ConsumerRecord<?, ?> datum : data) {
 			if (index-- > 0) {
-				toCommit.add(record);
+				offsets.compute(new TopicPartition(datum.topic(), datum.partition()),
+						(key, val) -> ListenerUtils.createOffsetAndMetadata(container, datum.offset() + 1));
 			}
 			else {
-				remaining.add(record);
+				remaining.add(datum);
 			}
 		}
-		Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
-		toCommit.forEach(rec -> offsets.compute(new TopicPartition(rec.topic(), rec.partition()),
-				(key, val) -> ListenerUtils.createOffsetAndMetadata(container, rec.offset() + 1)));
 		if (offsets.size() > 0) {
 			commit(consumer, container, offsets);
 		}
 		if (isSeekAfterError()) {
 			if (remaining.size() > 0) {
 				SeekUtils.seekOrRecover(thrownException, remaining, consumer, container, false,
-						getFailureTracker()::recovered, this.logger, getLogLevel());
+						getFailureTracker(), this.logger, getLogLevel());
 				ConsumerRecord<?, ?> recovered = remaining.get(0);
 				commit(consumer, container,
 						Collections.singletonMap(new TopicPartition(recovered.topic(), recovered.partition()),
@@ -254,35 +247,43 @@ private <K, V> ConsumerRecords<K, V> seekOrRecover(Exception thrownException, @N
 			return ConsumerRecords.empty();
 		}
 		else {
-			if (indexArg == 0) {
-				return (ConsumerRecords<K, V>) data; // first record just rerun the whole thing
-			}
-			else {
+			if (remaining.size() > 0) {
 				try {
 					if (getFailureTracker().recovered(remaining.get(0), thrownException, container,
 							consumer)) {
 						remaining.remove(0);
 					}
 				}
 				catch (Exception e) {
+					if (SeekUtils.isBackoffException(thrownException)) {
+						this.logger.debug(e, () -> KafkaUtils.format(remaining.get(0))
+								+ " included in remaining due to retry back off " + thrownException);
+					}
+					else {
+						this.logger.error(e, KafkaUtils.format(remaining.get(0))
+								+ " included in remaining due to " + thrownException);
+					}
 				}
-				Map<TopicPartition, List<ConsumerRecord<K, V>>> remains = new HashMap<>();
-				remaining.forEach(rec -> remains.computeIfAbsent(new TopicPartition(rec.topic(), rec.partition()),
-						tp -> new ArrayList<ConsumerRecord<K, V>>()).add((ConsumerRecord<K, V>) rec));
-				return new ConsumerRecords<>(remains);
 			}
+			if (remaining.isEmpty()) {
+				return ConsumerRecords.empty();
+			}
+			Map<TopicPartition, List<ConsumerRecord<K, V>>> remains = new HashMap<>();
+			remaining.forEach(rec -> remains.computeIfAbsent(new TopicPartition(rec.topic(), rec.partition()),
+					tp -> new ArrayList<>()).add((ConsumerRecord<K, V>) rec));
+			return new ConsumerRecords<>(remains);
 		}
 	}

-	private void commit(Consumer<?, ?> consumer, MessageListenerContainer container, Map<TopicPartition, OffsetAndMetadata> offsets) {
+	private void commit(Consumer<?, ?> consumer, MessageListenerContainer container,
+			Map<TopicPartition, OffsetAndMetadata> offsets) {

-		boolean syncCommits = container.getContainerProperties().isSyncCommits();
-		Duration timeout = container.getContainerProperties().getSyncCommitTimeout();
-		if (syncCommits) {
-			consumer.commitSync(offsets, timeout);
+		ContainerProperties properties = container.getContainerProperties();
+		if (properties.isSyncCommits()) {
+			consumer.commitSync(offsets, properties.getSyncCommitTimeout());
 		}
 		else {
-			OffsetCommitCallback commitCallback = container.getContainerProperties().getCommitCallback();
+			OffsetCommitCallback commitCallback = properties.getCommitCallback();
 			if (commitCallback == null) {
 				commitCallback = LOGGING_COMMIT_CALLBACK;
 			}
@@ -304,8 +305,8 @@ private BatchListenerFailedException getBatchListenerFailedException(Throwable t
 			throwable = throwable.getCause();
 			checked.add(throwable);

-			if (throwable instanceof BatchListenerFailedException) {
-				target = (BatchListenerFailedException) throwable;
+			if (throwable instanceof BatchListenerFailedException batchListenerFailedException) {
+				target = batchListenerFailedException;
 				break;
 			}
 		}
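
Note on the refactored seekOrRecover loop above: the old code collected the already-processed records into a toCommit list and built the commit-offsets map in a second forEach pass; the new code folds the offsets straight into the map while walking the batch once. Below is a minimal, self-contained sketch of that single-pass pattern using only kafka-clients types. The class and method names (CommitFoldSketch, offsetsToCommit) are illustrative, and the plain OffsetAndMetadata constructor stands in for the real code's ListenerUtils.createOffsetAndMetadata, which additionally honors the container's offset-metadata provider.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class CommitFoldSketch {

	/**
	 * Fold the first {@code failedIndex} records of a failed batch into a
	 * per-partition commit map in a single pass. Records from the failure
	 * onward would go to the caller's "remaining" list (omitted here).
	 */
	static Map<TopicPartition, OffsetAndMetadata> offsetsToCommit(
			List<ConsumerRecord<String, String>> batch, int failedIndex) {

		Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
		int index = failedIndex;
		for (ConsumerRecord<String, String> rec : batch) {
			if (index-- > 0) {
				// compute() replaces any earlier entry for the partition; since a
				// batch is offset-ordered within each partition, the map ends up
				// holding highestProcessedOffset + 1 per partition.
				offsets.compute(new TopicPartition(rec.topic(), rec.partition()),
						(tp, existing) -> new OffsetAndMetadata(rec.offset() + 1));
			}
		}
		return offsets;
	}

	public static void main(String[] args) {
		List<ConsumerRecord<String, String>> batch = List.of(
				new ConsumerRecord<>("orders", 0, 10, "k1", "v1"),
				new ConsumerRecord<>("orders", 0, 11, "k2", "v2"),
				new ConsumerRecord<>("orders", 1, 7, "k3", "v3"));
		// Listener failed at index 2: commit {orders-0 -> 12}; orders-1 offset 7
		// stays uncommitted and would be re-seeked or retried.
		System.out.println(offsetsToCommit(batch, 2));
	}
}

Compared with the two-pass version, this avoids the intermediate toCommit list and the second traversal; the replacing compute acts as a "last write wins" put, which is exactly the committed-offset semantics wanted here.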