
Commit ab3945d

Fix New Sonar Issues
1 parent a3ee610 commit ab3945d

File tree

3 files changed (+15, -12 lines)

spring-kafka/src/main/java/org/springframework/kafka/listener/ConsumerProperties.java

Lines changed: 10 additions & 7 deletions
@@ -23,7 +23,6 @@
 
 import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
 import org.apache.kafka.clients.consumer.OffsetCommitCallback;
-import org.apache.kafka.common.errors.AuthenticationException;
 
 import org.springframework.kafka.support.LogIfLevelEnabled;
 import org.springframework.kafka.support.TopicPartitionOffset;
@@ -360,9 +359,11 @@ public Duration getAuthorizationExceptionRetryInterval() {
 	}
 
 	/**
-	 * Set the interval between retries after and {@link AuthenticationException} or
-	 * {@code AuthorizationException} is thrown by {@code KafkaConsumer}. By default the
-	 * field is null and retries are disabled. In such case the container will be stopped.
+	 * Set the interval between retries after and
+	 * {@link org.apache.kafka.common.errors.AuthenticationException} or
+	 * {@code org.apache.kafka.common.errors.AuthorizationException} is thrown by
+	 * {@code KafkaConsumer}. By default the field is null and retries are disabled. In
+	 * such case the container will be stopped.
 	 *
 	 * The interval must be less than {@code max.poll.interval.ms} consumer property.
 	 *
@@ -385,9 +386,11 @@ public Duration getAuthExceptionRetryInterval() {
 	}
 
 	/**
-	 * Set the interval between retries after and {@link AuthenticationException} or
-	 * {@code AuthorizationException} is thrown by {@code KafkaConsumer}. By default the
-	 * field is null and retries are disabled. In such case the container will be stopped.
+	 * Set the interval between retries after and
+	 * {@link org.apache.kafka.common.errors.AuthenticationException} or
+	 * {@code org.apache.kafka.common.errors.AuthorizationException} is thrown by
+	 * {@code KafkaConsumer}. By default the field is null and retries are disabled. In
+	 * such case the container will be stopped.
 	 *
 	 * The interval must be less than {@code max.poll.interval.ms} consumer property.
 	 *
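
For context, the Javadoc above documents the auth-exception retry interval on ConsumerProperties (inherited by ContainerProperties). A minimal sketch of how an application might set it, assuming a typical ConcurrentKafkaListenerContainerFactory setup; the configuration class and bean names are illustrative and not part of this commit:

import java.time.Duration;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;

@Configuration
public class RetryOnAuthExceptionConfig {

	@Bean
	ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory(
			ConsumerFactory<String, String> consumerFactory) {
		ConcurrentKafkaListenerContainerFactory<String, String> factory =
				new ConcurrentKafkaListenerContainerFactory<>();
		factory.setConsumerFactory(consumerFactory);
		// Retry instead of stopping the container when the broker throws an
		// AuthenticationException or AuthorizationException; per the Javadoc,
		// the interval must stay below the consumer's max.poll.interval.ms.
		factory.getContainerProperties().setAuthExceptionRetryInterval(Duration.ofSeconds(10));
		return factory;
	}
}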

spring-kafka/src/main/java/org/springframework/kafka/retrytopic/DeadLetterPublishingRecovererFactory.java

Lines changed: 3 additions & 3 deletions
@@ -194,10 +194,10 @@ private int getAttempts(ConsumerRecord<?, ?> consumerRecord) {
 		Header header = consumerRecord.headers().lastHeader(RetryTopicHeaders.DEFAULT_HEADER_ATTEMPTS);
 		if (header != null) {
 			byte[] value = header.value();
-			if (value.length == 1) { // backwards compatibility
+			if (value.length == Byte.BYTES) { // backwards compatibility
 				return value[0];
 			}
-			else if (value.length == 4) {
+			else if (value.length == Integer.BYTES) {
 				return ByteBuffer.wrap(value).getInt();
 			}
 			else {
@@ -213,7 +213,7 @@ private Headers addHeaders(ConsumerRecord<?, ?> consumerRecord, Exception e, int
 		byte[] originalTimestampHeader = getOriginalTimestampHeaderBytes(consumerRecord);
 		headers.add(RetryTopicHeaders.DEFAULT_HEADER_ORIGINAL_TIMESTAMP, originalTimestampHeader);
 		headers.add(RetryTopicHeaders.DEFAULT_HEADER_ATTEMPTS,
-				ByteBuffer.wrap(new byte[4]).putInt(attempts + 1).array());
+				ByteBuffer.wrap(new byte[Integer.BYTES]).putInt(attempts + 1).array());
 		headers.add(RetryTopicHeaders.DEFAULT_HEADER_BACKOFF_TIMESTAMP,
 				BigInteger.valueOf(getNextExecutionTimestamp(consumerRecord, e, originalTimestampHeader))
 						.toByteArray());
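
The change above replaces the magic numbers 1 and 4 with Byte.BYTES and Integer.BYTES: the retry-attempts header was originally a single byte and is now a four-byte int, so getAttempts() must accept both widths while addHeaders() always writes the wider form. A self-contained sketch of that encoding scheme, with illustrative names (the factory's own methods are private):

import java.nio.ByteBuffer;

final class AttemptsHeaderCodec {

	private AttemptsHeaderCodec() {
	}

	static int decode(byte[] value) {
		if (value.length == Byte.BYTES) { // legacy one-byte header
			return value[0];
		}
		if (value.length == Integer.BYTES) { // current four-byte header
			return ByteBuffer.wrap(value).getInt();
		}
		throw new IllegalArgumentException("Unexpected attempts header length: " + value.length);
	}

	static byte[] encode(int attempts) {
		// Always write the current four-byte form, as addHeaders() does.
		return ByteBuffer.wrap(new byte[Integer.BYTES]).putInt(attempts).array();
	}
}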

spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DeadLetterPublishingRecovererFactoryTests.java

Lines changed: 2 additions & 2 deletions
@@ -162,7 +162,7 @@ void shouldIncreaseAttemptsInLegacyHeader() {
 		ProducerRecord producerRecord = producerRecordCaptor.getValue();
 		Header attemptsHeader = producerRecord.headers().lastHeader(RetryTopicHeaders.DEFAULT_HEADER_ATTEMPTS);
 		assertThat(attemptsHeader).isNotNull();
-		assertThat(attemptsHeader.value().length).isEqualTo(4); // handled a legacy one byte header ok
+		assertThat(attemptsHeader.value().length).isEqualTo(Integer.BYTES); // handled a legacy one byte header ok
 		assertThat(ByteBuffer.wrap(attemptsHeader.value()).getInt()).isEqualTo(128);
 	}
 
@@ -195,7 +195,7 @@ void shouldIncreaseAttemptsInNewHeader() {
 		ProducerRecord producerRecord = producerRecordCaptor.getValue();
 		Header attemptsHeader = producerRecord.headers().lastHeader(RetryTopicHeaders.DEFAULT_HEADER_ATTEMPTS);
 		assertThat(attemptsHeader).isNotNull();
-		assertThat(attemptsHeader.value().length).isEqualTo(4);
+		assertThat(attemptsHeader.value().length).isEqualTo(Integer.BYTES);
 		assertThat(ByteBuffer.wrap(attemptsHeader.value()).getInt()).isEqualTo(128);
 	}
 
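
These assertions also pin down why the wider header matters: 128 is one past Byte.MAX_VALUE, so an attempts count incremented past 127 can only be represented correctly in the four-byte form. A small standalone sketch demonstrating the overflow (not part of the test class):

import java.nio.ByteBuffer;

public class ByteOverflowDemo {

	public static void main(String[] args) {
		byte legacy = (byte) 128; // wraps to -128 in a signed byte
		int widened = ByteBuffer.wrap(
				ByteBuffer.allocate(Integer.BYTES).putInt(128).array()).getInt();
		System.out.println(legacy);  // -128
		System.out.println(widened); // 128, as the tests assert
	}
}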
