|
1 | 1 | /*
|
2 |
| - * Copyright 2019 the original author or authors. |
| 2 | + * Copyright 2019-2020 the original author or authors. |
3 | 3 | *
|
4 | 4 | * Licensed under the Apache License, Version 2.0 (the "License");
|
5 | 5 | * you may not use this file except in compliance with the License.
|
|
27 | 27 | import java.util.Optional;
|
28 | 28 |
|
29 | 29 | import org.apache.kafka.clients.consumer.ConsumerConfig;
|
| 30 | +import org.apache.kafka.clients.consumer.ConsumerRecord; |
30 | 31 | import org.apache.kafka.clients.consumer.OffsetAndMetadata;
|
31 | 32 | import org.apache.kafka.clients.producer.ProducerConfig;
|
32 | 33 | import org.apache.kafka.clients.producer.ProducerRecord;
|
33 | 34 | import org.apache.kafka.clients.producer.RecordMetadata;
|
| 35 | +import org.apache.kafka.common.KafkaException; |
34 | 36 | import org.apache.kafka.common.TopicPartition;
|
35 | 37 | import org.junit.jupiter.api.AfterEach;
|
36 | 38 | import org.junit.jupiter.api.BeforeAll;
|
37 | 39 | import org.junit.jupiter.api.BeforeEach;
|
38 | 40 | import org.junit.jupiter.api.Test;
|
| 41 | +import org.reactivestreams.Publisher; |
39 | 42 | import org.reactivestreams.Subscription;
|
40 | 43 |
|
41 | 44 | import org.springframework.kafka.support.converter.MessagingMessageConverter;
|
|
56 | 59 | /**
|
57 | 60 | * @author Mark Norkin
|
58 | 61 | * @author Gary Russell
|
| 62 | + * @author Will Kennedy |
59 | 63 | *
|
60 | 64 | * @since 2.3.0
|
61 | 65 | */
|
@@ -281,4 +285,82 @@ public void shouldSendOffsetsToTransaction() {
|
281 | 285 | .verify(DEFAULT_VERIFY_TIMEOUT);
|
282 | 286 | }
|
283 | 287 |
|
	@Test
	public void shouldSendOneRecordTransactionallyViaTemplateAsSenderRecordAndReceiveItExactlyOnceWithException() {
		// Publish a single record inside a committed transaction.
		ProducerRecord<Integer, String> producerRecord =
				new ProducerRecord<>(REACTIVE_INT_KEY_TOPIC, DEFAULT_PARTITION, DEFAULT_TIMESTAMP, DEFAULT_KEY,
						DEFAULT_VALUE);

		StepVerifier.create(reactiveKafkaProducerTemplate
				.sendTransactionally(SenderRecord.create(producerRecord, null))
				.then())
				.expectComplete()
				.verify();

		// Consume exactly-once, but force the commit phase to fail
		// (failCommit = true): sendAndCommit throws a KafkaException eagerly,
		// and the compensating abort() in onErrorResume then fails with the
		// asserted message because no transaction had been begun — the
		// READY -> ABORTING_TRANSACTION transition is invalid.
		StepVerifier.create(reactiveKafkaConsumerTemplate
				.receiveExactlyOnce(reactiveKafkaProducerTemplate.transactionManager())
				.concatMap(consumerRecordFlux -> sendAndCommit(consumerRecordFlux, true))
				.onErrorResume(error -> reactiveKafkaProducerTemplate.transactionManager().abort().then(Mono.error(error)))
		)
				.expectErrorMatches(throwable -> throwable instanceof KafkaException &&
						throwable.getMessage().equals("TransactionalId reactive.transaction: Invalid transition " +
								"attempted from state READY to state ABORTING_TRANSACTION"))
				.verify();

		// The failed exactly-once attempt must not have consumed the record:
		// a plain receive() still observes the originally produced value.
		StepVerifier.create(reactiveKafkaConsumerTemplate
				.receive().doOnNext(receiverRecord -> receiverRecord.receiverOffset().acknowledge()))
				.assertNext(receiverRecord -> assertThat(receiverRecord.value()).isEqualTo(DEFAULT_VALUE))
				.thenCancel()
				.verify(DEFAULT_VERIFY_TIMEOUT);
	}
| 316 | + |
	@Test
	public void shouldSendOneRecordTransactionallyViaTemplateAsSenderRecordAndReceiveItExactlyOnce() {
		// Publish a single record inside a committed transaction.
		ProducerRecord<Integer, String> producerRecord =
				new ProducerRecord<>(REACTIVE_INT_KEY_TOPIC, DEFAULT_PARTITION, DEFAULT_TIMESTAMP, DEFAULT_KEY,
						DEFAULT_VALUE);

		StepVerifier.create(reactiveKafkaProducerTemplate.sendTransactionally(SenderRecord.create(producerRecord, null))
				.then())
				.expectComplete()
				.verify();

		// Consume exactly-once and re-publish each record (value suffixed with
		// "xyz" by toSenderRecord), committing the transaction on completion
		// (failCommit = false). The correlation metadata of the send result is
		// the original record key (set in toSenderRecord).
		StepVerifier.create(reactiveKafkaConsumerTemplate
				.receiveExactlyOnce(reactiveKafkaProducerTemplate.transactionManager())
				.concatMap(consumerRecordFlux -> sendAndCommit(consumerRecordFlux, false))
				.onErrorResume(error -> reactiveKafkaProducerTemplate.transactionManager().abort().then(Mono.error(error)))
		)
				.assertNext(senderResult -> {
					assertThat(senderResult.correlationMetadata().intValue()).isEqualTo(DEFAULT_KEY);
					// offset > 0: the re-published record lands after the original one
					assertThat(senderResult.recordMetadata().offset()).isGreaterThan(0);
				})
				.thenCancel()
				.verify(DEFAULT_VERIFY_TIMEOUT);

		// Verify the transformed record is visible to a plain consumer,
		// i.e. the exactly-once transaction was committed.
		StepVerifier.create(reactiveKafkaConsumerTemplate
				.receive().doOnNext(receiverRecord -> receiverRecord.receiverOffset().acknowledge()))
				.assertNext(receiverRecord -> {
					assertThat(receiverRecord.value()).isEqualTo(DEFAULT_VALUE + "xyz");
					assertThat(receiverRecord.offset()).isGreaterThan(0);
				})
				.thenCancel()
				.verify(DEFAULT_VERIFY_TIMEOUT);
	}
| 349 | + |
	/**
	 * Re-publish the consumed records via {@link #toSenderRecord} and then commit
	 * the transaction, or simulate a failure before the commit.
	 * <p>NOTE: when {@code failCommit} is {@code true}, {@code doThrowKafkaException()}
	 * throws <em>eagerly</em> while the ternary operand is evaluated — i.e. during
	 * assembly of this pipeline, before the returned Flux is subscribed — which is
	 * the timing the "...WithException" test's expected error message relies on.
	 * @param fluxConsumerRecord the records consumed within the transaction
	 * @param failCommit whether to throw instead of committing
	 * @return the send results for the re-published records
	 */
	private Flux<SenderResult<Integer>> sendAndCommit(Flux<ConsumerRecord<Integer, String>> fluxConsumerRecord, boolean failCommit) {
		return reactiveKafkaProducerTemplate
				.send(fluxConsumerRecord.map(this::toSenderRecord)
						.concatWith(failCommit ?
								doThrowKafkaException() :
								reactiveKafkaProducerTemplate.transactionManager().commit()));
	}
| 357 | + |
	/**
	 * Always throws a {@link KafkaException}; never returns.
	 * Deliberately throws eagerly (rather than returning {@code Mono.error}) so
	 * the failure happens while {@code sendAndCommit} assembles its pipeline,
	 * before anything is sent or a transaction is begun.
	 * @return never returns normally (declared type satisfies the ternary in sendAndCommit)
	 */
	private Publisher<? extends SenderRecord<Integer, String, Integer>> doThrowKafkaException() {
		throw new KafkaException();
	}
| 361 | + |
| 362 | + private SenderRecord<Integer, String, Integer> toSenderRecord(ConsumerRecord<Integer, String> record) { |
| 363 | + return SenderRecord.create(REACTIVE_INT_KEY_TOPIC, record.partition(), null, record.key(), record.value() + "xyz", record.key()); |
| 364 | + } |
| 365 | + |
284 | 366 | }
|
0 commit comments