@@ -22,18 +22,17 @@
 import io.rsocket.internal.LimitableRequestPublisher;
 import io.rsocket.internal.UnboundedProcessor;
 import io.rsocket.util.NonBlockingHashMapLong;
-import org.reactivestreams.Publisher;
-import org.reactivestreams.Subscriber;
-import reactor.core.Disposable;
-import reactor.core.publisher.*;
-
-import javax.annotation.Nullable;
 import java.time.Duration;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.function.Supplier;
+import javax.annotation.Nullable;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.Subscriber;
+import reactor.core.Disposable;
+import reactor.core.publisher.*;
 
 /** Client Side of a RSocket socket. Sends {@link Frame}s to a {@link RSocketServer} */
 class RSocketClient implements RSocket {
@@ -88,36 +87,25 @@ class RSocketClient implements RSocket {
       started
           .thenMany(Flux.interval(tickPeriod))
           .doOnSubscribe(s -> timeLastTickSentMs = System.currentTimeMillis())
-          .concatMap(i -> sendKeepAlive(ackTimeoutMs, missedAcks))
-          .doOnError(
+          .subscribe(
+              i -> sendKeepAlive(ackTimeoutMs, missedAcks),
               t -> {
                 errorConsumer.accept(t);
                 connection.dispose();
-              })
-          .subscribe();
+              });
     }
 
-    connection
-        .onClose()
-        .doFinally(
-            signalType -> {
-              cleanup();
-            })
-        .doOnError(errorConsumer)
-        .subscribe();
+    connection.onClose().doFinally(signalType -> cleanup()).subscribe(null, errorConsumer);
 
     connection
         .send(sendProcessor)
-        .doOnError(this::handleSendProcessorError)
         .doFinally(this::handleSendProcessorCancel)
-        .subscribe();
+        .subscribe(null, this::handleSendProcessorError);
 
     connection
         .receive()
         .doOnSubscribe(subscription -> started.onComplete())
-        .doOnNext(this::handleIncomingFrames)
-        .doOnError(errorConsumer)
-        .subscribe();
+        .subscribe(this::handleIncomingFrames, errorConsumer);
   }
 
   private void handleSendProcessorError(Throwable t) {
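Note on the hunk above: each chain previously ended in a zero-argument subscribe(), so doOnNext/doOnError were only side effects and any error was still rethrown by Reactor as ErrorCallbackNotImplemented. Moving the consumers into subscribe(onNext, onError) makes the error path explicit; the same pattern covers the onClose, send, and receive chains here. A minimal standalone sketch of the difference, assuming only reactor-core (class and variable names are illustrative, not from this PR):

    import reactor.core.publisher.Flux;

    public class SubscribeStyles {
      public static void main(String[] args) {
        Flux<String> failing = Flux.error(new IllegalStateException("boom"));

        // New style: the error goes to a dedicated consumer.
        failing.subscribe(
            frame -> System.out.println("onNext: " + frame), // handleIncomingFrames stand-in
            t -> System.err.println("onError: " + t));       // errorConsumer stand-in

        // Old style: doOnError observes the error, but the bare subscribe()
        // has no error handler, so Reactor rethrows it wrapped in
        // ErrorCallbackNotImplemented (an UnsupportedOperationException).
        try {
          failing.doOnError(t -> System.err.println("doOnError: " + t)).subscribe();
        } catch (UnsupportedOperationException rethrown) {
          System.err.println("rethrown by bare subscribe(): " + rethrown.getCause());
        }
      }
    }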
@@ -152,23 +140,20 @@ private void handleSendProcessorCancel(SignalType t) {
     }
   }
 
-  private Mono<Void> sendKeepAlive(long ackTimeoutMs, int missedAcks) {
-    return Mono.fromRunnable(
-        () -> {
-          long now = System.currentTimeMillis();
-          if (now - timeLastTickSentMs > ackTimeoutMs) {
-            int count = missedAckCounter.incrementAndGet();
-            if (count >= missedAcks) {
-              String message =
-                  String.format(
-                      "Missed %d keep-alive acks with a threshold of %d and a ack timeout of %d ms",
-                      count, missedAcks, ackTimeoutMs);
-              throw new ConnectionException(message);
-            }
-          }
+  private void sendKeepAlive(long ackTimeoutMs, int missedAcks) {
+    long now = System.currentTimeMillis();
+    if (now - timeLastTickSentMs > ackTimeoutMs) {
+      int count = missedAckCounter.incrementAndGet();
+      if (count >= missedAcks) {
+        String message =
+            String.format(
+                "Missed %d keep-alive acks with a threshold of %d and a ack timeout of %d ms",
+                count, missedAcks, ackTimeoutMs);
+        throw new ConnectionException(message);
+      }
+    }
 
-          sendProcessor.onNext(Frame.Keepalive.from(Unpooled.EMPTY_BUFFER, true));
-        });
+    sendProcessor.onNext(Frame.Keepalive.from(Unpooled.EMPTY_BUFFER, true));
   }
 
   @Override
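sendKeepAlive is now a plain void method called from the interval's onNext consumer rather than a Mono<Void> concatMapped into the chain. In Reactor, a throw from the onNext consumer cancels the subscription and is delivered to the error consumer, so the ConnectionException still reaches errorConsumer and connection.dispose(). A small sketch of that mechanism (tick count and exception are placeholders):

    import java.time.Duration;
    import reactor.core.publisher.Flux;

    public class KeepAliveSketch {
      public static void main(String[] args) throws InterruptedException {
        Flux.interval(Duration.ofMillis(100))
            .subscribe(
                tick -> {
                  System.out.println("tick " + tick);
                  if (tick == 2) {
                    // Stand-in for the ConnectionException thrown on missed acks.
                    throw new IllegalStateException("missed keep-alive acks");
                  }
                },
                t -> System.err.println("disposing connection: " + t.getMessage()));

        Thread.sleep(500); // interval runs on a daemon scheduler; keep main alive briefly
      }
    }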
@@ -380,14 +365,12 @@ public Frame apply(Payload payload) {
               }
             });
 
-        requestFrames
-            .doOnNext(sendProcessor::onNext)
-            .doOnError(
-                t -> {
-                  errorConsumer.accept(t);
-                  receiver.dispose();
-                })
-            .subscribe();
+        requestFrames.subscribe(
+            sendProcessor::onNext,
+            t -> {
+              errorConsumer.accept(t);
+              receiver.dispose();
+            });
       } else {
         sendOneFrame(Frame.RequestN.from(streamId, l));
       }
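The request-N frame wiring gets the same treatment: frame forwarding and receiver disposal move into subscribe's consumers. A self-contained sketch, with UnicastProcessor standing in for both the shared send processor and the per-stream receiver (names illustrative, not from this PR):

    import reactor.core.publisher.Flux;
    import reactor.core.publisher.UnicastProcessor;

    public class RequestFramesSketch {
      public static void main(String[] args) {
        UnicastProcessor<String> sendProcessor = UnicastProcessor.create();
        UnicastProcessor<String> receiver = UnicastProcessor.create();

        Flux<String> requestFrames =
            Flux.just("REQUEST_N(1)", "REQUEST_N(2)")
                .concatWith(Flux.error(new IllegalStateException("stream failed")));

        requestFrames.subscribe(
            sendProcessor::onNext, // forward each frame toward the connection
            t -> {
              System.err.println("error: " + t.getMessage());
              receiver.dispose(); // tear down the in-flight stream
            });

        sendProcessor.subscribe(frame -> System.out.println("sent " + frame));
      }
    }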
@@ -415,10 +398,10 @@ private boolean contains(int streamId) {
 
   protected void cleanup() {
     try {
-      for (UnicastProcessor<Payload> subscriber: receivers.values()) {
+      for (UnicastProcessor<Payload> subscriber : receivers.values()) {
        cleanUpSubscriber(subscriber);
       }
-      for (LimitableRequestPublisher p: senders.values()) {
+      for (LimitableRequestPublisher p : senders.values()) {
        cleanUpLimitableRequestPublisher(p);
       }
 