
Commit d2a7fa7

Revert " Patch pylint warnings so tests pass again (dpkp#184)"
This reverts commit 5e461a7.
1 parent 222db1b

Showing 4 changed files with 3 additions and 17 deletions.


kafka/admin/client.py

Lines changed: 0 additions & 5 deletions
@@ -503,8 +503,6 @@ def _get_cluster_metadata(self, topics=None, auto_topic_creation=False):
                 topics=topics,
                 allow_auto_topic_creation=auto_topic_creation
             )
-        else:
-            raise IncompatibleBrokerVersion(f"MetadataRequest for {version} is not supported")
 
         future = self._send_request_to_node(
             self._client.least_loaded_node(),
@@ -1012,7 +1010,6 @@ def _describe_consumer_groups_send_request(self, group_id, group_coordinator_id,
     def _describe_consumer_groups_process_response(self, response):
         """Process a DescribeGroupsResponse into a group description."""
         if response.API_VERSION <= 3:
-            group_description = None
             assert len(response.groups) == 1
             for response_field, response_name in zip(response.SCHEMA.fields, response.SCHEMA.names):
                 if isinstance(response_field, Array):
@@ -1048,8 +1045,6 @@ def _describe_consumer_groups_process_response(self, response):
                     if response.API_VERSION <=2:
                         described_group_information_list.append(None)
                     group_description = GroupInformation._make(described_group_information_list)
-        if group_description is None:
-            raise Errors.BrokerResponseError("No group description received")
         error_code = group_description.error_code
         error_type = Errors.for_code(error_code)
         # Java has the note: KAFKA-6789, we can retry based on the error code
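
The two _describe_consumer_groups_process_response hunks undo a sentinel-and-guard pattern: bind group_description = None before the parsing loop, then fail loudly if the loop never reassigned it. A minimal standalone sketch of that pattern (illustrative names, not kafka-python's API):

# Sketch of the sentinel-and-guard pattern this revert removes.
# `parse_group` and RuntimeError are stand-ins, not kafka-python API.
def parse_group(fields):
    group_description = None               # sentinel (deleted by the revert)
    for field in fields:
        if isinstance(field, tuple):       # only matching fields bind a result
            group_description = field
    if group_description is None:          # post-loop guard (deleted by the revert)
        raise RuntimeError("No group description received")
    return group_description

print(parse_group([("consumer", 0)]))      # ('consumer', 0)
# parse_group(["no-match"])                # RuntimeError with the guard; without
                                           # it, UnboundLocalError at first use

After the revert, a response that never binds the name instead surfaces one line later, as an UnboundLocalError at group_description.error_code.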

kafka/coordinator/consumer.py

Lines changed: 0 additions & 5 deletions
@@ -628,15 +628,10 @@ def _send_offset_commit_request(self, offsets):
                 ) for partition, offset in partitions.items()]
             ) for topic, partitions in offset_data.items()]
         )
-        else:
-            # TODO: We really shouldn't need this here to begin with, but I'd like to get
-            # pylint to stop complaining.
-            raise Exception(f"Unsupported Broker API: {self.config['api_version']}")
 
         log.debug("Sending offset-commit request with %s for group %s to %s",
                   offsets, self.group_id, node_id)
 
-
         future = Future()
         _f = self._client.send(node_id, request)
         _f.add_callback(self._handle_offset_commit_response, offsets, future, time.time())
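
The deleted else branch here existed, per its own TODO, only to convince pylint that request is always bound before the self._client.send(node_id, request) call below. A standalone sketch of the warning being silenced (hypothetical names and version tuples, not kafka-python's API):

# Sketch of pylint's possibly-used-before-assignment warning.
def build_commit_request(api_version):
    if api_version >= (0, 9):
        request = ("OffsetCommitRequest_v2", api_version)
    elif api_version >= (0, 8, 2):
        request = ("OffsetCommitRequest_v1", api_version)
    # The reverted patch closed the chain with:
    #     else:
    #         raise Exception(f"Unsupported Broker API: {api_version}")
    # Without it, pylint warns that `request` may be unbound here, and an
    # unmatched version raises UnboundLocalError at runtime.
    return request

build_commit_request((0, 9))        # fine
# build_commit_request((0, 8, 0))   # UnboundLocalError: 'request' not bound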

kafka/record/default_records.py

Lines changed: 3 additions & 5 deletions
@@ -187,14 +187,12 @@ def _maybe_uncompress(self) -> None:
                 data = memoryview(self._buffer)[self._pos:]
                 if compression_type == self.CODEC_GZIP:
                     uncompressed = gzip_decode(data)
-                elif compression_type == self.CODEC_SNAPPY:
+                if compression_type == self.CODEC_SNAPPY:
                     uncompressed = snappy_decode(data.tobytes())
-                elif compression_type == self.CODEC_LZ4:
+                if compression_type == self.CODEC_LZ4:
                     uncompressed = lz4_decode(data.tobytes())
-                elif compression_type == self.CODEC_ZSTD:
+                if compression_type == self.CODEC_ZSTD:
                     uncompressed = zstd_decode(data.tobytes())
-                else:
-                    raise NotImplementedError(f"Compression type {compression_type} is not supported")
                 self._buffer = bytearray(uncompressed)
                 self._pos = 0
         self._decompressed = True
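
Restoring independent if statements in place of an elif chain closed by raise NotImplementedError changes how an unknown codec fails: the chain fails fast with a descriptive error, while independent ifs leave uncompressed unbound and fail one line later. A minimal sketch with stand-in codec constants and decoders (not the module's real ones):

# Stand-in constants and decoders; only the control-flow shape matches the diff.
CODEC_GZIP, CODEC_SNAPPY = 1, 2

def uncompress_reverted(codec, data):
    if codec == CODEC_GZIP:                # shape after the revert: no else
        uncompressed = data + b"|gzip"
    if codec == CODEC_SNAPPY:
        uncompressed = data + b"|snappy"
    return uncompressed

def uncompress_patched(codec, data):
    if codec == CODEC_GZIP:                # shape the revert removes
        uncompressed = data + b"|gzip"
    elif codec == CODEC_SNAPPY:
        uncompressed = data + b"|snappy"
    else:
        raise NotImplementedError(f"Compression type {codec} is not supported")
    return uncompressed

try:
    uncompress_patched(3, b"x")
except NotImplementedError as e:
    print("patched:", e)                   # immediate, descriptive failure
try:
    uncompress_reverted(3, b"x")
except UnboundLocalError as e:
    print("reverted:", e)                  # 'uncompressed' was never bound

The independent-if form also re-tests the remaining codecs after a match, though for a handful of integer comparisons the cost is negligible.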

kafka/record/legacy_records.py

Lines changed: 0 additions & 2 deletions
@@ -461,8 +461,6 @@ def _maybe_compress(self) -> bool:
                     compressed = lz4_encode_old_kafka(data)
                 else:
                     compressed = lz4_encode(data)
-            else:
-                raise NotImplementedError(f"Compression type {self._compression_type} is not supported")
             size = self.size_in_bytes(
                 0, timestamp=0, key=None, value=compressed)
             # We will try to reuse the same buffer if we have enough space
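
The encode path loses the same kind of terminal else. For reference, a dict-based dispatch is one conventional way to keep the unsupported-codec failure explicit without a trailing else that static analysis may flag as unreachable; this is a sketch with stand-in names, not a change made by this commit:

# Stand-in codecs and encoders; a dispatch table replaces the if/elif chain.
CODEC_GZIP, CODEC_SNAPPY, CODEC_LZ4 = 1, 2, 3
_ENCODERS = {
    CODEC_GZIP: lambda data: data + b"|gzip",
    CODEC_SNAPPY: lambda data: data + b"|snappy",
    CODEC_LZ4: lambda data: data + b"|lz4",
}

def compress(codec, data):
    try:
        encoder = _ENCODERS[codec]         # unknown codec fails here, up front
    except KeyError:
        raise NotImplementedError(f"Compression type {codec} is not supported") from None
    return encoder(data)

print(compress(CODEC_GZIP, b"payload"))    # b'payload|gzip'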
