
Commit 868115c

Dana Powers committed
Raise an error if we attempt to group duplicate topic-partition payloads
- previously this would simply drop one of the payloads
1 parent: 3a4ceef

3 files changed: +10 −1 lines

kafka/client.py

Lines changed: 2 additions & 0 deletions
@@ -423,6 +423,8 @@ def send_produce_request(self, payloads=[], acks=1, timeout=1000,
 
         Arguments:
             payloads (list of ProduceRequest): produce requests to send to kafka
+                ProduceRequest payloads must not contain duplicates for any
+                topic-partition.
             acks (int, optional): how many acks the servers should receive from replica
                 brokers before responding to the request. If it is 0, the server
                 will not send any response. If it is 1, the server will wait

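With this constraint documented, a caller that builds more than one ProduceRequest for the same topic-partition needs to merge them before calling send_produce_request. A minimal sketch of one way to do that, assuming the ProduceRequest(topic, partition, messages) namedtuple from kafka.common; the merge_payloads helper is hypothetical and not part of this commit:

import collections

from kafka.common import ProduceRequest

def merge_payloads(payloads):
    # Combine duplicate topic-partition payloads into a single
    # ProduceRequest each, concatenating their message lists, so the
    # duplicate assertion in group_by_topic_and_partition cannot fire.
    merged = collections.defaultdict(list)
    for p in payloads:
        merged[(p.topic, p.partition)].extend(p.messages)
    return [ProduceRequest(topic, partition, messages)
            for (topic, partition), messages in merged.items()]
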
kafka/util.py

Lines changed: 3 additions & 0 deletions
@@ -82,6 +82,9 @@ def relative_unpack(fmt, data, cur):
 def group_by_topic_and_partition(tuples):
     out = collections.defaultdict(dict)
     for t in tuples:
+        assert t.topic not in out or t.partition not in out[t.topic], \
+            'Duplicate {0}s for {1} {2}'.format(t.__class__.__name__,
+                                                t.topic, t.partition)
         out[t.topic][t.partition] = t
     return out
 

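To see the behavior change in isolation: before this commit the final assignment silently overwrote any earlier entry for the same topic-partition; with the assert, duplicates fail loudly. A self-contained sketch (TopicAndPartition here is a stand-in namedtuple matching the shape used by kafka.common):

import collections

TopicAndPartition = collections.namedtuple('TopicAndPartition',
                                           ['topic', 'partition'])

def group_by_topic_and_partition(tuples):
    out = collections.defaultdict(dict)
    for t in tuples:
        assert t.topic not in out or t.partition not in out[t.topic], \
            'Duplicate {0}s for {1} {2}'.format(t.__class__.__name__,
                                                t.topic, t.partition)
        # Without the assert, this assignment is where a duplicate payload
        # used to be dropped: the later tuple replaced the earlier one.
        out[t.topic][t.partition] = t
    return out

dup = TopicAndPartition('a', 1)
try:
    group_by_topic_and_partition([dup, dup])
except AssertionError as exc:
    print(exc)  # Duplicate TopicAndPartitions for a 1
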
test/test_util.py

Lines changed: 5 additions & 1 deletion
@@ -107,7 +107,6 @@ def test_group_by_topic_and_partition(self):
         t = kafka.common.TopicAndPartition
 
         l = [
-            t("a", 1),
             t("a", 1),
             t("a", 2),
             t("a", 3),
@@ -124,3 +123,8 @@ def test_group_by_topic_and_partition(self):
                 3: t("b", 3),
             }
         })
+
+        # should not be able to group duplicate topic-partitions
+        t1 = t("a", 1)
+        with self.assertRaises(AssertionError):
+            kafka.util.group_by_topic_and_partition([t1, t1])

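One caveat on the design choice: because the duplicate check is an assert rather than an explicitly raised exception, it is compiled out when Python runs with optimizations enabled, and grouping would then silently revert to the old overwrite behavior:

$ python -c "assert False, 'duplicate'"     # AssertionError: duplicate
$ python -O -c "assert False, 'duplicate'"  # no error: asserts stripped under -O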