Skip to content

Commit 8b9c7e5

Browse files
committed
Sync tests and fixtures with kafka 0.8.0-beta1 tag
1 parent d640ddf commit 8b9c7e5

File tree

5 files changed

+33
-29
lines changed

5 files changed

+33
-29
lines changed

README.md

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ Copyright 2013, David Arthur under Apache License, v2.0. See `LICENSE`
1515

1616
# Status
1717

18-
I'm following the version numbers of Kafka, plus one number to indicate the
18+
I'm following the version numbers of Kafka, plus one number to indicate the
1919
version of this project. The current version is 0.8.0-1. This version is under
2020
development, APIs are subject to change.
2121

@@ -194,6 +194,7 @@ git submodule update
194194
cd kafka-src
195195
./sbt update
196196
./sbt package
197+
./sbt assembly-package-dependency
197198
```
198199

199200
And then run the tests. This will actually start up real local Zookeeper

kafka-src

Submodule kafka-src updated 140 files

test/fixtures.py

Lines changed: 6 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -17,13 +17,16 @@
1717
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
1818
KAFKA_ROOT = os.path.join(PROJECT_ROOT, "kafka-src")
1919
IVY_ROOT = os.path.expanduser("~/.ivy2/cache")
20+
SCALA_VERSION = '2.8.0'
2021

2122
if "PROJECT_ROOT" in os.environ:
2223
PROJECT_ROOT = os.environ["PROJECT_ROOT"]
2324
if "KAFKA_ROOT" in os.environ:
2425
KAFKA_ROOT = os.environ["KAFKA_ROOT"]
2526
if "IVY_ROOT" in os.environ:
2627
IVY_ROOT = os.environ["IVY_ROOT"]
28+
if "SCALA_VERSION" in os.environ:
29+
SCALA_VERSION = os.environ["SCALA_VERSION"]
2730

2831

2932
def test_resource(file):
@@ -33,16 +36,8 @@ def test_resource(file):
3336
def test_classpath():
3437
# ./kafka-src/bin/kafka-run-class.sh is the authority.
3538
jars = ["."]
36-
jars.append(IVY_ROOT + "/org.xerial.snappy/snappy-java/bundles/snappy-java-1.0.4.1.jar")
37-
jars.append(IVY_ROOT + "/org.scala-lang/scala-library/jars/scala-library-2.8.0.jar")
38-
jars.append(IVY_ROOT + "/org.scala-lang/scala-compiler/jars/scala-compiler-2.8.0.jar")
39-
jars.append(IVY_ROOT + "/log4j/log4j/jars/log4j-1.2.15.jar")
40-
jars.append(IVY_ROOT + "/org.slf4j/slf4j-api/jars/slf4j-api-1.6.4.jar")
41-
jars.append(IVY_ROOT + "/org.apache.zookeeper/zookeeper/jars/zookeeper-3.3.4.jar")
42-
jars.append(IVY_ROOT + "/net.sf.jopt-simple/jopt-simple/jars/jopt-simple-3.2.jar")
43-
jars.extend(glob.glob(KAFKA_ROOT + "/core/target/scala-2.8.0/*.jar"))
44-
jars.extend(glob.glob(KAFKA_ROOT + "/core/lib/*.jar"))
45-
jars.extend(glob.glob(KAFKA_ROOT + "/perf/target/scala-2.8.0/kafka*.jar"))
39+
# assume all dependencies have been packaged into one jar with sbt-assembly's task "assembly-package-dependency"
40+
jars.extend(glob.glob(KAFKA_ROOT + "/core/target/scala-%s/*.jar" % SCALA_VERSION))
4641

4742
jars = filter(os.path.exists, map(os.path.abspath, jars))
4843
return ":".join(jars)
@@ -314,7 +309,7 @@ def open(self):
314309

315310
print("*** Starting Kafka...")
316311
self.child.start()
317-
self.child.wait_for(r"\[Kafka Server \d+\], started")
312+
self.child.wait_for(r"\[Kafka Server %d\], Started" % self.broker_id)
318313
print("*** Done!")
319314

320315
def close(self):

test/resources/kafka.properties

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4,9 +4,9 @@
44
# The ASF licenses this file to You under the Apache License, Version 2.0
55
# (the "License"); you may not use this file except in compliance with
66
# the License. You may obtain a copy of the License at
7-
#
7+
#
88
# http://www.apache.org/licenses/LICENSE-2.0
9-
#
9+
#
1010
# Unless required by applicable law or agreed to in writing, software
1111
# distributed under the License is distributed on an "AS IS" BASIS,
1212
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -47,8 +47,8 @@ log.cleanup.interval.mins=1
4747

4848
############################# Zookeeper #############################
4949

50-
zk.connect={zk_host}:{zk_port}/{zk_chroot}
51-
zk.connection.timeout.ms=1000000
50+
zookeeper.connect={zk_host}:{zk_port}/{zk_chroot}
51+
zookeeper.connection.timeout.ms=1000000
5252

5353
kafka.metrics.polling.interval.secs=5
5454
kafka.metrics.reporters=kafka.metrics.KafkaCSVMetricsReporter

test/test_integration.py

Lines changed: 20 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -242,6 +242,7 @@ def test_produce_consume_two_partitions(self):
242242
# Offset Tests #
243243
####################
244244

245+
@unittest.skip('commit offset not supported in this version')
245246
def test_commit_fetch_offsets(self):
246247
req = OffsetCommitRequest("test_commit_fetch_offsets", 0, 42, "metadata")
247248
(resp,) = self.client.send_offset_commit_request("group", [req])
@@ -401,8 +402,9 @@ def test_acks_local_write(self):
401402
producer.stop()
402403

403404
def test_acks_cluster_commit(self):
404-
producer = SimpleProducer(self.client, "test_acks_cluster_commit",
405-
req_acks=SimpleProducer.ACK_AFTER_CLUSTER_COMMIT)
405+
producer = SimpleProducer(
406+
self.client, "test_acks_cluster_commit",
407+
req_acks=SimpleProducer.ACK_AFTER_CLUSTER_COMMIT)
406408
resp = producer.send_messages("one")
407409
self.assertEquals(len(resp), 1)
408410

@@ -548,11 +550,11 @@ def test_batched_simple_producer(self):
548550

549551
class TestConsumer(unittest.TestCase):
550552
@classmethod
551-
def setUpClass(cls): # noqa
553+
def setUpClass(cls):
552554
cls.zk = ZookeeperFixture.instance()
553555
cls.server1 = KafkaFixture.instance(0, cls.zk.host, cls.zk.port)
554556
cls.server2 = KafkaFixture.instance(1, cls.zk.host, cls.zk.port)
555-
cls.client = KafkaClient(cls.server2.host, cls.server2.port)
557+
cls.client = KafkaClient(cls.server2.host, cls.server2.port, bufsize=8192)
556558

557559
@classmethod
558560
def tearDownClass(cls): # noqa
@@ -581,7 +583,7 @@ def test_simple_consumer(self):
581583
self.assertEquals(resp.offset, 0)
582584

583585
# Start a consumer
584-
consumer = SimpleConsumer(self.client, "group1", "test_simple_consumer")
586+
consumer = SimpleConsumer(self.client, "group1", "test_simple_consumer", auto_commit=False)
585587
all_messages = []
586588
for message in consumer:
587589
all_messages.append(message)
@@ -604,6 +606,11 @@ def test_simple_consumer(self):
604606

605607
self.assertEquals(len(all_messages), 13)
606608

609+
consumer.stop()
610+
611+
def test_simple_consumer_blocking(self):
612+
consumer = SimpleConsumer(self.client, "group1", "test_simple_consumer_blocking", auto_commit=False)
613+
607614
# Blocking API
608615
start = datetime.now()
609616
messages = consumer.get_messages(block=True, timeout=5)
@@ -612,13 +619,13 @@ def test_simple_consumer(self):
612619
self.assertEqual(len(messages), 0)
613620

614621
# Send 10 messages
615-
produce = ProduceRequest("test_simple_consumer", 0, messages=[
622+
produce = ProduceRequest("test_simple_consumer_blocking", 0, messages=[
616623
create_message("Test message 0 %d" % i) for i in range(10)
617624
])
618625

619626
for resp in self.client.send_produce_request([produce]):
620627
self.assertEquals(resp.error, 0)
621-
self.assertEquals(resp.offset, 100)
628+
self.assertEquals(resp.offset, 0)
622629

623630
# Fetch 5 messages
624631
messages = consumer.get_messages(count=5, block=True, timeout=5)
@@ -650,7 +657,7 @@ def test_simple_consumer_pending(self):
650657
self.assertEquals(resp.error, 0)
651658
self.assertEquals(resp.offset, 0)
652659

653-
consumer = SimpleConsumer(self.client, "group1", "test_simple_pending")
660+
consumer = SimpleConsumer(self.client, "group1", "test_simple_pending", auto_commit=False)
654661
self.assertEquals(consumer.pending(), 20)
655662
self.assertEquals(consumer.pending(partitions=[0]), 10)
656663
self.assertEquals(consumer.pending(partitions=[1]), 10)
@@ -676,7 +683,7 @@ def test_multi_process_consumer(self):
676683
self.assertEquals(resp.offset, 0)
677684

678685
# Start a consumer
679-
consumer = MultiProcessConsumer(self.client, "grp1", "test_mpconsumer")
686+
consumer = MultiProcessConsumer(self.client, "grp1", "test_mpconsumer", auto_commit=False)
680687
all_messages = []
681688
for message in consumer:
682689
all_messages.append(message)
@@ -732,7 +739,7 @@ def test_multi_proc_pending(self):
732739
self.assertEquals(resp.error, 0)
733740
self.assertEquals(resp.offset, 0)
734741

735-
consumer = MultiProcessConsumer(self.client, "group1", "test_mppending")
742+
consumer = MultiProcessConsumer(self.client, "group1", "test_mppending", auto_commit=False)
736743
self.assertEquals(consumer.pending(), 20)
737744
self.assertEquals(consumer.pending(partitions=[0]), 10)
738745
self.assertEquals(consumer.pending(partitions=[1]), 10)
@@ -749,20 +756,21 @@ def test_large_messages(self):
749756
self.assertEquals(resp.offset, 0)
750757

751758
# Produce 10 messages that are too large (bigger than default fetch size)
752-
messages2=[create_message(random_string(5000)) for i in range(10)]
759+
messages2 = [create_message(random_string(5000)) for i in range(10)]
753760
produce2 = ProduceRequest("test_large_messages", 0, messages2)
754761

755762
for resp in self.client.send_produce_request([produce2]):
756763
self.assertEquals(resp.error, 0)
757764
self.assertEquals(resp.offset, 10)
758765

759766
# Consumer should still get all of them
760-
consumer = SimpleConsumer(self.client, "group1", "test_large_messages")
767+
consumer = SimpleConsumer(self.client, "group1", "test_large_messages", auto_commit=False)
761768
all_messages = messages1 + messages2
762769
for i, message in enumerate(consumer):
763770
self.assertEquals(all_messages[i], message.message)
764771
self.assertEquals(i, 19)
765772

773+
766774
def random_string(l):
767775
s = "".join(random.choice(string.letters) for i in xrange(l))
768776
return s

0 commit comments

Comments
 (0)