
Commit 1aac57a

Merge pull request #30 from Yolean/kafka-011
Upgrade to Kafka 0.11 and test for open issues
2 parents (6a5fb08 + ab35705) · commit 1aac57a

25 files changed: +741 −481 lines

10broker-config.yml

Lines changed: 246 additions & 0 deletions
@@ -0,0 +1,246 @@
kind: ConfigMap
metadata:
  name: broker-config
  namespace: kafka
apiVersion: v1
data:
  init.sh: |-
    #!/bin/bash
    set -x

    export KAFKA_BROKER_ID=${HOSTNAME##*-}
    sed -i "s/\${KAFKA_BROKER_ID}/$KAFKA_BROKER_ID/" /etc/kafka/server.properties

  server.properties: |-
    # Licensed to the Apache Software Foundation (ASF) under one or more
    # contributor license agreements. See the NOTICE file distributed with
    # this work for additional information regarding copyright ownership.
    # The ASF licenses this file to You under the Apache License, Version 2.0
    # (the "License"); you may not use this file except in compliance with
    # the License. You may obtain a copy of the License at
    #
    #    http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.

    # see kafka.server.KafkaConfig for additional details and defaults

    ############################# Server Basics #############################

    # The id of the broker. This must be set to a unique integer for each broker.
    broker.id=${KAFKA_BROKER_ID}

    # Switch to enable topic deletion or not, default value is false
    #delete.topic.enable=true

    ############################# Socket Server Settings #############################

    # The address the socket server listens on. It will get the value returned from
    # java.net.InetAddress.getCanonicalHostName() if not configured.
    #   FORMAT:
    #     listeners = listener_name://host_name:port
    #   EXAMPLE:
    #     listeners = PLAINTEXT://your.host.name:9092
    #listeners=PLAINTEXT://:9092

    # Hostname and port the broker will advertise to producers and consumers. If not set,
    # it uses the value for "listeners" if configured. Otherwise, it will use the value
    # returned from java.net.InetAddress.getCanonicalHostName().
    #advertised.listeners=PLAINTEXT://your.host.name:9092

    # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
    #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL

    # The number of threads that the server uses for receiving requests from the network and sending responses to the network
    num.network.threads=3

    # The number of threads that the server uses for processing requests, which may include disk I/O
    num.io.threads=8

    # The send buffer (SO_SNDBUF) used by the socket server
    socket.send.buffer.bytes=102400

    # The receive buffer (SO_RCVBUF) used by the socket server
    socket.receive.buffer.bytes=102400

    # The maximum size of a request that the socket server will accept (protection against OOM)
    socket.request.max.bytes=104857600


    ############################# Log Basics #############################

    # A comma separated list of directories under which to store log files
    log.dirs=/tmp/kafka-logs

    # The default number of log partitions per topic. More partitions allow greater
    # parallelism for consumption, but this will also result in more files across
    # the brokers.
    num.partitions=1

    # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
    # This value is recommended to be increased for installations with data dirs located in RAID array.
    num.recovery.threads.per.data.dir=1

    ############################# Internal Topic Settings #############################
    # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
    # For anything other than development testing, a value greater than 1 is recommended to ensure availability, such as 3.
    offsets.topic.replication.factor=1
    transaction.state.log.replication.factor=1
    transaction.state.log.min.isr=1

    ############################# Log Flush Policy #############################

    # Messages are immediately written to the filesystem but by default we only fsync() to sync
    # the OS cache lazily. The following configurations control the flush of data to disk.
    # There are a few important trade-offs here:
    #    1. Durability: Unflushed data may be lost if you are not using replication.
    #    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
    #    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
    # The settings below allow one to configure the flush policy to flush data after a period of time or
    # every N messages (or both). This can be done globally and overridden on a per-topic basis.

    # The number of messages to accept before forcing a flush of data to disk
    #log.flush.interval.messages=10000

    # The maximum amount of time a message can sit in a log before we force a flush
    #log.flush.interval.ms=1000

    ############################# Log Retention Policy #############################

    # The following configurations control the disposal of log segments. The policy can
    # be set to delete segments after a period of time, or after a given size has accumulated.
    # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
    # from the end of the log.

    # The minimum age of a log file to be eligible for deletion due to age
    log.retention.hours=168

    # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
    # segments don't drop below log.retention.bytes. Functions independently of log.retention.hours.
    #log.retention.bytes=1073741824

    # The maximum size of a log segment file. When this size is reached a new log segment will be created.
    log.segment.bytes=1073741824

    # The interval at which log segments are checked to see if they can be deleted according
    # to the retention policies
    log.retention.check.interval.ms=300000

    ############################# Zookeeper #############################

    # Zookeeper connection string (see zookeeper docs for details).
    # This is a comma separated list of host:port pairs, each corresponding to a zk
    # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
    # You can also append an optional chroot string to the urls to specify the
    # root directory for all kafka znodes.
    zookeeper.connect=localhost:2181

    # Timeout in ms for connecting to zookeeper
    zookeeper.connection.timeout.ms=6000


    ############################# Group Coordinator Settings #############################

    # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
    # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
    # The default value for this is 3 seconds.
    # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
    # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
    group.initial.rebalance.delay.ms=0

  log4j.properties: |-
    # Licensed to the Apache Software Foundation (ASF) under one or more
    # contributor license agreements. See the NOTICE file distributed with
    # this work for additional information regarding copyright ownership.
    # The ASF licenses this file to You under the Apache License, Version 2.0
    # (the "License"); you may not use this file except in compliance with
    # the License. You may obtain a copy of the License at
    #
    #    http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.

    # Unspecified loggers and loggers with additivity=true output to server.log and stdout
    # Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise
    log4j.rootLogger=INFO, stdout, kafkaAppender

    log4j.appender.stdout=org.apache.log4j.ConsoleAppender
    log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
    log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n

    log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
    log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH
    log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
    log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
    log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

    log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender
    log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH
    log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
    log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
    log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

    log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender
    log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH
    log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
    log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
    log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

    log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender
    log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH
    log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log
    log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
    log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

    log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender
    log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH
    log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
    log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
    log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

    log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender
    log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH
    log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log
    log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout
    log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

    # Change the two lines below to adjust ZK client logging
    log4j.logger.org.I0Itec.zkclient.ZkClient=INFO
    log4j.logger.org.apache.zookeeper=INFO

    # Change the two lines below to adjust the general broker logging level (output to server.log and stdout)
    log4j.logger.kafka=INFO
    log4j.logger.org.apache.kafka=INFO

    # Change to DEBUG or TRACE to enable request logging
    log4j.logger.kafka.request.logger=WARN, requestAppender
    log4j.additivity.kafka.request.logger=false

    # Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output
    # related to the handling of requests
    #log4j.logger.kafka.network.Processor=TRACE, requestAppender
    #log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
    #log4j.additivity.kafka.server.KafkaApis=false
    log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
    log4j.additivity.kafka.network.RequestChannel$=false

    log4j.logger.kafka.controller=TRACE, controllerAppender
    log4j.additivity.kafka.controller=false

    log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
    log4j.additivity.kafka.log.LogCleaner=false

    log4j.logger.state.change.logger=TRACE, stateChangeAppender
    log4j.additivity.state.change.logger=false

    # Change to DEBUG to enable audit log for the authorizer
    log4j.logger.kafka.authorizer.logger=WARN, authorizerAppender
    log4j.additivity.kafka.authorizer.logger=false
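
Worth noting about init.sh above: StatefulSet pods get ordinal-suffixed hostnames (kafka-0, kafka-1, ...), and the shell expansion ${HOSTNAME##*-} strips everything through the last dash, leaving just the ordinal. The sed call then replaces the literal ${KAFKA_BROKER_ID} placeholder in server.properties with that value. A minimal sketch of the same substitution, runnable outside the cluster; the hostname kafka-2 and the /tmp path are made-up stand-ins, not part of the commit:

#!/bin/bash
# Stand-in for the pod hostname a StatefulSet would assign.
HOSTNAME=kafka-2

# ${HOSTNAME##*-} deletes the longest prefix ending in "-",
# leaving the pod ordinal: "2".
export KAFKA_BROKER_ID=${HOSTNAME##*-}

# Stand-in for /etc/kafka/server.properties from the ConfigMap;
# single quotes keep the placeholder literal.
printf 'broker.id=${KAFKA_BROKER_ID}\n' > /tmp/server.properties

# Same sed invocation as init.sh, pointed at the stand-in file.
sed -i "s/\${KAFKA_BROKER_ID}/$KAFKA_BROKER_ID/" /tmp/server.properties

cat /tmp/server.properties   # prints: broker.id=2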

10pvc.yml

Lines changed: 0 additions & 48 deletions
This file was deleted.

30service.yml

Lines changed: 0 additions & 11 deletions
This file was deleted.

50kafka.yml

Lines changed: 42 additions & 8 deletions
@@ -10,23 +10,57 @@ spec:
     metadata:
       labels:
         app: kafka
+      annotations:
     spec:
-      terminationGracePeriodSeconds: 10
+      terminationGracePeriodSeconds: 30
+      initContainers:
+      - name: init-config
+        image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce
+        command: ['/bin/bash', '/etc/kafka/init.sh']
+        volumeMounts:
+        - name: config
+          mountPath: /etc/kafka
       containers:
       - name: broker
-        image: solsson/kafka-persistent:0.10.1@sha256:0719b4688b666490abf4b32a3cc5c5da7bb2d6276b47377b35de5429f783e9c2
+        image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce
+        env:
+        - name: KAFKA_LOG4J_OPTS
+          value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties
         ports:
         - containerPort: 9092
         command:
-        - sh
-        - -c
-        - "./bin/kafka-server-start.sh config/server.properties --override broker.id=$(hostname | awk -F'-' '{print $2}')"
+        - ./bin/kafka-server-start.sh
+        - /etc/kafka/server.properties
+        - --override
+        - zookeeper.connect=zookeeper:2181
+        - --override
+        - log.retention.hours=-1
+        - --override
+        - log.dirs=/var/lib/kafka/data/topics
+        - --override
+        - auto.create.topics.enable=false
+        resources:
+          requests:
+            cpu: 100m
+            memory: 512Mi
+        livenessProbe:
+          exec:
+            command:
+            - /bin/sh
+            - -c
+            - 'echo "" | nc -w 1 127.0.0.1 9092'
         volumeMounts:
-        - name: datadir
-          mountPath: /opt/kafka/data
+        - name: config
+          mountPath: /etc/kafka
+        - name: data
+          mountPath: /var/lib/kafka/data
+      volumes:
+      - name: config
+        configMap:
+          name: broker-config
   volumeClaimTemplates:
   - metadata:
-      name: datadir
+      name: data
     spec:
       accessModes: [ "ReadWriteOnce" ]
       resources:
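
Two details of the new pod spec are easy to miss. The command is now an argv list, so each --override name=value reaches kafka-server-start.sh as its own argument with no shell quoting, and those overrides take precedence over the same keys in /etc/kafka/server.properties (which is how log.dirs ends up on the data volume instead of the file's /tmp/kafka-logs default). The livenessProbe is a bare TCP check: nc exits 0 only if port 9092 accepts a connection within a second. A rough smoke test along those lines, assuming the manifests are applied and kafka-0 is running; these kubectl calls are illustrative, not part of the commit:

# Run the same check as the livenessProbe, by hand.
kubectl -n kafka exec kafka-0 -- /bin/sh -c 'echo "" | nc -w 1 127.0.0.1 9092' \
  && echo "broker port 9092 is accepting connections"

# init.sh should have resolved the broker.id placeholder to the pod ordinal.
kubectl -n kafka exec kafka-0 -- grep '^broker.id' /etc/kafka/server.properties

# The log.dirs override wins over server.properties, so topic data
# should land under the persistent volume, not /tmp/kafka-logs.
kubectl -n kafka exec kafka-0 -- ls /var/lib/kafka/data/topics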
