@@ -7,24 +7,23 @@ data:
init.sh : |-
#!/bin/bash
set -x
+ cp /etc/kafka-configmap/log4j.properties /etc/kafka/

KAFKA_BROKER_ID=${HOSTNAME##*-}
- cp -Lur /etc/kafka-configmap/* /etc/kafka/
- sed -i "s/#init#broker.id=#init#/broker.id=$KAFKA_BROKER_ID/" /etc/kafka/server.properties
-
+ SEDS=("s/#init#broker.id=#init#/broker.id=$KAFKA_BROKER_ID/")
LABELS="kafka-broker-id=$KAFKA_BROKER_ID"
ANNOTATIONS=""

hash kubectl 2>/dev/null || {
- sed -i "s/#init#broker.rack=#init#/#init#broker.rack=# kubectl not found in path/" /etc/kafka/server.properties
+ SEDS+=( "s/#init#broker.rack=#init#/#init#broker.rack=# kubectl not found in path/")
} && {
ZONE=$(kubectl get node "$NODE_NAME" -o=go-template='{{index .metadata.labels "failure-domain.beta.kubernetes.io/zone"}}')
if [ $? -ne 0 ]; then
- sed -i "s/#init#broker.rack=#init#/#init#broker.rack=# zone lookup failed, see -c init-config logs/" /etc/kafka/server.properties
+ SEDS+=( "s/#init#broker.rack=#init#/#init#broker.rack=# zone lookup failed, see -c init-config logs/")
elif [ "x$ZONE" == "x<no value>" ]; then
- sed -i "s/#init#broker.rack=#init#/#init#broker.rack=# zone label not found for node $NODE_NAME/" /etc/kafka/server.properties
+ SEDS+=( "s/#init#broker.rack=#init#/#init#broker.rack=# zone label not found for node $NODE_NAME/")
else
- sed -i "s/#init#broker.rack=#init#/broker.rack=$ZONE/" /etc/kafka/server.properties
+ SEDS+=( "s/#init#broker.rack=#init#/broker.rack=$ZONE/")
LABELS="$LABELS kafka-broker-rack=$ZONE"
fi
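Note how the rack branch above degrades gracefully: every failure mode is recorded as a commented-out broker.rack line in the rendered server.properties rather than aborting init. The "x<no value>" comparison works because Go templates print exactly the string <no value> when index hits a missing map key. A sketch of the lookup against a hypothetical node name:

    # Hypothetical node; prints the zone label, or "<no value>" if the label is absent.
    kubectl get node my-node-1 \
      -o=go-template='{{index .metadata.labels "failure-domain.beta.kubernetes.io/zone"}}'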

@@ -33,7 +32,7 @@ data:
echo "Outside (i.e. cluster-external access) host lookup command failed"
else
OUTSIDE_PORT=3240${KAFKA_BROKER_ID}
- sed -i "s|#init#advertised.listeners=OUTSIDE://#init#|advertised.listeners=OUTSIDE://${OUTSIDE_HOST}:${OUTSIDE_PORT}|" /etc/kafka/server.properties
+ SEDS+=( "s|#init#advertised.listeners=OUTSIDE://#init#|advertised.listeners=OUTSIDE://${OUTSIDE_HOST}:${OUTSIDE_PORT}|")
ANNOTATIONS="$ANNOTATIONS kafka-listener-outside-host=$OUTSIDE_HOST kafka-listener-outside-port=$OUTSIDE_PORT"
fi
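OUTSIDE_PORT is string concatenation rather than arithmetic, so broker 0 advertises 32400, broker 1 advertises 32401, and so on; these land in the default Kubernetes NodePort range (30000-32767), presumably matched by one external NodePort service per broker. For example:

    KAFKA_BROKER_ID=1
    OUTSIDE_PORT=3240${KAFKA_BROKER_ID}   # "3240" + "1" = 32401, not 3240*1
    echo "$OUTSIDE_PORT"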

@@ -44,6 +43,8 @@ data:
kubectl -n $POD_NAMESPACE annotate pod $POD_NAME $ANNOTATIONS || echo "Failed to annotate $POD_NAMESPACE.$POD_NAME - RBAC issue?"
fi
}
+ printf '%s\n' "${SEDS[@]}" | sed -f - /etc/kafka-configmap/server.properties > /etc/kafka/server.properties.tmp
+ [ $? -eq 0 ] && mv /etc/kafka/server.properties.tmp /etc/kafka/server.properties
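The net effect of the init.sh changes: instead of copying the ConfigMap and patching the copy with repeated sed -i calls, the script now collects every substitution in the SEDS array and applies them in a single pass. printf '%s\n' emits one expression per line, sed -f - reads that list as a sed script from stdin, and the guarded mv only promotes the temp file when sed exits cleanly, so a half-rendered server.properties never replaces a good one. A minimal self-contained sketch of the same pattern (file names and values here are hypothetical):

    #!/bin/bash
    # Broker id from the StatefulSet ordinal, as in init.sh: kafka-2 -> 2.
    HOSTNAME=kafka-2
    KAFKA_BROKER_ID=${HOSTNAME##*-}

    # Collect substitutions for the #init# placeholders, then apply them all at once.
    SEDS=("s/#init#broker.id=#init#/broker.id=$KAFKA_BROKER_ID/")
    SEDS+=("s/#init#broker.rack=#init#/broker.rack=zone-a/")

    printf '%s\n' "${SEDS[@]}" | sed -f - server.properties.in > server.properties.tmp
    [ $? -eq 0 ] && mv server.properties.tmp server.properties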

server.properties : |-
############################# Log Basics #############################
@@ -65,7 +66,7 @@ data:

# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
- num.recovery.threads.per.data.dir=1
+ # num.recovery.threads.per.data.dir=1

############################# Server Basics #############################

@@ -76,7 +77,7 @@ data:

############################# Socket Server Settings #############################

- # The address the socket server listens on. It will get the value returned from 
+ # The address the socket server listens on. It will get the value returned from
# java.net.InetAddress.getCanonicalHostName() if not configured.
# FORMAT:
# listeners = listener_name://host_name:port
@@ -85,7 +86,7 @@ data:
#listeners=PLAINTEXT://:9092
listeners=OUTSIDE://:9094,PLAINTEXT://:9092

- # Hostname and port the broker will advertise to producers and consumers. If not set, 
+ # Hostname and port the broker will advertise to producers and consumers. If not set,
# it uses the value for "listeners" if configured. Otherwise, it will use the value
# returned from java.net.InetAddress.getCanonicalHostName().
#advertised.listeners=PLAINTEXT://your.host.name:9092
@@ -97,19 +98,19 @@ data:
inter.broker.listener.name=PLAINTEXT

# The number of threads that the server uses for receiving requests from the network and sending responses to the network
- num.network.threads=3
+ # num.network.threads=3

# The number of threads that the server uses for processing requests, which may include disk I/O
- num.io.threads=8
+ # num.io.threads=8

# The send buffer (SO_SNDBUF) used by the socket server
- socket.send.buffer.bytes=102400
+ # socket.send.buffer.bytes=102400

# The receive buffer (SO_RCVBUF) used by the socket server
- socket.receive.buffer.bytes=102400
+ # socket.receive.buffer.bytes=102400

# The maximum size of a request that the socket server will accept (protection against OOM)
- socket.request.max.bytes=104857600
+ # socket.request.max.bytes=104857600

############################# Internal Topic Settings #############################
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
@@ -125,7 +126,7 @@ data:
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
- # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to exceessive seeks.
+ # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.

@@ -142,19 +143,22 @@ data:
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.

+ # https://cwiki.apache.org/confluence/display/KAFKA/KIP-186%3A+Increase+offsets+retention+default+to+7+days
+ offsets.retention.minutes=10080
+
# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours=-1

- # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
- # segments don't drop below log.retention.bytes. Functions independently of log.retention.hours.
+ # A size-based retention policy for logs. Segments are pruned from the log unless the remaining
+ # segments drop below log.retention.bytes. Functions independently of log.retention.hours.
#log.retention.bytes=1073741824

# The maximum size of a log segment file. When this size is reached a new log segment will be created.
- log.segment.bytes=1073741824
+ # log.segment.bytes=1073741824

# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
- log.retention.check.interval.ms=300000
+ # log.retention.check.interval.ms=300000

############################# Zookeeper #############################

@@ -166,7 +170,7 @@ data:
zookeeper.connect=zookeeper:2181

# Timeout in ms for connecting to zookeeper
- zookeeper.connection.timeout.ms=6000
+ # zookeeper.connection.timeout.ms=6000


############################# Group Coordinator Settings #############################
@@ -176,7 +180,7 @@ data:
# The default value for this is 3 seconds.
# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
- group.initial.rebalance.delay.ms=0
+ # group.initial.rebalance.delay.ms=0
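Most of the keys this commit comments out in server.properties (num.network.threads, num.io.threads, the socket buffer and request-size limits, log.segment.bytes, log.retention.check.interval.ms) restate broker defaults, so turning them into comments leaves behavior unchanged and makes the file assert only deliberate choices such as log.retention.hours=-1 (no time-based segment deletion). The exceptions are group.initial.rebalance.delay.ms, which reverts from the quick-start value 0 to the broker default of 3 seconds as the comment above recommends for production, and the added offsets.retention.minutes=10080 (7 days x 1440 minutes) per KIP-186. One way to list just the values the rendered file actually sets, once init.sh has written it (path as used by the script above):

    # Show effective (non-comment, non-blank) entries of the rendered config.
    grep -Ev '^[[:space:]]*(#|$)' /etc/kafka/server.properties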

log4j.properties : |-
# Unspecified loggers and loggers with additivity=true output to server.log and stdout