Commit 810c2a0

Merge branch '4.x', Kafka 1.0.1 + new init script sed concept
2 parents 25718a9 + 9c69c9b commit 810c2a0

File tree

12 files changed: +37 -37 lines changed

2 files renamed without changes.

README.md

Lines changed: 2 additions & 0 deletions

@@ -13,6 +13,7 @@ To quote [@arthurk](https://github.com/Yolean/kubernetes-kafka/issues/82#issueco
 
 We suggest you `apply -f` manifests in the following order:
 * Your choice of storage classes from [./configure](./configure/)
+* [namespace](./00-namespace.yml)
 * [./rbac-namespace-default](./rbac-namespace-default/)
 * [./zookeeper](./zookeeper/)
 * [./kafka](./kafka/)
@@ -29,6 +30,7 @@ If you begin to rely on this kafka setup we recommend you fork, for example to e
 | tag | k8s ≥ | highlights |
 | ----- | ------ | ---------- |
 | 4.x | 1.9+ | Kafka 1.1 dynamic config |
+| v4.1 | 1.9+ | Kafka 1.0.1, new [default](#148) [config](#170) |
 | v3.2 | 1.9.4, 1.8.9, 1.7.14 | Required for read-only ConfigMaps [#162](https://github.com/Yolean/kubernetes-kafka/issues/162) [#163](https://github.com/Yolean/kubernetes-kafka/pull/163) [k8s #58720](https://github.com/kubernetes/kubernetes/pull/58720) |
 | v3.1 | 1.8 | The painstaking path to `min.insync.replicas`=2 |
 | v3.0 | 1.8 | [Outside access](#78), [modern manifests](#84), [bootstrap.kafka](#52) |
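Applied top to bottom, the suggested order amounts to roughly the following (a sketch; the storage-class file name under `./configure` is hypothetical and depends on your cluster):

```bash
# Sketch of the apply order suggested in the README.
kubectl apply -f configure/my-storageclass.yml   # hypothetical name: pick your storage class
kubectl apply -f 00-namespace.yml
kubectl apply -f rbac-namespace-default/
kubectl apply -f zookeeper/
kubectl apply -f kafka/
```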

kafka/10broker-config.yml

Lines changed: 27 additions & 23 deletions

@@ -7,24 +7,23 @@ data:
   init.sh: |-
     #!/bin/bash
     set -x
+    cp /etc/kafka-configmap/log4j.properties /etc/kafka/
 
     KAFKA_BROKER_ID=${HOSTNAME##*-}
-    cp -Lur /etc/kafka-configmap/* /etc/kafka/
-    sed -i "s/#init#broker.id=#init#/broker.id=$KAFKA_BROKER_ID/" /etc/kafka/server.properties
-
+    SEDS=("s/#init#broker.id=#init#/broker.id=$KAFKA_BROKER_ID/")
     LABELS="kafka-broker-id=$KAFKA_BROKER_ID"
     ANNOTATIONS=""
 
     hash kubectl 2>/dev/null || {
-      sed -i "s/#init#broker.rack=#init#/#init#broker.rack=# kubectl not found in path/" /etc/kafka/server.properties
+      SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# kubectl not found in path/")
     } && {
       ZONE=$(kubectl get node "$NODE_NAME" -o=go-template='{{index .metadata.labels "failure-domain.beta.kubernetes.io/zone"}}')
       if [ $? -ne 0 ]; then
-        sed -i "s/#init#broker.rack=#init#/#init#broker.rack=# zone lookup failed, see -c init-config logs/" /etc/kafka/server.properties
+        SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone lookup failed, see -c init-config logs/")
       elif [ "x$ZONE" == "x<no value>" ]; then
-        sed -i "s/#init#broker.rack=#init#/#init#broker.rack=# zone label not found for node $NODE_NAME/" /etc/kafka/server.properties
+        SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone label not found for node $NODE_NAME/")
       else
-        sed -i "s/#init#broker.rack=#init#/broker.rack=$ZONE/" /etc/kafka/server.properties
+        SEDS+=("s/#init#broker.rack=#init#/broker.rack=$ZONE/")
         LABELS="$LABELS kafka-broker-rack=$ZONE"
       fi
@@ -33,7 +32,7 @@ data:
         echo "Outside (i.e. cluster-external access) host lookup command failed"
       else
         OUTSIDE_PORT=3240${KAFKA_BROKER_ID}
-        sed -i "s|#init#advertised.listeners=OUTSIDE://#init#|advertised.listeners=OUTSIDE://${OUTSIDE_HOST}:${OUTSIDE_PORT}|" /etc/kafka/server.properties
+        SEDS+=("s|#init#advertised.listeners=OUTSIDE://#init#|advertised.listeners=OUTSIDE://${OUTSIDE_HOST}:${OUTSIDE_PORT}|")
         ANNOTATIONS="$ANNOTATIONS kafka-listener-outside-host=$OUTSIDE_HOST kafka-listener-outside-port=$OUTSIDE_PORT"
       fi
@@ -44,6 +43,8 @@ data:
         kubectl -n $POD_NAMESPACE annotate pod $POD_NAME $ANNOTATIONS || echo "Failed to annotate $POD_NAMESPACE.$POD_NAME - RBAC issue?"
       fi
     }
+    printf '%s\n' "${SEDS[@]}" | sed -f - /etc/kafka-configmap/server.properties > /etc/kafka/server.properties.tmp
+    [ $? -eq 0 ] && mv /etc/kafka/server.properties.tmp /etc/kafka/server.properties
 
   server.properties: |-
     ############################# Log Basics #############################
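This is the headline change in init.sh: instead of copying the whole configmap and mutating server.properties in place with repeated `sed -i`, the script collects substitutions in a bash array and applies them in a single pass, reading the template straight from the configmap mount and replacing the target only if `sed` succeeds. A minimal standalone sketch of the concept (file names here are illustrative, not the manifest's):

```bash
#!/bin/bash
# Collect sed expressions as conditions are evaluated...
SEDS=("s/#init#broker.id=#init#/broker.id=0/")
SEDS+=("s/#init#broker.rack=#init#/broker.rack=europe-west1-b/")

# ...then apply them all at once: printf emits one expression per line,
# and `sed -f -` reads that list as its script from stdin.
printf '%s\n' "${SEDS[@]}" | sed -f - server.properties.template > server.properties.tmp
[ $? -eq 0 ] && mv server.properties.tmp server.properties
```

A likely upside (an inference; the commit message only calls it a "new init script sed concept"): the configmap mount is never written to, which suits the read-only ConfigMaps that the v3.2 tag exists to support.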
@@ -65,7 +66,7 @@ data:
 
     # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
     # This value is recommended to be increased for installations with data dirs located in RAID array.
-    num.recovery.threads.per.data.dir=1
+    #num.recovery.threads.per.data.dir=1
 
     ############################# Server Basics #############################
@@ -76,7 +77,7 @@ data:
 
     ############################# Socket Server Settings #############################
 
-    # The address the socket server listens on. It will get the value returned from 
+    # The address the socket server listens on. It will get the value returned from
     # java.net.InetAddress.getCanonicalHostName() if not configured.
     #   FORMAT:
     #     listeners = listener_name://host_name:port
@@ -85,7 +86,7 @@ data:
     #listeners=PLAINTEXT://:9092
     listeners=OUTSIDE://:9094,PLAINTEXT://:9092
 
-    # Hostname and port the broker will advertise to producers and consumers. If not set, 
+    # Hostname and port the broker will advertise to producers and consumers. If not set,
     # it uses the value for "listeners" if configured. Otherwise, it will use the value
     # returned from java.net.InetAddress.getCanonicalHostName().
     #advertised.listeners=PLAINTEXT://your.host.name:9092
@@ -97,19 +98,19 @@ data:
     inter.broker.listener.name=PLAINTEXT
 
     # The number of threads that the server uses for receiving requests from the network and sending responses to the network
-    num.network.threads=3
+    #num.network.threads=3
 
     # The number of threads that the server uses for processing requests, which may include disk I/O
-    num.io.threads=8
+    #num.io.threads=8
 
     # The send buffer (SO_SNDBUF) used by the socket server
-    socket.send.buffer.bytes=102400
+    #socket.send.buffer.bytes=102400
 
     # The receive buffer (SO_RCVBUF) used by the socket server
-    socket.receive.buffer.bytes=102400
+    #socket.receive.buffer.bytes=102400
 
     # The maximum size of a request that the socket server will accept (protection against OOM)
-    socket.request.max.bytes=104857600
+    #socket.request.max.bytes=104857600
 
     ############################# Internal Topic Settings #############################
     # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
@@ -125,7 +126,7 @@ data:
     # There are a few important trade-offs here:
     #    1. Durability: Unflushed data may be lost if you are not using replication.
     #    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
-    #    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to exceessive seeks.
+    #    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
     # The settings below allow one to configure the flush policy to flush data after a period of time or
     # every N messages (or both). This can be done globally and overridden on a per-topic basis.
@@ -142,19 +143,22 @@ data:
     # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
     # from the end of the log.
 
+    # https://cwiki.apache.org/confluence/display/KAFKA/KIP-186%3A+Increase+offsets+retention+default+to+7+days
+    offsets.retention.minutes=10080
+
     # The minimum age of a log file to be eligible for deletion due to age
     log.retention.hours=-1
 
-    # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
-    # segments don't drop below log.retention.bytes. Functions independently of log.retention.hours.
+    # A size-based retention policy for logs. Segments are pruned from the log unless the remaining
+    # segments drop below log.retention.bytes. Functions independently of log.retention.hours.
     #log.retention.bytes=1073741824
 
     # The maximum size of a log segment file. When this size is reached a new log segment will be created.
-    log.segment.bytes=1073741824
+    #log.segment.bytes=1073741824
 
     # The interval at which log segments are checked to see if they can be deleted according
     # to the retention policies
-    log.retention.check.interval.ms=300000
+    #log.retention.check.interval.ms=300000
 
     ############################# Zookeeper #############################
@@ -166,7 +170,7 @@ data:
     zookeeper.connect=zookeeper:2181
 
     # Timeout in ms for connecting to zookeeper
-    zookeeper.connection.timeout.ms=6000
+    #zookeeper.connection.timeout.ms=6000
 
 
     ############################# Group Coordinator Settings #############################
@@ -176,7 +180,7 @@ data:
     # The default value for this is 3 seconds.
     # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
     # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
-    group.initial.rebalance.delay.ms=0
+    #group.initial.rebalance.delay.ms=0
 
   log4j.properties: |-
     # Unspecified loggers and loggers with additivity=true output to server.log and stdout
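Two details in the server.properties changes deserve a note. `offsets.retention.minutes=10080` pins consumer-offset retention at 7 days (10080 = 7 × 24 × 60), per the linked KIP-186. And a batch of stock values (`num.io.threads`, socket buffers, retention intervals, ...) is commented out rather than pinned, leaving broker defaults in charge; plausibly this also suits the 4.x line's Kafka 1.1 dynamic config (KIP-226), where such values can be changed at runtime. A hypothetical invocation, assuming this repo's `bootstrap.kafka` service:

```bash
# Hypothetical: set a dynamically updatable broker config cluster-wide (Kafka 1.1+).
./bin/kafka-configs.sh --bootstrap-server bootstrap.kafka:9092 \
  --entity-type brokers --entity-default \
  --alter --add-config num.io.threads=8
```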

kafka/50kafka.yml

Lines changed: 1 addition & 1 deletion

@@ -1,4 +1,4 @@
-apiVersion: apps/v1beta2
+apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   name: kafka
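`apps/v1` went GA in Kubernetes 1.9, which lines up with the `k8s ≥ 1.9+` requirement in the README table above. A quick pre-flight check before applying:

```bash
# Confirm the cluster serves the apps/v1 API group version.
kubectl api-versions | grep -x 'apps/v1'
```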

kafka/test/kafkacat.yml

Lines changed: 2 additions & 0 deletions

@@ -83,6 +83,8 @@ spec:
         - test-kafkacat
         - --partitions
         - "1"
+        - --replication-factor
+        - "3"
       restartPolicy: Never
 ---
 apiVersion: apps/v1beta2
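The two added list items are arguments to the test's topic-setup container; written out as a CLI call they amount to roughly this (a sketch — the tool and the zookeeper address are assumptions based on this repo's conventions, not shown in the hunk). The same `--replication-factor` addition follows in produce-consume.yml.

```bash
# Sketch: create the test topic with 3 replicas rather than 1.
./bin/kafka-topics.sh --zookeeper zookeeper:2181 \
  --create --if-not-exists --topic test-kafkacat \
  --partitions 1 --replication-factor 3
```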

kafka/test/produce-consume.yml

Lines changed: 2 additions & 0 deletions

@@ -66,6 +66,8 @@ spec:
         - test-produce-consume
         - --partitions
         - "1"
+        - --replication-factor
+        - "3"
       restartPolicy: Never
 ---
 apiVersion: apps/v1beta2

kafka/test/replication-config.yml

Lines changed: 1 addition & 1 deletion

@@ -36,7 +36,7 @@ data:
       exit 10
     fi
 
-    echo "$(date --iso-8601='ns') $WITH_ONE partitions have one replica and WITH_TWO_OR_MORE have more"
+    echo "$(date --iso-8601='ns') $WITH_ONE partitions have one replica and $WITH_TWO_OR_MORE have more"
     exit 0
 
   quit-on-nonzero-exit.sh: |-

rbac-namespace-default/00namespace.yml

Lines changed: 0 additions & 5 deletions
This file was deleted.

zookeeper/00namespace.yml

Lines changed: 0 additions & 5 deletions
This file was deleted.

zookeeper/50pzoo.yml

Lines changed: 1 addition & 1 deletion

@@ -1,4 +1,4 @@
-apiVersion: apps/v1beta2
+apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   name: pzoo

zookeeper/51zoo.yml

Lines changed: 1 addition & 1 deletion

@@ -1,4 +1,4 @@
-apiVersion: apps/v1beta2
+apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   name: zoo
