Use config files and kafka-jre based build for Confluent Platform services #54

Merged (7 commits, Aug 1, 2017)
11confluent-config.yml (new file: 67 additions, 0 deletions)
```yaml
kind: ConfigMap
metadata:
  name: confluent-config
  namespace: kafka
apiVersion: v1
data:
  schema-registry.properties: |-
    # Copyright 2014 Confluent Inc.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    # http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.

    listeners=http://0.0.0.0:80
    kafkastore.connection.url=zookeeper:2181
    kafkastore.topic=_schemas
    debug=false

  kafka-rest.properties: |-
    ##
    # Copyright 2015 Confluent Inc.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    # http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
    ##

    #id=kafka-rest-test-server
    listeners=http://0.0.0.0:80
    bootstrap.servers=kafka-0.broker.kafka.svc.cluster.local:9092,kafka-1.broker.kafka.svc.cluster.local:9092,kafka-2.broker.kafka.svc.cluster.local:9092
    zookeeper.connect=zookeeper:2181
    schema.registry.url=http://schemas.kafka.svc.cluster.local:80
    #
    # Configure interceptor classes for sending consumer and producer metrics to Confluent Control Center
    # Make sure that monitoring-interceptors-<version>.jar is on the Java class path
    #consumer.interceptor.classes=io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor
    #producer.interceptor.classes=io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor

  log4j.properties: |-
    log4j.rootLogger=INFO, stdout

    log4j.appender.stdout=org.apache.log4j.ConsoleAppender
    log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
    log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n

    log4j.logger.kafka=ERROR, stdout
    log4j.logger.org.apache.zookeeper=ERROR, stdout
    log4j.logger.org.apache.kafka=ERROR, stdout
    log4j.logger.org.I0Itec.zkclient=ERROR, stdout
    log4j.additivity.kafka.server=false
    log4j.additivity.kafka.consumer.ZookeeperConsumerConnector=false
```
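
Both services below mount this ConfigMap, so it is worth sanity-checking before rolling the Deployments. A minimal sketch, assuming kubectl access to the kafka namespace:

```sh
# Create or update the ConfigMap, then verify that all three keys and their contents landed
kubectl apply -f 11confluent-config.yml
kubectl -n kafka describe configmap confluent-config
```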
61schemas.yml (14 additions, 8 deletions)
```diff
@@ -11,14 +11,20 @@ spec:
         app: schema-registry
     spec:
       containers:
-      - name: cp-schema-registry
-        image: confluentinc/cp-schema-registry@sha256:ac1eb34d9a60ce8904eb1bc01fd94bf1f6513924ca507734679d4b513133714c
+      - name: cp
+        image: solsson/kafka-cp@sha256:a22047b9e8bf4b8badfd2fbba47f2d1acdcbb84dfb03c61a15e1ac203036cedf
         env:
-        - name: SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL
-          value: zookeeper:2181
-        - name: SCHEMA_REGISTRY_HOST_NAME
-          value: schemas
-        - name: SCHEMA_REGISTRY_LISTENERS
-          value: http://0.0.0.0:80
+        - name: SCHEMA_REGISTRY_LOG4J_OPTS
+          value: -Dlog4j.configuration=file:/etc/schema-registry/log4j.properties
+        command:
+        - schema-registry-start
+        - /etc/schema-registry/schema-registry.properties
         ports:
         - containerPort: 80
+        volumeMounts:
+        - name: config
+          mountPath: /etc/schema-registry
+      volumes:
+      - name: config
+        configMap:
+          name: confluent-config
```
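
With the properties file mounted, the registry no longer needs the SCHEMA_REGISTRY_* environment variables. One way to confirm it came up against the mounted config, a sketch where the `schemas` service name is an assumption taken from the `schema.registry.url` in the ConfigMap above:

```sh
# From any pod in the cluster: a fresh registry should answer with an empty subject list
curl -s http://schemas.kafka.svc.cluster.local/subjects
# => []
```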
71rest.yml (14 additions, 10 deletions)
```diff
@@ -11,16 +11,20 @@ spec:
         app: kafka-rest
     spec:
       containers:
-      - name: cp-kafka-rest
-        image: confluentinc/cp-kafka-rest@sha256:aa213c1a67eae6ce9836b52a9b5ecee4d6a0b44f2b9cc69f4e4de85131462f1d
+      - name: cp
+        image: solsson/kafka-cp@sha256:a22047b9e8bf4b8badfd2fbba47f2d1acdcbb84dfb03c61a15e1ac203036cedf
         env:
-        - name: KAFKA_REST_ZOOKEEPER_CONNECT
-          value: zookeeper:2181
-        - name: KAFKA_REST_HOST_NAME
-          value: rest
-        - name: KAFKA_REST_LISTENERS
-          value: http://0.0.0.0:80
-        - name: KAFKA_REST_SCHEMA_REGISTRY_URL
-          value: http://schemas.kafka.svc.cluster.local:80
+        - name: KAFKAREST_LOG4J_OPTS
+          value: -Dlog4j.configuration=file:/etc/kafka-rest/log4j.properties
+        command:
+        - kafka-rest-start
+        - /etc/kafka-rest/kafka-rest.properties
         ports:
         - containerPort: 80
+        volumeMounts:
+        - name: config
+          mountPath: /etc/kafka-rest
+      volumes:
+      - name: config
+        configMap:
+          name: confluent-config
```
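
The same pattern applies here: configuration moves from KAFKA_REST_* env vars into the mounted properties file. A quick liveness check, a sketch that assumes the proxy is exposed as a `rest` service in the `kafka` namespace, matching the removed KAFKA_REST_HOST_NAME value:

```sh
# From any pod in the cluster: list topics through the REST proxy
curl -s -H 'Accept: application/vnd.kafka.v2+json' \
  http://rest.kafka.svc.cluster.local/topics
```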
test/rest-curl.yml (23 additions, 11 deletions)
```diff
@@ -12,31 +12,43 @@ data:
       # Keep starting up until rest proxy is up and running
       curl --retry 10 --retry-delay 30 --retry-connrefused -I -s $REST

       curl -s -H 'Accept: application/vnd.kafka.v2+json' $REST/brokers | egrep '."brokers":.0'

-      curl -H 'Accept: application/vnd.kafka.v2+json' $REST/topics
+      curl -s -H 'Accept: application/vnd.kafka.v2+json' $REST/topics
+      echo ""

-      curl --retry 10 -H 'Accept: application/vnd.kafka.v2+json' $REST/topics/$TOPIC
+      curl -s -H 'Accept: application/vnd.kafka.v2+json' $REST/topics/$TOPIC
+      echo ""

       curl -X POST \
         -H "Content-Type: application/vnd.kafka.json.v2+json" -H "Accept: application/vnd.kafka.v2+json" \
         --data "{\"records\":[{\"value\":\"Test from $HOSTNAME at $(date -u -Iseconds)\"}]}" \
-        $REST/topics/$TOPIC -v --max-time 30 \
-        || echo " (timeout might be ok because we only want to send one message)"
-      # TODO why does the above block?
+        $REST/topics/$TOPIC
+      echo ""

-      curl --retry 10 -H 'Accept: application/vnd.kafka.v2+json' $REST/topics/$TOPIC/partitions
+      curl -s -H 'Accept: application/vnd.kafka.v2+json' $REST/topics/$TOPIC/partitions
+      echo ""

-      curl -X POST -H "Content-Type: application/vnd.kafka.v2+json" --data '{"name": "my_consumer_instance", "format": "json", "auto.offset.reset": "earliest"}' $REST/consumers/my_json_consumer -v;
+      curl -X POST \
+        -H "Content-Type: application/vnd.kafka.v2+json" \
+        --data '{"name": "my_consumer_instance", "format": "json", "auto.offset.reset": "earliest"}' \
+        $REST/consumers/my_json_consumer
+      echo ""

-      curl -X POST -H "Content-Type: application/vnd.kafka.v2+json" --data "{\"topics\":[\"$TOPIC\"]}" $REST/consumers/my_json_consumer/instances/my_consumer_instance/subscription -v;
+      curl -X POST \
+        -H "Content-Type: application/vnd.kafka.v2+json" \
+        --data "{\"topics\":[\"$TOPIC\"]}" \
+        $REST/consumers/my_json_consumer/instances/my_consumer_instance/subscription \
+        -w "%{http_code}"
+      echo ""

-      curl -X GET -H "Accept: application/vnd.kafka.json.v2+json" $REST/consumers/my_json_consumer/instances/my_consumer_instance/records -v;
+      curl -X GET \
+        -H "Accept: application/vnd.kafka.json.v2+json" \
+        $REST/consumers/my_json_consumer/instances/my_consumer_instance/records

-      curl -X DELETE -H "Content-Type: application/vnd.kafka.v2+json" $REST/consumers/my_json_consumer/instances/my_consumer_instance -v;
+      curl -X DELETE \
+        -H "Content-Type: application/vnd.kafka.v2+json" \
+        $REST/consumers/my_json_consumer/instances/my_consumer_instance

       tail -f /tmp/testlog
```
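
The script exercises the full REST proxy v2 consumer lifecycle: create an instance, subscribe it to $TOPIC, poll records, then delete it. To run the test and watch the result, a sketch where the `test-kafka` namespace and the pod name are assumptions; check `kubectl get pods` for the actual name:

```sh
kubectl apply -f test/rest-curl.yml
kubectl -n test-kafka get pods                 # find the rest-curl pod
kubectl -n test-kafka logs -f <rest-curl-pod>  # the script tails /tmp/testlog, so the log shows each response
```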
