Skip to content

Commit 2d1bad5

Browse files
author
Picred
committed
feat: add logstash as producer, kafka as consumer
1 parent 5bcdc7e commit 2d1bad5

24 files changed

+1324
-33
lines changed

kafka/Dockerfile

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
FROM amazoncorretto:17
LABEL maintainer="Salvo Nicotra"

# key=value form: legacy space-separated ENV is deprecated (LegacyKeyValueFormat)
ENV PATH="/opt/kafka/bin:${PATH}"
ENV KAFKA_DIR="/opt/kafka"
ARG KAFKA_VERSION="2.13-3.7.0"

# Installing Kafka
# ADD is intentional here: it automatically extracts the local tgz into /opt
ADD setup/kafka_${KAFKA_VERSION}.tgz /opt

# Create sym link so ${KAFKA_DIR} points at the versioned install
RUN ln -s /opt/kafka_${KAFKA_VERSION} ${KAFKA_DIR}

# Plain file copies: COPY, not ADD (hadolint DL3020)
COPY kafka-manager.sh ${KAFKA_DIR}/bin/kafka-manager
# Copy all conf here
COPY conf/* ${KAFKA_DIR}/config/

# Make the entrypoint script executable; 755 is sufficient (777 grants
# world-write for no reason)
USER root
RUN chmod 755 ${KAFKA_DIR}/bin/kafka-manager

# NOTE(review): container still runs as root; consider adding a non-root
# USER once kafka-manager's filesystem needs are confirmed.
ENTRYPOINT [ "kafka-manager" ]
Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
# Licensed to the Apache Software Foundation (ASF) under one or more
2+
# contributor license agreements. See the NOTICE file distributed with
3+
# this work for additional information regarding copyright ownership.
4+
# The ASF licenses this file to You under the Apache License, Version 2.0
5+
# (the "License"); you may not use this file except in compliance with
6+
# the License. You may obtain a copy of the License at
7+
#
8+
# http://www.apache.org/licenses/LICENSE-2.0
9+
#
10+
# Unless required by applicable law or agreed to in writing, software
11+
# distributed under the License is distributed on an "AS IS" BASIS,
12+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
# See the License for the specific language governing permissions and
14+
# limitations under the License.
15+
16+
name=local-console-sink
17+
connector.class=org.apache.kafka.connect.file.FileStreamSinkConnector
18+
tasks.max=1
19+
topics=connect-test
Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
# Licensed to the Apache Software Foundation (ASF) under one or more
2+
# contributor license agreements. See the NOTICE file distributed with
3+
# this work for additional information regarding copyright ownership.
4+
# The ASF licenses this file to You under the Apache License, Version 2.0
5+
# (the "License"); you may not use this file except in compliance with
6+
# the License. You may obtain a copy of the License at
7+
#
8+
# http://www.apache.org/licenses/LICENSE-2.0
9+
#
10+
# Unless required by applicable law or agreed to in writing, software
11+
# distributed under the License is distributed on an "AS IS" BASIS,
12+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
# See the License for the specific language governing permissions and
14+
# limitations under the License.
15+
16+
name=local-console-source
17+
connector.class=org.apache.kafka.connect.file.FileStreamSourceConnector
18+
tasks.max=1
19+
topic=connect-test
Lines changed: 89 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,89 @@
1+
##
2+
# Licensed to the Apache Software Foundation (ASF) under one or more
3+
# contributor license agreements. See the NOTICE file distributed with
4+
# this work for additional information regarding copyright ownership.
5+
# The ASF licenses this file to You under the Apache License, Version 2.0
6+
# (the "License"); you may not use this file except in compliance with
7+
# the License. You may obtain a copy of the License at
8+
#
9+
# http://www.apache.org/licenses/LICENSE-2.0
10+
#
11+
# Unless required by applicable law or agreed to in writing, software
12+
# distributed under the License is distributed on an "AS IS" BASIS,
13+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14+
# See the License for the specific language governing permissions and
15+
# limitations under the License.
16+
##
17+
18+
# This file contains some of the configurations for the Kafka Connect distributed worker. This file is intended
19+
# to be used with the examples, and some settings may differ from those used in a production system, especially
20+
# the `bootstrap.servers` and those specifying replication factors.
21+
22+
# A list of host/port pairs to use for establishing the initial connection to the Kafka cluster.
23+
bootstrap.servers=localhost:9092
24+
25+
# unique name for the cluster, used in forming the Connect cluster group. Note that this must not conflict with consumer group IDs
26+
group.id=connect-cluster
27+
28+
# The converters specify the format of data in Kafka and how to translate it into Connect data. Every Connect user will
29+
# need to configure these based on the format they want their data in when loaded from or stored into Kafka
30+
key.converter=org.apache.kafka.connect.json.JsonConverter
31+
value.converter=org.apache.kafka.connect.json.JsonConverter
32+
# Converter-specific settings can be passed in by prefixing the Converter's setting with the converter we want to apply
33+
# it to
34+
key.converter.schemas.enable=true
35+
value.converter.schemas.enable=true
36+
37+
# Topic to use for storing offsets. This topic should have many partitions and be replicated and compacted.
38+
# Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create
39+
# the topic before starting Kafka Connect if a specific topic configuration is needed.
40+
# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value.
41+
# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able
42+
# to run this example on a single-broker cluster and so here we instead set the replication factor to 1.
43+
offset.storage.topic=connect-offsets
44+
offset.storage.replication.factor=1
45+
#offset.storage.partitions=25
46+
47+
# Topic to use for storing connector and task configurations; note that this should be a single partition, highly replicated,
48+
# and compacted topic. Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create
49+
# the topic before starting Kafka Connect if a specific topic configuration is needed.
50+
# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value.
51+
# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able
52+
# to run this example on a single-broker cluster and so here we instead set the replication factor to 1.
53+
config.storage.topic=connect-configs
54+
config.storage.replication.factor=1
55+
56+
# Topic to use for storing statuses. This topic can have multiple partitions and should be replicated and compacted.
57+
# Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create
58+
# the topic before starting Kafka Connect if a specific topic configuration is needed.
59+
# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value.
60+
# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able
61+
# to run this example on a single-broker cluster and so here we instead set the replication factor to 1.
62+
status.storage.topic=connect-status
63+
status.storage.replication.factor=1
64+
#status.storage.partitions=5
65+
66+
# Flush much faster than normal, which is useful for testing/debugging
67+
offset.flush.interval.ms=10000
68+
69+
# List of comma-separated URIs the REST API will listen on. The supported protocols are HTTP and HTTPS.
70+
# Specify hostname as 0.0.0.0 to bind to all interfaces.
71+
# Leave hostname empty to bind to default interface.
72+
# Examples of legal listener lists: HTTP://myhost:8083,HTTPS://myhost:8084
73+
#listeners=HTTP://:8083
74+
75+
# The Hostname & Port that will be given out to other workers to connect to i.e. URLs that are routable from other servers.
76+
# If not set, it uses the value for "listeners" if configured.
77+
#rest.advertised.host.name=
78+
#rest.advertised.port=
79+
#rest.advertised.listener=
80+
81+
# Set to a list of filesystem paths separated by commas (,) to enable class loading isolation for plugins
82+
# (connectors, converters, transformations). The list should consist of top level directories that include
83+
# any combination of:
84+
# a) directories immediately containing jars with plugins and their dependencies
85+
# b) uber-jars with plugins and their dependencies
86+
# c) directories immediately containing the package directory structure of classes of plugins and their dependencies
87+
# Examples:
88+
# plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors,
89+
#plugin.path=
Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
# Licensed to the Apache Software Foundation (ASF) under one or more
2+
# contributor license agreements. See the NOTICE file distributed with
3+
# this work for additional information regarding copyright ownership.
4+
# The ASF licenses this file to You under the Apache License, Version 2.0
5+
# (the "License"); you may not use this file except in compliance with
6+
# the License. You may obtain a copy of the License at
7+
#
8+
# http://www.apache.org/licenses/LICENSE-2.0
9+
#
10+
# Unless required by applicable law or agreed to in writing, software
11+
# distributed under the License is distributed on an "AS IS" BASIS,
12+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
# See the License for the specific language governing permissions and
14+
# limitations under the License.
15+
16+
name=local-file-sink
17+
connector.class=FileStreamSink
18+
tasks.max=1
19+
file=/tmp/test.sink.txt
20+
topics=connect-test
Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
# Licensed to the Apache Software Foundation (ASF) under one or more
2+
# contributor license agreements. See the NOTICE file distributed with
3+
# this work for additional information regarding copyright ownership.
4+
# The ASF licenses this file to You under the Apache License, Version 2.0
5+
# (the "License"); you may not use this file except in compliance with
6+
# the License. You may obtain a copy of the License at
7+
#
8+
# http://www.apache.org/licenses/LICENSE-2.0
9+
#
10+
# Unless required by applicable law or agreed to in writing, software
11+
# distributed under the License is distributed on an "AS IS" BASIS,
12+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
# See the License for the specific language governing permissions and
14+
# limitations under the License.
15+
16+
name=local-file-source
17+
connector.class=FileStreamSource
18+
tasks.max=1
19+
file=/tmp/my-test.txt
20+
topic=connect-test

kafka/conf/connect-log4j.properties

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
# Licensed to the Apache Software Foundation (ASF) under one or more
2+
# contributor license agreements. See the NOTICE file distributed with
3+
# this work for additional information regarding copyright ownership.
4+
# The ASF licenses this file to You under the Apache License, Version 2.0
5+
# (the "License"); you may not use this file except in compliance with
6+
# the License. You may obtain a copy of the License at
7+
#
8+
# http://www.apache.org/licenses/LICENSE-2.0
9+
#
10+
# Unless required by applicable law or agreed to in writing, software
11+
# distributed under the License is distributed on an "AS IS" BASIS,
12+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
# See the License for the specific language governing permissions and
14+
# limitations under the License.
15+
16+
log4j.rootLogger=INFO, stdout, connectAppender
17+
18+
# Send the logs to the console.
19+
#
20+
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
21+
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
22+
23+
# Send the logs to a file, rolling the file at midnight local time. For example, the `File` option specifies the
24+
# location of the log files (e.g. ${kafka.logs.dir}/connect.log), and at midnight local time the file is closed
25+
# and copied in the same directory but with a filename that ends in the `DatePattern` option.
26+
#
27+
log4j.appender.connectAppender=org.apache.log4j.DailyRollingFileAppender
28+
log4j.appender.connectAppender.DatePattern='.'yyyy-MM-dd-HH
29+
log4j.appender.connectAppender.File=${kafka.logs.dir}/connect.log
30+
log4j.appender.connectAppender.layout=org.apache.log4j.PatternLayout
31+
32+
# The `%X{connector.context}` parameter in the layout includes connector-specific and task-specific information
33+
# in the log messages, where appropriate. This makes it easier to identify those log messages that apply to a
34+
# specific connector.
35+
#
36+
connect.log.pattern=[%d] %p %X{connector.context}%m (%c:%L)%n
37+
38+
log4j.appender.stdout.layout.ConversionPattern=${connect.log.pattern}
39+
log4j.appender.connectAppender.layout.ConversionPattern=${connect.log.pattern}
40+
41+
log4j.logger.org.apache.zookeeper=ERROR
42+
log4j.logger.org.reflections=ERROR
Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,59 @@
1+
# Licensed to the Apache Software Foundation (ASF) under one or more
2+
# contributor license agreements. See the NOTICE file distributed with
3+
# this work for additional information regarding copyright ownership.
4+
# The ASF licenses this file to You under the Apache License, Version 2.0
5+
# (the "License"); you may not use this file except in compliance with
6+
# the License. You may obtain a copy of the License at
7+
#
8+
# http://www.apache.org/licenses/LICENSE-2.0
9+
#
10+
# Unless required by applicable law or agreed to in writing, software
11+
# distributed under the License is distributed on an "AS IS" BASIS,
12+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
# See the License for the specific language governing permissions and
14+
# limitations under the License.
15+
# see org.apache.kafka.clients.consumer.ConsumerConfig for more details
16+
17+
# Sample MirrorMaker 2.0 top-level configuration file
18+
# Run with ./bin/connect-mirror-maker.sh connect-mirror-maker.properties
19+
20+
# specify any number of cluster aliases
21+
clusters = A, B
22+
23+
# connection information for each cluster
24+
# This is a comma separated host:port pairs for each cluster
25+
# for e.g. "A_host1:9092, A_host2:9092, A_host3:9092"
26+
A.bootstrap.servers = A_host1:9092, A_host2:9092, A_host3:9092
27+
B.bootstrap.servers = B_host1:9092, B_host2:9092, B_host3:9092
28+
29+
# enable and configure individual replication flows
30+
A->B.enabled = true
31+
32+
# regex which defines which topics gets replicated. For eg "foo-.*"
33+
A->B.topics = .*
34+
35+
B->A.enabled = true
36+
B->A.topics = .*
37+
38+
# Setting replication factor of newly created remote topics
39+
replication.factor=1
40+
41+
############################# Internal Topic Settings #############################
42+
# The replication factor for mm2 internal topics "heartbeats", "B.checkpoints.internal" and
43+
# "mm2-offset-syncs.B.internal"
44+
# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
45+
checkpoints.topic.replication.factor=1
46+
heartbeats.topic.replication.factor=1
47+
offset-syncs.topic.replication.factor=1
48+
49+
# The replication factor for connect internal topics "mm2-configs.B.internal", "mm2-offsets.B.internal" and
50+
# "mm2-status.B.internal"
51+
# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
52+
offset.storage.replication.factor=1
53+
status.storage.replication.factor=1
54+
config.storage.replication.factor=1
55+
56+
# customize as needed
57+
# replication.policy.separator = _
58+
# sync.topic.acls.enabled = false
59+
# emit.heartbeats.interval.seconds = 5
Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
# Licensed to the Apache Software Foundation (ASF) under one or more
2+
# contributor license agreements. See the NOTICE file distributed with
3+
# this work for additional information regarding copyright ownership.
4+
# The ASF licenses this file to You under the Apache License, Version 2.0
5+
# (the "License"); you may not use this file except in compliance with
6+
# the License. You may obtain a copy of the License at
7+
#
8+
# http://www.apache.org/licenses/LICENSE-2.0
9+
#
10+
# Unless required by applicable law or agreed to in writing, software
11+
# distributed under the License is distributed on an "AS IS" BASIS,
12+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
# See the License for the specific language governing permissions and
14+
# limitations under the License.
15+
16+
# These are defaults. This file just demonstrates how to override some settings.
17+
bootstrap.servers=kafkaServer:9092
18+
19+
# The converters specify the format of data in Kafka and how to translate it into Connect data. Every Connect user will
20+
# need to configure these based on the format they want their data in when loaded from or stored into Kafka
21+
key.converter=org.apache.kafka.connect.json.JsonConverter
22+
value.converter=org.apache.kafka.connect.json.JsonConverter
23+
# Converter-specific settings can be passed in by prefixing the Converter's setting with the converter we want to apply
24+
# it to
25+
key.converter.schemas.enable=true
26+
value.converter.schemas.enable=true
27+
28+
offset.storage.file.filename=/tmp/connect.offsets
29+
# Flush much faster than normal, which is useful for testing/debugging
30+
offset.flush.interval.ms=10000
31+
32+
# Set to a list of filesystem paths separated by commas (,) to enable class loading isolation for plugins
33+
# (connectors, converters, transformations). The list should consist of top level directories that include
34+
# any combination of:
35+
# a) directories immediately containing jars with plugins and their dependencies
36+
# b) uber-jars with plugins and their dependencies
37+
# c) directories immediately containing the package directory structure of classes of plugins and their dependencies
38+
# Note: symlinks will be followed to discover dependencies or plugins.
39+
# Examples:
40+
# plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors,
41+
plugin.path=/opt/kafka/libs/connect-file-3.4.0.jar

kafka/conf/consumer.properties

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
# Licensed to the Apache Software Foundation (ASF) under one or more
2+
# contributor license agreements. See the NOTICE file distributed with
3+
# this work for additional information regarding copyright ownership.
4+
# The ASF licenses this file to You under the Apache License, Version 2.0
5+
# (the "License"); you may not use this file except in compliance with
6+
# the License. You may obtain a copy of the License at
7+
#
8+
# http://www.apache.org/licenses/LICENSE-2.0
9+
#
10+
# Unless required by applicable law or agreed to in writing, software
11+
# distributed under the License is distributed on an "AS IS" BASIS,
12+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
# See the License for the specific language governing permissions and
14+
# limitations under the License.
15+
# see org.apache.kafka.clients.consumer.ConsumerConfig for more details
16+
17+
# list of brokers used for bootstrapping knowledge about the rest of the cluster
18+
# format: host1:port1,host2:port2 ...
19+
bootstrap.servers=localhost:9092
20+
21+
# consumer group id
22+
group.id=test-consumer-group
23+
24+
# What to do when there is no initial offset in Kafka or if the current
25+
# offset does not exist any more on the server: latest, earliest, none
26+
#auto.offset.reset=

0 commit comments

Comments
 (0)