-
Notifications
You must be signed in to change notification settings - Fork 52
Expand file tree
/
Copy pathdocker-compose.kafka.yaml
More file actions
168 lines (162 loc) · 5.7 KB
/
docker-compose.kafka.yaml
File metadata and controls
168 lines (162 loc) · 5.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
services:
  zookeeper:
    image: confluentinc/cp-zookeeper:7.5.0
    container_name: kafka-zookeeper
    ports:
      - "2181:2181"
    healthcheck:
      # Zookeeper's "srvr" four-letter command answers on the client port when healthy
      test: echo srvr | nc zookeeper 2181 || exit 1
      start_period: 10s
      retries: 20
      interval: 10s
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
    networks:
      - easymlops_network

  # Kafka broker, all configurations you can take a look
  # at https://docs.confluent.io/platform/current/installation/docker/config-reference.html
  broker:
    image: confluentinc/cp-server:7.5.0
    container_name: kafka-broker
    depends_on:
      zookeeper:
        condition: service_healthy
    ports:
      - "9092:9092"
      - "9101:9101"
    healthcheck:
      # "exit 1" (was "exit -1"): shells truncate -1 to 255 anyway; use a conventional code
      test: nc -z localhost 9092 || exit 1
      start_period: 15s
      interval: 5s
      timeout: 10s
      retries: 10
    environment:
      # ID of the broker in a cluster
      KAFKA_BROKER_ID: 1
      # Connect to Zookeeper for distributed coordination and leader election
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      # Define how clients connect to brokers:
      # broker:29092 for containers on the compose network, localhost:9092 from the host
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092
      # How many copies are maintained for fault tolerance
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      # Schema Registry URL for storing and managing Avro schemas
      KAFKA_CONFLUENT_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      # Confluent Metrics Reporter for Control Center Cluster Monitoring
      KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
      # Use the internal listener (29092), consistent with every other in-network client
      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: "broker:29092"
      CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
      CONFLUENT_METRICS_ENABLE: "true"
      # For fixing the bug replication factor 3 > the number of nodes
      KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
    networks:
      - easymlops_network

  # For managing Avro schemas
  schema-registry:
    image: confluentinc/cp-schema-registry:7.5.0
    container_name: kafka-schema-registry
    depends_on:
      - broker
    ports:
      - "8081:8081"
    healthcheck:
      start_period: 10s
      interval: 10s
      retries: 20
      test: curl --user superUser:superUser --fail --silent --insecure http://localhost:8081/subjects --output /dev/null || exit 1
    environment:
      SCHEMA_REGISTRY_HOST_NAME: schema-registry
      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: "broker:29092"
      SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081
    networks:
      - easymlops_network

  # Kafka Connect
  connect:
    image: confluentinc/cp-kafka-connect:7.5.0
    container_name: kafka-connect
    depends_on:
      broker:
        condition: service_healthy
      schema-registry:
        condition: service_healthy
      zookeeper:
        condition: service_healthy
    ports:
      - "8083:8083"
    environment:
      CONNECT_BOOTSTRAP_SERVERS: "broker:29092"
      CONNECT_REST_ADVERTISED_HOST_NAME: connect
      CONNECT_REST_PORT: 8083
      CONNECT_GROUP_ID: compose-connect-group
      CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs
      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000
      CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets
      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status
      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
      # Quoted: the Compose spec requires boolean-like environment values to be strings
      CONNECT_KEY_CONVERTER_SCHEMAS_ENABLE: "false"
      CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      CONNECT_VALUE_CONVERTER_SCHEMAS_ENABLE: "true"
      CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components,/etc/kafka-connect/jars"
      # AWS credentials for the S3 sink connector, taken from the host environment
      AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID}
      AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY}
    volumes:
      - $PWD/src/streaming/connectors/config/jars/kafka_connect/jars/:/etc/kafka-connect/jars
    command:
      - bash
      - -c
      - |
        echo "Installing connector plugins"
        confluent-hub install --no-prompt confluentinc/kafka-connect-s3:10.5.7
        echo "Launching Kafka Connect worker"
        /etc/confluent/docker/run
    networks:
      - easymlops_network

  # Confluent control center to manage Kafka
  control-center:
    image: confluentinc/cp-enterprise-control-center:7.5.0
    container_name: kafka-control-center
    depends_on:
      - broker
      - schema-registry
      - connect
    ports:
      - "9021:9021"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9021/healthcheck"]  # Adjust the URL and options as needed
      interval: 30s
      timeout: 10s
      retries: 3
    environment:
      CONTROL_CENTER_BOOTSTRAP_SERVERS: "broker:29092"
      # NOTE(review): "debezium:8083" is not defined in this file — presumably it runs in a
      # sibling compose project on the shared external network; confirm, or drop the entry.
      CONTROL_CENTER_CONNECT_CONNECT-DEFAULT_CLUSTER: "connect:8083,debezium:8083"
      CONTROL_CENTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
      CONTROL_CENTER_REPLICATION_FACTOR: 1
      CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS: 1
      CONTROL_CENTER_CONNECT_HEALTHCHECK_ENDPOINT: "/connectors"
      CONFLUENT_METRICS_TOPIC_REPLICATION: 1
    networks:
      - easymlops_network

  # Simulation of sending messages to Kafka topics
  kafka_producer:
    container_name: kafka-producer
    build:
      context: src/producer
      dockerfile: Dockerfile
    env_file:
      - src/producer/.env
    depends_on:
      broker:
        condition: service_healthy
    networks:
      - easymlops_network

networks:
  easymlops_network:
    # Must be created beforehand (e.g. `docker network create easymlops_network`)
    external: true