I set up a simple Kafka environment using Docker Compose, and now I’m facing issues when trying to use Kafka in a Spring Boot application. I’m writing this post in hopes that someone with experience can offer some advice or guidance.
First, let me share the configuration files I’ve created.
version: '3.8'
name: simple-sns
services:
# postgres, redis, kafka-ui configuration
kafka-zookeeper:
build:
context: ./kafka/zookeeper
dockerfile: Dockerfile
image: simple-sns-kafka-zookeeper:latest
restart: always
container_name: 'simple-sns-kafka-zookeeper'
ports:
- '2181:2181'
networks:
- simple-sns
kafka:
build:
context: ./kafka
dockerfile: Dockerfile
image: simple-sns-kafka:latest
container_name: 'simple-sns-kafka'
ports:
- '9092:9092'
- '8083:8083' # Kafka Connect REST API port
volumes:
- kafka-data:/bitnami/kafka/data
depends_on:
- kafka-zookeeper
networks:
- simple-sns
volumes:
redis-data:
postgres-data:
kafka-data:
networks:
simple-sns:
# Use Bitnami Kafka as the base image
FROM bitnami/kafka:latest
COPY server.properties /opt/bitnami/kafka/config/server.properties
# Setting up Kafka data directory
VOLUME ["/bitnami/kafka/data"]
# Expose Kafka default port and Kafka Connect port
EXPOSE 9092 8083
# Kafka execution command
CMD ["kafka-server-start.sh", "/opt/bitnami/kafka/config/server.properties"]
broker.id=1
listeners=PLAINTEXT://:9092
advertised.listeners=PLAINTEXT://kafka:9092, PLAINTEXT://localhost:9092
log.dirs=/bitnami/kafka/data
zookeeper.connect=kafka-zookeeper:2181
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
num.partitions=1
# Use Bitnami Zookeeper as base image
FROM bitnami/zookeeper:latest
# Setting environment variables
ENV ZOO_SERVER_ID=1
ENV ZOO_SERVERS=zookeeper:2888:3888::1
ENV ALLOW_ANONYMOUS_LOGIN=yes
# Copy Zookeeper configuration file
COPY zoo.cfg /opt/bitnami/zookeeper/conf/zoo.cfg
# Set up the Zookeeper data directory (map the directory locally if necessary)
VOLUME ["/bitnami/zookeeper"]
# Zookeeper's default port exposure
EXPOSE 2181 2888 3888
# Zookeeper execution command
CMD ["zkServer.sh", "start-foreground"]
tickTime=2000
dataDir=/bitnami/zookeeper/data
clientPort=2181
initLimit=10
syncLimit=5
server.1=zookeeper:2888:3888
spring:
kafka:
consumer:
bootstrap-servers: localhost:9092
group-id: alarm
auto-offset-reset: latest
key-deserializer: org.apache.kafka.common.serialization.IntegerDeserializer
value-deserializer: org.springframework.kafka.support.serializer.JsonDeserializer
properties:
spring.json.trusted.packages: '*'
listener:
ack-mode: manual
producer:
bootstrap-servers: localhost:9092
key-serializer: org.apache.kafka.common.serialization.IntegerSerializer
value-serializer: org.springframework.kafka.support.serializer.JsonSerializer
topic:
alarm: alarm
After this, here are the logs I’m encountering:
2025-03-02T15:46:04.651+09:00 INFO 4000 --- [simple-sns] [nio-8080-exec-2] o.a.k.clients.producer.ProducerConfig : ProducerConfig values:
acks = -1
auto.include.jmx.reporter = true
batch.size = 16384
bootstrap.servers = [localhost:9092]
buffer.memory = 33554432
client.dns.lookup = use_all_dns_ips
client.id = simple-sns-producer-1
compression.gzip.level = -1
compression.lz4.level = 9
compression.type = none
compression.zstd.level = 3
connections.max.idle.ms = 540000
delivery.timeout.ms = 120000
enable.idempotence = true
enable.metrics.push = true
interceptor.classes = []
key.serializer = class org.apache.kafka.common.serialization.IntegerSerializer
linger.ms = 0
max.block.ms = 60000
max.in.flight.requests.per.connection = 5
max.request.size = 1048576
metadata.max.age.ms = 300000
metadata.max.idle.ms = 300000
metadata.recovery.strategy = none
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
partitioner.adaptive.partitioning.enable = true
partitioner.availability.timeout.ms = 0
partitioner.class = null
partitioner.ignore.keys = false
receive.buffer.bytes = 32768
reconnect.backoff.max.ms = 1000
reconnect.backoff.ms = 50
request.timeout.ms = 30000
retries = 2147483647
retry.backoff.max.ms = 1000
retry.backoff.ms = 100
sasl.client.callback.handler.class = null
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.connect.timeout.ms = null
sasl.login.read.timeout.ms = null
sasl.login.refresh.buffer.seconds = 300
sasl.login.refresh.min.period.seconds = 60
sasl.login.refresh.window.factor = 0.8
sasl.login.refresh.window.jitter = 0.05
sasl.login.retry.backoff.max.ms = 10000
sasl.login.retry.backoff.ms = 100
sasl.mechanism = GSSAPI
sasl.oauthbearer.clock.skew.seconds = 30
sasl.oauthbearer.expected.audience = null
sasl.oauthbearer.expected.issuer = null
sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
sasl.oauthbearer.jwks.endpoint.url = null
sasl.oauthbearer.scope.claim.name = scope
sasl.oauthbearer.sub.claim.name = sub
sasl.oauthbearer.token.endpoint.url = null
security.protocol = PLAINTEXT
security.providers = null
send.buffer.bytes = 131072
socket.connection.setup.timeout.max.ms = 30000
socket.connection.setup.timeout.ms = 10000
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
ssl.endpoint.identification.algorithm = https
ssl.engine.factory.class = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.certificate.chain = null
ssl.keystore.key = null
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLSv1.3
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.certificates = null
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
transaction.timeout.ms = 60000
transactional.id = null
value.serializer = class org.springframework.kafka.support.serializer.JsonSerializer
2025-03-02T15:46:04.676+09:00 INFO 4000 --- [simple-sns] [nio-8080-exec-2] o.a.k.c.t.i.KafkaMetricsCollector : initializing Kafka metrics collector
2025-03-02T15:46:04.685+09:00 INFO 4000 --- [simple-sns] [nio-8080-exec-2] o.a.k.clients.producer.KafkaProducer : [Producer clientId=simple-sns-producer-1] Instantiated an idempotent producer.
2025-03-02T15:46:04.700+09:00 INFO 4000 --- [simple-sns] [nio-8080-exec-2] o.a.kafka.common.utils.AppInfoParser : Kafka version: 3.8.1
2025-03-02T15:46:04.701+09:00 INFO 4000 --- [simple-sns] [nio-8080-exec-2] o.a.kafka.common.utils.AppInfoParser : Kafka commitId: 70d6ff42debf7e17
2025-03-02T15:46:04.701+09:00 INFO 4000 --- [simple-sns] [nio-8080-exec-2] o.a.kafka.common.utils.AppInfoParser : Kafka startTimeMs: 1740897964699
2025-03-02T15:46:04.894+09:00 WARN 4000 --- [simple-sns] [-sns-producer-1] org.apache.kafka.clients.NetworkClient : [Producer clientId=simple-sns-producer-1] Error while fetching metadata with correlation id 1 : {alarm=LEADER_NOT_AVAILABLE}
2025-03-02T15:46:04.895+09:00 INFO 4000 --- [simple-sns] [-sns-producer-1] org.apache.kafka.clients.Metadata : [Producer clientId=simple-sns-producer-1] Cluster ID: RhaEDqBVQkKFTBmAmA48BA
2025-03-02T15:46:04.921+09:00 WARN 4000 --- [simple-sns] [-sns-producer-1] org.apache.kafka.clients.NetworkClient : [Producer clientId=simple-sns-producer-1] Error connecting to node kafka:9092 (id: 1 rack: null)
java.net.UnknownHostException: kafka: nodename nor servname provided, or not known
2025-03-02T15:46:05.029+09:00 WARN 4000 --- [simple-sns] [-sns-producer-1] org.apache.kafka.clients.NetworkClient : [Producer clientId=simple-sns-producer-1] Error connecting to node kafka:9092 (id: 1 rack: null)
java.net.UnknownHostException: kafka
I apologize for any awkwardness in my English as I’m not very familiar with it!
I tried several solutions I found through searching, but I wasn’t able to resolve the issue.
I set up a simple Kafka environment using Docker Compose, and now I’m facing issues when trying to use Kafka in a Spring Boot application. I’m writing this post in hopes that someone with experience can offer some advice or guidance.
First, let me share the configuration files I’ve created.
version: '3.8'
name: simple-sns
services:
# postgres, redis, kafka-ui configuration
kafka-zookeeper:
build:
context: ./kafka/zookeeper
dockerfile: Dockerfile
image: simple-sns-kafka-zookeeper:latest
restart: always
container_name: 'simple-sns-kafka-zookeeper'
ports:
- '2181:2181'
networks:
- simple-sns
kafka:
build:
context: ./kafka
dockerfile: Dockerfile
image: simple-sns-kafka:latest
container_name: 'simple-sns-kafka'
ports:
- '9092:9092'
- '8083:8083' # Kafka Connect REST API port
volumes:
- kafka-data:/bitnami/kafka/data
depends_on:
- kafka-zookeeper
networks:
- simple-sns
volumes:
redis-data:
postgres-data:
kafka-data:
networks:
simple-sns:
# Use Bitnami Kafka as the base image
FROM bitnami/kafka:latest
COPY server.properties /opt/bitnami/kafka/config/server.properties
# Setting up Kafka data directory
VOLUME ["/bitnami/kafka/data"]
# Expose Kafka default port and Kafka Connect port
EXPOSE 9092 8083
# Kafka execution command
CMD ["kafka-server-start.sh", "/opt/bitnami/kafka/config/server.properties"]
broker.id=1
listeners=PLAINTEXT://:9092
advertised.listeners=PLAINTEXT://kafka:9092, PLAINTEXT://localhost:9092
log.dirs=/bitnami/kafka/data
zookeeper.connect=kafka-zookeeper:2181
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
num.partitions=1
# Use Bitnami Zookeeper as base image
FROM bitnami/zookeeper:latest
# Setting environment variables
ENV ZOO_SERVER_ID=1
ENV ZOO_SERVERS=zookeeper:2888:3888::1
ENV ALLOW_ANONYMOUS_LOGIN=yes
# Copy Zookeeper configuration file
COPY zoo.cfg /opt/bitnami/zookeeper/conf/zoo.cfg
# Set up the Zookeeper data directory (map the directory locally if necessary)
VOLUME ["/bitnami/zookeeper"]
# Zookeeper's default port exposure
EXPOSE 2181 2888 3888
# Zookeeper execution command
CMD ["zkServer.sh", "start-foreground"]
tickTime=2000
dataDir=/bitnami/zookeeper/data
clientPort=2181
initLimit=10
syncLimit=5
server.1=zookeeper:2888:3888
spring:
kafka:
consumer:
bootstrap-servers: localhost:9092
group-id: alarm
auto-offset-reset: latest
key-deserializer: org.apache.kafka.common.serialization.IntegerDeserializer
value-deserializer: org.springframework.kafka.support.serializer.JsonDeserializer
properties:
spring.json.trusted.packages: '*'
listener:
ack-mode: manual
producer:
bootstrap-servers: localhost:9092
key-serializer: org.apache.kafka.common.serialization.IntegerSerializer
value-serializer: org.springframework.kafka.support.serializer.JsonSerializer
topic:
alarm: alarm
After this, here are the logs I’m encountering:
2025-03-02T15:46:04.651+09:00 INFO 4000 --- [simple-sns] [nio-8080-exec-2] o.a.k.clients.producer.ProducerConfig : ProducerConfig values:
acks = -1
auto.include.jmx.reporter = true
batch.size = 16384
bootstrap.servers = [localhost:9092]
buffer.memory = 33554432
client.dns.lookup = use_all_dns_ips
client.id = simple-sns-producer-1
compression.gzip.level = -1
compression.lz4.level = 9
compression.type = none
compression.zstd.level = 3
connections.max.idle.ms = 540000
delivery.timeout.ms = 120000
enable.idempotence = true
enable.metrics.push = true
interceptor.classes = []
key.serializer = class org.apache.kafka.common.serialization.IntegerSerializer
linger.ms = 0
max.block.ms = 60000
max.in.flight.requests.per.connection = 5
max.request.size = 1048576
metadata.max.age.ms = 300000
metadata.max.idle.ms = 300000
metadata.recovery.strategy = none
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
partitioner.adaptive.partitioning.enable = true
partitioner.availability.timeout.ms = 0
partitioner.class = null
partitioner.ignore.keys = false
receive.buffer.bytes = 32768
reconnect.backoff.max.ms = 1000
reconnect.backoff.ms = 50
request.timeout.ms = 30000
retries = 2147483647
retry.backoff.max.ms = 1000
retry.backoff.ms = 100
sasl.client.callback.handler.class = null
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.connect.timeout.ms = null
sasl.login.read.timeout.ms = null
sasl.login.refresh.buffer.seconds = 300
sasl.login.refresh.min.period.seconds = 60
sasl.login.refresh.window.factor = 0.8
sasl.login.refresh.window.jitter = 0.05
sasl.login.retry.backoff.max.ms = 10000
sasl.login.retry.backoff.ms = 100
sasl.mechanism = GSSAPI
sasl.oauthbearer.clock.skew.seconds = 30
sasl.oauthbearer.expected.audience = null
sasl.oauthbearer.expected.issuer = null
sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
sasl.oauthbearer.jwks.endpoint.url = null
sasl.oauthbearer.scope.claim.name = scope
sasl.oauthbearer.sub.claim.name = sub
sasl.oauthbearer.token.endpoint.url = null
security.protocol = PLAINTEXT
security.providers = null
send.buffer.bytes = 131072
socket.connection.setup.timeout.max.ms = 30000
socket.connection.setup.timeout.ms = 10000
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
ssl.endpoint.identification.algorithm = https
ssl.engine.factory.class = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.certificate.chain = null
ssl.keystore.key = null
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLSv1.3
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.certificates = null
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
transaction.timeout.ms = 60000
transactional.id = null
value.serializer = class org.springframework.kafka.support.serializer.JsonSerializer
2025-03-02T15:46:04.676+09:00 INFO 4000 --- [simple-sns] [nio-8080-exec-2] o.a.k.c.t.i.KafkaMetricsCollector : initializing Kafka metrics collector
2025-03-02T15:46:04.685+09:00 INFO 4000 --- [simple-sns] [nio-8080-exec-2] o.a.k.clients.producer.KafkaProducer : [Producer clientId=simple-sns-producer-1] Instantiated an idempotent producer.
2025-03-02T15:46:04.700+09:00 INFO 4000 --- [simple-sns] [nio-8080-exec-2] o.a.kafka.common.utils.AppInfoParser : Kafka version: 3.8.1
2025-03-02T15:46:04.701+09:00 INFO 4000 --- [simple-sns] [nio-8080-exec-2] o.a.kafka.common.utils.AppInfoParser : Kafka commitId: 70d6ff42debf7e17
2025-03-02T15:46:04.701+09:00 INFO 4000 --- [simple-sns] [nio-8080-exec-2] o.a.kafka.common.utils.AppInfoParser : Kafka startTimeMs: 1740897964699
2025-03-02T15:46:04.894+09:00 WARN 4000 --- [simple-sns] [-sns-producer-1] org.apache.kafka.clients.NetworkClient : [Producer clientId=simple-sns-producer-1] Error while fetching metadata with correlation id 1 : {alarm=LEADER_NOT_AVAILABLE}
2025-03-02T15:46:04.895+09:00 INFO 4000 --- [simple-sns] [-sns-producer-1] org.apache.kafka.clients.Metadata : [Producer clientId=simple-sns-producer-1] Cluster ID: RhaEDqBVQkKFTBmAmA48BA
2025-03-02T15:46:04.921+09:00 WARN 4000 --- [simple-sns] [-sns-producer-1] org.apache.kafka.clients.NetworkClient : [Producer clientId=simple-sns-producer-1] Error connecting to node kafka:9092 (id: 1 rack: null)
java.net.UnknownHostException: kafka: nodename nor servname provided, or not known
2025-03-02T15:46:05.029+09:00 WARN 4000 --- [simple-sns] [-sns-producer-1] org.apache.kafka.clients.NetworkClient : [Producer clientId=simple-sns-producer-1] Error connecting to node kafka:9092 (id: 1 rack: null)
java.net.UnknownHostException: kafka
I apologize for any awkwardness in my English as I’m not very familiar with it!
I tried several solutions I found through searching, but I wasn’t able to resolve the issue.
I’m leaving the solution here for anyone who encounters the same issue!
kafka/server.properties
listeners=INTERNAL://0.0.0.0:9092,EXTERNAL://0.0.0.0:9093
advertised.listeners=INTERNAL://kafka:9092,EXTERNAL://localhost:9093
listener.security.protocol.map=INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT
docker-compose.yml
services:
kafka:
build:
context: ./kafka
dockerfile: Dockerfile
image: simple-sns-kafka:latest
container_name: 'simple-sns-kafka'
ports:
- '9092:9092'
- '9093:9093' # added code
- '8083:8083' # Kafka Connect REST API Port
kafka:
consumer:
bootstrap-servers: localhost:9093
producer:
bootstrap-servers: localhost:9093
This is my first time using Stack Overflow. If there’s any better way for me to express my gratitude, please let me know!!
In the Spring application properties, `localhost`
refers to the application's own container; it needs to point to the Kafka container instead:
spring:
kafka:
consumer:
bootstrap-servers: kafka:9092
...
producer:
bootstrap-servers: kafka:9092
I usually prefer confluent's Kafka docker-compose, though.