与zookeeper建立不起连接
2023-11-16 00:13:14.107 INFO 11 --- [p-nio-80-exec-3] o.a.kafka.common.utils.AppInfoParser : Kafka version: 2.8.1 2023-11-16 00:13:14.107 INFO 11 --- [p-nio-80-exec-3] o.a.kafka.common.utils.AppInfoParser : Kafka commitId: 839b886f9b732b15 2023-11-16 00:13:14.107 INFO 11 --- [p-nio-80-exec-3] o.a.kafka.common.utils.AppInfoParser : Kafka startTimeMs: 1700064794107 2023-11-16 00:13:14.114 INFO 11 --- [p-nio-80-exec-3] org.apache.kafka.clients.Metadata : [Consumer clientId=consumer-null-2, groupId=null] Cluster ID: 3bqQeKeTSb26QfZ7nL2UoQ 2023-11-16 00:13:14.116 INFO 11 --- [p-nio-80-exec-3] org.apache.kafka.common.metrics.Metrics : Metrics scheduler closed 2023-11-16 00:13:14.117 INFO 11 --- [p-nio-80-exec-3] org.apache.kafka.common.metrics.Metrics : Closing reporter org.apache.kafka.common.metrics.JmxReporter 2023-11-16 00:13:14.117 INFO 11 --- [p-nio-80-exec-3] org.apache.kafka.common.metrics.Metrics : Metrics reporters closed 2023-11-16 00:13:14.119 INFO 11 --- [p-nio-80-exec-3] o.a.kafka.common.utils.AppInfoParser : App info kafka.consumer for consumer-null-2 unregistered 2023-11-16 00:13:14.120 INFO 11 --- [p-nio-80-exec-3] o.a.k.clients.admin.AdminClientConfig : AdminClientConfig values: bootstrap.servers = [192.168.1.50:9092, 192.168.1.54:9092, 192.168.1.55:9092] client.dns.lookup = use_all_dns_ips client.id = connections.max.idle.ms = 300000 default.api.timeout.ms = 60000 metadata.max.age.ms = 300000 metric.reporters = [] metrics.num.samples = 2 metrics.recording.level = INFO metrics.sample.window.ms = 30000 receive.buffer.bytes = 65536 reconnect.backoff.max.ms = 1000 reconnect.backoff.ms = 50 request.timeout.ms = 30000 retries = 2147483647 retry.backoff.ms = 100 sasl.client.callback.handler.class = null sasl.jaas.config = null sasl.kerberos.kinit.cmd = /usr/bin/kinit sasl.kerberos.min.time.before.relogin = 60000 sasl.kerberos.service.name = null sasl.kerberos.ticket.renew.jitter = 0.05 sasl.kerberos.ticket.renew.window.factor = 0.8 
sasl.login.callback.handler.class = null sasl.login.class = null sasl.login.refresh.buffer.seconds = 300 sasl.login.refresh.min.period.seconds = 60 sasl.login.refresh.window.factor = 0.8 sasl.login.refresh.window.jitter = 0.05 sasl.mechanism = GSSAPI security.protocol = PLAINTEXT security.providers = null send.buffer.bytes = 131072 socket.connection.setup.timeout.max.ms = 30000 socket.connection.setup.timeout.ms = 10000 ssl.cipher.suites = null ssl.enabled.protocols = [TLSv1.2] ssl.endpoint.identification.algorithm = https ssl.engine.factory.class = null ssl.key.password = null ssl.keymanager.algorithm = SunX509 ssl.keystore.certificate.chain = null ssl.keystore.key = null ssl.keystore.location = null ssl.keystore.password = null ssl.keystore.type = JKS ssl.protocol = TLSv1.2 ssl.provider = null ssl.secure.random.implementation = null ssl.trustmanager.algorithm = PKIX ssl.truststore.certificates = null ssl.truststore.location = null ssl.truststore.password = null ssl.truststore.type = JKS
2023-11-16 00:13:14.123 WARN 11 --- [p-nio-80-exec-3] o.a.k.clients.admin.AdminClientConfig : The configuration 'key.deserializer' was supplied but isn't a known config. 2023-11-16 00:13:14.123 WARN 11 --- [p-nio-80-exec-3] o.a.k.clients.admin.AdminClientConfig : The configuration 'value.deserializer' was supplied but isn't a known config. 2023-11-16 00:13:14.123 INFO 11 --- [p-nio-80-exec-3] o.a.kafka.common.utils.AppInfoParser : Kafka version: 2.8.1 2023-11-16 00:13:14.123 INFO 11 --- [p-nio-80-exec-3] o.a.kafka.common.utils.AppInfoParser : Kafka commitId: 839b886f9b732b15 2023-11-16 00:13:14.123 INFO 11 --- [p-nio-80-exec-3] o.a.kafka.common.utils.AppInfoParser : Kafka startTimeMs: 1700064794123 2023-11-16 00:13:30.253 ERROR 11 --- [p-nio-80-exec-3] c.x.k.s.k.p.k.z.s.impl.KafkaZKDAOImpl : method=getBrokerMetadata||zkAddress=192.168.1.54:2181,192.168.1.55:2181,192.168.1.50:2181||errMsg=exception
org.apache.zookeeper.KeeperException$ConnectionLossException: KeeperErrorCode = ConnectionLoss for /brokers/ids
at org.apache.zookeeper.KeeperException.create(KeeperException.java:102)
at org.apache.zookeeper.KeeperException.create(KeeperException.java:54)
at org.apache.zookeeper.ZooKeeper.getChildren(ZooKeeper.java:2746)
at org.apache.zookeeper.ZooKeeper.getChildren(ZooKeeper.java:2772)
at com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.impl.KafkaZKDAOImpl.getChildren(KafkaZKDAOImpl.java:196)
at com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.impl.KafkaZKDAOImpl.getBrokerMetadata(KafkaZKDAOImpl.java:50)
at com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.impl.KafkaZKDAOImpl$$FastClassBySpringCGLIB$$6e05ff5c.invoke(
ZK的超时时间调整的大一些试试
ZK的超时时间调整的大一些试试 你好。 我经过一宿奋战.......... 这个问题已经解决了。 怀疑与docker-compose 方式启动的服务有关,docker 网络? knowstream-init服务?并没有细查。
我是将docker-compose配置改成:
version: '2'
services:
  # * Do not rename the knowstreaming-manager service; the UI references it by this name
  knowstreaming-manager:
    # image: knowstreaming/knowstreaming-manager:0.7.0
    image: knowstreaming/knowstreaming-manager:0.8.0
    container_name: knowstreaming-manager
    privileged: true
    restart: always
    depends_on:
      - elasticsearch-single
      - knowstreaming-mysql
    # NOTE(review): with network_mode "host" the expose/ports sections are ignored
    # by Docker — the container shares the host network stack directly. Confirm
    # this is intended; it is what made the ZK connection work in this setup.
    expose:
      - 80
    network_mode: "host"
    command:
      - /bin/sh
      - /ks-start.sh
    environment:
      TZ: Asia/Shanghai
      # MySQL service address
      SERVER_MYSQL_ADDRESS: knowstreaming-mysql:3306
      # MySQL database name
      SERVER_MYSQL_DB: know_streaming
      # MySQL user name
      SERVER_MYSQL_USER: root
      # MySQL user password
      SERVER_MYSQL_PASSWORD: admin2022_
      # Elasticsearch service address
      SERVER_ES_ADDRESS: elasticsearch-single:9200
      # JVM options for the service
      JAVA_OPTS: -Xmx2g -Xms2g
    # Hostnames written in Kafka's ADVERTISED_LISTENERS can be resolved this way
    extra_hosts:
      - "knowstreaming-manager:192.168.1.11"
      - "knowstreaming-mysql:192.168.1.11"
      - "elasticsearch-single:192.168.1.11"
      - "hamigua:192.168.1.11"
    # extra_hosts:
    #   - "hostname:x.x.x.x"
    # Service log paths
    volumes:
      - ./ks/manage/log:/logs
      - ./ks/manage/conf:/conf

  knowstreaming-ui:
    # image: knowstreaming/knowstreaming-ui:0.7.0
    image: knowstreaming/knowstreaming-ui:0.8.0
    container_name: knowstreaming-ui
    restart: always
    ports:
      - '88:80'
    environment:
      TZ: Asia/Shanghai
    depends_on:
      - knowstreaming-manager
    extra_hosts:
      - "knowstreaming-manager:192.168.1.11"
      - "nlpt83:192.168.1.11"
    # extra_hosts:
    #   - "hostname:x.x.x.x"

  elasticsearch-single:
    image: docker.io/library/elasticsearch:7.6.2
    container_name: elasticsearch-single
    restart: always
    # NOTE(review): as above, expose/ports are ignored under host networking.
    expose:
      - 9200
      - 9300
    network_mode: "host"
    ports:
      - '9200:9200'
      - '9300:9300'
    environment:
      TZ: Asia/Shanghai
      # Elasticsearch JVM options
      ES_JAVA_OPTS: -Xms8192m -Xmx8192m
      # Single-node setup; for a multi-node cluster see
      # https://www.elastic.co/guide/en/elasticsearch/reference/7.6/docker.html#docker-compose-file
      discovery.type: single-node
    # Data persistence path
    # volumes:
    #   - /ks/es/data:/usr/share/elasticsearch/data

  # ES initialization service; uses the same image as the manager.
  # The first ES start needs templates and indices initialized; afterwards
  # they are created automatically.
  knowstreaming-init:
    # image: knowstreaming/knowstreaming-manager:0.7.0
    image: knowstreaming/knowstreaming-manager:0.8.0
    container_name: knowstreaming-init
    depends_on:
      - elasticsearch-single
    command:
      - /bin/bash
      - /es_template_create.sh
    environment:
      TZ: Asia/Shanghai
      # Elasticsearch service address
      SERVER_ES_ADDRESS: elasticsearch-single:9200
    extra_hosts:
      - "knowstreaming-manager:192.168.1.11"
      - "knowstreaming-mysql:192.168.1.11"
      - "elasticsearch-single:192.168.1.11"

  knowstreaming-mysql:
    image: knowstreaming/knowstreaming-mysql:latest
    container_name: knowstreaming-mysql
    restart: always
    environment:
      TZ: Asia/Shanghai
      # Password for the root user
      MYSQL_ROOT_PASSWORD: admin2022_
      # Database created at initialization
      MYSQL_DATABASE: know_streaming
      # Wildcard host so root can connect remotely
      MYSQL_ROOT_HOST: '%'
    expose:
      - 3306
    ports:
      - '3306:3306'
    # Data persistence path
    volumes:
      - ./ks/mysql/data:/data/mysql
修改了部分service的extra_hosts
问题就解决了
PS: 我在自家虚拟机上用官网的 docker-compose 是没有问题的,但是到了单位环境的虚拟机上确实发生了连接不上 ZK 的问题。
👍 有兴趣的话,可以考虑往FAQ中补充一下你遇到的问题及解决方案 @humrobin
👍 有兴趣的话,可以考虑往FAQ中补充一下你遇到的问题及解决方案 @humrobin
好的,自上次用docker-compose方式调整出来后,公司事情较多,一直无休止的内卷中,解决办法我组织下语言发一下。或者伙伴们有兴趣可以看下docker-compose 里面修改的内容,相信多少了解docker的应该会看明白
我也遇到这个问题了,这个zk超时时间,怎么配置?能告诉一下吗?@ZQKC
我也遇到这个问题了,这个zk超时时间,怎么配置?能告诉一下吗?@ZQKC
https://github.com/didi/KnowStreaming/blob/master/docs/dev_guide/%E6%8E%A5%E5%85%A5ZK%E5%B8%A6%E8%AE%A4%E8%AF%81Kafka%E9%9B%86%E7%BE%A4.md
我也遇到这个问题了,这个zk超时时间,怎么配置?能告诉一下吗?@ZQKC
https://github.com/didi/KnowStreaming/blob/master/docs/dev_guide/%E6%8E%A5%E5%85%A5ZK%E5%B8%A6%E8%AE%A4%E8%AF%81Kafka%E9%9B%86%E7%BE%A4.md
可以了,配置的时间长点就可以了