canal icon indicating copy to clipboard operation
canal copied to clipboard

Canal Server 监听消息投递到RocketMQ中,运行2~3天,自动就不再监听日志变化了,但是进程还在运行中

Open Devliuyu opened this issue 1 year ago • 1 comments

canal.properties

################################################# ######### common argument ############# #################################################

tcp bind ip

canal.ip = 192.168.14.99

register ip to zookeeper

canal.register.ip = 192.168.14.99 canal.port = 11111 canal.metrics.pull.port = 11112

canal instance user/passwd

canal.user = canal

canal.passwd =

canal admin config

#canal.admin.manager = 127.0.0.1:8089 canal.admin.port = 11110 canal.admin.user = admin canal.admin.passwd =

admin auto register

#canal.admin.register.auto = true #canal.admin.register.cluster = #canal.admin.register.name =

canal.zkServers = 192.168.14.99:12181

flush data to zk

canal.zookeeper.flush.period = 1000 canal.withoutNetty = false

tcp, kafka, rocketMQ, rabbitMQ, pulsarMQ

canal.serverMode = rocketMQ

flush meta cursor/parse position to file

canal.file.data.dir = ${canal.conf.dir} canal.file.flush.period = 1000

memory store RingBuffer size, should be Math.pow(2,n)

canal.instance.memory.buffer.size = 1024

memory store RingBuffer used memory unit size, default 1kb

canal.instance.memory.buffer.memunit = 3500

memory store gets mode, used MEMSIZE or ITEMSIZE

canal.instance.memory.batch.mode = MEMSIZE canal.instance.memory.rawEntry = true

detecting config

canal.instance.detecting.enable = false #canal.instance.detecting.sql = insert into retl.xdual values(1,now()) on duplicate key update x=now() canal.instance.detecting.sql = select 1 canal.instance.detecting.interval.time = 3 canal.instance.detecting.retry.threshold = 3 canal.instance.detecting.heartbeatHaEnable = false

support maximum transaction size, more than the size of the transaction will be cut into multiple transactions delivery

canal.instance.transaction.size = 1024

mysql fallback connected to new master should fallback times

canal.instance.fallbackIntervalInSeconds = 60

network config

canal.instance.network.receiveBufferSize = 16384 canal.instance.network.sendBufferSize = 16384 canal.instance.network.soTimeout = 30

binlog filter config

canal.instance.filter.druid.ddl = true canal.instance.filter.query.dcl = true canal.instance.filter.query.dml = false canal.instance.filter.query.ddl = true canal.instance.filter.table.error = false canal.instance.filter.rows = false canal.instance.filter.transaction.entry = false canal.instance.filter.dml.insert = false canal.instance.filter.dml.update = false canal.instance.filter.dml.delete = true

binlog format/image check

canal.instance.binlog.format = ROW,STATEMENT,MIXED canal.instance.binlog.image = FULL,MINIMAL,NOBLOB

binlog ddl isolation

canal.instance.get.ddl.isolation = false

parallel parser config

canal.instance.parser.parallel = true

concurrent thread number, default 60% available processors, suggest not to exceed Runtime.getRuntime().availableProcessors()

#canal.instance.parser.parallelThreadSize = 16

disruptor ringbuffer size, must be power of 2

canal.instance.parser.parallelBufferSize = 256

table meta tsdb info

canal.instance.tsdb.enable = true canal.instance.tsdb.dir = ${canal.file.data.dir:../conf}/${canal.instance.destination:} canal.instance.tsdb.url = jdbc:h2:${canal.instance.tsdb.dir}/h2;CACHE_SIZE=1000;MODE=MYSQL; canal.instance.tsdb.dbUsername = canal canal.instance.tsdb.dbPassword = canal

dump snapshot interval, default 24 hour

canal.instance.tsdb.snapshot.interval = 24

purge snapshot expire, default 360 hour (15 days)

canal.instance.tsdb.snapshot.expire = 360

################################################# ######### destinations ############# ################################################# canal.destinations = real_time

conf root dir

canal.conf.dir = ../conf

auto scan instance dir add/remove and start/stop instance

canal.auto.scan = true canal.auto.scan.interval = 5

set this value to 'true' means that when binlog pos not found, skip to latest.

WARN: pls keep 'false' in production env, or if you know what you want.

canal.auto.reset.latest.pos.mode = false

canal.instance.tsdb.spring.xml = classpath:spring/tsdb/h2-tsdb.xml #canal.instance.tsdb.spring.xml = classpath:spring/tsdb/mysql-tsdb.xml

canal.instance.global.mode = spring canal.instance.global.lazy = false canal.instance.global.manager.address = ${canal.admin.manager} #canal.instance.global.spring.xml = classpath:spring/memory-instance.xml canal.instance.global.spring.xml = classpath:spring/ob-file-instance.xml #canal.instance.global.spring.xml = classpath:spring/default-instance.xml

################################################## ######### MQ Properties ############# ##################################################

aliyun ak/sk , support rds/mq

canal.aliyun.accessKey = canal.aliyun.secretKey = canal.aliyun.uid=

canal.mq.flatMessage = true canal.mq.canalBatchSize = 50 canal.mq.canalGetTimeout = 100

Set this value to "cloud", if you want open message trace feature in aliyun.

canal.mq.accessChannel = local

canal.mq.database.hash = true canal.mq.send.thread.size = 30 canal.mq.build.thread.size = 8

################################################## ######### Kafka ############# ################################################## kafka.bootstrap.servers = 127.0.0.1:9092 kafka.acks = all kafka.compression.type = none kafka.batch.size = 16384 kafka.linger.ms = 1 kafka.max.request.size = 1048576 kafka.buffer.memory = 33554432 kafka.max.in.flight.requests.per.connection = 1 kafka.retries = 0

kafka.kerberos.enable = false kafka.kerberos.krb5.file = ../conf/kerberos/krb5.conf kafka.kerberos.jaas.file = ../conf/kerberos/jaas.conf

sasl demo

kafka.sasl.jaas.config = org.apache.kafka.common.security.scram.ScramLoginModule required \n username="alice" \npassword="alice-secret";

kafka.sasl.mechanism = SCRAM-SHA-512

kafka.security.protocol = SASL_PLAINTEXT

################################################## ######### RocketMQ ############# ################################################## rocketmq.producer.group = CANAL-SYNC-GROUP rocketmq.enable.message.trace = false rocketmq.customized.trace.topic = rocketmq.namespace = rocketmq.namesrv.addr = 192.168.14.96:9876;192.168.14.97:9876;192.168.14.99:9876 rocketmq.retry.times.when.send.failed = 0 rocketmq.vip.channel.enabled = false rocketmq.tag =

################################################## ######### RabbitMQ ############# ################################################## rabbitmq.host = rabbitmq.virtual.host = rabbitmq.exchange = rabbitmq.username = rabbitmq.password = rabbitmq.queue = rabbitmq.routingKey = rabbitmq.deliveryMode =

################################################## ######### Pulsar ############# ################################################## pulsarmq.serverUrl = pulsarmq.roleToken = pulsarmq.topicTenantPrefix =

instance.properties

ob server info

canal.instance.oceanbase.rsList=192.168.14.99:2882:2881 canal.instance.oceanbase.username=datax@fuel_tenant canal.instance.oceanbase.password=xxxxxxxxxx canal.instance.oceanbase.startTimestamp=0 canal.instance.oceanbase.clusterUrl= canal.instance.oceanbase.timezone=+8:00 canal.instance.oceanbase.workingMode=storage

set extraConfigs for libobcdc, format {'key1': 'value1', 'key2': 'value2'}

canal.instance.oceanbase.obcdc.extraConfigs={'store_service_path': './storage'}

ob log proxy info

canal.instance.oceanbase.logproxy.address=192.168.14.99:2983 canal.instance.oceanbase.logproxy.sslEnabled=false #canal.instance.oceanbase.logproxy.serverCert=../conf/${canal.instance.destination:}/ca.crt #canal.instance.oceanbase.logproxy.clientCert=../conf/${canal.instance.destination:}/client.crt #canal.instance.oceanbase.logproxy.clientKey=../conf/${canal.instance.destination:}/client.key #canal.instance.oceanbase.logproxy.clientId=

tenant name

canal.instance.oceanbase.tenant=fuel_tenant

exclude tenant name in target schema name

canal.instance.parser.excludeTenantInDbName=true

table regex, format: [tenant].[database].[table]

canal.instance.filter.regex=fuel_tenant.*.* #canal.instance.filter.regex=rl.fuel_system.sys_dept,rl.fuel_system.sys_enterprise,rl.fuel_system.sys_menu,rl.fuel_system.sys_role,rl.fuel_system.sys_post,rl.fuel_user.sys_user

table black regex

canal.instance.filter.black.regex=fuel_tenant.fuel_report_data.*,fuel_tenant.fuel_lims.*,fuel_tenant.xxl_job.*,fuel_tenant.fuel_supervise.* #canal.instance.filter.black.regex=fuel_tenant.fuel_report_data.lims_result

table field filter(format: schema1.tableName1:field1/field2,schema2.tableName2:field1/field2)

#canal.instance.filter.field=test1.t_product:id/subject/keywords,test2.t_company:id/name/contact/ch

table field black filter(format: schema1.tableName1:field1/field2,schema2.tableName2:field1/field2)

#canal.instance.filter.black.field=test1.t_product:subject/product_image,test2.t_company:id/name/contact/ch

mq config

canal.mq.topic=real-time-sync-data canal.mq.canalBatchSize=50

dynamic topic route by schema or table regex

#canal.mq.dynamicTopic=mytest1.user,mytest2\\..*,.*\\..* #canal.mq.partition=6

hash partition config

canal.mq.partitionsNum=3 canal.mq.partitionHash=.*\\..* #canal.mq.dynamicTopicPartitionNum=test.*:4,mycanal:6 #################################################

Devliuyu avatar Feb 04 '24 01:02 Devliuyu

检查 logproxy 的日志了吗,是不是磁盘满了? 如果处理的数据中没有大事务的话,也可以试下把 libobcdc 的工作模式改成 memory

canal.instance.oceanbase.workingMode=memory

whhe avatar Feb 04 '24 03:02 whhe