Installing canal_adapter

Published: 2023-08-16 09:46:07    Author: slnngk

Environment:
OS: CentOS 7
canal.adapter: 1.1.5
MySQL: 5.7.29

1. Extract
Extract the installation package
[root@elasticsearch-010007081120 canal]# mkdir -p /home/middle/canal_adapter/
[root@elasticsearch-010007081120 canal]# mv canal.adapter-1.1.5.tar.gz /home/middle/canal_adapter/
[root@elasticsearch-010007081120 canal]# cd /home/middle/canal_adapter/
[root@elasticsearch-010007081120 canal]# tar -zxvf canal.adapter-1.1.5.tar.gz
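The tarball unpacks straight into the current directory rather than creating a top-level folder of its own; a standard canal.adapter 1.1.5 package should leave roughly bin/, conf/, lib/ and logs/ under the install path:

[root@elasticsearch-010007081120 canal_adapter]# ls /home/middle/canal_adapter/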


2. Create the sync account (if the account already exists, skip creation and just grant the privileges)
grant select,replication slave, replication client on *.* to 'canal'@'%' identified by 'canal123';
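To confirm the account and its privileges took effect, check on the MySQL side, and optionally verify that the account can reach the source instance from the adapter host (host and port taken from the data source configured later in application.yml):

mysql> show grants for 'canal'@'%';
[root@localhost ~]# mysql -h 192.168.1.65 -P 13306 -u canal -pcanal123 -e "select 1;"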


3. Replace the MySQL driver
canal.adapter 1.1.5 ships with mysql-connector-java-5.1.48.jar, which needs to be replaced with 5.1.49.
Download the following driver from the MySQL website and swap it in:
mysql-connector-java-5.1.49.jar
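A sketch of the swap, assuming the bundled jars sit in the adapter's lib/ directory and that the new driver was downloaded to /tmp (both paths are examples; adjust to your layout):

[root@localhost ~]# cd /home/middle/canal_adapter/lib
[root@localhost lib]# mv mysql-connector-java-5.1.48.jar /tmp/mysql-connector-java-5.1.48.jar.bak
[root@localhost lib]# cp /tmp/mysql-connector-java-5.1.49.jar .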


4. Configuration file
[root@localhost conf]# more application.yml 
server:
  port: 8081
spring:
  jackson:
    date-format: yyyy-MM-dd HH:mm:ss
    time-zone: GMT+8
    default-property-inclusion: non_null

canal.conf:
  mode: tcp #tcp kafka rocketMQ rabbitMQ
  flatMessage: true
  zookeeperHosts:
  syncBatchSize: 1000
  retries: 0
  timeout:
  accessKey:
  secretKey:
  consumerProperties:
    # canal tcp consumer
    canal.tcp.server.host: 192.168.1.65:11111 ## host and port of the canal server deployment
    canal.tcp.zookeeper.hosts:
    canal.tcp.batch.size: 500
    canal.tcp.username:
    canal.tcp.password:
    # kafka consumer
    kafka.bootstrap.servers: 127.0.0.1:9092
    kafka.enable.auto.commit: false
    kafka.auto.commit.interval.ms: 1000
    kafka.auto.offset.reset: latest
    kafka.request.timeout.ms: 40000
    kafka.session.timeout.ms: 30000
    kafka.isolation.level: read_committed
    kafka.max.poll.records: 1000
    # rocketMQ consumer
    rocketmq.namespace:
    rocketmq.namesrv.addr: 127.0.0.1:9876
    rocketmq.batch.size: 1000
    rocketmq.enable.message.trace: false
    rocketmq.customized.trace.topic:
    rocketmq.access.channel:
    rocketmq.subscribe.filter:
    # rabbitMQ consumer
    rabbitmq.host:
    rabbitmq.virtual.host:
    rabbitmq.username:
    rabbitmq.password:
    rabbitmq.resource.ownerId:

  srcDataSources:
    defaultDS:
      url: jdbc:mysql://192.168.1.65:13306/db_bi?useUnicode=true ## connection to the source (business) database; syncing multiple databases is not covered here
      username: canal     ## account used for canal sync
      password: canal123  ## its password
  canalAdapters:
  - instance: example # canal instance Name or mq topic name
    groups:
    - groupId: g1
      outerAdapters:
      - name: logger
#      - name: rdb
#        key: mysql1
#        properties:
#          jdbc.driverClassName: com.mysql.jdbc.Driver
#          jdbc.url: jdbc:mysql://127.0.0.1:3306/mytest2?useUnicode=true
#          jdbc.username: root
#          jdbc.password: 121212
#      - name: rdb
#        key: oracle1
#        properties:
#          jdbc.driverClassName: oracle.jdbc.OracleDriver
#          jdbc.url: jdbc:oracle:thin:@localhost:49161:XE
#          jdbc.username: mytest
#          jdbc.password: m121212
#      - name: rdb
#        key: postgres1
#        properties:
#          jdbc.driverClassName: org.postgresql.Driver
#          jdbc.url: jdbc:postgresql://localhost:5432/postgres
#          jdbc.username: postgres
#          jdbc.password: 121212
#          threads: 1
#          commitSize: 3000
#      - name: hbase
#        properties:
#          hbase.zookeeper.quorum: 127.0.0.1
#          hbase.zookeeper.property.clientPort: 2181
#          zookeeper.znode.parent: /hbase
      - name: es6                                        ## keep the name es6; it automatically maps to the ../conf/es6 directory
        hosts: http://192.168.1.65:19200 # 127.0.0.1:9200 for rest mode ## rest mode is used here, so hosts must be given in http:// form
        properties:
          mode: rest ## rest mode
          security.auth: elastic:sdrdev123 ## ES username:password
#          cluster.name: elasticsearch  ## leave commented out; if enabled it must match the cluster.name configured in ES
#        - name: kudu
#          key: kudu
#          properties:
#            kudu.master.address: 127.0.0.1 # ',' split multi address


Note: only change the values called out in the comments above (canal server host/port, the srcDataSources entry, and the es6 adapter settings); leave everything else as it is.
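Besides application.yml, the es6 adapter reads table-to-index mapping files from conf/es6/. The sketch below follows the mapping format from the canal adapter documentation; the table user, index user_index and file name user_index.yml are placeholders to be replaced with your own schema:

[root@localhost conf]# more es6/user_index.yml
dataSourceKey: defaultDS       # key of the data source defined under srcDataSources
destination: example           # canal instance name, matches canalAdapters -> instance
groupId: g1                    # matches the groupId in application.yml
esMapping:
  _index: user_index           # target Elasticsearch index
  _type: _doc                  # document type (still required for ES 6)
  _id: _id                     # the column aliased as _id in the sql becomes the document id
  sql: "select id as _id, name, create_time from user"
  commitBatch: 3000            # commit batch size for full ETL

One mapping file corresponds to one index; add one file per table/index to be synced.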


5. Start
[root@localhost bin]# cd /home/middle/canal_adapter/bin
[root@localhost bin]# ./startup.sh
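A quick way to confirm the adapter is up and listening on the port set in application.yml (8081 here):

[root@localhost bin]# ps -ef | grep canal_adapter | grep -v grep
[root@localhost bin]# ss -tnlp | grep 8081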


6. Check the logs
/home/middle/canal_adapter/logs/adapter/adapter.log
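Follow the log during startup and while data is flowing; connection or mapping problems show up here as exceptions:

[root@localhost ~]# tail -f /home/middle/canal_adapter/logs/adapter/adapter.log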


7. Verify the sync
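A minimal end-to-end check, assuming the example mapping above (table user in db_bi synced to index user_index, both placeholder names): change a row on the MySQL side, then confirm it shows up in Elasticsearch. Since the logger adapter is also enabled, the change event should additionally be printed in adapter.log.

mysql> insert into db_bi.user (id, name, create_time) values (1, 'canal_test', now());

[root@localhost ~]# curl -u elastic:sdrdev123 'http://192.168.1.65:19200/user_index/_search?q=name:canal_test&pretty'

If the inserted row comes back in the search hits, incremental sync is working.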