Parcourir la source

Resolve conflict with master branch

duhengforever il y a 7 ans
Parent
commit
ebaf78f5aa
90 fichiers modifiés avec 3337 ajouts et 1611 suppressions
  1. 251 0
      client-adapter/README.md
  2. 11 6
      client-adapter/common/pom.xml
  3. 0 36
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/CanalOuterAdapter.java
  4. 68 0
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/OuterAdapter.java
  5. 41 0
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/AdapterConfigs.java
  6. 23 34
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/CanalClientConfig.java
  7. 0 52
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/CanalOuterAdapterConfiguration.java
  8. 81 0
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/DatasourceConfig.java
  9. 21 11
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/Dml.java
  10. 44 0
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/EtlResult.java
  11. 28 39
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/ExtensionLoader.java
  12. 51 5
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/JdbcTypeUtil.java
  13. 11 13
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/MessageUtil.java
  14. 52 0
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/OuterAdapterConfig.java
  15. 56 0
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/Result.java
  16. 6 0
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/SPI.java
  17. 2 33
      client-adapter/hbase/pom.xml
  18. 111 40
      client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/HbaseAdapter.java
  19. 67 43
      client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/config/MappingConfig.java
  20. 18 18
      client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/config/MappingConfigLoader.java
  21. 385 0
      client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/service/HbaseEtlService.java
  22. 68 71
      client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/service/HbaseSyncService.java
  23. 2 2
      client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/support/TypeUtil.java
  24. 0 0
      client-adapter/hbase/src/main/resources/META-INF/canal/com.alibaba.otter.canal.client.adapter.OuterAdapter
  25. 0 5
      client-adapter/hbase/src/main/resources/hbase-mapping/configs.conf
  26. 6 2
      client-adapter/hbase/src/main/resources/hbase/mytest_person2.yml
  27. 209 0
      client-adapter/launcher/pom.xml
  28. 34 0
      client-adapter/launcher/src/main/assembly/release.xml
  29. 18 0
      client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/CanalAdapterApplication.java
  30. 120 0
      client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/common/EtlLock.java
  31. 6 0
      client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/common/Mode.java
  32. 213 0
      client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/common/SyncSwitch.java
  33. 51 0
      client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/config/AdapterCanalConfig.java
  34. 86 0
      client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/config/AdapterConfig.java
  35. 42 0
      client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/config/CuratorClient.java
  36. 34 0
      client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/config/SpringContext.java
  37. 159 0
      client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/AbstractCanalAdapterWorker.java
  38. 101 0
      client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/CanalAdapterKafkaWorker.java
  39. 30 52
      client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/CanalAdapterLoader.java
  40. 29 32
      client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/CanalAdapterRocketMQWorker.java
  41. 77 0
      client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/CanalAdapterService.java
  42. 24 73
      client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/CanalAdapterWorker.java
  43. 172 0
      client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/rest/CommonRest.java
  44. 47 0
      client-adapter/launcher/src/main/resources/application.yml
  45. 1 0
      client-adapter/logger/pom.xml
  46. 5 5
      client-adapter/logger/src/main/java/com/alibaba/otter/canal/client/adapter/logger/LoggerAdapterExample.java
  47. 0 0
      client-adapter/logger/src/main/resources/META-INF/canal/com.alibaba.otter.canal.client.adapter.OuterAdapter
  48. 27 5
      client-adapter/pom.xml
  49. 0 155
      client-launcher/pom.xml
  50. 0 57
      client-launcher/src/main/assembly/dev.xml
  51. 0 57
      client-launcher/src/main/assembly/release.xml
  52. 0 25
      client-launcher/src/main/bin/startup.bat
  53. 0 100
      client-launcher/src/main/bin/startup.sh
  54. 0 65
      client-launcher/src/main/bin/stop.sh
  55. 0 66
      client-launcher/src/main/java/com/alibaba/otter/canal/client/ClientLauncher.java
  56. 0 168
      client-launcher/src/main/java/com/alibaba/otter/canal/client/adapter/loader/AbstractCanalAdapterWorker.java
  57. 0 179
      client-launcher/src/main/java/com/alibaba/otter/canal/client/adapter/loader/CanalAdapterKafkaWorker.java
  58. 0 47
      client-launcher/src/main/resources/logback.xml
  59. 2 2
      dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/RowsLogBuffer.java
  60. 5 0
      deployer/src/main/resources/canal.properties
  61. 1 1
      deployer/src/main/resources/example/instance.properties
  62. 19 19
      deployer/src/main/resources/logback.xml
  63. 3 0
      deployer/src/main/resources/spring/default-instance.xml
  64. 3 0
      deployer/src/main/resources/spring/file-instance.xml
  65. 1 0
      deployer/src/main/resources/spring/group-instance.xml
  66. 3 0
      deployer/src/main/resources/spring/memory-instance.xml
  67. 2 2
      deployer/src/main/resources/spring/tsdb/sql-map/sqlmap_history.xml
  68. 2 2
      deployer/src/main/resources/spring/tsdb/sql-map/sqlmap_snapshot.xml
  69. 59 4
      instance/manager/src/main/java/com/alibaba/otter/canal/instance/manager/CanalInstanceWithManager.java
  70. 63 0
      instance/manager/src/main/java/com/alibaba/otter/canal/instance/manager/model/CanalParameter.java
  71. 18 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/AbstractMysqlEventParser.java
  72. 2 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/LocalBinlogEventParser.java
  73. 2 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlEventParser.java
  74. 4 3
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/LogEventConvert.java
  75. 49 15
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/DatabaseTableMeta.java
  76. 4 7
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/dao/MetaHistoryDAO.java
  77. 4 7
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/dao/MetaSnapshotDAO.java
  78. 17 2
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/MetaHistoryDAOTest.java
  79. 41 0
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/MetaSnapshotDAOTest.java
  80. 2 2
      parse/src/test/resources/tsdb/sql-map/sqlmap_history.xml
  81. 2 2
      parse/src/test/resources/tsdb/sql-map/sqlmap_snapshot.xml
  82. 0 1
      pom.xml
  83. 22 6
      protocol/src/main/java/com/alibaba/otter/canal/protocol/FlatMessage.java
  84. 1 1
      protocol/src/main/java/com/alibaba/otter/canal/protocol/Message.java
  85. 9 11
      server/src/main/java/com/alibaba/otter/canal/kafka/CanalKafkaProducer.java
  86. 5 0
      server/src/main/java/com/alibaba/otter/canal/server/CanalMQStarter.java
  87. 60 23
      server/src/main/java/com/alibaba/otter/canal/server/embedded/CanalServerWithEmbedded.java
  88. 8 2
      sink/src/main/java/com/alibaba/otter/canal/sink/entry/EntryEventSink.java
  89. 11 0
      store/src/main/java/com/alibaba/otter/canal/store/memory/MemoryEventStoreWithBuffer.java
  90. 25 3
      store/src/main/java/com/alibaba/otter/canal/store/model/Event.java

+ 251 - 0
client-adapter/README.md

@@ -0,0 +1,251 @@
+## 基本说明
+canal 1.1.1版本之后, 增加客户端数据落地的适配及启动功能, 目前支持功能:
+* 客户端启动器
+* 同步管理REST接口
+* 日志适配器, 作为DEMO
+* HBase的数据同步(表对表同步), ETL功能
+* (后续支持) ElasticSearch多表数据同步,ETL功能
+
+## 环境版本
+* 操作系统:无要求
+* java版本: jdk1.8 以上 
+* canal 版本: 请下载最新的安装包,本文以当前v1.1.1 的canal.deployer-1.1.1.tar.gz为例
+* MySQL版本 :5.7.18
+* HBase版本: Apache HBase 1.1.2, 若和服务端版本不一致可自行替换客户端HBase依赖
+
+## 一、适配器启动器
+client-adapter分为适配器和启动器两部分, 适配器为多个fat jar, 每个适配器会将自己所需的依赖打成一个包, 以SPI的方式让启动器动态加载
+
+
+启动器为 SpringBoot 项目, 支持canal-client启动的同时提供相关REST管理接口, 运行目录结构为:
+```
+canal-adapter-launcher.jar
+- lib
+    client-adapter.logger-1.1.1-jar-with-dependencies.jar
+    client-adapter.hbase-1.1.1-jar-with-dependencies.jar
+- config
+    application.yml
+    - hbase
+        mytest_person2.yml
+```
+以上目录结构最终会打包成 canal-adapter-launcher.tar.gz 压缩包
+
+## 二、启动器
+### 2.1 启动器配置 application.yml
+#### canal相关配置部分说明
+```
+canal.conf:
+  canalServerHost: 127.0.0.1:11111  # 对应单机模式下的canal server的ip:port
+  zookeeperHosts: slave1:2181       # 对应集群模式下的zk地址, 如果配置了canalServerHost, 则以canalServerHost为准
+  bootstrapServers: slave1:6667     # kafka或rocketMQ地址, 与canalServerHost不能并存
+  flatMessage: true                 # 扁平message开关, 是否以json字符串形式投递数据, 仅在kafka/rocketMQ模式下有效
+  canalInstances:                   # canal实例组, 如果是tcp模式可配置此项
+  - instance: example               # 对应canal destination
+    adapterGroups:                  # 对应适配器分组, 分组间的适配器并行运行
+    - outAdapters:                  # 适配器列表, 分组内的适配器串行运行
+      - name: logger                # 适配器SPI名
+      - name: hbase
+        properties:                 # HBase相关连接参数
+          hbase.zookeeper.quorum: slave1
+          hbase.zookeeper.property.clientPort: 2181
+          zookeeper.znode.parent: /hbase
+  mqTopics:                         # MQ topic组, 如果是kafka或者rocketMQ模式可配置此项, 与canalInstances不能并存
+  - mqMode: kafka                   # MQ的模式: kafka/rocketMQ
+    topic: example                  # MQ topic
+    groups:                         # group组
+    - groupId: g2                   # group id
+      outAdapters:                  # 适配器列表, 以下配置和canalInstances中一样
+      - name: logger                
+```
+#### 适配器相关配置部分说明
+```
+adapter.conf:
+  datasourceConfigs:                # 数据源配置列表, 数据源将在适配器中用于ETL、数据同步回查等使用
+    defaultDS:                      # 数据源 dataSourceKey, 适配器中通过该值获取指定数据源
+      url: jdbc:mysql://127.0.0.1:3306/mytest?useUnicode=true
+      username: root
+      password: 121212
+  adapterConfigs:                   # 适配器内部配置列表
+  - hbase/mytest_person2.yml        # 类型/配置文件名, 这里示例就是对应HBase适配器hbase目录下的mytest_person2.yml文件
+```
+## 2.2 同步管理REST接口
+#### 2.2.1 查询所有订阅同步的canal destination或MQ topic
+```
+curl http://127.0.0.1:8081/destinations
+```
+#### 2.2.2 数据同步开关
+```
+curl http://127.0.0.1:8081/syncSwitch/example/off -X PUT
+```
+针对 example 这个canal destination/MQ topic 进行开关操作. off代表关闭, 该destination/topic下的同步将阻塞或者断开连接不再接收数据, on代表开启
+
+注: 如果在配置文件中配置了 zookeeperHosts 项, 则会使用分布式锁来控制HA中的数据同步开关, 如果是单机模式则使用本地锁来控制开关
+#### 2.2.3 数据同步开关状态
+```
+curl http://127.0.0.1:8081/syncSwitch/example
+```
+查看指定 canal destination/MQ topic 的数据同步开关状态
+#### 2.2.4 手动ETL
+```
+curl http://127.0.0.1:8081/etl/hbase/mytest_person2.yml -X POST -d "params=2018-10-21 00:00:00"
+```
+导入数据到指定类型的库
+#### 2.2.5 查看相关库总数据
+```
+curl http://127.0.0.1:8081/count/hbase/mytest_person2.yml
+```
+### 2.3 启动canal-adapter示例
+#### 2.3.1 启动canal server (单机模式), 参考: [Canal QuickStart](https://github.com/alibaba/canal/wiki/QuickStart)
+#### 2.3.2 修改config/application.yml为:
+```
+server:
+  port: 8081
+logging:
+  level:
+    com.alibaba.otter.canal.client.adapter.hbase: DEBUG
+spring:
+  jackson:
+    date-format: yyyy-MM-dd HH:mm:ss
+    time-zone: GMT+8
+    default-property-inclusion: non_null
+
+canal.conf:
+  canalServerHost: 127.0.0.1:11111
+  flatMessage: true
+  canalInstances:
+  - instance: example
+    adapterGroups:
+    - outAdapters:
+      - name: logger
+```
+启动 canal-adapter-launcher.jar
+```
+java -jar canal-adapter-launcher.jar
+```
+
+## 三、HBase适配器
+### 3.1 修改启动器配置: application.yml
+```
+server:
+  port: 8081
+logging:
+  level:
+    com.alibaba.otter.canal.client.adapter.hbase: DEBUG
+spring:
+  jackson:
+    date-format: yyyy-MM-dd HH:mm:ss
+    time-zone: GMT+8
+    default-property-inclusion: non_null
+
+canal.conf:
+  canalServerHost: 127.0.0.1:11111
+  flatMessage: true
+  canalInstances:
+  - instance: example
+    adapterGroups:
+    - outAdapters:
+      - name: hbase
+        properties:
+          hbase.zookeeper.quorum: slave1
+          hbase.zookeeper.property.clientPort: 2181
+          zookeeper.znode.parent: /hbase
+
+adapter.conf:
+  datasourceConfigs:
+    defaultDS:
+      url: jdbc:mysql://127.0.0.1:3306/mytest?useUnicode=true
+      username: root
+      password: 121212
+  adapterConfigs:
+  - hbase/mytest_person.yml 
+```
+其中指定了一个HBase表映射文件: mytest_person.yml
+### 3.2 适配器表映射文件
+修改 config/hbase/mytest_person.yml文件:
+```
+dataSourceKey: defaultDS            # 对应application.yml中的datasourceConfigs下的配置
+hbaseOrm:                           # mysql--HBase的单表映射配置
+  mode: STRING                      # HBase中的存储类型, 默认统一存为String, 可选: #PHOENIX  #NATIVE   #STRING 
+                                    # NATIVE: 以java类型为主, PHOENIX: 将类型转换为Phoenix对应的类型
+  destination: example              # 对应 canal destination/MQ topic 名称
+  database: mytest                  # 数据库名/schema名
+  table: person                     # 表名
+  hbaseTable: MYTEST.PERSON         # HBase表名
+  family: CF                        # 默认统一Column Family名称
+  uppercaseQualifier: true          # 字段名转大写, 默认为true
+  commitBatch: 3000                 # 批量提交的大小, ETL中用到
+  #rowKey: id,type                  # 复合字段rowKey不能和columns中的rowKey并存
+                                    # 复合rowKey会以 '|' 分隔
+  columns:                          # 字段映射, 如果不配置将自动映射所有字段, 
+                                    # 并取第一个字段为rowKey, HBase字段名以mysql字段名为主
+    id: ROWKEY                      
+    name: CF:NAME
+    email: EMAIL                    # 如果column family为默认CF, 则可以省略
+    type:                           # 如果HBase字段和mysql字段名一致, 则可以省略
+    c_time: 
+    birthday: 
+```
+如果涉及到类型转换,可以如下形式:
+```
+...
+  columns:                         
+    id: ROWKEY$STRING                     
+    ...                   
+    type: TYPE$BYTE                          
+    ...
+```
+类型转换涉及到Java类型和Phoenix类型两种, 分别定义如下:
+```
+#Java 类型转换, 对应配置 mode: NATIVE
+$DEFAULT
+$STRING
+$INTEGER
+$LONG
+$SHORT
+$BOOLEAN
+$FLOAT
+$DOUBLE
+$BIGDECIMAL
+$DATE
+$BYTE
+$BYTES
+```
+```
+#Phoenix 类型转换, 对应配置 mode: PHOENIX
+$DEFAULT                  对应PHOENIX里的VARCHAR
+$UNSIGNED_INT             对应PHOENIX里的UNSIGNED_INT           4字节
+$UNSIGNED_LONG            对应PHOENIX里的UNSIGNED_LONG          8字节
+$UNSIGNED_TINYINT         对应PHOENIX里的UNSIGNED_TINYINT       1字节
+$UNSIGNED_SMALLINT        对应PHOENIX里的UNSIGNED_SMALLINT      2字节
+$UNSIGNED_FLOAT           对应PHOENIX里的UNSIGNED_FLOAT         4字节
+$UNSIGNED_DOUBLE          对应PHOENIX里的UNSIGNED_DOUBLE        8字节
+$INTEGER                  对应PHOENIX里的INTEGER                4字节
+$BIGINT                   对应PHOENIX里的BIGINT                 8字节
+$TINYINT                  对应PHOENIX里的TINYINT                1字节
+$SMALLINT                 对应PHOENIX里的SMALLINT               2字节
+$FLOAT                    对应PHOENIX里的FLOAT                  4字节
+$DOUBLE                   对应PHOENIX里的DOUBLE                 8字节
+$BOOLEAN                  对应PHOENIX里的BOOLEAN                1字节
+$TIME                     对应PHOENIX里的TIME                   8字节
+$DATE                     对应PHOENIX里的DATE                   8字节
+$TIMESTAMP                对应PHOENIX里的TIMESTAMP              12字节
+$UNSIGNED_TIME            对应PHOENIX里的UNSIGNED_TIME          8字节
+$UNSIGNED_DATE            对应PHOENIX里的UNSIGNED_DATE          8字节
+$UNSIGNED_TIMESTAMP       对应PHOENIX里的UNSIGNED_TIMESTAMP     12字节
+$VARCHAR                  对应PHOENIX里的VARCHAR                动态长度
+$VARBINARY                对应PHOENIX里的VARBINARY              动态长度
+$DECIMAL                  对应PHOENIX里的DECIMAL                动态长度
+```
+如果不配置将以java对象原生类型默认映射转换
+### 3.3 启动HBase数据同步
+#### 创建HBase表
+在HBase shell中运行:
+```
+create 'MYTEST.PERSON', {NAME=>'CF'}
+```
+#### 启动canal-adapter启动器
+```
+java -jar canal-adapter-launcher.jar
+```
+#### 验证
+修改mysql mytest.person表的数据, 将会自动同步到HBase的MYTEST.PERSON表下面, 并会打出DML的log

+ 11 - 6
client-adapter/common/pom.xml

@@ -16,18 +16,23 @@
         <dependency>
             <groupId>com.alibaba.otter</groupId>
             <artifactId>canal.protocol</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-api</artifactId>
-            <version>1.7.12</version>
+            <version>${canal_version}</version>
         </dependency>
         <dependency>
             <groupId>joda-time</groupId>
             <artifactId>joda-time</artifactId>
             <version>2.9.4</version>
         </dependency>
+        <dependency>
+            <groupId>com.alibaba</groupId>
+            <artifactId>druid</artifactId>
+            <version>1.1.9</version>
+        </dependency>
+        <dependency>
+            <groupId>mysql</groupId>
+            <artifactId>mysql-connector-java</artifactId>
+            <version>5.1.40</version>
+        </dependency>
     </dependencies>
 
 </project>

+ 0 - 36
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/CanalOuterAdapter.java

@@ -1,36 +0,0 @@
-package com.alibaba.otter.canal.client.adapter;
-
-import com.alibaba.otter.canal.client.adapter.support.CanalOuterAdapterConfiguration;
-import com.alibaba.otter.canal.client.adapter.support.Dml;
-import com.alibaba.otter.canal.client.adapter.support.SPI;
-
-/**
- * 外部适配器接口
- *
- * @author machengyuan 2018-8-18 下午10:14:02
- * @version 1.0.0
- */
-@SPI("logger")
-public interface CanalOuterAdapter {
-
-    /**
-     * 外部适配器初始化接口
-     *
-     * @param configuration 外部适配器配置信息
-     */
-    void init(CanalOuterAdapterConfiguration configuration);
-
-    /**
-     * 往适配器中写入数据
-     *
-     * @param dml 数据包
-     */
-    void writeOut(Dml dml);
-
-    // void writeOut(FlatMessage flatMessage);
-
-    /**
-     * 外部适配器销毁接口
-     */
-    void destroy();
-}

+ 68 - 0
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/OuterAdapter.java

@@ -0,0 +1,68 @@
+package com.alibaba.otter.canal.client.adapter;
+
+import java.util.List;
+import java.util.Map;
+
+import com.alibaba.otter.canal.client.adapter.support.Dml;
+import com.alibaba.otter.canal.client.adapter.support.EtlResult;
+import com.alibaba.otter.canal.client.adapter.support.OuterAdapterConfig;
+import com.alibaba.otter.canal.client.adapter.support.SPI;
+
+/**
+ * 外部适配器接口
+ *
+ * @author reweerma 2018-8-18 下午10:14:02
+ * @version 1.0.0
+ */
+@SPI("logger")
+public interface OuterAdapter {
+
+    /**
+     * 外部适配器初始化接口
+     *
+     * @param configuration 外部适配器配置信息
+     */
+    void init(OuterAdapterConfig configuration);
+
+    /**
+     * 往适配器中同步数据
+     *
+     * @param dml 数据包
+     */
+    void sync(Dml dml);
+
+    /**
+     * 外部适配器销毁接口
+     */
+    void destroy();
+
+    /**
+     * Etl操作
+     * 
+     * @param task 任务名, 对应配置名
+     * @param params etl筛选条件
+     */
+    default EtlResult etl(String task, List<String> params) {
+        throw new UnsupportedOperationException("unsupported operation");
+    }
+
+    /**
+     * 计算总数
+     * 
+     * @param task 任务名, 对应配置名
+     * @return 总数
+     */
+    default Map<String, Object> count(String task) {
+        throw new UnsupportedOperationException("unsupported operation");
+    }
+
+    /**
+     * 通过task获取对应的destination
+     * 
+     * @param task 任务名, 对应配置名
+     * @return destination
+     */
+    default String getDestination(String task) {
+        return null;
+    }
+}

+ 41 - 0
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/AdapterConfigs.java

@@ -0,0 +1,41 @@
+package com.alibaba.otter.canal.client.adapter.support;
+
+import java.util.HashMap;
+import java.util.LinkedHashSet;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * 适配器配置集合, 用于配置加载, 线程不安全
+ *
+ * @author rewerma @ 2018-10-20
+ * @version 1.0.0
+ */
+public class AdapterConfigs {
+
+    /**
+     * 类型下对应所有配置名, 如:
+     * hbase
+     *  ┗━ mytest_person.yml
+     *  ┗━ mytest_role.yml
+     *  ┗━ mytest_department.yml
+     */
+    private static Map<String, Set<String>> configs = new HashMap<>();
+
+    public static void put(String key, String value) {
+        Set<String> values = configs.get(key);
+        if (values == null) {
+            values = new LinkedHashSet<>();
+        }
+        values.add(value);
+        configs.put(key, values);
+    }
+
+    public static Set<String> get(String key) {
+        return configs.get(key);
+    }
+
+    public static void clear() {
+        configs.clear();
+    }
+}

+ 23 - 34
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/CanalClientConfig.java

@@ -2,29 +2,26 @@ package com.alibaba.otter.canal.client.adapter.support;
 
 import java.util.ArrayList;
 import java.util.List;
-import java.util.Properties;
 
 /**
  * 配置信息类
  *
- * @author machengyuan 2018-8-18 下午10:40:12
+ * @author rewerma 2018-8-18 下午10:40:12
  * @version 1.0.0
  */
 public class CanalClientConfig {
 
-    private String              canalServerHost;
+    private String              canalServerHost;    // 单机模式下canal server的 ip:port
 
-    private String              zookeeperHosts;
+    private String              zookeeperHosts;     // 集群模式下的zk地址, 如果配置了单机地址则以单机为准!!
 
-    private Properties          properties;
+    private String              bootstrapServers;   // kafka or rocket mq 地址
 
-    private String              bootstrapServers;
+    private Boolean             flatMessage = true; // 是否已flatMessage模式传输, 只适用于mq模式
 
-    private List<MQTopic>       mqTopics;
+    private List<MQTopic>       mqTopics;           // mq topic 列表
 
-    private Boolean             flatMessage = true;
-
-    private List<CanalInstance> canalInstances;
+    private List<CanalInstance> canalInstances;     // tcp 模式下 canal 实例列表, 与mq模式不能共存!!
 
     public String getCanalServerHost() {
         return canalServerHost;
@@ -42,14 +39,6 @@ public class CanalClientConfig {
         this.zookeeperHosts = zookeeperHosts;
     }
 
-    public Properties getProperties() {
-        return properties;
-    }
-
-    public void setProperties(Properties properties) {
-        this.properties = properties;
-    }
-
     public String getBootstrapServers() {
         return bootstrapServers;
     }
@@ -62,6 +51,10 @@ public class CanalClientConfig {
         return mqTopics;
     }
 
+    public void setMqTopics(List<MQTopic> mqTopics) {
+        this.mqTopics = mqTopics;
+    }
+
     public Boolean getFlatMessage() {
         return flatMessage;
     }
@@ -70,10 +63,6 @@ public class CanalClientConfig {
         this.flatMessage = flatMessage;
     }
 
-    public void setMqTopics(List<MQTopic> mqTopics) {
-        this.mqTopics = mqTopics;
-    }
-
     public List<CanalInstance> getCanalInstances() {
         return canalInstances;
     }
@@ -84,9 +73,9 @@ public class CanalClientConfig {
 
     public static class CanalInstance {
 
-        private String             instance;
+        private String             instance;      // 实例名
 
-        private List<AdapterGroup> adapterGroups;
+        private List<AdapterGroup> adapterGroups; // 适配器分组列表
 
         public String getInstance() {
             return instance;
@@ -109,24 +98,24 @@ public class CanalClientConfig {
 
     public static class AdapterGroup {
 
-        private List<CanalOuterAdapterConfiguration> outAdapters;
+        private List<OuterAdapterConfig> outAdapters; // 适配器列表
 
-        public List<CanalOuterAdapterConfiguration> getOutAdapters() {
+        public List<OuterAdapterConfig> getOutAdapters() {
             return outAdapters;
         }
 
-        public void setOutAdapters(List<CanalOuterAdapterConfiguration> outAdapters) {
+        public void setOutAdapters(List<OuterAdapterConfig> outAdapters) {
             this.outAdapters = outAdapters;
         }
     }
 
     public static class MQTopic {
 
-        private String      mqMode;
+        private String      mqMode;                     // mq模式 kafka or rocketMQ
 
-        private String      topic;
+        private String      topic;                      // topic名
 
-        private List<Group> groups = new ArrayList<>();
+        private List<Group> groups = new ArrayList<>(); // 分组列表
 
         public String getMqMode() {
             return mqMode;
@@ -155,11 +144,11 @@ public class CanalClientConfig {
 
     public static class Group {
 
-        private String                               groupId;
+        private String                   groupId;     // group id
 
         // private List<Adaptor> adapters = new ArrayList<>();
 
-        private List<CanalOuterAdapterConfiguration> outAdapters;
+        private List<OuterAdapterConfig> outAdapters; // 适配器配置列表
 
         public String getGroupId() {
             return groupId;
@@ -169,11 +158,11 @@ public class CanalClientConfig {
             this.groupId = groupId;
         }
 
-        public List<CanalOuterAdapterConfiguration> getOutAdapters() {
+        public List<OuterAdapterConfig> getOutAdapters() {
             return outAdapters;
         }
 
-        public void setOutAdapters(List<CanalOuterAdapterConfiguration> outAdapters) {
+        public void setOutAdapters(List<OuterAdapterConfig> outAdapters) {
             this.outAdapters = outAdapters;
         }
 

+ 0 - 52
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/CanalOuterAdapterConfiguration.java

@@ -1,52 +0,0 @@
-package com.alibaba.otter.canal.client.adapter.support;
-
-import java.util.Properties;
-
-/**
- * 外部适配器配置信息类
- *
- * @author machengyuan 2018-8-18 下午10:15:12
- * @version 1.0.0
- */
-public class CanalOuterAdapterConfiguration {
-
-    private String     name;      // 适配器名称, 如: logger, hbase, es
-
-    private String     hosts;     // 适配器内部的地址, 比如对应es该参数可以填写es的server地址
-
-    private String     zkHosts;   // 适配器内部的ZK地址, 比如对应HBase该参数可以填写HBase对应的ZK地址
-
-    private Properties properties; // 其余参数, 可填写适配器中的所需的配置信息
-
-    public String getName() {
-        return name;
-    }
-
-    public void setName(String name) {
-        this.name = name;
-    }
-
-    public String getHosts() {
-        return hosts;
-    }
-
-    public void setHosts(String hosts) {
-        this.hosts = hosts;
-    }
-
-    public Properties getProperties() {
-        return properties;
-    }
-
-    public void setProperties(Properties properties) {
-        this.properties = properties;
-    }
-
-    public String getZkHosts() {
-        return zkHosts;
-    }
-
-    public void setZkHosts(String zkHosts) {
-        this.zkHosts = zkHosts;
-    }
-}

+ 81 - 0
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/DatasourceConfig.java

@@ -0,0 +1,81 @@
+package com.alibaba.otter.canal.client.adapter.support;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import com.alibaba.druid.pool.DruidDataSource;
+
+/**
+ * 数据源配置
+ *
+ * @author rewerma @ 2018-10-20
+ * @version 1.0.0
+ */
+public class DatasourceConfig {
+
+    public final static Map<String, DruidDataSource> DATA_SOURCES = new ConcurrentHashMap<>(); // key对应的数据源
+
+    private String                                   driver       = "com.mysql.jdbc.Driver";   // 默认为mysql jdbc驱动
+    private String                                   url;                                      // jdbc url
+    private String                                   database;                                 // jdbc database
+    private String                                   type         = "mysql";                   // 类型, 默认为mysql
+    private String                                   username;                                 // jdbc username
+    private String                                   password;                                 // jdbc password
+    private Integer                                  maxActive    = 3;                         // 连接池最大连接数,默认为3
+
+    public String getDriver() {
+        return driver;
+    }
+
+    public void setDriver(String driver) {
+        this.driver = driver;
+    }
+
+    public String getUrl() {
+        return url;
+    }
+
+    public void setUrl(String url) {
+        this.url = url;
+    }
+
+    public String getDatabase() {
+        return database;
+    }
+
+    public void setDatabase(String database) {
+        this.database = database;
+    }
+
+    public String getType() {
+        return type;
+    }
+
+    public void setType(String type) {
+        this.type = type;
+    }
+
+    public String getUsername() {
+        return username;
+    }
+
+    public void setUsername(String username) {
+        this.username = username;
+    }
+
+    public String getPassword() {
+        return password;
+    }
+
+    public void setPassword(String password) {
+        this.password = password;
+    }
+
+    public Integer getMaxActive() {
+        return maxActive;
+    }
+
+    public void setMaxActive(Integer maxActive) {
+        this.maxActive = maxActive;
+    }
+}

+ 21 - 11
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/Dml.java

@@ -7,23 +7,32 @@ import java.util.Map;
 /**
  * DML操作转换对象
  *
- * @author machengyuan 2018-8-19 下午11:30:49
+ * @author rewerma 2018-8-19 下午11:30:49
  * @version 1.0.0
  */
 public class Dml implements Serializable {
 
     private static final long         serialVersionUID = 2611556444074013268L;
 
-    private String                    database;
-    private String                    table;
-    private String                    type;
+    private String                    destination;                            // 对应canal的实例或者MQ的topic
+    private String                    database;                               // 数据库或schema
+    private String                    table;                                  // 表名
+    private String                    type;                                   // 类型: INSERT UPDATE DELETE
     // binlog executeTime
-    private Long                      es;
+    private Long                      es;                                     // 执行耗时
     // dml build timeStamp
-    private Long                      ts;
-    private String                    sql;
-    private List<Map<String, Object>> data;
-    private List<Map<String, Object>> old;
+    private Long                      ts;                                     // 同步时间
+    private String                    sql;                                    // 执行的sql, dml sql为空
+    private List<Map<String, Object>> data;                                   // 数据列表
+    private List<Map<String, Object>> old;                                    // 旧数据列表, 用于update, size和data的size一一对应
+
+    public String getDestination() {
+        return destination;
+    }
+
+    public void setDestination(String destination) {
+        this.destination = destination;
+    }
 
     public String getDatabase() {
         return database;
@@ -102,7 +111,8 @@ public class Dml implements Serializable {
 
     @Override
     public String toString() {
-        return "Dml [database=" + database + ", table=" + table + ", type=" + type + ", es=" + es + ", ts=" + ts
-               + ", sql=" + sql + ", data=" + data + ", old=" + old + "]";
+        return "Dml{" + "destination='" + destination + '\'' + ", database='" + database + '\'' + ", table='" + table
+               + '\'' + ", type='" + type + '\'' + ", es=" + es + ", ts=" + ts + ", sql='" + sql + '\'' + ", data="
+               + data + ", old=" + old + '}';
     }
 }

+ 44 - 0
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/EtlResult.java

@@ -0,0 +1,44 @@
+package com.alibaba.otter.canal.client.adapter.support;
+
+import java.io.Serializable;
+
+/**
+ * ETL的结果对象
+ *
+ * @author rewerma @ 2018-10-20
+ * @version 1.0.0
+ */
+public class EtlResult implements Serializable {
+
+    private static final long serialVersionUID = 4250522736289866505L;
+
+    private boolean           succeeded        = false;
+
+    private String            resultMessage;
+
+    private String            errorMessage;
+
+    public boolean getSucceeded() {
+        return succeeded;
+    }
+
+    public void setSucceeded(boolean succeeded) {
+        this.succeeded = succeeded;
+    }
+
+    public String getResultMessage() {
+        return resultMessage;
+    }
+
+    public void setResultMessage(String resultMessage) {
+        this.resultMessage = resultMessage;
+    }
+
+    public String getErrorMessage() {
+        return errorMessage;
+    }
+
+    public void setErrorMessage(String errorMessage) {
+        this.errorMessage = errorMessage;
+    }
+}

+ 28 - 39
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/ExtensionLoader.java

@@ -1,22 +1,11 @@
 package com.alibaba.otter.canal.client.adapter.support;
 
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FilenameFilter;
-import java.io.IOException;
-import java.io.InputStreamReader;
+import java.io.*;
 import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLClassLoader;
 import java.nio.file.Paths;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Enumeration;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Set;
-import java.util.TreeSet;
+import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.regex.Pattern;
@@ -27,12 +16,13 @@ import org.slf4j.LoggerFactory;
 /**
  * SPI 类加载器
  *
- * @author machengyuan 2018-8-19 下午11:30:49
+ * @author rewerma 2018-8-19 下午11:30:49
  * @version 1.0.0
  */
 public class ExtensionLoader<T> {
 
-    private static final Logger                                      logger                     = LoggerFactory.getLogger(ExtensionLoader.class);
+    private static final Logger                                      logger                     = LoggerFactory
+        .getLogger(ExtensionLoader.class);
 
     private static final String                                      SERVICES_DIRECTORY         = "META-INF/services/";
 
@@ -40,7 +30,8 @@ public class ExtensionLoader<T> {
 
     private static final String                                      DEFAULT_CLASSLOADER_POLICY = "internal";
 
-    private static final Pattern                                     NAME_SEPARATOR             = Pattern.compile("\\s*[,]+\\s*");
+    private static final Pattern                                     NAME_SEPARATOR             = Pattern
+        .compile("\\s*[,]+\\s*");
 
     private static final ConcurrentMap<Class<?>, ExtensionLoader<?>> EXTENSION_LOADERS          = new ConcurrentHashMap<>();
 
@@ -271,7 +262,8 @@ public class ExtensionLoader<T> {
             return instance;
         } catch (Throwable t) {
             throw new IllegalStateException("Extension instance(name: " + name + ", class: " + type
-                                            + ")  could not be instantiated: " + t.getMessage(), t);
+                                            + ")  could not be instantiated: " + t.getMessage(),
+                t);
         }
     }
 
@@ -279,8 +271,8 @@ public class ExtensionLoader<T> {
         if (type == null) throw new IllegalArgumentException("Extension type == null");
         if (name == null) throw new IllegalArgumentException("Extension name == null");
         Class<?> clazz = getExtensionClasses().get(name);
-        if (clazz == null) throw new IllegalStateException("No such extension \"" + name + "\" for " + type.getName()
-                                                           + "!");
+        if (clazz == null)
+            throw new IllegalStateException("No such extension \"" + name + "\" for " + type.getName() + "!");
         return clazz;
     }
 
@@ -342,8 +334,8 @@ public class ExtensionLoader<T> {
         logger.info("extension classpath dir: " + dir);
         File externalLibDir = new File(dir);
         if (!externalLibDir.exists()) {
-            externalLibDir = new File(File.separator + this.getJarDirectoryPath() + File.separator + "canal_client"
-                                      + File.separator + "lib");
+            externalLibDir = new File(
+                File.separator + this.getJarDirectoryPath() + File.separator + "canal_client" + File.separator + "lib");
         }
         if (externalLibDir.exists()) {
             File[] files = externalLibDir.listFiles(new FilenameFilter() {
@@ -495,12 +487,10 @@ public class ExtensionLoader<T> {
                                             // Class.forName(line, true,
                                             // classLoader);
                                             if (!type.isAssignableFrom(clazz)) {
-                                                throw new IllegalStateException("Error when load extension class(interface: "
-                                                                                + type
-                                                                                + ", class line: "
-                                                                                + clazz.getName()
-                                                                                + "), class "
-                                                                                + clazz.getName()
+                                                throw new IllegalStateException(
+                                                    "Error when load extension class(interface: " + type
+                                                                                + ", class line: " + clazz.getName()
+                                                                                + "), class " + clazz.getName()
                                                                                 + "is not subtype of interface.");
                                             } else {
                                                 try {
@@ -518,9 +508,9 @@ public class ExtensionLoader<T> {
                                                                 extensionClasses.put(n, clazz);
                                                             } else if (c != clazz) {
                                                                 cachedNames.remove(clazz);
-                                                                throw new IllegalStateException("Duplicate extension "
-                                                                                                + type.getName()
-                                                                                                + " name " + n + " on "
+                                                                throw new IllegalStateException(
+                                                                    "Duplicate extension " + type.getName() + " name "
+                                                                                                + n + " on "
                                                                                                 + c.getName() + " and "
                                                                                                 + clazz.getName());
                                                             }
@@ -530,12 +520,9 @@ public class ExtensionLoader<T> {
                                             }
                                         }
                                     } catch (Throwable t) {
-                                        IllegalStateException e = new IllegalStateException("Failed to load extension class(interface: "
-                                                                                            + type
-                                                                                            + ", class line: "
-                                                                                            + line
-                                                                                            + ") in "
-                                                                                            + url
+                                        IllegalStateException e = new IllegalStateException(
+                                            "Failed to load extension class(interface: " + type + ", class line: "
+                                                                                            + line + ") in " + url
                                                                                             + ", cause: "
                                                                                             + t.getMessage(),
                                             t);
@@ -550,13 +537,15 @@ public class ExtensionLoader<T> {
                         }
                     } catch (Throwable t) {
                         logger.error("Exception when load extension class(interface: " + type + ", class file: " + url
-                                     + ") in " + url, t);
+                                     + ") in " + url,
+                            t);
                     }
                 } // end of while urls
             }
         } catch (Throwable t) {
-            logger.error("Exception when load extension class(interface: " + type + ", description file: " + fileName
-                         + ").", t);
+            logger.error(
+                "Exception when load extension class(interface: " + type + ", description file: " + fileName + ").",
+                t);
         }
     }
 

+ 51 - 5
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/JdbcTypeUtil.java

@@ -2,10 +2,7 @@ package com.alibaba.otter.canal.client.adapter.support;
 
 import java.math.BigDecimal;
 import java.math.BigInteger;
-import java.sql.Date;
-import java.sql.Time;
-import java.sql.Timestamp;
-import java.sql.Types;
+import java.sql.*;
 
 import org.joda.time.DateTime;
 import org.slf4j.Logger;
@@ -14,13 +11,62 @@ import org.slf4j.LoggerFactory;
 /**
  * 类型转换工具类
  *
- * @author machengyuan 2018-8-19 下午06:14:23
+ * @author rewerma 2018-8-19 下午06:14:23
  * @version 1.0.0
  */
 public class JdbcTypeUtil {
 
     private static Logger logger = LoggerFactory.getLogger(JdbcTypeUtil.class);
 
+    public static Object getRSData(ResultSet rs, String columnName, int jdbcType) throws SQLException {
+        if (jdbcType == Types.BIT || jdbcType == Types.BOOLEAN) {
+            return rs.getByte(columnName);
+        } else {
+            return rs.getObject(columnName);
+        }
+    }
+
+    public static Class<?> jdbcType2javaType(int jdbcType) {
+        switch (jdbcType) {
+            case Types.BIT:
+            case Types.BOOLEAN:
+                // return Boolean.class;
+            case Types.TINYINT:
+                return Byte.TYPE;
+            case Types.SMALLINT:
+                return Short.class;
+            case Types.INTEGER:
+                return Integer.class;
+            case Types.BIGINT:
+                return Long.class;
+            case Types.DECIMAL:
+            case Types.NUMERIC:
+                return BigDecimal.class;
+            case Types.REAL:
+                return Float.class;
+            case Types.FLOAT:
+            case Types.DOUBLE:
+                return Double.class;
+            case Types.CHAR:
+            case Types.VARCHAR:
+            case Types.LONGVARCHAR:
+                return String.class;
+            case Types.BINARY:
+            case Types.VARBINARY:
+            case Types.LONGVARBINARY:
+            case Types.BLOB:
+                return byte[].class;
+            case Types.DATE:
+                return java.sql.Date.class;
+            case Types.TIME:
+                return Time.class;
+            case Types.TIMESTAMP:
+                return Timestamp.class;
+            default:
+                return String.class;
+        }
+    }
+
     public static Object typeConvert(String columnName, String value, int sqlType, String mysqlType) {
         if (value == null || value.equals("")) {
             return null;

+ 11 - 13
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/MessageUtil.java

@@ -1,11 +1,6 @@
 package com.alibaba.otter.canal.client.adapter.support;
 
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import java.util.*;
 
 import com.alibaba.otter.canal.protocol.CanalEntry;
 import com.alibaba.otter.canal.protocol.FlatMessage;
@@ -14,12 +9,12 @@ import com.alibaba.otter.canal.protocol.Message;
 /**
  * Message对象解析工具类
  *
- * @author machengyuan 2018-8-19 下午06:14:23
+ * @author rewerma 2018-8-19 下午06:14:23
  * @version 1.0.0
  */
 public class MessageUtil {
 
-    public static void parse4Dml(Message message, Consumer<Dml> consumer) {
+    public static void parse4Dml(String destination, Message message, Consumer<Dml> consumer) {
         if (message == null) {
             return;
         }
@@ -42,6 +37,7 @@ public class MessageUtil {
             CanalEntry.EventType eventType = rowChange.getEventType();
 
             final Dml dml = new Dml();
+            dml.setDestination(destination);
             dml.setDatabase(entry.getHeader().getSchemaName());
             dml.setTable(entry.getHeader().getTableName());
             dml.setType(eventType.toString());
@@ -87,10 +83,11 @@ public class MessageUtil {
                         Map<String, Object> rowOld = new LinkedHashMap<>();
                         for (CanalEntry.Column column : rowData.getBeforeColumnsList()) {
                             if (updateSet.contains(column.getName())) {
-                                rowOld.put(column.getName(), JdbcTypeUtil.typeConvert(column.getName(),
-                                    column.getValue(),
-                                    column.getSqlType(),
-                                    column.getMysqlType()));
+                                rowOld.put(column.getName(),
+                                    JdbcTypeUtil.typeConvert(column.getName(),
+                                        column.getValue(),
+                                        column.getSqlType(),
+                                        column.getMysqlType()));
                             }
                         }
                         // update操作将记录修改前的值
@@ -110,11 +107,12 @@ public class MessageUtil {
         }
     }
 
-    public static Dml flatMessage2Dml(FlatMessage flatMessage) {
+    public static Dml flatMessage2Dml(String destination, FlatMessage flatMessage) {
         if (flatMessage == null) {
             return null;
         }
         Dml dml = new Dml();
+        dml.setDestination(destination);
         dml.setDatabase(flatMessage.getDatabase());
         dml.setTable(flatMessage.getTable());
         dml.setType(flatMessage.getType());

+ 52 - 0
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/OuterAdapterConfig.java

@@ -0,0 +1,52 @@
+package com.alibaba.otter.canal.client.adapter.support;
+
+import java.util.Map;
+
+/**
+ * 外部适配器配置信息类
+ *
+ * @author rewerma 2018-8-18 下午10:15:12
+ * @version 1.0.0
+ */
+public class OuterAdapterConfig {
+
+    private String              name;       // 适配器名称, 如: logger, hbase, es
+
+    private String              hosts;      // 适配器内部的地址, 比如对应es该参数可以填写es的server地址
+
+    private String              zkHosts;    // 适配器内部的ZK地址, 比如对应HBase该参数可以填写HBase对应的ZK地址
+
+    private Map<String, String> properties; // 其余参数, 可填写适配器中的所需的配置信息
+
+    public String getName() {
+        return name;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public String getHosts() {
+        return hosts;
+    }
+
+    public void setHosts(String hosts) {
+        this.hosts = hosts;
+    }
+
+    public Map<String, String> getProperties() {
+        return properties;
+    }
+
+    public void setProperties(Map<String, String> properties) {
+        this.properties = properties;
+    }
+
+    public String getZkHosts() {
+        return zkHosts;
+    }
+
+    public void setZkHosts(String zkHosts) {
+        this.zkHosts = zkHosts;
+    }
+}

+ 56 - 0
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/Result.java

@@ -0,0 +1,56 @@
+package com.alibaba.otter.canal.client.adapter.support;
+
+import java.io.Serializable;
+import java.util.Date;
+
+/**
+ * 用于rest的结果返回对象
+ *
+ * @author rewerma @ 2018-10-20
+ * @version 1.0.0
+ */
+public class Result implements Serializable {
+
+    public Integer code = 20000;
+    public Object  data;
+    public String  message;
+    public Date    sysTime;
+
+    public static Result createSuccess(String message) {
+        Result result = new Result();
+        result.setMessage(message);
+        return result;
+    }
+
+    public Integer getCode() {
+        return code;
+    }
+
+    public void setCode(Integer code) {
+        this.code = code;
+    }
+
+    public Object getData() {
+        return data;
+    }
+
+    public void setData(Object data) {
+        this.data = data;
+    }
+
+    public String getMessage() {
+        return message;
+    }
+
+    public void setMessage(String message) {
+        this.message = message;
+    }
+
+    public Date getSysTime() {
+        return sysTime;
+    }
+
+    public void setSysTime(Date sysTime) {
+        this.sysTime = sysTime;
+    }
+}

+ 6 - 0
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/SPI.java

@@ -6,6 +6,12 @@ import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
 import java.lang.annotation.Target;
 
+/**
+ * SPI装载器注解
+ *
+ * @author rewerma @ 2018-10-20
+ * @version 1.0.0
+ */
 @Documented
 @Retention(RetentionPolicy.RUNTIME)
 @Target({ ElementType.TYPE })

+ 2 - 33
client-adapter/hbase/pom.xml

@@ -22,7 +22,8 @@
         <dependency>
             <groupId>org.yaml</groupId>
             <artifactId>snakeyaml</artifactId>
-            <version>1.17</version>
+            <version>1.19</version>
+            <scope>provided</scope>
         </dependency>
         <dependency>
             <groupId>org.apache.hbase</groupId>
@@ -44,38 +45,6 @@
 
     <build>
         <plugins>
-            <plugin>
-                <artifactId>maven-jar-plugin</artifactId>
-                <configuration>
-                    <archive>
-                        <addMavenDescriptor>true</addMavenDescriptor>
-                    </archive>
-                    <excludes>
-                        <exclude>**/hbase-mapping/**</exclude>
-                    </excludes>
-                </configuration>
-            </plugin>
-            <plugin>
-                <artifactId>maven-antrun-plugin</artifactId>
-                <executions>
-                    <execution>
-                        <phase>package</phase>
-                        <goals>
-                            <goal>run</goal>
-                        </goals>
-                        <configuration>
-                            <tasks>
-                                <copy todir="${project.basedir}/../../client-launcher/target/canal_client/conf/hbase-mapping" overwrite="true" >
-                                    <fileset dir="${project.basedir}/src/main/resources/hbase-mapping" erroronmissingdir="true">
-                                        <include name="*.conf"/>
-                                        <include name="*.yml"/>
-                                    </fileset>
-                                </copy>
-                            </tasks>
-                        </configuration>
-                    </execution>
-                </executions>
-            </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-assembly-plugin</artifactId>

+ 111 - 40
client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/HbaseAdapter.java

@@ -2,21 +2,29 @@ package com.alibaba.otter.canal.client.adapter.hbase;
 
 import java.io.IOException;
 import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
 import java.util.Map;
 
+import javax.sql.DataSource;
+
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import com.alibaba.otter.canal.client.adapter.CanalOuterAdapter;
+import com.alibaba.otter.canal.client.adapter.OuterAdapter;
 import com.alibaba.otter.canal.client.adapter.hbase.config.MappingConfig;
 import com.alibaba.otter.canal.client.adapter.hbase.config.MappingConfigLoader;
+import com.alibaba.otter.canal.client.adapter.hbase.service.HbaseEtlService;
 import com.alibaba.otter.canal.client.adapter.hbase.service.HbaseSyncService;
-import com.alibaba.otter.canal.client.adapter.support.CanalOuterAdapterConfiguration;
-import com.alibaba.otter.canal.client.adapter.support.Dml;
-import com.alibaba.otter.canal.client.adapter.support.SPI;
+import com.alibaba.otter.canal.client.adapter.hbase.support.HbaseTemplate;
+import com.alibaba.otter.canal.client.adapter.support.*;
 
 /**
  * HBase外部适配器
@@ -25,74 +33,128 @@ import com.alibaba.otter.canal.client.adapter.support.SPI;
  * @version 1.0.0
  */
 @SPI("hbase")
-public class HbaseAdapter implements CanalOuterAdapter {
+public class HbaseAdapter implements OuterAdapter {
+
+    private static Logger                              logger             = LoggerFactory.getLogger(HbaseAdapter.class);
 
-    private static volatile Map<String, MappingConfig> mappingConfigCache = null;
+    private static volatile Map<String, MappingConfig> hbaseMapping       = null;                                       // 文件名对应配置
+    private static volatile Map<String, MappingConfig> mappingConfigCache = null;                                       // 库名-表名对应配置
 
     private Connection                                 conn;
     private HbaseSyncService                           hbaseSyncService;
+    private HbaseTemplate                              hbaseTemplate;
 
     @Override
-    public void init(CanalOuterAdapterConfiguration configuration) {
+    public void init(OuterAdapterConfig configuration) {
         try {
             if (mappingConfigCache == null) {
                 synchronized (MappingConfig.class) {
                     if (mappingConfigCache == null) {
-                        Map<String, MappingConfig> hbaseMapping = MappingConfigLoader.load();
+                        hbaseMapping = MappingConfigLoader.load();
                         mappingConfigCache = new HashMap<>();
                         for (MappingConfig mappingConfig : hbaseMapping.values()) {
-                            mappingConfigCache.put(mappingConfig.getHbaseOrm().getDatabase() + "-"
-                                                   + mappingConfig.getHbaseOrm().getTable(), mappingConfig);
+                            mappingConfigCache.put(StringUtils.trimToEmpty(mappingConfig.getHbaseMapping().getDestination())
+                                                   + "." + mappingConfig.getHbaseMapping().getDatabase() + "."
+                                                   + mappingConfig.getHbaseMapping().getTable(),
+                                mappingConfig);
                         }
                     }
                 }
             }
 
-            String hosts = configuration.getZkHosts();
-            if (StringUtils.isEmpty(hosts)) {
-                hosts = configuration.getHosts();
-            }
-            if (StringUtils.isEmpty(hosts)) {
-                throw new RuntimeException("Empty zookeeper hosts");
-            }
-            String[] zkHosts = StringUtils.split(hosts, ",");
-            int zkPort = 0;
-            StringBuilder hostsWithoutPort = new StringBuilder();
-            for (String host : zkHosts) {
-                int i = host.indexOf(":");
-                hostsWithoutPort.append(host, 0, i);
-                hostsWithoutPort.append(",");
-                if (zkPort == 0) zkPort = Integer.parseInt(host.substring(i + 1));
-            }
-            hostsWithoutPort.deleteCharAt(hostsWithoutPort.length() - 1);
-
-            String znode = configuration.getProperties().getProperty("znodeParent");
-            if (StringUtils.isEmpty(znode)) {
-                znode = "/hbase";
-            }
+            Map<String, String> propertites = configuration.getProperties();
 
             Configuration hbaseConfig = HBaseConfiguration.create();
-            hbaseConfig.set("hbase.zookeeper.quorum", hostsWithoutPort.toString());
-            hbaseConfig.set("hbase.zookeeper.property.clientPort", Integer.toString(zkPort));
-            hbaseConfig.set("zookeeper.znode.parent", znode);
+            propertites.forEach(hbaseConfig::set);
             conn = ConnectionFactory.createConnection(hbaseConfig);
-            hbaseSyncService = new HbaseSyncService(conn);
+            hbaseTemplate = new HbaseTemplate(conn);
+            hbaseSyncService = new HbaseSyncService(hbaseTemplate);
         } catch (Exception e) {
             throw new RuntimeException(e);
         }
     }
 
     @Override
-    public void writeOut(Dml dml) {
+    public void sync(Dml dml) {
         if (dml == null) {
             return;
         }
+        String destination = StringUtils.trimToEmpty(dml.getDestination());
         String database = dml.getDatabase();
         String table = dml.getTable();
-        MappingConfig config = mappingConfigCache.get(database + "-" + table);
+        MappingConfig config = mappingConfigCache.get(destination + "." + database + "." + table);
         hbaseSyncService.sync(config, dml);
     }
 
+    @Override
+    public EtlResult etl(String task, List<String> params) {
+        EtlResult etlResult = new EtlResult();
+        MappingConfig config = hbaseMapping.get(task);
+        if (config != null) {
+            DataSource dataSource = DatasourceConfig.DATA_SOURCES.get(config.getDataSourceKey());
+            if (dataSource != null) {
+                return HbaseEtlService.importData(dataSource, hbaseTemplate, config, params);
+            } else {
+                etlResult.setSucceeded(false);
+                etlResult.setErrorMessage("DataSource not found");
+                return etlResult;
+            }
+        } else {
+            DataSource dataSource = DatasourceConfig.DATA_SOURCES.get(task);
+            if (dataSource != null) {
+                StringBuilder resultMsg = new StringBuilder();
+                boolean resSucc = true;
+                // ds不为空说明传入的是datasourceKey
+                for (MappingConfig configTmp : hbaseMapping.values()) {
+                    // 取所有的datasourceKey为task的配置
+                    if (configTmp.getDataSourceKey().equals(task)) {
+                        EtlResult etlRes = HbaseEtlService.importData(dataSource, hbaseTemplate, configTmp, params);
+                        if (!etlRes.getSucceeded()) {
+                            resSucc = false;
+                            resultMsg.append(etlRes.getErrorMessage()).append("\n");
+                        } else {
+                            resultMsg.append(etlRes.getResultMessage()).append("\n");
+                        }
+                    }
+                }
+                if (resultMsg.length() > 0) {
+                    etlResult.setSucceeded(resSucc);
+                    if (resSucc) {
+                        etlResult.setResultMessage(resultMsg.toString());
+                    } else {
+                        etlResult.setErrorMessage(resultMsg.toString());
+                    }
+                    return etlResult;
+                }
+            }
+        }
+        etlResult.setSucceeded(false);
+        etlResult.setErrorMessage("Task not found");
+        return etlResult;
+    }
+
+    @Override
+    public Map<String, Object> count(String task) {
+        MappingConfig config = hbaseMapping.get(task);
+        String hbaseTable = config.getHbaseMapping().getHbaseTable();
+        long rowCount = 0L;
+        try {
+            HTable table = (HTable) conn.getTable(TableName.valueOf(hbaseTable));
+            Scan scan = new Scan();
+            scan.setFilter(new FirstKeyOnlyFilter());
+            ResultScanner resultScanner = table.getScanner(scan);
+            for (Result result : resultScanner) {
+                rowCount += result.size();
+            }
+        } catch (IOException e) {
+            logger.error(e.getMessage(), e);
+        }
+        Map<String, Object> res = new LinkedHashMap<>();
+        res.put("hbaseTable", hbaseTable);
+        res.put("count", rowCount);
+        return res;
+    }
+
     @Override
     public void destroy() {
         if (conn != null) {
@@ -103,4 +165,13 @@ public class HbaseAdapter implements CanalOuterAdapter {
             }
         }
     }
+
+    @Override
+    public String getDestination(String task) {
+        MappingConfig config = hbaseMapping.get(task);
+        if (config != null && config.getHbaseMapping() != null) {
+            return config.getHbaseMapping().getDestination();
+        }
+        return null;
+    }
 }

+ 67 - 43
client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/config/MappingConfig.java

@@ -1,43 +1,49 @@
 package com.alibaba.otter.canal.client.adapter.hbase.config;
 
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Set;
+import java.util.*;
 
 /**
  * HBase表映射配置
  *
- * @author machengyuan 2018-8-21 下午06:45:49
+ * @author rewerma 2018-8-21 下午06:45:49
  * @version 1.0.0
  */
 public class MappingConfig {
 
-    private HbaseOrm hbaseOrm;
+    private String       dataSourceKey; // 数据源key
 
-    public HbaseOrm getHbaseOrm() {
-        return hbaseOrm;
+    private HbaseMapping hbaseMapping;  // hbase映射配置
+
+    public String getDataSourceKey() {
+        return dataSourceKey;
+    }
+
+    public void setDataSourceKey(String dataSourceKey) {
+        this.dataSourceKey = dataSourceKey;
     }
 
-    public void setHbaseOrm(HbaseOrm hbaseOrm) {
-        this.hbaseOrm = hbaseOrm;
+    public HbaseMapping getHbaseMapping() {
+        return hbaseMapping;
+    }
+
+    public void setHbaseMapping(HbaseMapping hbaseMapping) {
+        this.hbaseMapping = hbaseMapping;
     }
 
     public void validate() {
-        if (hbaseOrm.database == null || hbaseOrm.database.isEmpty()) {
-            throw new NullPointerException("hbaseOrm.database");
+        if (hbaseMapping.database == null || hbaseMapping.database.isEmpty()) {
+            throw new NullPointerException("hbaseMapping.database");
         }
-        if (hbaseOrm.table == null || hbaseOrm.table.isEmpty()) {
-            throw new NullPointerException("hbaseOrm.table");
+        if (hbaseMapping.table == null || hbaseMapping.table.isEmpty()) {
+            throw new NullPointerException("hbaseMapping.table");
         }
-        if (hbaseOrm.hbaseTable == null || hbaseOrm.hbaseTable.isEmpty()) {
-            throw new NullPointerException("hbaseOrm.hbaseTable");
+        if (hbaseMapping.hbaseTable == null || hbaseMapping.hbaseTable.isEmpty()) {
+            throw new NullPointerException("hbaseMapping.hbaseTable");
         }
-        if (hbaseOrm.mode == null) {
-            throw new NullPointerException("hbaseOrm.mode");
+        if (hbaseMapping.mode == null) {
+            throw new NullPointerException("hbaseMapping.mode");
         }
-        if (hbaseOrm.rowKey != null && hbaseOrm.rowKeyColumn != null) {
+        if (hbaseMapping.rowKey != null && hbaseMapping.rowKeyColumn != null) {
             throw new RuntimeException("已配置了复合主键作为RowKey,无需再指定RowKey列");
         }
     }
@@ -49,12 +55,12 @@ public class MappingConfig {
 
         MappingConfig config = (MappingConfig) o;
 
-        return hbaseOrm != null ? hbaseOrm.equals(config.hbaseOrm) : config.hbaseOrm == null;
+        return hbaseMapping != null ? hbaseMapping.equals(config.hbaseMapping) : config.hbaseMapping == null;
     }
 
     @Override
     public int hashCode() {
-        return hbaseOrm != null ? hbaseOrm.hashCode() : 0;
+        return hbaseMapping != null ? hbaseMapping.hashCode() : 0;
     }
 
     public static class ColumnItem {
@@ -121,7 +127,7 @@ public class MappingConfig {
     }
 
     public enum Mode {
-        STRING("STRING"), NATIVE("NATIVE"), PHOENIX("PHOENIX");
+                      STRING("STRING"), NATIVE("NATIVE"), PHOENIX("PHOENIX");
 
         private String type;
 
@@ -134,24 +140,26 @@ public class MappingConfig {
         }
     }
 
-    public static class HbaseOrm {
-
-        private Mode                    mode               = Mode.STRING;
-        private String                  database;
-        private String                  table;
-        private String                  hbaseTable;
-        private String                  family             = "CF";
-        private boolean                 uppercaseQualifier = true;
-        private boolean                 autoCreateTable    = false;                // 同步时HBase中表不存在的情况下自动建表
-        private String                  rowKey;                                    // 指定复合主键为rowKey
-        private Map<String, String>     columns;
-        private ColumnItem              rowKeyColumn;
-        private String                  etlCondition;
-
-        private Map<String, ColumnItem> columnItems        = new LinkedHashMap<>();
-        private Set<String>             families           = new LinkedHashSet<>();
+    public static class HbaseMapping {
+
+        private Mode                    mode               = Mode.STRING;           // hbase默认转换格式
+        private String                  destination;                                // canal实例或MQ的topic
+        private String                  database;                                   // 数据库名或schema名
+        private String                  table;                                      // 表面名
+        private String                  hbaseTable;                                 // hbase表名
+        private String                  family             = "CF";                  // 默认统一column family
+        private boolean                 uppercaseQualifier = true;                  // 是否转大写
+        private boolean                 autoCreateTable    = false;                 // 同步时HBase中表不存在的情况下自动建表
+        private String                  rowKey;                                     // 指定复合主键为rowKey
+        private Map<String, String>     columns;                                    // 字段映射
+        private List<String>            excludeColumns;                             // 不映射的字段
+        private ColumnItem              rowKeyColumn;                               // rowKey字段
+        private String                  etlCondition;                               // etl条件sql
+
+        private Map<String, ColumnItem> columnItems        = new LinkedHashMap<>(); // 转换后的字段映射列表
+        private Set<String>             families           = new LinkedHashSet<>(); // column family列表
         private int                     readBatch          = 5000;
-        private int                     commitBatch        = 5000;
+        private int                     commitBatch        = 5000;                  // etl等批量提交大小
 
         public Mode getMode() {
             return mode;
@@ -161,6 +169,14 @@ public class MappingConfig {
             this.mode = mode;
         }
 
+        public String getDestination() {
+            return destination;
+        }
+
+        public void setDestination(String destination) {
+            this.destination = destination;
+        }
+
         public String getDatabase() {
             return database;
         }
@@ -277,6 +293,14 @@ public class MappingConfig {
             }
         }
 
+        public List<String> getExcludeColumns() {
+            return excludeColumns;
+        }
+
+        public void setExcludeColumns(List<String> excludeColumns) {
+            this.excludeColumns = excludeColumns;
+        }
+
         public String getFamily() {
             return family;
         }
@@ -325,10 +349,10 @@ public class MappingConfig {
             if (this == o) return true;
             if (o == null || getClass() != o.getClass()) return false;
 
-            HbaseOrm hbaseOrm = (HbaseOrm) o;
+            HbaseMapping hbaseMapping = (HbaseMapping) o;
 
-            if (table != null ? !table.equals(hbaseOrm.table) : hbaseOrm.table != null) return false;
-            return hbaseTable != null ? hbaseTable.equals(hbaseOrm.hbaseTable) : hbaseOrm.hbaseTable == null;
+            if (table != null ? !table.equals(hbaseMapping.table) : hbaseMapping.table != null) return false;
+            return hbaseTable != null ? hbaseTable.equals(hbaseMapping.hbaseTable) : hbaseMapping.hbaseTable == null;
         }
 
         @Override

+ 18 - 18
client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/config/MappingConfigLoader.java

@@ -4,6 +4,7 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.util.Collection;
 import java.util.LinkedHashMap;
 import java.util.Map;
 
@@ -12,21 +13,19 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.yaml.snakeyaml.Yaml;
 
+import com.alibaba.otter.canal.client.adapter.support.AdapterConfigs;
+
 /**
  * HBase表映射配置加载器
- * <p>
- * 配置统一从hbase-mapping/configs.conf文件作为入口, 该文件包含所有表映射配置的名称或者文件名列表。
- * 每个对应的表配置可以yml配置文件或者以database.table为配置名的简化形式
- * </p>
  *
- * @author machengyuan 2018-8-21 下午06:45:49
+ * @author rewerma 2018-8-21 下午06:45:49
  * @version 1.0.0
  */
 public class MappingConfigLoader {
 
     private static Logger       logger    = LoggerFactory.getLogger(MappingConfigLoader.class);
 
-    private static final String BASE_PATH = "hbase-mapping/";
+    private static final String BASE_PATH = "hbase";
 
     /**
      * 加载HBase表映射配置
@@ -35,12 +34,11 @@ public class MappingConfigLoader {
      */
     public static Map<String, MappingConfig> load() {
         logger.info("## Start loading mapping config ... ");
-        String mappingConfigContent = readConfigContent(BASE_PATH + "configs.conf");
 
         Map<String, MappingConfig> result = new LinkedHashMap<>();
 
-        String[] configLines = mappingConfigContent.split("\n");
-        for (String c : configLines) {
+        Collection<String> configs = AdapterConfigs.get("hbase");
+        for (String c : configs) {
             if (c == null) {
                 continue;
             }
@@ -74,6 +72,7 @@ public class MappingConfigLoader {
                 String[] dbTable;
                 if (dsKey == null) {
                     dbTable = srcMeta.split("\\.");
+
                 } else {
                     dbTable = srcMeta.split("@")[0].split("\\.");
                 }
@@ -81,21 +80,22 @@ public class MappingConfigLoader {
                 if (dbTable.length == 2) {
                     config = new MappingConfig();
 
-                    MappingConfig.HbaseOrm hbaseOrm = new MappingConfig.HbaseOrm();
-                    hbaseOrm.setHbaseTable(dbTable[0].toUpperCase() + "." + dbTable[1].toUpperCase());
-                    hbaseOrm.setAutoCreateTable(true);
-                    hbaseOrm.setDatabase(dbTable[0]);
-                    hbaseOrm.setTable(dbTable[1]);
-                    hbaseOrm.setMode(MappingConfig.Mode.STRING);
-                    hbaseOrm.setRowKey(rowKey);
+                    MappingConfig.HbaseMapping hbaseMapping = new MappingConfig.HbaseMapping();
+                    hbaseMapping.setHbaseTable(dbTable[0].toUpperCase() + "." + dbTable[1].toUpperCase());
+                    hbaseMapping.setAutoCreateTable(true);
+                    hbaseMapping.setDatabase(dbTable[0]);
+                    hbaseMapping.setTable(dbTable[1]);
+                    hbaseMapping.setMode(MappingConfig.Mode.PHOENIX);
+                    hbaseMapping.setRowKey(rowKey);
                     // 有定义rowKey
                     if (rowKey != null) {
                         MappingConfig.ColumnItem columnItem = new MappingConfig.ColumnItem();
                         columnItem.setRowKey(true);
                         columnItem.setColumn(rowKey);
-                        hbaseOrm.setRowKeyColumn(columnItem);
+                        hbaseMapping.setRowKeyColumn(columnItem);
                     }
-                    config.setHbaseOrm(hbaseOrm);
+                    config.setHbaseMapping(hbaseMapping);
+                    config.setDataSourceKey(dsKey);
 
                 } else {
                     throw new RuntimeException(String.format("配置项[%s]内容为空, 或格式不符合database.table", c));

+ 385 - 0
client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/service/HbaseEtlService.java

@@ -0,0 +1,385 @@
+package com.alibaba.otter.canal.client.adapter.hbase.service;
+
+import java.sql.*;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Function;
+
+import javax.sql.DataSource;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.alibaba.otter.canal.client.adapter.hbase.config.MappingConfig;
+import com.alibaba.otter.canal.client.adapter.hbase.support.*;
+import com.alibaba.otter.canal.client.adapter.support.EtlResult;
+import com.alibaba.otter.canal.client.adapter.support.JdbcTypeUtil;
+import com.google.common.base.Joiner;
+
+/**
+ * HBase ETL 操作业务类
+ *
+ * @author rewerma @ 2018-10-20
+ * @version 1.0.0
+ */
+public class HbaseEtlService {
+
+    private static Logger logger = LoggerFactory.getLogger(HbaseEtlService.class);
+
+    public static Object sqlRS(DataSource ds, String sql, Function<ResultSet, Object> fun) throws SQLException {
+        Connection conn = null;
+        Statement stmt = null;
+        ResultSet rs = null;
+        try {
+            conn = ds.getConnection();
+            stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
+            stmt.setFetchSize(Integer.MIN_VALUE);
+            rs = stmt.executeQuery(sql);
+            return fun.apply(rs);
+        } finally {
+            if (rs != null) {
+                try {
+                    rs.close();
+                } catch (SQLException e) {
+                    logger.error(e.getMessage(), e);
+                }
+            }
+            if (stmt != null) {
+                try {
+                    stmt.close();
+                } catch (SQLException e) {
+                    logger.error(e.getMessage(), e);
+                }
+            }
+            if (conn != null) {
+                try {
+                    conn.close();
+                } catch (SQLException e) {
+                    logger.error(e.getMessage(), e);
+                }
+            }
+            rs = null;
+            stmt = null;
+            conn = null;
+        }
+    }
+
+    /**
+     * 建表
+     * 
+     * @param hbaseTemplate
+     * @param config
+     */
+    public static void createTable(HbaseTemplate hbaseTemplate, MappingConfig config) {
+        try {
+            // 判断hbase表是否存在,不存在则建表
+            MappingConfig.HbaseMapping hbaseMapping = config.getHbaseMapping();
+            if (!hbaseTemplate.tableExists(hbaseMapping.getHbaseTable())) {
+                hbaseTemplate.createTable(hbaseMapping.getHbaseTable(), hbaseMapping.getFamily());
+            }
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+            throw new RuntimeException(e);
+        }
+    }
+
+    /**
+     * 导入数据
+     * 
+     * @param ds 数据源
+     * @param hbaseTemplate hbaseTemplate
+     * @param config 配置
+     * @param params 筛选条件
+     * @return 导入结果
+     */
+    public static EtlResult importData(DataSource ds, HbaseTemplate hbaseTemplate, MappingConfig config,
+                                       List<String> params) {
+        EtlResult etlResult = new EtlResult();
+        AtomicLong successCount = new AtomicLong();
+        List<String> errMsg = new ArrayList<>();
+        String hbaseTable = "";
+        try {
+            if (config == null) {
+                logger.error("Config is null!");
+                etlResult.setSucceeded(false);
+                etlResult.setErrorMessage("Config is null!");
+                return etlResult;
+            }
+            MappingConfig.HbaseMapping hbaseMapping = config.getHbaseMapping();
+            hbaseTable = hbaseMapping.getHbaseTable();
+
+            long start = System.currentTimeMillis();
+
+            if (params != null && params.size() == 1 && "rebuild".equalsIgnoreCase(params.get(0))) {
+                logger.info(hbaseMapping.getHbaseTable() + " rebuild is starting!");
+                // 如果表存在则删除
+                if (hbaseTemplate.tableExists(hbaseMapping.getHbaseTable())) {
+                    hbaseTemplate.disableTable(hbaseMapping.getHbaseTable());
+                    hbaseTemplate.deleteTable(hbaseMapping.getHbaseTable());
+                }
+                params = null;
+            } else {
+                logger.info(hbaseMapping.getHbaseTable() + " etl is starting!");
+            }
+            createTable(hbaseTemplate, config);
+
+            // 拼接sql
+            String sql = "SELECT * FROM " + config.getHbaseMapping().getDatabase() + "." + hbaseMapping.getTable();
+
+            // 拼接条件
+            if (params != null && params.size() == 1 && hbaseMapping.getEtlCondition() == null) {
+                AtomicBoolean stExists = new AtomicBoolean(false);
+                // 验证是否有SYS_TIME字段
+                sqlRS(ds, sql, rs -> {
+                    try {
+                        ResultSetMetaData rsmd = rs.getMetaData();
+                        int cnt = rsmd.getColumnCount();
+                        for (int i = 1; i <= cnt; i++) {
+                            String columnName = rsmd.getColumnName(i);
+                            if ("SYS_TIME".equalsIgnoreCase(columnName)) {
+                                stExists.set(true);
+                                break;
+                            }
+                        }
+                    } catch (Exception e) {
+                        // ignore
+                    }
+                    return null;
+                });
+                if (stExists.get()) {
+                    sql += " WHERE SYS_TIME >= '" + params.get(0) + "' ";
+                }
+            } else if (hbaseMapping.getEtlCondition() != null && params != null) {
+                String etlCondition = hbaseMapping.getEtlCondition();
+                int size = params.size();
+                for (int i = 0; i < size; i++) {
+                    etlCondition = etlCondition.replace("{" + i + "}", params.get(i));
+                }
+
+                sql += " " + etlCondition;
+            }
+
+            // 获取总数
+            String countSql = "SELECT COUNT(1) FROM ( " + sql + ") _CNT ";
+            long cnt = (Long) sqlRS(ds, countSql, rs -> {
+                Long count = null;
+                try {
+                    if (rs.next()) {
+                        count = ((Number) rs.getObject(1)).longValue();
+                    }
+                } catch (Exception e) {
+                    logger.error(e.getMessage(), e);
+                }
+                return count == null ? 0 : count;
+            });
+
+            // 当大于1万条记录时开启多线程
+            if (cnt >= 10000) {
+                int threadCount = 3;
+                long perThreadCnt = cnt / threadCount;
+                ExecutorService executor = Executors.newFixedThreadPool(threadCount);
+                List<Future<Boolean>> futures = new ArrayList<>(threadCount);
+                for (int i = 0; i < threadCount; i++) {
+                    long offset = i * perThreadCnt;
+                    Long size = null;
+                    if (i != threadCount - 1) {
+                        size = perThreadCnt;
+                    }
+                    String sqlFinal;
+                    if (size != null) {
+                        sqlFinal = sql + " LIMIT " + offset + "," + size;
+                    } else {
+                        sqlFinal = sql + " LIMIT " + offset + "," + cnt;
+                    }
+                    Future<Boolean> future = executor
+                        .submit(() -> executeSqlImport(ds, sqlFinal, hbaseMapping, hbaseTemplate, successCount, errMsg));
+                    futures.add(future);
+                }
+
+                for (Future<Boolean> future : futures) {
+                    future.get();
+                }
+
+                executor.shutdown();
+            } else {
+                executeSqlImport(ds, sql, hbaseMapping, hbaseTemplate, successCount, errMsg);
+            }
+
+            logger.info(
+                hbaseMapping.getHbaseTable() + " etl completed in: " + (System.currentTimeMillis() - start) / 1000 + "s!");
+
+            etlResult.setResultMessage("导入HBase表 " + hbaseMapping.getHbaseTable() + " 数据:" + successCount.get() + " 条");
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+            errMsg.add(hbaseTable + " etl failed! ==>" + e.getMessage());
+        }
+
+        if (errMsg.isEmpty()) {
+            etlResult.setSucceeded(true);
+        } else {
+            etlResult.setErrorMessage(Joiner.on("\n").join(errMsg));
+        }
+        return etlResult;
+    }
+
+    /**
+     * 执行导入
+     * 
+     * @param ds
+     * @param sql
+     * @param hbaseMapping
+     * @param hbaseTemplate
+     * @param successCount
+     * @param errMsg
+     * @return
+     */
+    private static boolean executeSqlImport(DataSource ds, String sql, MappingConfig.HbaseMapping hbaseMapping,
+                                            HbaseTemplate hbaseTemplate, AtomicLong successCount, List<String> errMsg) {
+        try {
+            sqlRS(ds, sql, rs -> {
+                int i = 1;
+
+                try {
+                    boolean complete = false;
+                    List<HRow> rows = new ArrayList<>();
+                    String[] rowKeyColumns = null;
+                    if (hbaseMapping.getRowKey() != null) {
+                        rowKeyColumns = hbaseMapping.getRowKey().trim().split(",");
+                    }
+                    while (rs.next()) {
+                        int cc = rs.getMetaData().getColumnCount();
+                        int[] jdbcTypes = new int[cc];
+                        Class<?>[] classes = new Class[cc];
+                        for (int j = 1; j <= cc; j++) {
+                            int jdbcType = rs.getMetaData().getColumnType(j);
+                            jdbcTypes[j - 1] = jdbcType;
+                            classes[j - 1] = JdbcTypeUtil.jdbcType2javaType(jdbcType);
+                        }
+                        HRow row = new HRow();
+
+                        if (rowKeyColumns != null) {
+                            // 取rowKey字段拼接
+                            StringBuilder rowKeyVale = new StringBuilder();
+                            for (String rowKeyColumnName : rowKeyColumns) {
+                                Object obj = rs.getObject(rowKeyColumnName);
+                                if (obj != null) {
+                                    rowKeyVale.append(obj.toString());
+                                }
+                                rowKeyVale.append("|");
+                            }
+                            int len = rowKeyVale.length();
+                            if (len > 0) {
+                                rowKeyVale.delete(len - 1, len);
+                            }
+                            row.setRowKey(Bytes.toBytes(rowKeyVale.toString()));
+                        }
+
+                        for (int j = 1; j <= cc; j++) {
+                            String columnName = rs.getMetaData().getColumnName(j);
+
+                            Object val = JdbcTypeUtil.getRSData(rs, columnName, jdbcTypes[j - 1]);
+                            if (val == null) {
+                                continue;
+                            }
+
+                            MappingConfig.ColumnItem columnItem = hbaseMapping.getColumnItems().get(columnName);
+                            // 没有配置映射
+                            if (columnItem == null) {
+                                String family = hbaseMapping.getFamily();
+                                String qualifile = columnName;
+                                if (hbaseMapping.isUppercaseQualifier()) {
+                                    qualifile = qualifile.toUpperCase();
+                                }
+                                if (MappingConfig.Mode.STRING == hbaseMapping.getMode()) {
+                                    if (hbaseMapping.getRowKey() == null && j == 1) {
+                                        row.setRowKey(Bytes.toBytes(val.toString()));
+                                    } else {
+                                        row.addCell(family, qualifile, Bytes.toBytes(val.toString()));
+                                    }
+                                } else if (MappingConfig.Mode.NATIVE == hbaseMapping.getMode()) {
+                                    Type type = Type.getType(classes[j - 1]);
+                                    if (hbaseMapping.getRowKey() == null && j == 1) {
+                                        row.setRowKey(TypeUtil.toBytes(val, type));
+                                    } else {
+                                        row.addCell(family, qualifile, TypeUtil.toBytes(val, type));
+                                    }
+                                } else if (MappingConfig.Mode.PHOENIX == hbaseMapping.getMode()) {
+                                    PhType phType = PhType.getType(classes[j - 1]);
+                                    if (hbaseMapping.getRowKey() == null && j == 1) {
+                                        row.setRowKey(PhTypeUtil.toBytes(val, phType));
+                                    } else {
+                                        row.addCell(family, qualifile, PhTypeUtil.toBytes(val, phType));
+                                    }
+                                }
+                            } else {
+                                // 如果不需要类型转换
+                                if (columnItem.getType() == null || "".equals(columnItem.getType())) {
+                                    if (val instanceof java.sql.Date) {
+                                        SimpleDateFormat dateFmt = new SimpleDateFormat("yyyy-MM-dd");
+                                        val = dateFmt.format((Date) val);
+                                    } else if (val instanceof Timestamp) {
+                                        SimpleDateFormat datetimeFmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+                                        val = datetimeFmt.format((Date) val);
+                                    }
+
+                                    byte[] valBytes = Bytes.toBytes(val.toString());
+                                    if (columnItem.isRowKey()) {
+                                        row.setRowKey(valBytes);
+                                    } else {
+                                        row.addCell(columnItem.getFamily(), columnItem.getQualifier(), valBytes);
+                                    }
+                                } else {
+                                    PhType phType = PhType.getType(columnItem.getType());
+                                    if (columnItem.isRowKey()) {
+                                        row.setRowKey(PhTypeUtil.toBytes(val, phType));
+                                    } else {
+                                        row.addCell(columnItem.getFamily(),
+                                            columnItem.getQualifier(),
+                                            PhTypeUtil.toBytes(val, phType));
+                                    }
+                                }
+                            }
+                        }
+
+                        if (row.getRowKey() == null) throw new RuntimeException("RowKey 值为空");
+
+                        rows.add(row);
+                        complete = false;
+                        if (i % hbaseMapping.getCommitBatch() == 0 && !rows.isEmpty()) {
+                            hbaseTemplate.puts(hbaseMapping.getHbaseTable(), rows);
+                            rows.clear();
+                            complete = true;
+                        }
+                        i++;
+                        successCount.incrementAndGet();
+                        if (logger.isDebugEnabled()) {
+                            logger.debug("successful import count:" + successCount.get());
+                        }
+                    }
+
+                    if (!complete && !rows.isEmpty()) {
+                        hbaseTemplate.puts(hbaseMapping.getHbaseTable(), rows);
+                    }
+
+                } catch (Exception e) {
+                    logger.error(hbaseMapping.getHbaseTable() + " etl failed! ==>" + e.getMessage(), e);
+                    errMsg.add(hbaseMapping.getHbaseTable() + " etl failed! ==>" + e.getMessage());
+                    // throw new RuntimeException(e);
+                }
+                return i;
+            });
+            return true;
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+            return false;
+        }
+    }
+}

+ 68 - 71
client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/service/HbaseSyncService.java

@@ -1,29 +1,19 @@
 package com.alibaba.otter.canal.client.adapter.hbase.service;
 
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import java.util.*;
 
-import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.alibaba.otter.canal.client.adapter.hbase.config.MappingConfig;
-import com.alibaba.otter.canal.client.adapter.hbase.support.HRow;
-import com.alibaba.otter.canal.client.adapter.hbase.support.HbaseTemplate;
-import com.alibaba.otter.canal.client.adapter.hbase.support.PhType;
-import com.alibaba.otter.canal.client.adapter.hbase.support.PhTypeUtil;
-import com.alibaba.otter.canal.client.adapter.hbase.support.Type;
-import com.alibaba.otter.canal.client.adapter.hbase.support.TypeUtil;
+import com.alibaba.otter.canal.client.adapter.hbase.support.*;
 import com.alibaba.otter.canal.client.adapter.support.Dml;
 
 /**
  * HBase同步操作业务
  *
- * @author machengyuan 2018-8-21 下午06:45:49
+ * @author rewerma 2018-8-21 下午06:45:49
  * @version 1.0.0
  */
 public class HbaseSyncService {
@@ -32,8 +22,8 @@ public class HbaseSyncService {
 
     private HbaseTemplate hbaseTemplate;                                    // HBase操作模板
 
-    public HbaseSyncService(Connection conn){
-        hbaseTemplate = new HbaseTemplate(conn);
+    public HbaseSyncService(HbaseTemplate hbaseTemplate){
+        this.hbaseTemplate = hbaseTemplate;
     }
 
     public void sync(MappingConfig config, Dml dml) {
@@ -69,11 +59,11 @@ public class HbaseSyncService {
             return;
         }
 
-        MappingConfig.HbaseOrm hbaseOrm = config.getHbaseOrm();
+        MappingConfig.HbaseMapping hbaseMapping = config.getHbaseMapping();
 
         // if (!validHTable(config)) {
         // logger.error("HBase table '{}' not exists",
-        // hbaseOrm.getHbaseTable());
+        // hbaseMapping.getHbaseTable());
         // return;
         // }
         int i = 1;
@@ -83,28 +73,28 @@ public class HbaseSyncService {
             HRow hRow = new HRow();
 
             // 拼接复合rowKey
-            if (hbaseOrm.getRowKey() != null) {
-                String[] rowKeyColumns = hbaseOrm.getRowKey().trim().split(",");
+            if (hbaseMapping.getRowKey() != null) {
+                String[] rowKeyColumns = hbaseMapping.getRowKey().trim().split(",");
                 String rowKeyVale = getRowKeys(rowKeyColumns, r);
                 // params.put("rowKey", Bytes.toBytes(rowKeyVale));
                 hRow.setRowKey(Bytes.toBytes(rowKeyVale));
             }
 
-            convertData2Row(hbaseOrm, hRow, r);
+            convertData2Row(hbaseMapping, hRow, r);
             if (hRow.getRowKey() == null) {
                 throw new RuntimeException("empty rowKey");
             }
             rows.add(hRow);
             complete = false;
-            if (i % config.getHbaseOrm().getCommitBatch() == 0 && !rows.isEmpty()) {
-                hbaseTemplate.puts(hbaseOrm.getHbaseTable(), rows);
+            if (i % config.getHbaseMapping().getCommitBatch() == 0 && !rows.isEmpty()) {
+                hbaseTemplate.puts(hbaseMapping.getHbaseTable(), rows);
                 rows.clear();
                 complete = true;
             }
             i++;
         }
         if (!complete && !rows.isEmpty()) {
-            hbaseTemplate.puts(hbaseOrm.getHbaseTable(), rows);
+            hbaseTemplate.puts(hbaseMapping.getHbaseTable(), rows);
         }
 
     }
@@ -112,27 +102,30 @@ public class HbaseSyncService {
     /**
      * 将Map数据转换为HRow行数据
      * 
-     * @param hbaseOrm hbase映射配置
+     * @param hbaseMapping hbase映射配置
      * @param hRow 行对象
      * @param data Map数据
      */
-    private static void convertData2Row(MappingConfig.HbaseOrm hbaseOrm, HRow hRow, Map<String, Object> data) {
-        Map<String, MappingConfig.ColumnItem> columnItems = hbaseOrm.getColumnItems();
+    private static void convertData2Row(MappingConfig.HbaseMapping hbaseMapping, HRow hRow, Map<String, Object> data) {
+        Map<String, MappingConfig.ColumnItem> columnItems = hbaseMapping.getColumnItems();
         int i = 0;
         for (Map.Entry<String, Object> entry : data.entrySet()) {
+            if (hbaseMapping.getExcludeColumns() != null && hbaseMapping.getExcludeColumns().contains(entry.getKey())) {
+                continue;
+            }
             if (entry.getValue() != null) {
                 MappingConfig.ColumnItem columnItem = columnItems.get(entry.getKey());
 
-                byte[] bytes = typeConvert(columnItem, hbaseOrm, entry.getValue());
+                byte[] bytes = typeConvert(columnItem, hbaseMapping, entry.getValue());
 
                 if (columnItem == null) {
-                    String familyName = hbaseOrm.getFamily();
+                    String familyName = hbaseMapping.getFamily();
                     String qualifier = entry.getKey();
-                    if (hbaseOrm.isUppercaseQualifier()) {
+                    if (hbaseMapping.isUppercaseQualifier()) {
                         qualifier = qualifier.toUpperCase();
                     }
 
-                    if (hbaseOrm.getRowKey() == null && i == 0) {
+                    if (hbaseMapping.getRowKey() == null && i == 0) {
                         hRow.setRowKey(bytes);
                     } else {
                         hRow.addCell(familyName, qualifier, bytes);
@@ -163,15 +156,15 @@ public class HbaseSyncService {
             return;
         }
 
-        MappingConfig.HbaseOrm hbaseOrm = config.getHbaseOrm();
+        MappingConfig.HbaseMapping hbaseMapping = config.getHbaseMapping();
 
         // if (!validHTable(config)) {
         // logger.error("HBase table '{}' not exists",
-        // hbaseOrm.getHbaseTable());
+        // hbaseMapping.getHbaseTable());
         // return;
         // }
 
-        MappingConfig.ColumnItem rowKeyColumn = hbaseOrm.getRowKeyColumn();
+        MappingConfig.ColumnItem rowKeyColumn = hbaseMapping.getRowKeyColumn();
         int index = 0;
         int i = 1;
         boolean complete = false;
@@ -179,8 +172,8 @@ public class HbaseSyncService {
         out: for (Map<String, Object> r : data) {
             byte[] rowKeyBytes;
 
-            if (hbaseOrm.getRowKey() != null) {
-                String[] rowKeyColumns = hbaseOrm.getRowKey().trim().split(",");
+            if (hbaseMapping.getRowKey() != null) {
+                String[] rowKeyColumns = hbaseMapping.getRowKey().trim().split(",");
 
                 // 判断是否有复合主键修改
                 for (String updateColumn : old.get(index).keySet()) {
@@ -197,20 +190,23 @@ public class HbaseSyncService {
                 rowKeyBytes = Bytes.toBytes(rowKeyVale);
             } else if (rowKeyColumn == null) {
                 Map<String, Object> rowKey = data.get(0);
-                rowKeyBytes = typeConvert(null, hbaseOrm, rowKey.values().iterator().next());
+                rowKeyBytes = typeConvert(null, hbaseMapping, rowKey.values().iterator().next());
             } else {
-                rowKeyBytes = typeConvert(rowKeyColumn, hbaseOrm, r.get(rowKeyColumn.getColumn()));
+                rowKeyBytes = typeConvert(rowKeyColumn, hbaseMapping, r.get(rowKeyColumn.getColumn()));
             }
             if (rowKeyBytes == null) throw new RuntimeException("rowKey值为空");
 
-            Map<String, MappingConfig.ColumnItem> columnItems = hbaseOrm.getColumnItems();
+            Map<String, MappingConfig.ColumnItem> columnItems = hbaseMapping.getColumnItems();
             HRow hRow = new HRow(rowKeyBytes);
             for (String updateColumn : old.get(index).keySet()) {
+                if (hbaseMapping.getExcludeColumns() != null && hbaseMapping.getExcludeColumns().contains(updateColumn)) {
+                    continue;
+                }
                 MappingConfig.ColumnItem columnItem = columnItems.get(updateColumn);
                 if (columnItem == null) {
-                    String family = hbaseOrm.getFamily();
+                    String family = hbaseMapping.getFamily();
                     String qualifier = updateColumn;
-                    if (hbaseOrm.isUppercaseQualifier()) {
+                    if (hbaseMapping.isUppercaseQualifier()) {
                         qualifier = qualifier.toUpperCase();
                     }
 
@@ -219,7 +215,7 @@ public class HbaseSyncService {
                     if (newVal == null) {
                         hRow.addCell(family, qualifier, null);
                     } else {
-                        hRow.addCell(family, qualifier, typeConvert(null, hbaseOrm, newVal));
+                        hRow.addCell(family, qualifier, typeConvert(null, hbaseMapping, newVal));
                     }
                 } else {
                     // 排除修改id的情况
@@ -231,14 +227,14 @@ public class HbaseSyncService {
                     } else {
                         hRow.addCell(columnItem.getFamily(),
                             columnItem.getQualifier(),
-                            typeConvert(columnItem, hbaseOrm, newVal));
+                            typeConvert(columnItem, hbaseMapping, newVal));
                     }
                 }
             }
             rows.add(hRow);
             complete = false;
-            if (i % config.getHbaseOrm().getCommitBatch() == 0 && !rows.isEmpty()) {
-                hbaseTemplate.puts(hbaseOrm.getHbaseTable(), rows);
+            if (i % config.getHbaseMapping().getCommitBatch() == 0 && !rows.isEmpty()) {
+                hbaseTemplate.puts(hbaseMapping.getHbaseTable(), rows);
                 rows.clear();
                 complete = true;
             }
@@ -246,7 +242,7 @@ public class HbaseSyncService {
             index++;
         }
         if (!complete && !rows.isEmpty()) {
-            hbaseTemplate.puts(hbaseOrm.getHbaseTable(), rows);
+            hbaseTemplate.puts(hbaseMapping.getHbaseTable(), rows);
         }
     }
 
@@ -256,45 +252,45 @@ public class HbaseSyncService {
             return;
         }
 
-        MappingConfig.HbaseOrm hbaseOrm = config.getHbaseOrm();
+        MappingConfig.HbaseMapping hbaseMapping = config.getHbaseMapping();
 
         // if (!validHTable(config)) {
         // logger.error("HBase table '{}' not exists",
-        // hbaseOrm.getHbaseTable());
+        // hbaseMapping.getHbaseTable());
         // return;
         // }
 
-        MappingConfig.ColumnItem rowKeyColumn = hbaseOrm.getRowKeyColumn();
+        MappingConfig.ColumnItem rowKeyColumn = hbaseMapping.getRowKeyColumn();
         boolean complete = false;
         int i = 1;
         Set<byte[]> rowKeys = new HashSet<>();
         for (Map<String, Object> r : data) {
             byte[] rowKeyBytes;
 
-            if (hbaseOrm.getRowKey() != null) {
-                String[] rowKeyColumns = hbaseOrm.getRowKey().trim().split(",");
+            if (hbaseMapping.getRowKey() != null) {
+                String[] rowKeyColumns = hbaseMapping.getRowKey().trim().split(",");
                 String rowKeyVale = getRowKeys(rowKeyColumns, r);
                 rowKeyBytes = Bytes.toBytes(rowKeyVale);
             } else if (rowKeyColumn == null) {
                 // 如果不需要类型转换
                 Map<String, Object> rowKey = data.get(0);
-                rowKeyBytes = typeConvert(null, hbaseOrm, rowKey.values().iterator().next());
+                rowKeyBytes = typeConvert(null, hbaseMapping, rowKey.values().iterator().next());
             } else {
                 Object val = r.get(rowKeyColumn.getColumn());
-                rowKeyBytes = typeConvert(rowKeyColumn, hbaseOrm, val);
+                rowKeyBytes = typeConvert(rowKeyColumn, hbaseMapping, val);
             }
             if (rowKeyBytes == null) throw new RuntimeException("rowKey值为空");
             rowKeys.add(rowKeyBytes);
             complete = false;
-            if (i % config.getHbaseOrm().getCommitBatch() == 0 && !rowKeys.isEmpty()) {
-                hbaseTemplate.deletes(hbaseOrm.getHbaseTable(), rowKeys);
+            if (i % config.getHbaseMapping().getCommitBatch() == 0 && !rowKeys.isEmpty()) {
+                hbaseTemplate.deletes(hbaseMapping.getHbaseTable(), rowKeys);
                 rowKeys.clear();
                 complete = true;
             }
             i++;
         }
         if (!complete && !rowKeys.isEmpty()) {
-            hbaseTemplate.deletes(hbaseOrm.getHbaseTable(), rowKeys);
+            hbaseTemplate.deletes(hbaseMapping.getHbaseTable(), rowKeys);
         }
     }
 
@@ -304,9 +300,9 @@ public class HbaseSyncService {
         if (old == null || old.isEmpty() || data == null || data.isEmpty()) {
             return;
         }
-        MappingConfig.HbaseOrm hbaseOrm = config.getHbaseOrm();
+        MappingConfig.HbaseMapping hbaseMapping = config.getHbaseMapping();
 
-        String[] rowKeyColumns = hbaseOrm.getRowKey().trim().split(",");
+        String[] rowKeyColumns = hbaseMapping.getRowKey().trim().split(",");
 
         int index = 0;
         int i = 1;
@@ -347,13 +343,13 @@ public class HbaseSyncService {
 
             rowKeys.add(oldRowKeyBytes);
             HRow row = new HRow(newRowKeyBytes);
-            convertData2Row(hbaseOrm, row, r);
+            convertData2Row(hbaseMapping, row, r);
             rows.add(row);
             complete = false;
-            if (i % config.getHbaseOrm().getCommitBatch() == 0 && !rows.isEmpty()) {
-                hbaseTemplate.deletes(hbaseOrm.getHbaseTable(), rowKeys);
+            if (i % config.getHbaseMapping().getCommitBatch() == 0 && !rows.isEmpty()) {
+                hbaseTemplate.deletes(hbaseMapping.getHbaseTable(), rowKeys);
 
-                hbaseTemplate.puts(hbaseOrm.getHbaseTable(), rows);
+                hbaseTemplate.puts(hbaseMapping.getHbaseTable(), rows);
                 rowKeys.clear();
                 rows.clear();
                 complete = true;
@@ -362,8 +358,8 @@ public class HbaseSyncService {
             index++;
         }
         if (!complete && !rows.isEmpty()) {
-            hbaseTemplate.deletes(hbaseOrm.getHbaseTable(), rowKeys);
-            hbaseTemplate.puts(hbaseOrm.getHbaseTable(), rows);
+            hbaseTemplate.deletes(hbaseMapping.getHbaseTable(), rowKeys);
+            hbaseTemplate.puts(hbaseMapping.getHbaseTable(), rows);
         }
     }
 
@@ -371,31 +367,32 @@ public class HbaseSyncService {
      * 根据对应的类型进行转换
      * 
      * @param columnItem 列项配置
-     * @param hbaseOrm hbase映射配置
+     * @param hbaseMapping hbase映射配置
      * @param value 值
      * @return 复合字段rowKey
      */
-    private static byte[] typeConvert(MappingConfig.ColumnItem columnItem, MappingConfig.HbaseOrm hbaseOrm, Object value) {
+    private static byte[] typeConvert(MappingConfig.ColumnItem columnItem, MappingConfig.HbaseMapping hbaseMapping,
+                                      Object value) {
         if (value == null) {
             return null;
         }
         byte[] bytes = null;
         if (columnItem == null || columnItem.getType() == null || "".equals(columnItem.getType())) {
-            if (MappingConfig.Mode.STRING == hbaseOrm.getMode()) {
+            if (MappingConfig.Mode.STRING == hbaseMapping.getMode()) {
                 bytes = Bytes.toBytes(value.toString());
-            } else if (MappingConfig.Mode.NATIVE == hbaseOrm.getMode()) {
+            } else if (MappingConfig.Mode.NATIVE == hbaseMapping.getMode()) {
                 bytes = TypeUtil.toBytes(value);
-            } else if (MappingConfig.Mode.PHOENIX == hbaseOrm.getMode()) {
+            } else if (MappingConfig.Mode.PHOENIX == hbaseMapping.getMode()) {
                 PhType phType = PhType.getType(value.getClass());
                 bytes = PhTypeUtil.toBytes(value, phType);
             }
         } else {
-            if (hbaseOrm.getMode() == MappingConfig.Mode.STRING) {
+            if (hbaseMapping.getMode() == MappingConfig.Mode.STRING) {
                 bytes = Bytes.toBytes(value.toString());
-            } else if (hbaseOrm.getMode() == MappingConfig.Mode.NATIVE) {
+            } else if (hbaseMapping.getMode() == MappingConfig.Mode.NATIVE) {
                 Type type = Type.getType(columnItem.getType());
                 bytes = TypeUtil.toBytes(value, type);
-            } else if (hbaseOrm.getMode() == MappingConfig.Mode.PHOENIX) {
+            } else if (hbaseMapping.getMode() == MappingConfig.Mode.PHOENIX) {
                 PhType phType = PhType.getType(columnItem.getType());
                 bytes = PhTypeUtil.toBytes(value, phType);
             }

+ 2 - 2
client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/support/TypeUtil.java

@@ -92,6 +92,7 @@ public class TypeUtil {
         return b;
     }
 
+    @SuppressWarnings("unchecked")
     public static <T> T toObject(byte[] bytes, Class<T> clazz) {
         if (bytes == null) {
             return null;
@@ -132,10 +133,10 @@ public class TypeUtil {
         } else {
             throw new IllegalArgumentException("mismatch class type");
         }
-        // noinspection unchecked
         return (T) res;
     }
 
+    @SuppressWarnings("unchecked")
     public static <T> T toObject(byte[] bytes, Type type) {
         if (bytes == null) {
             return null;
@@ -182,7 +183,6 @@ public class TypeUtil {
         } else {
             throw new IllegalArgumentException("mismatch class type");
         }
-        // noinspection unchecked
         return (T) res;
     }
 }

+ 0 - 0
client-adapter/hbase/src/main/resources/META-INF/canal/com.alibaba.otter.canal.client.adapter.CanalOuterAdapter → client-adapter/hbase/src/main/resources/META-INF/canal/com.alibaba.otter.canal.client.adapter.OuterAdapter


+ 0 - 5
client-adapter/hbase/src/main/resources/hbase-mapping/configs.conf

@@ -1,5 +0,0 @@
-# 详细映射配置
-mytest_person2.yml
-
-# 简易配置, 只用指定数据库名.表名, 详细配置全部使用默认
-mytest.person

+ 6 - 2
client-adapter/hbase/src/main/resources/hbase-mapping/mytest_person2.yml → client-adapter/hbase/src/main/resources/hbase/mytest_person2.yml

@@ -1,5 +1,7 @@
-hbaseOrm:
+dataSourceKey: defaultDS
+hbaseMapping:
   mode: PHOENIX  #NATIVE   #STRING
+  destination: example
   database: mytest  # 数据库名
   table: person2     # 数据库表名
   hbaseTable: MYTEST.PERSON2   # HBase表名
@@ -15,6 +17,8 @@ hbaseOrm:
     type: $DECIMAL
     c_time: C_TIME$UNSIGNED_TIMESTAMP
     birthday: BIRTHDAY$DATE
+  excludeColumns:
+    - lat   # 忽略字段
 
 # -- NATIVE类型
 # $DEFAULT
@@ -43,7 +47,7 @@ hbaseOrm:
 # $TINYINT                  对应PHOENIX里的TINYINT                1字节
 # $SMALLINT                 对应PHOENIX里的SMALLINT               2字节
 # $FLOAT                    对应PHOENIX里的FLOAT                  4字节
-# DOUBLE                    对应PHOENIX里的DOUBLE                 8字节
+# $DOUBLE                   对应PHOENIX里的DOUBLE                 8字节
 # $BOOLEAN                  对应PHOENIX里的BOOLEAN                1字节
 # $TIME                     对应PHOENIX里的TIME                   8字节
 # $DATE                     对应PHOENIX里的DATE                   8字节

+ 209 - 0
client-adapter/launcher/pom.xml

@@ -0,0 +1,209 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>canal.client-adapter</artifactId>
+        <groupId>com.alibaba.otter</groupId>
+        <version>1.1.1-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>com.alibaba.otter</groupId>
+    <artifactId>client-adapter.launcher</artifactId>
+    <packaging>jar</packaging>
+    <name>canal client adapter launcher module for otter ${project.version}</name>
+
+    <dependencyManagement>
+        <dependencies>
+            <dependency>
+                <groupId>org.springframework.boot</groupId>
+                <artifactId>spring-boot-dependencies</artifactId>
+                <version>2.0.1.RELEASE</version>
+                <type>pom</type>
+                <scope>import</scope>
+            </dependency>
+        </dependencies>
+    </dependencyManagement>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.alibaba.otter</groupId>
+            <artifactId>client-adapter.common</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>com.alibaba.otter</groupId>
+            <artifactId>canal.client</artifactId>
+            <version>${canal_version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.yaml</groupId>
+            <artifactId>snakeyaml</artifactId>
+            <version>1.19</version>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-starter-web</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-configuration-processor</artifactId>
+            <optional>true</optional>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.curator</groupId>
+            <artifactId>curator-recipes</artifactId>
+            <version>2.10.0</version>
+        </dependency>
+        <!-- 单独引入rocketmq依赖 -->
+        <dependency>
+            <groupId>org.apache.rocketmq</groupId>
+            <artifactId>rocketmq-client</artifactId>
+            <version>4.3.0</version>
+        </dependency>
+        <!-- 单独引入kafka依赖 -->
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-clients</artifactId>
+            <version>1.1.1</version>
+        </dependency>
+
+        <!-- outer adapter jar with dependencies-->
+        <dependency>
+            <groupId>com.alibaba.otter</groupId>
+            <artifactId>client-adapter.logger</artifactId>
+            <version>${project.version}</version>
+            <exclusions>
+                <exclusion>
+                    <artifactId>*</artifactId>
+                    <groupId>*</groupId>
+                </exclusion>
+            </exclusions>
+            <classifier>jar-with-dependencies</classifier>
+            <optional>true</optional>
+        </dependency>
+        <dependency>
+            <groupId>com.alibaba.otter</groupId>
+            <artifactId>client-adapter.hbase</artifactId>
+            <version>${project.version}</version>
+            <exclusions>
+                <exclusion>
+                    <artifactId>*</artifactId>
+                    <groupId>*</groupId>
+                </exclusion>
+            </exclusions>
+            <classifier>jar-with-dependencies</classifier>
+            <optional>true</optional>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <finalName>canal-adapter-launcher</finalName>
+        <plugins>
+            <plugin>
+                <groupId>org.springframework.boot</groupId>
+                <artifactId>spring-boot-maven-plugin</artifactId>
+                <version>2.0.1.RELEASE</version>
+                <configuration>
+                    <excludes>
+                        <exclude>
+                            <groupId>com.alibaba.otter</groupId>
+                            <artifactId>client-adapter.hbase</artifactId>
+                        </exclude>
+                        <exclude>
+                            <groupId>com.alibaba.otter</groupId>
+                            <artifactId>client-adapter.logger</artifactId>
+                        </exclude>
+                    </excludes>
+                </configuration>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>repackage</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <artifactId>maven-antrun-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>run</goal>
+                        </goals>
+                        <configuration>
+                            <tasks>
+                                <copy todir="${project.basedir}/target/config" overwrite="true">
+                                    <fileset dir="${project.basedir}/src/main/resources" erroronmissingdir="true">
+                                        <include name="*.yml"/>
+                                    </fileset>
+                                </copy>
+                                <copy todir="${project.basedir}/target/config/hbase" overwrite="true">
+                                    <fileset dir="${project.basedir}/../hbase/src/main/resources/hbase" erroronmissingdir="true">
+                                        <include name="*.yml"/>
+                                    </fileset>
+                                </copy>
+                            </tasks>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-dependency-plugin</artifactId>
+                <version>2.10</version>
+                <executions>
+                    <execution>
+                        <id>copy-dependencies-to-canal-client-service</id>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>copy-dependencies</goal>
+                        </goals>
+                        <configuration>
+                            <includeClassifiers>jar-with-dependencies</includeClassifiers>
+                            <outputDirectory>${project.basedir}/target/lib</outputDirectory>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+
+    <profiles>
+        <profile>
+            <id>release</id>
+            <activation>
+                <property>
+                    <name>env</name>
+                    <value>release</value>
+                </property>
+            </activation>
+
+            <build>
+                <plugins>
+                    <plugin>
+                        <artifactId>maven-assembly-plugin</artifactId>
+                        <configuration>
+                            <appendAssemblyId>false</appendAssemblyId>
+                            <descriptors>
+                                <descriptor>${basedir}/src/main/assembly/release.xml</descriptor>
+                            </descriptors>
+                            <finalName>${project.artifactId}-${project.version}</finalName>
+                            <outputDirectory>${project.basedir}/../../target</outputDirectory>
+                        </configuration>
+                        <executions>
+                            <execution>
+                                <id>make-assembly</id>
+                                <phase>package</phase>
+                                <goals>
+                                    <goal>single</goal>
+                                </goals>
+                            </execution>
+                        </executions>
+                    </plugin>
+                </plugins>
+            </build>
+        </profile>
+    </profiles>
+</project>

+ 34 - 0
client-adapter/launcher/src/main/assembly/release.xml

@@ -0,0 +1,34 @@
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+          xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+    <id>canal-adapter</id>
+    <formats>
+        <format>tar.gz</format>
+    </formats>
+    <fileSets>
+        <fileSet>
+            <directory>${project.build.directory}</directory>
+            <outputDirectory>/</outputDirectory>
+            <includes>
+                <include>*.jar</include>
+            </includes>
+        </fileSet>
+
+        <fileSet>
+            <directory>${project.build.directory}/lib</directory>
+            <outputDirectory>/lib/</outputDirectory>
+            <includes>
+                <include>*.jar</include>
+            </includes>
+        </fileSet>
+
+        <fileSet>
+            <directory>${project.build.directory}/config</directory>
+            <outputDirectory>/config/</outputDirectory>
+            <includes>
+                <include>**</include>
+            </includes>
+        </fileSet>
+    </fileSets>
+    <baseDirectory>/</baseDirectory>
+</assembly>

+ 18 - 0
client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/CanalAdapterApplication.java

@@ -0,0 +1,18 @@
+package com.alibaba.otter.canal.adapter.launcher;
+
+import org.springframework.boot.autoconfigure.SpringBootApplication;
+import org.springframework.boot.builder.SpringApplicationBuilder;
+
+/**
+ * 启动入口
+ *
+ * @author rewerma @ 2018-10-20
+ * @version 1.0.0
+ */
+@SpringBootApplication
+public class CanalAdapterApplication {
+
+    public static void main(String[] args) {
+        new SpringApplicationBuilder(CanalAdapterApplication.class).run(args);
+    }
+}

+ 120 - 0
client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/common/EtlLock.java

@@ -0,0 +1,120 @@
+package com.alibaba.otter.canal.adapter.launcher.common;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReentrantLock;
+
+import javax.annotation.PostConstruct;
+import javax.annotation.Resource;
+
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.recipes.locks.InterProcessMutex;
+import org.springframework.stereotype.Component;
+
+import com.alibaba.otter.canal.adapter.launcher.config.CuratorClient;
+
+/**
+ * Etl 同步锁
+ *
+ * @author rewerma @ 2018-10-20
+ * @version 1.0.0
+ */
+@Component
+public class EtlLock {
+
+    private static final Map<String, ReentrantLock>     LOCAL_LOCK       = new ConcurrentHashMap<>();
+
+    private static final Map<String, InterProcessMutex> DISTRIBUTED_LOCK = new ConcurrentHashMap<>();
+
+    private static Mode                                 mode             = Mode.LOCAL;
+
+    @Resource
+    private CuratorClient                               curatorClient;
+
+    @PostConstruct
+    public void init() {
+        CuratorFramework curator = curatorClient.getCurator();
+        if (curator != null) {
+            mode = Mode.DISTRIBUTED;
+        } else {
+            mode = Mode.LOCAL;
+        }
+    }
+
+    private ReentrantLock getLock(String key) {
+        ReentrantLock lock = LOCAL_LOCK.get(key);
+        if (lock == null) {
+            synchronized (EtlLock.class) {
+                lock = LOCAL_LOCK.get(key);
+                if (lock == null) {
+                    lock = new ReentrantLock();
+                    LOCAL_LOCK.put(key, lock);
+                }
+            }
+        }
+        return lock;
+    }
+
+    private InterProcessMutex getRemoteLock(String key) {
+        InterProcessMutex lock = DISTRIBUTED_LOCK.get(key);
+        if (lock == null) {
+            synchronized (EtlLock.class) {
+                lock = DISTRIBUTED_LOCK.get(key);
+                if (lock == null) {
+                    lock = new InterProcessMutex(curatorClient.getCurator(), key);
+                    DISTRIBUTED_LOCK.put(key, lock);
+                }
+            }
+        }
+        return lock;
+    }
+
+    public void lock(String key) throws Exception {
+        if (mode == Mode.LOCAL) {
+            getLock(key).lock();
+        } else {
+            InterProcessMutex lock = getRemoteLock(key);
+            lock.acquire();
+        }
+    }
+
+    public boolean tryLock(String key, long timeout, TimeUnit unit) {
+        try {
+            if (mode == Mode.LOCAL) {
+                return getLock(key).tryLock(timeout, unit);
+            } else {
+                InterProcessMutex lock = getRemoteLock(key);
+                return lock.acquire(timeout, unit);
+            }
+        } catch (Exception e) {
+            return false;
+        }
+    }
+
+    public boolean tryLock(String key) {
+        try {
+            if (mode == Mode.LOCAL) {
+                return getLock(key).tryLock();
+            } else {
+                InterProcessMutex lock = getRemoteLock(key);
+                return lock.acquire(500, TimeUnit.MILLISECONDS);
+            }
+        } catch (Exception e) {
+            return false;
+        }
+    }
+
+    public void unlock(String key) {
+        if (mode == Mode.LOCAL) {
+            getLock(key).unlock();
+        } else {
+            InterProcessMutex lock = getRemoteLock(key);
+            try {
+                lock.release();
+            } catch (Exception e) {
+                // ignore
+            }
+        }
+    }
+}

+ 6 - 0
client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/common/Mode.java

@@ -0,0 +1,6 @@
+package com.alibaba.otter.canal.adapter.launcher.common;
+
+public enum Mode {
+                  LOCAL, // 本地模式
+                  DISTRIBUTED // 分布式
+}

+ 213 - 0
client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/common/SyncSwitch.java

@@ -0,0 +1,213 @@
+package com.alibaba.otter.canal.adapter.launcher.common;
+
+import java.nio.charset.StandardCharsets;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import javax.annotation.PostConstruct;
+import javax.annotation.Resource;
+
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.recipes.cache.NodeCache;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.data.Stat;
+import org.springframework.stereotype.Component;
+
+import com.alibaba.otter.canal.adapter.launcher.config.AdapterCanalConfig;
+import com.alibaba.otter.canal.adapter.launcher.config.CuratorClient;
+import com.alibaba.otter.canal.common.utils.BooleanMutex;
+
+/**
+ * Per-destination sync on/off switch. Workers block on {@link #get(String)}
+ * while a destination's switch is off. In LOCAL mode the state lives in an
+ * in-JVM {@link BooleanMutex}; in DISTRIBUTED mode it mirrors a ZooKeeper node
+ * ("on"/"off" payload) watched through a Curator {@link NodeCache}.
+ *
+ * @author rewerma @ 2018-10-20
+ * @version 1.0.0
+ */
+@Component
+public class SyncSwitch {
+
+    // ZK path prefix under which one node per destination is created
+    private static final String                    SYN_SWITCH_ZK_NODE = "/sync-switch/";
+
+    // destination -> mutex, LOCAL mode
+    private static final Map<String, BooleanMutex> LOCAL_LOCK         = new ConcurrentHashMap<>();
+
+    // destination -> mutex mirroring the ZK node state, DISTRIBUTED mode
+    private static final Map<String, BooleanMutex> DISTRIBUTED_LOCK   = new ConcurrentHashMap<>();
+
+    // NOTE(review): static field written from an instance @PostConstruct; fine
+    // for a singleton bean but fragile if more than one context instantiates it.
+    private static Mode                            mode               = Mode.LOCAL;
+
+    @Resource
+    private AdapterCanalConfig                     adapterCanalConfig;
+    @Resource
+    private CuratorClient                          curatorClient;
+
+    /**
+     * Chooses the mode from the presence of a Curator client and registers one
+     * mutex (initially "on") per configured destination.
+     */
+    @PostConstruct
+    public void init() {
+        CuratorFramework curator = curatorClient.getCurator();
+        if (curator != null) {
+            mode = Mode.DISTRIBUTED;
+            DISTRIBUTED_LOCK.clear();
+            for (String destination : adapterCanalConfig.DESTINATIONS) {
+                // register a switch mutex for each destination
+                BooleanMutex mutex = new BooleanMutex(true);
+                initMutex(curator, destination, mutex);
+                DISTRIBUTED_LOCK.put(destination, mutex);
+                startListen(destination, mutex);
+            }
+        } else {
+            mode = Mode.LOCAL;
+            LOCAL_LOCK.clear();
+            for (String destination : adapterCanalConfig.DESTINATIONS) {
+                // register a switch mutex for each destination
+                LOCAL_LOCK.put(destination, new BooleanMutex(true));
+            }
+        }
+    }
+
+    /**
+     * Starts a NodeCache watch on the destination's ZK node; every change
+     * re-syncs the local mutex via initMutex.
+     */
+    // NOTE(review): the NodeCache is never stored or closed (resource leak on
+    // reconfiguration), and the rethrow below discards the original exception
+    // as cause — prefer new RuntimeException(e).
+    private synchronized void startListen(String destination, BooleanMutex mutex) {
+        try {
+            String path = SYN_SWITCH_ZK_NODE + destination;
+            CuratorFramework curator = curatorClient.getCurator();
+            final NodeCache nodeCache = new NodeCache(curator, path);
+            nodeCache.start();
+            nodeCache.getListenable().addListener(() -> initMutex(curator, destination, mutex));
+        } catch (Exception e) {
+            throw new RuntimeException(e.getMessage());
+        }
+    }
+
+    /**
+     * Synchronizes the mutex with the ZK node: missing node or payload "on"
+     * opens the switch; any other payload closes it.
+     */
+    // NOTE(review): same cause-dropping rethrow as startListen.
+    private synchronized void initMutex(CuratorFramework curator, String destination, BooleanMutex mutex) {
+        try {
+            String path = SYN_SWITCH_ZK_NODE + destination;
+            Stat stat = curator.checkExists().forPath(path);
+            if (stat == null) {
+                if (!mutex.state()) {
+                    mutex.set(true);
+                }
+            } else {
+                String data = new String(curator.getData().forPath(path), StandardCharsets.UTF_8);
+                if ("on".equals(data)) {
+                    if (!mutex.state()) {
+                        mutex.set(true);
+                    }
+                } else {
+                    if (mutex.state()) {
+                        mutex.set(false);
+                    }
+                }
+            }
+        } catch (Exception e) {
+            throw new RuntimeException(e.getMessage());
+        }
+    }
+
+    /**
+     * Turns syncing off for a destination. In DISTRIBUTED mode this writes
+     * "off" to the ZK node (create-then-fallback-to-setData); the local mutex
+     * is then flipped by the NodeCache listener.
+     */
+    public synchronized void off(String destination) {
+        if (mode == Mode.LOCAL) {
+            BooleanMutex mutex = LOCAL_LOCK.get(destination);
+            if (mutex != null && mutex.state()) {
+                mutex.set(false);
+            }
+        } else {
+            try {
+                String path = SYN_SWITCH_ZK_NODE + destination;
+                try {
+                    curatorClient.getCurator()
+                        .create()
+                        .creatingParentContainersIfNeeded()
+                        .withMode(CreateMode.PERSISTENT)
+                        .forPath(path, "off".getBytes(StandardCharsets.UTF_8));
+                } catch (Exception e) {
+                    // node already exists: overwrite its payload instead
+                    curatorClient.getCurator().setData().forPath(path, "off".getBytes(StandardCharsets.UTF_8));
+                }
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+        }
+    }
+
+    /**
+     * Turns syncing on for a destination (mirror image of {@link #off}).
+     */
+    public synchronized void on(String destination) {
+        if (mode == Mode.LOCAL) {
+            BooleanMutex mutex = LOCAL_LOCK.get(destination);
+            if (mutex != null && !mutex.state()) {
+                mutex.set(true);
+            }
+        } else {
+            try {
+                String path = SYN_SWITCH_ZK_NODE + destination;
+                try {
+                    curatorClient.getCurator()
+                        .create()
+                        .creatingParentContainersIfNeeded()
+                        .withMode(CreateMode.PERSISTENT)
+                        .forPath(path, "on".getBytes(StandardCharsets.UTF_8));
+                } catch (Exception e) {
+                    // node already exists: overwrite its payload instead
+                    curatorClient.getCurator().setData().forPath(path, "on".getBytes(StandardCharsets.UTF_8));
+                }
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+        }
+    }
+
+    /**
+     * Forces the local/distributed mutex open without touching ZooKeeper;
+     * used on shutdown to release workers blocked in {@link #get(String)}.
+     */
+    public synchronized void release(String destination) {
+        if (mode == Mode.LOCAL) {
+            BooleanMutex mutex = LOCAL_LOCK.get(destination);
+            if (mutex != null && !mutex.state()) {
+                mutex.set(true);
+            }
+        }
+        if (mode == Mode.DISTRIBUTED) {
+            BooleanMutex mutex = DISTRIBUTED_LOCK.get(destination);
+            if (mutex != null && !mutex.state()) {
+                mutex.set(true);
+            }
+        }
+    }
+
+    /**
+     * @return current switch state, or null when the destination is unknown
+     */
+    public Boolean status(String destination) {
+        if (mode == Mode.LOCAL) {
+            BooleanMutex mutex = LOCAL_LOCK.get(destination);
+            if (mutex != null) {
+                return mutex.state();
+            } else {
+                return null;
+            }
+        } else {
+            BooleanMutex mutex = DISTRIBUTED_LOCK.get(destination);
+            if (mutex != null) {
+                return mutex.state();
+            } else {
+                return null;
+            }
+        }
+    }
+
+    /**
+     * Blocks until the destination's switch is on (no-op for unknown
+     * destinations).
+     */
+    public void get(String destination) throws InterruptedException {
+        if (mode == Mode.LOCAL) {
+            BooleanMutex mutex = LOCAL_LOCK.get(destination);
+            if (mutex != null) {
+                mutex.get();
+            }
+        } else {
+            BooleanMutex mutex = DISTRIBUTED_LOCK.get(destination);
+            if (mutex != null) {
+                mutex.get();
+            }
+        }
+    }
+
+    /**
+     * Blocks until the switch is on or the timeout elapses.
+     *
+     * @throws TimeoutException if the switch stayed off for the whole timeout
+     */
+    public void get(String destination, long timeout, TimeUnit unit) throws InterruptedException, TimeoutException {
+        if (mode == Mode.LOCAL) {
+            BooleanMutex mutex = LOCAL_LOCK.get(destination);
+            if (mutex != null) {
+                mutex.get(timeout, unit);
+            }
+        } else {
+            BooleanMutex mutex = DISTRIBUTED_LOCK.get(destination);
+            if (mutex != null) {
+                mutex.get(timeout, unit);
+            }
+        }
+    }
+
+}

+ 51 - 0
client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/config/AdapterCanalConfig.java

@@ -0,0 +1,51 @@
+package com.alibaba.otter.canal.adapter.launcher.config;
+
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.stereotype.Component;
+
+import com.alibaba.otter.canal.client.adapter.support.CanalClientConfig;
+
+/**
+ * Canal client configuration bound from the "canal.conf" properties prefix.
+ * Additionally collects the set of destinations (instance names or MQ topics)
+ * that the launcher should run workers for.
+ *
+ * @author rewerma @ 2018-10-20
+ * @version 1.0.0
+ */
+@Component
+@ConfigurationProperties(prefix = "canal.conf")
+public class AdapterCanalConfig extends CanalClientConfig {
+
+    // destinations derived from whichever setter ran last (see NOTE below)
+    public final Set<String> DESTINATIONS = new LinkedHashSet<>();
+
+    // NOTE(review): both setters clear the same DESTINATIONS set, so configuring
+    // canalInstances AND mqTopics together keeps only the last-bound list —
+    // confirm this exclusivity is intended.
+    @Override
+    public void setCanalInstances(List<CanalInstance> canalInstances) {
+        super.setCanalInstances(canalInstances);
+
+        if (canalInstances != null) {
+            synchronized (DESTINATIONS) {
+                DESTINATIONS.clear();
+                for (CanalInstance canalInstance : canalInstances) {
+                    DESTINATIONS.add(canalInstance.getInstance());
+                }
+            }
+        }
+    }
+
+    @Override
+    public void setMqTopics(List<MQTopic> mqTopics) {
+        super.setMqTopics(mqTopics);
+
+        if (mqTopics != null) {
+            synchronized (DESTINATIONS) {
+                DESTINATIONS.clear();
+                for (MQTopic mqTopic : mqTopics) {
+                    DESTINATIONS.add(mqTopic.getTopic());
+                }
+            }
+        }
+    }
+}

+ 86 - 0
client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/config/AdapterConfig.java

@@ -0,0 +1,86 @@
+package com.alibaba.otter.canal.adapter.launcher.config;
+
+import java.sql.SQLException;
+import java.util.List;
+import java.util.Map;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.stereotype.Component;
+
+import com.alibaba.druid.pool.DruidDataSource;
+import com.alibaba.otter.canal.client.adapter.support.AdapterConfigs;
+import com.alibaba.otter.canal.client.adapter.support.DatasourceConfig;
+
+/**
+ * Configuration class for adapter data sources and the list of adapter config
+ * files, bound from the "adapter.conf" properties prefix. As a side effect of
+ * property binding it eagerly builds Druid connection pools (registered in
+ * DatasourceConfig.DATA_SOURCES) and parses "type/file.yml" entries into
+ * AdapterConfigs.
+ *
+ * @author rewerma @ 2018-10-20
+ * @version 1.0.0
+ */
+@Component
+@ConfigurationProperties(prefix = "adapter.conf")
+public class AdapterConfig {
+
+    private static Logger                 logger = LoggerFactory.getLogger(AdapterConfig.class);
+
+    private Map<String, DatasourceConfig> datasourceConfigs;
+
+    private List<String>                  adapterConfigs;
+
+    public List<String> getAdapterConfigs() {
+        return adapterConfigs;
+    }
+
+    public Map<String, DatasourceConfig> getDatasourceConfigs() {
+        return datasourceConfigs;
+    }
+
+    public void setDatasourceConfigs(Map<String, DatasourceConfig> datasourceConfigs) {
+        this.datasourceConfigs = datasourceConfigs;
+
+        if (datasourceConfigs != null) {
+            for (Map.Entry<String, DatasourceConfig> entry : datasourceConfigs.entrySet()) {
+                DatasourceConfig datasourceConfig = entry.getValue();
+                // build the Druid connection pool for this data source
+                DruidDataSource ds = new DruidDataSource();
+                ds.setDriverClassName(datasourceConfig.getDriver());
+                ds.setUrl(datasourceConfig.getUrl());
+                ds.setUsername(datasourceConfig.getUsername());
+                ds.setPassword(datasourceConfig.getPassword());
+                ds.setInitialSize(1);
+                ds.setMinIdle(1);
+                ds.setMaxActive(datasourceConfig.getMaxActive());
+                ds.setMaxWait(60000);
+                ds.setTimeBetweenEvictionRunsMillis(60000);
+                ds.setMinEvictableIdleTimeMillis(300000);
+                ds.setPoolPreparedStatements(false);
+                ds.setMaxPoolPreparedStatementPerConnectionSize(20);
+                ds.setValidationQuery("select 1");
+                try {
+                    ds.init();
+                } catch (SQLException e) {
+                    // NOTE(review): on init failure the pool is still registered
+                    // below and never closed — consider ds.close() and skipping
+                    // the registration.
+                    logger.error("ERROR ## failed to initial datasource: " + datasourceConfig.getUrl(), e);
+                }
+                DatasourceConfig.DATA_SOURCES.put(entry.getKey(), ds);
+            }
+        }
+    }
+
+    public void setAdapterConfigs(List<String> adapterConfigs) {
+        this.adapterConfigs = adapterConfigs;
+
+        if (adapterConfigs != null) {
+            AdapterConfigs.clear();
+            for (String adapterConfig : adapterConfigs) {
+                // entries have the form "<adapter-type>/<yml-file>"
+                int idx = adapterConfig.indexOf("/");
+                if (idx > -1) {
+                    String type = adapterConfig.substring(0, idx);
+                    String ymlFile = adapterConfig.substring(idx + 1);
+                    AdapterConfigs.put(type, ymlFile);
+                }
+            }
+        }
+    }
+}

+ 42 - 0
client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/config/CuratorClient.java

@@ -0,0 +1,42 @@
+package com.alibaba.otter.canal.adapter.launcher.config;
+
+import javax.annotation.PostConstruct;
+import javax.annotation.Resource;
+
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.retry.ExponentialBackoffRetry;
+import org.springframework.stereotype.Component;
+
+/**
+ * Curator client holder: starts a CuratorFramework (namespace "canal-adapter")
+ * when zookeeperHosts is configured; otherwise getCurator() returns null and
+ * the launcher falls back to LOCAL mode.
+ *
+ * @author rewerma @ 2018-10-20
+ * @version 1.0.0
+ */
+@Component
+public class CuratorClient {
+
+    @Resource
+    private AdapterCanalConfig adapterCanalConfig;
+
+    private CuratorFramework   curator = null;
+
+    @PostConstruct
+    public void init() {
+        if (adapterCanalConfig.getZookeeperHosts() != null) {
+            curator = CuratorFrameworkFactory.builder()
+                .connectString(adapterCanalConfig.getZookeeperHosts())
+                .retryPolicy(new ExponentialBackoffRetry(1000, 3))
+                .sessionTimeoutMs(6000)
+                .connectionTimeoutMs(3000)
+                .namespace("canal-adapter")
+                .build();
+            curator.start();
+        }
+    }
+
+    /**
+     * @return the started client, or null when no ZooKeeper is configured
+     */
+    public CuratorFramework getCurator() {
+        return curator;
+    }
+}

+ 34 - 0
client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/config/SpringContext.java

@@ -0,0 +1,34 @@
+package com.alibaba.otter.canal.adapter.launcher.config;
+
+import org.springframework.beans.BeansException;
+import org.springframework.context.ApplicationContext;
+import org.springframework.context.ApplicationContextAware;
+import org.springframework.stereotype.Component;
+
+/**
+ * Static ApplicationContext accessor so non-Spring-managed code (e.g. worker
+ * threads) can look up beans.
+ *
+ * @author rewerma @ 2018-10-20
+ * @version 1.0.0
+ */
+@Component
+public class SpringContext implements ApplicationContextAware {
+
+    // NOTE(review): static field written from an instance callback (no
+    // @Override, no synchronization) — accessing getBean before the context
+    // refresh completes yields an NPE.
+    private static ApplicationContext context;
+
+    /*
+     * ApplicationContextAware callback: Spring injects the context here during
+     * container startup.
+     */
+    public void setApplicationContext(final ApplicationContext context) throws BeansException {
+        // capture the context automatically when Spring loads
+        SpringContext.context = context;
+    }
+
+    public static Object getBean(final String beanName) {
+        return SpringContext.context.getBean(beanName);
+    }
+
+    public static Object getBean(final Class<?> clz) {
+        return context.getBean(clz);
+    }
+}

+ 159 - 0
client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/AbstractCanalAdapterWorker.java

@@ -0,0 +1,159 @@
+package com.alibaba.otter.canal.adapter.launcher.loader;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.alibaba.otter.canal.adapter.launcher.common.SyncSwitch;
+import com.alibaba.otter.canal.adapter.launcher.config.SpringContext;
+import com.alibaba.otter.canal.client.adapter.OuterAdapter;
+import com.alibaba.otter.canal.client.adapter.support.Dml;
+import com.alibaba.otter.canal.client.adapter.support.MessageUtil;
+import com.alibaba.otter.canal.protocol.FlatMessage;
+import com.alibaba.otter.canal.protocol.Message;
+
+/**
+ * Abstract base class for adapter worker threads: owns the worker thread
+ * lifecycle (start/stop) and fans a canal Message / FlatMessage out to the
+ * configured adapter groups (groups run in parallel, adapters within a group
+ * run serially).
+ *
+ * @author rewerma 2018-8-19 下午11:30:49
+ * @version 1.0.0
+ */
+public abstract class AbstractCanalAdapterWorker {
+
+    protected final Logger                    logger  = LoggerFactory.getLogger(this.getClass());
+
+    protected String                          canalDestination;                                                // canal instance / topic
+    protected List<List<OuterAdapter>>        canalOuterAdapters;                                              // outer adapters, one inner list per group
+    protected ExecutorService                 groupInnerExecutorService;                                       // thread pool for intra-group work
+    protected volatile boolean                running = false;                                                 // worker running flag
+    protected Thread                          thread  = null;
+    protected Thread.UncaughtExceptionHandler handler = (t, e) -> logger.error("parse events has an error", e);
+
+    protected SyncSwitch                      syncSwitch;
+
+    public AbstractCanalAdapterWorker(){
+        // looked up statically because workers are not Spring-managed beans
+        syncSwitch = (SyncSwitch) SpringContext.getBean(SyncSwitch.class);
+    }
+
+    /**
+     * Dispatches a binary-protocol Message to every adapter group in parallel.
+     */
+    // NOTE(review): the futures-await loop is nested INSIDE the group forEach,
+    // so earlier groups' futures are re-awaited on each iteration; it likely
+    // belongs after the loop. Adapter failures are also collapsed to a logged
+    // boolean — the exception detail is lost.
+    protected void writeOut(final Message message) {
+        List<Future<Boolean>> futures = new ArrayList<>();
+        // adapter groups run in parallel with each other
+        canalOuterAdapters.forEach(outerAdapters -> {
+            final List<OuterAdapter> adapters = outerAdapters;
+            futures.add(groupInnerExecutorService.submit(() -> {
+                try {
+                    // adapters within a group run serially; avoid configuring
+                    // multiple adapters per group where possible
+                    adapters.forEach(adapter -> {
+                        long begin = System.currentTimeMillis();
+                        MessageUtil.parse4Dml(canalDestination, message, adapter::sync);
+
+                        if (logger.isDebugEnabled()) {
+                            logger.debug("{} elapsed time: {}",
+                                adapter.getClass().getName(),
+                                (System.currentTimeMillis() - begin));
+                        }
+                    });
+                    return true;
+                } catch (Exception e) {
+                    return false;
+                }
+            }));
+
+            // wait for all adapters to finish writing; since groups run
+            // concurrently this blocks until the slowest group completes
+            futures.forEach(future -> {
+                try {
+                    if (!future.get()) {
+                        logger.error("Outer adapter write failed");
+                    }
+                } catch (InterruptedException | ExecutionException e) {
+                    // ignore
+                }
+            });
+        });
+    }
+
+    /**
+     * Dispatches a JSON FlatMessage to every adapter group in parallel
+     * (structure mirrors {@link #writeOut(Message)}, including the same
+     * nested-await quirk).
+     */
+    protected void writeOut(final FlatMessage flatMessage) {
+        List<Future<Boolean>> futures = new ArrayList<>();
+        // adapter groups run in parallel with each other
+        canalOuterAdapters.forEach(outerAdapters -> {
+            futures.add(groupInnerExecutorService.submit(() -> {
+                try {
+                    // adapters within a group run serially; avoid configuring
+                    // multiple adapters per group where possible
+                    outerAdapters.forEach(adapter -> {
+                        long begin = System.currentTimeMillis();
+                        Dml dml = MessageUtil.flatMessage2Dml(canalDestination, flatMessage);
+                        adapter.sync(dml);
+                        if (logger.isDebugEnabled()) {
+                            logger.debug("{} elapsed time: {}",
+                                adapter.getClass().getName(),
+                                (System.currentTimeMillis() - begin));
+                        }
+                    });
+                    return true;
+                } catch (Exception e) {
+                    return false;
+                }
+            }));
+
+            // wait for all adapters to finish writing; since groups run
+            // concurrently this blocks until the slowest group completes
+            futures.forEach(future -> {
+                try {
+                    if (!future.get()) {
+                        logger.error("Outer adapter write failed");
+                    }
+                } catch (InterruptedException | ExecutionException e) {
+                    // ignore
+                }
+            });
+        });
+    }
+
+    /**
+     * Starts the worker thread running the subclass' {@link #process()} loop.
+     */
+    public void start() {
+        if (!running) {
+            thread = new Thread(this::process);
+            thread.setUncaughtExceptionHandler(handler);
+            thread.start();
+            running = true;
+        }
+    }
+
+    /** Subclass main loop; runs until {@code running} becomes false. */
+    protected abstract void process();
+
+    /**
+     * Stops the worker: closes the connection, releases any destination held
+     * by the sync switch, joins the thread, then destroys all adapters.
+     */
+    public void stop() {
+        try {
+            if (!running) {
+                return;
+            }
+
+            closeConnection();
+            running = false;
+
+            syncSwitch.release(canalDestination);
+
+            logger.info("destination {} is waiting for adapters' worker thread die!", canalDestination);
+            if (thread != null) {
+                try {
+                    thread.join();
+                } catch (InterruptedException e) {
+                    // ignore
+                }
+            }
+            groupInnerExecutorService.shutdown();
+            logger.info("destination {} adapters worker thread dead!", canalDestination);
+            canalOuterAdapters.forEach(outerAdapters -> outerAdapters.forEach(OuterAdapter::destroy));
+            logger.info("destination {} all adapters destroyed!", canalDestination);
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+        }
+    }
+
+    /** Subclass hook: close the underlying canal/MQ connection. */
+    protected abstract void closeConnection();
+
+}

+ 101 - 0
client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/CanalAdapterKafkaWorker.java

@@ -0,0 +1,101 @@
+package com.alibaba.otter.canal.adapter.launcher.loader;
+
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.kafka.clients.consumer.CommitFailedException;
+import org.apache.kafka.common.errors.WakeupException;
+
+import com.alibaba.otter.canal.client.adapter.OuterAdapter;
+import com.alibaba.otter.canal.client.kafka.KafkaCanalConnector;
+import com.alibaba.otter.canal.client.kafka.KafkaCanalConnectors;
+import com.alibaba.otter.canal.protocol.FlatMessage;
+import com.alibaba.otter.canal.protocol.Message;
+
+/**
+ * Kafka client adapter worker thread: consumes Message/FlatMessage batches
+ * from a Kafka topic and forwards them to the adapter groups via writeOut.
+ *
+ * @author rewerma 2018-8-19 下午11:30:49
+ * @version 1.0.0
+ */
+public class CanalAdapterKafkaWorker extends AbstractCanalAdapterWorker {
+
+    private KafkaCanalConnector connector;
+
+    private String              topic;
+
+    private boolean             flatMessage;          // true => consume JSON FlatMessage instead of binary Message
+
+    public CanalAdapterKafkaWorker(String bootstrapServers, String topic, String groupId,
+                                   List<List<OuterAdapter>> canalOuterAdapters, boolean flatMessage){
+        this.canalOuterAdapters = canalOuterAdapters;
+        this.groupInnerExecutorService = Executors.newFixedThreadPool(canalOuterAdapters.size());
+        this.topic = topic;
+        this.canalDestination = topic;
+        this.flatMessage = flatMessage;
+        connector = KafkaCanalConnectors.newKafkaConnector(bootstrapServers, topic, null, groupId, flatMessage);
+        // connector.setSessionTimeout(1L, TimeUnit.MINUTES);
+    }
+
+    @Override
+    protected  void closeConnection(){
+        connector.stopRunning();
+    }
+
+    /**
+     * Consume loop: connect/subscribe, then repeatedly block on the sync
+     * switch, poll a batch, dispatch it, and ack.
+     */
+    @Override
+    protected void process() {
+        // NOTE(review): busy-spin until start() flips `running` — burns a CPU
+        // core; a latch or sleep would be preferable.
+        while (!running)
+            ;
+        // NOTE(review): this executor is never submitted to — created and shut
+        // down unused; consider removing it.
+        ExecutorService executor = Executors.newSingleThreadExecutor();
+        while (running) {
+            try {
+                logger.info("=============> Start to connect topic: {} <=============", this.topic);
+                connector.connect();
+                logger.info("=============> Start to subscribe topic: {} <=============", this.topic);
+                connector.subscribe();
+                logger.info("=============> Subscribe topic: {} succeed <=============", this.topic);
+                while (running) {
+                    try {
+                        // block here while the destination's sync switch is off
+                        syncSwitch.get(canalDestination);
+
+                        List<?> messages;
+                        if (!flatMessage) {
+                            messages = connector.getWithoutAck();
+                        } else {
+                            messages = connector.getFlatMessageWithoutAck(100L, TimeUnit.MILLISECONDS);
+                        }
+                        if (messages != null) {
+                            for (final Object message : messages) {
+                                if (message instanceof FlatMessage) {
+                                    writeOut((FlatMessage) message);
+                                } else {
+                                    writeOut((Message) message);
+                                }
+                            }
+                        }
+                        // ack only after every message in the batch was dispatched
+                        connector.ack();
+                    } catch (CommitFailedException e) {
+                        // rebalance-induced commit failure: log and re-poll
+                        logger.warn(e.getMessage());
+                    } catch (Exception e) {
+                        logger.error(e.getMessage(), e);
+                        TimeUnit.SECONDS.sleep(1L);
+                    }
+                }
+            } catch (Exception e) {
+                logger.error(e.getMessage(), e);
+            }
+        }
+
+        executor.shutdown();
+
+        try {
+            connector.unsubscribe();
+        } catch (WakeupException e) {
+            // No-op. Continue process
+        }
+        connector.disconnect();
+        logger.info("=============> Disconnect topic: {} <=============", this.topic);
+    }
+}

+ 30 - 52
client-launcher/src/main/java/com/alibaba/otter/canal/client/adapter/loader/CanalAdapterLoader.java → client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/CanalAdapterLoader.java

@@ -1,4 +1,4 @@
-package com.alibaba.otter.canal.client.adapter.loader;
+package com.alibaba.otter.canal.adapter.launcher.loader;
 
 import java.net.InetSocketAddress;
 import java.net.SocketAddress;
@@ -13,13 +13,13 @@ import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.alibaba.otter.canal.client.adapter.CanalOuterAdapter;
+import com.alibaba.otter.canal.client.adapter.OuterAdapter;
 import com.alibaba.otter.canal.client.adapter.support.CanalClientConfig;
-import com.alibaba.otter.canal.client.adapter.support.CanalOuterAdapterConfiguration;
 import com.alibaba.otter.canal.client.adapter.support.ExtensionLoader;
+import com.alibaba.otter.canal.client.adapter.support.OuterAdapterConfig;
 
 /**
- * MQ外部适配器的加载器
+ * 外部适配器的加载器
  *
  * @version 1.0.0
  */
@@ -33,29 +33,17 @@ public class CanalAdapterLoader {
 
     private Map<String, AbstractCanalAdapterWorker> canalMQWorker = new HashMap<>();
 
-    private ExtensionLoader<CanalOuterAdapter>      loader;
+    private ExtensionLoader<OuterAdapter>           loader;
 
     public CanalAdapterLoader(CanalClientConfig canalClientConfig){
         this.canalClientConfig = canalClientConfig;
     }
 
     /**
-     * 初始化canal-client、 canal-client-rocketmq的适配器
+     * 初始化canal-client
      */
     public void init() {
-        // canal instances 和 mq topics 配置不能同时为空
-        if (canalClientConfig.getCanalInstances() == null && canalClientConfig.getMqTopics() == null) {
-            throw new RuntimeException("Blank config property: canalInstances or canalMQTopics");
-        }
-
-        loader = ExtensionLoader.getExtensionLoader(CanalOuterAdapter.class, "" /*
-                                                                                 * TODO
-                                                                                 * canalClientConfig
-                                                                                 * .
-                                                                                 * getClassloaderPolicy
-                                                                                 * (
-                                                                                 * )
-                                                                                 */);
+        loader = ExtensionLoader.getExtensionLoader(OuterAdapter.class);
 
         String canalServerHost = this.canalClientConfig.getCanalServerHost();
         SocketAddress sa = null;
@@ -68,20 +56,22 @@ public class CanalAdapterLoader {
         // 初始化canal-client的适配器
         if (canalClientConfig.getCanalInstances() != null) {
             for (CanalClientConfig.CanalInstance instance : canalClientConfig.getCanalInstances()) {
-                List<List<CanalOuterAdapter>> canalOuterAdapterGroups = new ArrayList<>();
+                List<List<OuterAdapter>> canalOuterAdapterGroups = new ArrayList<>();
 
                 for (CanalClientConfig.AdapterGroup connectorGroup : instance.getAdapterGroups()) {
-                    List<CanalOuterAdapter> canalOutConnectors = new ArrayList<>();
-                    for (CanalOuterAdapterConfiguration c : connectorGroup.getOutAdapters()) {
+                    List<OuterAdapter> canalOutConnectors = new ArrayList<>();
+                    for (OuterAdapterConfig c : connectorGroup.getOutAdapters()) {
                         loadConnector(c, canalOutConnectors);
                     }
                     canalOuterAdapterGroups.add(canalOutConnectors);
                 }
                 CanalAdapterWorker worker;
-                if (zkHosts != null) {
+                if (sa != null) {
+                    worker = new CanalAdapterWorker(instance.getInstance(), sa, canalOuterAdapterGroups);
+                } else if (zkHosts != null) {
                     worker = new CanalAdapterWorker(instance.getInstance(), zkHosts, canalOuterAdapterGroups);
                 } else {
-                    worker = new CanalAdapterWorker(instance.getInstance(), sa, canalOuterAdapterGroups);
+                    throw new RuntimeException("No canal server connector found");
                 }
                 canalWorkers.put(instance.getInstance(), worker);
                 worker.start();
@@ -93,11 +83,11 @@ public class CanalAdapterLoader {
         if (canalClientConfig.getMqTopics() != null) {
             for (CanalClientConfig.MQTopic topic : canalClientConfig.getMqTopics()) {
                 for (CanalClientConfig.Group group : topic.getGroups()) {
-                    List<List<CanalOuterAdapter>> canalOuterAdapterGroups = new ArrayList<>();
+                    List<List<OuterAdapter>> canalOuterAdapterGroups = new ArrayList<>();
 
-                    List<CanalOuterAdapter> canalOuterAdapters = new ArrayList<>();
+                    List<OuterAdapter> canalOuterAdapters = new ArrayList<>();
 
-                    for (CanalOuterAdapterConfiguration config : group.getOutAdapters()) {
+                    for (OuterAdapterConfig config : group.getOutAdapters()) {
                         loadConnector(config, canalOuterAdapters);
                     }
                     canalOuterAdapterGroups.add(canalOuterAdapters);
@@ -109,7 +99,8 @@ public class CanalAdapterLoader {
                         canalMQWorker.put(topic.getTopic() + "-rocketmq-" + group.getGroupId(), rocketMQWorker);
                         rocketMQWorker.start();
                     } else if ("kafka".equalsIgnoreCase(topic.getMqMode())) {
-                        CanalAdapterKafkaWorker canalKafkaWorker = new CanalAdapterKafkaWorker(canalClientConfig.getBootstrapServers(),
+                        CanalAdapterKafkaWorker canalKafkaWorker = new CanalAdapterKafkaWorker(
+                            canalClientConfig.getBootstrapServers(),
                             topic.getTopic(),
                             group.getGroupId(),
                             canalOuterAdapterGroups,
@@ -117,17 +108,17 @@ public class CanalAdapterLoader {
                         canalMQWorker.put(topic.getTopic() + "-kafka-" + group.getGroupId(), canalKafkaWorker);
                         canalKafkaWorker.start();
                     }
-                    logger.info("Start adapter for canal-client rocketmq topic: {} succeed", topic.getTopic() + "-"
-                                                                                             + group.getGroupId());
+                    logger.info("Start adapter for canal-client rocketmq topic: {} succeed",
+                        topic.getTopic() + "-" + group.getGroupId());
 
                 }
             }
         }
     }
 
-    private void loadConnector(CanalOuterAdapterConfiguration config, List<CanalOuterAdapter> canalOutConnectors) {
+    private void loadConnector(OuterAdapterConfig config, List<OuterAdapter> canalOutConnectors) {
         try {
-            CanalOuterAdapter adapter = loader.getExtension(config.getName());
+            OuterAdapter adapter = loader.getExtension(config.getName());
             ClassLoader cl = Thread.currentThread().getContextClassLoader();
             // 替换ClassLoader
             Thread.currentThread().setContextClassLoader(adapter.getClass().getClassLoader());
@@ -146,31 +137,18 @@ public class CanalAdapterLoader {
     public void destroy() {
         if (canalWorkers.size() > 0) {
             ExecutorService stopExecutorService = Executors.newFixedThreadPool(canalWorkers.size());
-            for (CanalAdapterWorker v : canalWorkers.values()) {
-                final CanalAdapterWorker caw = v;
-                stopExecutorService.submit(new Runnable() {
-
-                    @Override
-                    public void run() {
-                        caw.stop();
-                    }
-                });
+            for (CanalAdapterWorker canalAdapterWorker : canalWorkers.values()) {
+                stopExecutorService.submit(canalAdapterWorker::stop);
             }
             stopExecutorService.shutdown();
         }
+
         if (canalMQWorker.size() > 0) {
-            ExecutorService stopMQWokerService = Executors.newFixedThreadPool(canalMQWorker.size());
-            for (AbstractCanalAdapterWorker tmp : canalMQWorker.values()) {
-                final AbstractCanalAdapterWorker worker = tmp;
-                stopMQWokerService.submit(new Runnable() {
-
-                    @Override
-                    public void run() {
-                        worker.stop();
-                    }
-                });
+            ExecutorService stopMQWorkerService = Executors.newFixedThreadPool(canalMQWorker.size());
+            for (AbstractCanalAdapterWorker canalAdapterMQWorker : canalMQWorker.values()) {
+                stopMQWorkerService.submit(canalAdapterMQWorker::stop);
             }
-            stopMQWokerService.shutdown();
+            stopMQWorkerService.shutdown();
         }
         logger.info("All canal adapters destroyed");
     }

+ 29 - 32
client-launcher/src/main/java/com/alibaba/otter/canal/client/adapter/loader/CanalAdapterRocketMQWorker.java → client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/CanalAdapterRocketMQWorker.java

@@ -1,4 +1,4 @@
-package com.alibaba.otter.canal.client.adapter.loader;
+package com.alibaba.otter.canal.adapter.launcher.loader;
 
 import com.alibaba.otter.canal.protocol.FlatMessage;
 import java.util.List;
@@ -9,7 +9,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.kafka.clients.consumer.CommitFailedException;
 import org.apache.kafka.common.errors.WakeupException;
 
-import com.alibaba.otter.canal.client.adapter.CanalOuterAdapter;
+import com.alibaba.otter.canal.client.adapter.OuterAdapter;
 import com.alibaba.otter.canal.client.rocketmq.RocketMQCanalConnector;
 import com.alibaba.otter.canal.client.rocketmq.RocketMQCanalConnectorProvider;
 import com.alibaba.otter.canal.protocol.Message;
@@ -17,7 +17,7 @@ import com.alibaba.otter.canal.protocol.Message;
 /**
  * kafka对应的client适配器工作线程
  *
- * @author machengyuan 2018-8-19 下午11:30:49
+ * @author rewerma 2018-8-19 下午11:30:49
  * @version 1.0.0
  */
 public class CanalAdapterRocketMQWorker extends AbstractCanalAdapterWorker {
@@ -40,38 +40,12 @@ public class CanalAdapterRocketMQWorker extends AbstractCanalAdapterWorker {
     }
 
     @Override
-    public void start() {
-        if (!running) {
-            thread = new Thread(new Runnable() {
-
-                @Override
-                public void run() {
-                    process();
-                }
-            });
-            thread.setUncaughtExceptionHandler(handler);
-            running = true;
-            thread.start();
-        }
+    protected void closeConnection() {
+        connector.stopRunning();
     }
 
     @Override
-    public void stop() {
-        try {
-            if (!running) {
-                return;
-            }
-            connector.stopRunning();
-            running = false;
-            logger.info("Stop topic {} out adapters begin", this.topic);
-            stopOutAdapters();
-            logger.info("Stop topic {} out adapters end", this.topic);
-        } catch (Exception e) {
-            logger.error(e.getMessage(), e);
-        }
-    }
-
-    private void process() {
+    protected void process() {
         while (!running)
             ;
         ExecutorService executor = Executors.newSingleThreadExecutor();
@@ -91,6 +65,7 @@ public class CanalAdapterRocketMQWorker extends AbstractCanalAdapterWorker {
                             message = connector.getFlatMessageWithoutAck();
                         }
                         if (message != null) {
-                            final Object msg = message;
-                            executor.submit(new Runnable() {
-                                @Override
@@ -110,7 +85,29 @@ public class CanalAdapterRocketMQWorker extends AbstractCanalAdapterWorker {
-                                    } catch (Exception e) {
-                                        logger.error(e.getMessage(), e);
-                                    }
+                            executor.submit(() -> {
+                                try {
+                                    if (logger.isDebugEnabled()) {
+                                        logger.debug("topic: {} batchId: {} batchSize: {} ",
+                                            topic,
+                                            message.getId(),
+                                            message.getEntries().size());
+                                    }
+                                    long begin = System.currentTimeMillis();
+                                    writeOut(message);
+                                    long now = System.currentTimeMillis();
+                                    if ((System.currentTimeMillis() - begin) > 5 * 60 * 1000) {
+                                        logger.error("topic: {} batchId {} elapsed time: {} ms",
+                                            topic,
+                                            message.getId(),
+                                            now - begin);
+                                    }
+                                } catch (Exception e) {
+                                    logger.error(e.getMessage(), e);
+                                }
-                                }
+                                connector.ack(message.getId());
                             });
                         } else {
                             logger.debug("Message is null");

+ 77 - 0
client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/CanalAdapterService.java

@@ -0,0 +1,77 @@
+package com.alibaba.otter.canal.adapter.launcher.loader;
+
+import javax.annotation.PostConstruct;
+import javax.annotation.PreDestroy;
+import javax.annotation.Resource;
+
+import com.alibaba.otter.canal.adapter.launcher.common.SyncSwitch;
+import com.alibaba.otter.canal.adapter.launcher.config.SpringContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.stereotype.Component;
+
+import com.alibaba.druid.pool.DruidDataSource;
+import com.alibaba.otter.canal.adapter.launcher.config.AdapterCanalConfig;
+import com.alibaba.otter.canal.adapter.launcher.config.AdapterConfig;
+import com.alibaba.otter.canal.client.adapter.support.DatasourceConfig;
+
+/**
+ * 适配器启动业务类
+ *
+ * @author rewerma @ 2018-10-20
+ * @version 1.0.0
+ */
+@Component
+public class CanalAdapterService {
+
+    private static final Logger       logger = LoggerFactory.getLogger(CanalAdapterService.class);
+
+    private static CanalAdapterLoader adapterLoader;
+
+    @Resource
+    private AdapterCanalConfig        adapterCanalConfig;
+
+    // 注入bean保证优先注册
+    @Resource
+    private AdapterConfig             adapterConfig;
+    @Resource
+    private SpringContext             springContext;
+    @Resource
+    private SyncSwitch                syncSwitch;
+
+    @PostConstruct
+    public void init() {
+        if (adapterLoader == null) {
+            try {
+                logger.info("## start the canal client adapters.");
+                adapterLoader = new CanalAdapterLoader(adapterCanalConfig);
+                adapterLoader.init();
+                logger.info("## the canal client adapters are running now ......");
+            } catch (Throwable e) {
+                logger.error("## something goes wrong when starting up the canal client adapters:", e);
+                System.exit(0);
+            }
+        }
+    }
+
+    @PreDestroy
+    public void destroy() {
+        try {
+            logger.info("## stop the canal client adapters");
+            if (adapterLoader != null) {
+                adapterLoader.destroy();
+            }
+            for (DruidDataSource druidDataSource : DatasourceConfig.DATA_SOURCES.values()) {
+                try {
+                    druidDataSource.close();
+                } catch (Exception e) {
+                    logger.error(e.getMessage(), e);
+                }
+            }
+        } catch (Throwable e) {
+            logger.warn("## something goes wrong when stopping canal client adapters:", e);
+        } finally {
+            logger.info("## canal client adapters are down.");
+        }
+    }
+}

+ 24 - 73
client-launcher/src/main/java/com/alibaba/otter/canal/client/adapter/loader/CanalAdapterWorker.java → client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/CanalAdapterWorker.java

@@ -1,19 +1,21 @@
-package com.alibaba.otter.canal.client.adapter.loader;
+package com.alibaba.otter.canal.adapter.launcher.loader;
 
 import java.net.SocketAddress;
 import java.util.List;
 import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 
 import com.alibaba.otter.canal.client.CanalConnector;
 import com.alibaba.otter.canal.client.CanalConnectors;
-import com.alibaba.otter.canal.client.adapter.CanalOuterAdapter;
+import com.alibaba.otter.canal.client.adapter.OuterAdapter;
 import com.alibaba.otter.canal.client.impl.ClusterCanalConnector;
 import com.alibaba.otter.canal.protocol.Message;
 
 /**
  * 原生canal-server对应的client适配器工作线程
  *
- * @author machengyuan 2018-8-19 下午11:30:49
+ * @author rewerma 2018-8-19 下午11:30:49
  * @version 1.0.0
  */
 public class CanalAdapterWorker extends AbstractCanalAdapterWorker {
@@ -31,7 +33,7 @@ public class CanalAdapterWorker extends AbstractCanalAdapterWorker {
      * @param canalOuterAdapters 外部适配器组
      */
     public CanalAdapterWorker(String canalDestination, SocketAddress address,
-                              List<List<CanalOuterAdapter>> canalOuterAdapters){
+                              List<List<OuterAdapter>> canalOuterAdapters){
         this.canalOuterAdapters = canalOuterAdapters;
         this.canalDestination = canalDestination;
         groupInnerExecutorService = Executors.newFixedThreadPool(canalOuterAdapters.size());
@@ -46,88 +48,41 @@ public class CanalAdapterWorker extends AbstractCanalAdapterWorker {
      * @param canalOuterAdapters 外部适配器组
      */
     public CanalAdapterWorker(String canalDestination, String zookeeperHosts,
-                              List<List<CanalOuterAdapter>> canalOuterAdapters){
+                              List<List<OuterAdapter>> canalOuterAdapters){
         this.canalOuterAdapters = canalOuterAdapters;
         this.canalDestination = canalDestination;
         groupInnerExecutorService = Executors.newFixedThreadPool(canalOuterAdapters.size());
         connector = CanalConnectors.newClusterConnector(zookeeperHosts, canalDestination, "", "");
         ((ClusterCanalConnector) connector).setSoTimeout(SO_TIMEOUT);
-
-        // super.initSwitcher(canalDestination);
     }
 
     @Override
-    public void start() {
-        if (!running) {
-            thread = new Thread(new Runnable() {
-
-                @Override
-                public void run() {
-                    process();
-                }
-            });
-            thread.setUncaughtExceptionHandler(handler);
-            thread.start();
-            running = true;
-        }
+    protected void closeConnection() {
+        connector.stopRunning();
     }
 
     @Override
-    public void stop() {
-        try {
-            if (!running) {
-                return;
-            }
-
-            // if (switcher != null && !switcher.state()) {
-            // switcher.set(true);
-            // }
-
-            connector.stopRunning();
-            running = false;
-
-            logger.info("destination {} is waiting for adapters' worker thread die!", canalDestination);
-            if (thread != null) {
-                try {
-                    thread.join();
-                } catch (InterruptedException e) {
-                    // ignore
-                }
-            }
-            groupInnerExecutorService.shutdown();
-            logger.info("destination {} adapters' worker thread dead!", canalDestination);
-            for (List<CanalOuterAdapter> outerAdapters : canalOuterAdapters) {
-                for (CanalOuterAdapter adapter : outerAdapters) {
-                    adapter.destroy();
-                }
-            }
-            logger.info("destination {} all adapters destroyed!", canalDestination);
-        } catch (Exception e) {
-            logger.error(e.getMessage(), e);
-        }
-    }
-
-    private void process() {
+    protected void process() {
         while (!running)
             ; // waiting until running == true
         while (running) {
             try {
-                // if (switcher != null) {
-                // switcher.get();
-                // }
+                syncSwitch.get(canalDestination);
+
                 logger.info("=============> Start to connect destination: {} <=============", this.canalDestination);
                 connector.connect();
                 logger.info("=============> Start to subscribe destination: {} <=============", this.canalDestination);
                 connector.subscribe();
                 logger.info("=============> Subscribe destination: {} succeed <=============", this.canalDestination);
                 while (running) {
-                    // try {
-                    // if (switcher != null) {
-                    // switcher.get();
-                    // }
-                    // } catch (TimeoutException e) {
-                    // break;
-                    // }
+                    try {
+                        syncSwitch.get(canalDestination, 1L, TimeUnit.MINUTES);
+                    } catch (TimeoutException e) {
+                        break;
+                    }
+                    if (!running) {
+                        break;
+                    }
 
                     // server配置canal.instance.network.soTimeout(默认: 30s)
                     // 范围内未与server交互,server将关闭本次socket连接
@@ -137,11 +92,7 @@ public class CanalAdapterWorker extends AbstractCanalAdapterWorker {
                         int size = message.getEntries().size();
 
                         if (batchId == -1 || size == 0) {
-                            try {
-                                Thread.sleep(1000);
-                            } catch (InterruptedException e) {
-                                // ignore
-                            }
+                            Thread.sleep(1000);
                         } else {
                             if (logger.isDebugEnabled()) {
                                 logger.debug("destination: {} batchId: {} batchSize: {} ",
@@ -151,18 +102,18 @@ public class CanalAdapterWorker extends AbstractCanalAdapterWorker {
                             }
                             long begin = System.currentTimeMillis();
                             writeOut(message);
-                            long now = System.currentTimeMillis();
                             if (logger.isDebugEnabled()) {
                                 logger.debug("destination: {} batchId: {} elapsed time: {} ms",
                                     this.canalDestination,
                                     batchId,
-                                    now - begin);
+                                    System.currentTimeMillis() - begin);
                             }
                         }
                         connector.ack(batchId); // 提交确认
                     } catch (Exception e) {
                         connector.rollback(batchId); // 处理失败, 回滚数据
-                        throw e;
+                        logger.error("sync error!", e);
+                        Thread.sleep(500);
                     }
                 }
 

+ 172 - 0
client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/rest/CommonRest.java

@@ -0,0 +1,172 @@
+package com.alibaba.otter.canal.adapter.launcher.rest;
+
+import java.util.*;
+
+import javax.annotation.PostConstruct;
+import javax.annotation.Resource;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.web.bind.annotation.*;
+
+import com.alibaba.otter.canal.adapter.launcher.common.EtlLock;
+import com.alibaba.otter.canal.adapter.launcher.common.SyncSwitch;
+import com.alibaba.otter.canal.adapter.launcher.config.AdapterCanalConfig;
+import com.alibaba.otter.canal.client.adapter.OuterAdapter;
+import com.alibaba.otter.canal.client.adapter.support.EtlResult;
+import com.alibaba.otter.canal.client.adapter.support.ExtensionLoader;
+import com.alibaba.otter.canal.client.adapter.support.Result;
+
+/**
+ * 适配器操作Rest
+ *
+ * @author rewerma @ 2018-10-20
+ * @version 1.0.0
+ */
+@RestController
+public class CommonRest {
+
+    private static Logger                 logger           = LoggerFactory.getLogger(CommonRest.class);
+
+    private static final String           ETL_LOCK_ZK_NODE = "/sync-etl/";
+
+    private ExtensionLoader<OuterAdapter> loader;
+
+    @Resource
+    private SyncSwitch                    syncSwitch;
+    @Resource
+    private EtlLock                       etlLock;
+
+    @Resource
+    private AdapterCanalConfig            adapterCanalConfig;
+
+    @PostConstruct
+    public void init() {
+        loader = ExtensionLoader.getExtensionLoader(OuterAdapter.class);
+    }
+
+    /**
+     * ETL curl http://127.0.0.1:8081/etl/hbase/mytest_person2.yml -X POST
+     * 
+     * @param type 类型 hbase, es
+     * @param task 任务名对应配置文件名 mytest_person2.yml
+     * @param params etl where条件参数, 为空全部导入
+     * @return
+     */
+    @PostMapping("/etl/{type}/{task}")
+    public EtlResult etl(@PathVariable String type, @PathVariable String task,
+                         @RequestParam(name = "params", required = false) String params) {
+
+        boolean locked = etlLock.tryLock(ETL_LOCK_ZK_NODE + type + "-" + task);
+        if (!locked) {
+            EtlResult result = new EtlResult();
+            result.setSucceeded(false);
+            result.setErrorMessage(task + " 有其他进程正在导入中, 请稍后再试");
+            return result;
+        }
+        try {
+            OuterAdapter adapter = loader.getExtension(type);
+            String destination = adapter.getDestination(task);
+            Boolean oriSwithcStatus = null;
+            if (destination != null) {
+                oriSwithcStatus = syncSwitch.status(destination);
+                syncSwitch.off(destination);
+            }
+            try {
+                List<String> paramArr = null;
+                if (params != null) {
+                    String[] parmaArray = params.trim().split(";");
+                    paramArr = Arrays.asList(parmaArray);
+                }
+                return adapter.etl(task, paramArr);
+            } finally {
+                if (destination != null && oriSwithcStatus != null && oriSwithcStatus) {
+                    syncSwitch.on(destination);
+                }
+            }
+        } finally {
+            etlLock.unlock(ETL_LOCK_ZK_NODE + type + "-" + task);
+        }
+    }
+
+    /**
+     * 统计总数 curl http://127.0.0.1:8081/count/hbase/mytest_person2.yml
+     * 
+     * @param type 类型 hbase, es
+     * @param task 任务名对应配置文件名 mytest_person2.yml
+     * @return
+     */
+    @GetMapping("/count/{type}/{task}")
+    public Map<String, Object> count(@PathVariable String type, @PathVariable String task) {
+        OuterAdapter adapter = loader.getExtension(type);
+        return adapter.count(task);
+    }
+
+    /**
+     * 返回所有实例 curl http://127.0.0.1:8081/destinations
+     */
+    @GetMapping("/destinations")
+    public List<Map<String, String>> destinations() {
+        List<Map<String, String>> result = new ArrayList<>();
+        Set<String> destinations = adapterCanalConfig.DESTINATIONS;
+        for (String destination : destinations) {
+            Map<String, String> resMap = new LinkedHashMap<>();
+            Boolean status = syncSwitch.status(destination);
+            String resStatus = "none";
+            if (status != null && status) {
+                resStatus = "on";
+            } else if (status != null && !status) {
+                resStatus = "off";
+            }
+            resMap.put("destination", destination);
+            resMap.put("status", resStatus);
+            result.add(resMap);
+        }
+        return result;
+    }
+
+    /**
+     * 实例同步开关 curl http://127.0.0.1:8081/syncSwitch/example/off -X PUT
+     * 
+     * @param destination 实例名称
+     * @param status 开关状态: off on
+     * @return
+     */
+    @PutMapping("/syncSwitch/{destination}/{status}")
+    public Result etl(@PathVariable String destination, @PathVariable String status) {
+        if (status.equals("on")) {
+            syncSwitch.on(destination);
+            logger.info("#Destination: {} sync on", destination);
+            return Result.createSuccess("实例: " + destination + " 开启同步成功");
+        } else if (status.equals("off")) {
+            syncSwitch.off(destination);
+            logger.info("#Destination: {} sync off", destination);
+            return Result.createSuccess("实例: " + destination + " 关闭同步成功");
+        } else {
+            Result result = new Result();
+            result.setCode(50000);
+            result.setMessage("实例: " + destination + " 操作失败");
+            return result;
+        }
+    }
+
+    /**
+     * 获取实例开关状态 curl http://127.0.0.1:8081/syncSwitch/example
+     * 
+     * @param destination 实例名称
+     * @return
+     */
+    @GetMapping("/syncSwitch/{destination}")
+    public Map<String, String> etl(@PathVariable String destination) {
+        Boolean status = syncSwitch.status(destination);
+        String resStatus = "none";
+        if (status != null && status) {
+            resStatus = "on";
+        } else if (status != null && !status) {
+            resStatus = "off";
+        }
+        Map<String, String> res = new LinkedHashMap<>();
+        res.put("status", resStatus);
+        return res;
+    }
+}

+ 47 - 0
client-adapter/launcher/src/main/resources/application.yml

@@ -0,0 +1,47 @@
+server:
+  port: 8081
+logging:
+  level:
+    com.alibaba.otter.canal.client.adapter.hbase: DEBUG
+spring:
+  jackson:
+    date-format: yyyy-MM-dd HH:mm:ss
+    time-zone: GMT+8
+    default-property-inclusion: non_null
+
+
+hbase.zookeeper.quorum: 127.0.0.1
+hbase.zookeeper.property.clientPort: 2181
+hbase.zookeeper.znode.parent: /hbase
+
+canal.conf:
+  canalServerHost: 127.0.0.1:11111
+#  zookeeperHosts: slave1:2181
+#  bootstrapServers: slave1:6667 #or rocketmq nameservers:host1:9876;host2:9876
+  flatMessage: true
+  canalInstances:
+  - instance: example
+    adapterGroups:
+    - outAdapters:
+      - name: logger
+#      - name: hbase
+#        properties:
+#          hbase.zookeeper.quorum: ${hbase.zookeeper.quorum}
+#          hbase.zookeeper.property.clientPort: ${hbase.zookeeper.property.clientPort}
+#          zookeeper.znode.parent: ${hbase.zookeeper.znode.parent}
+#  mqTopics:
+#  - mqMode: kafka
+#    topic: example
+#    groups:
+#    - groupId: g2
+#      outAdapters:
+#      - name: logger
+
+#adapter.conf:
+#  datasourceConfigs:
+#    defaultDS:
+#      url: jdbc:mysql://127.0.0.1:3306/mytest?useUnicode=true
+#      username: root
+#      password: 121212
+#  adapterConfigs:
+#  - hbase/mytest_person2.yml

+ 1 - 0
client-adapter/logger/pom.xml

@@ -21,6 +21,7 @@
         </dependency>
     </dependencies>
 
+
     <build>
         <plugins>
             <plugin>

+ 5 - 5
client-adapter/logger/src/main/java/com/alibaba/otter/canal/client/adapter/logger/LoggerAdapterExample.java

@@ -3,8 +3,8 @@ package com.alibaba.otter.canal.client.adapter.logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.alibaba.otter.canal.client.adapter.CanalOuterAdapter;
-import com.alibaba.otter.canal.client.adapter.support.CanalOuterAdapterConfiguration;
+import com.alibaba.otter.canal.client.adapter.OuterAdapter;
+import com.alibaba.otter.canal.client.adapter.support.OuterAdapterConfig;
 import com.alibaba.otter.canal.client.adapter.support.Dml;
 import com.alibaba.otter.canal.client.adapter.support.SPI;
 
@@ -16,17 +16,17 @@ import com.alibaba.otter.canal.client.adapter.support.SPI;
  */
 @SPI("logger")
 // logger参数对应CanalOuterAdapterConfiguration配置中的name
-public class LoggerAdapterExample implements CanalOuterAdapter {
+public class LoggerAdapterExample implements OuterAdapter {
 
     private Logger logger = LoggerFactory.getLogger(this.getClass());
 
     @Override
-    public void init(CanalOuterAdapterConfiguration configuration) {
+    public void init(OuterAdapterConfig configuration) {
 
     }
 
     @Override
-    public void writeOut(Dml dml) {
+    public void sync(Dml dml) {
         logger.info(dml.toString());
     }
 

+ 0 - 0
client-adapter/logger/src/main/resources/META-INF/canal/com.alibaba.otter.canal.client.adapter.CanalOuterAdapter → client-adapter/logger/src/main/resources/META-INF/canal/com.alibaba.otter.canal.client.adapter.OuterAdapter


+ 27 - 5
client-adapter/pom.xml

@@ -2,20 +2,42 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0"
          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <parent>
-        <artifactId>canal</artifactId>
-        <groupId>com.alibaba.otter</groupId>
-        <version>1.1.1-SNAPSHOT</version>
-    </parent>
     <modelVersion>4.0.0</modelVersion>
     <groupId>com.alibaba.otter</groupId>
     <artifactId>canal.client-adapter</artifactId>
+    <version>1.1.1-SNAPSHOT</version>
     <packaging>pom</packaging>
     <name>canal client adapter module for otter ${project.version}</name>
+
+    <properties>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+        <maven.test.skip>true</maven.test.skip>
+        <downloadSources>true</downloadSources>
+        <java_source_version>1.8</java_source_version>
+        <java_target_version>1.8</java_target_version>
+        <file_encoding>UTF-8</file_encoding>
+        <canal_version>1.1.1-SNAPSHOT</canal_version>
+    </properties>
+
     <modules>
         <module>common</module>
         <module>logger</module>
         <module>hbase</module>
+        <module>launcher</module>
     </modules>
 
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <version>3.7.0</version>
+                <configuration>
+                    <source>${java_source_version}</source>
+                    <target>${java_target_version}</target>
+                    <encoding>${file_encoding}</encoding>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
 </project>

+ 0 - 155
client-launcher/pom.xml

@@ -1,155 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <parent>
-        <artifactId>canal</artifactId>
-        <groupId>com.alibaba.otter</groupId>
-        <version>1.1.1-SNAPSHOT</version>
-    </parent>
-    <modelVersion>4.0.0</modelVersion>
-    <groupId>com.alibaba.otter</groupId>
-    <artifactId>canal.client-launcher</artifactId>
-    <packaging>jar</packaging>
-    <name>canal client launcher module for otter ${project.version}</name>
-    <dependencies>
-        <dependency>
-            <groupId>com.alibaba.otter</groupId>
-            <artifactId>client-adapter.common</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.alibaba.otter</groupId>
-            <artifactId>canal.client</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <!-- 单独引入rocketmq依赖 -->
-        <dependency>
-            <groupId>org.apache.rocketmq</groupId>
-            <artifactId>rocketmq-client</artifactId>
-            <version>4.3.0</version>
-        </dependency>
-        <!-- 单独引入kafka依赖 -->
-        <dependency>
-            <groupId>org.apache.kafka</groupId>
-            <artifactId>kafka-clients</artifactId>
-            <version>1.1.1</version>
-        </dependency>
-        <dependency>
-            <groupId>org.yaml</groupId>
-            <artifactId>snakeyaml</artifactId>
-            <version>1.17</version>
-        </dependency>
-
-        <!-- outer adapter -->
-        <dependency>
-            <groupId>com.alibaba.otter</groupId>
-            <artifactId>client-adapter.logger</artifactId>
-            <version>${project.version}</version>
-            <classifier>jar-with-dependencies</classifier>
-        </dependency>
-        <dependency>
-            <groupId>com.alibaba.otter</groupId>
-            <artifactId>client-adapter.hbase</artifactId>
-            <version>${project.version}</version>
-            <classifier>jar-with-dependencies</classifier>
-        </dependency>
-    </dependencies>
-
-    <build>
-        <plugins>
-            <!-- deploy模块的packaging通常是jar,如果项目中没有java 源代码或资源文件,加上这一段配置使项目能通过构建 -->
-            <plugin>
-                <artifactId>maven-jar-plugin</artifactId>
-                <configuration>
-                    <archive>
-                        <addMavenDescriptor>true</addMavenDescriptor>
-                    </archive>
-                    <excludes>
-                        <exclude>**/logback.xml</exclude>
-                        <exclude>**/canal-client.yml</exclude>
-                    </excludes>
-                </configuration>
-            </plugin>
-
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-assembly-plugin</artifactId>
-                <!-- 这是最新版本,推荐使用这个版本 -->
-                <version>2.2.1</version>
-                <executions>
-                    <execution>
-                        <id>assemble</id>
-                        <goals>
-                            <goal>single</goal>
-                        </goals>
-                        <phase>package</phase>
-                    </execution>
-                </executions>
-                <configuration>
-                    <appendAssemblyId>false</appendAssemblyId>
-                    <attach>false</attach>
-                </configuration>
-            </plugin>
-        </plugins>
-    </build>
-
-    <profiles>
-        <profile>
-            <id>dev</id>
-            <activation>
-                <activeByDefault>true</activeByDefault>
-                <property>
-                    <name>env</name>
-                    <value>!release</value>
-                </property>
-            </activation>
-
-            <build>
-                <plugins>
-                    <plugin>
-                        <artifactId>maven-assembly-plugin</artifactId>
-                        <configuration>
-                            <!-- maven assembly插件需要一个描述文件 来告诉插件包的结构以及打包所需的文件来自哪里 -->
-                            <descriptors>
-                                <descriptor>${basedir}/src/main/assembly/dev.xml</descriptor>
-                            </descriptors>
-                            <finalName>canal_client</finalName>
-                            <outputDirectory>${project.build.directory}</outputDirectory>
-                        </configuration>
-                    </plugin>
-                </plugins>
-            </build>
-
-        </profile>
-
-        <profile>
-            <id>release</id>
-            <activation>
-                <property>
-                    <name>env</name>
-                    <value>release</value>
-                </property>
-            </activation>
-
-            <build>
-                <plugins>
-                    <plugin>
-                        <artifactId>maven-assembly-plugin</artifactId>
-                        <configuration>
-                            <!-- 发布模式使用的maven assembly插件描述文件 -->
-                            <descriptors>
-                                <descriptor>${basedir}/src/main/assembly/release.xml</descriptor>
-                            </descriptors>
-                            <!-- 如果一个应用的包含多个deploy模块,如果使用同样的包名, 如果把它们复制的一个目录中可能会失败,所以包名加了 artifactId以示区分 -->
-                            <finalName>${project.artifactId}-${project.version}</finalName>
-                            <!-- scm 要求 release 模式打出的包放到顶级目录下的target子目录中 -->
-                            <outputDirectory>${project.parent.build.directory}</outputDirectory>
-                        </configuration>
-                    </plugin>
-                </plugins>
-            </build>
-        </profile>
-    </profiles>
-
-</project>

+ 0 - 57
client-launcher/src/main/assembly/dev.xml

@@ -1,57 +0,0 @@
-<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-	xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
-	<id>dist</id>
-	<formats>
-		<format>dir</format>
-	</formats>
-	<includeBaseDirectory>false</includeBaseDirectory>
-	<fileSets>
-		<fileSet>
-			<directory>.</directory>
-			<outputDirectory>/</outputDirectory>
-			<includes>
-				<include>README*</include>
-			</includes>
-		</fileSet>
-		<fileSet>
-			<directory>./src/main/bin</directory>
-			<outputDirectory>bin</outputDirectory>
-			<includes>
-				<include>**/*</include>
-			</includes>
-			<fileMode>0755</fileMode>
-		</fileSet>
-		<fileSet>
-			<directory>./src/main/conf</directory>
-			<outputDirectory>/conf</outputDirectory>
-			<includes>
-				<include>**/*</include>
-			</includes>
-		</fileSet>
-		<fileSet>
-			<directory>./src/main/resources</directory>
-			<outputDirectory>/conf</outputDirectory>
-			<includes>
-				<include>**/*</include>
-			</includes>
-			<excludes>
-				<exclude>META-INF/**</exclude>
-			</excludes>
-		</fileSet>
-		<fileSet>
-			<directory>target</directory>
-			<outputDirectory>logs</outputDirectory>
-			<excludes>
-				<exclude>**/*</exclude>
-			</excludes>
-		</fileSet>
-	</fileSets>
-	<dependencySets>
-		<dependencySet>
-			<outputDirectory>lib</outputDirectory>
-			<excludes>
-				<exclude>junit:junit</exclude>
-			</excludes>
-		</dependencySet>
-	</dependencySets>
-</assembly>

+ 0 - 57
client-launcher/src/main/assembly/release.xml

@@ -1,57 +0,0 @@
-<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-	xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
-	<id>dist</id>
-	<formats>
-		<format>tar.gz</format>
-	</formats>
-	<includeBaseDirectory>false</includeBaseDirectory>
-	<fileSets>
-		<fileSet>
-			<directory>.</directory>
-			<outputDirectory>/</outputDirectory>
-			<includes>
-				<include>README*</include>
-			</includes>
-		</fileSet>
-		<fileSet>
-			<directory>./src/main/bin</directory>
-			<outputDirectory>bin</outputDirectory>
-			<includes>
-				<include>**/*</include>
-			</includes>
-			<fileMode>0755</fileMode>
-		</fileSet>
-		<fileSet>
-			<directory>./src/main/conf</directory>
-			<outputDirectory>/conf</outputDirectory>
-			<includes>
-				<include>**/*</include>
-			</includes>
-		</fileSet>
-		<fileSet>
-			<directory>./src/main/resources</directory>
-			<outputDirectory>/conf</outputDirectory>
-			<includes>
-				<include>**/*</include>
-			</includes>
-			<excludes>
-				<exclude>META-INF/**</exclude>
-			</excludes>
-		</fileSet>
-		<fileSet>
-			<directory>target</directory>
-			<outputDirectory>logs</outputDirectory>
-			<excludes>
-				<exclude>**/*</exclude>
-			</excludes>
-		</fileSet>
-	</fileSets>
-	<dependencySets>
-		<dependencySet>
-			<outputDirectory>lib</outputDirectory>
-			<excludes>
-				<exclude>junit:junit</exclude>
-			</excludes>
-		</dependencySet>
-	</dependencySets>
-</assembly>

+ 0 - 25
client-launcher/src/main/bin/startup.bat

@@ -1,25 +0,0 @@
-@echo off
-@if not "%ECHO%" == ""  echo %ECHO%
-@if "%OS%" == "Windows_NT"  setlocal
-
-set ENV_PATH=.\
-if "%OS%" == "Windows_NT" set ENV_PATH=%~dp0%
-
-set conf_dir=%ENV_PATH%\..\conf
-set canal_conf=%conf_dir%\canal-client.yml
-set logback_configurationFile=%conf_dir%\logback.xml
-
-set CLASSPATH=%conf_dir%
-set CLASSPATH=%conf_dir%\..\lib\*;%CLASSPATH%
-
-set JAVA_MEM_OPTS= -Xms128m -Xmx512m -XX:PermSize=128m
-set JAVA_OPTS_EXT= -Djava.awt.headless=true -Djava.net.preferIPv4Stack=true -Dapplication.codeset=UTF-8 -Dfile.encoding=UTF-8
-set JAVA_DEBUG_OPT= -server -Xdebug -Xnoagent -Djava.compiler=NONE -Xrunjdwp:transport=dt_socket,address=9099,server=y,suspend=n
-set CANAL_OPTS= -DappName=otter-canal -Dlogback.configurationFile="%logback_configurationFile%" -Dcanal.conf="%canal_conf%"
-
-set JAVA_OPTS= %JAVA_MEM_OPTS% %JAVA_OPTS_EXT% %JAVA_DEBUG_OPT% %CANAL_OPTS%
-
-set CMD_STR= java %JAVA_OPTS% -classpath "%CLASSPATH%" java %JAVA_OPTS% -classpath "%CLASSPATH%" com.alibaba.otter.canal.client.ClientLauncher
-echo start cmd : %CMD_STR%
-
-java %JAVA_OPTS% -classpath "%CLASSPATH%" com.alibaba.otter.canal.client.ClientLauncher

+ 0 - 100
client-launcher/src/main/bin/startup.sh

@@ -1,100 +0,0 @@
-#!/bin/bash 
-
-current_path=`pwd`
-case "`uname`" in
-    Linux)
-		bin_abs_path=$(readlink -f $(dirname $0))
-		;;
-	*)
-		bin_abs_path=`cd $(dirname $0); pwd`
-		;;
-esac
-base=${bin_abs_path}/..
-canal_conf=$base/conf/canal-client.yml
-logback_configurationFile=$base/conf/logback.xml
-export LANG=en_US.UTF-8
-export BASE=$base
-
-if [ -f $base/bin/canal_client.pid ] ; then
-	echo "found canal_client.pid , Please run stop.sh first ,then startup.sh" 2>&2
-    exit 1
-fi
-
-## set java path
-if [ -z "$JAVA" ] ; then
-  JAVA=$(which java)
-fi
-
-ALIBABA_JAVA="/usr/alibaba/java/bin/java"
-TAOBAO_JAVA="/opt/taobao/java/bin/java"
-if [ -z "$JAVA" ]; then
-  if [ -f $ALIBABA_JAVA ] ; then
-  	JAVA=$ALIBABA_JAVA
-  elif [ -f $TAOBAO_JAVA ] ; then
-  	JAVA=$TAOBAO_JAVA
-  else
-  	echo "Cannot find a Java JDK. Please set either set JAVA or put java (>=1.5) in your PATH." 2>&2
-    exit 1
-  fi
-fi
-
-case "$#" 
-in
-0 ) 
-	;;
-1 )	
-	var=$*
-	if [ -f $var ] ; then 
-		canal_conf=$var
-	else
-		echo "THE PARAMETER IS NOT CORRECT.PLEASE CHECK AGAIN."
-        exit
-	fi;;
-2 )	
-	var=$1
-	if [ -f $var ] ; then
-		canal_conf=$var
-	else 
-		if [ "$1" = "debug" ]; then
-			DEBUG_PORT=$2
-			DEBUG_SUSPEND="n"
-			JAVA_DEBUG_OPT="-Xdebug -Xnoagent -Djava.compiler=NONE -Xrunjdwp:transport=dt_socket,address=$DEBUG_PORT,server=y,suspend=$DEBUG_SUSPEND"
-		fi
-     fi;;
-* )
-	echo "THE PARAMETERS MUST BE TWO OR LESS.PLEASE CHECK AGAIN."
-	exit;;
-esac
-
-str=`file -L $JAVA | grep 64-bit`
-if [ -n "$str" ]; then
-	JAVA_OPTS="-server -Xms2048m -Xmx3072m -Xmn1024m -XX:SurvivorRatio=2 -XX:PermSize=96m -XX:MaxPermSize=256m -Xss256k -XX:-UseAdaptiveSizePolicy -XX:MaxTenuringThreshold=15 -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCMSCompactAtFullCollection -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:+HeapDumpOnOutOfMemoryError"
-else
-	JAVA_OPTS="-server -Xms1024m -Xmx1024m -XX:NewSize=256m -XX:MaxNewSize=256m -XX:MaxPermSize=128m "
-fi
-
-JAVA_OPTS=" $JAVA_OPTS -Djava.awt.headless=true -Djava.net.preferIPv4Stack=true -Dfile.encoding=UTF-8"
-CANAL_OPTS="-DappName=otter-canal-client -Dlogback.configurationFile=$logback_configurationFile -Dcanal.conf=$canal_conf"
-
-if [ -e $canal_conf -a -e $logback_configurationFile ]
-then 
-	
-	for i in $base/lib/*;
-		do CLASSPATH=$i:"$CLASSPATH";
-	done
- 	CLASSPATH="$base/conf:$CLASSPATH";
- 	
- 	echo "cd to $bin_abs_path for workaround relative path"
-  	cd $bin_abs_path
- 	
-	echo LOG CONFIGURATION : $logback_configurationFile
-	echo canal conf : $canal_conf 
-	echo CLASSPATH :$CLASSPATH
-	$JAVA $JAVA_OPTS $JAVA_DEBUG_OPT $CANAL_OPTS -classpath .:$CLASSPATH com.alibaba.otter.canal.client.ClientLauncher 1>>$base/logs/canal_client.log 2>&1 &
-	echo $! > $base/bin/canal_client.pid
-	
-	echo "cd to $current_path for continue"
-  	cd $current_path
-else 
-	echo "canal client conf("$canal_conf") OR log configration file($logback_configurationFile) is not exist,please create then first!"
-fi

+ 0 - 65
client-launcher/src/main/bin/stop.sh

@@ -1,65 +0,0 @@
-#!/bin/bash
-
-cygwin=false;
-linux=false;
-case "`uname`" in
-    CYGWIN*)
-        cygwin=true
-        ;;
-    Linux*)
-    	linux=true
-    	;;
-esac
-
-get_pid() {	
-	STR=$1
-	PID=$2
-    if $cygwin; then
-        JAVA_CMD="$JAVA_HOME\bin\java"
-        JAVA_CMD=`cygpath --path --unix $JAVA_CMD`
-        JAVA_PID=`ps |grep $JAVA_CMD |awk '{print $1}'`
-    else
-    	if $linux; then
-	        if [ ! -z "$PID" ]; then
-	        	JAVA_PID=`ps -C java -f --width 1000|grep "$STR"|grep "$PID"|grep -v grep|awk '{print $2}'`
-		    else 
-		        JAVA_PID=`ps -C java -f --width 1000|grep "$STR"|grep -v grep|awk '{print $2}'`
-	        fi
-	    else
-	    	if [ ! -z "$PID" ]; then
-	        	JAVA_PID=`ps aux |grep "$STR"|grep "$PID"|grep -v grep|awk '{print $2}'`
-		    else 
-		        JAVA_PID=`ps aux |grep "$STR"|grep -v grep|awk '{print $2}'`
-	        fi
-	    fi
-    fi
-    echo $JAVA_PID;
-}
-
-base=`dirname $0`/..
-pidfile=$base/bin/canal_client.pid
-if [ ! -f "$pidfile" ];then
-	echo "canal client is not running. exists"
-	exit
-fi
-
-pid=`cat $pidfile`
-if [ "$pid" == "" ] ; then
-	pid=`get_pid "appName=otter-canal-client"`
-fi
-
-echo -e "`hostname`: stopping canal $pid ... "
-kill $pid
-
-LOOPS=0
-while (true); 
-do 
-	gpid=`get_pid "appName=otter-canal-client" "$pid"`
-    if [ "$gpid" == "" ] ; then
-    	echo "Oook! cost:$LOOPS"
-    	`rm $pidfile`
-    	break;
-    fi
-    let LOOPS=LOOPS+1
-    sleep 1
-done

+ 0 - 66
client-launcher/src/main/java/com/alibaba/otter/canal/client/ClientLauncher.java

@@ -1,66 +0,0 @@
-package com.alibaba.otter.canal.client;
-
-import java.io.FileInputStream;
-
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.yaml.snakeyaml.Yaml;
-
-import com.alibaba.otter.canal.client.adapter.loader.CanalAdapterLoader;
-import com.alibaba.otter.canal.client.adapter.support.CanalClientConfig;
-
-public class ClientLauncher {
-
-    private static final String CLASSPATH_URL_PREFIX = "classpath:";
-    private static final Logger logger               = LoggerFactory.getLogger(ClientLauncher.class);
-
-    public static void main(String[] args) {
-        try {
-            logger.info("## set default uncaught exception handler");
-            setGlobalUncaughtExceptionHandler();
-
-            logger.info("## load canal client configurations");
-            String conf = System.getProperty("client.conf", "classpath:canal-client.yml");
-            CanalClientConfig canalClientConfig;
-            if (conf.startsWith(CLASSPATH_URL_PREFIX)) {
-                conf = StringUtils.substringAfter(conf, CLASSPATH_URL_PREFIX);
-                canalClientConfig = new Yaml().loadAs(ClientLauncher.class.getClassLoader().getResourceAsStream(conf),
-                    CanalClientConfig.class);
-            } else {
-                canalClientConfig = new Yaml().loadAs(new FileInputStream(conf), CanalClientConfig.class);
-            }
-            logger.info("## start the canal client adapters.");
-            final CanalAdapterLoader adapterLoader = new CanalAdapterLoader(canalClientConfig);
-            adapterLoader.init();
-            logger.info("## the canal client adapters are running now ......");
-            Runtime.getRuntime().addShutdownHook(new Thread() {
-
-                public void run() {
-                    try {
-                        logger.info("## stop the canal client adapters");
-                        adapterLoader.destroy();
-                    } catch (Throwable e) {
-                        logger.warn("## something goes wrong when stopping canal client adapters:", e);
-                    } finally {
-                        logger.info("## canal client adapters are down.");
-                    }
-                }
-
-            });
-        } catch (Throwable e) {
-            logger.error("## something goes wrong when starting up the canal client adapters:", e);
-            System.exit(0);
-        }
-    }
-
-    private static void setGlobalUncaughtExceptionHandler() {
-        Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
-
-            @Override
-            public void uncaughtException(Thread t, Throwable e) {
-                logger.error("UnCaughtException", e);
-            }
-        });
-    }
-}

+ 0 - 168
client-launcher/src/main/java/com/alibaba/otter/canal/client/adapter/loader/AbstractCanalAdapterWorker.java

@@ -1,168 +0,0 @@
-package com.alibaba.otter.canal.client.adapter.loader;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.alibaba.otter.canal.client.adapter.CanalOuterAdapter;
-import com.alibaba.otter.canal.client.adapter.support.Dml;
-import com.alibaba.otter.canal.client.adapter.support.MessageUtil;
-import com.alibaba.otter.canal.protocol.FlatMessage;
-import com.alibaba.otter.canal.protocol.Message;
-
-/**
- * 适配器工作线程抽象类
- *
- * @author machengyuan 2018-8-19 下午11:30:49
- * @version 1.0.0
- */
-public abstract class AbstractCanalAdapterWorker {
-
-    protected final Logger                    logger  = LoggerFactory.getLogger(this.getClass());
-
-    protected String                          canalDestination;                                  // canal实例
-    protected List<List<CanalOuterAdapter>>   canalOuterAdapters;                                // 外部适配器
-    protected ExecutorService                 groupInnerExecutorService;                         // 组内工作线程池
-    protected volatile boolean                running = false;                                   // 是否运行中
-    protected Thread                          thread  = null;
-    protected Thread.UncaughtExceptionHandler handler = new Thread.UncaughtExceptionHandler() {
-
-                                                          @Override
-                                                          public void uncaughtException(Thread t, Throwable e) {
-                                                              logger.error("parse events has an error", e);
-                                                          }
-                                                      };
-
-    protected void writeOut(final Message message) {
-        List<Future<Boolean>> futures = new ArrayList<>();
-        // 组间适配器并行运行
-        for (List<CanalOuterAdapter> outerAdapters : canalOuterAdapters) {
-            final List<CanalOuterAdapter> adapters = outerAdapters;
-            futures.add(groupInnerExecutorService.submit(new Callable<Boolean>() {
-
-                @Override
-                public Boolean call() {
-                    try {
-                        // 组内适配器穿行运行,尽量不要配置组内适配器
-                        for (final CanalOuterAdapter c : adapters) {
-                            long begin = System.currentTimeMillis();
-                            MessageUtil.parse4Dml(message, new MessageUtil.Consumer<Dml>() {
-
-                                @Override
-                                public void accept(Dml dml) {
-                                    c.writeOut(dml);
-                                }
-                            });
-
-                            if (logger.isDebugEnabled()) {
-                                logger.debug("{} elapsed time: {}",
-                                    c.getClass().getName(),
-                                    (System.currentTimeMillis() - begin));
-                            }
-                        }
-                        return true;
-                    } catch (Exception e) {
-                        return false;
-                    }
-                }
-            }));
-
-            // 等待所有适配器写入完成
-            // 由于是组间并发操作,所以将阻塞直到耗时最久的工作组操作完成
-            for (Future<Boolean> f : futures) {
-                try {
-                    if (!f.get()) {
-                        logger.error("Outer adapter write failed");
-                    }
-                } catch (InterruptedException | ExecutionException e) {
-                    // ignore
-                }
-            }
-        }
-    }
-
-    protected void writeOut(final FlatMessage flatMessage) {
-        List<Future<Boolean>> futures = new ArrayList<>();
-        // 组间适配器并行运行
-        for (List<CanalOuterAdapter> outerAdapters : canalOuterAdapters) {
-            final List<CanalOuterAdapter> adapters = outerAdapters;
-            futures.add(groupInnerExecutorService.submit(new Callable<Boolean>() {
-
-                @Override
-                public Boolean call() {
-                    try {
-                        // 组内适配器穿行运行,尽量不要配置组内适配器
-                        for (CanalOuterAdapter c : adapters) {
-                            long begin = System.currentTimeMillis();
-                            Dml dml = MessageUtil.flatMessage2Dml(flatMessage);
-                            c.writeOut(dml);
-                            if (logger.isDebugEnabled()) {
-                                logger.debug("{} elapsed time: {}",
-                                    c.getClass().getName(),
-                                    (System.currentTimeMillis() - begin));
-                            }
-                        }
-                        return true;
-                    } catch (Exception e) {
-                        return false;
-                    }
-                }
-            }));
-
-            // 等待所有适配器写入完成
-            // 由于是组间并发操作,所以将阻塞直到耗时最久的工作组操作完成
-            for (Future<Boolean> f : futures) {
-                try {
-                    if (!f.get()) {
-                        logger.error("Outer adapter write failed");
-                    }
-                } catch (InterruptedException | ExecutionException e) {
-                    // ignore
-                }
-            }
-        }
-    }
-
-    protected void writeOut(Message message, String topic) {
-        if (logger.isDebugEnabled()) {
-            logger.debug("topic: {} batchId: {} batchSize: {} ", topic, message.getId(), message.getEntries().size());
-        }
-        long begin = System.currentTimeMillis();
-        writeOut(message);
-        long now = System.currentTimeMillis();
-        if ((System.currentTimeMillis() - begin) > 5 * 60 * 1000) {
-            logger.error("topic: {} batchId {} elapsed time: {} ms", topic, message.getId(), now - begin);
-        }
-        if (logger.isDebugEnabled()) {
-            logger.debug("topic: {} batchId {} elapsed time: {} ms", topic, message.getId(), now - begin);
-        }
-    }
-
-    protected void stopOutAdapters() {
-        if (thread != null) {
-            try {
-                thread.join();
-            } catch (InterruptedException e) {
-                // ignore
-            }
-        }
-        groupInnerExecutorService.shutdown();
-        logger.info("topic connectors' worker thread dead!");
-        for (List<CanalOuterAdapter> outerAdapters : canalOuterAdapters) {
-            for (CanalOuterAdapter adapter : outerAdapters) {
-                adapter.destroy();
-            }
-        }
-        logger.info("topic all connectors destroyed!");
-    }
-
-    public abstract void start();
-
-    public abstract void stop();
-}

+ 0 - 179
client-launcher/src/main/java/com/alibaba/otter/canal/client/adapter/loader/CanalAdapterKafkaWorker.java

@@ -1,179 +0,0 @@
-package com.alibaba.otter.canal.client.adapter.loader;
-
-import java.util.List;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.kafka.clients.consumer.CommitFailedException;
-import org.apache.kafka.common.errors.WakeupException;
-
-import com.alibaba.otter.canal.client.adapter.CanalOuterAdapter;
-import com.alibaba.otter.canal.client.kafka.KafkaCanalConnector;
-import com.alibaba.otter.canal.client.kafka.KafkaCanalConnectors;
-import com.alibaba.otter.canal.protocol.FlatMessage;
-import com.alibaba.otter.canal.protocol.Message;
-
-/**
- * kafka对应的client适配器工作线程
- *
- * @author machengyuan 2018-8-19 下午11:30:49
- * @version 1.0.0
- */
-public class CanalAdapterKafkaWorker extends AbstractCanalAdapterWorker {
-
-    private KafkaCanalConnector connector;
-
-    private String              topic;
-
-    private boolean             flatMessage;
-
-    public CanalAdapterKafkaWorker(String bootstrapServers, String topic, String groupId,
-                                   List<List<CanalOuterAdapter>> canalOuterAdapters, boolean flatMessage){
-        this.canalOuterAdapters = canalOuterAdapters;
-        this.groupInnerExecutorService = Executors.newFixedThreadPool(canalOuterAdapters.size());
-        this.topic = topic;
-        this.canalDestination = topic;
-        this.flatMessage = flatMessage;
-        connector = KafkaCanalConnectors.newKafkaConnector(bootstrapServers, topic, null, groupId, flatMessage);
-        // connector.setSessionTimeout(1L, TimeUnit.MINUTES);
-
-        // super.initSwitcher(topic);
-    }
-
-    @Override
-    public void start() {
-        if (!running) {
-            thread = new Thread(new Runnable() {
-
-                @Override
-                public void run() {
-                    process();
-                }
-            });
-            thread.setUncaughtExceptionHandler(handler);
-            running = true;
-            thread.start();
-        }
-    }
-
-    @Override
-    public void stop() {
-        try {
-            if (!running) {
-                return;
-            }
-
-            connector.stopRunning();
-            running = false;
-
-            // if (switcher != null && !switcher.state()) {
-            // switcher.set(true);
-            // }
-
-            if (thread != null) {
-                try {
-                    thread.join();
-                } catch (InterruptedException e) {
-                    // ignore
-                }
-            }
-            groupInnerExecutorService.shutdown();
-            logger.info("topic {} connectors' worker thread dead!", this.topic);
-            for (List<CanalOuterAdapter> outerAdapters : canalOuterAdapters) {
-                for (CanalOuterAdapter adapter : outerAdapters) {
-                    adapter.destroy();
-                }
-            }
-            logger.info("topic {} all connectors destroyed!", this.topic);
-        } catch (Exception e) {
-            logger.error(e.getMessage(), e);
-        }
-    }
-
-    private void process() {
-        while (!running)
-            ;
-        ExecutorService executor = Executors.newSingleThreadExecutor();
-        // final AtomicBoolean executing = new AtomicBoolean(true);
-        while (running) {
-            try {
-                logger.info("=============> Start to connect topic: {} <=============", this.topic);
-                connector.connect();
-                logger.info("=============> Start to subscribe topic: {} <=============", this.topic);
-                connector.subscribe();
-                logger.info("=============> Subscribe topic: {} succeed <=============", this.topic);
-                while (running) {
-                    try {
-                        // switcher.get(); //等待开关开启
-
-                        List<?> messages;
-                        if (!flatMessage) {
-                            messages = connector.getWithoutAck();
-                        } else {
-                            messages = connector.getFlatMessageWithoutAck(100L, TimeUnit.MILLISECONDS);
-                        }
-                        if (messages != null) {
-                            for (final Object message : messages) {
-                                if (message instanceof FlatMessage) {
-                                    writeOut((FlatMessage) message);
-                                } else {
-                                    writeOut((Message) message);
-                                }
-                                // executing.set(true);
-                                // if (message != null) {
-                                // executor.submit(new Runnable() {
-                                //
-                                // @Override
-                                // public void run() {
-                                // try {
-                                // if (message instanceof FlatMessage) {
-                                // writeOut((FlatMessage) message);
-                                // } else {
-                                // writeOut((Message) message);
-                                // }
-                                // } catch (Exception e) {
-                                // logger.error(e.getMessage(), e);
-                                // } finally {
-                                // executing.compareAndSet(true, false);
-                                // }
-                                // }
-                                // });
-                                //
-                                // // 间隔一段时间ack一次, 防止因超时未响应切换到另外台客户端
-                                // long currentTS = System.currentTimeMillis();
-                                // while (executing.get()) {
-                                // // 大于10秒未消费完ack一次keep alive
-                                // if (System.currentTimeMillis() - currentTS > 10000) {
-                                // connector.ack();
-                                // currentTS = System.currentTimeMillis();
-                                // }
-                                // }
-                                // }
-                            }
-                        }
-                        connector.ack();
-                    } catch (CommitFailedException e) {
-                        logger.warn(e.getMessage());
-                    } catch (Exception e) {
-                        logger.error(e.getMessage(), e);
-                        TimeUnit.SECONDS.sleep(1L);
-                    }
-                }
-            } catch (Exception e) {
-                logger.error(e.getMessage(), e);
-            }
-        }
-
-        executor.shutdown();
-
-        try {
-            connector.unsubscribe();
-        } catch (WakeupException e) {
-            // No-op. Continue process
-        }
-        connector.disconnect();
-        logger.info("=============> Disconnect topic: {} <=============", this.topic);
-    }
-}

+ 0 - 47
client-launcher/src/main/resources/logback.xml

@@ -1,47 +0,0 @@
-<configuration scan="true" scanPeriod=" 5 seconds">
-	<jmxConfigurator />
-	<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
-		<encoder>
-			<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{56} - %msg%n
-			</pattern>
-		</encoder>
-	</appender>
-	
-	<appender name="CANAL-ROOT" class="ch.qos.logback.classic.sift.SiftingAppender">
-		<discriminator>
-			<Key>destination</Key>
-			<DefaultValue>canal_client</DefaultValue>
-		</discriminator>
-		<sift>
-			<appender name="FILE-${destination}" class="ch.qos.logback.core.rolling.RollingFileAppender">
-				<File>../logs/${destination}.log</File>
-				<rollingPolicy
-					class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
-					<!-- rollover daily -->
-					<fileNamePattern>../logs/%d{yyyy-MM-dd}/${destination}-%d{yyyy-MM-dd}-%i.log.gz</fileNamePattern>
-					<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
-						<!-- or whenever the file size reaches 100MB -->
-						<maxFileSize>512MB</maxFileSize>
-					</timeBasedFileNamingAndTriggeringPolicy>
-					<maxHistory>60</maxHistory>
-				</rollingPolicy>
-				<encoder>
-					<pattern>
-						%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{56} - %msg%n
-					</pattern>
-				</encoder>
-			</appender>
-		</sift>
-	</appender>
-
-	<logger name="com.alibaba.otter.canal.client" additivity="false">
-		<level value="INFO" />
-		<!--<appender-ref ref="STDOUT"/>-->
-		<appender-ref ref="CANAL-ROOT" />
-	</logger>
-    
-	<root level="WARN">
-		<!--<appender-ref ref="STDOUT"/>-->
-		<appender-ref ref="CANAL-ROOT" />
-	</root>
-</configuration>

+ 2 - 2
dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/RowsLogBuffer.java

@@ -367,7 +367,7 @@ public final class RowsLogBuffer {
                     // 转化为unsign long
                     switch (len) {
                         case 1:
-                            value = buffer.getInt8();
+                            value = buffer.getUint8();
                             break;
                         case 2:
                             value = buffer.getBeUint16();
@@ -859,7 +859,7 @@ public final class RowsLogBuffer {
                     // 转化为unsign long
                     switch (len) {
                         case 1:
-                            value = buffer.getInt8();
+                            value = buffer.getUint8();
                             break;
                         case 2:
                             value = buffer.getUint16();

+ 5 - 0
deployer/src/main/resources/canal.properties

@@ -20,6 +20,7 @@ canal.instance.memory.buffer.size = 16384
 canal.instance.memory.buffer.memunit = 1024 
 ## meory store gets mode used MEMSIZE or ITEMSIZE
 canal.instance.memory.batch.mode = MEMSIZE
+canal.instance.memory.rawEntry = true
 
 ## detecing config
 canal.instance.detecting.enable = false
@@ -68,6 +69,10 @@ canal.instance.tsdb.dir=${canal.file.data.dir:../conf}/${canal.instance.destinat
 canal.instance.tsdb.url=jdbc:h2:${canal.instance.tsdb.dir}/h2;CACHE_SIZE=1000;MODE=MYSQL;
 canal.instance.tsdb.dbUsername=canal
 canal.instance.tsdb.dbPassword=canal
+# dump snapshot interval, default 24 hours
+canal.instance.tsdb.snapshot.interval=24
+# purge snapshot expire, default 360 hours (15 days)
+canal.instance.tsdb.snapshot.expire=360
 
 # rds oss binlog account
 canal.instance.rds.accesskey =

+ 1 - 1
deployer/src/main/resources/example/instance.properties

@@ -25,7 +25,7 @@ canal.instance.tsdb.enable=true
 
 #canal.instance.standby.address =
 #canal.instance.standby.journal.name =
-#canal.instance.standby.position = 
+#canal.instance.standby.position =
 #canal.instance.standby.timestamp =
 #canal.instance.standby.gtid=
 

+ 19 - 19
deployer/src/main/resources/logback.xml

@@ -6,7 +6,7 @@
 			</pattern>
 		</encoder>
 	</appender>
-	
+
 	<appender name="CANAL-ROOT" class="ch.qos.logback.classic.sift.SiftingAppender">
 		<discriminator>
 			<Key>destination</Key>
@@ -16,7 +16,7 @@
 			<appender name="FILE-${destination}" class="ch.qos.logback.core.rolling.RollingFileAppender">
 				<File>../logs/${destination}/${destination}.log</File>
 				<rollingPolicy
-					class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+						class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
 					<!-- rollover daily -->
 					<fileNamePattern>../logs/${destination}/%d{yyyy-MM-dd}/${destination}-%d{yyyy-MM-dd}-%i.log.gz</fileNamePattern>
 					<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
@@ -33,7 +33,7 @@
 			</appender>
 		</sift>
 	</appender>
-	
+
 	<appender name="CANAL-META" class="ch.qos.logback.classic.sift.SiftingAppender">
 		<discriminator>
 			<Key>destination</Key>
@@ -43,7 +43,7 @@
 			<appender name="META-FILE-${destination}" class="ch.qos.logback.core.rolling.RollingFileAppender">
 				<File>../logs/${destination}/meta.log</File>
 				<rollingPolicy
-					class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+						class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
 					<!-- rollover daily -->
 					<fileNamePattern>../logs/${destination}/%d{yyyy-MM-dd}/meta-%d{yyyy-MM-dd}-%i.log.gz</fileNamePattern>
 					<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
@@ -60,26 +60,26 @@
 			</appender>
 		</sift>
 	</appender>
-	
-    <logger name="com.alibaba.otter.canal.instance" additivity="false">  
-        <level value="INFO" />  
-        <appender-ref ref="CANAL-ROOT" />
-    </logger>
-    <logger name="com.alibaba.otter.canal.deployer" additivity="false">  
-        <level value="INFO" />  
-        <appender-ref ref="CANAL-ROOT" />
-    </logger>
-    <logger name="com.alibaba.otter.canal.meta.FileMixedMetaManager" additivity="false">  
-        <level value="INFO" />
-        <appender-ref ref="CANAL-META" />
-    </logger>
+
+	<logger name="com.alibaba.otter.canal.instance" additivity="false">
+		<level value="INFO" />
+		<appender-ref ref="CANAL-ROOT" />
+	</logger>
+	<logger name="com.alibaba.otter.canal.deployer" additivity="false">
+		<level value="INFO" />
+		<appender-ref ref="CANAL-ROOT" />
+	</logger>
+	<logger name="com.alibaba.otter.canal.meta.FileMixedMetaManager" additivity="false">
+		<level value="INFO" />
+		<appender-ref ref="CANAL-META" />
+	</logger>
 	<logger name="com.alibaba.otter.canal.kafka" additivity="false">
 		<level value="INFO" />
 		<appender-ref ref="CANAL-ROOT" />
 	</logger>
-    
+
 	<root level="WARN">
-		<!-- <appender-ref ref="STDOUT"/> -->
+		<appender-ref ref="STDOUT"/>
 		<appender-ref ref="CANAL-ROOT" />
 	</root>
 </configuration>

+ 3 - 0
deployer/src/main/resources/spring/default-instance.xml

@@ -58,6 +58,7 @@
 		<property name="bufferMemUnit" value="${canal.instance.memory.buffer.memunit:1024}" />
 		<property name="batchMode" value="${canal.instance.memory.batch.mode:MEMSIZE}" />
 		<property name="ddlIsolation" value="${canal.instance.get.ddl.isolation:false}" />
+		<property name="raw" value="${canal.instance.memory.rawEntry:true}" />
 	</bean>
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
@@ -175,6 +176,8 @@
 		<!--表结构相关-->
 		<property name="enableTsdb" value="${canal.instance.tsdb.enable:true}"/>
 		<property name="tsdbSpringXml" value="${canal.instance.tsdb.spring.xml:}"/>
+		<property name="tsdbSnapshotInterval" value="${canal.instance.tsdb.snapshot.interval:24}" />
+		<property name="tsdbSnapshotExpire" value="${canal.instance.tsdb.snapshot.expire:360}" />
 		
 		<!--是否启用GTID模式-->
 		<property name="isGTIDMode" value="${canal.instance.gtidon:false}"/>

+ 3 - 0
deployer/src/main/resources/spring/file-instance.xml

@@ -44,6 +44,7 @@
 		<property name="bufferMemUnit" value="${canal.instance.memory.buffer.memunit:1024}" />
 		<property name="batchMode" value="${canal.instance.memory.batch.mode:MEMSIZE}" />
 		<property name="ddlIsolation" value="${canal.instance.get.ddl.isolation:false}" />
+		<property name="raw" value="${canal.instance.memory.rawEntry:true}" />
 	</bean>
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
@@ -160,6 +161,8 @@
 		<!--表结构相关-->
 		<property name="enableTsdb" value="${canal.instance.tsdb.enable:true}"/>
 		<property name="tsdbSpringXml" value="${canal.instance.tsdb.spring.xml:}"/>
+		<property name="tsdbSnapshotInterval" value="${canal.instance.tsdb.snapshot.interval:24}" />
+		<property name="tsdbSnapshotExpire" value="${canal.instance.tsdb.snapshot.expire:360}" />
 
 		<!--是否启用GTID模式-->
 		<property name="isGTIDMode" value="${canal.instance.gtidon:false}"/>

+ 1 - 0
deployer/src/main/resources/spring/group-instance.xml

@@ -41,6 +41,7 @@
 		<property name="bufferMemUnit" value="${canal.instance.memory.buffer.memunit:1024}" />
 		<property name="batchMode" value="${canal.instance.memory.batch.mode:MEMSIZE}" />
 		<property name="ddlIsolation" value="${canal.instance.get.ddl.isolation:false}" />
+		<property name="raw" value="${canal.instance.memory.rawEntry:true}" />
 	</bean>
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">

+ 3 - 0
deployer/src/main/resources/spring/memory-instance.xml

@@ -41,6 +41,7 @@
 		<property name="bufferMemUnit" value="${canal.instance.memory.buffer.memunit:1024}" />
 		<property name="batchMode" value="${canal.instance.memory.batch.mode:MEMSIZE}" />
 		<property name="ddlIsolation" value="${canal.instance.get.ddl.isolation:false}" />
+		<property name="raw" value="${canal.instance.memory.rawEntry:true}" />
 	</bean>
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
@@ -148,6 +149,8 @@
 		<!--表结构相关-->
 		<property name="enableTsdb" value="${canal.instance.tsdb.enable:false}"/>
 		<property name="tsdbSpringXml" value="${canal.instance.tsdb.spring.xml:}"/>
+		<property name="tsdbSnapshotInterval" value="${canal.instance.tsdb.snapshot.interval:24}" />
+		<property name="tsdbSnapshotExpire" value="${canal.instance.tsdb.snapshot.expire:360}" />
 		
 		<!--是否启用GTID模式-->
 		<property name="isGTIDMode" value="${canal.instance.gtidon:false}"/>

+ 2 - 2
deployer/src/main/resources/spring/tsdb/sql-map/sqlmap_history.xml

@@ -36,10 +36,10 @@
     </delete>
 
 
-    <delete id="deleteByGmtModified" parameterClass="java.util.Map">
+    <delete id="deleteByTimestamp" parameterClass="java.util.Map">
         <![CDATA[
 		delete from meta_history
-		where gmt_modified < timestamp(#timestamp#)
+		where destination=#destination# and binlog_timestamp < #timestamp#
         ]]>
     </delete>
 </sqlMap>

+ 2 - 2
deployer/src/main/resources/spring/tsdb/sql-map/sqlmap_snapshot.xml

@@ -42,10 +42,10 @@
         where destination=#destination#
     </delete>
 
-    <delete id="deleteByGmtModified" parameterClass="java.util.Map">
+    <delete id="deleteByTimestamp" parameterClass="java.util.Map">
         <![CDATA[
 		delete from meta_snapshot
-		where gmt_modified < timestamp(#timestamp#)
+		where destination=#destination# and binlog_timestamp < #timestamp# and binlog_timestamp > 0
         ]]>
     </delete>
 </sqlMap>

+ 59 - 4
instance/manager/src/main/java/com/alibaba/otter/canal/instance/manager/CanalInstanceWithManager.java

@@ -6,7 +6,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
-import com.alibaba.otter.canal.meta.FileMixedMetaManager;
+import org.apache.commons.lang.BooleanUtils;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -39,7 +39,16 @@ import com.alibaba.otter.canal.parse.inbound.AbstractEventParser;
 import com.alibaba.otter.canal.parse.inbound.group.GroupEventParser;
 import com.alibaba.otter.canal.parse.inbound.mysql.LocalBinlogEventParser;
 import com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser;
-import com.alibaba.otter.canal.parse.index.*;
+import com.alibaba.otter.canal.parse.inbound.mysql.rds.RdsBinlogEventParserProxy;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.DefaultTableMetaTSDBFactory;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.TableMetaTSDB;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.TableMetaTSDBBuilder;
+import com.alibaba.otter.canal.parse.index.CanalLogPositionManager;
+import com.alibaba.otter.canal.parse.index.FailbackLogPositionManager;
+import com.alibaba.otter.canal.parse.index.MemoryLogPositionManager;
+import com.alibaba.otter.canal.parse.index.MetaLogPositionManager;
+import com.alibaba.otter.canal.parse.index.PeriodMixedLogPositionManager;
+import com.alibaba.otter.canal.parse.index.ZooKeeperLogPositionManager;
 import com.alibaba.otter.canal.parse.support.AuthenticationInfo;
 import com.alibaba.otter.canal.protocol.position.EntryPosition;
 import com.alibaba.otter.canal.sink.entry.EntryEventSink;
@@ -138,6 +147,7 @@ public class CanalInstanceWithManager extends AbstractCanalInstance {
             memoryEventStore.setBufferMemUnit(parameters.getMemoryStorageBufferMemUnit());
             memoryEventStore.setBatchMode(BatchMode.valueOf(parameters.getStorageBatchMode().name()));
             memoryEventStore.setDdlIsolation(parameters.getDdlIsolation());
+            memoryEventStore.setRaw(parameters.getMemoryStorageRawEntry());
             eventStore = memoryEventStore;
         } else if (mode.isFile()) {
             // 后续版本支持
@@ -229,7 +239,18 @@ public class CanalInstanceWithManager extends AbstractCanalInstance {
     private CanalEventParser doInitEventParser(SourcingType type, List<InetSocketAddress> dbAddresses) {
         CanalEventParser eventParser;
         if (type.isMysql()) {
-            MysqlEventParser mysqlEventParser = new MysqlEventParser();
+            MysqlEventParser mysqlEventParser = null;
+            if (StringUtils.isNotEmpty(parameters.getRdsAccesskey())
+                && StringUtils.isNotEmpty(parameters.getRdsSecretkey())
+                && StringUtils.isNotEmpty(parameters.getRdsInstanceId())) {
+
+                mysqlEventParser = new RdsBinlogEventParserProxy();
+                ((RdsBinlogEventParserProxy) mysqlEventParser).setAccesskey(parameters.getRdsAccesskey());
+                ((RdsBinlogEventParserProxy) mysqlEventParser).setSecretkey(parameters.getRdsSecretkey());
+                ((RdsBinlogEventParserProxy) mysqlEventParser).setInstanceId(parameters.getRdsInstanceId());
+            } else {
+                mysqlEventParser = new MysqlEventParser();
+            }
             mysqlEventParser.setDestination(destination);
             // 编码参数
             mysqlEventParser.setConnectionCharset(Charset.forName(parameters.getConnectionCharset()));
@@ -273,6 +294,40 @@ public class CanalInstanceWithManager extends AbstractCanalInstance {
             mysqlEventParser.setFallbackIntervalInSeconds(parameters.getFallbackIntervalInSeconds());
             mysqlEventParser.setProfilingEnabled(false);
             mysqlEventParser.setFilterTableError(parameters.getFilterTableError());
+            mysqlEventParser.setIsGTIDMode(BooleanUtils.toBoolean(parameters.getGtidEnable()));
+            // tsdb
+            if (parameters.getTsdbSnapshotInterval() != null) {
+                mysqlEventParser.setTsdbSnapshotInterval(parameters.getTsdbSnapshotInterval());
+            }
+            if (parameters.getTsdbSnapshotExpire() != null) {
+                mysqlEventParser.setTsdbSnapshotExpire(parameters.getTsdbSnapshotExpire());
+            }
+            boolean tsdbEnable = BooleanUtils.toBoolean(parameters.getTsdbEnable());
+            if (tsdbEnable) {
+                mysqlEventParser.setEnableTsdb(tsdbEnable);
+                mysqlEventParser.setTableMetaTSDBFactory(new DefaultTableMetaTSDBFactory() {
+
+                    @Override
+                    public void destory(String destination) {
+                        TableMetaTSDBBuilder.destory(destination);
+                    }
+
+                    @Override
+                    public TableMetaTSDB build(String destination, String springXml) {
+                        try {
+                            System.setProperty("canal.instance.tsdb.url", parameters.getTsdbJdbcUrl());
+                            System.setProperty("canal.instance.tsdb.dbUsername", parameters.getTsdbJdbcUserName());
+                            System.setProperty("canal.instance.tsdb.dbPassword", parameters.getTsdbJdbcPassword());
+
+                            return TableMetaTSDBBuilder.build(destination, "classpath:spring/tsdb/mysql-tsdb.xml");
+                        } finally {
+                            System.setProperty("canal.instance.tsdb.url", "");
+                            System.setProperty("canal.instance.tsdb.dbUsername", "");
+                            System.setProperty("canal.instance.tsdb.dbPassword", "");
+                        }
+                    }
+                });
+            }
             eventParser = mysqlEventParser;
         } else if (type.isLocalBinlog()) {
             LocalBinlogEventParser localBinlogEventParser = new LocalBinlogEventParser();
@@ -291,8 +346,8 @@ public class CanalInstanceWithManager extends AbstractCanalInstance {
                     parameters.getDbUsername(),
                     parameters.getDbPassword(),
                     parameters.getDefaultDatabaseName()));
-
             }
+
             eventParser = localBinlogEventParser;
         } else if (type.isOracle()) {
             throw new CanalException("unsupport SourcingType for " + type);

+ 63 - 0
instance/manager/src/main/java/com/alibaba/otter/canal/instance/manager/model/CanalParameter.java

@@ -39,6 +39,7 @@ public class CanalParameter implements Serializable {
     private BatchMode                storageBatchMode                   = BatchMode.MEMSIZE;         // 基于大小返回结果
     private Integer                  memoryStorageBufferSize            = 16 * 1024;                 // 内存存储的buffer大小
     private Integer                  memoryStorageBufferMemUnit         = 1024;                      // 内存存储的buffer内存占用单位,默认为1kb
+    private Boolean                  memoryStorageRawEntry              = Boolean.TRUE;              // 内存存储的对象是否启用raw的ByteString模式
     private String                   fileStorageDirectory;                                           // 文件存储的目录位置
     private Integer                  fileStorageStoreCount;                                          // 每个文件store存储的记录数
     private Integer                  fileStorageRollverCount;                                        // store文件的个数
@@ -97,6 +98,12 @@ public class CanalParameter implements Serializable {
     private String                   tsdbJdbcUrl;
     private String                   tsdbJdbcUserName;
     private String                   tsdbJdbcPassword;
+    private Integer                  tsdbSnapshotInterval               = 24;
+    private Integer                  tsdbSnapshotExpire                 = 360;
+    private String                   rdsAccesskey;
+    private String                   rdsSecretkey;
+    private String                   rdsInstanceId;
+    private Boolean                  gtidEnable                         = Boolean.FALSE;             // 是否开启gtid
     // ================================== 兼容字段处理
     private InetSocketAddress        masterAddress;                                                  // 主库信息
     private String                   masterUsername;                                                 // 帐号
@@ -919,6 +926,62 @@ public class CanalParameter implements Serializable {
         this.tsdbJdbcPassword = tsdbJdbcPassword;
     }
 
+    public String getRdsAccesskey() {
+        return rdsAccesskey;
+    }
+
+    public void setRdsAccesskey(String rdsAccesskey) {
+        this.rdsAccesskey = rdsAccesskey;
+    }
+
+    public String getRdsSecretkey() {
+        return rdsSecretkey;
+    }
+
+    public void setRdsSecretkey(String rdsSecretkey) {
+        this.rdsSecretkey = rdsSecretkey;
+    }
+
+    public String getRdsInstanceId() {
+        return rdsInstanceId;
+    }
+
+    public void setRdsInstanceId(String rdsInstanceId) {
+        this.rdsInstanceId = rdsInstanceId;
+    }
+
+    public Boolean getGtidEnable() {
+        return gtidEnable;
+    }
+
+    public void setGtidEnable(Boolean gtidEnable) {
+        this.gtidEnable = gtidEnable;
+    }
+
+    public Boolean getMemoryStorageRawEntry() {
+        return memoryStorageRawEntry;
+    }
+
+    public void setMemoryStorageRawEntry(Boolean memoryStorageRawEntry) {
+        this.memoryStorageRawEntry = memoryStorageRawEntry;
+    }
+
+    public Integer getTsdbSnapshotInterval() {
+        return tsdbSnapshotInterval;
+    }
+
+    public void setTsdbSnapshotInterval(Integer tsdbSnapshotInterval) {
+        this.tsdbSnapshotInterval = tsdbSnapshotInterval;
+    }
+
+    public Integer getTsdbSnapshotExpire() {
+        return tsdbSnapshotExpire;
+    }
+
+    public void setTsdbSnapshotExpire(Integer tsdbSnapshotExpire) {
+        this.tsdbSnapshotExpire = tsdbSnapshotExpire;
+    }
+
     public String toString() {
         return ToStringBuilder.reflectionToString(this, CanalToStringStyle.DEFAULT_STYLE);
     }

+ 18 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/AbstractMysqlEventParser.java

@@ -26,6 +26,8 @@ public abstract class AbstractMysqlEventParser extends AbstractEventParser {
 
     protected TableMetaTSDBFactory tableMetaTSDBFactory      = new DefaultTableMetaTSDBFactory();
     protected boolean              enableTsdb                = false;
+    protected int                  tsdbSnapshotInterval      = 24;
+    protected int                  tsdbSnapshotExpire        = 360;
     protected String               tsdbSpringXml;
     protected TableMetaTSDB        tableMetaTSDB;
 
@@ -210,4 +212,20 @@ public abstract class AbstractMysqlEventParser extends AbstractEventParser {
         return this.receivedBinlogBytes;
     }
 
+    public int getTsdbSnapshotInterval() {
+        return tsdbSnapshotInterval;
+    }
+
+    public void setTsdbSnapshotInterval(int tsdbSnapshotInterval) {
+        this.tsdbSnapshotInterval = tsdbSnapshotInterval;
+    }
+
+    public int getTsdbSnapshotExpire() {
+        return tsdbSnapshotExpire;
+    }
+
+    public void setTsdbSnapshotExpire(int tsdbSnapshotExpire) {
+        this.tsdbSnapshotExpire = tsdbSnapshotExpire;
+    }
+
 }

+ 2 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/LocalBinlogEventParser.java

@@ -55,6 +55,8 @@ public class LocalBinlogEventParser extends AbstractMysqlEventParser implements
             ((DatabaseTableMeta) tableMetaTSDB).setConnection(metaConnection);
             ((DatabaseTableMeta) tableMetaTSDB).setFilter(eventFilter);
             ((DatabaseTableMeta) tableMetaTSDB).setBlackFilter(eventBlackFilter);
+            ((DatabaseTableMeta) tableMetaTSDB).setSnapshotInterval(tsdbSnapshotInterval);
+            ((DatabaseTableMeta) tableMetaTSDB).setSnapshotExpire(tsdbSnapshotExpire);
         }
 
         tableMetaCache = new TableMetaCache(metaConnection, tableMetaTSDB);

+ 2 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlEventParser.java

@@ -121,6 +121,8 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
                 ((DatabaseTableMeta) tableMetaTSDB).setConnection(metaConnection);
                 ((DatabaseTableMeta) tableMetaTSDB).setFilter(eventFilter);
                 ((DatabaseTableMeta) tableMetaTSDB).setBlackFilter(eventBlackFilter);
+                ((DatabaseTableMeta) tableMetaTSDB).setSnapshotInterval(tsdbSnapshotInterval);
+                ((DatabaseTableMeta) tableMetaTSDB).setSnapshotExpire(tsdbSnapshotExpire);
             }
 
             tableMetaCache = new TableMetaCache(metaConnection, tableMetaTSDB);

+ 4 - 3
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/LogEventConvert.java

@@ -671,9 +671,6 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
             }
 
             int javaType = buffer.getJavaType();
-            if (isSingleBit && javaType == Types.TINYINT) {
-                javaType = Types.BIT;
-            }
             if (buffer.isNull()) {
                 columnBuilder.setIsNull(true);
             } else {
@@ -722,6 +719,10 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
                             // 对象为number类型,直接valueof即可
                             columnBuilder.setValue(String.valueOf(value));
                         }
+
+                        if (isSingleBit && javaType == Types.TINYINT) {
+                            javaType = Types.BIT;
+                        }
                         break;
                     case Types.REAL: // float
                     case Types.DOUBLE: // double

+ 49 - 15
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/DatabaseTableMeta.java

@@ -44,19 +44,21 @@ import com.alibaba.otter.canal.protocol.position.EntryPosition;
  */
 public class DatabaseTableMeta implements TableMetaTSDB {
 
-    public static final EntryPosition INIT_POSITION = new EntryPosition("0", 0L, -2L, -1L);
-    private static Logger             logger        = LoggerFactory.getLogger(DatabaseTableMeta.class);
-    private static Pattern            pattern       = Pattern.compile("Duplicate entry '.*' for key '*'");
-    private static Pattern            h2Pattern     = Pattern.compile("Unique index or primary key violation");
+    public static final EntryPosition INIT_POSITION    = new EntryPosition("0", 0L, -2L, -1L);
+    private static Logger             logger           = LoggerFactory.getLogger(DatabaseTableMeta.class);
+    private static Pattern            pattern          = Pattern.compile("Duplicate entry '.*' for key '*'");
+    private static Pattern            h2Pattern        = Pattern.compile("Unique index or primary key violation");
     private String                    destination;
     private MemoryTableMeta           memoryTableMeta;
-    private MysqlConnection           connection;                                                              // 查询meta信息的链接
+    private MysqlConnection           connection;                                                                 // 查询meta信息的链接
     private CanalEventFilter          filter;
     private CanalEventFilter          blackFilter;
     private EntryPosition             lastPosition;
     private ScheduledExecutorService  scheduler;
     private MetaHistoryDAO            metaHistoryDAO;
     private MetaSnapshotDAO           metaSnapshotDAO;
+    private int                       snapshotInterval = 24;
+    private int                       snapshotExpire   = 360;
 
     public DatabaseTableMeta(){
 
@@ -77,18 +79,30 @@ public class DatabaseTableMeta implements TableMetaTSDB {
         });
 
         // 24小时生成一份snapshot
-        scheduler.scheduleWithFixedDelay(new Runnable() {
+        if (snapshotInterval > 0) {
+            scheduler.scheduleWithFixedDelay(new Runnable() {
+
+                @Override
+                public void run() {
+                    boolean applyResult = false;
+                    try {
+                        MDC.put("destination", destination);
+                        applyResult = applySnapshotToDB(lastPosition, false);
+                    } catch (Throwable e) {
+                        logger.error("scheudle applySnapshotToDB faield", e);
+                    }
 
-            @Override
-            public void run() {
-                try {
-                    MDC.put("destination", destination);
-                    applySnapshotToDB(lastPosition, false);
-                } catch (Throwable e) {
-                    logger.error("scheudle applySnapshotToDB faield", e);
+                    try {
+                        MDC.put("destination", destination);
+                        if (applyResult) {
+                            snapshotExpire((int) TimeUnit.HOURS.toSeconds(snapshotExpire));
+                        }
+                    } catch (Throwable e) {
+                        logger.error("scheudle snapshotExpire faield", e);
+                    }
                 }
-            }
-        }, 24, 24, TimeUnit.HOURS);
+            }, snapshotInterval, snapshotInterval, TimeUnit.HOURS);
+        }
         return true;
     }
 
@@ -461,6 +475,10 @@ public class DatabaseTableMeta implements TableMetaTSDB {
         return true;
     }
 
+    private int snapshotExpire(int expireTimestamp) {
+        return metaSnapshotDAO.deleteByTimestamp(destination, expireTimestamp);
+    }
+
     public void setConnection(MysqlConnection connection) {
         this.connection = connection;
     }
@@ -489,6 +507,22 @@ public class DatabaseTableMeta implements TableMetaTSDB {
         this.blackFilter = blackFilter;
     }
 
+    public int getSnapshotInterval() {
+        return snapshotInterval;
+    }
+
+    public void setSnapshotInterval(int snapshotInterval) {
+        this.snapshotInterval = snapshotInterval;
+    }
+
+    public int getSnapshotExpire() {
+        return snapshotExpire;
+    }
+
+    public void setSnapshotExpire(int snapshotExpire) {
+        this.snapshotExpire = snapshotExpire;
+    }
+
     public MysqlConnection getConnection() {
         return connection;
     }

+ 4 - 7
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/dao/MetaHistoryDAO.java

@@ -1,7 +1,5 @@
 package com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao;
 
-import java.text.SimpleDateFormat;
-import java.util.Date;
 import java.util.HashMap;
 import java.util.List;
 
@@ -37,13 +35,12 @@ public class MetaHistoryDAO extends MetaBaseDAO {
     /**
      * 删除interval秒之前的数据
      */
-    public Integer deleteByGmtModified(int interval) {
+    public Integer deleteByTimestamp(String destination, int interval) {
         HashMap params = Maps.newHashMapWithExpectedSize(2);
         long timestamp = System.currentTimeMillis() - interval * 1000;
-        Date date = new Date(timestamp);
-        SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
-        params.put("timestamp", format.format(date));
-        return getSqlMapClientTemplate().delete("meta_history.deleteByGmtModified", params);
+        params.put("timestamp", timestamp);
+        params.put("destination", destination);
+        return getSqlMapClientTemplate().delete("meta_history.deleteByTimestamp", params);
     }
 
     protected void initDao() throws Exception {

+ 4 - 7
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/dao/MetaSnapshotDAO.java

@@ -1,7 +1,5 @@
 package com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao;
 
-import java.text.SimpleDateFormat;
-import java.util.Date;
 import java.util.HashMap;
 
 import com.google.common.collect.Maps;
@@ -40,13 +38,12 @@ public class MetaSnapshotDAO extends MetaBaseDAO {
     /**
      * 删除interval秒之前的数据
      */
-    public Integer deleteByGmtModified(int interval) {
+    public Integer deleteByTimestamp(String destination, int interval) {
         HashMap params = Maps.newHashMapWithExpectedSize(2);
         long timestamp = System.currentTimeMillis() - interval * 1000;
-        Date date = new Date(timestamp);
-        SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
-        params.put("timestamp", format.format(date));
-        return getSqlMapClientTemplate().delete("meta_snapshot.deleteByGmtModified", params);
+        params.put("timestamp", timestamp);
+        params.put("destination", destination);
+        return getSqlMapClientTemplate().delete("meta_snapshot.deleteByTimestamp", params);
     }
 
     protected void initDao() throws Exception {

+ 17 - 2
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/MetaHistoryDAOTest.java

@@ -16,7 +16,7 @@ import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaHistoryDO;
  * Created by wanshao Date: 2017/9/20 Time: 下午5:00
  **/
 @RunWith(SpringJUnit4ClassRunner.class)
-@ContextConfiguration(locations = { "/tsdb/mysql-tsdb.xml" })
+@ContextConfiguration(locations = { "/tsdb/h2-tsdb.xml" })
 public class MetaHistoryDAOTest {
 
     @Resource
@@ -24,7 +24,22 @@ public class MetaHistoryDAOTest {
 
     @Test
     public void testSimple() {
-        List<MetaHistoryDO> metaHistoryDOList = metaHistoryDAO.findByTimestamp("test", 0L, 0L);
+        MetaHistoryDO historyDO = new MetaHistoryDO();
+        historyDO.setDestination("test");
+        historyDO.setBinlogFile("000001");
+        historyDO.setBinlogOffest(4L);
+        historyDO.setBinlogMasterId("1");
+        historyDO.setBinlogTimestamp(System.currentTimeMillis() - 7300 * 1000);
+        historyDO.setSqlSchema("test");
+        historyDO.setUseSchema("test");
+        historyDO.setSqlTable("testTable");
+        historyDO.setSqlTable("drop table testTable");
+        metaHistoryDAO.insert(historyDO);
+
+        int count = metaHistoryDAO.deleteByTimestamp("test", 7200);
+        System.out.println(count);
+
+        List<MetaHistoryDO> metaHistoryDOList = metaHistoryDAO.findByTimestamp("test", 0L, System.currentTimeMillis());
         for (MetaHistoryDO metaHistoryDO : metaHistoryDOList) {
             System.out.println(metaHistoryDO.getId());
         }

+ 41 - 0
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/MetaSnapshotDAOTest.java

@@ -0,0 +1,41 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb;
+
+import javax.annotation.Resource;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
+
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaSnapshotDAO;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaSnapshotDO;
+
+/**
+ * Created by wanshao Date: 2017/9/20 Time: 下午5:00
+ **/
+@RunWith(SpringJUnit4ClassRunner.class)
+@ContextConfiguration(locations = { "/tsdb/h2-tsdb.xml" })
+public class MetaSnapshotDAOTest {
+
+    @Resource
+    MetaSnapshotDAO metaSnapshotDAO;
+
+    @Test
+    public void testSimple() {
+        MetaSnapshotDO metaSnapshotDO = new MetaSnapshotDO();
+        metaSnapshotDO.setDestination("test");
+        metaSnapshotDO.setBinlogFile("000001");
+        metaSnapshotDO.setBinlogOffest(4L);
+        metaSnapshotDO.setBinlogMasterId("1");
+        metaSnapshotDO.setBinlogTimestamp(System.currentTimeMillis() - 7300 * 1000);
+        metaSnapshotDO.setData("test");
+        metaSnapshotDAO.insert(metaSnapshotDO);
+
+        MetaSnapshotDO snapshotDO = metaSnapshotDAO.findByTimestamp("test", System.currentTimeMillis());
+        System.out.println(snapshotDO);
+
+        int count = metaSnapshotDAO.deleteByTimestamp("test", 7200);
+        System.out.println(count);
+    }
+
+}

+ 2 - 2
parse/src/test/resources/tsdb/sql-map/sqlmap_history.xml

@@ -36,10 +36,10 @@
     </delete>
 
 
-    <delete id="deleteByGmtModified" parameterClass="java.util.Map">
+    <delete id="deleteByTimestamp" parameterClass="java.util.Map">
         <![CDATA[
 		delete from meta_history
-		where gmt_modified < timestamp(#timestamp#)
+		where destination=#destination# and binlog_timestamp < #timestamp#
         ]]>
     </delete>
 </sqlMap>

+ 2 - 2
parse/src/test/resources/tsdb/sql-map/sqlmap_snapshot.xml

@@ -54,10 +54,10 @@
         where destination=#destination#
     </delete>
 
-    <delete id="deleteByGmtModified" parameterClass="java.util.Map">
+    <delete id="deleteByTimestamp" parameterClass="java.util.Map">
         <![CDATA[
 		delete from meta_snapshot
-		where gmt_modified < timestamp(#timestamp#)
+		where destination=#destination# and binlog_timestamp < #timestamp# and binlog_timestamp > 0
         ]]>
     </delete>
 </sqlMap>

+ 0 - 1
pom.xml

@@ -119,7 +119,6 @@
         <module>example</module>
         <module>prometheus</module>
         <module>client-adapter</module>
-        <module>client-launcher</module>
     </modules>
 
     <dependencyManagement>

+ 22 - 6
protocol/src/main/java/com/alibaba/otter/canal/protocol/FlatMessage.java

@@ -149,11 +149,19 @@ public class FlatMessage implements Serializable {
             }
 
             List<FlatMessage> flatMessages = new ArrayList<>();
+            List<CanalEntry.Entry> entrys = null;
+            if (message.isRaw()) {
+                List<ByteString> rawEntries = message.getRawEntries();
+                entrys = new ArrayList<CanalEntry.Entry>(rawEntries.size());
+                for (ByteString byteString : rawEntries) {
+                    CanalEntry.Entry entry = CanalEntry.Entry.parseFrom(byteString);
+                    entrys.add(entry);
+                }
+            } else {
+                entrys = message.getEntries();
+            }
 
-            List<ByteString> rawEntries = message.getRawEntries();
-
-            for (ByteString byteString : rawEntries) {
-                CanalEntry.Entry entry = CanalEntry.Entry.parseFrom(byteString);
+            for (CanalEntry.Entry entry : entrys) {
                 if (entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONBEGIN
                     || entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONEND) {
                     continue;
@@ -204,7 +212,11 @@ public class FlatMessage implements Serializable {
                         for (CanalEntry.Column column : columns) {
                             sqlType.put(column.getName(), column.getSqlType());
                             mysqlType.put(column.getName(), column.getMysqlType());
-                            row.put(column.getName(), column.getValue());
+                            if (column.getIsNull()) {
+                                row.put(column.getName(), null);
+                            } else  {
+                                row.put(column.getName(), column.getValue());
+                            }
                             // 获取update为true的字段
                             if (column.getUpdated()) {
                                 updateSet.add(column.getName());
@@ -218,7 +230,11 @@ public class FlatMessage implements Serializable {
                             Map<String, String> rowOld = new LinkedHashMap<>();
                             for (CanalEntry.Column column : rowData.getBeforeColumnsList()) {
                                 if (updateSet.contains(column.getName())) {
-                                    rowOld.put(column.getName(), column.getValue());
+                                    if (column.getIsNull()) {
+                                        rowOld.put(column.getName(), null);
+                                    } else {
+                                        rowOld.put(column.getName(), column.getValue());
+                                    }
                                 }
                             }
                             // update操作将记录修改前的值

+ 1 - 1
protocol/src/main/java/com/alibaba/otter/canal/protocol/Message.java

@@ -19,7 +19,6 @@ public class Message implements Serializable {
     private static final long      serialVersionUID = 1234034768477580009L;
 
     private long                   id;
-    @Deprecated
     private List<CanalEntry.Entry> entries          = new ArrayList<CanalEntry.Entry>();
     // row data for performance, see:
     // https://github.com/alibaba/canal/issues/726
@@ -39,6 +38,7 @@ public class Message implements Serializable {
         } else {
             this.entries = entries == null ? new ArrayList<Entry>() : entries;
         }
+        this.raw = raw;
     }
 
     public Message(long id){

+ 9 - 11
server/src/main/java/com/alibaba/otter/canal/kafka/CanalKafkaProducer.java

@@ -1,23 +1,21 @@
 package com.alibaba.otter.canal.kafka;
 
-import java.util.ArrayList;
 import java.util.List;
 import java.util.Properties;
-import java.util.concurrent.Future;
 
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.Producer;
 import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.clients.producer.RecordMetadata;
 import org.apache.kafka.common.serialization.StringSerializer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.alibaba.fastjson.JSON;
+import com.alibaba.fastjson.serializer.SerializerFeature;
 import com.alibaba.otter.canal.common.MQProperties;
 import com.alibaba.otter.canal.protocol.FlatMessage;
 import com.alibaba.otter.canal.protocol.Message;
-import com.alibaba.otter.canal.spi.CanalMQProducer;;
+import com.alibaba.otter.canal.spi.CanalMQProducer;
 
 /**
  * kafka producer 主操作类
@@ -103,8 +101,10 @@ public class CanalKafkaProducer implements CanalMQProducer {
                 for (FlatMessage flatMessage : flatMessages) {
                     if (canalDestination.getPartition() != null) {
                         try {
-                            ProducerRecord<String, String> record = new ProducerRecord<String, String>(canalDestination
-                                .getTopic(), canalDestination.getPartition(), null, JSON.toJSONString(flatMessage));
+                            ProducerRecord<String, String> record = new ProducerRecord<String, String>(canalDestination.getTopic(),
+                                canalDestination.getPartition(),
+                                null,
+                                JSON.toJSONString(flatMessage));
                             producer2.send(record);
                         } catch (Exception e) {
                             logger.error(e.getMessage(), e);
@@ -122,8 +122,7 @@ public class CanalKafkaProducer implements CanalMQProducer {
                                 FlatMessage flatMessagePart = partitionFlatMessage[i];
                                 if (flatMessagePart != null) {
                                     try {
-                                        ProducerRecord<String, String> record = new ProducerRecord<String, String>(
-                                            canalDestination.getTopic(),
+                                        ProducerRecord<String, String> record = new ProducerRecord<String, String>(canalDestination.getTopic(),
                                             i,
                                             null,
                                             JSON.toJSONString(flatMessagePart));
@@ -137,11 +136,10 @@ public class CanalKafkaProducer implements CanalMQProducer {
                             }
                         } else {
                             try {
-                                ProducerRecord<String, String> record = new ProducerRecord<String, String>(
-                                    canalDestination.getTopic(),
+                                ProducerRecord<String, String> record = new ProducerRecord<String, String>(canalDestination.getTopic(),
                                     0,
                                     null,
-                                    JSON.toJSONString(flatMessage));
+                                    JSON.toJSONString(flatMessage, SerializerFeature.WriteMapNullValue));
                                 producer2.send(record).get();
                             } catch (Exception e) {
                                 logger.error(e.getMessage(), e);

+ 5 - 0
server/src/main/java/com/alibaba/otter/canal/server/CanalMQStarter.java

@@ -57,6 +57,11 @@ public class CanalMQStarter {
                 System.setProperty("canal.instance.filter.transaction.entry", "true");
             }
 
+            if (properties.getFlatMessage()) {
+                // 针对flat message模式,设置为raw避免ByteString->Entry的二次解析
+                System.setProperty("canal.instance.memory.rawEntry", "false");
+            }
+
             // 对应每个instance启动一个worker线程
             List<MQProperties.CanalDestination> destinations = properties.getCanalDestinations();
 

+ 60 - 23
server/src/main/java/com/alibaba/otter/canal/server/embedded/CanalServerWithEmbedded.java

@@ -1,11 +1,12 @@
 package com.alibaba.otter.canal.server.embedded;
 
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.ServiceLoader;
 import java.util.concurrent.TimeUnit;
 
-import com.alibaba.otter.canal.spi.CanalMetricsProvider;
-import com.alibaba.otter.canal.spi.CanalMetricsService;
-import com.alibaba.otter.canal.spi.NopCanalMetricsService;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.MDC;
@@ -14,6 +15,7 @@ import org.springframework.util.CollectionUtils;
 import com.alibaba.otter.canal.common.AbstractCanalLifeCycle;
 import com.alibaba.otter.canal.instance.core.CanalInstance;
 import com.alibaba.otter.canal.instance.core.CanalInstanceGenerator;
+import com.alibaba.otter.canal.protocol.CanalEntry;
 import com.alibaba.otter.canal.protocol.ClientIdentity;
 import com.alibaba.otter.canal.protocol.Message;
 import com.alibaba.otter.canal.protocol.position.LogPosition;
@@ -22,7 +24,11 @@ import com.alibaba.otter.canal.protocol.position.PositionRange;
 import com.alibaba.otter.canal.server.CanalServer;
 import com.alibaba.otter.canal.server.CanalService;
 import com.alibaba.otter.canal.server.exception.CanalServerException;
+import com.alibaba.otter.canal.spi.CanalMetricsProvider;
+import com.alibaba.otter.canal.spi.CanalMetricsService;
+import com.alibaba.otter.canal.spi.NopCanalMetricsService;
 import com.alibaba.otter.canal.store.CanalEventStore;
+import com.alibaba.otter.canal.store.memory.MemoryEventStoreWithBuffer;
 import com.alibaba.otter.canal.store.model.Event;
 import com.alibaba.otter.canal.store.model.Events;
 import com.google.common.base.Function;
@@ -40,12 +46,12 @@ import com.google.protobuf.ByteString;
  */
 public class CanalServerWithEmbedded extends AbstractCanalLifeCycle implements CanalServer, CanalService {
 
-    private static final Logger        logger           = LoggerFactory.getLogger(CanalServerWithEmbedded.class);
+    private static final Logger        logger  = LoggerFactory.getLogger(CanalServerWithEmbedded.class);
     private Map<String, CanalInstance> canalInstances;
     // private Map<ClientIdentity, Position> lastRollbackPostions;
     private CanalInstanceGenerator     canalInstanceGenerator;
     private int                        metricsPort;
-    private CanalMetricsService        metrics          = NopCanalMetricsService.NOP;
+    private CanalMetricsService        metrics = NopCanalMetricsService.NOP;
 
     private static class SingletonHolder {
 
@@ -207,7 +213,7 @@ public class CanalServerWithEmbedded extends AbstractCanalLifeCycle implements C
      * b. 如果timeout不为null
      *    1. timeout为0,则采用get阻塞方式,获取数据,不设置超时,直到有足够的batchSize数据才返回
      *    2. timeout不为0,则采用get+timeout方式,获取数据,超时还没有batchSize足够的数据,有多少返回多少
-     *
+     * 
      * 注意: meta获取和数据的获取需要保证顺序性,优先拿到meta的,一定也会是优先拿到数据,所以需要加同步. (不能出现先拿到meta,拿到第二批数据,这样就会导致数据顺序性出现问题)
      * </pre>
      */
@@ -239,12 +245,23 @@ public class CanalServerWithEmbedded extends AbstractCanalLifeCycle implements C
             } else {
                 // 记录到流式信息
                 Long batchId = canalInstance.getMetaManager().addBatch(clientIdentity, events.getPositionRange());
-                List<ByteString> entrys = Lists.transform(events.getEvents(), new Function<Event, ByteString>() {
+                boolean raw = isRaw(canalInstance.getEventStore());
+                List entrys = null;
+                if (raw) {
+                    entrys = Lists.transform(events.getEvents(), new Function<Event, ByteString>() {
+
+                        public ByteString apply(Event input) {
+                            return input.getRawEntry();
+                        }
+                    });
+                } else {
+                    entrys = Lists.transform(events.getEvents(), new Function<Event, CanalEntry.Entry>() {
 
-                    public ByteString apply(Event input) {
-                        return input.getRawEntry();
-                    }
-                });
+                        public CanalEntry.Entry apply(Event input) {
+                            return input.getEntry();
+                        }
+                    });
+                }
                 if (logger.isInfoEnabled()) {
                     logger.info("get successfully, clientId:{} batchSize:{} real size is {} and result is [batchId:{} , position:{}]",
                         clientIdentity.getClientId(),
@@ -255,7 +272,7 @@ public class CanalServerWithEmbedded extends AbstractCanalLifeCycle implements C
                 }
                 // 直接提交ack
                 ack(clientIdentity, batchId);
-                return new Message(batchId, true, entrys);
+                return new Message(batchId, raw, entrys);
             }
         }
     }
@@ -283,7 +300,7 @@ public class CanalServerWithEmbedded extends AbstractCanalLifeCycle implements C
      * b. 如果timeout不为null
      *    1. timeout为0,则采用get阻塞方式,获取数据,不设置超时,直到有足够的batchSize数据才返回
      *    2. timeout不为0,则采用get+timeout方式,获取数据,超时还没有batchSize足够的数据,有多少返回多少
-     *
+     * 
      * 注意: meta获取和数据的获取需要保证顺序性,优先拿到meta的,一定也会是优先拿到数据,所以需要加同步. (不能出现先拿到meta,拿到第二批数据,这样就会导致数据顺序性出现问题)
      * </pre>
      */
@@ -311,7 +328,8 @@ public class CanalServerWithEmbedded extends AbstractCanalLifeCycle implements C
             }
 
             if (CollectionUtils.isEmpty(events.getEvents())) {
-                // logger.debug("getWithoutAck successfully, clientId:{} batchSize:{} but result
+                // logger.debug("getWithoutAck successfully, clientId:{}
+                // batchSize:{} but result
                 // is null",
                 // clientIdentity.getClientId(),
                 // batchSize);
@@ -319,12 +337,23 @@ public class CanalServerWithEmbedded extends AbstractCanalLifeCycle implements C
             } else {
                 // 记录到流式信息
                 Long batchId = canalInstance.getMetaManager().addBatch(clientIdentity, events.getPositionRange());
-                List<ByteString> entrys = Lists.transform(events.getEvents(), new Function<Event, ByteString>() {
+                boolean raw = isRaw(canalInstance.getEventStore());
+                List entrys = null;
+                if (raw) {
+                    entrys = Lists.transform(events.getEvents(), new Function<Event, ByteString>() {
+
+                        public ByteString apply(Event input) {
+                            return input.getRawEntry();
+                        }
+                    });
+                } else {
+                    entrys = Lists.transform(events.getEvents(), new Function<Event, CanalEntry.Entry>() {
 
-                    public ByteString apply(Event input) {
-                        return input.getRawEntry();
-                    }
-                });
+                        public CanalEntry.Entry apply(Event input) {
+                            return input.getEntry();
+                        }
+                    });
+                }
                 if (logger.isInfoEnabled()) {
                     logger.info("getWithoutAck successfully, clientId:{} batchSize:{}  real size is {} and result is [batchId:{} , position:{}]",
                         clientIdentity.getClientId(),
@@ -333,7 +362,7 @@ public class CanalServerWithEmbedded extends AbstractCanalLifeCycle implements C
                         batchId,
                         events.getPositionRange());
                 }
-                return new Message(batchId, true, entrys);
+                return new Message(batchId, raw, entrys);
             }
 
         }
@@ -515,17 +544,25 @@ public class CanalServerWithEmbedded extends AbstractCanalLifeCycle implements C
             // 发现provider, 进行初始化
             if (list.size() > 1) {
                 logger.warn("Found more than one CanalMetricsProvider, use the first one.");
-                //报告冲突
+                // 报告冲突
                 for (CanalMetricsProvider p : list) {
                     logger.warn("Found CanalMetricsProvider: {}.", p.getClass().getName());
                 }
             }
-            //默认使用第一个
+            // 默认使用第一个
             CanalMetricsProvider provider = list.get(0);
             this.metrics = provider.getService();
         }
     }
 
+    private boolean isRaw(CanalEventStore eventStore) {
+        if (eventStore instanceof MemoryEventStoreWithBuffer) {
+            return ((MemoryEventStoreWithBuffer) eventStore).isRaw();
+        }
+
+        return true;
+    }
+
     // ========= setter ==========
 
     public void setCanalInstanceGenerator(CanalInstanceGenerator canalInstanceGenerator) {

+ 8 - 2
sink/src/main/java/com/alibaba/otter/canal/sink/entry/EntryEventSink.java

@@ -20,6 +20,7 @@ import com.alibaba.otter.canal.sink.CanalEventDownStreamHandler;
 import com.alibaba.otter.canal.sink.CanalEventSink;
 import com.alibaba.otter.canal.sink.exception.CanalSinkException;
 import com.alibaba.otter.canal.store.CanalEventStore;
+import com.alibaba.otter.canal.store.memory.MemoryEventStoreWithBuffer;
 import com.alibaba.otter.canal.store.model.Event;
 
 /**
@@ -42,7 +43,8 @@ public class EntryEventSink extends AbstractCanalEventSink<List<CanalEntry.Entry
     protected AtomicLong           lastTransactionCount          = new AtomicLong(0L);
     protected volatile long        lastEmptyTransactionTimestamp = 0L;
     protected AtomicLong           lastEmptyTransactionCount     = new AtomicLong(0L);
-    private AtomicLong             eventsSinkBlockingTime        = new AtomicLong(0L);
+    protected AtomicLong           eventsSinkBlockingTime        = new AtomicLong(0L);
+    protected boolean              raw;
 
     public EntryEventSink(){
         addHandler(new HeartBeatEntryEventHandler());
@@ -52,6 +54,10 @@ public class EntryEventSink extends AbstractCanalEventSink<List<CanalEntry.Entry
         super.start();
         Assert.notNull(eventStore);
 
+        if (eventStore instanceof MemoryEventStoreWithBuffer) {
+            this.raw = ((MemoryEventStoreWithBuffer) eventStore).isRaw();
+        }
+
         for (CanalEventDownStreamHandler handler : getHandlers()) {
             if (!handler.isStart()) {
                 handler.start();
@@ -104,7 +110,7 @@ public class EntryEventSink extends AbstractCanalEventSink<List<CanalEntry.Entry
 
             hasRowData |= (entry.getEntryType() == EntryType.ROWDATA);
             hasHeartBeat |= (entry.getEntryType() == EntryType.HEARTBEAT);
-            Event event = new Event(new LogIdentity(remoteAddress, -1L), entry);
+            Event event = new Event(new LogIdentity(remoteAddress, -1L), entry, raw);
             events.add(event);
         }
 

+ 11 - 0
store/src/main/java/com/alibaba/otter/canal/store/memory/MemoryEventStoreWithBuffer.java

@@ -70,6 +70,7 @@ public class MemoryEventStoreWithBuffer extends AbstractCanalStoreScavenge imple
 
     private BatchMode         batchMode     = BatchMode.ITEMSIZE;                        // 默认为内存大小模式
     private boolean           ddlIsolation  = false;
+    private boolean           raw           = true;                                      // 针对entry是否开启raw模式
 
     public MemoryEventStoreWithBuffer(){
 
@@ -628,6 +629,14 @@ public class MemoryEventStoreWithBuffer extends AbstractCanalStoreScavenge imple
         this.ddlIsolation = ddlIsolation;
     }
 
+    public boolean isRaw() {
+        return raw;
+    }
+
+    public void setRaw(boolean raw) {
+        this.raw = raw;
+    }
+
     public AtomicLong getPutSequence() {
         return putSequence;
     }
@@ -671,4 +680,6 @@ public class MemoryEventStoreWithBuffer extends AbstractCanalStoreScavenge imple
     public AtomicLong getAckTableRows() {
         return ackTableRows;
     }
+
+
 }

+ 25 - 3
store/src/main/java/com/alibaba/otter/canal/store/model/Event.java

@@ -34,10 +34,17 @@ public class Event implements Serializable {
     private long              rawLength;
     private int               rowsCount;
 
+    // ==== https://github.com/alibaba/canal/issues/1019
+    private CanalEntry.Entry  entry;
+
     public Event(){
     }
 
     public Event(LogIdentity logIdentity, CanalEntry.Entry entry){
+        this(logIdentity, entry, true);
+    }
+
+    public Event(LogIdentity logIdentity, CanalEntry.Entry entry, boolean raw){
         this.logIdentity = logIdentity;
         this.entryType = entry.getEntryType();
         this.executeTime = entry.getHeader().getExecuteTime();
@@ -46,9 +53,6 @@ public class Event implements Serializable {
         this.serverId = entry.getHeader().getServerId();
         this.gtid = entry.getHeader().getGtid();
         this.eventType = entry.getHeader().getEventType();
-        // build raw
-        this.rawEntry = entry.toByteString();
-        this.rawLength = rawEntry.size();
         if (entryType == EntryType.ROWDATA) {
             List<CanalEntry.Pair> props = entry.getHeader().getPropsList();
             if (props != null) {
@@ -60,6 +64,16 @@ public class Event implements Serializable {
                 }
             }
         }
+
+        if (raw) {
+            // build raw
+            this.rawEntry = entry.toByteString();
+            this.rawLength = rawEntry.size();
+        } else {
+            this.entry = entry;
+            // 按照3倍的event length预估
+            this.rawLength = entry.getHeader().getEventLength() * 3;
+        }
     }
 
     public LogIdentity getLogIdentity() {
@@ -150,6 +164,14 @@ public class Event implements Serializable {
         this.rowsCount = rowsCount;
     }
 
+    public CanalEntry.Entry getEntry() {
+        return entry;
+    }
+
+    public void setEntry(CanalEntry.Entry entry) {
+        this.entry = entry;
+    }
+
     public String toString() {
         return ToStringBuilder.reflectionToString(this, CanalToStringStyle.DEFAULT_STYLE);
     }