Browse Source

Merge remote-tracking branch 'upstream/master'

winger 6 years ago
parent
commit
61aea888f6
77 changed files with 1896 additions and 1145 deletions
  1. 19 19
      README.md
  2. 0 2
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/CanalOuterAdapter.java
  3. 21 30
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/CanalClientConfig.java
  4. 3 3
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/CanalOuterAdapterConfiguration.java
  5. 14 2
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/Dml.java
  6. 56 34
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/ExtensionLoader.java
  7. 12 6
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/MessageUtil.java
  8. 5 1
      client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/SPI.java
  9. 1 5
      client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/HbaseAdapter.java
  10. 6 2
      client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/config/MappingConfig.java
  11. 18 7
      client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/service/HbaseSyncService.java
  12. 5 1
      client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/support/HbaseTemplate.java
  13. 10 26
      client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/support/PhType.java
  14. 14 8
      client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/support/PhTypeUtil.java
  15. 3 13
      client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/support/Type.java
  16. 2 1
      client-adapter/logger/src/main/java/com/alibaba/otter/canal/client/adapter/logger/LoggerAdapterExample.java
  17. 6 0
      client-launcher/pom.xml
  18. 4 3
      client-launcher/src/main/java/com/alibaba/otter/canal/client/ClientLauncher.java
  19. 37 4
      client-launcher/src/main/java/com/alibaba/otter/canal/client/adapter/loader/AbstractCanalAdapterWorker.java
  20. 35 30
      client-launcher/src/main/java/com/alibaba/otter/canal/client/adapter/loader/CanalAdapterKafkaWorker.java
  21. 48 44
      client-launcher/src/main/java/com/alibaba/otter/canal/client/adapter/loader/CanalAdapterLoader.java
  22. 124 0
      client-launcher/src/main/java/com/alibaba/otter/canal/client/adapter/loader/CanalAdapterRocketMQWorker.java
  23. 6 4
      client-launcher/src/main/resources/canal-client.yml
  24. 8 2
      client/pom.xml
  25. 55 0
      client/src/main/java/com/alibaba/otter/canal/client/CanalMessageDeserializer.java
  26. 3 6
      client/src/main/java/com/alibaba/otter/canal/client/impl/ClusterCanalConnector.java
  27. 9 34
      client/src/main/java/com/alibaba/otter/canal/client/impl/SimpleCanalConnector.java
  28. 1 1
      client/src/main/java/com/alibaba/otter/canal/client/impl/running/ClientRunningMonitor.java
  29. 2 3
      client/src/main/java/com/alibaba/otter/canal/client/kafka/KafkaCanalConnector.java
  30. 3 4
      client/src/main/java/com/alibaba/otter/canal/client/kafka/KafkaCanalConnectors.java
  31. 3 37
      client/src/main/java/com/alibaba/otter/canal/client/kafka/MessageDeserializer.java
  32. 4 5
      client/src/main/java/com/alibaba/otter/canal/client/kafka/running/ClientRunningMonitor.java
  33. 48 0
      client/src/main/java/com/alibaba/otter/canal/client/rocketmq/ConsumerBatchMessage.java
  34. 202 0
      client/src/main/java/com/alibaba/otter/canal/client/rocketmq/RocketMQCanalConnector.java
  35. 20 0
      client/src/main/java/com/alibaba/otter/canal/client/rocketmq/RocketMQCanalConnectorProvider.java
  36. 13 0
      client/src/main/java/com/alibaba/otter/canal/client/rocketmq/RocketMQCanalListener.java
  37. 114 114
      client/src/test/java/com/alibaba/otter/canal/client/running/ClientRunningTest.java
  38. 1 2
      client/src/test/java/com/alibaba/otter/canal/client/running/kafka/CanalKafkaClientExample.java
  39. 9 0
      client/src/test/java/com/alibaba/otter/canal/client/running/rocketmq/AbstractRocektMQTest.java
  40. 134 0
      client/src/test/java/com/alibaba/otter/canal/client/running/rocketmq/CanalRocketMQClientExample.java
  41. 3 0
      dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/LogDecoder.java
  42. 4 4
      dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/RowsLogBuffer.java
  43. 45 41
      dbsync/src/test/java/com/taobao/tddl/dbsync/binlog/DirectLogFetcherTest.java
  44. 1 1
      deployer/pom.xml
  45. 15 12
      deployer/src/main/java/com/alibaba/otter/canal/deployer/CanalLauncher.java
  46. 1 1
      deployer/src/main/resources/canal.properties
  47. 6 2
      deployer/src/main/resources/example/instance.properties
  48. 4 3
      deployer/src/main/resources/mq.yml
  49. 6 2
      deployer/src/main/resources/spring/default-instance.xml
  50. 7 3
      deployer/src/main/resources/spring/file-instance.xml
  51. 12 4
      deployer/src/main/resources/spring/group-instance.xml
  52. 7 3
      deployer/src/main/resources/spring/memory-instance.xml
  53. 3 1
      docker/image/admin/app.sh
  54. 10 6
      example/src/main/java/com/alibaba/otter/canal/example/SimpleCanalClientPermanceTest.java
  55. 4 1
      instance/spring/src/test/resources/retl/instance.properties
  56. 2 2
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/AbstractEventParser.java
  57. 0 2
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/MultiStageCoprocessor.java
  58. 23 25
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlMultiStageCoprocessor.java
  59. 29 2
      parse/src/main/java/com/alibaba/otter/canal/parse/support/AuthenticationInfo.java
  60. 8 4
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/FastsqlSchemaTest.java
  61. 15 0
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/MemoryTableMeta_DDL_Test.java
  62. 8 0
      parse/src/test/resources/ddl/ddl_test2.sql
  63. 2 2
      pom.xml
  64. 264 405
      protocol/src/main/java/com/alibaba/otter/canal/protocol/CanalEntry.java
  65. 4 3
      protocol/src/main/java/com/alibaba/otter/canal/protocol/ClientIdentity.java
  66. 25 7
      protocol/src/main/java/com/alibaba/otter/canal/protocol/FlatMessage.java
  67. 5 0
      server/pom.xml
  68. 73 0
      server/src/main/java/com/alibaba/otter/canal/common/CanalMessageSerializer.java
  69. 10 2
      server/src/main/java/com/alibaba/otter/canal/common/MQProperties.java
  70. 68 47
      server/src/main/java/com/alibaba/otter/canal/kafka/CanalKafkaProducer.java
  71. 2 62
      server/src/main/java/com/alibaba/otter/canal/kafka/MessageSerializer.java
  72. 66 0
      server/src/main/java/com/alibaba/otter/canal/rocketmq/CanalRocketMQProducer.java
  73. 38 38
      server/src/main/java/com/alibaba/otter/canal/server/CanalMQStarter.java
  74. 1 1
      server/src/main/java/com/alibaba/otter/canal/server/netty/handler/SessionHandler.java
  75. 37 0
      server/src/main/java/com/alibaba/otter/canal/spi/CanalMQProducer.java
  76. 1 0
      server/src/test/java/com/alibaba/otter/canal/server/ProtocolTest.java
  77. 9 6
      store/src/main/java/com/alibaba/otter/canal/store/memory/MemoryEventStoreWithBuffer.java

+ 19 - 19
README.md

@@ -54,25 +54,25 @@ See the wiki page for : <a href="https://github.com/alibaba/canal/wiki" >wiki文
 
 <h3><a name="table-of-contents" class="anchor" href="#table-of-contents"><span class="mini-icon mini-icon-link"></span></a>wiki文档列表</h3>
 <ul>
-<li><a class="internal present" href="/alibaba/canal/wiki/Home">Home</a></li>
-<li><a class="internal present" href="/alibaba/canal/wiki/Introduction">Introduction</a></li>
+<li><a class="internal present" href="https://github.com/alibaba/canal/wiki/Home">Home</a></li>
+<li><a class="internal present" href="https://github.com/alibaba/canal/wiki/Introduction">Introduction</a></li>
 <li>
-<a class="internal present" href="/alibaba/canal/wiki/QuickStart">QuickStart</a>
+<a class="internal present" href="https://github.com/alibaba/canal/wiki/QuickStart">QuickStart</a>
 <ul>
-<li><a class="internal present" href="/alibaba/canal/wiki/Docker-QuickStart">Docker QuickStart</a></li>
-<li><a class="internal present" href="/alibaba/canal/wiki/Canal-Kafka-QuickStart">Canal Kafka QuickStart</a></li>
-<li><a class="internal present" href="/alibaba/canal/wiki/aliyun-RDS-QuickStart">Aliyun RDS QuickStart</a></li>
-<li><a class="internal present" href="/alibaba/canal/wiki/Prometheus-QuickStart">Prometheus QuickStart</a></li>
+<li><a class="internal present" href="https://github.com/alibaba/canal/wiki/Docker-QuickStart">Docker QuickStart</a></li>
+<li><a class="internal present" href="https://github.com/alibaba/canal/wiki/Canal-Kafka-QuickStart">Canal Kafka QuickStart</a></li>
+<li><a class="internal present" href="https://github.com/alibaba/canal/wiki/aliyun-RDS-QuickStart">Aliyun RDS QuickStart</a></li>
+<li><a class="internal present" href="https://github.com/alibaba/canal/wiki/Prometheus-QuickStart">Prometheus QuickStart</a></li>
 </ul>
 </li>
-<li><a class="internal present" href="/alibaba/canal/wiki/AdminGuide">AdminGuide</a></li>
-<li><a class="internal present" href="/alibaba/canal/wiki/ClientExample">ClientExample</a></li>
-<li><a class="internal present" href="/alibaba/canal/wiki/ClientAPI">ClientAPI</a></li>
-<li><a class="internal present" href="/alibaba/canal/wiki/Performance">Performance</a></li>
-<li><a class="internal present" href="/alibaba/canal/wiki/DevGuide">DevGuide</a></li>
-<li><a class="internal present" href="/alibaba/canal/wiki/BinlogChange%28mysql5.6%29">BinlogChange(Mysql5.6)</a></li>
-<li><a class="internal present" href="/alibaba/canal/wiki/BinlogChange%28MariaDB%29">BinlogChange(MariaDB)</a></li>
-<li><a class="internal present" href="/alibaba/canal/wiki/TableMetaTSDB">TableMetaTSDB</a></li>
+<li><a class="internal present" href="https://github.com/alibaba/canal/wiki/AdminGuide">AdminGuide</a></li>
+<li><a class="internal present" href="https://github.com/alibaba/canal/wiki/ClientExample">ClientExample</a></li>
+<li><a class="internal present" href="https://github.com/alibaba/canal/wiki/ClientAPI">ClientAPI</a></li>
+<li><a class="internal present" href="https://github.com/alibaba/canal/wiki/Performance">Performance</a></li>
+<li><a class="internal present" href="https://github.com/alibaba/canal/wiki/DevGuide">DevGuide</a></li>
+<li><a class="internal present" href="https://github.com/alibaba/canal/wiki/BinlogChange%28mysql5.6%29">BinlogChange(Mysql5.6)</a></li>
+<li><a class="internal present" href="https://github.com/alibaba/canal/wiki/BinlogChange%28MariaDB%29">BinlogChange(MariaDB)</a></li>
+<li><a class="internal present" href="https://github.com/alibaba/canal/wiki/TableMetaTSDB">TableMetaTSDB</a></li>
 <li><a href="http://alibaba.github.com/canal/release.html">ReleaseNotes</a></li>
 <li><a href="https://github.com/alibaba/canal/releases">Download</a></li>
 <li><a class="internal present" href="/alibaba/canal/wiki/FAQ">FAQ</a></li>
@@ -82,8 +82,8 @@ See the wiki page for : <a href="https://github.com/alibaba/canal/wiki" >wiki文
 
 1. canal整体交互协议设计上使用了protobuf3.0,理论上可以支持绝大部分的多语言场景,欢迎大家提交多客户端的PR
     * canal java客户端: <a href="https://github.com/alibaba/canal/wiki/ClientExample"> https://github.com/alibaba/canal/wiki/ClientExample </a>
-    * canal c#客户端开源项目地址:<a href="https://github.com/CanalSharp/CanalSharp"> https://github.com/CanalSharp/CanalSharp </a>
-    * canal go客户端,开发进行中
+    * canal c#客户端开源项目地址:<a href="https://github.com/CanalClient/CanalSharp"> https://github.com/CanalClient/CanalSharp </a>
+    * canal go客户端开源项目地址:<a href="https://github.com/CanalClient/canal-go"> https://github.com/CanalClient/canal-go </a>
 2. canal作为MySQL binlog的增量获取工具,可以将数据投递到MQ系统中,比如Kafka/RocketMQ,可以借助于MQ的多语言能力 
 
 <h1>相关资料</h1>
@@ -116,10 +116,10 @@ See the wiki page for : <a href="https://github.com/alibaba/canal/wiki" >wiki文
 <h3>最新更新</h3>
 <ol>
 <li>canal发布重大版本更新1.1.0,具体releaseNode参考:<a href="https://github.com/alibaba/canal/releases/tag/canal-1.1.0">https://github.com/alibaba/canal/releases/tag/canal-1.1.0</a></li>
-<li>canal c#客户端开源项目地址:<a href="https://github.com/CanalSharp/CanalSharp"> https://github.com/CanalSharp/CanalSharp </a>,推荐! </li>
+<li>canal c#客户端开源项目地址:<a href="https://github.com/CanalClient/CanalSharp"> https://github.com/CanalClient/CanalSharp </a>,推荐! </li>
 <li>canal QQ讨论群已经建立,群号:161559791 ,欢迎加入进行技术讨论。</li>
 <li>canal消费端项目开源: Otter(分布式数据库同步系统),地址:<a href="https://github.com/alibaba/otter">https://github.com/alibaba/otter</a></li>
 
 <li>Canal已在阿里云推出商业化版本 <a href="https://www.aliyun.com/product/dts?spm=a2c4g.11186623.cloudEssentials.80.srdwr7">数据传输服务DTS</a>, 开通即用,免去部署维护的昂贵使用成本。DTS针对阿里云RDS、DRDS等产品进行了适配,解决了Binlog日志回收,主备切换、VPC网络切换等场景下的订阅高可用问题。同时,针对RDS进行了针对性的性能优化。出于稳定性、性能及成本的考虑,强烈推荐阿里云用户使用DTS产品。<a href="https://help.aliyun.com/document_detail/26592.html?spm=a2c4g.11174283.6.539.t1Y91E">DTS产品使用文档</a></li>
 DTS支持阿里云RDS&DRDS的Binlog日志实时订阅,现推出首月免费体验,限时限量,<a href="https://common-buy.aliyun.com/?commodityCode=dtspre&request=%7b%22dts_function%22%3a%22data_subscribe%22%7d">立即体验>>></a>
-</ol>
+</ol>

+ 0 - 2
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/CanalOuterAdapter.java

@@ -3,8 +3,6 @@ package com.alibaba.otter.canal.client.adapter;
 import com.alibaba.otter.canal.client.adapter.support.CanalOuterAdapterConfiguration;
 import com.alibaba.otter.canal.client.adapter.support.Dml;
 import com.alibaba.otter.canal.client.adapter.support.SPI;
-import com.alibaba.otter.canal.protocol.FlatMessage;
-import com.alibaba.otter.canal.protocol.Message;
 
 /**
  * 外部适配器接口

+ 21 - 30
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/CanalClientConfig.java

@@ -20,9 +20,9 @@ public class CanalClientConfig {
 
     private String              bootstrapServers;
 
-    private Boolean             flatMessage = true;
+    private List<MQTopic>       mqTopics;
 
-    private List<KafkaTopic>    kafkaTopics;
+    private Boolean             flatMessage = true;
 
     private List<CanalInstance> canalInstances;
 
@@ -58,6 +58,10 @@ public class CanalClientConfig {
         this.bootstrapServers = bootstrapServers;
     }
 
+    public List<MQTopic> getMqTopics() {
+        return mqTopics;
+    }
+
     public Boolean getFlatMessage() {
         return flatMessage;
     }
@@ -66,12 +70,8 @@ public class CanalClientConfig {
         this.flatMessage = flatMessage;
     }
 
-    public List<KafkaTopic> getKafkaTopics() {
-        return kafkaTopics;
-    }
-
-    public void setKafkaTopics(List<KafkaTopic> kafkaTopics) {
-        this.kafkaTopics = kafkaTopics;
+    public void setMqTopics(List<MQTopic> mqTopics) {
+        this.mqTopics = mqTopics;
     }
 
     public List<CanalInstance> getCanalInstances() {
@@ -120,12 +120,22 @@ public class CanalClientConfig {
         }
     }
 
-    public static class KafkaTopic {
+    public static class MQTopic {
+
+        private String      mqMode;
 
         private String      topic;
 
         private List<Group> groups = new ArrayList<>();
 
+        public String getMqMode() {
+            return mqMode;
+        }
+
+        public void setMqMode(String mqMode) {
+            this.mqMode = mqMode;
+        }
+
         public String getTopic() {
             return topic;
         }
@@ -167,25 +177,6 @@ public class CanalClientConfig {
             this.outAdapters = outAdapters;
         }
 
-        // public List<Adaptor> getAdapters() {
-        // return adapters;
-        // }
-        //
-        // public void setAdapters(List<Adaptor> adapters) {
-        // this.adapters = adapters;
-        // }
-    }
-
-    // public static class Adaptor {
-    // private List<CanalOuterAdapterConfiguration> outAdapters;
-    //
-    // public List<CanalOuterAdapterConfiguration> getOutAdapters() {
-    // return outAdapters;
-    // }
-    //
-    // public void setOutAdapters(List<CanalOuterAdapterConfiguration> outAdapters)
-    // {
-    // this.outAdapters = outAdapters;
-    // }
-    // }
+    }
+
 }

+ 3 - 3
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/CanalOuterAdapterConfiguration.java

@@ -10,11 +10,11 @@ import java.util.Properties;
  */
 public class CanalOuterAdapterConfiguration {
 
-    private String     name;       // 适配器名称, 如: logger, hbase, es
+    private String     name;      // 适配器名称, 如: logger, hbase, es
 
-    private String     hosts;      // 适配器内部的地址, 比如对应es该参数可以填写es的server地址
+    private String     hosts;     // 适配器内部的地址, 比如对应es该参数可以填写es的server地址
 
-    private String     zkHosts;    // 适配器内部的ZK地址, 比如对应HBase该参数可以填写HBase对应的ZK地址
+    private String     zkHosts;   // 适配器内部的ZK地址, 比如对应HBase该参数可以填写HBase对应的ZK地址
 
     private Properties properties; // 其余参数, 可填写适配器中的所需的配置信息
 

+ 14 - 2
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/Dml.java

@@ -17,6 +17,9 @@ public class Dml implements Serializable {
     private String                    database;
     private String                    table;
     private String                    type;
+    // binlog executeTime
+    private Long                      es;
+    // dml build timeStamp
     private Long                      ts;
     private String                    sql;
     private List<Map<String, Object>> data;
@@ -78,11 +81,20 @@ public class Dml implements Serializable {
         this.old = old;
     }
 
+    public Long getEs() {
+        return es;
+    }
+
+    public void setEs(Long es) {
+        this.es = es;
+    }
+
     public void clear() {
         database = null;
         table = null;
         type = null;
         ts = null;
+        es = null;
         data = null;
         old = null;
         sql = null;
@@ -90,7 +102,7 @@ public class Dml implements Serializable {
 
     @Override
     public String toString() {
-        return "Dml{" + "database='" + database + '\'' + ", table='" + table + '\'' + ", type='" + type + '\'' + ", ts="
-               + ts + ", sql='" + sql + '\'' + ", data=" + data + ", old=" + old + '}';
+        return "Dml [database=" + database + ", table=" + table + ", type=" + type + ", es=" + es + ", ts=" + ts
+               + ", sql=" + sql + ", data=" + data + ", old=" + old + "]";
     }
 }

+ 56 - 34
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/ExtensionLoader.java

@@ -1,11 +1,22 @@
 package com.alibaba.otter.canal.client.adapter.support;
 
-import java.io.*;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.io.InputStreamReader;
 import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLClassLoader;
 import java.nio.file.Paths;
-import java.util.*;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.TreeSet;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.regex.Pattern;
@@ -21,8 +32,7 @@ import org.slf4j.LoggerFactory;
  */
 public class ExtensionLoader<T> {
 
-    private static final Logger                                      logger                     = LoggerFactory
-        .getLogger(ExtensionLoader.class);
+    private static final Logger                                      logger                     = LoggerFactory.getLogger(ExtensionLoader.class);
 
     private static final String                                      SERVICES_DIRECTORY         = "META-INF/services/";
 
@@ -30,8 +40,7 @@ public class ExtensionLoader<T> {
 
     private static final String                                      DEFAULT_CLASSLOADER_POLICY = "internal";
 
-    private static final Pattern                                     NAME_SEPARATOR             = Pattern
-        .compile("\\s*[,]+\\s*");
+    private static final Pattern                                     NAME_SEPARATOR             = Pattern.compile("\\s*[,]+\\s*");
 
     private static final ConcurrentMap<Class<?>, ExtensionLoader<?>> EXTENSION_LOADERS          = new ConcurrentHashMap<>();
 
@@ -262,8 +271,7 @@ public class ExtensionLoader<T> {
             return instance;
         } catch (Throwable t) {
             throw new IllegalStateException("Extension instance(name: " + name + ", class: " + type
-                                            + ")  could not be instantiated: " + t.getMessage(),
-                t);
+                                            + ")  could not be instantiated: " + t.getMessage(), t);
         }
     }
 
@@ -271,8 +279,8 @@ public class ExtensionLoader<T> {
         if (type == null) throw new IllegalArgumentException("Extension type == null");
         if (name == null) throw new IllegalArgumentException("Extension name == null");
         Class<?> clazz = getExtensionClasses().get(name);
-        if (clazz == null)
-            throw new IllegalStateException("No such extension \"" + name + "\" for " + type.getName() + "!");
+        if (clazz == null) throw new IllegalStateException("No such extension \"" + name + "\" for " + type.getName()
+                                                           + "!");
         return clazz;
     }
 
@@ -296,10 +304,17 @@ public class ExtensionLoader<T> {
             throw new IllegalStateException("failed to get class loader resource");
         }
         String dirtyPath = url.toString();
-        String jarPath = dirtyPath.replaceAll("^.*file:/", ""); // removes file:/ and everything before it
-        jarPath = jarPath.replaceAll("jar!.*", "jar"); // removes everything after .jar, if .jar exists in dirtyPath
-        jarPath = jarPath.replaceAll("%20", " "); // necessary if path has spaces within
-        if (!jarPath.endsWith(".jar")) { // this is needed if you plan to run the app using Spring Tools Suit play
+        String jarPath = dirtyPath.replaceAll("^.*file:/", ""); // removes
+                                                                // file:/ and
+                                                                // everything
+                                                                // before it
+        jarPath = jarPath.replaceAll("jar!.*", "jar"); // removes everything
+                                                       // after .jar, if .jar
+                                                       // exists in dirtyPath
+        jarPath = jarPath.replaceAll("%20", " "); // necessary if path has
+                                                  // spaces within
+        if (!jarPath.endsWith(".jar")) { // this is needed if you plan to run
+                                         // the app using Spring Tools Suit play
                                          // button.
             jarPath = jarPath.replaceAll("/classes/.*", "/classes/");
         }
@@ -327,8 +342,8 @@ public class ExtensionLoader<T> {
         logger.info("extension classpath dir: " + dir);
         File externalLibDir = new File(dir);
         if (!externalLibDir.exists()) {
-            externalLibDir = new File(
-                File.separator + this.getJarDirectoryPath() + File.separator + "canal_client" + File.separator + "lib");
+            externalLibDir = new File(File.separator + this.getJarDirectoryPath() + File.separator + "canal_client"
+                                      + File.separator + "lib");
         }
         if (externalLibDir.exists()) {
             File[] files = externalLibDir.listFiles(new FilenameFilter() {
@@ -363,7 +378,8 @@ public class ExtensionLoader<T> {
                                 if (name.startsWith("java.") || name.startsWith("org.slf4j.")
                                     || name.startsWith("org.apache.logging")
                                     || name.startsWith("org.apache.commons.logging.")) {
-                                    // || name.startsWith("org.apache.hadoop.")) {
+                                    // || name.startsWith("org.apache.hadoop."))
+                                    // {
                                     c = super.loadClass(name);
                                 }
                                 if (c != null) return c;
@@ -386,7 +402,8 @@ public class ExtensionLoader<T> {
                                 @SuppressWarnings("unchecked")
                                 Enumeration<URL>[] tmp = (Enumeration<URL>[]) new Enumeration<?>[2];
 
-                                tmp[0] = findResources(name); // local class path first
+                                tmp[0] = findResources(name); // local class
+                                                              // path first
                                 // tmp[1] = super.getResources(name);
 
                                 return new CompoundEnumeration<>(tmp);
@@ -474,12 +491,16 @@ public class ExtensionLoader<T> {
                                         }
                                         if (line.length() > 0) {
                                             Class<?> clazz = classLoader.loadClass(line);
-                                            // Class<?> clazz = Class.forName(line, true, classLoader);
+                                            // Class<?> clazz =
+                                            // Class.forName(line, true,
+                                            // classLoader);
                                             if (!type.isAssignableFrom(clazz)) {
-                                                throw new IllegalStateException(
-                                                    "Error when load extension class(interface: " + type
-                                                                                + ", class line: " + clazz.getName()
-                                                                                + "), class " + clazz.getName()
+                                                throw new IllegalStateException("Error when load extension class(interface: "
+                                                                                + type
+                                                                                + ", class line: "
+                                                                                + clazz.getName()
+                                                                                + "), class "
+                                                                                + clazz.getName()
                                                                                 + "is not subtype of interface.");
                                             } else {
                                                 try {
@@ -497,9 +518,9 @@ public class ExtensionLoader<T> {
                                                                 extensionClasses.put(n, clazz);
                                                             } else if (c != clazz) {
                                                                 cachedNames.remove(clazz);
-                                                                throw new IllegalStateException(
-                                                                    "Duplicate extension " + type.getName() + " name "
-                                                                                                + n + " on "
+                                                                throw new IllegalStateException("Duplicate extension "
+                                                                                                + type.getName()
+                                                                                                + " name " + n + " on "
                                                                                                 + c.getName() + " and "
                                                                                                 + clazz.getName());
                                                             }
@@ -509,9 +530,12 @@ public class ExtensionLoader<T> {
                                             }
                                         }
                                     } catch (Throwable t) {
-                                        IllegalStateException e = new IllegalStateException(
-                                            "Failed to load extension class(interface: " + type + ", class line: "
-                                                                                            + line + ") in " + url
+                                        IllegalStateException e = new IllegalStateException("Failed to load extension class(interface: "
+                                                                                            + type
+                                                                                            + ", class line: "
+                                                                                            + line
+                                                                                            + ") in "
+                                                                                            + url
                                                                                             + ", cause: "
                                                                                             + t.getMessage(),
                                             t);
@@ -526,15 +550,13 @@ public class ExtensionLoader<T> {
                         }
                     } catch (Throwable t) {
                         logger.error("Exception when load extension class(interface: " + type + ", class file: " + url
-                                     + ") in " + url,
-                            t);
+                                     + ") in " + url, t);
                     }
                 } // end of while urls
             }
         } catch (Throwable t) {
-            logger.error(
-                "Exception when load extension class(interface: " + type + ", description file: " + fileName + ").",
-                t);
+            logger.error("Exception when load extension class(interface: " + type + ", description file: " + fileName
+                         + ").", t);
         }
     }
 

+ 12 - 6
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/MessageUtil.java

@@ -1,6 +1,11 @@
 package com.alibaba.otter.canal.client.adapter.support;
 
-import java.util.*;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 import com.alibaba.otter.canal.protocol.CanalEntry;
 import com.alibaba.otter.canal.protocol.FlatMessage;
@@ -40,6 +45,7 @@ public class MessageUtil {
             dml.setDatabase(entry.getHeader().getSchemaName());
             dml.setTable(entry.getHeader().getTableName());
             dml.setType(eventType.toString());
+            dml.setEs(entry.getHeader().getExecuteTime());
             dml.setTs(System.currentTimeMillis());
             dml.setSql(rowChange.getSql());
             List<Map<String, Object>> data = new ArrayList<>();
@@ -81,11 +87,10 @@ public class MessageUtil {
                         Map<String, Object> rowOld = new LinkedHashMap<>();
                         for (CanalEntry.Column column : rowData.getBeforeColumnsList()) {
                             if (updateSet.contains(column.getName())) {
-                                rowOld.put(column.getName(),
-                                    JdbcTypeUtil.typeConvert(column.getName(),
-                                        column.getValue(),
-                                        column.getSqlType(),
-                                        column.getMysqlType()));
+                                rowOld.put(column.getName(), JdbcTypeUtil.typeConvert(column.getName(),
+                                    column.getValue(),
+                                    column.getSqlType(),
+                                    column.getMysqlType()));
                             }
                         }
                         // update操作将记录修改前的值
@@ -114,6 +119,7 @@ public class MessageUtil {
         dml.setTable(flatMessage.getTable());
         dml.setType(flatMessage.getType());
         dml.setTs(flatMessage.getTs());
+        dml.setEs(flatMessage.getEs());
         dml.setSql(flatMessage.getSql());
         if (flatMessage.getSqlType() == null || flatMessage.getMysqlType() == null) {
             throw new RuntimeException("SqlType or mysqlType is null");

+ 5 - 1
client-adapter/common/src/main/java/com/alibaba/otter/canal/client/adapter/support/SPI.java

@@ -1,6 +1,10 @@
 package com.alibaba.otter.canal.client.adapter.support;
 
-import java.lang.annotation.*;
+import java.lang.annotation.Documented;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
 
 @Documented
 @Retention(RetentionPolicy.RUNTIME)

+ 1 - 5
client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/HbaseAdapter.java

@@ -4,7 +4,6 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 
-import com.alibaba.otter.canal.protocol.FlatMessage;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -17,9 +16,7 @@ import com.alibaba.otter.canal.client.adapter.hbase.config.MappingConfigLoader;
 import com.alibaba.otter.canal.client.adapter.hbase.service.HbaseSyncService;
 import com.alibaba.otter.canal.client.adapter.support.CanalOuterAdapterConfiguration;
 import com.alibaba.otter.canal.client.adapter.support.Dml;
-import com.alibaba.otter.canal.client.adapter.support.MessageUtil;
 import com.alibaba.otter.canal.client.adapter.support.SPI;
-import com.alibaba.otter.canal.protocol.Message;
 
 /**
  * HBase外部适配器
@@ -45,8 +42,7 @@ public class HbaseAdapter implements CanalOuterAdapter {
                         mappingConfigCache = new HashMap<>();
                         for (MappingConfig mappingConfig : hbaseMapping.values()) {
                             mappingConfigCache.put(mappingConfig.getHbaseOrm().getDatabase() + "-"
-                                                   + mappingConfig.getHbaseOrm().getTable(),
-                                mappingConfig);
+                                                   + mappingConfig.getHbaseOrm().getTable(), mappingConfig);
                         }
                     }
                 }

+ 6 - 2
client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/config/MappingConfig.java

@@ -1,6 +1,10 @@
 package com.alibaba.otter.canal.client.adapter.hbase.config;
 
-import java.util.*;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
 
 /**
  * HBase表映射配置
@@ -117,7 +121,7 @@ public class MappingConfig {
     }
 
     public enum Mode {
-                      STRING("STRING"), NATIVE("NATIVE"), PHOENIX("PHOENIX");
+        STRING("STRING"), NATIVE("NATIVE"), PHOENIX("PHOENIX");
 
         private String type;
 

+ 18 - 7
client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/service/HbaseSyncService.java

@@ -1,6 +1,10 @@
 package com.alibaba.otter.canal.client.adapter.hbase.service;
 
-import java.util.*;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -8,7 +12,12 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.alibaba.otter.canal.client.adapter.hbase.config.MappingConfig;
-import com.alibaba.otter.canal.client.adapter.hbase.support.*;
+import com.alibaba.otter.canal.client.adapter.hbase.support.HRow;
+import com.alibaba.otter.canal.client.adapter.hbase.support.HbaseTemplate;
+import com.alibaba.otter.canal.client.adapter.hbase.support.PhType;
+import com.alibaba.otter.canal.client.adapter.hbase.support.PhTypeUtil;
+import com.alibaba.otter.canal.client.adapter.hbase.support.Type;
+import com.alibaba.otter.canal.client.adapter.hbase.support.TypeUtil;
 import com.alibaba.otter.canal.client.adapter.support.Dml;
 
 /**
@@ -63,7 +72,8 @@ public class HbaseSyncService {
         MappingConfig.HbaseOrm hbaseOrm = config.getHbaseOrm();
 
         // if (!validHTable(config)) {
-        // logger.error("HBase table '{}' not exists", hbaseOrm.getHbaseTable());
+        // logger.error("HBase table '{}' not exists",
+        // hbaseOrm.getHbaseTable());
         // return;
         // }
         int i = 1;
@@ -156,7 +166,8 @@ public class HbaseSyncService {
         MappingConfig.HbaseOrm hbaseOrm = config.getHbaseOrm();
 
         // if (!validHTable(config)) {
-        // logger.error("HBase table '{}' not exists", hbaseOrm.getHbaseTable());
+        // logger.error("HBase table '{}' not exists",
+        // hbaseOrm.getHbaseTable());
         // return;
         // }
 
@@ -248,7 +259,8 @@ public class HbaseSyncService {
         MappingConfig.HbaseOrm hbaseOrm = config.getHbaseOrm();
 
         // if (!validHTable(config)) {
-        // logger.error("HBase table '{}' not exists", hbaseOrm.getHbaseTable());
+        // logger.error("HBase table '{}' not exists",
+        // hbaseOrm.getHbaseTable());
         // return;
         // }
 
@@ -363,8 +375,7 @@ public class HbaseSyncService {
      * @param value 值
      * @return 复合字段rowKey
      */
-    private static byte[] typeConvert(MappingConfig.ColumnItem columnItem, MappingConfig.HbaseOrm hbaseOrm,
-                                      Object value) {
+    private static byte[] typeConvert(MappingConfig.ColumnItem columnItem, MappingConfig.HbaseOrm hbaseOrm, Object value) {
         if (value == null) {
             return null;
         }

+ 5 - 1
client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/support/HbaseTemplate.java

@@ -8,7 +8,11 @@ import java.util.Set;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

+ 10 - 26
client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/support/PhType.java

@@ -13,37 +13,21 @@ import java.util.Date;
  * @version 1.0.0
  */
 public enum PhType {
-    DEFAULT(-1, "VARCHAR"),
-    UNSIGNED_INT(4, "UNSIGNED_INT"),
-    UNSIGNED_LONG(8, "UNSIGNED_LONG"),
-    UNSIGNED_TINYINT(1, "UNSIGNED_TINYINT"),
-    UNSIGNED_SMALLINT(2, "UNSIGNED_SMALLINT"),
-    UNSIGNED_FLOAT(4, "UNSIGNED_FLOAT"),
-    UNSIGNED_DOUBLE(8, "UNSIGNED_DOUBLE"),
-    INTEGER(4, "INTEGER"),
-    BIGINT(8, "BIGINT"),
-    TINYINT(1, "TINYINT"),
-    SMALLINT(2, "SMALLINT"),
-    FLOAT(4, "FLOAT"),
-    DOUBLE(8, "DOUBLE"),
-    DECIMAL(-1, "DECIMAL"),
-    BOOLEAN(1, "BOOLEAN"),
-    UNSIGNED_TIME(8, "UNSIGNED_TIME"),
-    UNSIGNED_DATE(8, "UNSIGNED_DATE"),
-    UNSIGNED_TIMESTAMP(12, "UNSIGNED_TIMESTAMP"),
-    TIME(8, "TIME"),
-    DATE(8, "DATE"),
-    TIMESTAMP(12, "TIMESTAMP"),
-    VARCHAR(-1, "VARCHAR"),
-    VARBINARY(-1, "VARBINARY");
+    DEFAULT(-1, "VARCHAR"), UNSIGNED_INT(4, "UNSIGNED_INT"), UNSIGNED_LONG(8, "UNSIGNED_LONG"),
+    UNSIGNED_TINYINT(1, "UNSIGNED_TINYINT"), UNSIGNED_SMALLINT(2, "UNSIGNED_SMALLINT"),
+    UNSIGNED_FLOAT(4, "UNSIGNED_FLOAT"), UNSIGNED_DOUBLE(8, "UNSIGNED_DOUBLE"), INTEGER(4, "INTEGER"),
+    BIGINT(8, "BIGINT"), TINYINT(1, "TINYINT"), SMALLINT(2, "SMALLINT"), FLOAT(4, "FLOAT"), DOUBLE(8, "DOUBLE"),
+    DECIMAL(-1, "DECIMAL"), BOOLEAN(1, "BOOLEAN"), UNSIGNED_TIME(8, "UNSIGNED_TIME"),
+    UNSIGNED_DATE(8, "UNSIGNED_DATE"), UNSIGNED_TIMESTAMP(12, "UNSIGNED_TIMESTAMP"), TIME(8, "TIME"), DATE(8, "DATE"),
+    TIMESTAMP(12, "TIMESTAMP"), VARCHAR(-1, "VARCHAR"), VARBINARY(-1, "VARBINARY");
 
     /**
      * -1:长度可变
      */
-    private int len;
+    private int    len;
     private String type;
 
-    PhType(int len, String type) {
+    PhType(int len, String type){
         this.len = len;
         this.type = type;
     }
@@ -87,7 +71,7 @@ public enum PhType {
             phType = VARCHAR;
         } else if (BigDecimal.class.isAssignableFrom(javaType)) {
             phType = DECIMAL;
-        }  else if (BigInteger.class.isAssignableFrom(javaType)) {
+        } else if (BigInteger.class.isAssignableFrom(javaType)) {
             phType = UNSIGNED_LONG;
         } else {
             phType = DEFAULT;

+ 14 - 8
client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/support/PhTypeUtil.java

@@ -169,7 +169,8 @@ public class PhTypeUtil {
 
     private static int encodeInt(int v, byte[] b, int o) {
         checkForSufficientLength(b, o, Bytes.SIZEOF_INT);
-        b[o + 0] = (byte) ((v >> 24) ^ 0x80); // Flip sign bit so that INTEGER is binary comparable
+        b[o + 0] = (byte) ((v >> 24) ^ 0x80); // Flip sign bit so that INTEGER
+                                              // is binary comparable
         b[o + 1] = (byte) (v >> 16);
         b[o + 2] = (byte) (v >> 8);
         b[o + 3] = (byte) v;
@@ -209,7 +210,8 @@ public class PhTypeUtil {
 
     private static int encodeLong(long v, byte[] b, int o) {
         checkForSufficientLength(b, o, Bytes.SIZEOF_LONG);
-        b[o + 0] = (byte) ((v >> 56) ^ 0x80); // Flip sign bit so that INTEGER is binary comparable
+        b[o + 0] = (byte) ((v >> 56) ^ 0x80); // Flip sign bit so that INTEGER
+                                              // is binary comparable
         b[o + 1] = (byte) (v >> 48);
         b[o + 2] = (byte) (v >> 40);
         b[o + 3] = (byte) (v >> 32);
@@ -255,7 +257,8 @@ public class PhTypeUtil {
 
     private static int encodeShort(short v, byte[] b, int o) {
         checkForSufficientLength(b, o, Bytes.SIZEOF_SHORT);
-        b[o + 0] = (byte) ((v >> 8) ^ 0x80); // Flip sign bit so that Short is binary comparable
+        b[o + 0] = (byte) ((v >> 8) ^ 0x80); // Flip sign bit so that Short is
+                                             // binary comparable
         b[o + 1] = (byte) v;
         return Bytes.SIZEOF_SHORT;
     }
@@ -287,7 +290,8 @@ public class PhTypeUtil {
 
     private static int encodeByte(byte v, byte[] b, int o) {
         checkForSufficientLength(b, o, Bytes.SIZEOF_BYTE);
-        b[o] = (byte) (v ^ 0x80); // Flip sign bit so that Short is binary comparable
+        b[o] = (byte) (v ^ 0x80); // Flip sign bit so that Short is binary
+                                  // comparable
         return Bytes.SIZEOF_BYTE;
     }
 
@@ -546,7 +550,8 @@ public class PhTypeUtil {
             multiplyBy = 10;
             divideBy = BigInteger.TEN;
         }
-        // Normalize the scale based on what is necessary to end up with a base 100
+        // Normalize the scale based on what is necessary to end up with a base
+        // 100
         // decimal (i.e. 10.123e3)
         int digitOffset;
         BigInteger compareAgainst;
@@ -564,7 +569,8 @@ public class PhTypeUtil {
             if (length <= MAX_BIG_DECIMAL_BYTES) {
                 result[--index] = NEG_TERMINAL_BYTE;
             } else {
-                // Adjust length and offset down because we don't have enough room
+                // Adjust length and offset down because we don't have enough
+                // room
                 length = MAX_BIG_DECIMAL_BYTES;
                 index = offset + length;
             }
@@ -593,8 +599,8 @@ public class PhTypeUtil {
 
     private static void checkForSufficientLength(byte[] b, int offset, int requiredLength) {
         if (b.length < offset + requiredLength) {
-            throw new RuntimeException(
-                "Expected length of at least " + requiredLength + " bytes, but had " + (b.length - offset));
+            throw new RuntimeException("Expected length of at least " + requiredLength + " bytes, but had "
+                                       + (b.length - offset));
         }
     }
 

+ 3 - 13
client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/support/Type.java

@@ -11,22 +11,12 @@ import java.util.Date;
  * @version 1.0.0
  */
 public enum Type {
-    DEFAULT("STRING"),
-    STRING("STRING"),
-    INTEGER("INTEGER"),
-    LONG("LONG"),
-    SHORT("SHORT"),
-    BOOLEAN("BOOLEAN"),
-    FLOAT("FLOAT"),
-    DOUBLE("DOUBLE"),
-    BIGDECIMAL("BIGDECIMAL"),
-    DATE("DATE"),
-    BYTE("BYTE"),
-    BYTES("BYTES");
+    DEFAULT("STRING"), STRING("STRING"), INTEGER("INTEGER"), LONG("LONG"), SHORT("SHORT"), BOOLEAN("BOOLEAN"),
+    FLOAT("FLOAT"), DOUBLE("DOUBLE"), BIGDECIMAL("BIGDECIMAL"), DATE("DATE"), BYTE("BYTE"), BYTES("BYTES");
 
     private String type;
 
-    Type(String type) {
+    Type(String type){
         this.type = type;
     }
 

+ 2 - 1
client-adapter/logger/src/main/java/com/alibaba/otter/canal/client/adapter/logger/LoggerAdapterExample.java

@@ -14,7 +14,8 @@ import com.alibaba.otter.canal.client.adapter.support.SPI;
  * @author machengyuan 2018-8-19 下午11:45:38
  * @version 1.0.0
  */
-@SPI("logger") // logger参数对应CanalOuterAdapterConfiguration配置中的name
+@SPI("logger")
+// logger参数对应CanalOuterAdapterConfiguration配置中的name
 public class LoggerAdapterExample implements CanalOuterAdapter {
 
     private Logger logger = LoggerFactory.getLogger(this.getClass());

+ 6 - 0
client-launcher/pom.xml

@@ -23,6 +23,12 @@
             <artifactId>canal.client</artifactId>
             <version>${project.version}</version>
         </dependency>
+        <!-- 单独引入rocketmq依赖 -->
+        <dependency>
+            <groupId>org.apache.rocketmq</groupId>
+            <artifactId>rocketmq-client</artifactId>
+            <version>4.3.0</version>
+        </dependency>
         <!-- 单独引入kafka依赖 -->
         <dependency>
             <groupId>org.apache.kafka</groupId>

+ 4 - 3
client-launcher/src/main/java/com/alibaba/otter/canal/client/ClientLauncher.java

@@ -1,13 +1,14 @@
 package com.alibaba.otter.canal.client;
 
-import com.alibaba.otter.canal.client.adapter.loader.CanalAdapterLoader;
-import com.alibaba.otter.canal.client.adapter.support.CanalClientConfig;
+import java.io.FileInputStream;
+
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.yaml.snakeyaml.Yaml;
 
-import java.io.FileInputStream;
+import com.alibaba.otter.canal.client.adapter.loader.CanalAdapterLoader;
+import com.alibaba.otter.canal.client.adapter.support.CanalClientConfig;
 
 public class ClientLauncher {
 

+ 37 - 4
client-launcher/src/main/java/com/alibaba/otter/canal/client/adapter/loader/AbstractCanalAdapterWorker.java

@@ -26,10 +26,10 @@ public abstract class AbstractCanalAdapterWorker {
 
     protected final Logger                    logger  = LoggerFactory.getLogger(this.getClass());
 
-    protected String                          canalDestination;                                                 // canal实例
-    protected List<List<CanalOuterAdapter>>   canalOuterAdapters;                                               // 外部适配器
-    protected ExecutorService                 groupInnerExecutorService;                                        // 组内工作线程池
-    protected volatile boolean                running = false;                                                  // 是否运行中
+    protected String                          canalDestination;                                  // canal实例
+    protected List<List<CanalOuterAdapter>>   canalOuterAdapters;                                // 外部适配器
+    protected ExecutorService                 groupInnerExecutorService;                         // 组内工作线程池
+    protected volatile boolean                running = false;                                   // 是否运行中
     protected Thread                          thread  = null;
     protected Thread.UncaughtExceptionHandler handler = new Thread.UncaughtExceptionHandler() {
 
@@ -129,6 +129,39 @@ public abstract class AbstractCanalAdapterWorker {
         }
     }
 
+    protected void writeOut(Message message, String topic) {
+        if (logger.isDebugEnabled()) {
+            logger.debug("topic: {} batchId: {} batchSize: {} ", topic, message.getId(), message.getEntries().size());
+        }
+        long begin = System.currentTimeMillis();
+        writeOut(message);
+        long now = System.currentTimeMillis();
+        if ((now - begin) > 5 * 60 * 1000) {
+            logger.error("topic: {} batchId {} elapsed time: {} ms", topic, message.getId(), now - begin);
+        }
+        if (logger.isDebugEnabled()) {
+            logger.debug("topic: {} batchId {} elapsed time: {} ms", topic, message.getId(), now - begin);
+        }
+    }
+
+    protected void stopOutAdapters() {
+        if (thread != null) {
+            try {
+                thread.join();
+            } catch (InterruptedException e) {
+                // ignore
+            }
+        }
+        groupInnerExecutorService.shutdown();
+        logger.info("topic connectors' worker thread dead!");
+        for (List<CanalOuterAdapter> outerAdapters : canalOuterAdapters) {
+            for (CanalOuterAdapter adapter : outerAdapters) {
+                adapter.destroy();
+            }
+        }
+        logger.info("topic all connectors destroyed!");
+    }
+
     public abstract void start();
 
     public abstract void stop();

+ 35 - 30
client-launcher/src/main/java/com/alibaba/otter/canal/client/adapter/loader/CanalAdapterKafkaWorker.java

@@ -96,7 +96,7 @@ public class CanalAdapterKafkaWorker extends AbstractCanalAdapterWorker {
         while (!running)
             ;
         ExecutorService executor = Executors.newSingleThreadExecutor();
-        final AtomicBoolean executing = new AtomicBoolean(true);
+        // final AtomicBoolean executing = new AtomicBoolean(true);
         while (running) {
             try {
                 logger.info("=============> Start to connect topic: {} <=============", this.topic);
@@ -116,36 +116,41 @@ public class CanalAdapterKafkaWorker extends AbstractCanalAdapterWorker {
                         }
                         if (messages != null) {
                             for (final Object message : messages) {
-                                executing.set(true);
-                                if (message != null) {
-                                    executor.submit(new Runnable() {
-
-                                        @Override
-                                        public void run() {
-                                            try {
-                                                if (message instanceof FlatMessage) {
-                                                    writeOut((FlatMessage) message);
-                                                } else {
-                                                    writeOut((Message) message);
-                                                }
-                                            } catch (Exception e) {
-                                                logger.error(e.getMessage(), e);
-                                            } finally {
-                                                executing.compareAndSet(true, false);
-                                            }
-                                        }
-                                    });
-
-                                    // 间隔一段时间ack一次, 防止因超时未响应切换到另外台客户端
-                                    long currentTS = System.currentTimeMillis();
-                                    while (executing.get()) {
-                                        // 大于10秒未消费完ack一次keep alive
-                                        if (System.currentTimeMillis() - currentTS > 10000) {
-                                            connector.ack();
-                                            currentTS = System.currentTimeMillis();
-                                        }
-                                    }
+                                if (message instanceof FlatMessage) {
+                                    writeOut((FlatMessage) message);
+                                } else {
+                                    writeOut((Message) message);
                                 }
+                                // executing.set(true);
+                                // if (message != null) {
+                                // executor.submit(new Runnable() {
+                                //
+                                // @Override
+                                // public void run() {
+                                // try {
+                                // if (message instanceof FlatMessage) {
+                                // writeOut((FlatMessage) message);
+                                // } else {
+                                // writeOut((Message) message);
+                                // }
+                                // } catch (Exception e) {
+                                // logger.error(e.getMessage(), e);
+                                // } finally {
+                                // executing.compareAndSet(true, false);
+                                // }
+                                // }
+                                // });
+                                //
+                                // // 间隔一段时间ack一次, 防止因超时未响应切换到另外台客户端
+                                // long currentTS = System.currentTimeMillis();
+                                // while (executing.get()) {
+                                // // 大于10秒未消费完ack一次keep alive
+                                // if (System.currentTimeMillis() - currentTS > 10000) {
+                                // connector.ack();
+                                // currentTS = System.currentTimeMillis();
+                                // }
+                                // }
+                                // }
                             }
                         }
                         connector.ack();

+ 48 - 44
client-launcher/src/main/java/com/alibaba/otter/canal/client/adapter/loader/CanalAdapterLoader.java

@@ -9,6 +9,7 @@ import java.util.Map;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 
+import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -18,38 +19,43 @@ import com.alibaba.otter.canal.client.adapter.support.CanalOuterAdapterConfigura
 import com.alibaba.otter.canal.client.adapter.support.ExtensionLoader;
 
 /**
- * 外部适配器的加载器
+ * MQ外部适配器的加载器
  *
- * @author machengyuan 2018-8-19 下午11:45:49
  * @version 1.0.0
  */
 public class CanalAdapterLoader {
 
-    private static final Logger                  logger            = LoggerFactory.getLogger(CanalAdapterLoader.class);
+    private static final Logger                     logger        = LoggerFactory.getLogger(CanalAdapterLoader.class);
 
-    private CanalClientConfig                    canalClientConfig;
+    private CanalClientConfig                       canalClientConfig;
 
-    private Map<String, CanalAdapterWorker>      canalWorkers      = new HashMap<>();
+    private Map<String, CanalAdapterWorker>         canalWorkers  = new HashMap<>();
 
-    private Map<String, CanalAdapterKafkaWorker> canalKafkaWorkers = new HashMap<>();
+    private Map<String, AbstractCanalAdapterWorker> canalMQWorker = new HashMap<>();
 
-    private ExtensionLoader<CanalOuterAdapter>   loader;
+    private ExtensionLoader<CanalOuterAdapter>      loader;
 
     public CanalAdapterLoader(CanalClientConfig canalClientConfig){
         this.canalClientConfig = canalClientConfig;
     }
 
     /**
-     * 初始化canal-client、 canal-client-kafka的适配器
+     * 初始化canal-client、 canal-client-rocketmq的适配器
      */
     public void init() {
-        // canal instances 和 kafka topics 配置不能同时为空
-        if (canalClientConfig.getCanalInstances() == null && canalClientConfig.getKafkaTopics() == null) {
-            throw new RuntimeException("Blank config property: canalInstances or canalKafkaTopics");
+        // canal instances 和 mq topics 配置不能同时为空
+        if (canalClientConfig.getCanalInstances() == null && canalClientConfig.getMqTopics() == null) {
+            throw new RuntimeException("Blank config property: canalInstances or canalMQTopics");
         }
 
-        loader = ExtensionLoader.getExtensionLoader(CanalOuterAdapter.class,
-            "" /* TODO canalClientConfig.getClassloaderPolicy() */);
+        loader = ExtensionLoader.getExtensionLoader(CanalOuterAdapter.class, "" /*
+                                                                                 * TODO
+                                                                                 * canalClientConfig
+                                                                                 * .
+                                                                                 * getClassloaderPolicy
+                                                                                 * (
+                                                                                 * )
+                                                                                 */);
 
         String canalServerHost = this.canalClientConfig.getCanalServerHost();
         SocketAddress sa = null;
@@ -59,13 +65,6 @@ public class CanalAdapterLoader {
         }
         String zkHosts = this.canalClientConfig.getZookeeperHosts();
 
-        boolean flatMessage = this.canalClientConfig.getFlatMessage();
-
-        // if (zkHosts == null && sa == null) {
-        // throw new RuntimeException("Blank config property: canalServerHost or
-        // zookeeperHosts");
-        // }
-
         // 初始化canal-client的适配器
         if (canalClientConfig.getCanalInstances() != null) {
             for (CanalClientConfig.CanalInstance instance : canalClientConfig.getCanalInstances()) {
@@ -90,32 +89,37 @@ public class CanalAdapterLoader {
             }
         }
 
-        // 初始化canal-client-kafka的适配器
-        if (canalClientConfig.getKafkaTopics() != null) {
-            for (CanalClientConfig.KafkaTopic kafkaTopic : canalClientConfig.getKafkaTopics()) {
-                for (CanalClientConfig.Group group : kafkaTopic.getGroups()) {
+        // 初始化canal-client-mq的适配器
+        if (canalClientConfig.getMqTopics() != null) {
+            for (CanalClientConfig.MQTopic topic : canalClientConfig.getMqTopics()) {
+                for (CanalClientConfig.Group group : topic.getGroups()) {
                     List<List<CanalOuterAdapter>> canalOuterAdapterGroups = new ArrayList<>();
 
                     List<CanalOuterAdapter> canalOuterAdapters = new ArrayList<>();
 
                     for (CanalOuterAdapterConfiguration config : group.getOutAdapters()) {
-                        // for (CanalOuterAdapterConfiguration config : adaptor.getOutAdapters()) {
                         loadConnector(config, canalOuterAdapters);
-                        // }
                     }
                     canalOuterAdapterGroups.add(canalOuterAdapters);
+                    if (StringUtils.isBlank(topic.getMqMode()) || "rocketmq".equalsIgnoreCase(topic.getMqMode())) {
+                        CanalAdapterRocketMQWorker rocketMQWorker = new CanalAdapterRocketMQWorker(canalClientConfig.getBootstrapServers(),
+                            topic.getTopic(),
+                            group.getGroupId(),
+                            canalOuterAdapterGroups);
+                        canalMQWorker.put(topic.getTopic() + "-rocketmq-" + group.getGroupId(), rocketMQWorker);
+                        rocketMQWorker.start();
+                    } else if ("kafka".equalsIgnoreCase(topic.getMqMode())) {
+                        CanalAdapterKafkaWorker canalKafkaWorker = new CanalAdapterKafkaWorker(canalClientConfig.getBootstrapServers(),
+                            topic.getTopic(),
+                            group.getGroupId(),
+                            canalOuterAdapterGroups,
+                            canalClientConfig.getFlatMessage());
+                        canalMQWorker.put(topic.getTopic() + "-kafka-" + group.getGroupId(), canalKafkaWorker);
+                        canalKafkaWorker.start();
+                    }
+                    logger.info("Start adapter for canal-client mq topic: {} succeed", topic.getTopic() + "-"
+                                                                                       + group.getGroupId());
 
-                    // String zkServers = canalClientConfig.getZookeeperHosts();
-                    CanalAdapterKafkaWorker canalKafkaWorker = new CanalAdapterKafkaWorker(
-                        canalClientConfig.getBootstrapServers(),
-                        kafkaTopic.getTopic(),
-                        group.getGroupId(),
-                        canalOuterAdapterGroups,
-                        flatMessage);
-                    canalKafkaWorkers.put(kafkaTopic.getTopic() + "-" + group.getGroupId(), canalKafkaWorker);
-                    canalKafkaWorker.start();
-                    logger.info("Start adapter for canal-client kafka topic: {} succeed",
-                        kafkaTopic.getTopic() + "-" + group.getGroupId());
                 }
             }
         }
@@ -154,19 +158,19 @@ public class CanalAdapterLoader {
             }
             stopExecutorService.shutdown();
         }
-        if (canalKafkaWorkers.size() > 0) {
-            ExecutorService stopKafkaExecutorService = Executors.newFixedThreadPool(canalKafkaWorkers.size());
-            for (CanalAdapterKafkaWorker v : canalKafkaWorkers.values()) {
-                final CanalAdapterKafkaWorker cakw = v;
-                stopKafkaExecutorService.submit(new Runnable() {
+        if (canalMQWorker.size() > 0) {
+            ExecutorService stopMQWorkerService = Executors.newFixedThreadPool(canalMQWorker.size());
+            for (AbstractCanalAdapterWorker tmp : canalMQWorker.values()) {
+                final AbstractCanalAdapterWorker worker = tmp;
+                stopMQWorkerService.submit(new Runnable() {
 
                     @Override
                     public void run() {
-                        cakw.stop();
+                        worker.stop();
                     }
                 });
             }
-            stopKafkaExecutorService.shutdown();
+            stopMQWorkerService.shutdown();
         }
         logger.info("All canal adapters destroyed");
     }

+ 124 - 0
client-launcher/src/main/java/com/alibaba/otter/canal/client/adapter/loader/CanalAdapterRocketMQWorker.java

@@ -0,0 +1,124 @@
+package com.alibaba.otter.canal.client.adapter.loader;
+
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.kafka.clients.consumer.CommitFailedException;
+import org.apache.kafka.common.errors.WakeupException;
+
+import com.alibaba.otter.canal.client.adapter.CanalOuterAdapter;
+import com.alibaba.otter.canal.client.rocketmq.RocketMQCanalConnector;
+import com.alibaba.otter.canal.client.rocketmq.RocketMQCanalConnectorProvider;
+import com.alibaba.otter.canal.protocol.Message;
+
+/**
+ * RocketMQ对应的client适配器工作线程 (RocketMQ client adapter worker thread)
+ *
+ * @author machengyuan 2018-8-19 下午11:30:49
+ * @version 1.0.0
+ */
+public class CanalAdapterRocketMQWorker extends AbstractCanalAdapterWorker {
+
+    private RocketMQCanalConnector connector;
+
+    private String                 topic;
+
+    public CanalAdapterRocketMQWorker(String nameServers, String topic, String groupId,
+                                      List<List<CanalOuterAdapter>> canalOuterAdapters){
+        logger.info("RocketMQ consumer config topic:{}, nameServer:{}, groupId:{}", topic, nameServers, groupId);
+        this.canalOuterAdapters = canalOuterAdapters;
+        this.groupInnerExecutorService = Executors.newFixedThreadPool(canalOuterAdapters.size());
+        this.topic = topic;
+        this.canalDestination = topic;
+        connector = RocketMQCanalConnectorProvider.newRocketMQConnector(nameServers, topic, groupId);
+    }
+
+    @Override
+    public void start() {
+        if (!running) {
+            thread = new Thread(new Runnable() {
+
+                @Override
+                public void run() {
+                    process();
+                }
+            });
+            thread.setUncaughtExceptionHandler(handler);
+            running = true;
+            thread.start();
+        }
+    }
+
+    @Override
+    public void stop() {
+        try {
+            if (!running) {
+                return;
+            }
+            connector.stopRunning();
+            running = false;
+            logger.info("Stop topic {} out adapters begin", this.topic);
+            stopOutAdapters();
+            logger.info("Stop topic {} out adapters end", this.topic);
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+        }
+    }
+
+    private void process() {
+        while (!running)
+            ;
+        ExecutorService executor = Executors.newSingleThreadExecutor();
+        while (running) {
+            try {
+                logger.info("=============> Start to connect topic: {} <=============", this.topic);
+                connector.connect();
+                logger.info("=============> Start to subscribe topic: {}<=============", this.topic);
+                connector.subscribe();
+                logger.info("=============> Subscribe topic: {} succeed<=============", this.topic);
+                while (running) {
+                    try {
+                        // switcher.get(); //等待开关开启
+
+                        final Message message = connector.getWithoutAck(1);
+                        if (message != null) {
+                            executor.submit(new Runnable() {
+
+                                @Override
+                                public void run() {
+                                    try {
+                                        writeOut(message, topic);
+                                    } catch (Exception e) {
+                                        logger.error(e.getMessage(), e);
+                                    }
+                                    connector.ack(message.getId());
+                                }
+                            });
+                        } else {
+                            logger.debug("Message is null");
+                        }
+                    } catch (CommitFailedException e) {
+                        logger.warn(e.getMessage());
+                    } catch (Exception e) {
+                        logger.error(e.getMessage(), e);
+                        TimeUnit.SECONDS.sleep(1L);
+                    }
+                }
+            } catch (Exception e) {
+                logger.error(e.getMessage(), e);
+            }
+        }
+
+        executor.shutdown();
+
+        try {
+            connector.unsubscribe();
+        } catch (WakeupException e) {
+            // No-op. Continue processing
+        }
+        connector.stopRunning();
+        logger.info("=============> Disconnect topic: {} <=============", this.topic);
+    }
+}

+ 6 - 4
client-launcher/src/main/resources/canal-client.yml

@@ -1,6 +1,6 @@
 #canalServerHost: 127.0.0.1:11111
 #zookeeperHosts: slave1:2181
-bootstrapServers: slave1:6667,slave2:6667
+bootstrapServers: slave1:6667,slave2:6667 # for kafka; or RocketMQ name servers, e.g. host1:9876;host2:9876
 flatMessage: true
 
 #canalInstances:
@@ -11,10 +11,12 @@ flatMessage: true
 #    - name: hbase
 #      hosts: slave1:2181
 #      properties: {znodeParent: "/hbase-unsecure"}
-kafkaTopics:
-- topic: example
+
+mqTopics:
+- mqMode: rocketmq
+  topic: example
   groups:
-  - groupId: egroup
+  - groupId: example
     outAdapters:
     - name: logger
 #    - name: hbase

+ 8 - 2
client/pom.xml

@@ -101,8 +101,14 @@
 			<version>${spring_version}</version>
 			<scope>test</scope>
 		</dependency>
-
-		<!-- 客户端要使用请单独引入kafka依赖 -->
+        <!-- 客户端要使用请单独引入rocketmq-client依赖 -->
+		<dependency>
+			<groupId>org.apache.rocketmq</groupId>
+			<artifactId>rocketmq-client</artifactId>
+			<version>4.3.0</version>
+            <scope>provided</scope>
+		</dependency>
+		<!-- 客户端要使用请单独引入kafka-clients依赖 -->
 		<dependency>
 			<groupId>org.apache.kafka</groupId>
 			<artifactId>kafka-clients</artifactId>

+ 55 - 0
client/src/main/java/com/alibaba/otter/canal/client/CanalMessageDeserializer.java

@@ -0,0 +1,55 @@
+package com.alibaba.otter.canal.client;
+
+import com.alibaba.otter.canal.protocol.CanalEntry;
+import com.alibaba.otter.canal.protocol.CanalPacket;
+import com.alibaba.otter.canal.protocol.CanalPacket.Ack;
+import com.alibaba.otter.canal.protocol.CanalPacket.Compression;
+import com.alibaba.otter.canal.protocol.Message;
+import com.alibaba.otter.canal.protocol.exception.CanalClientException;
+import com.google.protobuf.ByteString;
+
+public class CanalMessageDeserializer {
+
+    public static Message deserializer(byte[] data) {
+        return deserializer(data, false);
+    }
+
+    public static Message deserializer(byte[] data, boolean lazyParseEntry) {
+        try {
+            if (data == null) {
+                return null;
+            } else {
+                CanalPacket.Packet p = CanalPacket.Packet.parseFrom(data);
+                switch (p.getType()) {
+                    case MESSAGES: {
+                        if (!p.getCompression().equals(Compression.NONE)
+                            && !p.getCompression().equals(Compression.COMPRESSIONCOMPATIBLEPROTO2)) {
+                            throw new CanalClientException("compression is not supported in this connector");
+                        }
+
+                        CanalPacket.Messages messages = CanalPacket.Messages.parseFrom(p.getBody());
+                        Message result = new Message(messages.getBatchId());
+                        if (lazyParseEntry) {
+                            // byteString
+                            result.setRawEntries(messages.getMessagesList());
+                        } else {
+                            for (ByteString byteString : messages.getMessagesList()) {
+                                result.addEntry(CanalEntry.Entry.parseFrom(byteString));
+                            }
+                        }
+                        return result;
+                    }
+                    case ACK: {
+                        Ack ack = Ack.parseFrom(p.getBody());
+                        throw new CanalClientException("something goes wrong with reason: " + ack.getErrorMessage());
+                    }
+                    default: {
+                        throw new CanalClientException("unexpected packet type: " + p.getType());
+                    }
+                }
+            }
+        } catch (Exception e) {
+            throw new CanalClientException("deserializer failed", e);
+        }
+    }
+}

+ 3 - 6
client/src/main/java/com/alibaba/otter/canal/client/impl/ClusterCanalConnector.java

@@ -218,8 +218,7 @@ public class ClusterCanalConnector implements CanalConnector {
                 return;
             } catch (Throwable t) {
                 logger.warn(String.format("something goes wrong when rollbacking data from server:%s",
-                    currentConnector != null ? currentConnector.getAddress() : "null"),
-                    t);
+                    currentConnector != null ? currentConnector.getAddress() : "null"), t);
                 times++;
                 restart();
                 logger.info("restart the connector for next round retry.");
@@ -236,8 +235,7 @@ public class ClusterCanalConnector implements CanalConnector {
                 return;
             } catch (Throwable t) {
                 logger.warn(String.format("something goes wrong when rollbacking data from server:%s",
-                    currentConnector != null ? currentConnector.getAddress() : "null"),
-                    t);
+                    currentConnector != null ? currentConnector.getAddress() : "null"), t);
                 times++;
                 restart();
                 logger.info("restart the connector for next round retry.");
@@ -255,8 +253,7 @@ public class ClusterCanalConnector implements CanalConnector {
                 return;
             } catch (Throwable t) {
                 logger.warn(String.format("something goes wrong when acking data from server:%s",
-                    currentConnector != null ? currentConnector.getAddress() : "null"),
-                    t);
+                    currentConnector != null ? currentConnector.getAddress() : "null"), t);
                 times++;
                 restart();
                 logger.info("restart the connector for next round retry.");

+ 9 - 34
client/src/main/java/com/alibaba/otter/canal/client/impl/SimpleCanalConnector.java

@@ -18,13 +18,13 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.alibaba.otter.canal.client.CanalConnector;
+import com.alibaba.otter.canal.client.CanalMessageDeserializer;
 import com.alibaba.otter.canal.client.impl.running.ClientRunningData;
 import com.alibaba.otter.canal.client.impl.running.ClientRunningListener;
 import com.alibaba.otter.canal.client.impl.running.ClientRunningMonitor;
 import com.alibaba.otter.canal.common.utils.AddressUtils;
 import com.alibaba.otter.canal.common.utils.BooleanMutex;
 import com.alibaba.otter.canal.common.zookeeper.ZkClientx;
-import com.alibaba.otter.canal.protocol.CanalEntry.Entry;
 import com.alibaba.otter.canal.protocol.CanalPacket.Ack;
 import com.alibaba.otter.canal.protocol.CanalPacket.ClientAck;
 import com.alibaba.otter.canal.protocol.CanalPacket.ClientAuth;
@@ -32,7 +32,6 @@ import com.alibaba.otter.canal.protocol.CanalPacket.ClientRollback;
 import com.alibaba.otter.canal.protocol.CanalPacket.Compression;
 import com.alibaba.otter.canal.protocol.CanalPacket.Get;
 import com.alibaba.otter.canal.protocol.CanalPacket.Handshake;
-import com.alibaba.otter.canal.protocol.CanalPacket.Messages;
 import com.alibaba.otter.canal.protocol.CanalPacket.Packet;
 import com.alibaba.otter.canal.protocol.CanalPacket.PacketType;
 import com.alibaba.otter.canal.protocol.CanalPacket.Sub;
@@ -180,8 +179,8 @@ public class SimpleCanalConnector implements CanalConnector {
 
             Ack ackBody = Ack.parseFrom(ack.getBody());
             if (ackBody.getErrorCode() > 0) {
-                throw new CanalClientException(
-                    "something goes wrong when doing authentication: " + ackBody.getErrorMessage());
+                throw new CanalClientException("something goes wrong when doing authentication: "
+                                               + ackBody.getErrorMessage());
             }
 
             connected = true;
@@ -320,34 +319,7 @@ public class SimpleCanalConnector implements CanalConnector {
 
     private Message receiveMessages() throws IOException {
         byte[] data = readNextPacket();
-        Packet p = Packet.parseFrom(data);
-        switch (p.getType()) {
-            case MESSAGES: {
-                if (!p.getCompression().equals(Compression.NONE)
-                    && !p.getCompression().equals(Compression.COMPRESSIONCOMPATIBLEPROTO2)) {
-                    throw new CanalClientException("compression is not supported in this connector");
-                }
-
-                Messages messages = Messages.parseFrom(p.getBody());
-                Message result = new Message(messages.getBatchId());
-                if (lazyParseEntry) {
-                    // byteString
-                    result.setRawEntries(messages.getMessagesList());
-                } else {
-                    for (ByteString byteString : messages.getMessagesList()) {
-                        result.addEntry(Entry.parseFrom(byteString));
-                    }
-                }
-                return result;
-            }
-            case ACK: {
-                Ack ack = Ack.parseFrom(p.getBody());
-                throw new CanalClientException("something goes wrong with reason: " + ack.getErrorMessage());
-            }
-            default: {
-                throw new CanalClientException("unexpected packet type: " + p.getType());
-            }
-        }
+        return CanalMessageDeserializer.deserializer(data, lazyParseEntry);
     }
 
     public void ack(long batchId) throws CanalClientException {
@@ -361,8 +333,11 @@ public class SimpleCanalConnector implements CanalConnector {
             .setBatchId(batchId)
             .build();
         try {
-            writeWithHeader(
-                Packet.newBuilder().setType(PacketType.CLIENTACK).setBody(ca.toByteString()).build().toByteArray());
+            writeWithHeader(Packet.newBuilder()
+                .setType(PacketType.CLIENTACK)
+                .setBody(ca.toByteString())
+                .build()
+                .toByteArray());
         } catch (IOException e) {
             throw new CanalClientException(e);
         }

+ 1 - 1
client/src/main/java/com/alibaba/otter/canal/client/impl/running/ClientRunningMonitor.java

@@ -98,7 +98,7 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
         String path = ZookeeperPathUtils.getDestinationClientRunning(this.destination, clientData.getClientId());
         zkClient.unsubscribeDataChanges(path, dataListener);
         releaseRunning(); // 尝试一下release
-        //Fix issue #697
+        // Fix issue #697
         if (delayExector != null) {
             delayExector.shutdown();
         }

+ 2 - 3
client/src/main/java/com/alibaba/otter/canal/client/kafka/KafkaCanalConnector.java

@@ -33,8 +33,7 @@ public class KafkaCanalConnector {
     private volatile boolean               running   = false;
     private boolean                        flatMessage;
 
-    public KafkaCanalConnector(String servers, String topic, Integer partition, String groupId,
-                               boolean flatMessage){
+    public KafkaCanalConnector(String servers, String topic, Integer partition, String groupId, boolean flatMessage){
         this.topic = topic;
         this.partition = partition;
         this.flatMessage = flatMessage;
@@ -234,7 +233,7 @@ public class KafkaCanalConnector {
             kafkaConsumer.commitSync();
         }
         if (kafkaConsumer2 != null) {
-            kafkaConsumer2.commitAsync();
+            kafkaConsumer2.commitSync();
         }
     }
 

+ 3 - 4
client/src/main/java/com/alibaba/otter/canal/client/kafka/KafkaCanalConnectors.java

@@ -17,8 +17,7 @@ public class KafkaCanalConnectors {
      * @param groupId
      * @return
      */
-    public static KafkaCanalConnector newKafkaConnector(String servers, String topic, Integer partition,
-                                                        String groupId) {
+    public static KafkaCanalConnector newKafkaConnector(String servers, String topic, Integer partition, String groupId) {
         return new KafkaCanalConnector(servers, topic, partition, groupId, false);
     }
 
@@ -44,8 +43,8 @@ public class KafkaCanalConnectors {
      * @param flatMessage
      * @return
      */
-    public static KafkaCanalConnector newKafkaConnector( String servers, String topic,
-                                                        Integer partition, String groupId,boolean flatMessage) {
+    public static KafkaCanalConnector newKafkaConnector(String servers, String topic, Integer partition,
+                                                        String groupId, boolean flatMessage) {
         return new KafkaCanalConnector(servers, topic, partition, groupId, flatMessage);
     }
 }

+ 3 - 37
client/src/main/java/com/alibaba/otter/canal/client/kafka/MessageDeserializer.java

@@ -3,14 +3,9 @@ package com.alibaba.otter.canal.client.kafka;
 import java.util.Map;
 
 import org.apache.kafka.common.serialization.Deserializer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
-import com.alibaba.otter.canal.protocol.CanalEntry;
-import com.alibaba.otter.canal.protocol.CanalPacket;
+import com.alibaba.otter.canal.client.CanalMessageDeserializer;
 import com.alibaba.otter.canal.protocol.Message;
-import com.alibaba.otter.canal.protocol.exception.CanalClientException;
-import com.google.protobuf.ByteString;
 
 /**
  * Kafka Message类的反序列化
@@ -20,42 +15,13 @@ import com.google.protobuf.ByteString;
  */
 public class MessageDeserializer implements Deserializer<Message> {
 
-    private static Logger logger = LoggerFactory.getLogger(MessageDeserializer.class);
-
     @Override
     public void configure(Map<String, ?> configs, boolean isKey) {
     }
 
     @Override
-    public Message deserialize(String topic, byte[] data) {
-        try {
-            if (data == null) {
-                return null;
-            }
-            else {
-                CanalPacket.Packet p = CanalPacket.Packet.parseFrom(data);
-                switch (p.getType()) {
-                    case MESSAGES: {
-                        if (!p.getCompression().equals(CanalPacket.Compression.NONE)
-                                && !p.getCompression().equals(CanalPacket.Compression.COMPRESSIONCOMPATIBLEPROTO2)) {
-                            throw new CanalClientException("compression is not supported in this connector");
-                        }
-
-                        CanalPacket.Messages messages = CanalPacket.Messages.parseFrom(p.getBody());
-                        Message result = new Message(messages.getBatchId());
-                        for (ByteString byteString : messages.getMessagesList()) {
-                            result.addEntry(CanalEntry.Entry.parseFrom(byteString));
-                        }
-                        return result;
-                    }
-                    default:
-                        break;
-                }
-            }
-        } catch (Exception e) {
-            logger.error("Error when deserializing byte[] to message ");
-        }
-        return null;
+    public Message deserialize(String topic1, byte[] data) {
+        return CanalMessageDeserializer.deserializer(data);
     }
 
     @Override

+ 4 - 5
client/src/main/java/com/alibaba/otter/canal/client/kafka/running/ClientRunningMonitor.java

@@ -24,7 +24,6 @@ import com.alibaba.otter.canal.common.zookeeper.ZkClientx;
 import com.alibaba.otter.canal.common.zookeeper.ZookeeperPathUtils;
 import com.alibaba.otter.canal.protocol.exception.CanalClientException;
 
-
 /**
  * kafka client running状态信息
  *
@@ -55,15 +54,15 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
     }
 
     private static final Logger        logger       = LoggerFactory.getLogger(ClientRunningMonitor.class);
-    private ZkClientx zkClient;
+    private ZkClientx                  zkClient;
     private String                     topic;
-    private ClientRunningData clientData;
+    private ClientRunningData          clientData;
     private IZkDataListener            dataListener;
-    private BooleanMutex mutex        = new BooleanMutex(false);
+    private BooleanMutex               mutex        = new BooleanMutex(false);
     private volatile boolean           release      = false;
     private volatile ClientRunningData activeData;
     private ScheduledExecutorService   delayExector = Executors.newScheduledThreadPool(1);
-    private ClientRunningListener listener;
+    private ClientRunningListener      listener;
     private int                        delayTime    = 5;
 
     private static Integer             virtualPort;

+ 48 - 0
client/src/main/java/com/alibaba/otter/canal/client/rocketmq/ConsumerBatchMessage.java

@@ -0,0 +1,48 @@
+package com.alibaba.otter.canal.client.rocketmq;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+public class ConsumerBatchMessage<T> {
+
+    private final BlockingQueue<T> data;
+    private CountDownLatch         latch;
+    private boolean                hasFailure = false;
+
+    public ConsumerBatchMessage(BlockingQueue<T> data){
+        this.data = data;
+        latch = new CountDownLatch(data.size());
+    }
+
+    public boolean waitFinish(long timeout) throws InterruptedException {
+        return latch.await(timeout, TimeUnit.MILLISECONDS);
+    }
+
+    public boolean isSuccess() {
+        return !hasFailure;
+    }
+
+    public BlockingQueue<T> getData() {
+        return data;
+    }
+
+    /**
+     * Countdown if the sub message is successful.
+     */
+    public void ack() {
+        latch.countDown();
+    }
+
+    /**
+     * Countdown and fail-fast if the sub message is failed.
+     */
+    public void fail() {
+        hasFailure = true;
+        // fail fast
+        long count = latch.getCount();
+        for (int i = 0; i < count; i++) {
+            latch.countDown();
+        }
+    }
+}

+ 202 - 0
client/src/main/java/com/alibaba/otter/canal/client/rocketmq/RocketMQCanalConnector.java

@@ -0,0 +1,202 @@
+package com.alibaba.otter.canal.client.rocketmq;
+
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.rocketmq.client.consumer.DefaultMQPushConsumer;
+import org.apache.rocketmq.client.consumer.listener.ConsumeOrderlyContext;
+import org.apache.rocketmq.client.consumer.listener.ConsumeOrderlyStatus;
+import org.apache.rocketmq.client.consumer.listener.MessageListenerOrderly;
+import org.apache.rocketmq.client.exception.MQClientException;
+import org.apache.rocketmq.common.message.MessageExt;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.alibaba.otter.canal.client.CanalConnector;
+import com.alibaba.otter.canal.client.CanalMessageDeserializer;
+import com.alibaba.otter.canal.protocol.Message;
+import com.alibaba.otter.canal.protocol.exception.CanalClientException;
+
+public class RocketMQCanalConnector implements CanalConnector {
+
+    private static final Logger                          logger              = LoggerFactory.getLogger(RocketMQCanalConnector.class);
+
+    private String                                       nameServer;
+    private String                                       topic;
+    private String                                       groupName;
+    private volatile boolean                             connected           = false;
+    private DefaultMQPushConsumer                        rocketMQConsumer;
+    private BlockingQueue<ConsumerBatchMessage<Message>> messageBlockingQueue;
+    Map<Long, ConsumerBatchMessage<Message>>             messageCache;
+    private long                                         batchProcessTimeout = 10000;
+
+    public RocketMQCanalConnector(String nameServer, String topic, String groupName){
+        this.nameServer = nameServer;
+        this.topic = topic;
+        this.groupName = groupName;
+        messageBlockingQueue = new LinkedBlockingQueue<>();
+        messageCache = new ConcurrentHashMap<>();
+    }
+
+    @Override
+    public void connect() throws CanalClientException {
+        rocketMQConsumer = new DefaultMQPushConsumer(groupName);
+        if (!StringUtils.isBlank(nameServer)) {
+            rocketMQConsumer.setNamesrvAddr(nameServer);
+        }
+    }
+
+    @Override
+    public void disconnect() throws CanalClientException {
+        rocketMQConsumer.shutdown();
+    }
+
+    @Override
+    public boolean checkValid() throws CanalClientException {
+        return connected;
+    }
+
+    @Override
+    public synchronized void subscribe(String filter) throws CanalClientException {
+        if (connected) {
+            return;
+        }
+        try {
+            if (rocketMQConsumer == null) {
+                this.connect();
+            }
+            rocketMQConsumer.subscribe(topic, "*");
+            rocketMQConsumer.registerMessageListener(new MessageListenerOrderly() {
+
+                @Override
+                public ConsumeOrderlyStatus consumeMessage(List<MessageExt> messageExts, ConsumeOrderlyContext context) {
+                    context.setAutoCommit(true);
+                    boolean isSuccess = process(messageExts);
+                    if (isSuccess) {
+                        return ConsumeOrderlyStatus.SUCCESS;
+                    } else {
+                        return ConsumeOrderlyStatus.SUSPEND_CURRENT_QUEUE_A_MOMENT;
+                    }
+                }
+            });
+            rocketMQConsumer.start();
+        } catch (MQClientException ex) {
+            connected = false;
+            logger.error("Start RocketMQ consumer error", ex);
+        }
+        connected = true;
+    }
+
+    private boolean process(List<MessageExt> messageExts) {
+        BlockingQueue<Message> messageList = new LinkedBlockingQueue<>();
+        for (MessageExt messageExt : messageExts) {
+            byte[] data = messageExt.getBody();
+            Message message = CanalMessageDeserializer.deserializer(data);
+            try {
+                messageList.put(message);
+            } catch (InterruptedException ex) {
+                logger.error("Add message error");
+            }
+        }
+        ConsumerBatchMessage<Message> batchMessage = new ConsumerBatchMessage<>(messageList);
+        try {
+            messageBlockingQueue.put(batchMessage);
+        } catch (InterruptedException e) {
+            logger.error("Put message to queue error", e);
+            throw new RuntimeException(e);
+        }
+        boolean isCompleted;
+        try {
+            isCompleted = batchMessage.waitFinish(batchProcessTimeout);
+        } catch (InterruptedException e) {
+            logger.error("Interrupted when waiting messages to be finished.", e);
+            throw new RuntimeException(e);
+        }
+        boolean isSuccess = batchMessage.isSuccess();
+        return isCompleted && isSuccess;
+    }
+
+    @Override
+    public void subscribe() throws CanalClientException {
+        this.subscribe(null);
+    }
+
+    @Override
+    public void unsubscribe() throws CanalClientException {
+        this.rocketMQConsumer.unsubscribe(this.topic);
+    }
+
+    @Override
+    public Message get(int batchSize) throws CanalClientException {
+        Message message = getWithoutAck(batchSize);
+        ack(message.getId());
+        return message;
+    }
+
+    @Override
+    public Message get(int batchSize, Long timeout, TimeUnit unit) throws CanalClientException {
+        Message message = getWithoutAck(batchSize, timeout, unit);
+        ack(message.getId());
+        return message;
+    }
+
+    private Message getMessage(ConsumerBatchMessage consumerBatchMessage) {
+        BlockingQueue<Message> messageList = consumerBatchMessage.getData();
+        if (messageList != null & messageList.size() > 0) {
+            Message message = messageList.poll();
+            messageCache.put(message.getId(), consumerBatchMessage);
+            return message;
+        }
+        return null;
+    }
+
+    @Override
+    public Message getWithoutAck(int batchSize) throws CanalClientException {
+        ConsumerBatchMessage batchMessage = messageBlockingQueue.poll();
+        if (batchMessage != null) {
+            return getMessage(batchMessage);
+        }
+        return null;
+    }
+
+    @Override
+    public Message getWithoutAck(int batchSize, Long timeout, TimeUnit unit) throws CanalClientException {
+        try {
+            ConsumerBatchMessage batchMessage = messageBlockingQueue.poll(timeout, unit);
+            return getMessage(batchMessage);
+        } catch (InterruptedException ex) {
+            logger.warn("Get message timeout", ex);
+            throw new CanalClientException("Failed to fetch the data after: " + timeout);
+        }
+    }
+
+    @Override
+    public void ack(long batchId) throws CanalClientException {
+        ConsumerBatchMessage batchMessage = messageCache.get(batchId);
+        if (batchMessage != null) {
+            batchMessage.ack();
+        }
+    }
+
+    @Override
+    public void rollback(long batchId) throws CanalClientException {
+
+    }
+
+    @Override
+    public void rollback() throws CanalClientException {
+
+    }
+
+    @Override
+    public void stopRunning() throws CanalClientException {
+        this.rocketMQConsumer.shutdown();
+        connected = false;
+    }
+
+}

+ 20 - 0
client/src/main/java/com/alibaba/otter/canal/client/rocketmq/RocketMQCanalConnectorProvider.java

@@ -0,0 +1,20 @@
+package com.alibaba.otter.canal.client.rocketmq;
+
+/**
+ * RocketMQ connector provider.
+ */
+public class RocketMQCanalConnectorProvider {
+
+    /**
+     * Create RocketMQ connector
+     *
+     * @param nameServers name servers for RocketMQ
+     * @param topic
+     * @param groupId
+     * @return {@link RocketMQCanalConnector}
+     */
+    public static RocketMQCanalConnector newRocketMQConnector(String nameServers, String topic, String groupId) {
+        return new RocketMQCanalConnector(nameServers, topic, groupId);
+    }
+
+}

+ 13 - 0
client/src/main/java/com/alibaba/otter/canal/client/rocketmq/RocketMQCanalListener.java

@@ -0,0 +1,13 @@
+package com.alibaba.otter.canal.client.rocketmq;
+
+import java.util.List;
+
+import org.apache.rocketmq.common.message.MessageExt;
+
+/**
+ * RocketMQ message listener
+ */
+public interface RocketMQCanalListener {
+
+    boolean onReceive(List<MessageExt> messageExts);
+}

+ 114 - 114
client/src/test/java/com/alibaba/otter/canal/client/running/ClientRunningTest.java

@@ -20,117 +20,117 @@ import com.alibaba.otter.canal.common.zookeeper.ZookeeperPathUtils;
 
 public class ClientRunningTest extends AbstractZkTest {
 
-	private ZkClientx zkclientx = new ZkClientx(cluster1 + ";" + cluster2);
-	private short     clientId  = 1001;
-
-	@Before
-	public void setUp() {
-		String path = ZookeeperPathUtils.getDestinationPath(destination);
-		zkclientx.deleteRecursive(path);
-
-		zkclientx.createPersistent(ZookeeperPathUtils.getClientIdNodePath(this.destination, clientId), true);
-	}
-
-	@After
-	public void tearDown() {
-		String path = ZookeeperPathUtils.getDestinationPath(destination);
-		zkclientx.deleteRecursive(path);
-	}
-
-	@Test
-	public void testOneServer() {
-		final CountDownLatch countLatch = new CountDownLatch(2);
-		ClientRunningMonitor runningMonitor = buildClientRunning(countLatch, clientId, 2088);
-		runningMonitor.start();
-		sleep(2000L);
-		runningMonitor.stop();
-		sleep(2000L);
-
-		if (countLatch.getCount() != 0) {
-			Assert.fail();
-		}
-	}
-
-	@Test
-	public void testMultiServer() {
-		final CountDownLatch countLatch = new CountDownLatch(30);
-		final ClientRunningMonitor runningMonitor1 = buildClientRunning(countLatch, clientId, 2088);
-		final ClientRunningMonitor runningMonitor2 = buildClientRunning(countLatch, clientId, 2089);
-		final ClientRunningMonitor runningMonitor3 = buildClientRunning(countLatch, clientId, 2090);
-		final ExecutorService executor = Executors.newFixedThreadPool(3);
-		executor.submit(new Runnable() {
-
-			public void run() {
-				for (int i = 0; i < 10; i++) {
-					if (!runningMonitor1.isStart()) {
-						runningMonitor1.start();
-					}
-					sleep(2000L + RandomUtils.nextInt(500));
-					runningMonitor1.stop();
-					sleep(2000L + RandomUtils.nextInt(500));
-				}
-			}
-
-		});
-
-		executor.submit(new Runnable() {
-
-			public void run() {
-				for (int i = 0; i < 10; i++) {
-					if (!runningMonitor2.isStart()) {
-						runningMonitor2.start();
-					}
-					sleep(2000L + RandomUtils.nextInt(500));
-					runningMonitor2.stop();
-					sleep(2000L + RandomUtils.nextInt(500));
-				}
-			}
-
-		});
-
-		executor.submit(new Runnable() {
-
-			public void run() {
-				for (int i = 0; i < 10; i++) {
-					if (!runningMonitor3.isStart()) {
-						runningMonitor3.start();
-					}
-					sleep(2000L + RandomUtils.nextInt(500));
-					runningMonitor3.stop();
-					sleep(2000L + RandomUtils.nextInt(500));
-				}
-			}
-
-		});
-
-		sleep(30000L);
-	}
-
-	private ClientRunningMonitor buildClientRunning(final CountDownLatch countLatch, final short clientId,
-																									final int port) {
-		ClientRunningData clientData = new ClientRunningData();
-		clientData.setClientId(clientId);
-		clientData.setAddress(AddressUtils.getHostIp());
-
-		ClientRunningMonitor runningMonitor = new ClientRunningMonitor();
-		runningMonitor.setDestination(destination);
-		runningMonitor.setZkClient(zkclientx);
-		runningMonitor.setClientData(clientData);
-		runningMonitor.setListener(new ClientRunningListener() {
-
-			public InetSocketAddress processActiveEnter() {
-				System.out.println(String.format("clientId:%s port:%s has start", clientId, port));
-				countLatch.countDown();
-				return new InetSocketAddress(AddressUtils.getHostIp(), port);
-			}
-
-			public void processActiveExit() {
-				countLatch.countDown();
-				System.out.println(String.format("clientId:%s port:%s has stop", clientId, port));
-			}
-
-		});
-		runningMonitor.setDelayTime(1);
-		return runningMonitor;
-	}
-}
+    private ZkClientx zkclientx = new ZkClientx(cluster1 + ";" + cluster2);
+    private short     clientId  = 1001;
+
+    @Before
+    public void setUp() {
+        String path = ZookeeperPathUtils.getDestinationPath(destination);
+        zkclientx.deleteRecursive(path);
+
+        zkclientx.createPersistent(ZookeeperPathUtils.getClientIdNodePath(this.destination, clientId), true);
+    }
+
+    @After
+    public void tearDown() {
+        String path = ZookeeperPathUtils.getDestinationPath(destination);
+        zkclientx.deleteRecursive(path);
+    }
+
+    @Test
+    public void testOneServer() {
+        final CountDownLatch countLatch = new CountDownLatch(2);
+        ClientRunningMonitor runningMonitor = buildClientRunning(countLatch, clientId, 2088);
+        runningMonitor.start();
+        sleep(2000L);
+        runningMonitor.stop();
+        sleep(2000L);
+
+        if (countLatch.getCount() != 0) {
+            Assert.fail();
+        }
+    }
+
+    @Test
+    public void testMultiServer() {
+        final CountDownLatch countLatch = new CountDownLatch(30);
+        final ClientRunningMonitor runningMonitor1 = buildClientRunning(countLatch, clientId, 2088);
+        final ClientRunningMonitor runningMonitor2 = buildClientRunning(countLatch, clientId, 2089);
+        final ClientRunningMonitor runningMonitor3 = buildClientRunning(countLatch, clientId, 2090);
+        final ExecutorService executor = Executors.newFixedThreadPool(3);
+        executor.submit(new Runnable() {
+
+            public void run() {
+                for (int i = 0; i < 10; i++) {
+                    if (!runningMonitor1.isStart()) {
+                        runningMonitor1.start();
+                    }
+                    sleep(2000L + RandomUtils.nextInt(500));
+                    runningMonitor1.stop();
+                    sleep(2000L + RandomUtils.nextInt(500));
+                }
+            }
+
+        });
+
+        executor.submit(new Runnable() {
+
+            public void run() {
+                for (int i = 0; i < 10; i++) {
+                    if (!runningMonitor2.isStart()) {
+                        runningMonitor2.start();
+                    }
+                    sleep(2000L + RandomUtils.nextInt(500));
+                    runningMonitor2.stop();
+                    sleep(2000L + RandomUtils.nextInt(500));
+                }
+            }
+
+        });
+
+        executor.submit(new Runnable() {
+
+            public void run() {
+                for (int i = 0; i < 10; i++) {
+                    if (!runningMonitor3.isStart()) {
+                        runningMonitor3.start();
+                    }
+                    sleep(2000L + RandomUtils.nextInt(500));
+                    runningMonitor3.stop();
+                    sleep(2000L + RandomUtils.nextInt(500));
+                }
+            }
+
+        });
+
+        sleep(30000L);
+    }
+
+    private ClientRunningMonitor buildClientRunning(final CountDownLatch countLatch, final short clientId,
+                                                    final int port) {
+        ClientRunningData clientData = new ClientRunningData();
+        clientData.setClientId(clientId);
+        clientData.setAddress(AddressUtils.getHostIp());
+
+        ClientRunningMonitor runningMonitor = new ClientRunningMonitor();
+        runningMonitor.setDestination(destination);
+        runningMonitor.setZkClient(zkclientx);
+        runningMonitor.setClientData(clientData);
+        runningMonitor.setListener(new ClientRunningListener() {
+
+            public InetSocketAddress processActiveEnter() {
+                System.out.println(String.format("clientId:%s port:%s has start", clientId, port));
+                countLatch.countDown();
+                return new InetSocketAddress(AddressUtils.getHostIp(), port);
+            }
+
+            public void processActiveExit() {
+                countLatch.countDown();
+                System.out.println(String.format("clientId:%s port:%s has stop", clientId, port));
+            }
+
+        });
+        runningMonitor.setDelayTime(1);
+        return runningMonitor;
+    }
+}

+ 1 - 2
client/src/test/java/com/alibaba/otter/canal/client/running/kafka/CanalKafkaClientExample.java

@@ -41,8 +41,7 @@ public class CanalKafkaClientExample {
 
     public static void main(String[] args) {
         try {
-            final CanalKafkaClientExample kafkaCanalClientExample = new CanalKafkaClientExample(
-                AbstractKafkaTest.zkServers,
+            final CanalKafkaClientExample kafkaCanalClientExample = new CanalKafkaClientExample(AbstractKafkaTest.zkServers,
                 AbstractKafkaTest.servers,
                 AbstractKafkaTest.topic,
                 AbstractKafkaTest.partition,

+ 9 - 0
client/src/test/java/com/alibaba/otter/canal/client/running/rocketmq/AbstractRocektMQTest.java

@@ -0,0 +1,9 @@
+package com.alibaba.otter.canal.client.running.rocketmq;
+
+public class AbstractRocektMQTest {
+
+    public static String topic       = "example";
+    public static String groupId     = "group";
+    public static String nameServers = "localhost:9876";
+
+}

+ 134 - 0
client/src/test/java/com/alibaba/otter/canal/client/running/rocketmq/CanalRocketMQClientExample.java

@@ -0,0 +1,134 @@
+package com.alibaba.otter.canal.client.running.rocketmq;
+
+import org.apache.kafka.common.errors.WakeupException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.util.Assert;
+
+import com.alibaba.otter.canal.client.rocketmq.RocketMQCanalConnector;
+import com.alibaba.otter.canal.client.rocketmq.RocketMQCanalConnectorProvider;
+import com.alibaba.otter.canal.client.running.kafka.AbstractKafkaTest;
+import com.alibaba.otter.canal.protocol.Message;
+
+/**
+ * Kafka client example
+ *
+ * @author machengyuan @ 2018-6-12
+ * @version 1.0.0
+ */
+public class CanalRocketMQClientExample extends AbstractRocektMQTest {
+
+    protected final static Logger           logger  = LoggerFactory.getLogger(CanalRocketMQClientExample.class);
+
+    private RocketMQCanalConnector          connector;
+
+    private static volatile boolean         running = false;
+
+    private Thread                          thread  = null;
+
+    private Thread.UncaughtExceptionHandler handler = new Thread.UncaughtExceptionHandler() {
+
+                                                        public void uncaughtException(Thread t, Throwable e) {
+                                                            logger.error("parse events has an error", e);
+                                                        }
+                                                    };
+
+    public CanalRocketMQClientExample(String nameServers, String topic, String groupId){
+        connector = RocketMQCanalConnectorProvider.newRocketMQConnector(nameServers, topic, groupId);
+    }
+
+    public static void main(String[] args) {
+        try {
+            final CanalRocketMQClientExample rocketMQClientExample = new CanalRocketMQClientExample(nameServers,
+                topic,
+                groupId);
+            logger.info("## Start the rocketmq consumer: {}-{}", AbstractKafkaTest.topic, AbstractKafkaTest.groupId);
+            rocketMQClientExample.start();
+            logger.info("## The canal rocketmq consumer is running now ......");
+            Runtime.getRuntime().addShutdownHook(new Thread() {
+
+                public void run() {
+                    try {
+                        logger.info("## Stop the rocketmq consumer");
+                        rocketMQClientExample.stop();
+                    } catch (Throwable e) {
+                        logger.warn("## Something goes wrong when stopping rocketmq consumer:", e);
+                    } finally {
+                        logger.info("## Rocketmq consumer is down.");
+                    }
+                }
+
+            });
+            while (running)
+                ;
+        } catch (Throwable e) {
+            logger.error("## Something going wrong when starting up the rocketmq consumer:", e);
+            System.exit(0);
+        }
+    }
+
+    public void start() {
+        Assert.notNull(connector, "connector is null");
+        thread = new Thread(new Runnable() {
+
+            public void run() {
+                process();
+            }
+        });
+        thread.setUncaughtExceptionHandler(handler);
+        thread.start();
+        running = true;
+    }
+
+    public void stop() {
+        if (!running) {
+            return;
+        }
+        connector.stopRunning();
+        running = false;
+        if (thread != null) {
+            try {
+                thread.join();
+            } catch (InterruptedException e) {
+                // ignore
+            }
+        }
+    }
+
+    private void process() {
+        while (!running)
+            ;
+        while (running) {
+            try {
+                connector.connect();
+                connector.subscribe();
+                while (running) {
+                    Message message = connector.getWithoutAck(1); // 获取message
+                    try {
+                        if (message == null) {
+                            continue;
+                        }
+                        long batchId = message.getId();
+                        int size = message.getEntries().size();
+                        if (batchId == -1 || size == 0) {
+                        } else {
+                            logger.info(message.toString());
+                        }
+                    } catch (Exception e) {
+                        logger.error(e.getMessage(), e);
+                    }
+                    connector.ack(message.getId()); // 提交确认
+                }
+            } catch (Exception e) {
+                logger.error(e.getMessage(), e);
+            }
+        }
+
+        try {
+            connector.unsubscribe();
+        } catch (WakeupException e) {
+            // No-op. Continue process
+        }
+        connector.stopRunning();
+    }
+}

+ 3 - 0
dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/LogDecoder.java

@@ -189,6 +189,7 @@ public final class LogDecoder {
                 /* updating position in context */
                 logPosition.position = header.getLogPos();
                 event.fillTable(context);
+                header.putGtid(context.getGtidSet(), gtidLogEvent);
                 return event;
             }
             case LogEvent.UPDATE_ROWS_EVENT_V1: {
@@ -196,6 +197,7 @@ public final class LogDecoder {
                 /* updating position in context */
                 logPosition.position = header.getLogPos();
                 event.fillTable(context);
+                header.putGtid(context.getGtidSet(), gtidLogEvent);
                 return event;
             }
             case LogEvent.DELETE_ROWS_EVENT_V1: {
@@ -203,6 +205,7 @@ public final class LogDecoder {
                 /* updating position in context */
                 logPosition.position = header.getLogPos();
                 event.fillTable(context);
+                header.putGtid(context.getGtidSet(), gtidLogEvent);
                 return event;
             }
             case LogEvent.ROTATE_EVENT: {

+ 4 - 4
dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/RowsLogBuffer.java

@@ -1082,10 +1082,10 @@ public final class RowsLogBuffer {
                 buffer.fillBytes(binary, 0, len);
 
                 /* Warning unsupport cloumn type */
-                logger.warn(String.format("!! Unsupport column type MYSQL_TYPE_GEOMETRY: meta=%d (%04X), len = %d",
-                    meta,
-                    meta,
-                    len));
+                // logger.warn(String.format("!! Unsupport column type MYSQL_TYPE_GEOMETRY: meta=%d (%04X), len = %d",
+                // meta,
+                // meta,
+                // len));
                 javaType = Types.BINARY;
                 value = binary;
                 length = len;

+ 45 - 41
dbsync/src/test/java/com/taobao/tddl/dbsync/binlog/DirectLogFetcherTest.java

@@ -8,6 +8,15 @@ import java.sql.Statement;
 import org.junit.Assert;
 import org.junit.Test;
 
+import com.taobao.tddl.dbsync.binlog.event.DeleteRowsLogEvent;
+import com.taobao.tddl.dbsync.binlog.event.QueryLogEvent;
+import com.taobao.tddl.dbsync.binlog.event.RotateLogEvent;
+import com.taobao.tddl.dbsync.binlog.event.RowsQueryLogEvent;
+import com.taobao.tddl.dbsync.binlog.event.UpdateRowsLogEvent;
+import com.taobao.tddl.dbsync.binlog.event.WriteRowsLogEvent;
+import com.taobao.tddl.dbsync.binlog.event.XidLogEvent;
+import com.taobao.tddl.dbsync.binlog.event.mariadb.AnnotateRowsEvent;
+
 public class DirectLogFetcherTest extends BaseLogFetcherTest {
 
     @Test
@@ -15,54 +24,49 @@ public class DirectLogFetcherTest extends BaseLogFetcherTest {
         DirectLogFetcher fecther = new DirectLogFetcher();
         try {
             Class.forName("com.mysql.jdbc.Driver");
-            Connection connection = DriverManager.getConnection("jdbc:mysql://100.81.154.142:3306", "root", "hello");
+            Connection connection = DriverManager.getConnection("jdbc:mysql://127.0.0.1:3306", "root", "hello");
             Statement statement = connection.createStatement();
             statement.execute("SET @master_binlog_checksum='@@global.binlog_checksum'");
             statement.execute("SET @mariadb_slave_capability='" + LogEvent.MARIA_SLAVE_CAPABILITY_MINE + "'");
 
-            fecther.open(connection, "mysql-bin.000006", 120L, 2);
+            fecther.open(connection, "mysql-bin.000007", 89797036L, 2);
 
-            LogDecoder decoder = new LogDecoder(LogEvent.UNKNOWN_EVENT, LogEvent.UNKNOWN_EVENT);
+            LogDecoder decoder = new LogDecoder(LogEvent.UNKNOWN_EVENT, LogEvent.ENUM_END_EVENT);
             LogContext context = new LogContext();
             while (fecther.fetch()) {
-                decoder.decode(fecther, context);
-                continue;
-                // if (event == null) {
-                // continue;
-                // }
-                //
-                // int eventType = event.getHeader().getType();
-                // switch (eventType) {
-                // case LogEvent.ROTATE_EVENT:
-                // binlogFileName = ((RotateLogEvent) event).getFilename();
-                // break;
-                // case LogEvent.WRITE_ROWS_EVENT_V1:
-                // case LogEvent.WRITE_ROWS_EVENT:
-                // parseRowsEvent((WriteRowsLogEvent) event);
-                // break;
-                // case LogEvent.UPDATE_ROWS_EVENT_V1:
-                // case LogEvent.UPDATE_ROWS_EVENT:
-                // parseRowsEvent((UpdateRowsLogEvent) event);
-                // break;
-                // case LogEvent.DELETE_ROWS_EVENT_V1:
-                // case LogEvent.DELETE_ROWS_EVENT:
-                // parseRowsEvent((DeleteRowsLogEvent) event);
-                // break;
-                // case LogEvent.QUERY_EVENT:
-                // parseQueryEvent((QueryLogEvent) event);
-                // break;
-                // case LogEvent.ROWS_QUERY_LOG_EVENT:
-                // parseRowsQueryEvent((RowsQueryLogEvent) event);
-                // break;
-                // case LogEvent.ANNOTATE_ROWS_EVENT:
-                // parseAnnotateRowsEvent((AnnotateRowsEvent) event);
-                // break;
-                // case LogEvent.XID_EVENT:
-                // parseXidEvent((XidLogEvent) event);
-                // break;
-                // default:
-                // break;
-                // }
+                LogEvent event = decoder.decode(fecther, context);
+                int eventType = event.getHeader().getType();
+                switch (eventType) {
+                    case LogEvent.ROTATE_EVENT:
+                        binlogFileName = ((RotateLogEvent) event).getFilename();
+                        break;
+                    case LogEvent.WRITE_ROWS_EVENT_V1:
+                    case LogEvent.WRITE_ROWS_EVENT:
+                        parseRowsEvent((WriteRowsLogEvent) event);
+                        break;
+                    case LogEvent.UPDATE_ROWS_EVENT_V1:
+                    case LogEvent.UPDATE_ROWS_EVENT:
+                        parseRowsEvent((UpdateRowsLogEvent) event);
+                        break;
+                    case LogEvent.DELETE_ROWS_EVENT_V1:
+                    case LogEvent.DELETE_ROWS_EVENT:
+                        parseRowsEvent((DeleteRowsLogEvent) event);
+                        break;
+                    case LogEvent.QUERY_EVENT:
+                        parseQueryEvent((QueryLogEvent) event);
+                        break;
+                    case LogEvent.ROWS_QUERY_LOG_EVENT:
+                        parseRowsQueryEvent((RowsQueryLogEvent) event);
+                        break;
+                    case LogEvent.ANNOTATE_ROWS_EVENT:
+                        parseAnnotateRowsEvent((AnnotateRowsEvent) event);
+                        break;
+                    case LogEvent.XID_EVENT:
+                        parseXidEvent((XidLogEvent) event);
+                        break;
+                    default:
+                        break;
+                }
             }
         } catch (Exception e) {
             e.printStackTrace();

+ 1 - 1
deployer/pom.xml

@@ -40,7 +40,7 @@
 						<exclude>**/canal.properties</exclude>
 						<exclude>**/spring/**</exclude>
 						<exclude>**/example/**</exclude>
-						<exclude>**/kafka.yml</exclude>
+						<exclude>**/mq.yml</exclude>
 					</excludes>
 				</configuration>
 			</plugin>

+ 15 - 12
deployer/src/main/java/com/alibaba/otter/canal/deployer/CanalLauncher.java

@@ -1,25 +1,25 @@
 package com.alibaba.otter.canal.deployer;
 
+import com.alibaba.otter.canal.kafka.CanalKafkaProducer;
+import com.alibaba.otter.canal.rocketmq.CanalRocketMQProducer;
+import com.alibaba.otter.canal.server.CanalMQStarter;
+import com.alibaba.otter.canal.spi.CanalMQProducer;
 import java.io.FileInputStream;
 import java.util.Properties;
-
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.alibaba.otter.canal.kafka.CanalKafkaStarter;
-import com.alibaba.otter.canal.server.CanalServerStarter;
-
 /**
  * canal独立版本启动的入口类
- * 
+ *
  * @author jianghang 2012-11-6 下午05:20:49
  * @version 1.0.0
  */
 public class CanalLauncher {
 
     private static final String CLASSPATH_URL_PREFIX = "classpath:";
-    private static final Logger logger               = LoggerFactory.getLogger(CanalLauncher.class);
+    private static final Logger logger = LoggerFactory.getLogger(CanalLauncher.class);
 
     public static void main(String[] args) throws Throwable {
         try {
@@ -55,16 +55,19 @@ public class CanalLauncher {
 
             });
 
-            CanalServerStarter canalServerStarter = null;
+            CanalMQProducer canalMQProducer = null;
             String serverMode = controller.getProperty(properties, CanalConstants.CANAL_SERVER_MODE);
             if (serverMode.equalsIgnoreCase("kafka")) {
-                canalServerStarter = new CanalKafkaStarter();
-            } else if (serverMode.equalsIgnoreCase("rocketMQ")) {
-                // 预留rocketMQ启动
+                canalMQProducer = new CanalKafkaProducer();
+            } else if (serverMode.equalsIgnoreCase("rocketmq")) {
+                canalMQProducer = new CanalRocketMQProducer();
             }
+            if (canalMQProducer != null) {
+                CanalMQStarter canalServerStarter = new CanalMQStarter(canalMQProducer);
+                if (canalServerStarter != null) {
+                    canalServerStarter.init();
+                }
 
-            if (canalServerStarter != null) {
-                canalServerStarter.init();
             }
         } catch (Throwable e) {
             logger.error("## Something goes wrong when starting up the canal Server:", e);

+ 1 - 1
deployer/src/main/resources/canal.properties

@@ -9,7 +9,7 @@ canal.zkServers=
 # flush data to zk
 canal.zookeeper.flush.period = 1000
 canal.withoutNetty = false
-# tcp, kafka, rocketMQ
+# tcp, kafka, RocketMQ
 canal.serverMode = tcp
 # flush meta cursor/parse position to file
 canal.file.data.dir = ${canal.conf.dir}

+ 6 - 2
deployer/src/main/resources/example/instance.properties

@@ -31,8 +31,12 @@ canal.instance.tsdb.enable=true
 
 # username/password
 canal.instance.dbUsername=canal
-canal.instance.dbPassword=canal
-canal.instance.connectionCharset=UTF-8
+canal.instance.dbPassword=cZozNf1mzW6EQLGO2q9u99619xbZLO0fbua3EX08r4BWNXb8lAt1aHrTEOBttd6UY8Vnuc0easlVXZDdLtt8BQ==
+canal.instance.pwdPublicKey=MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBALK4BUxdDltRRE5/zXpVEVPUgunvscYFtEip3pmLlhrWpacX7y7GCMo2/JM6LeHmiiNdH1FWgGCpUfircSwlWKUCAwEAAQ==
+canal.instance.connectionCharset = UTF-8
+canal.instance.defaultDatabaseName =test
+# enable druid Decrypt database password
+canal.instance.enableDruid=true
 
 # table regex
 canal.instance.filter.regex=.*\\..*

+ 4 - 3
deployer/src/main/resources/kafka.yml → deployer/src/main/resources/mq.yml

@@ -1,4 +1,4 @@
-servers: slave1:6667,slave2:6667,slave3:6667
+servers: localhost:9876 #for rocketmq: means the nameserver
 retries: 0
 batchSize: 16384
 lingerMs: 1
@@ -11,8 +11,9 @@ canalGetTimeout: 100
 flatMessage: true
 
 canalDestinations:
-- canalDestination: example
-  topic: exp3
+  - canalDestination: example
+    topic: example
+    partition: 1
 #  #对应topic分区数量
 #  partitionsNum: 3
 #  partitionHash:

+ 6 - 2
deployer/src/main/resources/spring/default-instance.xml

@@ -126,18 +126,22 @@
 		
 		<!-- 解析数据库信息 -->
 		<property name="masterInfo">
-			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo">
+			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo" init-method="initPwd">
 				<property name="address" value="${canal.instance.master.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
+				<property name="pwdPublicKey" value="${canal.instance.pwdPublicKey:retl}" />
+				<property name="enableDruid" value="${canal.instance.enableDruid:false}" />
 				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
-			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo">
+			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo" init-method="initPwd">
 				<property name="address" value="${canal.instance.standby.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
+				<property name="pwdPublicKey" value="${canal.instance.pwdPublicKey:retl}" />
+				<property name="enableDruid" value="${canal.instance.enableDruid:false}" />
 				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>

+ 7 - 3
deployer/src/main/resources/spring/file-instance.xml

@@ -111,22 +111,26 @@
 		
 		<!-- 解析数据库信息 -->
 		<property name="masterInfo">
-			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo">
+			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo" init-method="initPwd">
 				<property name="address" value="${canal.instance.master.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
+				<property name="pwdPublicKey" value="${canal.instance.pwdPublicKey:retl}" />
+				<property name="enableDruid" value="${canal.instance.enableDruid:false}" />
 				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
-			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo">
+			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo" init-method="initPwd">
 				<property name="address" value="${canal.instance.standby.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
+				<property name="pwdPublicKey" value="${canal.instance.pwdPublicKey:retl}" />
+				<property name="enableDruid" value="${canal.instance.enableDruid:false}" />
 				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
-		
+
 		<!-- 解析起始位点 -->
 		<property name="masterPosition">
 			<bean class="com.alibaba.otter.canal.protocol.position.EntryPosition">

+ 12 - 4
deployer/src/main/resources/spring/group-instance.xml

@@ -108,18 +108,22 @@
 		
 		<!-- 解析数据库信息 -->
 		<property name="masterInfo">
-			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo">
+			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo" init-method="initPwd">
 				<property name="address" value="${canal.instance.master1.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
+				<property name="pwdPublicKey" value="${canal.instance.pwdPublicKey:retl}" />
+				<property name="enableDruid" value="${canal.instance.enableDruid:false}" />
 				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
-			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo">
+			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo" init-method="initPwd">
 				<property name="address" value="${canal.instance.standby1.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
+				<property name="pwdPublicKey" value="${canal.instance.pwdPublicKey:retl}" />
+				<property name="enableDruid" value="${canal.instance.enableDruid:false}" />
 				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
@@ -206,18 +210,22 @@
 		
 		<!-- 解析数据库信息 -->
 		<property name="masterInfo">
-			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo">
+			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo" init-method="initPwd">
 				<property name="address" value="${canal.instance.master2.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
+				<property name="pwdPublicKey" value="${canal.instance.pwdPublicKey:retl}" />
+				<property name="enableDruid" value="${canal.instance.enableDruid:false}" />
 				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
-			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo">
+			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo" init-method="initPwd">
 				<property name="address" value="${canal.instance.standby2.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
+				<property name="pwdPublicKey" value="${canal.instance.pwdPublicKey:retl}" />
+				<property name="enableDruid" value="${canal.instance.enableDruid:false}" />
 				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>

+ 7 - 3
deployer/src/main/resources/spring/memory-instance.xml

@@ -96,21 +96,25 @@
 		
 		<!-- failover切换时回退的时间 -->
 		<property name="fallbackIntervalInSeconds" value="${canal.instance.fallbackIntervalInSeconds:60}" />
-		
+
 		<!-- 解析数据库信息 -->
 		<property name="masterInfo">
-			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo">
+			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo" init-method="initPwd">
 				<property name="address" value="${canal.instance.master.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
+				<property name="pwdPublicKey" value="${canal.instance.pwdPublicKey:retl}" />
+				<property name="enableDruid" value="${canal.instance.enableDruid:false}" />
 				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
-			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo">
+			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo" init-method="initPwd">
 				<property name="address" value="${canal.instance.standby.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
+				<property name="pwdPublicKey" value="${canal.instance.pwdPublicKey:retl}" />
+				<property name="enableDruid" value="${canal.instance.enableDruid:false}" />
 				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>

+ 3 - 1
docker/image/admin/app.sh

@@ -87,7 +87,9 @@ function start_canal() {
         exit 1;
     else
         if [ "$destination" != "" ] && [ "$destination" != "example" ] ; then
-            mv /home/admin/canal-server/conf/example /home/admin/canal-server/conf/$destination
+            if [ -d /home/admin/canal-server/conf/example ]; then
+                mv /home/admin/canal-server/conf/example /home/admin/canal-server/conf/$destination
+            fi
         fi 
     fi
     su admin -c 'cd /home/admin/canal-server/bin/ && sh restart.sh 1>>/tmp/start.log 2>&1'

+ 10 - 6
example/src/main/java/com/alibaba/otter/canal/example/SimpleCanalClientPermanceTest.java

@@ -1,6 +1,8 @@
 package com.alibaba.otter.canal.example;
+
 import java.net.InetSocketAddress;
 import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.TimeUnit;
 
 import com.alibaba.otter.canal.client.CanalConnector;
 import com.alibaba.otter.canal.client.CanalConnectors;
@@ -44,7 +46,7 @@ public class SimpleCanalClientPermanceTest {
             connector.connect();
             connector.subscribe();
             while (true) {
-                Message message = connector.getWithoutAck(batchSize);
+                Message message = connector.getWithoutAck(batchSize, 100L, TimeUnit.MILLISECONDS);
                 long batchId = message.getId();
                 int size = message.getRawEntries().size();
                 sum += size;
@@ -53,11 +55,13 @@ public class SimpleCanalClientPermanceTest {
                 queue.add(batchId);
                 if (count % 10 == 0) {
                     end = System.currentTimeMillis();
-                    long tps = (perSum * 1000) / (end - start);
-                    System.out.println(" total : " + sum + " , current : " + perSum + " , cost : " + (end - start)
-                                       + " , tps : " + tps);
-                    start = end;
-                    perSum = 0;
+                    if (end - start != 0) {
+                        long tps = (perSum * 1000) / (end - start);
+                        System.out.println(" total : " + sum + " , current : " + perSum + " , cost : " + (end - start)
+                                           + " , tps : " + tps);
+                        start = end;
+                        perSum = 0;
+                    }
                 }
             }
         } catch (Throwable e) {

+ 4 - 1
instance/spring/src/test/resources/retl/instance.properties

@@ -27,9 +27,12 @@ canal.instance.master2.timestamp =
 
 # username/password
 canal.instance.dbUsername = xxxxx
-canal.instance.dbPassword = xxxxx
+canal.instance.dbPassword=cZozNf1mzW6EQLGO2q9u99619xbZLO0fbua3EX08r4BWNXb8lAt1aHrTEOBttd6UY8Vnuc0easlVXZDdLtt8BQ==
+canal.instance.pwdPublicKey=MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBALK4BUxdDltRRE5/zXpVEVPUgunvscYFtEip3pmLlhrWpacX7y7GCMo2/JM6LeHmiiNdH1FWgGCpUfircSwlWKUCAwEAAQ==
 canal.instance.defaultDatabaseName =
 canal.instance.connectionCharset = UTF-8
+# enable druid to decrypt the database password
+canal.instance.enableDruid=true
 
 # table regex
 canal.instance.filter.regex = .*\\..*

+ 2 - 2
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/AbstractEventParser.java

@@ -317,10 +317,10 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
                     eventSink.interrupt();
                     transactionBuffer.reset();// 重置一下缓冲队列,重新记录数据
                     binlogParser.reset();// 重新置位
-                    if (multiStageCoprocessor != null) {
+                    if (multiStageCoprocessor != null && multiStageCoprocessor.isStart()) {
                         // 处理 RejectedExecutionException
                         try {
-                            multiStageCoprocessor.reset();
+                            multiStageCoprocessor.stop();
                         } catch (Throwable t) {
                             logger.debug("multi processor rejected:", t);
                         }

+ 0 - 2
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/MultiStageCoprocessor.java

@@ -25,6 +25,4 @@ public interface MultiStageCoprocessor extends CanalLifeCycle {
     public boolean publish(LogBuffer buffer);
 
     public boolean publish(LogEvent event);
-
-    public void reset();
 }

+ 23 - 25
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlMultiStageCoprocessor.java

@@ -37,7 +37,6 @@ import com.taobao.tddl.dbsync.binlog.event.RowsLogEvent;
 import com.taobao.tddl.dbsync.binlog.event.UpdateRowsLogEvent;
 import com.taobao.tddl.dbsync.binlog.event.WriteRowsLogEvent;
 
-
 /**
  * 针对解析器提供一个多阶段协同的处理
  * 
@@ -53,21 +52,21 @@ import com.taobao.tddl.dbsync.binlog.event.WriteRowsLogEvent;
  */
 public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implements MultiStageCoprocessor {
 
-    private static final int             maxFullTimes = 10;
-    private LogEventConvert              logEventConvert;
-    private EventTransactionBuffer       transactionBuffer;
-    private ErosaConnection              connection;
-
-    private int                          parserThreadCount;
-    private int                          ringBufferSize;
-    private RingBuffer<MessageEvent>     disruptorMsgBuffer;
-    private ExecutorService              parserExecutor;
-    private ExecutorService              stageExecutor;
-    private String                       destination;
-    private volatile CanalParseException exception;
-    private AtomicLong                   eventsPublishBlockingTime;
-    private GTIDSet                      gtidSet;
-    private WorkerPool<MessageEvent>     workerPool;
+    private static final int                  maxFullTimes = 10;
+    private LogEventConvert                   logEventConvert;
+    private EventTransactionBuffer            transactionBuffer;
+    private ErosaConnection                   connection;
+
+    private int                               parserThreadCount;
+    private int                               ringBufferSize;
+    private RingBuffer<MessageEvent>          disruptorMsgBuffer;
+    private ExecutorService                   parserExecutor;
+    private ExecutorService                   stageExecutor;
+    private String                            destination;
+    private volatile CanalParseException      exception;
+    private AtomicLong                        eventsPublishBlockingTime;
+    private GTIDSet                           gtidSet;
+    private WorkerPool<MessageEvent>          workerPool;
     private BatchEventProcessor<MessageEvent> simpleParserStage;
     private BatchEventProcessor<MessageEvent> sinkStoreStage;
 
@@ -138,6 +137,10 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
         try {
             parserExecutor.shutdownNow();
             while (!parserExecutor.awaitTermination(1, TimeUnit.SECONDS)) {
+                if (parserExecutor.isShutdown() || parserExecutor.isTerminated()) {
+                    break;
+                }
+
                 parserExecutor.shutdownNow();
             }
         } catch (Throwable e) {
@@ -147,6 +150,10 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
         try {
             stageExecutor.shutdownNow();
             while (!stageExecutor.awaitTermination(1, TimeUnit.SECONDS)) {
+                if (stageExecutor.isShutdown() || stageExecutor.isTerminated()) {
+                    break;
+                }
+
                 stageExecutor.shutdownNow();
             }
         } catch (Throwable e) {
@@ -227,15 +234,6 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
 
     }
 
-    @Override
-    public void reset() {
-        if (isStart()) {
-            stop();
-        }
-
-        start();
-    }
-
     private class SimpleParserStage implements EventHandler<MessageEvent>, LifecycleAware {
 
         private LogDecoder decoder;

+ 29 - 2
parse/src/main/java/com/alibaba/otter/canal/parse/support/AuthenticationInfo.java

@@ -2,21 +2,32 @@ package com.alibaba.otter.canal.parse.support;
 
 import java.net.InetSocketAddress;
 
+import com.alibaba.druid.filter.config.ConfigTools;
 import org.apache.commons.lang.builder.ToStringBuilder;
 import org.apache.commons.lang.builder.ToStringStyle;
 
 /**
  * 数据库认证信息
- * 
+ *
  * @author jianghang 2012-7-11 上午11:22:19
  * @version 1.0.0
  */
 public class AuthenticationInfo {
 
+
+
     private InetSocketAddress address;            // 主库信息
     private String            username;           // 帐号
     private String            password;           // 密码
-    private String            defaultDatabaseName; // 默认链接的数据库
+    private String            defaultDatabaseName;// 默认链接的数据库
+    private String            pwdPublicKey;       //公钥
+    private boolean           enableDruid;        //是否使用druid加密解密数据库密码
+
+    public void initPwd() throws Exception{
+        if (enableDruid) {
+            this.password = ConfigTools.decrypt(pwdPublicKey, password);
+        }
+    }
 
     public AuthenticationInfo(){
         super();
@@ -65,6 +76,22 @@ public class AuthenticationInfo {
         this.defaultDatabaseName = defaultDatabaseName;
     }
 
+    public String getPwdPublicKey() {
+        return pwdPublicKey;
+    }
+
+    public void setPwdPublicKey(String pwdPublicKey) {
+        this.pwdPublicKey = pwdPublicKey;
+    }
+
+    public boolean isEnableDruid() {
+        return enableDruid;
+    }
+
+    public void setEnableDruid(boolean enableDruid) {
+        this.enableDruid = enableDruid;
+    }
+
     @Override
     public String toString() {
         return ToStringBuilder.reflectionToString(this, ToStringStyle.DEFAULT_STYLE);

+ 8 - 4
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/FastsqlSchemaTest.java

@@ -18,13 +18,17 @@ public class FastsqlSchemaTest {
     @Test
     public void testSimple() throws FileNotFoundException, IOException {
         SchemaRepository repository = new SchemaRepository(JdbcConstants.MYSQL);
-        String sql = "create table yushitai_test.card_record ( id bigint auto_increment) auto_increment=256 "
-                     + "; alter table yushitai_test.card_record add column customization_id bigint unsigned NOT NULL COMMENT 'TEST' ;"
-                     + "; rename table yushitai_test.card_record to yushitai_test._card_record_del;";
+        String sql = "create table yushitai_test.card_record ( id bigint auto_increment, name varchar(32) DEFAULT NULL) auto_increment=256 "
+                     + "alter table yushitai_test.card_record add index index_name(name) ;"
+                     + "alter table yushitai_test.card_record add index index_name(name) ;"
+                     + "alter table yushitai_test.card_record add Constraint pk_id PRIMARY KEY (id);"
+                     + "alter table yushitai_test.card_record add Constraint pk_id PRIMARY KEY (id);"
+                     + "alter table yushitai_test.card_record add Constraint UNIQUE index uk_name(name);"
+                     + "alter table yushitai_test.card_record add Constraint UNIQUE index uk_name(name);";
         repository.console(sql);
 
         repository.setDefaultSchema("yushitai_test");
-        SchemaObject table = repository.findTable("_card_record_del");
+        SchemaObject table = repository.findTable("card_record");
         System.out.println(table.getStatement().toString());
     }
 }

+ 15 - 0
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/MemoryTableMeta_DDL_Test.java

@@ -37,4 +37,19 @@ public class MemoryTableMeta_DDL_Test {
         meta = memoryTableMeta.find("yushitai_test", "_card_record_gho");
         Assert.assertNull(meta);
     }
+
+    @Test
+    public void test2() throws Throwable {
+        MemoryTableMeta memoryTableMeta = new MemoryTableMeta();
+        URL url = Thread.currentThread().getContextClassLoader().getResource("dummy.txt");
+        File dummyFile = new File(url.getFile());
+        File create = new File(dummyFile.getParent() + "/ddl", "ddl_test2.sql");
+        String sql = StringUtils.join(IOUtils.readLines(new FileInputStream(create)), "\n");
+        memoryTableMeta.apply(null, "test", sql, null);
+
+        TableMeta meta = memoryTableMeta.find("yushitai_test", "card_record");
+        System.out.println(meta);
+        Assert.assertEquals(meta.getFieldMetaByName("id").isKey(), true);
+        Assert.assertEquals(meta.getFieldMetaByName("name").isUnique(), true);
+    }
 }

+ 8 - 0
parse/src/test/resources/ddl/ddl_test2.sql

@@ -0,0 +1,8 @@
+CREATE TABLE yushitai_test.card_record (
+	id bigint AUTO_INCREMENT,
+	name varchar(32) DEFAULT NULL,
+	alias varchar(32) DEFAULT NULL,
+	INDEX index_name(name),
+	CONSTRAINT pk_id PRIMARY KEY (id),
+	UNIQUE uk_name (name,alias)
+) AUTO_INCREMENT = 256

+ 2 - 2
pom.xml

@@ -99,7 +99,7 @@
         <java_source_version>1.7</java_source_version>
         <java_target_version>1.7</java_target_version>
         <file_encoding>UTF-8</file_encoding>
-        <spring_version>3.2.9.RELEASE</spring_version>
+        <spring_version>3.2.18.RELEASE</spring_version>
     </properties>
 
     <modules>
@@ -247,7 +247,7 @@
             <dependency>
                 <groupId>com.alibaba.fastsql</groupId>
                 <artifactId>fastsql</artifactId>
-                <version>2.0.0_preview_540</version>
+                <version>2.0.0_preview_630</version>
             </dependency>
             <dependency>
                 <groupId>com.alibaba</groupId>

File diff suppressed because it is too large
+ 264 - 405
protocol/src/main/java/com/alibaba/otter/canal/protocol/CanalEntry.java


+ 4 - 3
protocol/src/main/java/com/alibaba/otter/canal/protocol/ClientIdentity.java

@@ -13,9 +13,10 @@ import com.alibaba.otter.canal.common.utils.CanalToStringStyle;
  */
 public class ClientIdentity implements Serializable {
 
-    private String destination;
-    private short  clientId;
-    private String filter;
+    private static final long serialVersionUID = -8262100681930834834L;
+    private String            destination;
+    private short             clientId;
+    private String            filter;
 
     public ClientIdentity(){
 

+ 25 - 7
protocol/src/main/java/com/alibaba/otter/canal/protocol/FlatMessage.java

@@ -1,7 +1,12 @@
 package com.alibaba.otter.canal.protocol;
 
 import java.io.Serializable;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 import com.google.protobuf.ByteString;
 
@@ -18,6 +23,9 @@ public class FlatMessage implements Serializable {
     private String                    table;
     private Boolean                   isDdl;
     private String                    type;
+    // binlog executeTime
+    private Long                      es;
+    // dml build timeStamp
     private Long                      ts;
     private String                    sql;
     private Map<String, Integer>      sqlType;
@@ -120,6 +128,14 @@ public class FlatMessage implements Serializable {
         this.old = old;
     }
 
+    public Long getEs() {
+        return es;
+    }
+
+    public void setEs(Long es) {
+        this.es = es;
+    }
+
     /**
      * 将Message转换为FlatMessage
      * 
@@ -147,9 +163,8 @@ public class FlatMessage implements Serializable {
                 try {
                     rowChange = CanalEntry.RowChange.parseFrom(entry.getStoreValue());
                 } catch (Exception e) {
-                    throw new RuntimeException(
-                        "ERROR ## parser of eromanga-event has an error , data:" + entry.toString(),
-                        e);
+                    throw new RuntimeException("ERROR ## parser of eromanga-event has an error , data:"
+                                               + entry.toString(), e);
                 }
 
                 CanalEntry.EventType eventType = rowChange.getEventType();
@@ -160,6 +175,7 @@ public class FlatMessage implements Serializable {
                 flatMessage.setTable(entry.getHeader().getTableName());
                 flatMessage.setIsDdl(rowChange.getIsDdl());
                 flatMessage.setType(eventType.toString());
+                flatMessage.setEs(entry.getHeader().getExecuteTime());
                 flatMessage.setTs(System.currentTimeMillis());
                 flatMessage.setSql(rowChange.getSql());
 
@@ -273,6 +289,8 @@ public class FlatMessage implements Serializable {
                         flatMessageTmp.setSql(flatMessage.getSql());
                         flatMessageTmp.setSqlType(flatMessage.getSqlType());
                         flatMessageTmp.setMysqlType(flatMessage.getMysqlType());
+                        flatMessageTmp.setEs(flatMessage.getEs());
+                        flatMessageTmp.setTs(flatMessage.getTs());
                     }
                     List<Map<String, String>> data = flatMessageTmp.getData();
                     if (data == null) {
@@ -297,8 +315,8 @@ public class FlatMessage implements Serializable {
 
     @Override
     public String toString() {
-        return "FlatMessage{" + "id=" + id + ", database='" + database + '\'' + ", table='" + table + '\'' + ", isDdl="
-               + isDdl + ", type='" + type + '\'' + ", ts=" + ts + ", sql='" + sql + '\'' + ", sqlType=" + sqlType
-               + ", mysqlType=" + mysqlType + ", data=" + data + ", old=" + old + '}';
+        return "FlatMessage [id=" + id + ", database=" + database + ", table=" + table + ", isDdl=" + isDdl + ", type="
+               + type + ", es=" + es + ", ts=" + ts + ", sql=" + sql + ", sqlType=" + sqlType + ", mysqlType="
+               + mysqlType + ", data=" + data + ", old=" + old + "]";
     }
 }

+ 5 - 0
server/pom.xml

@@ -42,6 +42,11 @@
 				</exclusion>
 			</exclusions>
 		</dependency>
+		<dependency>
+			<groupId>org.apache.rocketmq</groupId>
+			<artifactId>rocketmq-client</artifactId>
+			<version>4.3.0</version>
+		</dependency>
 		<!--kafka_2.11_1.1.1 exclusion掉了netty 的依赖,但CanalServerWithNetty 依赖 netty3,升级kafka至 1.1.1 需要显示加入,否则会启动失败 -->
 		<dependency>
 			<groupId>org.jboss.netty</groupId>

+ 73 - 0
server/src/main/java/com/alibaba/otter/canal/common/CanalMessageSerializer.java

@@ -0,0 +1,73 @@
+package com.alibaba.otter.canal.common;
+
+import java.util.List;
+
+import org.apache.kafka.common.errors.SerializationException;
+import org.springframework.util.CollectionUtils;
+
+import com.alibaba.otter.canal.protocol.CanalEntry;
+import com.alibaba.otter.canal.protocol.CanalPacket;
+import com.alibaba.otter.canal.protocol.CanalPacket.PacketType;
+import com.alibaba.otter.canal.protocol.Message;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.CodedOutputStream;
+import com.google.protobuf.WireFormat;
+
+public class CanalMessageSerializer {
+
+    @SuppressWarnings("deprecation")
+    public static byte[] serializer(Message data) {
+        try {
+            if (data != null) {
+                if (data.getId() != -1) {
+                    if (data.isRaw() && !CollectionUtils.isEmpty(data.getRawEntries())) {
+                        // for performance
+                        List<ByteString> rowEntries = data.getRawEntries();
+                        // message size
+                        int messageSize = 0;
+                        messageSize += CodedOutputStream.computeInt64Size(1, data.getId());
+
+                        int dataSize = 0;
+                        for (int i = 0; i < rowEntries.size(); i++) {
+                            dataSize += CodedOutputStream.computeBytesSizeNoTag(rowEntries.get(i));
+                        }
+                        messageSize += dataSize;
+                        messageSize += 1 * rowEntries.size();
+                        // packet size
+                        int size = 0;
+                        size += CodedOutputStream.computeEnumSize(3, PacketType.MESSAGES.getNumber());
+                        size += CodedOutputStream.computeTagSize(5)
+                                + CodedOutputStream.computeRawVarint32Size(messageSize) + messageSize;
+                        // build data
+                        byte[] body = new byte[size];
+                        CodedOutputStream output = CodedOutputStream.newInstance(body);
+                        output.writeEnum(3, PacketType.MESSAGES.getNumber());
+
+                        output.writeTag(5, WireFormat.WIRETYPE_LENGTH_DELIMITED);
+                        output.writeRawVarint32(messageSize);
+                        // message
+                        output.writeInt64(1, data.getId());
+                        for (int i = 0; i < rowEntries.size(); i++) {
+                            output.writeBytes(2, rowEntries.get(i));
+                        }
+                        output.checkNoSpaceLeft();
+                        return body;
+                    } else if (!CollectionUtils.isEmpty(data.getEntries())) {
+                        CanalPacket.Messages.Builder messageBuilder = CanalPacket.Messages.newBuilder();
+                        for (CanalEntry.Entry entry : data.getEntries()) {
+                            messageBuilder.addMessages(entry.toByteString());
+                        }
+
+                        CanalPacket.Packet.Builder packetBuilder = CanalPacket.Packet.newBuilder();
+                        packetBuilder.setType(PacketType.MESSAGES);
+                        packetBuilder.setBody(messageBuilder.build().toByteString());
+                        return packetBuilder.build().toByteArray();
+                    }
+                }
+            }
+        } catch (Exception e) {
+            throw new SerializationException("Error when serializing message to byte[] ");
+        }
+        return null;
+    }
+}

+ 10 - 2
server/src/main/java/com/alibaba/otter/canal/kafka/KafkaProperties.java → server/src/main/java/com/alibaba/otter/canal/common/MQProperties.java

@@ -1,4 +1,4 @@
-package com.alibaba.otter.canal.kafka;
+package com.alibaba.otter.canal.common;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -10,7 +10,7 @@ import java.util.Map;
  * @author machengyuan 2018-6-11 下午05:30:49
  * @version 1.0.0
  */
-public class KafkaProperties {
+public class MQProperties {
 
     private String                 servers                = "localhost:6667";
     private int                    retries                = 0;
@@ -18,6 +18,7 @@ public class KafkaProperties {
     private int                    lingerMs               = 1;
     private long                   bufferMemory           = 33554432L;
     private boolean                filterTransactionEntry = true;
+    private String                 producerGroup          = "Canal-Producer";
     private int                    canalBatchSize         = 50;
     private Long                   canalGetTimeout;
     private boolean                flatMessage            = true;
@@ -153,4 +154,11 @@ public class KafkaProperties {
         this.filterTransactionEntry = filterTransactionEntry;
     }
 
+    public String getProducerGroup() {
+        return producerGroup;
+    }
+
+    public void setProducerGroup(String producerGroup) {
+        this.producerGroup = producerGroup;
+    }
 }

+ 68 - 47
server/src/main/java/com/alibaba/otter/canal/kafka/CanalKafkaProducer.java

@@ -1,18 +1,23 @@
 package com.alibaba.otter.canal.kafka;
 
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Properties;
+import java.util.concurrent.Future;
 
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.Producer;
 import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.clients.producer.RecordMetadata;
 import org.apache.kafka.common.serialization.StringSerializer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.alibaba.fastjson.JSON;
+import com.alibaba.otter.canal.common.MQProperties;
 import com.alibaba.otter.canal.protocol.FlatMessage;
 import com.alibaba.otter.canal.protocol.Message;
+import com.alibaba.otter.canal.spi.CanalMQProducer;
 
 /**
  * kafka producer 主操作类
@@ -20,7 +25,7 @@ import com.alibaba.otter.canal.protocol.Message;
  * @author machengyuan 2018-6-11 下午05:30:49
  * @version 1.0.0
  */
-public class CanalKafkaProducer {
+public class CanalKafkaProducer implements CanalMQProducer {
 
     private static final Logger       logger = LoggerFactory.getLogger(CanalKafkaProducer.class);
 
@@ -28,9 +33,10 @@ public class CanalKafkaProducer {
 
     private Producer<String, String>  producer2;                                                 // 用于扁平message的数据投递
 
-    private KafkaProperties           kafkaProperties;
+    private MQProperties              kafkaProperties;
 
-    public void init(KafkaProperties kafkaProperties) {
+    @Override
+    public void init(MQProperties kafkaProperties) {
         this.kafkaProperties = kafkaProperties;
         Properties properties = new Properties();
         properties.put("bootstrap.servers", kafkaProperties.getServers());
@@ -51,6 +57,7 @@ public class CanalKafkaProducer {
         // producer.initTransactions();
     }
 
+    @Override
     public void stop() {
         try {
             logger.info("## stop the kafka producer");
@@ -67,10 +74,12 @@ public class CanalKafkaProducer {
         }
     }
 
-    public void send(KafkaProperties.CanalDestination canalDestination, Message message, Callback callback) {
-        try {
-            // producer.beginTransaction();
-            if (!kafkaProperties.getFlatMessage()) {
+    @Override
+    public void send(MQProperties.CanalDestination canalDestination, Message message, Callback callback) {
+
+        // producer.beginTransaction();
+        if (!kafkaProperties.getFlatMessage()) {
+            try {
                 ProducerRecord<String, Message> record;
                 if (canalDestination.getPartition() != null) {
                     record = new ProducerRecord<String, Message>(canalDestination.getTopic(),
@@ -81,64 +90,76 @@ public class CanalKafkaProducer {
                     record = new ProducerRecord<String, Message>(canalDestination.getTopic(), 0, null, message);
                 }
 
-                producer.send(record);
-            } else {
-                // 发送扁平数据json
-                List<FlatMessage> flatMessages = FlatMessage.messageConverter(message);
-                if (flatMessages != null) {
-                    for (FlatMessage flatMessage : flatMessages) {
-                        if (canalDestination.getPartition() != null) {
+                producer.send(record).get();
+            } catch (Exception e) {
+                logger.error(e.getMessage(), e);
+                // producer.abortTransaction();
+                callback.rollback();
+            }
+        } else {
+            // 发送扁平数据json
+            List<FlatMessage> flatMessages = FlatMessage.messageConverter(message);
+            if (flatMessages != null) {
+                for (FlatMessage flatMessage : flatMessages) {
+                    if (canalDestination.getPartition() != null) {
+                        try {
                             ProducerRecord<String, String> record = new ProducerRecord<String, String>(canalDestination
                                 .getTopic(), canalDestination.getPartition(), null, JSON.toJSONString(flatMessage));
                             producer2.send(record);
-                        } else {
-                            if (canalDestination.getPartitionHash() != null
-                                && !canalDestination.getPartitionHash().isEmpty()) {
-                                FlatMessage[] partitionFlatMessage = FlatMessage.messagePartition(flatMessage,
-                                    canalDestination.getPartitionsNum(),
-                                    canalDestination.getPartitionHash());
-                                int length = partitionFlatMessage.length;
-                                for (int i = 0; i < length; i++) {
-                                    FlatMessage flatMessagePart = partitionFlatMessage[i];
-                                    if (flatMessagePart != null) {
+                        } catch (Exception e) {
+                            logger.error(e.getMessage(), e);
+                            // producer.abortTransaction();
+                            callback.rollback();
+                        }
+                    } else {
+                        if (canalDestination.getPartitionHash() != null
+                            && !canalDestination.getPartitionHash().isEmpty()) {
+                            FlatMessage[] partitionFlatMessage = FlatMessage.messagePartition(flatMessage,
+                                canalDestination.getPartitionsNum(),
+                                canalDestination.getPartitionHash());
+                            int length = partitionFlatMessage.length;
+                            for (int i = 0; i < length; i++) {
+                                FlatMessage flatMessagePart = partitionFlatMessage[i];
+                                if (flatMessagePart != null) {
+                                    try {
                                         ProducerRecord<String, String> record = new ProducerRecord<String, String>(
-                                                canalDestination.getTopic(),
-                                                i,
-                                                null,
-                                                JSON.toJSONString(flatMessagePart));
-                                        producer2.send(record);
+                                            canalDestination.getTopic(),
+                                            i,
+                                            null,
+                                            JSON.toJSONString(flatMessagePart));
+                                        producer2.send(record).get();
+                                    } catch (Exception e) {
+                                        logger.error(e.getMessage(), e);
+                                        // producer.abortTransaction();
+                                        callback.rollback();
                                     }
                                 }
-                            } else {
+                            }
+                        } else {
+                            try {
                                 ProducerRecord<String, String> record = new ProducerRecord<String, String>(
                                     canalDestination.getTopic(),
                                     0,
                                     null,
                                     JSON.toJSONString(flatMessage));
-                                producer2.send(record);
+                                producer2.send(record).get();
+                            } catch (Exception e) {
+                                logger.error(e.getMessage(), e);
+                                // producer.abortTransaction();
+                                callback.rollback();
                             }
                         }
-
                     }
                 }
             }
-
-            // producer.commitTransaction();
-            callback.commit();
-            if (logger.isDebugEnabled()) {
-                logger.debug("send message to kafka topic: {}", canalDestination.getTopic());
-            }
-        } catch (Exception e) {
-            logger.error(e.getMessage(), e);
-            // producer.abortTransaction();
-            callback.rollback();
         }
-    }
-
-    public interface Callback {
 
-        void commit();
+        // producer.commitTransaction();
+        callback.commit();
+        if (logger.isDebugEnabled()) {
+            logger.debug("send message to kafka topic: {}", canalDestination.getTopic());
+        }
 
-        void rollback();
     }
+
 }

+ 2 - 62
server/src/main/java/com/alibaba/otter/canal/kafka/MessageSerializer.java

@@ -1,19 +1,11 @@
 package com.alibaba.otter.canal.kafka;
 
-import java.util.List;
 import java.util.Map;
 
-import org.apache.kafka.common.errors.SerializationException;
 import org.apache.kafka.common.serialization.Serializer;
-import org.springframework.util.CollectionUtils;
 
-import com.alibaba.otter.canal.protocol.CanalEntry;
-import com.alibaba.otter.canal.protocol.CanalPacket;
-import com.alibaba.otter.canal.protocol.CanalPacket.PacketType;
+import com.alibaba.otter.canal.common.CanalMessageSerializer;
 import com.alibaba.otter.canal.protocol.Message;
-import com.google.protobuf.ByteString;
-import com.google.protobuf.CodedOutputStream;
-import com.google.protobuf.WireFormat;
 
 /**
  * Kafka Message类的序列化
@@ -28,60 +20,8 @@ public class MessageSerializer implements Serializer<Message> {
     }
 
     @Override
-    @SuppressWarnings("deprecation")
     public byte[] serialize(String topic, Message data) {
-        try {
-            if (data != null) {
-                if (data.getId() != -1) {
-                    if (data.isRaw() && !CollectionUtils.isEmpty(data.getRawEntries())) {
-                        // for performance
-                        List<ByteString> rowEntries = data.getRawEntries();
-                        // message size
-                        int messageSize = 0;
-                        messageSize += CodedOutputStream.computeInt64Size(1, data.getId());
-
-                        int dataSize = 0;
-                        for (int i = 0; i < rowEntries.size(); i++) {
-                            dataSize += CodedOutputStream.computeBytesSizeNoTag(rowEntries.get(i));
-                        }
-                        messageSize += dataSize;
-                        messageSize += 1 * rowEntries.size();
-                        // packet size
-                        int size = 0;
-                        size += CodedOutputStream.computeEnumSize(3, PacketType.MESSAGES.getNumber());
-                        size += CodedOutputStream.computeTagSize(5)
-                                + CodedOutputStream.computeRawVarint32Size(messageSize) + messageSize;
-                        // build data
-                        byte[] body = new byte[size];
-                        CodedOutputStream output = CodedOutputStream.newInstance(body);
-                        output.writeEnum(3, PacketType.MESSAGES.getNumber());
-
-                        output.writeTag(5, WireFormat.WIRETYPE_LENGTH_DELIMITED);
-                        output.writeRawVarint32(messageSize);
-                        // message
-                        output.writeInt64(1, data.getId());
-                        for (int i = 0; i < rowEntries.size(); i++) {
-                            output.writeBytes(2, rowEntries.get(i));
-                        }
-                        output.checkNoSpaceLeft();
-                        return body;
-                    } else if (!CollectionUtils.isEmpty(data.getEntries())) {
-                        CanalPacket.Messages.Builder messageBuilder = CanalPacket.Messages.newBuilder();
-                        for (CanalEntry.Entry entry : data.getEntries()) {
-                            messageBuilder.addMessages(entry.toByteString());
-                        }
-
-                        CanalPacket.Packet.Builder packetBuilder = CanalPacket.Packet.newBuilder();
-                        packetBuilder.setType(PacketType.MESSAGES);
-                        packetBuilder.setBody(messageBuilder.build().toByteString());
-                        return packetBuilder.build().toByteArray();
-                    }
-                }
-            }
-        } catch (Exception e) {
-            throw new SerializationException("Error when serializing message to byte[] ");
-        }
-        return null;
+        return CanalMessageSerializer.serializer(data);
     }
 
     @Override

+ 66 - 0
server/src/main/java/com/alibaba/otter/canal/rocketmq/CanalRocketMQProducer.java

@@ -0,0 +1,66 @@
+package com.alibaba.otter.canal.rocketmq;
+
+import com.alibaba.otter.canal.common.CanalMessageSerializer;
+import com.alibaba.otter.canal.common.MQProperties;
+import com.alibaba.otter.canal.server.exception.CanalServerException;
+import com.alibaba.otter.canal.spi.CanalMQProducer;
+import java.util.List;
+import org.apache.rocketmq.client.exception.MQBrokerException;
+import org.apache.rocketmq.client.exception.MQClientException;
+import org.apache.rocketmq.client.producer.DefaultMQProducer;
+import org.apache.rocketmq.client.producer.MessageQueueSelector;
+import org.apache.rocketmq.common.message.Message;
+import org.apache.rocketmq.common.message.MessageQueue;
+import org.apache.rocketmq.remoting.exception.RemotingException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class CanalRocketMQProducer implements CanalMQProducer {
+
+    private static final Logger logger = LoggerFactory.getLogger(CanalRocketMQProducer.class);
+
+    private DefaultMQProducer   defaultMQProducer;
+
+    @Override
+    public void init(MQProperties rocketMQProperties) {
+        defaultMQProducer = new DefaultMQProducer();
+        defaultMQProducer.setNamesrvAddr(rocketMQProperties.getServers());
+        defaultMQProducer.setProducerGroup(rocketMQProperties.getProducerGroup());
+        defaultMQProducer.setRetryTimesWhenSendFailed(rocketMQProperties.getRetries());
+        logger.info("##Start RocketMQ producer##");
+        try {
+            defaultMQProducer.start();
+        } catch (MQClientException ex) {
+            throw new CanalServerException("Start RocketMQ producer error", ex);
+        }
+    }
+
+    @Override
+    public void send(final MQProperties.CanalDestination destination, com.alibaba.otter.canal.protocol.Message data,
+                     Callback callback) {
+        try {
+            Message message = new Message(destination.getTopic(), CanalMessageSerializer.serializer(data));
+            this.defaultMQProducer.send(message, new MessageQueueSelector() {
+
+                @Override
+                public MessageQueue select(List<MessageQueue> mqs, Message msg, Object arg) {
+                    int partition = 0;
+                    if (destination.getPartition() != null) {
+                        partition = destination.getPartition();
+                    }
+                    return mqs.get(partition);
+                }
+            }, null);
+            callback.commit();
+        } catch (MQClientException | RemotingException | MQBrokerException | InterruptedException e) {
+            logger.error("Send message error!", e);
+            callback.rollback();
+        }
+    }
+
+    @Override
+    public void stop() {
+        logger.info("## Stop RocketMQ producer##");
+        this.defaultMQProducer.shutdown();
+    }
+}

+ 38 - 38
server/src/main/java/com/alibaba/otter/canal/kafka/CanalKafkaStarter.java → server/src/main/java/com/alibaba/otter/canal/server/CanalMQStarter.java

@@ -1,4 +1,4 @@
-package com.alibaba.otter.canal.kafka;
+package com.alibaba.otter.canal.server;
 
 import java.io.FileInputStream;
 import java.util.List;
@@ -11,21 +11,16 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.yaml.snakeyaml.Yaml;
 
-import com.alibaba.otter.canal.kafka.KafkaProperties.CanalDestination;
+import com.alibaba.otter.canal.common.MQProperties;
+import com.alibaba.otter.canal.kafka.CanalKafkaProducer;
 import com.alibaba.otter.canal.protocol.ClientIdentity;
 import com.alibaba.otter.canal.protocol.Message;
-import com.alibaba.otter.canal.server.CanalServerStarter;
 import com.alibaba.otter.canal.server.embedded.CanalServerWithEmbedded;
+import com.alibaba.otter.canal.spi.CanalMQProducer;
 
-/**
- * kafka 启动类
- *
- * @author machengyuan 2018-6-11 下午05:30:49
- * @version 1.0.0
- */
-public class CanalKafkaStarter implements CanalServerStarter {
+public class CanalMQStarter {
 
-    private static final Logger logger               = LoggerFactory.getLogger(CanalKafkaStarter.class);
+    private static final Logger logger               = LoggerFactory.getLogger(CanalMQStarter.class);
 
     private static final String CLASSPATH_URL_PREFIX = "classpath:";
 
@@ -33,37 +28,42 @@ public class CanalKafkaStarter implements CanalServerStarter {
 
     private ExecutorService     executorService;
 
-    private CanalKafkaProducer  canalKafkaProducer;
+    private CanalMQProducer     canalMQProducer;
 
-    private KafkaProperties     kafkaProperties;
+    private MQProperties        properties;
+
+    public CanalMQStarter(CanalMQProducer canalMQProducer){
+        this.canalMQProducer = canalMQProducer;
+    }
 
     public void init() {
         try {
-            logger.info("## load kafka configurations");
-            String conf = System.getProperty("kafka.conf", "classpath:kafka.yml");
+            logger.info("## load MQ configurations");
+            String conf = System.getProperty("mq.conf", "classpath:mq.yml");
 
             if (conf.startsWith(CLASSPATH_URL_PREFIX)) {
                 conf = StringUtils.substringAfter(conf, CLASSPATH_URL_PREFIX);
-                kafkaProperties = new Yaml().loadAs(CanalKafkaStarter.class.getClassLoader().getResourceAsStream(conf),
-                    KafkaProperties.class);
+                properties = new Yaml().loadAs(CanalMQStarter.class.getClassLoader().getResourceAsStream(conf),
+                    MQProperties.class);
             } else {
-                kafkaProperties = new Yaml().loadAs(new FileInputStream(conf), KafkaProperties.class);
+                properties = new Yaml().loadAs(new FileInputStream(conf), MQProperties.class);
             }
 
             // 初始化 kafka producer
-            canalKafkaProducer = new CanalKafkaProducer();
-            canalKafkaProducer.init(kafkaProperties);
+            // canalMQProducer = new CanalKafkaProducer();
+            canalMQProducer.init(properties);
             // set filterTransactionEntry
-            // if (kafkaProperties.isFilterTransactionEntry()) {
-            // System.setProperty("canal.instance.filter.transaction.entry", "true");
-            // }
+            if (properties.isFilterTransactionEntry()) {
+                System.setProperty("canal.instance.filter.transaction.entry", "true");
+            }
+
             // 对应每个instance启动一个worker线程
-            List<CanalDestination> destinations = kafkaProperties.getCanalDestinations();
+            List<MQProperties.CanalDestination> destinations = properties.getCanalDestinations();
 
             executorService = Executors.newFixedThreadPool(destinations.size());
 
-            logger.info("## start the kafka workers.");
-            for (final CanalDestination destination : destinations) {
+            logger.info("## start the MQ workers.");
+            for (final MQProperties.CanalDestination destination : destinations) {
                 executorService.execute(new Runnable() {
 
                     @Override
@@ -73,31 +73,31 @@ public class CanalKafkaStarter implements CanalServerStarter {
                 });
             }
             running = true;
-            logger.info("## the kafka workers is running now ......");
+            logger.info("## the MQ workers is running now ......");
             Runtime.getRuntime().addShutdownHook(new Thread() {
 
                 public void run() {
                     try {
-                        logger.info("## stop the kafka workers");
+                        logger.info("## stop the MQ workers");
                         running = false;
                         executorService.shutdown();
-                        canalKafkaProducer.stop();
+                        canalMQProducer.stop();
                     } catch (Throwable e) {
-                        logger.warn("##something goes wrong when stopping kafka workers:", e);
+                        logger.warn("##something goes wrong when stopping MQ workers:", e);
                     } finally {
-                        logger.info("## canal kafka is down.");
+                        logger.info("## canal MQ is down.");
                     }
                 }
 
             });
 
         } catch (Throwable e) {
-            logger.error("## Something goes wrong when starting up the canal kafka workers:", e);
+            logger.error("## Something goes wrong when starting up the canal MQ workers:", e);
             System.exit(0);
         }
     }
 
-    private void worker(CanalDestination destination) {
+    private void worker(MQProperties.CanalDestination destination) {
         while (!running)
             ;
         logger.info("## start the canal consumer: {}.", destination.getCanalDestination());
@@ -118,20 +118,20 @@ public class CanalKafkaStarter implements CanalServerStarter {
 
                 while (running) {
                     Message message;
-                    if (kafkaProperties.getCanalGetTimeout() != null) {
+                    if (properties.getCanalGetTimeout() != null) {
                         message = server.getWithoutAck(clientIdentity,
-                            kafkaProperties.getCanalBatchSize(),
-                            kafkaProperties.getCanalGetTimeout(),
+                            properties.getCanalBatchSize(),
+                            properties.getCanalGetTimeout(),
                             TimeUnit.MILLISECONDS);
                     } else {
-                        message = server.getWithoutAck(clientIdentity, kafkaProperties.getCanalBatchSize());
+                        message = server.getWithoutAck(clientIdentity, properties.getCanalBatchSize());
                     }
 
                     final long batchId = message.getId();
                     try {
                         int size = message.isRaw() ? message.getRawEntries().size() : message.getEntries().size();
                         if (batchId != -1 && size != 0) {
-                            canalKafkaProducer.send(destination, message, new CanalKafkaProducer.Callback() {
+                            canalMQProducer.send(destination, message, new CanalKafkaProducer.Callback() {
 
                                 @Override
                                 public void commit() {

+ 1 - 1
server/src/main/java/com/alibaba/otter/canal/server/netty/handler/SessionHandler.java

@@ -50,7 +50,7 @@ public class SessionHandler extends SimpleChannelHandler {
         this.embeddedServer = embeddedServer;
     }
 
-    @SuppressWarnings("deprecation")
+    @SuppressWarnings({ "deprecation" })
     public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
         logger.info("message receives in session handler...");
         long start = System.nanoTime();

+ 37 - 0
server/src/main/java/com/alibaba/otter/canal/spi/CanalMQProducer.java

@@ -0,0 +1,37 @@
+package com.alibaba.otter.canal.spi;
+
+import java.io.IOException;
+
+import com.alibaba.otter.canal.common.MQProperties;
+import com.alibaba.otter.canal.protocol.Message;
+
+public interface CanalMQProducer {
+
+    /**
+     * Init producer.
+     *
+     * @param mqProperties MQ config
+     */
+    void init(MQProperties mqProperties);
+
+    /**
+     * Send canal message to related topic
+     *
+     * @param canalDestination canal mq destination
+     * @param message canal message
+     * @throws IOException
+     */
+    void send(MQProperties.CanalDestination canalDestination, Message message, Callback callback) throws IOException;
+
+    /**
+     * Stop MQ producer service
+     */
+    void stop();
+
+    interface Callback {
+
+        void commit();
+
+        void rollback();
+    }
+}

+ 1 - 0
server/src/test/java/com/alibaba/otter/canal/server/ProtocolTest.java

@@ -56,6 +56,7 @@ public class ProtocolTest {
         }
     }
 
+    @SuppressWarnings("deprecation")
     private byte[] buildData(Message message) throws IOException {
         List<ByteString> rowEntries = message.getRawEntries();
         // message size

+ 9 - 6
store/src/main/java/com/alibaba/otter/canal/store/memory/MemoryEventStoreWithBuffer.java

@@ -7,6 +7,8 @@ import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
 
+import org.apache.commons.lang.StringUtils;
+
 import com.alibaba.otter.canal.protocol.CanalEntry;
 import com.alibaba.otter.canal.protocol.CanalEntry.EventType;
 import com.alibaba.otter.canal.protocol.position.LogPosition;
@@ -37,14 +39,14 @@ public class MemoryEventStoreWithBuffer extends AbstractCanalStoreScavenge imple
 
     private static final long INIT_SEQUENCE = -1;
     private int               bufferSize    = 16 * 1024;
-    private int               bufferMemUnit = 1024;                         // memsize的单位,默认为1kb大小
+    private int               bufferMemUnit = 1024;                                      // memsize的单位,默认为1kb大小
     private int               indexMask;
     private Event[]           entries;
 
     // 记录下put/get/ack操作的三个下标
-    private AtomicLong        putSequence   = new AtomicLong(INIT_SEQUENCE); // 代表当前put操作最后一次写操作发生的位置
-    private AtomicLong        getSequence   = new AtomicLong(INIT_SEQUENCE); // 代表当前get操作读取的最后一条的位置
-    private AtomicLong        ackSequence   = new AtomicLong(INIT_SEQUENCE); // 代表当前ack操作的最后一条的位置
+    private AtomicLong        putSequence   = new AtomicLong(INIT_SEQUENCE);             // 代表当前put操作最后一次写操作发生的位置
+    private AtomicLong        getSequence   = new AtomicLong(INIT_SEQUENCE);             // 代表当前get操作读取的最后一条的位置
+    private AtomicLong        ackSequence   = new AtomicLong(INIT_SEQUENCE);             // 代表当前ack操作的最后一条的位置
 
     // 记录下put/get/ack操作的三个memsize大小
     private AtomicLong        putMemSize    = new AtomicLong(0);
@@ -66,7 +68,7 @@ public class MemoryEventStoreWithBuffer extends AbstractCanalStoreScavenge imple
     private Condition         notFull       = lock.newCondition();
     private Condition         notEmpty      = lock.newCondition();
 
-    private BatchMode         batchMode     = BatchMode.ITEMSIZE;           // 默认为内存大小模式
+    private BatchMode         batchMode     = BatchMode.ITEMSIZE;                        // 默认为内存大小模式
     private boolean           ddlIsolation  = false;
 
     public MemoryEventStoreWithBuffer(){
@@ -335,7 +337,8 @@ public class MemoryEventStoreWithBuffer extends AbstractCanalStoreScavenge imple
 
         for (int i = entrys.size() - 1; i >= 0; i--) {
             Event event = entrys.get(i);
-            if (CanalEntry.EntryType.TRANSACTIONBEGIN == event.getEntryType()
+            // GTID模式,ack的位点必须是事务结尾,因为下一次订阅的时候mysql会发送这个gtid之后的next,如果在事务头就记录了会丢这最后一个事务
+            if ((CanalEntry.EntryType.TRANSACTIONBEGIN == event.getEntryType() && StringUtils.isEmpty(event.getGtid()))
                 || CanalEntry.EntryType.TRANSACTIONEND == event.getEntryType() || isDdl(event.getEventType())) {
                 // 将事务头/尾设置可被为ack的点
                 range.setAck(CanalEventUtils.createPosition(event));

Some files were not shown because too many files changed in this diff