Browse Source

Merge branch 'master' into pyf2

Payon 6 years ago
parent
commit
b1dc5bd59f
100 changed files with 5158 additions and 377 deletions
  1. 2 0
      .gitignore
  2. 10 0
      README.md
  3. 1 0
      client/src/main/java/com/alibaba/otter/canal/client/CanalConnector.java
  4. 6 3
      client/src/main/java/com/alibaba/otter/canal/client/CanalConnectors.java
  5. 21 10
      client/src/main/java/com/alibaba/otter/canal/client/impl/ClusterCanalConnector.java
  6. 45 10
      client/src/main/java/com/alibaba/otter/canal/client/impl/SimpleCanalConnector.java
  7. 4 0
      client/src/main/java/com/alibaba/otter/canal/client/impl/running/ClientRunningMonitor.java
  8. 5 0
      common/src/main/java/com/alibaba/otter/canal/common/utils/NamedThreadFactory.java
  9. 0 1
      common/src/main/java/com/alibaba/otter/canal/common/zookeeper/running/ServerRunningMonitor.java
  10. 0 1
      dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/LogContext.java
  11. 2 0
      dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/LogDecoder.java
  12. 20 0
      dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/GtidLogEvent.java
  13. 13 0
      dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/LogHeader.java
  14. 66 59
      dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/RowsLogBuffer.java
  15. 1 1
      dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/RowsLogEvent.java
  16. 52 0
      dbsync/src/test/java/com/taobao/tddl/dbsync/FetcherPerformanceTest.java
  17. 41 52
      dbsync/src/test/java/com/taobao/tddl/dbsync/binlog/DirectLogFetcherTest.java
  18. 8 0
      deployer/pom.xml
  19. 15 0
      deployer/src/main/bin/metrics_env.sh
  20. 5 0
      deployer/src/main/bin/restart.sh
  21. 6 1
      deployer/src/main/bin/startup.sh
  22. 1 0
      deployer/src/main/java/com/alibaba/otter/canal/deployer/CanalConstants.java
  23. 28 9
      deployer/src/main/java/com/alibaba/otter/canal/deployer/CanalController.java
  24. 1 0
      deployer/src/main/java/com/alibaba/otter/canal/deployer/CanalLauncher.java
  25. 17 1
      deployer/src/main/resources/canal.properties
  26. 9 10
      deployer/src/main/resources/example/instance.properties
  27. 5 1
      deployer/src/main/resources/logback.xml
  28. 9 3
      deployer/src/main/resources/spring/default-instance.xml
  29. 9 3
      deployer/src/main/resources/spring/file-instance.xml
  30. 15 4
      deployer/src/main/resources/spring/group-instance.xml
  31. 7 1
      deployer/src/main/resources/spring/local-instance.xml
  32. 9 3
      deployer/src/main/resources/spring/memory-instance.xml
  33. 2 2
      deployer/src/main/resources/spring/tsdb/h2-tsdb.xml
  34. 2 2
      deployer/src/main/resources/spring/tsdb/mysql-tsdb.xml
  35. 67 0
      docker/Dockerfile
  36. 30 0
      docker/build.sh
  37. 117 0
      docker/image/admin/app.sh
  38. 2 0
      docker/image/admin/bin/clean_log
  39. 45 0
      docker/image/admin/bin/clean_log.sh
  40. 13 0
      docker/image/admin/health.sh
  41. 11 0
      docker/image/alidata/bin/exec_rc_local.sh
  42. 6 0
      docker/image/alidata/bin/lark-wait
  43. 27 0
      docker/image/alidata/bin/main.sh
  44. 19 0
      docker/image/alidata/init/02init-sshd.sh
  45. 66 0
      docker/image/alidata/init/fix-hosts.py
  46. 40 0
      docker/image/alidata/lib/proc.sh
  47. 92 0
      docker/run.sh
  48. 2 2
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/MysqlConnector.java
  49. 2 2
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/packets/GTIDSet.java
  50. 25 60
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/packets/MysqlGTIDSet.java
  51. 22 30
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/packets/UUIDSet.java
  52. 18 20
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/packets/client/BinlogDumpGTIDCommandPacket.java
  53. 33 1
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/socket/BioSocketChannel.java
  54. 0 2
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/socket/BioSocketChannelPool.java
  55. 7 0
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/socket/NettySocketChannel.java
  56. 3 4
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/socket/NettySocketChannelPool.java
  57. 2 0
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/socket/SocketChannel.java
  58. 2 2
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/utils/ByteHelper.java
  59. 4 4
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/utils/PacketManager.java
  60. 35 45
      driver/src/test/java/com/alibaba/otter/canal/parse/driver/mysql/MysqlGTIDSetTest.java
  61. 5 5
      driver/src/test/java/com/alibaba/otter/canal/parse/driver/mysql/UUIDSetTest.java
  62. 76 0
      example/pom.xml
  63. 2 9
      example/src/main/java/com/alibaba/otter/canal/example/AbstractCanalClientTest.java
  64. 68 0
      example/src/main/java/com/alibaba/otter/canal/example/SimpleCanalClientPermanceTest.java
  65. 144 0
      example/src/main/java/com/alibaba/otter/canal/example/db/AbstractDbClient.java
  66. 488 0
      example/src/main/java/com/alibaba/otter/canal/example/db/CanalConnectorClient.java
  67. 35 0
      example/src/main/java/com/alibaba/otter/canal/example/db/MysqlLoadLauncher.java
  68. 169 0
      example/src/main/java/com/alibaba/otter/canal/example/db/PropertyPlaceholderConfigurer.java
  69. 44 0
      example/src/main/java/com/alibaba/otter/canal/example/db/ServiceLocator.java
  70. 121 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/AbstractDbDialect.java
  71. 105 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/AbstractSqlTemplate.java
  72. 20 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/DbDialect.java
  73. 40 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/SqlTemplate.java
  74. 93 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/TableType.java
  75. 32 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/mysql/MysqlDialect.java
  76. 84 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/mysql/MysqlSqlTemplate.java
  77. 207 0
      example/src/main/java/com/alibaba/otter/canal/example/db/mysql/AbstractMysqlClient.java
  78. 23 0
      example/src/main/java/com/alibaba/otter/canal/example/db/mysql/MysqlClient.java
  79. 50 0
      example/src/main/java/com/alibaba/otter/canal/example/db/utils/ByteArrayConverter.java
  80. 326 0
      example/src/main/java/com/alibaba/otter/canal/example/db/utils/DdlUtils.java
  81. 140 0
      example/src/main/java/com/alibaba/otter/canal/example/db/utils/SqlTimestampConverter.java
  82. 315 0
      example/src/main/java/com/alibaba/otter/canal/example/db/utils/SqlUtils.java
  83. 53 0
      example/src/main/resources/client-spring.xml
  84. 16 0
      example/src/main/resources/client.properties
  85. 11 13
      instance/manager/src/main/java/com/alibaba/otter/canal/instance/manager/CanalInstanceWithManager.java
  86. 25 1
      instance/manager/src/main/java/com/alibaba/otter/canal/instance/manager/model/CanalParameter.java
  87. 111 0
      kafka-client/pom.xml
  88. 242 0
      kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/KafkaCanalConnector.java
  89. 62 0
      kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/KafkaCanalConnectors.java
  90. 64 0
      kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/MessageDeserializer.java
  91. 39 0
      kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/running/ClientRunningData.java
  92. 21 0
      kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/running/ClientRunningListener.java
  93. 281 0
      kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/running/ClientRunningMonitor.java
  94. 26 0
      kafka-client/src/test/java/com/alibaba/otter/canal/kafka/client/running/AbstractKafkaTest.java
  95. 144 0
      kafka-client/src/test/java/com/alibaba/otter/canal/kafka/client/running/CanalKafkaClientExample.java
  96. 62 0
      kafka-client/src/test/java/com/alibaba/otter/canal/kafka/client/running/KafkaClientRunningTest.java
  97. 19 0
      kafka-client/src/test/resources/logback.xml
  98. 140 0
      kafka/pom.xml
  99. 64 0
      kafka/src/main/assembly/dev.xml
  100. 64 0
      kafka/src/main/assembly/release.xml

+ 2 - 0
.gitignore

@@ -14,3 +14,5 @@ jtester.properties
 .idea/
 *.iml
 .DS_Store
+*.tar.gz
+*.rpm

+ 10 - 0
README.md

@@ -5,6 +5,8 @@
 <ol>
 <li>canal QQ讨论群已经建立,群号:161559791 ,欢迎加入进行技术讨论。</li>
 <li>canal消费端项目开源: Otter(分布式数据库同步系统),地址:<a href="https://github.com/alibaba/otter">https://github.com/alibaba/otter</a></li>
+<li>Canal已在阿里云推出商业化版本 <a href="https://www.aliyun.com/product/dts?spm=a2c4g.11186623.cloudEssentials.80.srdwr7">数据传输服务DTS</a>, 开通即用,免去部署维护的昂贵使用成本。DTS针对阿里云RDS、DRDS等产品进行了适配,解决了Binlog日志回收,主备切换、VPC网络切换等场景下的订阅高可用问题。同时,针对RDS进行了针对性的性能优化。出于稳定性、性能及成本的考虑,强烈推荐阿里云用户使用DTS产品。<a href="https://help.aliyun.com/document_detail/26592.html?spm=a2c4g.11174283.6.539.t1Y91E">DTS产品使用文档</a></li>
+DTS支持阿里云RDS&DRDS的Binlog日志实时订阅,现推出首月免费体验,限时限量,<a href="https://common-buy.aliyun.com/?commodityCode=dtspre&request=%7b%22dts_function%22%3a%22data_subscribe%22%7d">立即体验>>></a>
 </ol>
 
 <h1>背景</h1>
@@ -73,6 +75,14 @@ See the wiki page for : <a href="https://github.com/alibaba/canal/wiki" >wiki文
 <li>阿里巴巴去Oracle数据迁移同步工具(目标支持MySQL/DRDS):<a href="http://github.com/alibaba/yugong">http://github.com/alibaba/yugong</a></li>
 </ol>
 
+<h1>相关产品</h1>
+<ol>
+<li><a href="https://www.aliyun.com/product/drds?spm=5176.55326.cloudEssentials.71.69fd227dRPZj9K">阿里云分布式数据库DRDS</a></li>
+<li><a href="https://www.aliyun.com/product/dts?spm=5176.7947010.cloudEssentials.80.33f734f4JOAxSP">阿里云数据传输服务DTS</a></li>
+<li><a href="https://www.aliyun.com/product/dbs?spm=5176.54487.cloudEssentials.83.34b851a8GmVZg6">阿里云数据库备份服务DBS</a></li>
+<li><a href="https://www.aliyun.com/product/dms?spm=5176.169464.cloudEssentials.81.2e1066feC1sBBL">阿里云数据管理服务DMS</a></li>
+</ol>
+
 <h1>问题反馈</h1>
 <ol>
 <li>qq交流群: 161559791 </li>

+ 1 - 0
client/src/main/java/com/alibaba/otter/canal/client/CanalConnector.java

@@ -152,6 +152,7 @@ public interface CanalConnector {
 
     /**
      * 中断的阻塞,用于优雅停止client
+     * 
      * @throws CanalClientException
      */
     void stopRunning() throws CanalClientException;

+ 6 - 3
client/src/main/java/com/alibaba/otter/canal/client/CanalConnectors.java

@@ -29,7 +29,8 @@ public class CanalConnectors {
     public static CanalConnector newSingleConnector(SocketAddress address, String destination, String username,
                                                     String password) {
         SimpleCanalConnector canalConnector = new SimpleCanalConnector(address, username, password, destination);
-        canalConnector.setSoTimeout(30 * 1000);
+        canalConnector.setSoTimeout(60 * 1000);
+        canalConnector.setIdleTimeout(60 * 60 * 1000);
         return canalConnector;
     }
 
@@ -48,7 +49,8 @@ public class CanalConnectors {
             password,
             destination,
             new SimpleNodeAccessStrategy(addresses));
-        canalConnector.setSoTimeout(30 * 1000);
+        canalConnector.setSoTimeout(60 * 1000);
+        canalConnector.setIdleTimeout(60 * 60 * 1000);
         return canalConnector;
     }
 
@@ -67,7 +69,8 @@ public class CanalConnectors {
             password,
             destination,
             new ClusterNodeAccessStrategy(destination, ZkClientx.getZkClient(zkServers)));
-        canalConnector.setSoTimeout(30 * 1000);
+        canalConnector.setSoTimeout(60 * 1000);
+        canalConnector.setIdleTimeout(60 * 60 * 1000);
         return canalConnector;
     }
 }

+ 21 - 10
client/src/main/java/com/alibaba/otter/canal/client/impl/ClusterCanalConnector.java

@@ -22,7 +22,8 @@ public class ClusterCanalConnector implements CanalConnector {
     private final Logger            logger        = LoggerFactory.getLogger(this.getClass());
     private String                  username;
     private String                  password;
-    private int                     soTimeout     = 10000;
+    private int                     soTimeout     = 60000;
+    private int                     idleTimeout   = 60 * 60 * 1000;
     private int                     retryTimes    = 3;                                       // 设置-1时可以subscribe阻塞等待时优雅停机
     private int                     retryInterval = 5000;                                    // 重试的时间间隔,默认5秒
     private CanalNodeAccessStrategy accessStrategy;
@@ -52,6 +53,7 @@ public class ClusterCanalConnector implements CanalConnector {
 
                     };
                     currentConnector.setSoTimeout(soTimeout);
+                    currentConnector.setIdleTimeout(idleTimeout);
                     if (filter != null) {
                         currentConnector.setFilter(filter);
                     }
@@ -110,10 +112,8 @@ public class ClusterCanalConnector implements CanalConnector {
                     logger.info("block waiting interrupted by other thread.");
                     return;
                 } else {
-                    logger.warn(String.format(
-                            "something goes wrong when subscribing from server: %s",
-                            currentConnector != null ? currentConnector.getAddress() : "null"),
-                            t);
+                    logger.warn(String.format("something goes wrong when subscribing from server: %s",
+                        currentConnector != null ? currentConnector.getAddress() : "null"), t);
                     times++;
                     restart();
                     logger.info("restart the connector for next round retry.");
@@ -184,7 +184,7 @@ public class ClusterCanalConnector implements CanalConnector {
                 return msg;
             } catch (Throwable t) {
                 logger.warn(String.format("something goes wrong when getWithoutAck data from server:%s",
-                    currentConnector.getAddress()), t);
+                    currentConnector != null ? currentConnector.getAddress() : "null"), t);
                 times++;
                 restart();
                 logger.info("restart the connector for next round retry.");
@@ -201,7 +201,7 @@ public class ClusterCanalConnector implements CanalConnector {
                 return msg;
             } catch (Throwable t) {
                 logger.warn(String.format("something goes wrong when getWithoutAck data from server:%s",
-                    currentConnector.getAddress()), t);
+                    currentConnector != null ? currentConnector.getAddress() : "null"), t);
                 times++;
                 restart();
                 logger.info("restart the connector for next round retry.");
@@ -218,7 +218,8 @@ public class ClusterCanalConnector implements CanalConnector {
                 return;
             } catch (Throwable t) {
                 logger.warn(String.format("something goes wrong when rollbacking data from server:%s",
-                    currentConnector.getAddress()), t);
+                    currentConnector != null ? currentConnector.getAddress() : "null"),
+                    t);
                 times++;
                 restart();
                 logger.info("restart the connector for next round retry.");
@@ -235,7 +236,8 @@ public class ClusterCanalConnector implements CanalConnector {
                 return;
             } catch (Throwable t) {
                 logger.warn(String.format("something goes wrong when rollbacking data from server:%s",
-                    currentConnector.getAddress()), t);
+                    currentConnector != null ? currentConnector.getAddress() : "null"),
+                    t);
                 times++;
                 restart();
                 logger.info("restart the connector for next round retry.");
@@ -253,7 +255,8 @@ public class ClusterCanalConnector implements CanalConnector {
                 return;
             } catch (Throwable t) {
                 logger.warn(String.format("something goes wrong when acking data from server:%s",
-                    currentConnector.getAddress()), t);
+                    currentConnector != null ? currentConnector.getAddress() : "null"),
+                    t);
                 times++;
                 restart();
                 logger.info("restart the connector for next round retry.");
@@ -300,6 +303,14 @@ public class ClusterCanalConnector implements CanalConnector {
         this.soTimeout = soTimeout;
     }
 
+    public int getIdleTimeout() {
+        return idleTimeout;
+    }
+
+    public void setIdleTimeout(int idleTimeout) {
+        this.idleTimeout = idleTimeout;
+    }
+
     public int getRetryTimes() {
         return retryTimes;
     }

+ 45 - 10
client/src/main/java/com/alibaba/otter/canal/client/impl/SimpleCanalConnector.java

@@ -55,6 +55,7 @@ public class SimpleCanalConnector implements CanalConnector {
     private String               username;
     private String               password;
     private int                  soTimeout             = 60000;                                              // milliseconds
+    private int                  idleTimeout           = 60 * 60 * 1000;                                     // client和server之间的空闲链接超时的时间,默认为1小时
     private String               filter;                                                                     // 记录上一次的filter提交值,便于自动重试时提交
 
     private final ByteBuffer     readHeader            = ByteBuffer.allocate(4).order(ByteOrder.BIG_ENDIAN);
@@ -70,23 +71,29 @@ public class SimpleCanalConnector implements CanalConnector {
     private volatile boolean     connected             = false;                                              // 代表connected是否已正常执行,因为有HA,不代表在工作中
     private boolean              rollbackOnConnect     = true;                                               // 是否在connect链接成功后,自动执行rollback操作
     private boolean              rollbackOnDisConnect  = false;                                              // 是否在connect链接成功后,自动执行rollback操作
-
+    private boolean              lazyParseEntry        = false;                                              // 是否自动化解析Entry对象,如果考虑最大化性能可以延后解析
     // 读写数据分别使用不同的锁进行控制,减小锁粒度,读也需要排他锁,并发度容易造成数据包混乱,反序列化失败
     private Object               readDataLock          = new Object();
     private Object               writeDataLock         = new Object();
 
-    private boolean              running               = false;
+    private volatile boolean     running               = false;
 
     public SimpleCanalConnector(SocketAddress address, String username, String password, String destination){
-        this(address, username, password, destination, 60000);
+        this(address, username, password, destination, 60000, 60 * 60 * 1000);
     }
 
     public SimpleCanalConnector(SocketAddress address, String username, String password, String destination,
                                 int soTimeout){
+        this(address, username, password, destination, soTimeout, 60 * 60 * 1000);
+    }
+
+    public SimpleCanalConnector(SocketAddress address, String username, String password, String destination,
+                                int soTimeout, int idleTimeout){
         this.address = address;
         this.username = username;
         this.password = password;
         this.soTimeout = soTimeout;
+        this.idleTimeout = idleTimeout;
         this.clientIdentity = new ClientIdentity(destination, (short) 1001);
     }
 
@@ -157,8 +164,8 @@ public class SimpleCanalConnector implements CanalConnector {
             ClientAuth ca = ClientAuth.newBuilder()
                 .setUsername(username != null ? username : "")
                 .setPassword(ByteString.copyFromUtf8(password != null ? password : ""))
-                .setNetReadTimeout(soTimeout)
-                .setNetWriteTimeout(soTimeout)
+                .setNetReadTimeout(idleTimeout)
+                .setNetWriteTimeout(idleTimeout)
                 .build();
             writeWithHeader(Packet.newBuilder()
                 .setType(PacketType.CLIENTAUTHENTICATION)
@@ -282,6 +289,9 @@ public class SimpleCanalConnector implements CanalConnector {
 
     public Message getWithoutAck(int batchSize, Long timeout, TimeUnit unit) throws CanalClientException {
         waitClientRunning();
+        if (!running) {
+            return null;
+        }
         try {
             int size = (batchSize <= 0) ? 1000 : batchSize;
             long time = (timeout == null || timeout < 0) ? -1 : timeout; // -1代表不做timeout控制
@@ -309,7 +319,8 @@ public class SimpleCanalConnector implements CanalConnector {
     }
 
     private Message receiveMessages() throws IOException {
-        Packet p = Packet.parseFrom(readNextPacket());
+        byte[] data = readNextPacket();
+        Packet p = Packet.parseFrom(data);
         switch (p.getType()) {
             case MESSAGES: {
                 if (!p.getCompression().equals(Compression.NONE)) {
@@ -318,8 +329,13 @@ public class SimpleCanalConnector implements CanalConnector {
 
                 Messages messages = Messages.parseFrom(p.getBody());
                 Message result = new Message(messages.getBatchId());
-                for (ByteString byteString : messages.getMessagesList()) {
-                    result.addEntry(Entry.parseFrom(byteString));
+                if (lazyParseEntry) {
+                    // byteString
+                    result.setRawEntries(messages.getMessagesList());
+                } else {
+                    for (ByteString byteString : messages.getMessagesList()) {
+                        result.addEntry(Entry.parseFrom(byteString));
+                    }
                 }
                 return result;
             }
@@ -335,6 +351,9 @@ public class SimpleCanalConnector implements CanalConnector {
 
     public void ack(long batchId) throws CanalClientException {
         waitClientRunning();
+        if (!running) {
+            return;
+        }
         ClientAck ca = ClientAck.newBuilder()
             .setDestination(clientIdentity.getDestination())
             .setClientId(String.valueOf(clientIdentity.getClientId()))
@@ -500,6 +519,14 @@ public class SimpleCanalConnector implements CanalConnector {
         this.soTimeout = soTimeout;
     }
 
+    public int getIdleTimeout() {
+        return idleTimeout;
+    }
+
+    public void setIdleTimeout(int idleTimeout) {
+        this.idleTimeout = idleTimeout;
+    }
+
     public void setZkClientx(ZkClientx zkClientx) {
         this.zkClientx = zkClientx;
         initClientRunningMonitor(this.clientIdentity);
@@ -517,11 +544,19 @@ public class SimpleCanalConnector implements CanalConnector {
         this.filter = filter;
     }
 
+    public boolean isLazyParseEntry() {
+        return lazyParseEntry;
+    }
+
+    public void setLazyParseEntry(boolean lazyParseEntry) {
+        this.lazyParseEntry = lazyParseEntry;
+    }
+
     public void stopRunning() {
         if (running) {
-            running = false;  //设置为非running状态
+            running = false; // 设置为非running状态
             if (!mutex.state()) {
-                mutex.set(true);  //中断阻塞
+                mutex.set(true); // 中断阻塞
             }
         }
     }

+ 4 - 0
client/src/main/java/com/alibaba/otter/canal/client/impl/running/ClientRunningMonitor.java

@@ -98,6 +98,10 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
         String path = ZookeeperPathUtils.getDestinationClientRunning(this.destination, clientData.getClientId());
         zkClient.unsubscribeDataChanges(path, dataListener);
         releaseRunning(); // 尝试一下release
+        //Fix issue #697
+        if (delayExector != null) {
+            delayExector.shutdown();
+        }
     }
 
     // 改动记录:

+ 5 - 0
common/src/main/java/com/alibaba/otter/canal/common/utils/NamedThreadFactory.java

@@ -23,6 +23,11 @@ public class NamedThreadFactory implements ThreadFactory {
 
                                                                        public void uncaughtException(Thread t,
                                                                                                      Throwable e) {
+                                                                           if (e instanceof InterruptedException
+                                                                               || (e.getCause() != null && e.getCause() instanceof InterruptedException)) {
+                                                                               return;
+                                                                           }
+
                                                                            logger.error("from " + t.getName(), e);
                                                                        }
                                                                    };

+ 0 - 1
common/src/main/java/com/alibaba/otter/canal/common/zookeeper/running/ServerRunningMonitor.java

@@ -4,7 +4,6 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 
-import com.alibaba.otter.canal.common.CanalException;
 import org.I0Itec.zkclient.IZkDataListener;
 import org.I0Itec.zkclient.exception.ZkException;
 import org.I0Itec.zkclient.exception.ZkInterruptedException;

+ 0 - 1
dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/LogContext.java

@@ -58,7 +58,6 @@ public final class LogContext {
 
     public void reset() {
         formatDescription = FormatDescriptionLogEvent.FORMAT_DESCRIPTION_EVENT_5_x;
-
         mapOfTable.clear();
     }
 }

+ 2 - 0
dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/LogDecoder.java

@@ -109,6 +109,8 @@ public final class LogDecoder {
                         /* Decoding binary-log to event */
                         event = decode(buffer, header, context);
                         if (event != null) {
+                            // set logFileName
+                            event.getHeader().setLogFileName(context.getLogPosition().getFileName());
                             event.setSemival(buffer.semival);
                         }
                     } catch (IOException e) {

+ 20 - 0
dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/GtidLogEvent.java

@@ -17,10 +17,13 @@ public class GtidLogEvent extends LogEvent {
     public static final int ENCODED_FLAG_LENGTH = 1;
     // / Length of SID in event encoding
     public static final int ENCODED_SID_LENGTH  = 16;
+    public static final int LOGICAL_TIMESTAMP_TYPE_CODE  = 2;
 
     private boolean         commitFlag;
     private UUID            sid;
     private long            gno;
+    private Long            lastCommitted;
+    private Long            sequenceNumber;
 
     public GtidLogEvent(LogHeader header, LogBuffer buffer, FormatDescriptionLogEvent descriptionEvent){
         super(header);
@@ -40,6 +43,15 @@ public class GtidLogEvent extends LogEvent {
 
         gno = buffer.getLong64();
 
+        // support gtid lastCommitted and sequenceNumber
+        // fix bug #776
+        if (buffer.hasRemaining() && buffer.remaining() > 16 && buffer.getUint8() == LOGICAL_TIMESTAMP_TYPE_CODE) {
+            lastCommitted = buffer.getLong64();
+            sequenceNumber = buffer.getLong64();
+        }
+
+
+
         // ignore gtid info read
         // sid.copy_from((uchar *)ptr_buffer);
         // ptr_buffer+= ENCODED_SID_LENGTH;
@@ -62,4 +74,12 @@ public class GtidLogEvent extends LogEvent {
     public long getGno() {
         return gno;
     }
+
+    public Long getLastCommitted() {
+        return lastCommitted;
+    }
+
+    public Long getSequenceNumber() {
+        return sequenceNumber;
+    }
 }

+ 13 - 0
dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/LogHeader.java

@@ -115,6 +115,11 @@ public final class LogHeader {
      */
     protected long      crc;        // ha_checksum
 
+    /**
+     * binlog fileName
+     */
+    protected String    logFileName;
+
     /* for Start_event_v3 */
     public LogHeader(final int type){
         this.type = type;
@@ -270,6 +275,14 @@ public final class LogHeader {
         return checksumAlg;
     }
 
+    public String getLogFileName() {
+        return logFileName;
+    }
+
+    public void setLogFileName(String logFileName) {
+        this.logFileName = logFileName;
+    }
+
     private void processCheckSum(LogBuffer buffer) {
         if (checksumAlg != LogEvent.BINLOG_CHECKSUM_ALG_OFF && checksumAlg != LogEvent.BINLOG_CHECKSUM_ALG_UNDEF) {
             crc = buffer.getUint32(eventLen - LogEvent.BINLOG_CHECKSUM_LEN);

+ 66 - 59
dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/RowsLogBuffer.java

@@ -27,6 +27,7 @@ public final class RowsLogBuffer {
     public static final long   DATETIMEF_INT_OFS = 0x8000000000L;
     public static final long   TIMEF_INT_OFS     = 0x800000L;
     public static final long   TIMEF_OFS         = 0x800000000000L;
+    private static char[]      digits            = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9' };
 
     private final LogBuffer    buffer;
     private final int          columnLen;
@@ -489,17 +490,18 @@ public final class RowsLogBuffer {
                     // t % 100);
 
                     StringBuilder builder = new StringBuilder();
-                    builder.append(formatNumber(d / 10000, 4))
-                        .append('-')
-                        .append(formatNumber((d % 10000) / 100, 2))
-                        .append('-')
-                        .append(formatNumber(d % 100, 2))
-                        .append(' ')
-                        .append(formatNumber(t / 10000, 2))
-                        .append(':')
-                        .append(formatNumber((t % 10000) / 100, 2))
-                        .append(':')
-                        .append(formatNumber(t % 100, 2));
+                    builder.append(26);
+                    appendNumber4(builder, d / 10000);
+                    builder.append('-');
+                    appendNumber2(builder, (d % 10000) / 100);
+                    builder.append('-');
+                    appendNumber2(builder, d % 100);
+                    builder.append(' ');
+                    appendNumber2(builder, t / 10000);
+                    builder.append(':');
+                    appendNumber2(builder, (t % 10000) / 100);
+                    builder.append(':');
+                    appendNumber2(builder, t % 100);
                     value = builder.toString();
                 }
                 javaType = Types.TIMESTAMP;
@@ -562,18 +564,18 @@ public final class RowsLogBuffer {
                     // (int) ((hms >> 6) % (1 << 6)),
                     // (int) (hms % (1 << 6)));
 
-                    StringBuilder builder = new StringBuilder();
-                    builder.append(formatNumber((int) (ym / 13), 4))
-                        .append('-')
-                        .append(formatNumber((int) (ym % 13), 2))
-                        .append('-')
-                        .append(formatNumber((int) (ymd % (1 << 5)), 2))
-                        .append(' ')
-                        .append(formatNumber((int) (hms >> 12), 2))
-                        .append(':')
-                        .append(formatNumber((int) ((hms >> 6) % (1 << 6)), 2))
-                        .append(':')
-                        .append(formatNumber((int) (hms % (1 << 6)), 2));
+                    StringBuilder builder = new StringBuilder(26);
+                    appendNumber4(builder, (int) (ym / 13));
+                    builder.append('-');
+                    appendNumber2(builder, (int) (ym % 13));
+                    builder.append('-');
+                    appendNumber2(builder, (int) (ymd % (1 << 5)));
+                    builder.append(' ');
+                    appendNumber2(builder, (int) (hms >> 12));
+                    builder.append(':');
+                    appendNumber2(builder, (int) ((hms >> 6) % (1 << 6)));
+                    builder.append(':');
+                    appendNumber2(builder, (int) (hms % (1 << 6)));
                     second = builder.toString();
                 }
 
@@ -609,15 +611,15 @@ public final class RowsLogBuffer {
                     // (u32 % 10000) / 100,
                     // u32 % 100);
 
-                    StringBuilder builder = new StringBuilder();
+                    StringBuilder builder = new StringBuilder(12);
                     if (i32 < 0) {
                         builder.append('-');
                     }
-                    builder.append(formatNumber(u32 / 10000, 2))
-                        .append(':')
-                        .append(formatNumber((u32 % 10000) / 100, 2))
-                        .append(':')
-                        .append(formatNumber(u32 % 100, 2));
+                    appendNumber2(builder, u32 / 10000);
+                    builder.append(':');
+                    appendNumber2(builder, (u32 % 10000) / 100);
+                    builder.append(':');
+                    appendNumber2(builder, u32 % 100);
                     value = builder.toString();
                 }
                 javaType = Types.TIME;
@@ -718,15 +720,15 @@ public final class RowsLogBuffer {
                     // (int) ((intpart >> 6) % (1 << 6)),
                     // (int) (intpart % (1 << 6)));
 
-                    StringBuilder builder = new StringBuilder();
+                    StringBuilder builder = new StringBuilder(12);
                     if (ltime < 0) {
                         builder.append('-');
                     }
-                    builder.append(formatNumber((int) ((intpart >> 12) % (1 << 10)), 2))
-                        .append(':')
-                        .append(formatNumber((int) ((intpart >> 6) % (1 << 6)), 2))
-                        .append(':')
-                        .append(formatNumber((int) (intpart % (1 << 6)), 2));
+                    appendNumber2(builder, (int) ((intpart >> 12) % (1 << 10)));
+                    builder.append(':');
+                    appendNumber2(builder, (int) ((intpart >> 6) % (1 << 6)));
+                    builder.append(':');
+                    appendNumber2(builder, (int) (intpart % (1 << 6)));
                     second = builder.toString();
                 }
 
@@ -770,12 +772,12 @@ public final class RowsLogBuffer {
                     // value = String.format("%04d-%02d-%02d", i32 / (16 * 32),
                     // i32 / 32 % 16, i32 % 32);
 
-                    StringBuilder builder = new StringBuilder();
-                    builder.append(formatNumber(i32 / (16 * 32), 4))
-                        .append('-')
-                        .append(formatNumber(i32 / 32 % 16, 2))
-                        .append('-')
-                        .append(formatNumber(i32 % 32, 2));
+                    StringBuilder builder = new StringBuilder(12);
+                    appendNumber4(builder, i32 / (16 * 32));
+                    builder.append('-');
+                    appendNumber2(builder, i32 / 32 % 16);
+                    builder.append('-');
+                    appendNumber2(builder, i32 % 32);
                     value = builder.toString();
                 }
                 javaType = Types.DATE;
@@ -1126,29 +1128,34 @@ public final class RowsLogBuffer {
         return sec.substring(0, meta);
     }
 
-    private String formatNumber(int d, int size) {
-        return leftPad(String.valueOf(d), size, '0');
+    private void appendNumber4(StringBuilder builder, int d) {
+        if (d >= 1000) {
+            builder.append(digits[d / 1000])
+                .append(digits[(d / 100) % 10])
+                .append(digits[(d / 10) % 10])
+                .append(digits[d % 10]);
+        } else if (d >= 100) {
+            builder.append('0');
+            appendNumber3(builder, d);
+        }
     }
 
-    private String leftPad(String str, int size, char padChar) {
-        if (str == null) {
-            return null;
-        }
-        int pads = size - str.length();
-        if (pads <= 0) {
-            return str; // returns original String when possible
+    private void appendNumber3(StringBuilder builder, int d) {
+        if (d >= 100) {
+            builder.append(digits[d / 100])
+                .append(digits[(d / 10) % 10])
+                .append(digits[d % 10]);
+        } else {
+            builder.append('0');
+            appendNumber2(builder, d);
         }
-        return padding(pads, padChar).concat(str);
     }
 
-    private String padding(int repeat, char padChar) throws IndexOutOfBoundsException {
-        if (repeat < 0) {
-            throw new IndexOutOfBoundsException("Cannot pad a negative amount: " + repeat);
-        }
-        final char[] buf = new char[repeat];
-        for (int i = 0; i < buf.length; i++) {
-            buf[i] = padChar;
+    private void appendNumber2(StringBuilder builder, int d) {
+        if (d >= 10) {
+            builder.append(digits[(d / 10) % 10]).append(digits[d % 10]);
+        } else {
+            builder.append('0').append(digits[d]);
         }
-        return new String(buf);
     }
 }

+ 1 - 1
dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/RowsLogEvent.java

@@ -194,7 +194,7 @@ public abstract class RowsLogEvent extends LogEvent {
     }
 
     public final RowsLogBuffer getRowsBuf(String charsetName) {
-        return new RowsLogBuffer(rowsBuf.duplicate(), columnLen, charsetName);
+        return new RowsLogBuffer(rowsBuf, columnLen, charsetName);
     }
 
     public final int getFlags(final int flags) {

+ 52 - 0
dbsync/src/test/java/com/taobao/tddl/dbsync/FetcherPerformanceTest.java

@@ -0,0 +1,52 @@
+package com.taobao.tddl.dbsync;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.Statement;
+import java.util.concurrent.atomic.AtomicLong;
+
+import com.taobao.tddl.dbsync.binlog.DirectLogFetcher;
+import com.taobao.tddl.dbsync.binlog.LogEvent;
+
+public class FetcherPerformanceTest {
+
+    public static void main(String args[]) {
+        DirectLogFetcher fetcher = new DirectLogFetcher();
+        try {
+            Class.forName("com.mysql.jdbc.Driver");
+            Connection connection = DriverManager.getConnection("jdbc:mysql://127.0.0.1:3306",
+                "root",
+                "hello");
+            Statement statement = connection.createStatement();
+            statement.execute("SET @master_binlog_checksum='@@global.binlog_checksum'");
+            statement.execute("SET @mariadb_slave_capability='" + LogEvent.MARIA_SLAVE_CAPABILITY_MINE + "'");
+
+            fetcher.open(connection, "mysql-bin.000006", 120L, 2);
+
+            AtomicLong sum = new AtomicLong(0);
+            long start = System.currentTimeMillis();
+            long last = 0;
+            long end = 0;
+
+            while (fetcher.fetch()) {
+                sum.incrementAndGet();
+                long current = sum.get();
+                if (current - last >= 100000) {
+                    end = System.currentTimeMillis();
+                    long tps = ((current - last) * 1000) / (end - start);
+                    System.out.println(" total : " + sum + " , cost : " + (end - start) + " , tps : " + tps);
+                    last = current;
+                    start = end;
+                }
+            }
+        } catch (Exception e) {
+            e.printStackTrace();
+        } finally {
+            try {
+                fetcher.close();
+            } catch (IOException e) {
+            }
+        }
+    }
+}

+ 41 - 52
dbsync/src/test/java/com/taobao/tddl/dbsync/binlog/DirectLogFetcherTest.java

@@ -8,15 +8,6 @@ import java.sql.Statement;
 import org.junit.Assert;
 import org.junit.Test;
 
-import com.taobao.tddl.dbsync.binlog.event.DeleteRowsLogEvent;
-import com.taobao.tddl.dbsync.binlog.event.QueryLogEvent;
-import com.taobao.tddl.dbsync.binlog.event.RotateLogEvent;
-import com.taobao.tddl.dbsync.binlog.event.RowsQueryLogEvent;
-import com.taobao.tddl.dbsync.binlog.event.UpdateRowsLogEvent;
-import com.taobao.tddl.dbsync.binlog.event.WriteRowsLogEvent;
-import com.taobao.tddl.dbsync.binlog.event.XidLogEvent;
-import com.taobao.tddl.dbsync.binlog.event.mariadb.AnnotateRowsEvent;
-
 public class DirectLogFetcherTest extends BaseLogFetcherTest {
 
     @Test
@@ -24,56 +15,54 @@ public class DirectLogFetcherTest extends BaseLogFetcherTest {
         DirectLogFetcher fecther = new DirectLogFetcher();
         try {
             Class.forName("com.mysql.jdbc.Driver");
-            Connection connection = DriverManager.getConnection("jdbc:mysql://127.0.0.1:3306", "root", "hello");
+            Connection connection = DriverManager.getConnection("jdbc:mysql://100.81.154.142:3306", "root", "hello");
             Statement statement = connection.createStatement();
             statement.execute("SET @master_binlog_checksum='@@global.binlog_checksum'");
             statement.execute("SET @mariadb_slave_capability='" + LogEvent.MARIA_SLAVE_CAPABILITY_MINE + "'");
 
-            fecther.open(connection, "mysql-bin.000001", 4L, 2);
+            fecther.open(connection, "mysql-bin.000006", 120L, 2);
 
-            LogDecoder decoder = new LogDecoder(LogEvent.UNKNOWN_EVENT, LogEvent.ENUM_END_EVENT);
+            LogDecoder decoder = new LogDecoder(LogEvent.UNKNOWN_EVENT, LogEvent.UNKNOWN_EVENT);
             LogContext context = new LogContext();
             while (fecther.fetch()) {
-                LogEvent event = null;
-                event = decoder.decode(fecther, context);
-
-                if (event == null) {
-                    continue;
-                    // throw new RuntimeException("parse failed");
-                }
-
-                int eventType = event.getHeader().getType();
-                switch (eventType) {
-                    case LogEvent.ROTATE_EVENT:
-                        binlogFileName = ((RotateLogEvent) event).getFilename();
-                        break;
-                    case LogEvent.WRITE_ROWS_EVENT_V1:
-                    case LogEvent.WRITE_ROWS_EVENT:
-                        parseRowsEvent((WriteRowsLogEvent) event);
-                        break;
-                    case LogEvent.UPDATE_ROWS_EVENT_V1:
-                    case LogEvent.UPDATE_ROWS_EVENT:
-                        parseRowsEvent((UpdateRowsLogEvent) event);
-                        break;
-                    case LogEvent.DELETE_ROWS_EVENT_V1:
-                    case LogEvent.DELETE_ROWS_EVENT:
-                        parseRowsEvent((DeleteRowsLogEvent) event);
-                        break;
-                    case LogEvent.QUERY_EVENT:
-                        parseQueryEvent((QueryLogEvent) event);
-                        break;
-                    case LogEvent.ROWS_QUERY_LOG_EVENT:
-                        parseRowsQueryEvent((RowsQueryLogEvent) event);
-                        break;
-                    case LogEvent.ANNOTATE_ROWS_EVENT:
-                        parseAnnotateRowsEvent((AnnotateRowsEvent) event);
-                        break;
-                    case LogEvent.XID_EVENT:
-                        parseXidEvent((XidLogEvent) event);
-                        break;
-                    default:
-                        break;
-                }
+                decoder.decode(fecther, context);
+                continue;
+                // if (event == null) {
+                // continue;
+                // }
+                //
+                // int eventType = event.getHeader().getType();
+                // switch (eventType) {
+                // case LogEvent.ROTATE_EVENT:
+                // binlogFileName = ((RotateLogEvent) event).getFilename();
+                // break;
+                // case LogEvent.WRITE_ROWS_EVENT_V1:
+                // case LogEvent.WRITE_ROWS_EVENT:
+                // parseRowsEvent((WriteRowsLogEvent) event);
+                // break;
+                // case LogEvent.UPDATE_ROWS_EVENT_V1:
+                // case LogEvent.UPDATE_ROWS_EVENT:
+                // parseRowsEvent((UpdateRowsLogEvent) event);
+                // break;
+                // case LogEvent.DELETE_ROWS_EVENT_V1:
+                // case LogEvent.DELETE_ROWS_EVENT:
+                // parseRowsEvent((DeleteRowsLogEvent) event);
+                // break;
+                // case LogEvent.QUERY_EVENT:
+                // parseQueryEvent((QueryLogEvent) event);
+                // break;
+                // case LogEvent.ROWS_QUERY_LOG_EVENT:
+                // parseRowsQueryEvent((RowsQueryLogEvent) event);
+                // break;
+                // case LogEvent.ANNOTATE_ROWS_EVENT:
+                // parseAnnotateRowsEvent((AnnotateRowsEvent) event);
+                // break;
+                // case LogEvent.XID_EVENT:
+                // parseXidEvent((XidLogEvent) event);
+                // break;
+                // default:
+                // break;
+                // }
             }
         } catch (Exception e) {
             e.printStackTrace();

+ 8 - 0
deployer/pom.xml

@@ -16,6 +16,14 @@
 			<artifactId>canal.server</artifactId>
 			<version>${project.version}</version>
 		</dependency>
+
+		<!-- 这里指定runtime的metrics provider-->
+		<!--<dependency>-->
+			<!--<groupId>com.alibaba.otter</groupId>-->
+			<!--<artifactId>canal.prometheus</artifactId>-->
+			<!--<version>${project.version}</version>-->
+			<!--<scope>runtime</scope>-->
+		<!--</dependency>-->
 	</dependencies>
 	
 	<build>

+ 15 - 0
deployer/src/main/bin/metrics_env.sh

@@ -0,0 +1,15 @@
+#!/bin/bash
+# Additional line arg for current prometheus solution
+case "`uname`" in
+Linux)
+    bin_abs_path=$(readlink -f $(dirname $0))
+	;;
+*)
+	bin_abs_path=`cd $(dirname $0); pwd`
+	;;
+esac
+base=${bin_abs_path}/..
+if [ $(ls $base/lib/aspectjweaver*.jar | wc -l) -eq 1 ]; then
+    WEAVER=$(ls $base/lib/aspectjweaver*.jar)
+    METRICS_OPTS=" -javaagent:"${WEAVER}" "
+fi

+ 5 - 0
deployer/src/main/bin/restart.sh

@@ -0,0 +1,5 @@
+#!/bin/bash
+
+sh stop.sh
+
+sh startup.sh

+ 6 - 1
deployer/src/main/bin/startup.sh

@@ -94,7 +94,12 @@ then
 	echo LOG CONFIGURATION : $logback_configurationFile
 	echo canal conf : $canal_conf 
 	echo CLASSPATH :$CLASSPATH
-	$JAVA $JAVA_OPTS $JAVA_DEBUG_OPT $CANAL_OPTS -classpath .:$CLASSPATH com.alibaba.otter.canal.deployer.CanalLauncher 1>>$base/logs/canal/canal.log 2>&1 &
+#   metrics support options
+#	if [ -x $base/bin/metrics_env.sh ]; then
+#	    . $base/bin/metrics_env.sh
+#	    echo METRICS_OPTS $METRICS_OPTS
+#	fi
+	$JAVA $JAVA_OPTS $METRICS_OPTS $JAVA_DEBUG_OPT $CANAL_OPTS -classpath .:$CLASSPATH com.alibaba.otter.canal.deployer.CanalLauncher 1>>$base/logs/canal/canal.log 2>&1 &
 	echo $! > $base/bin/canal.pid 
 	
 	echo "cd to $current_path for continue"

+ 1 - 0
deployer/src/main/java/com/alibaba/otter/canal/deployer/CanalConstants.java

@@ -16,6 +16,7 @@ public class CanalConstants {
     public static final String CANAL_IP                          = ROOT + "." + "ip";
     public static final String CANAL_PORT                        = ROOT + "." + "port";
     public static final String CANAL_ZKSERVERS                   = ROOT + "." + "zkServers";
+    public static final String CANAL_WITHOUT_NETTY               = ROOT + "." + "withoutNetty";
 
     public static final String CANAL_DESTINATIONS                = ROOT + "." + "destinations";
     public static final String CANAL_AUTO_SCAN                   = ROOT + "." + "auto.scan";

+ 28 - 9
deployer/src/main/java/com/alibaba/otter/canal/deployer/CanalController.java

@@ -34,6 +34,7 @@ import com.alibaba.otter.canal.instance.core.CanalInstanceGenerator;
 import com.alibaba.otter.canal.instance.manager.CanalConfigClient;
 import com.alibaba.otter.canal.instance.manager.ManagerCanalInstanceGenerator;
 import com.alibaba.otter.canal.instance.spring.SpringCanalInstanceGenerator;
+import com.alibaba.otter.canal.parse.CanalEventParser;
 import com.alibaba.otter.canal.server.embedded.CanalServerWithEmbedded;
 import com.alibaba.otter.canal.server.exception.CanalServerException;
 import com.alibaba.otter.canal.server.netty.CanalServerWithNetty;
@@ -43,7 +44,7 @@ import com.google.common.collect.MigrateMap;
 
 /**
  * canal调度控制器
- * 
+ *
  * @author jianghang 2012-11-8 下午12:03:11
  * @version 1.0.0
  */
@@ -97,9 +98,12 @@ public class CanalController {
         port = Integer.valueOf(getProperty(properties, CanalConstants.CANAL_PORT));
         embededCanalServer = CanalServerWithEmbedded.instance();
         embededCanalServer.setCanalInstanceGenerator(instanceGenerator);// 设置自定义的instanceGenerator
-        canalServer = CanalServerWithNetty.instance();
-        canalServer.setIp(ip);
-        canalServer.setPort(port);
+        String canalWithoutNetty = getProperty(properties, CanalConstants.CANAL_WITHOUT_NETTY);
+        if (canalWithoutNetty == null || "false".equals(canalWithoutNetty)) {
+            canalServer = CanalServerWithNetty.instance();
+            canalServer.setIp(ip);
+            canalServer.setPort(port);
+        }
 
         // 处理下ip为空,默认使用hostIp暴露到zk中
         if (StringUtils.isEmpty(ip)) {
@@ -300,7 +304,7 @@ public class CanalController {
                     return instanceGenerator.generate(destination);
                 } else if (config.getMode().isSpring()) {
                     SpringCanalInstanceGenerator instanceGenerator = new SpringCanalInstanceGenerator();
-                    synchronized (this) {
+                    synchronized (CanalEventParser.class) {
                         try {
                             // 设置当前正在加载的通道,加载spring查找文件时会用到该变量
                             System.setProperty(CanalConstants.CANAL_DESTINATION_PROPERTY, destination);
@@ -376,7 +380,18 @@ public class CanalController {
     }
 
     private String getProperty(Properties properties, String key) {
-        return StringUtils.trim(properties.getProperty(StringUtils.trim(key)));
+        key = StringUtils.trim(key);
+        String value = System.getProperty(key);
+
+        if (value == null) {
+            value = System.getenv(key);
+        }
+
+        if (value == null) {
+            value = properties.getProperty(key);
+        }
+
+        return StringUtils.trim(value);
     }
 
     public void start() throws Throwable {
@@ -431,11 +446,15 @@ public class CanalController {
         }
 
         // 启动网络接口
-        canalServer.start();
+        if (canalServer != null) {
+            canalServer.start();
+        }
     }
 
     public void stop() throws Throwable {
-        canalServer.stop();
+        if (canalServer != null) {
+            canalServer.stop();
+        }
 
         if (autoScan) {
             for (InstanceConfigMonitor monitor : instanceConfigMonitors.values()) {
@@ -454,7 +473,7 @@ public class CanalController {
         // 释放canal的工作节点
         releaseCid(ZookeeperPathUtils.getCanalClusterNode(ip + ":" + port));
         logger.info("## stop the canal server[{}:{}]", ip, port);
-        
+
         if (zkclientx != null) {
             zkclientx.close();
         }

+ 1 - 0
deployer/src/main/java/com/alibaba/otter/canal/deployer/CanalLauncher.java

@@ -59,6 +59,7 @@ public class CanalLauncher {
 
     private static void setGlobalUncaughtExceptionHandler() {
         Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
+
             @Override
             public void uncaughtException(Thread t, Throwable e) {
                 logger.error("UnCaughtException", e);

+ 17 - 1
deployer/src/main/resources/canal.properties

@@ -3,10 +3,11 @@
 #################################################
 canal.id= 1
 canal.ip=
-canal.port= 11111
+canal.port=11111
 canal.zkServers=
 # flush data to zk
 canal.zookeeper.flush.period = 1000
+canal.withoutNetty = false
 # flush meta cursor/parse position to file
 canal.file.data.dir = ${canal.conf.dir}
 canal.file.flush.period = 1000
@@ -42,6 +43,7 @@ canal.instance.filter.query.dml = false
 canal.instance.filter.query.ddl = false
 canal.instance.filter.table.error = false
 canal.instance.filter.rows = false
+canal.instance.filter.transaction.entry = false
 
 # binlog format/image check
 canal.instance.binlog.format = ROW,STATEMENT,MIXED 
@@ -50,6 +52,20 @@ canal.instance.binlog.image = FULL,MINIMAL,NOBLOB
 # binlog ddl isolation
 canal.instance.get.ddl.isolation = false
 
+# parallel parser config
+canal.instance.parser.parallel = true
+## concurrent thread number, default 60% available processors, suggest not to exceed Runtime.getRuntime().availableProcessors()
+#canal.instance.parser.parallelThreadSize = 16
+## disruptor ringbuffer size, must be power of 2
+canal.instance.parser.parallelBufferSize = 256
+
+# table meta tsdb info
+canal.instance.tsdb.enable=true
+canal.instance.tsdb.dir=${canal.file.data.dir:../conf}/${canal.instance.destination:}
+canal.instance.tsdb.url=jdbc:h2:${canal.instance.tsdb.dir}/h2;CACHE_SIZE=1000;MODE=MYSQL;
+canal.instance.tsdb.dbUsername=canal
+canal.instance.tsdb.dbPassword=canal
+
 #################################################
 ######### 		destinations		############# 
 #################################################

+ 9 - 10
deployer/src/main/resources/example/instance.properties

@@ -1,11 +1,12 @@
 #################################################
-## mysql serverId
-canal.instance.mysql.slaveId=0
+## mysql serverId , v1.0.26+ will autoGen 
+# canal.instance.mysql.slaveId=0
 
-# position info
-canal.instance.master.address=127.0.0.1:3306
 # enable gtid use true/false
 canal.instance.gtidon=false
+
+# position info
+canal.instance.master.address=127.0.0.1:3306
 canal.instance.master.journal.name=
 canal.instance.master.position=
 canal.instance.master.timestamp=
@@ -13,23 +14,21 @@ canal.instance.master.gtid=
 
 # table meta tsdb info
 canal.instance.tsdb.enable=true
-canal.instance.tsdb.dir=${canal.file.data.dir:../conf}/${canal.instance.destination:}
-canal.instance.tsdb.url=jdbc:h2:${canal.instance.tsdb.dir}/h2;CACHE_SIZE=1000;MODE=MYSQL;
 #canal.instance.tsdb.url=jdbc:mysql://127.0.0.1:3306/canal_tsdb
-canal.instance.tsdb.dbUsername=canal
-canal.instance.tsdb.dbPassword=canal
-
+#canal.instance.tsdb.dbUsername=canal
+#canal.instance.tsdb.dbPassword=canal
 
 #canal.instance.standby.address =
 #canal.instance.standby.journal.name =
 #canal.instance.standby.position = 
 #canal.instance.standby.timestamp =
 #canal.instance.standby.gtid=
+
 # username/password
 canal.instance.dbUsername=canal
 canal.instance.dbPassword=canal
-canal.instance.defaultDatabaseName=test
 canal.instance.connectionCharset=UTF-8
+
 # table regex
 canal.instance.filter.regex=.*\\..*
 # table black regex

+ 5 - 1
deployer/src/main/resources/logback.xml

@@ -70,9 +70,13 @@
         <appender-ref ref="CANAL-ROOT" />
     </logger>
     <logger name="com.alibaba.otter.canal.meta.FileMixedMetaManager" additivity="false">  
-        <level value="INFO" />  
+        <level value="INFO" />
         <appender-ref ref="CANAL-META" />
     </logger>
+	<logger name="com.alibaba.otter.canal.kafka" additivity="false">
+		<level value="INFO" />
+		<appender-ref ref="CANAL-ROOT" />
+	</logger>
     
 	<root level="WARN">
 		<appender-ref ref="STDOUT"/>

+ 9 - 3
deployer/src/main/resources/spring/default-instance.xml

@@ -81,6 +81,7 @@
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
 		<property name="eventStore" ref="eventStore" />
+		<property name="filterTransactionEntry" value="${canal.instance.filter.transaction.entry:false}"/>
 	</bean>
 
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
@@ -148,7 +149,7 @@
 				<property name="address" value="${canal.instance.master.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
@@ -156,7 +157,7 @@
 				<property name="address" value="${canal.instance.standby.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		
@@ -191,6 +192,11 @@
 		<property name="tsdbSpringXml" value="${canal.instance.tsdb.spring.xml:}"/>
 		
 		<!--是否启用GTID模式-->
-		<property name="isGTIDMode" value="${canal.instance.gtidon}"/>
+		<property name="isGTIDMode" value="${canal.instance.gtidon:false}"/>
+		
+		<!-- parallel parser -->
+		<property name="parallel" value="${canal.instance.parser.parallel:true}" />
+		<property name="parallelThreadSize" value="${canal.instance.parser.parallelThreadSize}" />
+		<property name="parallelBufferSize" value="${canal.instance.parser.parallelBufferSize:256}" />
 	</bean>
 </beans>

+ 9 - 3
deployer/src/main/resources/spring/file-instance.xml

@@ -67,6 +67,7 @@
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
 		<property name="eventStore" ref="eventStore" />
+		<property name="filterTransactionEntry" value="${canal.instance.filter.transaction.entry:false}"/>
 	</bean>
 
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
@@ -133,7 +134,7 @@
 				<property name="address" value="${canal.instance.master.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
@@ -141,7 +142,7 @@
 				<property name="address" value="${canal.instance.standby.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		
@@ -176,6 +177,11 @@
 		<property name="tsdbSpringXml" value="${canal.instance.tsdb.spring.xml:}"/>
 
 		<!--是否启用GTID模式-->
-		<property name="isGTIDMode" value="${canal.instance.gtidon}"/>
+		<property name="isGTIDMode" value="${canal.instance.gtidon:false}"/>
+		
+		<!-- parallel parser -->
+		<property name="parallel" value="${canal.instance.parser.parallel:true}" />
+		<property name="parallelThreadSize" value="${canal.instance.parser.parallelThreadSize}" />
+		<property name="parallelBufferSize" value="${canal.instance.parser.parallelBufferSize:256}" />
 	</bean>
 </beans>

+ 15 - 4
deployer/src/main/resources/spring/group-instance.xml

@@ -64,6 +64,7 @@
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
 		<property name="eventStore" ref="eventStore" />
+		<property name="filterTransactionEntry" value="${canal.instance.filter.transaction.entry:false}"/>
 	</bean>
 	
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.group.GroupEventParser">
@@ -130,7 +131,7 @@
 				<property name="address" value="${canal.instance.master1.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
@@ -138,7 +139,7 @@
 				<property name="address" value="${canal.instance.standby1.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		
@@ -166,6 +167,11 @@
 		<property name="filterTableError" value="${canal.instance.filter.table.error:false}" />
 		<property name="supportBinlogFormats" value="${canal.instance.binlog.format}" />
 		<property name="supportBinlogImages" value="${canal.instance.binlog.image}" />
+		
+		<!-- parallel parser -->
+		<property name="parallel" value="${canal.instance.parser.parallel:true}" />
+		<property name="parallelThreadSize" value="${canal.instance.parser.parallelThreadSize}" />
+		<property name="parallelBufferSize" value="${canal.instance.parser.parallelBufferSize:256}" />
 	</bean>
 	
 	<bean id="eventParser2" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
@@ -223,7 +229,7 @@
 				<property name="address" value="${canal.instance.master2.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
@@ -231,7 +237,7 @@
 				<property name="address" value="${canal.instance.standby2.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		
@@ -260,5 +266,10 @@
 		<property name="filterTableError" value="${canal.instance.filter.table.error:false}" />
 		<property name="supportBinlogFormats" value="${canal.instance.binlog.format}" />
 		<property name="supportBinlogImages" value="${canal.instance.binlog.image}" />
+		
+		<!-- parallel parser -->
+		<property name="parallel" value="${canal.instance.parser.parallel:true}" />
+		<property name="parallelThreadSize" value="${canal.instance.parser.parallelThreadSize}" />
+		<property name="parallelBufferSize" value="${canal.instance.parser.parallelBufferSize:256}" />
 	</bean>
 </beans>

+ 7 - 1
deployer/src/main/resources/spring/local-instance.xml

@@ -67,6 +67,7 @@
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
 		<property name="eventStore" ref="eventStore" />
+		<property name="filterTransactionEntry" value="${canal.instance.filter.transaction.entry:false}"/>
 	</bean>
 
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.rds.RdsLocalBinlogEventParser">
@@ -113,7 +114,7 @@
 				<property name="address" value="${canal.instance.master.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		
@@ -137,5 +138,10 @@
 		<!--表结构相关-->
 		<property name="enableTsdb" value="${canal.instance.tsdb.enable:true}"/>
 		<property name="tsdbSpringXml" value="${canal.instance.tsdb.spring.xml:}"/>
+		
+		<!-- parallel parser -->
+		<property name="parallel" value="${canal.instance.parser.parallel:true}" />
+		<property name="parallelThreadSize" value="${canal.instance.parser.parallelThreadSize}" />
+		<property name="parallelBufferSize" value="${canal.instance.parser.parallelBufferSize:256}" />
 	</bean>
 </beans>

+ 9 - 3
deployer/src/main/resources/spring/memory-instance.xml

@@ -64,6 +64,7 @@
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
 		<property name="eventStore" ref="eventStore" />
+		<property name="filterTransactionEntry" value="${canal.instance.filter.transaction.entry:false}"/>
 	</bean>
 
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
@@ -121,7 +122,7 @@
 				<property name="address" value="${canal.instance.master.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
@@ -129,7 +130,7 @@
 				<property name="address" value="${canal.instance.standby.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		
@@ -164,6 +165,11 @@
 		<property name="tsdbSpringXml" value="${canal.instance.tsdb.spring.xml:}"/>
 		
 		<!--是否启用GTID模式-->
-		<property name="isGTIDMode" value="${canal.instance.gtidon}"/>
+		<property name="isGTIDMode" value="${canal.instance.gtidon:false}"/>
+		
+		<!-- parallel parser -->
+		<property name="parallel" value="${canal.instance.parser.parallel:true}" />
+		<property name="parallelThreadSize" value="${canal.instance.parser.parallelThreadSize}" />
+		<property name="parallelBufferSize" value="${canal.instance.parser.parallelBufferSize:256}" />
 	</bean>
 </beans>

+ 2 - 2
deployer/src/main/resources/spring/tsdb/h2-tsdb.xml

@@ -31,8 +31,8 @@
     <bean id="dataSource" class="com.alibaba.druid.pool.DruidDataSource" destroy-method="close">
         <property name="driverClassName" value="org.h2.Driver" />
 		<property name="url" value="${canal.instance.tsdb.url:}" />
-		<property name="username" value="${canal.instance.tsdb.dbUsername:canal}" />
-		<property name="password" value="${canal.instance.tsdb.dbPassword:canal}" />
+		<property name="username" value="${canal.instance.tsdb.dbUsername:}" />
+		<property name="password" value="${canal.instance.tsdb.dbPassword:}" />
       	<property name="maxActive" value="30" />
         <property name="initialSize" value="0" />
         <property name="minIdle" value="1" />

+ 2 - 2
deployer/src/main/resources/spring/tsdb/mysql-tsdb.xml

@@ -31,8 +31,8 @@
     <bean id="dataSource" class="com.alibaba.druid.pool.DruidDataSource" destroy-method="close">
         <property name="driverClassName" value="com.mysql.jdbc.Driver" />
 		<property name="url" value="${canal.instance.tsdb.url:}" />
-		<property name="username" value="${canal.instance.tsdb.dbUsername:canal}" />
-		<property name="password" value="${canal.instance.tsdb.dbPassword:canal}" />
+		<property name="username" value="${canal.instance.tsdb.dbUsername:}" />
+		<property name="password" value="${canal.instance.tsdb.dbPassword:}" />
         <property name="maxActive" value="30" />
         <property name="initialSize" value="0" />
         <property name="minIdle" value="1" />

+ 67 - 0
docker/Dockerfile

@@ -0,0 +1,67 @@
+FROM centos:centos6.7
+
+MAINTAINER agapple (jianghang115@gmail.com)
+
+# install system
+RUN \
+    /bin/cp -f /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
+    echo 'root:Hello1234' | chpasswd && \
+    groupadd -r admin && useradd -g admin admin && \
+    yum install -y man && \
+    yum install -y dstat && \
+    yum install -y unzip && \
+    yum install -y nc && \
+    yum install -y openssh-server && \
+    yum install -y tar && \
+    yum install -y which && \
+    yum install -y wget && \
+    yum install -y perl && \
+    yum install -y file && \
+    ssh-keygen -q -N "" -t dsa -f /etc/ssh/ssh_host_dsa_key && \
+    ssh-keygen -q -N "" -t rsa -f /etc/ssh/ssh_host_rsa_key && \
+    sed -ri 's/session    required     pam_loginuid.so/#session    required     pam_loginuid.so/g' /etc/pam.d/sshd && \
+    sed -i -e 's/^#Port 22$/Port 2222/' /etc/ssh/sshd_config && \
+    mkdir -p /root/.ssh && chown root.root /root && chmod 700 /root/.ssh && \
+    yum install -y cronie && \
+    sed -i '/session required pam_loginuid.so/d' /etc/pam.d/crond && \
+    yum clean all && \
+    true
+
+# install canal
+COPY image/ /tmp/docker/
+COPY canal.deployer-*.tar.gz /home/admin/
+COPY jdk-8-linux-x64.rpm /tmp/
+
+RUN \
+    cp -R /tmp/docker/alidata /alidata && \
+    chmod +x /alidata/bin/* && \
+    mkdir -p /home/admin && \
+    cp -R /tmp/docker/admin/* /home/admin/  && \
+    /bin/cp -f /alidata/bin/lark-wait /usr/bin/lark-wait && \
+
+    touch /var/lib/rpm/* && \
+    yum -y install /tmp/jdk-8-linux-x64.rpm && \
+    /bin/rm -f /tmp/jdk-8-linux-x64.rpm && \
+
+    echo "export JAVA_HOME=/usr/java/latest" >> /etc/profile && \
+    echo "export PATH=\$JAVA_HOME/bin:\$PATH" >> /etc/profile && \
+    /bin/mv /home/admin/bin/clean_log /etc/cron.d && \
+
+    mkdir -p /home/admin/canal-server && \
+    tar -xzvf /home/admin/canal.deployer-*.tar.gz -C /home/admin/canal-server && \
+    /bin/rm -f /home/admin/canal.deployer-*.tar.gz && \
+
+    mkdir -p /home/admin/canal-server/logs  && \
+    chmod +x /home/admin/*.sh  && \
+    chmod +x /home/admin/bin/*.sh  && \
+    chown admin: -R /home/admin && \
+    yum clean all && \
+    true
+
+# 2222 sys , 8080 web , 8000 debug , 11111 canal
+EXPOSE 2222 11111 8000 8080
+
+WORKDIR /home/admin
+
+ENTRYPOINT [ "/alidata/bin/main.sh" ]
+CMD [ "/home/admin/app.sh" ]

+ 30 - 0
docker/build.sh

@@ -0,0 +1,30 @@
+#!/bin/bash
+
+current_path=`pwd`
+case "`uname`" in
+    Darwin)
+        bin_abs_path=`cd $(dirname $0); pwd`
+        ;;
+    Linux)
+        bin_abs_path=$(readlink -f $(dirname $0))
+        ;;
+    *)
+        bin_abs_path=`cd $(dirname $0); pwd`
+        ;;
+esac
+BASE=${bin_abs_path}
+
+if [ ! -f $BASE/jdk*.rpm ] ; then
+    DOWNLOAD_LINK="http://download.oracle.com/otn-pub/java/jdk/8u181-b13/96a7b8442fe848ef90c96a2fad6ed6d1/jdk-8u181-linux-x64.rpm"
+    wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=xxx; oraclelicense=accept-securebackup-cookie" "$DOWNLOAD_LINK" -O $BASE/jdk-8-linux-x64.rpm
+fi
+
+cd $BASE/../ && mvn clean package -Dmaven.test.skip -Denv=release && cd $current_path ;
+
+if [ "$1" == "kafka" ] ; then
+	cp $BASE/../target/canal-kafka-*.tar.gz $BASE/
+	docker build --no-cache -t canal/canal-server $BASE/
+else 
+	cp $BASE/../target/canal.deployer-*.tar.gz $BASE/
+	docker build --no-cache -t canal/canal-server $BASE/
+fi

+ 117 - 0
docker/image/admin/app.sh

@@ -0,0 +1,117 @@
+#!/bin/bash
+set -e
+
+source /etc/profile
+export JAVA_HOME=/usr/java/latest
+export PATH=$JAVA_HOME/bin:$PATH
+touch /tmp/start.log
+chown admin: /tmp/start.log
+chown -R admin: /home/admin/canal-server
+host=`hostname -i`
+
+# waitterm
+#   wait TERM/INT signal.
+#   see: http://veithen.github.io/2014/11/16/sigterm-propagation.html
+waitterm() {
+        local PID
+        # any process to block
+        tail -f /dev/null &
+        PID="$!"
+        # setup trap, could do nothing, or just kill the blocker
+        trap "kill -TERM ${PID}" TERM INT
+        # wait for signal, ignore wait exit code
+        wait "${PID}" || true
+        # clear trap
+        trap - TERM INT
+        # wait blocker, ignore blocker exit code
+        wait "${PID}" 2>/dev/null || true
+}
+
+# waittermpid "${PIDFILE}".
+#   monitor process by pidfile && wait TERM/INT signal.
+#   if the process disappeared, return 1, means exit with ERROR.
+#   if TERM or INT signal received, return 0, means OK to exit.
+waittermpid() {
+        local PIDFILE PID do_run error
+        PIDFILE="${1?}"
+        do_run=true
+        error=0
+        trap "do_run=false" TERM INT
+        while "${do_run}" ; do
+                PID="$(cat "${PIDFILE}")"
+                if ! ps -p "${PID}" >/dev/null 2>&1 ; then
+                        do_run=false
+                        error=1
+                else
+                        sleep 1
+                fi
+        done
+        trap - TERM INT
+        return "${error}"
+}
+
+
+function checkStart() {
+    local name=$1
+    local cmd=$2
+    local timeout=$3
+    cost=5
+    while [ $timeout -gt 0 ]; do
+        ST=`eval $cmd`
+        if [ "$ST" == "0" ]; then
+            sleep 1
+            let timeout=timeout-1
+            let cost=cost+1
+        elif [ "$ST" == "" ]; then
+            sleep 1
+            let timeout=timeout-1
+            let cost=cost+1
+        else
+            break
+        fi
+    done
+    echo "start $name successful"
+}
+
+
+function start_canal() {
+    echo "start canal ..."
+    serverPort=`perl -le 'print $ENV{"canal.port"}'`
+    if [ -z "$serverPort" ] ; then
+        serverPort=11111
+    fi
+
+    destination=`perl -le 'print $ENV{"canal.destinations"}'`
+    if [[ "$destination" =~ ',' ]]; then
+        echo "multi destination:$destination is not support"
+        exit 1;
+    else
+        mv /home/admin/canal-server/conf/example /home/admin/canal-server/conf/$destination
+    fi
+    su admin -c 'cd /home/admin/canal-server/bin/ && sh restart.sh 1>>/tmp/start.log 2>&1'
+    sleep 5
+    #check start
+    checkStart "canal" "nc 127.0.0.1 $serverPort -w 1 -z | wc -l" 30
+}
+
+function stop_canal() {
+    echo "stop canal"
+    su admin -c 'cd /home/admin/canal-server/bin/ && sh stop.sh 1>>/tmp/start.log 2>&1'
+    echo "stop canal successful ..."
+}
+
+echo "==> START ..."
+
+start_canal
+
+echo "==> START SUCCESSFUL ..."
+
+tail -f /dev/null &
+# wait TERM signal
+waitterm
+
+echo "==> STOP"
+
+stop_canal
+
+echo "==> STOP SUCCESSFUL ..."

+ 2 - 0
docker/image/admin/bin/clean_log

@@ -0,0 +1,2 @@
+# cron clean log once per minute
+*/2 * * * * admin /home/admin/bin/clean_log.sh >>/tmp/clean_log.log 2>&1

+ 45 - 0
docker/image/admin/bin/clean_log.sh

@@ -0,0 +1,45 @@
+#!/bin/bash
+
+# Global Settings
+PATH="$HOME/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/usr/X11R6/bin:/root/bin"
+export PATH
+
+CUTOFF="85"
+#获取磁盘使用率最高的分区
+USAGE=$(df -h|awk 'NR>1 {gsub(/%$/,"",$5);print $5 }'|sort -nr|head -1)
+before=$USAGE
+
+baseClean(){
+    #删除tmp目录15天前的文件。
+    #更新文档时间戳
+    if [ -d /tmp/hsperfdata_admin ]
+    then
+        touch /tmp/hsperfdata_admin
+        touch /tmp/hsperfdata_admin/*
+    fi
+
+    find /tmp/ -type f -mtime +15 | xargs -t rm -rf >/dev/null 2>&1
+
+
+    now=$(df -h|awk 'NR>1 {gsub(/%$/,"",$5);print $5 }'|sort -nr|head -1)
+    echo "before:$before; now:$now"
+}
+
+CANAL_DIR="/home/admin/canal-server/logs"
+if [[ -d $CANAL_DIR ]]; then
+  USAGE=$(df -h|awk 'NR>1 {gsub(/%$/,"",$5);print $5 }'|sort -nr|head -1)
+  if [[ $USAGE -ge 90 ]]; then
+        find $CANAL_DIR -type f -mtime +7 | xargs rm -rf
+  fi
+  USAGE=$(df -h|awk 'NR>1 {gsub(/%$/,"",$5);print $5 }'|sort -nr|head -1)
+  if [[ $USAGE -ge 80 ]]; then
+        find $CANAL_DIR -type f -mtime +3 | xargs rm -rf
+  fi
+  USAGE=$(df -h|awk 'NR>1 {gsub(/%$/,"",$5);print $5 }'|sort -nr|head -1)
+  if [[ $USAGE -ge 80 ]]; then
+        find $CANAL_DIR -type d -empty -mtime +3 | grep -v canal | xargs rm -rf
+        find $CANAL_DIR -type f -iname '*.tmp' | xargs rm -rf
+  fi
+  baseClean
+  exit 0
+fi

+ 13 - 0
docker/image/admin/health.sh

@@ -0,0 +1,13 @@
+#!/bin/sh
+CHECK_URL="http://127.0.0.1:8080/metrics"
+CHECK_POINT="success"
+CHECK_COUNT=`curl -s --connect-timeout 7 --max-time 7 $CHECK_URL | grep -c $CHECK_POINT`
+if [ $CHECK_COUNT -eq 0 ]; then
+    echo "[FAILED]"
+    status=0
+	error=1
+else
+    echo "[  OK  ]"
+    status=1
+	error=0
+fi

+ 11 - 0
docker/image/alidata/bin/exec_rc_local.sh

@@ -0,0 +1,11 @@
+#!/bin/bash
+
+if [ "${SKIP_EXEC_RC_LOCAL}" = "YES" ] ; then
+	echo "skip /etc/rc.local: SKIP_EXEC_RC_LOCAL=${SKIP_EXEC_RC_LOCAL}"
+	exit
+fi
+
+if [ "${DOCKER_DEPLOY_TYPE}" = "HOST" ] ; then
+	echo "skip /etc/rc.local: DOCKER_DEPLOY_TYPE=${DOCKER_DEPLOY_TYPE}"
+	exit
+fi

+ 6 - 0
docker/image/alidata/bin/lark-wait

@@ -0,0 +1,6 @@
+#!/bin/bash
+set -e
+
+chown admin: -R /home/admin/
+source /alidata/lib/proc.sh
+waitterm

+ 27 - 0
docker/image/alidata/bin/main.sh

@@ -0,0 +1,27 @@
+#!/bin/bash
+
+[ -n "${DOCKER_DEPLOY_TYPE}" ] || DOCKER_DEPLOY_TYPE="VM"
+echo "DOCKER_DEPLOY_TYPE=${DOCKER_DEPLOY_TYPE}"
+
+# run init scripts
+for e in $(ls /alidata/init/*) ; do
+	[ -x "${e}" ] || continue
+	echo "==> INIT $e"
+	$e
+	echo "==> EXIT CODE: $?"
+done
+
+echo "==> INIT DEFAULT"
+service sshd start
+service crond start
+
+#echo "check hostname -i: `hostname -i`"
+#hti_num=`hostname -i|awk '{print NF}'`
+#if [ $hti_num -gt 1 ];then
+#    echo "hostname -i result error:`hostname -i`"
+#    exit 120
+#fi
+
+echo "==> INIT DONE"
+echo "==> RUN ${*}"
+exec "${@}"

+ 19 - 0
docker/image/alidata/init/02init-sshd.sh

@@ -0,0 +1,19 @@
+#!/bin/bash
+
+# set port
+if [ -z "${SSHD_PORT}" ] ; then
+	SSHD_PORT=22
+	[ "${DOCKER_DEPLOY_TYPE}" = "HOST" ] && SSHD_PORT=2222
+fi
+
+sed -r -i '/^OPTIONS=/ d' /etc/sysconfig/sshd
+echo 'OPTIONS="-p '"${SSHD_PORT}"'"' >> /etc/sysconfig/sshd
+
+# set admin ssh pulic key
+if [ "${USE_ADMIN_PASSAGE}" = "YES" ] ; then
+    echo "set admin passage"
+    mkdir -p /home/admin/.ssh
+    chown admin:admin /home/admin/.ssh
+    chown admin:admin /home/admin/.ssh/authorized_keys
+    chmod 644 /home/admin/.ssh/authorized_keys
+fi

+ 66 - 0
docker/image/alidata/init/fix-hosts.py

@@ -0,0 +1,66 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#****************************************************************#
+# Create Date: 2017-01-06 17:58
+#***************************************************************#
+
+import socket
+import shutil
+from time import gmtime, strftime
+
+# get host_name
+host_name = socket.gethostname()
+tmp_file = "/tmp/.lark-fix-host.hosts"
+host_file = "/etc/hosts"
+bak_file_name = "/tmp/hosts-fix-bak.%s" % ( strftime("%Y-%m-%d_%H-%M-%S", gmtime()) )
+
+# load /etc/hosts file context
+FH = open(host_file,"r")
+file_lines = [ i.rstrip() for i in FH.readlines()]
+FH.close()
+file_lines_reverse = file_lines[::-1]
+new_lines = []
+bad_lines = []
+last_match_line = ""
+
+for line in file_lines_reverse:
+    if line.find(host_name) < 0:  # 不匹配的行直接跳过
+        new_lines.append(line + "\n")
+        continue
+
+    cols = line.split()
+    new_cols = []
+    if cols[0].startswith("#"): # 跳过已经注释掉的行
+        new_lines.append(line + "\n")
+        continue
+    for col in cols:
+        if not col == host_name: # 跳过不匹配的列
+            new_cols.append(col)
+            continue
+
+        if cols[0] == "127.0.0.1": # 如果第一列是 127.0.0.1 就跳过匹配的列, 防止 hostname -i 返回 127.0.0.1
+            continue
+
+        # 如果已经发现过匹配的列, 就丢掉重复的列
+        if not len(last_match_line) == 0:
+            continue
+
+        new_cols.append(col)
+        last_match_line = line
+
+    # 跳过 xx.xx.xx.xx hostname 这样的重复列
+    if len(new_cols) == 1:
+        continue
+
+    new_l = "%s\n" % " ".join(new_cols)
+    new_lines.append(new_l)
+
+# save tmp hosts
+
+FH2 = open(tmp_file, "w+")
+FH2.writelines( new_lines[::-1])
+FH2.close()
+
+# mv to /etc/hosts
+shutil.copy(host_file, bak_file_name)
+shutil.move(tmp_file, host_file)

+ 40 - 0
docker/image/alidata/lib/proc.sh

@@ -0,0 +1,40 @@
+# waitterm
+#   wait TERM/INT signal.
+#   see: http://veithen.github.io/2014/11/16/sigterm-propagation.html
+waitterm() {
+	local PID
+	# any process to block
+	tail -f /dev/null &
+	PID="$!"
+	# setup trap, could do nothing, or just kill the blocker
+	trap "kill -TERM ${PID}" TERM INT
+	# wait for signal, ignore wait exit code
+	wait "${PID}" || true
+	# clear trap
+	trap - TERM INT
+	# wait blocker, ignore blocker exit code
+	wait "${PID}" 2>/dev/null || true
+}
+
+# waittermpid "${PIDFILE}".
+#   monitor process by pidfile && wait TERM/INT signal.
+#   if the process disappeared, return 1, means exit with ERROR.
+#   if TERM or INT signal received, return 0, means OK to exit.
+waittermpid() {
+	local PIDFILE PID do_run error
+	PIDFILE="${1?}"
+	do_run=true
+	error=0
+	trap "do_run=false" TERM INT
+	while "${do_run}" ; do
+		PID="$(cat "${PIDFILE}")"
+		if ! ps -p "${PID}" >/dev/null 2>&1 ; then
+			do_run=false
+			error=1
+		else
+			sleep 1
+		fi
+	done
+	trap - TERM INT
+	return "${error}"
+}

+ 92 - 0
docker/run.sh

@@ -0,0 +1,92 @@
+#!/bin/bash
+
+function usage() {
+    echo "Usage:"
+    echo "  run.sh [CONFIG]"
+    echo "example:"
+    echo "  run.sh -e canal.instance.master.address=127.0.0.1:3306 \\"
+    echo "         -e canal.instance.dbUsername=canal \\"
+    echo "         -e canal.instance.dbPassword=canal \\"
+    echo "         -e canal.instance.connectionCharset=UTF-8 \\"
+    echo "         -e canal.instance.tsdb.enable=true \\"
+    echo "         -e canal.instance.gtidon=false \\"
+    echo "         -e canal.instance.filter.regex=.*\\..* "
+    exit
+}
+
+function check_port() {
+    local port=$1
+    local TL=$(which telnet)
+    if [ -f $TL ]; then
+        data=`echo quit | telnet 127.0.0.1 $port| grep -ic connected`
+        echo $data
+        return
+    fi
+
+    local NC=$(which nc)
+    if [ -f $NC ]; then
+        data=`nc -z -w 1 127.0.0.1 $port | grep -ic succeeded`
+        echo $data
+        return
+    fi
+    echo "0"
+    return
+}
+
+function getMyIp() {
+    case "`uname`" in
+        Darwin)
+         myip=`echo "show State:/Network/Global/IPv4" | scutil | grep PrimaryInterface | awk '{print $3}' | xargs ifconfig | grep inet | grep -v inet6 | awk '{print $2}'`
+         ;;
+        *)
+         myip=`ip route get 1 | awk '{print $NF;exit}'`
+         ;;
+  esac
+  echo $myip
+}
+
+NET_MODE=""
+case "`uname`" in
+    Darwin)
+        bin_abs_path=`cd $(dirname $0); pwd`
+        ;;
+    Linux)
+        bin_abs_path=$(readlink -f $(dirname $0))
+        NET_MODE="--net=host"
+        ;;
+    *)
+        NET_MODE="--net=host"
+        bin_abs_path=`cd $(dirname $0); pwd`
+        ;;
+esac
+BASE=${bin_abs_path}
+if [ $# -eq 0 ]; then
+    usage
+elif [ "$1" == "-h" ] ; then
+    usage
+elif [ "$1" == "help" ] ; then
+    usage
+fi
+
+DATA="$BASE/data"
+mkdir -p $DATA
+CONFIG=${@:1}
+#VOLUMNS="-v $DATA:/home/admin/canal-server/logs"
+PORTLIST="8000 8080 2222 11111"
+PORTS=""
+for PORT in $PORTLIST ; do
+    #exist=`check_port $PORT`
+    exist="0"
+    if [ "$exist" == "0" ]; then
+        PORTS="$PORTS -p $PORT:$PORT"
+    else
+        echo "port $PORT is used , pls check"
+        exit 1
+    fi
+done
+
+MEMORY="-m 4096m"
+LOCALHOST=`getMyIp`
+cmd="docker run -it -h $LOCALHOST $CONFIG --name=canal-server $VOLUMNS $NET_MODE $PORTS $MEMORY canal/canal-server"
+echo $cmd
+eval $cmd

+ 2 - 2
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/MysqlConnector.java

@@ -32,7 +32,7 @@ public class MysqlConnector {
     private String              password;
 
     private byte                charsetNumber     = 33;
-    private String              defaultSchema     = "retl";
+    private String              defaultSchema     = "test";
     private int                 soTimeout         = 30 * 1000;
     private int                 connTimeout       = 5 * 1000;
     private int                 receiveBufferSize = 16 * 1024;
@@ -53,7 +53,7 @@ public class MysqlConnector {
         String addr = address.getHostString();
         int port = address.getPort();
         this.address = new InetSocketAddress(addr, port);
-        
+
         this.username = username;
         this.password = password;
     }

+ 2 - 2
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/packets/GTIDSet.java

@@ -3,8 +3,7 @@ package com.alibaba.otter.canal.parse.driver.mysql.packets;
 import java.io.IOException;
 
 /**
- * Created by hiwjd on 2018/4/23.
- * hiwjd0@gmail.com
+ * Created by hiwjd on 2018/4/23. hiwjd0@gmail.com
  */
 public interface GTIDSet {
 
@@ -17,6 +16,7 @@ public interface GTIDSet {
 
     /**
      * 更新当前实例
+     * 
      * @param str
      * @throws Exception
      */

+ 25 - 60
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/packets/MysqlGTIDSet.java

@@ -1,14 +1,14 @@
 package com.alibaba.otter.canal.parse.driver.mysql.packets;
 
-import com.alibaba.otter.canal.parse.driver.mysql.utils.ByteHelper;
-
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
-import java.util.*;
+import java.util.HashMap;
+import java.util.Map;
+
+import com.alibaba.otter.canal.parse.driver.mysql.utils.ByteHelper;
 
 /**
- * Created by hiwjd on 2018/4/23.
- * hiwjd0@gmail.com
+ * Created by hiwjd on 2018/4/23. hiwjd0@gmail.com
  */
 public class MysqlGTIDSet implements GTIDSet {
 
@@ -56,61 +56,26 @@ public class MysqlGTIDSet implements GTIDSet {
     }
 
     /**
-     * 解析如下格式的字符串为MysqlGTIDSet:
-     *
-     * 726757ad-4455-11e8-ae04-0242ac110002:1 =>
-     *   MysqlGTIDSet{
-     *     sets: {
-     *       726757ad-4455-11e8-ae04-0242ac110002: UUIDSet{
-     *         SID: 726757ad-4455-11e8-ae04-0242ac110002,
-     *         intervals: [{start:1, stop:2}]
-     *       }
-     *     }
-     *   }
-     *
-     * 726757ad-4455-11e8-ae04-0242ac110002:1-3 =>
-     *   MysqlGTIDSet{
-     *     sets: {
-     *       726757ad-4455-11e8-ae04-0242ac110002: UUIDSet{
-     *         SID: 726757ad-4455-11e8-ae04-0242ac110002,
-     *         intervals: [{start:1, stop:4}]
-     *       }
-     *     }
-     *   }
-     *
-     * 726757ad-4455-11e8-ae04-0242ac110002:1-3:4 =>
-     *   MysqlGTIDSet{
-     *     sets: {
-     *       726757ad-4455-11e8-ae04-0242ac110002: UUIDSet{
-     *         SID: 726757ad-4455-11e8-ae04-0242ac110002,
-     *         intervals: [{start:1, stop:5}]
-     *       }
-     *     }
-     *   }
-     *
-     * 726757ad-4455-11e8-ae04-0242ac110002:1-3:7-9 =>
-     *   MysqlGTIDSet{
-     *     sets: {
-     *       726757ad-4455-11e8-ae04-0242ac110002: UUIDSet{
-     *         SID: 726757ad-4455-11e8-ae04-0242ac110002,
-     *         intervals: [{start:1, stop:4}, {start:7, stop: 10}]
-     *       }
-     *     }
-     *   }
-     *
-     * 726757ad-4455-11e8-ae04-0242ac110002:1-3,726757ad-4455-11e8-ae04-0242ac110003:4 =>
-     *   MysqlGTIDSet{
-     *     sets: {
-     *       726757ad-4455-11e8-ae04-0242ac110002: UUIDSet{
-     *         SID: 726757ad-4455-11e8-ae04-0242ac110002,
-     *         intervals: [{start:1, stop:4}]
-     *       },
-     *       726757ad-4455-11e8-ae04-0242ac110003: UUIDSet{
-     *         SID: 726757ad-4455-11e8-ae04-0242ac110002,
-     *         intervals: [{start:4, stop:5}]
-     *       }
-     *     }
-     *   }
+     * 解析如下格式的字符串为MysqlGTIDSet: 726757ad-4455-11e8-ae04-0242ac110002:1 =>
+     * MysqlGTIDSet{ sets: { 726757ad-4455-11e8-ae04-0242ac110002: UUIDSet{ SID:
+     * 726757ad-4455-11e8-ae04-0242ac110002, intervals: [{start:1, stop:2}] } }
+     * } 726757ad-4455-11e8-ae04-0242ac110002:1-3 => MysqlGTIDSet{ sets: {
+     * 726757ad-4455-11e8-ae04-0242ac110002: UUIDSet{ SID:
+     * 726757ad-4455-11e8-ae04-0242ac110002, intervals: [{start:1, stop:4}] } }
+     * } 726757ad-4455-11e8-ae04-0242ac110002:1-3:4 => MysqlGTIDSet{ sets: {
+     * 726757ad-4455-11e8-ae04-0242ac110002: UUIDSet{ SID:
+     * 726757ad-4455-11e8-ae04-0242ac110002, intervals: [{start:1, stop:5}] } }
+     * } 726757ad-4455-11e8-ae04-0242ac110002:1-3:7-9 => MysqlGTIDSet{ sets: {
+     * 726757ad-4455-11e8-ae04-0242ac110002: UUIDSet{ SID:
+     * 726757ad-4455-11e8-ae04-0242ac110002, intervals: [{start:1, stop:4},
+     * {start:7, stop: 10}] } } }
+     * 726757ad-4455-11e8-ae04-0242ac110002:1-3,726757
+     * ad-4455-11e8-ae04-0242ac110003:4 => MysqlGTIDSet{ sets: {
+     * 726757ad-4455-11e8-ae04-0242ac110002: UUIDSet{ SID:
+     * 726757ad-4455-11e8-ae04-0242ac110002, intervals: [{start:1, stop:4}] },
+     * 726757ad-4455-11e8-ae04-0242ac110003: UUIDSet{ SID:
+     * 726757ad-4455-11e8-ae04-0242ac110002, intervals: [{start:4, stop:5}] } }
+     * }
      *
      * @param gtidData
      * @return

+ 22 - 30
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/packets/UUIDSet.java

@@ -1,7 +1,5 @@
 package com.alibaba.otter.canal.parse.driver.mysql.packets;
 
-import com.alibaba.otter.canal.parse.driver.mysql.utils.ByteHelper;
-
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
@@ -10,13 +8,14 @@ import java.util.Collections;
 import java.util.List;
 import java.util.UUID;
 
+import com.alibaba.otter.canal.parse.driver.mysql.utils.ByteHelper;
+
 /**
- * Created by hiwjd on 2018/4/23.
- * hiwjd0@gmail.com
+ * Created by hiwjd on 2018/4/23. hiwjd0@gmail.com
  */
 public class UUIDSet {
 
-    public UUID SID;
+    public UUID           SID;
     public List<Interval> intervals;
 
     public byte[] encode() throws IOException {
@@ -54,6 +53,7 @@ public class UUIDSet {
     }
 
     public static class Interval implements Comparable<Interval> {
+
         public long start;
         public long stop;
 
@@ -85,19 +85,15 @@ public class UUIDSet {
     }
 
     /**
-     * 解析如下格式字符串为UUIDSet:
-     *
-     * 726757ad-4455-11e8-ae04-0242ac110002:1 =>
-     *   UUIDSet{SID: 726757ad-4455-11e8-ae04-0242ac110002, intervals: [{start:1, stop:2}]}
-     *
-     * 726757ad-4455-11e8-ae04-0242ac110002:1-3 =>
-     *   UUIDSet{SID: 726757ad-4455-11e8-ae04-0242ac110002, intervals: [{start:1, stop:4}]}
-     *
-     * 726757ad-4455-11e8-ae04-0242ac110002:1-3:4
-     *   UUIDSet{SID: 726757ad-4455-11e8-ae04-0242ac110002, intervals: [{start:1, stop:5}]}
-     *
-     * 726757ad-4455-11e8-ae04-0242ac110002:1-3:7-9
-     *   UUIDSet{SID: 726757ad-4455-11e8-ae04-0242ac110002, intervals: [{start:1, stop:4}, {start:7, stop:10}]}
+     * 解析如下格式字符串为UUIDSet: 726757ad-4455-11e8-ae04-0242ac110002:1 => UUIDSet{SID:
+     * 726757ad-4455-11e8-ae04-0242ac110002, intervals: [{start:1, stop:2}]}
+     * 726757ad-4455-11e8-ae04-0242ac110002:1-3 => UUIDSet{SID:
+     * 726757ad-4455-11e8-ae04-0242ac110002, intervals: [{start:1, stop:4}]}
+     * 726757ad-4455-11e8-ae04-0242ac110002:1-3:4 UUIDSet{SID:
+     * 726757ad-4455-11e8-ae04-0242ac110002, intervals: [{start:1, stop:5}]}
+     * 726757ad-4455-11e8-ae04-0242ac110002:1-3:7-9 UUIDSet{SID:
+     * 726757ad-4455-11e8-ae04-0242ac110002, intervals: [{start:1, stop:4},
+     * {start:7, stop:10}]}
      *
      * @param str
      * @return
@@ -110,7 +106,7 @@ public class UUIDSet {
         }
 
         List<Interval> intervals = new ArrayList<Interval>();
-        for (int i=1; i<ss.length; i++) {
+        for (int i = 1; i < ss.length; i++) {
             intervals.add(parseInterval(ss[i]));
         }
 
@@ -134,7 +130,7 @@ public class UUIDSet {
                 sb.append(":");
                 sb.append(interval.start);
                 sb.append("-");
-                sb.append(interval.stop-1);
+                sb.append(interval.stop - 1);
             }
         }
 
@@ -142,12 +138,8 @@ public class UUIDSet {
     }
 
     /**
-     * 解析如下格式字符串为Interval:
-     *
-     * 1 => Interval{start:1, stop:2}
-     * 1-3 => Interval{start:1, stop:4}
-     *
-     * 注意!字符串格式表达时[n,m]是两侧都包含的,Interval表达时[n,m)右侧开
+     * 解析如下格式字符串为Interval: 1 => Interval{start:1, stop:2} 1-3 =>
+     * Interval{start:1, stop:4} 注意!字符串格式表达时[n,m]是两侧都包含的,Interval表达时[n,m)右侧开
      *
      * @param str
      * @return
@@ -173,8 +165,8 @@ public class UUIDSet {
     }
 
     /**
-     * 把{start,stop}连续的合并掉:
-     * [{start:1, stop:4},{start:4, stop:5}] => [{start:1, stop:5}]
+     * 把{start,stop}连续的合并掉: [{start:1, stop:4},{start:4, stop:5}] => [{start:1,
+     * stop:5}]
      *
      * @param intervals
      * @return
@@ -183,11 +175,11 @@ public class UUIDSet {
         List<Interval> combined = new ArrayList<Interval>();
         Collections.sort(intervals);
         int len = intervals.size();
-        for (int i=0; i<len; i++) {
+        for (int i = 0; i < len; i++) {
             combined.add(intervals.get(i));
 
             int j;
-            for (j=i+1; j<len; j++) {
+            for (j = i + 1; j < len; j++) {
                 if (intervals.get(i).stop >= intervals.get(j).start) {
                     intervals.get(i).stop = intervals.get(j).stop;
                 } else {

+ 18 - 20
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/packets/client/BinlogDumpGTIDCommandPacket.java

@@ -1,28 +1,26 @@
 package com.alibaba.otter.canal.parse.driver.mysql.packets.client;
 
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+
 import com.alibaba.otter.canal.parse.driver.mysql.packets.CommandPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.GTIDSet;
 import com.alibaba.otter.canal.parse.driver.mysql.utils.ByteHelper;
 
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-
 /**
- * Created by hiwjd on 2018/4/24.
- * hiwjd0@gmail.com
- *
+ * Created by hiwjd on 2018/4/24. hiwjd0@gmail.com
  * https://dev.mysql.com/doc/internals/en/com-binlog-dump-gtid.html
  */
 public class BinlogDumpGTIDCommandPacket extends CommandPacket {
 
-    public static final int BINLOG_DUMP_NON_BLOCK = 0x01;
+    public static final int BINLOG_DUMP_NON_BLOCK   = 0x01;
     public static final int BINLOG_THROUGH_POSITION = 0x02;
-    public static final int BINLOG_THROUGH_GTID = 0x04;
+    public static final int BINLOG_THROUGH_GTID     = 0x04;
 
-    public long    slaveServerId;
-    public GTIDSet gtidSet;
+    public long             slaveServerId;
+    public GTIDSet          gtidSet;
 
-    public BinlogDumpGTIDCommandPacket() {
+    public BinlogDumpGTIDCommandPacket(){
         setCommand((byte) 0x1e);
     }
 
@@ -51,15 +49,15 @@ public class BinlogDumpGTIDCommandPacket extends CommandPacket {
         // 6. [4] data-size
         ByteHelper.writeUnsignedIntLittleEndian(bs.length, out);
         // 7, [] data
-        //       [8] n_sids // 文档写的是4个字节,其实是8个字节
-        //       for n_sids {
-        //          [16] SID
-        //          [8] n_intervals
-        //          for n_intervals {
-        //             [8] start (signed)
-        //             [8] end (signed)
-        //          }
-        //       }
+        // [8] n_sids // 文档写的是4个字节,其实是8个字节
+        // for n_sids {
+        // [16] SID
+        // [8] n_intervals
+        // for n_intervals {
+        // [8] start (signed)
+        // [8] end (signed)
+        // }
+        // }
         out.write(bs);
         // }
 

+ 33 - 1
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/socket/BioSocketChannel.java

@@ -1,5 +1,6 @@
 package com.alibaba.otter.canal.parse.driver.mysql.socket;
 
+import java.io.BufferedInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -24,7 +25,7 @@ public class BioSocketChannel implements SocketChannel {
 
     BioSocketChannel(Socket socket) throws IOException{
         this.socket = socket;
-        this.input = socket.getInputStream();
+        this.input = new BufferedInputStream(socket.getInputStream(), 16384);
         this.output = socket.getOutputStream();
     }
 
@@ -93,6 +94,37 @@ public class BioSocketChannel implements SocketChannel {
         return data;
     }
 
+    @Override
+    public void read(byte[] data, int off, int len, int timeout) throws IOException {
+        InputStream input = this.input;
+        int accTimeout = 0;
+        if (input == null) {
+            throw new SocketException("Socket already closed.");
+        }
+
+        int n = 0;
+        while (n < len && accTimeout < timeout) {
+            try {
+                int read = input.read(data, off + n, len - n);
+                if (read > -1) {
+                    n += read;
+                } else {
+                    throw new IOException("EOF encountered.");
+                }
+            } catch (SocketTimeoutException te) {
+                if (Thread.interrupted()) {
+                    throw new ClosedByInterruptException();
+                }
+                accTimeout += SO_TIMEOUT;
+            }
+        }
+
+        if (n < len && accTimeout >= timeout) {
+            throw new SocketTimeoutException("Timeout occurred, failed to read " + len + " bytes in " + timeout
+                                             + " milliseconds.");
+        }
+    }
+
     public boolean isConnected() {
         Socket socket = this.socket;
         if (socket != null) {

+ 0 - 2
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/socket/BioSocketChannelPool.java

@@ -11,8 +11,6 @@ public abstract class BioSocketChannelPool {
 
     public static BioSocketChannel open(SocketAddress address) throws Exception {
         Socket socket = new Socket();
-        socket.setReceiveBufferSize(32 * 1024);
-        socket.setSendBufferSize(32 * 1024);
         socket.setSoTimeout(BioSocketChannel.SO_TIMEOUT);
         socket.setTcpNoDelay(true);
         socket.setKeepAlive(true);

+ 7 - 0
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/socket/NettySocketChannel.java

@@ -11,6 +11,7 @@ import io.netty.util.internal.SystemPropertyUtil;
 import java.io.IOException;
 import java.net.SocketAddress;
 
+import org.apache.commons.lang.NotImplementedException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -202,6 +203,11 @@ public class NettySocketChannel implements SocketChannel {
         } while (true);
     }
 
+    @Override
+    public void read(byte[] data, int off, int len, int timeout) throws IOException {
+        throw new NotImplementedException();
+    }
+
     public boolean isConnected() {
         return channel != null ? true : false;
     }
@@ -224,4 +230,5 @@ public class NettySocketChannel implements SocketChannel {
         }
     }
 
+
 }

+ 3 - 4
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/socket/NettySocketChannelPool.java

@@ -29,19 +29,18 @@ import org.slf4j.LoggerFactory;
 @SuppressWarnings({ "rawtypes", "deprecation" })
 public abstract class NettySocketChannelPool {
 
-    private static EventLoopGroup              group     = new NioEventLoopGroup();                         // 非阻塞IO线程组
-    private static Bootstrap                   boot      = new Bootstrap();                                 // 主
+    private static EventLoopGroup              group     = new NioEventLoopGroup();                              // 非阻塞IO线程组
+    private static Bootstrap                   boot      = new Bootstrap();                                      // 主
     private static Map<Channel, SocketChannel> chManager = new ConcurrentHashMap<Channel, SocketChannel>();
     private static final Logger                logger    = LoggerFactory.getLogger(NettySocketChannelPool.class);
 
     static {
         boot.group(group)
             .channel(NioSocketChannel.class)
-            .option(ChannelOption.SO_RCVBUF, 32 * 1024)
-            .option(ChannelOption.SO_SNDBUF, 32 * 1024)
             .option(ChannelOption.TCP_NODELAY, true)
             // 如果是延时敏感型应用,建议关闭Nagle算法
             .option(ChannelOption.SO_KEEPALIVE, true)
+            .option(ChannelOption.SO_REUSEADDR, true)
             .option(ChannelOption.RCVBUF_ALLOCATOR, AdaptiveRecvByteBufAllocator.DEFAULT)
             .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
             //

+ 2 - 0
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/socket/SocketChannel.java

@@ -15,6 +15,8 @@ public interface SocketChannel {
 
     public byte[] read(int readSize, int timeout) throws IOException;
 
+    public void read(byte[] data, int off, int len, int timeout) throws IOException;
+
     public boolean isConnected();
 
     public SocketAddress getRemoteSocketAddress();

+ 2 - 2
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/utils/ByteHelper.java

@@ -108,7 +108,7 @@ public abstract class ByteHelper {
 
         return out.toByteArray();
     }
-    
+
     public static void write8ByteUnsignedIntLittleEndian(long data, ByteArrayOutputStream out) {
         out.write((byte) (data & 0xFF));
         out.write((byte) (data >>> 8));
@@ -119,7 +119,7 @@ public abstract class ByteHelper {
         out.write((byte) (data >>> 48));
         out.write((byte) (data >>> 56));
     }
-    
+
     public static void writeUnsignedIntLittleEndian(long data, ByteArrayOutputStream out) {
         out.write((byte) (data & 0xFF));
         out.write((byte) (data >>> 8));

+ 4 - 4
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/utils/PacketManager.java

@@ -14,15 +14,15 @@ public abstract class PacketManager {
     }
 
     public static HeaderPacket readHeader(SocketChannel ch, int len, int timeout) throws IOException {
-    	HeaderPacket header = new HeaderPacket();
-    	header.fromBytes(ch.read(len, timeout));
-    	return header;
+        HeaderPacket header = new HeaderPacket();
+        header.fromBytes(ch.read(len, timeout));
+        return header;
     }
 
     public static byte[] readBytes(SocketChannel ch, int len) throws IOException {
         return ch.read(len);
     }
-    
+
     public static byte[] readBytes(SocketChannel ch, int len, int timeout) throws IOException {
         return ch.read(len, timeout);
     }

+ 35 - 45
driver/src/test/java/com/alibaba/otter/canal/parse/driver/mysql/MysqlGTIDSetTest.java

@@ -1,17 +1,23 @@
 package com.alibaba.otter.canal.parse.driver.mysql;
 
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
+import org.junit.Test;
+
 import com.alibaba.otter.canal.parse.driver.mysql.packets.GTIDSet;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.MysqlGTIDSet;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.UUIDSet;
-import org.junit.Test;
-import static org.junit.Assert.*;
-
-import java.io.IOException;
-import java.util.*;
 
 /**
- * Created by hiwjd on 2018/4/25.
- * hiwjd0@gmail.com
+ * Created by hiwjd on 2018/4/25. hiwjd0@gmail.com
  */
 public class MysqlGTIDSetTest {
 
@@ -20,14 +26,12 @@ public class MysqlGTIDSetTest {
         GTIDSet gtidSet = MysqlGTIDSet.parse("726757ad-4455-11e8-ae04-0242ac110002:1");
         byte[] bytes = gtidSet.encode();
 
-        byte[] expected = {
-                0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, 0x67, 0x57, (byte)0xad,
-                0x44, 0x55, 0x11, (byte)0xe8, (byte)0xae, 0x04, 0x02, 0x42, (byte)0xac, 0x11, 0x00, 0x02,
-                0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-                0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-        };
+        byte[] expected = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, 0x67, 0x57, (byte) 0xad, 0x44, 0x55,
+                0x11, (byte) 0xe8, (byte) 0xae, 0x04, 0x02, 0x42, (byte) 0xac, 0x11, 0x00, 0x02, 0x01, 0x00, 0x00,
+                0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+                0x00, 0x00, 0x00, 0x00 };
 
-        for (int i=0; i<bytes.length; i++) {
+        for (int i = 0; i < bytes.length; i++) {
             assertEquals(expected[i], bytes[i]);
         }
     }
@@ -35,31 +39,17 @@ public class MysqlGTIDSetTest {
     @Test
     public void testParse() {
         Map<String, MysqlGTIDSet> cases = new HashMap<String, MysqlGTIDSet>(5);
-        cases.put(
-                "726757ad-4455-11e8-ae04-0242ac110002:1",
-                buildForTest(new Material("726757ad-4455-11e8-ae04-0242ac110002", 1, 2))
-        );
-        cases.put(
-                "726757ad-4455-11e8-ae04-0242ac110002:1-3",
-                buildForTest(new Material("726757ad-4455-11e8-ae04-0242ac110002", 1, 4))
-        );
-        cases.put(
-                "726757ad-4455-11e8-ae04-0242ac110002:1-3:4",
-                buildForTest(new Material("726757ad-4455-11e8-ae04-0242ac110002", 1, 5))
-        );
-        cases.put(
-                "726757ad-4455-11e8-ae04-0242ac110002:1-3:7-9",
-                buildForTest(new Material("726757ad-4455-11e8-ae04-0242ac110002", 1, 4, 7, 10))
-        );
-        cases.put(
-                "726757ad-4455-11e8-ae04-0242ac110002:1-3,726757ad-4455-11e8-ae04-0242ac110003:4",
-                buildForTest(
-                        Arrays.asList(
-                            new Material("726757ad-4455-11e8-ae04-0242ac110002", 1, 4),
-                                new Material("726757ad-4455-11e8-ae04-0242ac110003", 4, 5)
-                        )
-                )
-        );
+        cases.put("726757ad-4455-11e8-ae04-0242ac110002:1",
+            buildForTest(new Material("726757ad-4455-11e8-ae04-0242ac110002", 1, 2)));
+        cases.put("726757ad-4455-11e8-ae04-0242ac110002:1-3",
+            buildForTest(new Material("726757ad-4455-11e8-ae04-0242ac110002", 1, 4)));
+        cases.put("726757ad-4455-11e8-ae04-0242ac110002:1-3:4",
+            buildForTest(new Material("726757ad-4455-11e8-ae04-0242ac110002", 1, 5)));
+        cases.put("726757ad-4455-11e8-ae04-0242ac110002:1-3:7-9",
+            buildForTest(new Material("726757ad-4455-11e8-ae04-0242ac110002", 1, 4, 7, 10)));
+        cases.put("726757ad-4455-11e8-ae04-0242ac110002:1-3,726757ad-4455-11e8-ae04-0242ac110003:4",
+            buildForTest(Arrays.asList(new Material("726757ad-4455-11e8-ae04-0242ac110002", 1, 4),
+                new Material("726757ad-4455-11e8-ae04-0242ac110003", 4, 5))));
 
         for (Map.Entry<String, MysqlGTIDSet> entry : cases.entrySet()) {
             MysqlGTIDSet expected = entry.getValue();
@@ -71,7 +61,7 @@ public class MysqlGTIDSetTest {
 
     private static class Material {
 
-        public Material(String uuid, long start, long stop) {
+        public Material(String uuid, long start, long stop){
             this.uuid = uuid;
             this.start = start;
             this.stop = stop;
@@ -79,7 +69,7 @@ public class MysqlGTIDSetTest {
             this.stop1 = 0;
         }
 
-        public Material(String uuid, long start, long stop, long start1, long stop1) {
+        public Material(String uuid, long start, long stop, long start1, long stop1){
             this.uuid = uuid;
             this.start = start;
             this.stop = stop;
@@ -88,10 +78,10 @@ public class MysqlGTIDSetTest {
         }
 
         public String uuid;
-        public long start;
-        public long stop;
-        public long start1;
-        public long stop1;
+        public long   start;
+        public long   stop;
+        public long   start1;
+        public long   stop1;
     }
 
     private MysqlGTIDSet buildForTest(Material material) {

+ 5 - 5
driver/src/test/java/com/alibaba/otter/canal/parse/driver/mysql/UUIDSetTest.java

@@ -1,16 +1,16 @@
 package com.alibaba.otter.canal.parse.driver.mysql;
 
-import com.alibaba.otter.canal.parse.driver.mysql.packets.UUIDSet;
-import org.junit.Test;
+import static org.junit.Assert.assertEquals;
 
 import java.util.HashMap;
 import java.util.Map;
 
-import static org.junit.Assert.*;
+import org.junit.Test;
+
+import com.alibaba.otter.canal.parse.driver.mysql.packets.UUIDSet;
 
 /**
- * Created by hiwjd on 2018/4/26.
- * hiwjd0@gmail.com
+ * Created by hiwjd on 2018/4/26. hiwjd0@gmail.com
  */
 public class UUIDSetTest {
 

+ 76 - 0
example/pom.xml

@@ -21,6 +21,82 @@
 			<artifactId>canal.protocol</artifactId>
 			<version>${project.version}</version>
 		</dependency>
+		<dependency>
+			<groupId>com.alibaba</groupId>
+			<artifactId>druid</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>mysql</groupId>
+			<artifactId>mysql-connector-java</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.ddlutils</groupId>
+			<artifactId>ddlutils</artifactId>
+			<version>1.0</version>
+			<exclusions>
+				<exclusion>
+					<groupId>commons-beanutils</groupId>
+					<artifactId>commons-beanutils-core</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-lang</groupId>
+					<artifactId>commons-lang</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-dbcp</groupId>
+					<artifactId>commons-dbcp</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-pool</groupId>
+					<artifactId>commons-pool</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-logging</groupId>
+					<artifactId>commons-logging-api</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>dom4j</groupId>
+					<artifactId>dom4j</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>stax</groupId>
+					<artifactId>stax-api</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-collections</groupId>
+					<artifactId>commons-collections</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-digester</groupId>
+					<artifactId>commons-digester</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-betwixt</groupId>
+					<artifactId>commons-betwixt</artifactId>
+				</exclusion>
+			</exclusions>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.commons</groupId>
+			<artifactId>commons-pool2</artifactId>
+			<version>2.5.0</version>
+		</dependency>
+		<dependency>
+			<groupId>commons-beanutils</groupId>
+			<artifactId>commons-beanutils</artifactId>
+			<version>1.8.2</version>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.commons</groupId>
+			<artifactId>commons-lang3</artifactId>
+			<version>3.7</version>
+		</dependency>
+		<dependency>
+			<groupId>commons-collections</groupId>
+			<artifactId>commons-collections</artifactId>
+			<version>3.2</version>
+		</dependency>
+
 		<!-- test dependency -->
 		<dependency>
 			<groupId>junit</groupId>

+ 2 - 9
example/src/main/java/com/alibaba/otter/canal/example/AbstractCanalClientTest.java

@@ -3,6 +3,7 @@ package com.alibaba.otter.canal.example;
 import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.SystemUtils;
@@ -13,7 +14,6 @@ import org.springframework.util.Assert;
 import org.springframework.util.CollectionUtils;
 
 import com.alibaba.otter.canal.client.CanalConnector;
-import com.alibaba.otter.canal.client.impl.ClusterCanalConnector;
 import com.alibaba.otter.canal.protocol.CanalEntry.Column;
 import com.alibaba.otter.canal.protocol.CanalEntry.Entry;
 import com.alibaba.otter.canal.protocol.CanalEntry.EntryType;
@@ -38,7 +38,6 @@ public class AbstractCanalClientTest {
     protected static final String             SEP                = SystemUtils.LINE_SEPARATOR;
     protected static final String             DATE_FORMAT        = "yyyy-MM-dd HH:mm:ss";
     protected volatile boolean                running            = false;
-    private volatile boolean                  waiting            = true;
     protected Thread.UncaughtExceptionHandler handler            = new Thread.UncaughtExceptionHandler() {
 
                                                                      public void uncaughtException(Thread t, Throwable e) {
@@ -96,13 +95,8 @@ public class AbstractCanalClientTest {
         if (!running) {
             return;
         }
+        connector.stopRunning();
         running = false;
-        if (waiting) {
-            if (connector instanceof ClusterCanalConnector) {
-                ((ClusterCanalConnector) connector).setRetryTimes(-1);
-            }
-            thread.interrupt();
-        }
         if (thread != null) {
             try {
                 thread.join();
@@ -121,7 +115,6 @@ public class AbstractCanalClientTest {
                 MDC.put("destination", destination);
                 connector.connect();
                 connector.subscribe();
-                waiting = false;
                 while (running) {
                     Message message = connector.getWithoutAck(batchSize); // 获取指定数量的数据
                     long batchId = message.getId();

+ 68 - 0
example/src/main/java/com/alibaba/otter/canal/example/SimpleCanalClientPermanceTest.java

@@ -0,0 +1,68 @@
+package com.alibaba.otter.canal.example;
+import java.net.InetSocketAddress;
+import java.util.concurrent.ArrayBlockingQueue;
+
+import com.alibaba.otter.canal.client.CanalConnector;
+import com.alibaba.otter.canal.client.CanalConnectors;
+import com.alibaba.otter.canal.client.impl.SimpleCanalConnector;
+import com.alibaba.otter.canal.protocol.Message;
+
+public class SimpleCanalClientPermanceTest {
+
+    public static void main(String args[]) {
+        String destination = "example";
+        String ip = "127.0.0.1";
+        int batchSize = 1024;
+        int count = 0;
+        int sum = 0;
+        int perSum = 0;
+        long start = System.currentTimeMillis();
+        long end = 0;
+        final ArrayBlockingQueue<Long> queue = new ArrayBlockingQueue<Long>(100);
+        try {
+            final CanalConnector connector = CanalConnectors.newSingleConnector(new InetSocketAddress(ip, 11111),
+                destination,
+                "",
+                "");
+
+            Thread ackThread = new Thread(new Runnable() {
+
+                @Override
+                public void run() {
+                    while (true) {
+                        try {
+                            long batchId = queue.take();
+                            connector.ack(batchId);
+                        } catch (InterruptedException e) {
+                        }
+                    }
+                }
+            });
+            ackThread.start();
+
+            ((SimpleCanalConnector) connector).setLazyParseEntry(true);
+            connector.connect();
+            connector.subscribe();
+            while (true) {
+                Message message = connector.getWithoutAck(batchSize);
+                long batchId = message.getId();
+                int size = message.getRawEntries().size();
+                sum += size;
+                perSum += size;
+                count++;
+                queue.add(batchId);
+                if (count % 10 == 0) {
+                    end = System.currentTimeMillis();
+                    long tps = (perSum * 1000) / (end - start);
+                    System.out.println(" total : " + sum + " , current : " + perSum + " , cost : " + (end - start)
+                                       + " , tps : " + tps);
+                    start = end;
+                    perSum = 0;
+                }
+            }
+        } catch (Throwable e) {
+            e.printStackTrace();
+        }
+    }
+
+}

+ 144 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/AbstractDbClient.java

@@ -0,0 +1,144 @@
+package com.alibaba.otter.canal.example.db;
+
+import com.alibaba.otter.canal.protocol.CanalEntry;
+import com.alibaba.otter.canal.protocol.Message;
+import org.slf4j.MDC;
+
+import java.util.Date;
+import java.util.List;
+
+public abstract class AbstractDbClient extends CanalConnectorClient {
+
+
+    public abstract void insert(CanalEntry.Header header, List<CanalEntry.Column> afterColumns);
+
+    public abstract void update(CanalEntry.Header header, List<CanalEntry.Column> afterColumns);
+
+    public abstract void delete(CanalEntry.Header header, List<CanalEntry.Column> beforeColumns);
+
+
+    @Override
+    public synchronized void start() {
+        if (running) {
+            return;
+        }
+        super.start();
+    }
+
+    @Override
+    public synchronized void stop() {
+        if (!running) {
+            return;
+        }
+        super.stop();
+        MDC.remove("destination");
+    }
+
+    @Override
+    protected void processMessage(Message message) {
+        long batchId = message.getId();
+        //遍历每条消息
+        for (CanalEntry.Entry entry : message.getEntries()) {
+            session(entry);// never throws: exceptions are caught and retried/ignored inside session()
+        }
+        // ack all the time.
+        connector.ack(batchId);
+    }
+
+    private void session(CanalEntry.Entry entry) {
+        CanalEntry.EntryType entryType = entry.getEntryType();
+        int times = 0;
+        boolean success = false;
+        while (!success) {
+            if (times > 0) {
+                /**
+                 * 1:retry,重试,重试默认为3次,由retryTimes参数决定,如果重试次数达到阈值,则跳过,并且记录日志。
+                 * 2:ignore,直接忽略,不重试,记录日志。
+                 */
+                if (exceptionStrategy == ExceptionStrategy.RETRY.code) {
+                    if (times >= retryTimes) {
+                        break;
+                    }
+                } else {
+                    break;
+                }
+            }
+            try {
+                switch (entryType) {
+                    case TRANSACTIONBEGIN:
+                        transactionBegin(entry);
+                        break;
+                    case TRANSACTIONEND:
+                        transactionEnd(entry);
+                        break;
+                    case ROWDATA:
+                        rowData(entry);
+                        break;
+                    default:
+                        break;
+                }
+                success = true;
+            } catch (Exception e) {
+                times++;
+                logger.error("parse event has an error ,times: + " + times + ", data:" + entry.toString(), e);
+            }
+
+        }
+    }
+
+    private void rowData(CanalEntry.Entry entry) throws Exception {
+        CanalEntry.RowChange rowChange = CanalEntry.RowChange.parseFrom(entry.getStoreValue());
+        CanalEntry.EventType eventType = rowChange.getEventType();
+        CanalEntry.Header header = entry.getHeader();
+        long executeTime = header.getExecuteTime();
+        long delayTime = new Date().getTime() - executeTime;
+        String sql = rowChange.getSql();
+
+        try {
+            if (!isDML(eventType) || rowChange.getIsDdl()) {
+                processDDL(header, eventType, sql);
+                return;
+            }
+            //处理DML数据
+            processDML(header, eventType, rowChange, sql);
+        } catch (Exception e) {
+            logger.error("process event error ,", e);
+            logger.error(rowFormat,
+                    new Object[]{header.getLogfileName(), String.valueOf(header.getLogfileOffset()),
+                            header.getSchemaName(), header.getTableName(), eventType,
+                            String.valueOf(executeTime), String.valueOf(delayTime)});
+            throw e;//重新抛出
+        }
+    }
+
+    /**
+     * 处理 dml 数据
+     *
+     * @param header
+     * @param eventType
+     * @param rowChange
+     * @param sql
+     */
+    protected void processDML(CanalEntry.Header header, CanalEntry.EventType eventType, CanalEntry.RowChange rowChange, String sql) {
+        for (CanalEntry.RowData rowData : rowChange.getRowDatasList()) {
+            switch (eventType) {
+                case DELETE:
+                    delete(header, rowData.getBeforeColumnsList());
+                    break;
+                case INSERT:
+                    insert(header, rowData.getAfterColumnsList());
+                    break;
+                case UPDATE:
+                    update(header, rowData.getAfterColumnsList());
+                    break;
+                default:
+                    whenOthers(header, sql);
+            }
+        }
+    }
+
+}
+
+
+
+

+ 488 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/CanalConnectorClient.java

@@ -0,0 +1,488 @@
+package com.alibaba.otter.canal.example.db;
+
+import com.alibaba.otter.canal.client.CanalConnector;
+import com.alibaba.otter.canal.client.CanalConnectors;
+import com.alibaba.otter.canal.common.AbstractCanalLifeCycle;
+import com.alibaba.otter.canal.protocol.CanalEntry;
+import com.alibaba.otter.canal.protocol.Message;
+import org.apache.commons.lang.SystemUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
+import org.springframework.beans.factory.InitializingBean;
+import org.springframework.util.CollectionUtils;
+
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.List;
+
+public abstract class CanalConnectorClient extends AbstractCanalLifeCycle implements InitializingBean {
+
+    protected static final Logger logger = LoggerFactory.getLogger(CanalConnectorClient.class);
+    protected static final String SEP = SystemUtils.LINE_SEPARATOR;
+    protected static String contextFormat;
+    protected static String rowFormat;
+    protected static String transactionFormat;
+    protected static final String DATE_FORMAT = "yyyy-MM-dd HH:mm:ss";
+
+    static {
+        StringBuilder sb = new StringBuilder();
+        sb.append(SEP)
+                .append("-------------Batch-------------")
+                .append(SEP)
+                .append("* Batch Id: [{}] ,count : [{}] , Mem size : [{}] , Time : {}")
+                .append(SEP)
+                .append("* Start : [{}] ")
+                .append(SEP)
+                .append("* End : [{}] ")
+                .append(SEP)
+                .append("-------------------------------")
+                .append(SEP);
+        contextFormat = sb.toString();
+
+        sb = new StringBuilder();
+        sb.append(SEP)
+                .append("+++++++++++++Row+++++++++++++>>>")
+                .append("binlog[{}:{}] , name[{},{}] , eventType : {} , executeTime : {} , delay : {}ms")
+                .append(SEP);
+        rowFormat = sb.toString();
+
+        sb = new StringBuilder();
+        sb.append(SEP)
+                .append("===========Transaction {} : {}=======>>>")
+                .append("binlog[{}:{}] , executeTime : {} , delay : {}ms")
+                .append(SEP);
+        transactionFormat = sb.toString();
+    }
+
+    private String zkServers;//cluster
+    private String address;//single,ip:port
+    private String destination;
+    private String username;
+    private String password;
+    private int batchSize = 5 * 1024;
+    private String filter = "";//同canal filter,用于过滤database或者table的相关数据。
+    protected boolean debug = false;//开启debug,会把每条消息的详情打印
+
+    //1:retry,重试,重试默认为3次,由retryTimes参数决定,如果重试次数达到阈值,则跳过,并且记录日志。
+    //2:ignore,直接忽略,不重试,记录日志。
+    protected int exceptionStrategy = 1;
+    protected int retryTimes = 3;
+    protected int waitingTime = 100;//当binlog没有数据时,主线程等待的时间,单位ms,大于0
+
+
+    protected CanalConnector connector;
+    protected Thread thread;
+
+    protected Thread.UncaughtExceptionHandler handler = new Thread.UncaughtExceptionHandler() {
+
+        public void uncaughtException(Thread t, Throwable e) {
+            logger.error("process message has an error", e);
+        }
+    };
+
+    @Override
+    public void afterPropertiesSet() {
+        if (waitingTime <= 0) {
+            throw new IllegalArgumentException("waitingTime must be greater than 0");
+        }
+        if (ExceptionStrategy.codeOf(exceptionStrategy) == null) {
+            throw new IllegalArgumentException("exceptionStrategy is not valid,1 or 2");
+        }
+        start();
+    }
+
+    @Override
+    public void start() {
+        if (running) {
+            return;
+        }
+        super.start();
+        initConnector();
+
+        thread = new Thread(new Runnable() {
+
+            public void run() {
+                process();
+            }
+        });
+
+        thread.setUncaughtExceptionHandler(handler);
+        thread.start();
+    }
+
+    @Override
+    public void stop() {
+        if (!running) {
+            return;
+        }
+        super.stop();
+        quietlyStop(thread);
+    }
+
+    protected void quietlyStop(Thread task) {
+        if (task != null) {
+            task.interrupt();
+            try {
+                task.join();
+            } catch (InterruptedException e) {
+                // ignore
+            }
+        }
+    }
+
+    public void process() {
+        int times = 0;
+        while (running) {
+            try {
+                sleepWhenFailed(times);
+                //after block, should check the status of thread.
+                if (!running) {
+                    break;
+                }
+                MDC.put("destination", destination);
+                connector.connect();
+                connector.subscribe(filter);
+                connector.rollback();
+                times = 0;//reset;
+
+                while (running) {
+                    // 获取指定数量的数据,不确认
+                    Message message = connector.getWithoutAck(batchSize);
+
+                    long batchId = message.getId();
+                    int size = message.getEntries().size();
+
+                    if (batchId == -1 || size == 0) {
+                        try {
+                            Thread.sleep(waitingTime);
+                        } catch (InterruptedException e) {
+                            //
+                        }
+                        continue;
+                    }
+                    //logger
+                    printBatch(message, batchId);
+
+                    processMessage(message);
+
+                }
+            } catch (Exception e) {
+                logger.error("process error!", e);
+                if (times > 20) {
+                    times = 0;
+                }
+                times++;
+            } finally {
+                connector.disconnect();
+                MDC.remove("destination");
+            }
+        }
+    }
+
+    protected abstract void processMessage(Message message);
+
+
+    private void initConnector() {
+        if (zkServers != null && zkServers.length() > 0) {
+            connector = CanalConnectors.newClusterConnector(zkServers, destination, username, password);
+        } else if (address != null) {
+            String[] segments = address.split(":");
+            SocketAddress socketAddress = new InetSocketAddress(segments[0], Integer.valueOf(segments[1]));
+            connector = CanalConnectors.newSingleConnector(socketAddress, destination, username, password);
+        } else {
+            throw new IllegalArgumentException("zkServers or address cant be null at same time,you should specify one of them!");
+        }
+
+    }
+
+    /**
+     * 用于控制当连接异常时,重试的策略,我们不应该每次都是立即重试,否则将可能导致大量的错误,在空转时导致CPU过高的问题
+     * sleep策略基于简单的累加
+     *
+     * @param times
+     */
+    private void sleepWhenFailed(int times) {
+        if (times <= 0) {
+            return;
+        }
+        try {
+            int sleepTime = 1000 + times * 100;//最大sleep 3s。
+            Thread.sleep(sleepTime);
+        } catch (Exception ex) {
+            //
+        }
+    }
+
+    /**
+     * 打印当前batch的摘要信息
+     *
+     * @param message
+     * @param batchId
+     */
+    protected void printBatch(Message message, long batchId) {
+        if (!debug) {
+            return;
+        }
+        List<CanalEntry.Entry> entries = message.getEntries();
+        if (CollectionUtils.isEmpty(entries)) {
+            return;
+        }
+
+        long memSize = 0;
+        for (CanalEntry.Entry entry : entries) {
+            memSize += entry.getHeader().getEventLength();
+        }
+        int size = entries.size();
+        String startPosition = buildPosition(entries.get(0));
+        String endPosition = buildPosition(message.getEntries().get(size - 1));
+
+        SimpleDateFormat format = new SimpleDateFormat(DATE_FORMAT);
+        logger.info(contextFormat, new Object[]{batchId, size, memSize, format.format(new Date()), startPosition, endPosition});
+    }
+
+    protected String buildPosition(CanalEntry.Entry entry) {
+        CanalEntry.Header header = entry.getHeader();
+        long time = header.getExecuteTime();
+        Date date = new Date(time);
+        SimpleDateFormat format = new SimpleDateFormat(DATE_FORMAT);
+        StringBuilder sb = new StringBuilder();
+        sb.append(header.getLogfileName())
+                .append(":")
+                .append(header.getLogfileOffset())
+                .append(":")
+                .append(header.getExecuteTime())
+                .append("(")
+                .append(format.format(date))
+                .append(")");
+        return sb.toString();
+    }
+
+    /**
+     * Logs the header of a transaction-begin entry: transaction id, binlog
+     * position, execute time and replication delay. Default implementation
+     * only logs; no-op unless {@code debug} is enabled.
+     *
+     * @param entry canal entry of type TRANSACTIONBEGIN
+     */
+    protected void transactionBegin(CanalEntry.Entry entry) {
+        if (!debug) {
+            return;
+        }
+        try {
+            CanalEntry.TransactionBegin begin = CanalEntry.TransactionBegin.parseFrom(entry.getStoreValue());
+            // log the transaction header info and how far behind the binlog we are
+            CanalEntry.Header header = entry.getHeader();
+            long executeTime = header.getExecuteTime();
+            long delayTime = new Date().getTime() - executeTime;
+            logger.info(transactionFormat,
+                    new Object[]{"begin", begin.getTransactionId(), header.getLogfileName(),
+                            String.valueOf(header.getLogfileOffset()),
+                            String.valueOf(header.getExecuteTime()), String.valueOf(delayTime)});
+        } catch (Exception e) {
+            logger.error("parse event has an error , data:" + entry.toString(), e);
+        }
+    }
+
+    protected void transactionEnd(CanalEntry.Entry entry) {
+        if (!debug) {
+            return;
+        }
+        try {
+            CanalEntry.TransactionEnd end = CanalEntry.TransactionEnd.parseFrom(entry.getStoreValue());
+            // 打印事务提交信息,事务id
+            CanalEntry.Header header = entry.getHeader();
+            long executeTime = header.getExecuteTime();
+            long delayTime = new Date().getTime() - executeTime;
+
+            logger.info(transactionFormat,
+                    new Object[]{"end", end.getTransactionId(), header.getLogfileName(),
+                            String.valueOf(header.getLogfileOffset()),
+                            String.valueOf(header.getExecuteTime()), String.valueOf(delayTime)});
+        } catch (Exception e) {
+            logger.error("parse event has an error , data:" + entry.toString(), e);
+        }
+    }
+
+    /**
+     * 判断事件类型为DML 数据
+     *
+     * @param eventType
+     * @return
+     */
+    protected boolean isDML(CanalEntry.EventType eventType) {
+        switch (eventType) {
+            case INSERT:
+            case UPDATE:
+            case DELETE:
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    /**
+     * Handles DDL entries. DDL carries no row-change data, so the default
+     * implementation only logs the statement; no-op unless {@code debug}
+     * is enabled.
+     *
+     * @param header    entry header carrying schema/table names
+     * @param eventType DDL event type (CREATE/ALTER/TRUNCATE/...)
+     * @param sql       the DDL statement text
+     */
+
+    protected void processDDL(CanalEntry.Header header, CanalEntry.EventType eventType, String sql) {
+        if (!debug) {
+            return;
+        }
+        String table = header.getSchemaName() + "." + header.getTableName();
+        // DDL has no row-change data, so only the statement itself is reported
+        switch (eventType) {
+            case CREATE:
+                logger.warn("parse create table event, table: {}, sql: {}", table, sql);
+                return;
+            case ALTER:
+                logger.warn("parse alter table event, table: {}, sql: {}", table, sql);
+                return;
+            case TRUNCATE:
+                logger.warn("parse truncate table event, table: {}, sql: {}", table, sql);
+                return;
+            case ERASE:
+            case QUERY:
+                logger.warn("parse event : {}, sql: {} . ignored!", eventType.name(), sql);
+                return;
+            case RENAME:
+                logger.warn("parse rename table event, table: {}, sql: {}", table, sql);
+                return;
+            case CINDEX:
+                logger.warn("parse create index event, table: {}, sql: {}", table, sql);
+                return;
+            case DINDEX:
+                logger.warn("parse delete index event, table: {}, sql: {}", table, sql);
+                return;
+            default:
+                logger.warn("parse unknown event: {}, table: {}, sql: {}", new String[]{eventType.name(), table, sql});
+                break;
+        }
+    }
+
+    /**
+     * 强烈建议捕获异常,非上述已列出的其他操作,非核心
+     * 除了“insert”、“update”、“delete”操作之外的,其他类型的操作.
+     * 默认实现为“无操作”
+     *
+     * @param header 可以从header中获得schema、table的名称
+     * @param sql
+     */
+    public void whenOthers(CanalEntry.Header header, String sql) {
+        String schema = header.getSchemaName();
+        String table = header.getTableName();
+        logger.error("ignore event,schema: {},table: {},SQL: {}", new String[]{schema, table, sql});
+    }
+
+    public enum ExceptionStrategy {
+        RETRY(1), IGNORE(2);
+        public int code;
+
+        ExceptionStrategy(int code) {
+            this.code = code;
+        }
+
+        public static ExceptionStrategy codeOf(Integer code) {
+            if (code != null) {
+                for (ExceptionStrategy e : ExceptionStrategy.values()) {
+                    if (e.code == code) {
+                        return e;
+                    }
+                }
+            }
+            return null;
+        }
+    }
+
+    // ---------------- configuration accessors (plain getters/setters) ----------------
+
+    public String getZkServers() {
+        return zkServers;
+    }
+
+    public void setZkServers(String zkServers) {
+        this.zkServers = zkServers;
+    }
+
+    public String getAddress() {
+        return address;
+    }
+
+    public void setAddress(String address) {
+        this.address = address;
+    }
+
+    public String getDestination() {
+        return destination;
+    }
+
+    public void setDestination(String destination) {
+        this.destination = destination;
+    }
+
+    public String getUsername() {
+        return username;
+    }
+
+    public void setUsername(String username) {
+        this.username = username;
+    }
+
+    public String getPassword() {
+        return password;
+    }
+
+    public void setPassword(String password) {
+        this.password = password;
+    }
+
+    public int getBatchSize() {
+        return batchSize;
+    }
+
+    public void setBatchSize(int batchSize) {
+        this.batchSize = batchSize;
+    }
+
+    public String getFilter() {
+        return filter;
+    }
+
+    public void setFilter(String filter) {
+        this.filter = filter;
+    }
+
+    public boolean isDebug() {
+        return debug;
+    }
+
+    public void setDebug(boolean debug) {
+        this.debug = debug;
+    }
+
+    public int getExceptionStrategy() {
+        return exceptionStrategy;
+    }
+
+    public void setExceptionStrategy(int exceptionStrategy) {
+        this.exceptionStrategy = exceptionStrategy;
+    }
+
+    public int getRetryTimes() {
+        return retryTimes;
+    }
+
+    public void setRetryTimes(int retryTimes) {
+        this.retryTimes = retryTimes;
+    }
+
+    public int getWaitingTime() {
+        return waitingTime;
+    }
+
+    public void setWaitingTime(int waitingTime) {
+        this.waitingTime = waitingTime;
+    }
+}
+}

+ 35 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/MysqlLoadLauncher.java

@@ -0,0 +1,35 @@
+package com.alibaba.otter.canal.example.db;
+
+import com.alibaba.otter.canal.example.db.mysql.MysqlClient;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class MysqlLoadLauncher {
+    private static final Logger logger = LoggerFactory.getLogger(MysqlLoadLauncher.class);
+
+    public static void main(String[] args) {
+        try {
+            logger.info("## start the canal mysql client.");
+            final MysqlClient client = ServiceLocator.getMysqlClient();
+            logger.info("## the canal consumer is running now ......");
+            client.start();
+            Runtime.getRuntime().addShutdownHook(new Thread() {
+
+                public void run() {
+                    try {
+                        logger.info("## stop the canal consumer");
+                        client.stop();
+                    } catch (Throwable e) {
+                        logger.warn("##something goes wrong when stopping canal consumer:\n{}", e);
+                    } finally {
+                        logger.info("## canal consumer is down.");
+                    }
+                }
+
+            });
+        } catch (Throwable e) {
+            logger.error("## Something goes wrong when starting up the canal consumer:\n{}", e);
+            System.exit(0);
+        }
+    }
+}

+ 169 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/PropertyPlaceholderConfigurer.java

@@ -0,0 +1,169 @@
+package com.alibaba.otter.canal.example.db;
+
+import org.springframework.beans.factory.InitializingBean;
+import org.springframework.context.ResourceLoaderAware;
+import org.springframework.core.io.Resource;
+import org.springframework.core.io.ResourceLoader;
+import org.springframework.util.Assert;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+
+/**
+ * Extension of Spring's
+ * {@linkplain org.springframework.beans.factory.config.PropertyPlaceholderConfigurer}
+ * that adds default-value support. For example {@code ${placeholder:defaultValue}}:
+ * when the placeholder cannot be resolved, {@code defaultValue} is used instead.
+ * 
+ * @author jianghang 2013-1-24 下午03:37:56
+ * @version 1.0.0
+ */
+public class PropertyPlaceholderConfigurer extends org.springframework.beans.factory.config.PropertyPlaceholderConfigurer implements ResourceLoaderAware, InitializingBean {
+
+    private static final String PLACEHOLDER_PREFIX = "${";
+    private static final String PLACEHOLDER_SUFFIX = "}";
+    private ResourceLoader      loader;
+    private String[]            locationNames;
+
+    public PropertyPlaceholderConfigurer(){
+        // unresolved placeholders are tolerated; resolvePlaceholder() below
+        // supplies defaults where present
+        setIgnoreUnresolvablePlaceholders(true);
+    }
+
+    public void setResourceLoader(ResourceLoader loader) {
+        this.loader = loader;
+    }
+
+    public void setLocationNames(String[] locations) {
+        this.locationNames = locations;
+    }
+
+    // Resolves system-property placeholders inside the configured location
+    // names, then loads each non-blank location as a Spring Resource.
+    public void afterPropertiesSet() throws Exception {
+        Assert.notNull(loader, "no resourceLoader");
+
+        if (locationNames != null) {
+            for (int i = 0; i < locationNames.length; i++) {
+                locationNames[i] = resolveSystemPropertyPlaceholders(locationNames[i]);
+            }
+        }
+
+        if (locationNames != null) {
+            List<Resource> resources = new ArrayList<Resource>(locationNames.length);
+
+            for (String location : locationNames) {
+                location = trimToNull(location);
+
+                if (location != null) {
+                    resources.add(loader.getResource(location));
+                }
+            }
+
+            super.setLocations(resources.toArray(new Resource[resources.size()]));
+        }
+    }
+
+    // Replaces every ${...} occurrence in text with its system-property /
+    // environment-variable value (or inline default); unresolved placeholders
+    // are left as-is and reported to stderr.
+    private String resolveSystemPropertyPlaceholders(String text) {
+        StringBuilder buf = new StringBuilder(text);
+
+        for (int startIndex = buf.indexOf(PLACEHOLDER_PREFIX); startIndex >= 0;) {
+            int endIndex = buf.indexOf(PLACEHOLDER_SUFFIX, startIndex + PLACEHOLDER_PREFIX.length());
+
+            if (endIndex != -1) {
+                String placeholder = buf.substring(startIndex + PLACEHOLDER_PREFIX.length(), endIndex);
+                int nextIndex = endIndex + PLACEHOLDER_SUFFIX.length();
+
+                try {
+                    String value = resolveSystemPropertyPlaceholder(placeholder);
+
+                    if (value != null) {
+                        buf.replace(startIndex, endIndex + PLACEHOLDER_SUFFIX.length(), value);
+                        // continue scanning right after the substituted value
+                        nextIndex = startIndex + value.length();
+                    } else {
+                        System.err.println("Could not resolve placeholder '"
+                                           + placeholder
+                                           + "' in ["
+                                           + text
+                                           + "] as system property: neither system property nor environment variable found");
+                    }
+                } catch (Throwable ex) {
+                    System.err.println("Could not resolve placeholder '" + placeholder + "' in [" + text
+                                       + "] as system property: " + ex);
+                }
+
+                startIndex = buf.indexOf(PLACEHOLDER_PREFIX, nextIndex);
+            } else {
+                // no closing brace: stop scanning
+                startIndex = -1;
+            }
+        }
+
+        return buf.toString();
+    }
+
+    // Lookup order: system property, then environment variable, then the
+    // inline ":default" value (may be null).
+    private String resolveSystemPropertyPlaceholder(String placeholder) {
+        DefaultablePlaceholder dp = new DefaultablePlaceholder(placeholder);
+        String value = System.getProperty(dp.placeholder);
+
+        if (value == null) {
+            value = System.getenv(dp.placeholder);
+        }
+
+        if (value == null) {
+            value = dp.defaultValue;
+        }
+
+        return value;
+    }
+
+    // Spring callback: falls back to the inline default when the properties
+    // files cannot resolve the placeholder.
+    @Override
+    protected String resolvePlaceholder(String placeholder, Properties props, int systemPropertiesMode) {
+        DefaultablePlaceholder dp = new DefaultablePlaceholder(placeholder);
+        String value = super.resolvePlaceholder(dp.placeholder, props, systemPropertiesMode);
+
+        if (value == null) {
+            value = dp.defaultValue;
+        }
+
+        return trimToEmpty(value);
+    }
+
+    // Splits "name:default" into the placeholder name and its optional default.
+    private static class DefaultablePlaceholder {
+
+        private final String defaultValue;
+        private final String placeholder;
+
+        public DefaultablePlaceholder(String placeholder){
+            int commaIndex = placeholder.indexOf(":");
+            String defaultValue = null;
+
+            if (commaIndex >= 0) {
+                defaultValue = trimToEmpty(placeholder.substring(commaIndex + 1));
+                placeholder = trimToEmpty(placeholder.substring(0, commaIndex));
+            }
+
+            this.placeholder = placeholder;
+            this.defaultValue = defaultValue;
+        }
+    }
+
+    // Trims the string, mapping null and blank inputs to null.
+    private String trimToNull(String str) {
+        if (str == null) {
+            return null;
+        }
+
+        String result = str.trim();
+
+        if (result == null || result.length() == 0) {
+            return null;
+        }
+
+        return result;
+    }
+
+    // Trims the string, mapping null to the empty string.
+    public static String trimToEmpty(String str) {
+        if (str == null) {
+            return "";
+        }
+
+        return str.trim();
+    }
+}

+ 44 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/ServiceLocator.java

@@ -0,0 +1,44 @@
+package com.alibaba.otter.canal.example.db;
+
+import com.alibaba.otter.canal.example.db.mysql.MysqlClient;
+import org.springframework.beans.factory.DisposableBean;
+import org.springframework.context.ApplicationContext;
+import org.springframework.context.support.ClassPathXmlApplicationContext;
+import org.springframework.util.Assert;
+
+public class ServiceLocator implements DisposableBean {
+
+    private static ApplicationContext applicationContext = null;
+
+    static {
+        try {
+            applicationContext = new ClassPathXmlApplicationContext("classpath:client-spring.xml");
+        } catch (RuntimeException e) {
+            throw e;
+        }
+    }
+
+    private static <T> T getBean(String name) {
+        assertContextInjected();
+        return (T) applicationContext.getBean(name);
+    }
+
+
+    private static void clearHolder() {
+        ServiceLocator.applicationContext = null;
+    }
+
+    @Override
+    public void destroy() throws Exception {
+        ServiceLocator.clearHolder();
+    }
+
+    private static void assertContextInjected() {
+        Assert.state(applicationContext != null, "ApplicationContext not set");
+    }
+
+
+    public static MysqlClient getMysqlClient() {
+        return getBean("mysqlClient");
+    }
+}

+ 121 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/AbstractDbDialect.java

@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2010-2101 Alibaba Group Holding Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.otter.canal.example.db.dialect;
+
+import com.alibaba.otter.canal.example.db.utils.DdlUtils;
+import com.google.common.base.Function;
+import com.google.common.collect.MigrateMap;
+import org.apache.commons.lang.exception.NestableRuntimeException;
+import org.apache.ddlutils.model.Table;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.ConnectionCallback;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.datasource.DataSourceTransactionManager;
+import org.springframework.jdbc.support.lob.LobHandler;
+import org.springframework.transaction.TransactionDefinition;
+import org.springframework.transaction.support.TransactionTemplate;
+import org.springframework.util.Assert;
+
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Base {@link DbDialect} implementation: wires a transaction template around
+ * the given JdbcTemplate, captures database name/version from connection
+ * metadata, and caches table models looked up via DdlUtils.
+ */
+public abstract class AbstractDbDialect implements DbDialect {
+
+    protected int databaseMajorVersion;
+    protected int databaseMinorVersion;
+    protected String databaseName;
+    protected JdbcTemplate jdbcTemplate;
+    protected TransactionTemplate transactionTemplate;
+    protected LobHandler lobHandler;
+    // cache keyed by [schema, table]; populated lazily by the computing map
+    protected Map<List<String>, Table> tables;
+
+    public AbstractDbDialect(final JdbcTemplate jdbcTemplate, LobHandler lobHandler) {
+        this.jdbcTemplate = jdbcTemplate;
+        this.lobHandler = lobHandler;
+        // initialise the transaction template; each execute runs in a new transaction
+        this.transactionTemplate = new TransactionTemplate();
+        transactionTemplate.setTransactionManager(new DataSourceTransactionManager(jdbcTemplate.getDataSource()));
+        transactionTemplate.setPropagationBehavior(TransactionDefinition.PROPAGATION_REQUIRES_NEW);
+
+        // read database name and version once from connection metadata
+        jdbcTemplate.execute(new ConnectionCallback() {
+
+            public Object doInConnection(Connection c) throws SQLException, DataAccessException {
+                DatabaseMetaData meta = c.getMetaData();
+                databaseName = meta.getDatabaseProductName();
+                databaseMajorVersion = meta.getDatabaseMajorVersion();
+                databaseMinorVersion = meta.getDatabaseMinorVersion();
+
+                return null;
+            }
+        });
+
+        initTables(jdbcTemplate);
+    }
+
+    // When useCache is false the cached entry is evicted first, forcing the
+    // computing map to reload the table model from the database.
+    public Table findTable(String schema, String table, boolean useCache) {
+        List<String> key = Arrays.asList(schema, table);
+        if (useCache == false) {
+            tables.remove(key);
+        }
+
+        return tables.get(key);
+    }
+
+    public Table findTable(String schema, String table) {
+        return findTable(schema, table, true);
+    }
+
+    public LobHandler getLobHandler() {
+        return lobHandler;
+    }
+
+    public JdbcTemplate getJdbcTemplate() {
+        return jdbcTemplate;
+    }
+
+    public TransactionTemplate getTransactionTemplate() {
+        return transactionTemplate;
+    }
+
+    private void initTables(final JdbcTemplate jdbcTemplate) {
+        this.tables = MigrateMap.makeComputingMap(new Function<List<String>, Table>() {
+
+            public Table apply(List<String> names) {
+                Assert.isTrue(names.size() == 2);
+                try {
+                    // NOTE(review): names.get(0) is passed as both catalog and
+                    // schema — presumably intentional for MySQL; confirm for
+                    // other databases
+                    Table table = DdlUtils.findTable(jdbcTemplate, names.get(0), names.get(0), names.get(1));
+                    if (table == null) {
+                        throw new NestableRuntimeException("no found table [" + names.get(0) + "." + names.get(1)
+                                + "] , pls check");
+                    } else {
+                        return table;
+                    }
+                } catch (Exception e) {
+                    throw new NestableRuntimeException("find table [" + names.get(0) + "." + names.get(1) + "] error",
+                            e);
+                }
+            }
+        });
+    }
+
+
+}

+ 105 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/AbstractSqlTemplate.java

@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2010-2101 Alibaba Group Holding Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.otter.canal.example.db.dialect;
+
+/**
+ * Default CRUD SQL builder based on standard SQL syntax.
+ * 
+ * @author jianghang 2011-10-27 下午01:37:00
+ * @version 4.0.0
+ */
+public abstract class AbstractSqlTemplate implements SqlTemplate {
+
+    private static final String DOT = ".";
+
+    public String getSelectSql(String schemaName, String tableName, String[] pkNames, String[] columnNames) {
+        StringBuilder sql = new StringBuilder("select ");
+        int size = columnNames.length;
+        for (int i = 0; i < size; i++) {
+            sql.append(appendEscape(columnNames[i])).append((i + 1 < size) ? " , " : "");
+        }
+
+        sql.append(" from ").append(getFullName(schemaName, tableName)).append(" where ( ");
+        appendColumnEquals(sql, pkNames, "and");
+        sql.append(" ) ");
+        return sql.toString().intern();// intern so identical generated SQL strings are shared
+    }
+
+    public String getUpdateSql(String schemaName, String tableName, String[] pkNames, String[] columnNames) {
+        StringBuilder sql = new StringBuilder("update " + getFullName(schemaName, tableName) + " set ");
+        appendColumnEquals(sql, columnNames, ",");
+        sql.append(" where (");
+        appendColumnEquals(sql, pkNames, "and");
+        sql.append(")");
+        return sql.toString().intern(); // intern so identical generated SQL strings are shared
+    }
+
+    public String getInsertSql(String schemaName, String tableName, String[] pkNames, String[] columnNames) {
+        StringBuilder sql = new StringBuilder("insert into " + getFullName(schemaName, tableName) + "(");
+        // non-pk columns first, then pk columns, matching the value order below
+        String[] allColumns = new String[pkNames.length + columnNames.length];
+        System.arraycopy(columnNames, 0, allColumns, 0, columnNames.length);
+        System.arraycopy(pkNames, 0, allColumns, columnNames.length, pkNames.length);
+
+        int size = allColumns.length;
+        for (int i = 0; i < size; i++) {
+            sql.append(appendEscape(allColumns[i])).append((i + 1 < size) ? "," : "");
+        }
+
+        sql.append(") values (");
+        appendColumnQuestions(sql, allColumns);
+        sql.append(")");
+        return sql.toString().intern();// intern to avoid keeping many identical SQL strings alive
+    }
+
+    public String getDeleteSql(String schemaName, String tableName, String[] pkNames) {
+        StringBuilder sql = new StringBuilder("delete from " + getFullName(schemaName, tableName) + " where ");
+        appendColumnEquals(sql, pkNames, "and");
+        return sql.toString().intern();// intern to avoid keeping many identical SQL strings alive
+    }
+
+    // schema-qualified table name; schema may be null for the default schema
+    protected String getFullName(String schemaName, String tableName) {
+        StringBuilder sb = new StringBuilder();
+        if (schemaName != null) {
+            sb.append(appendEscape(schemaName)).append(DOT);
+        }
+        sb.append(appendEscape(tableName));
+        return sb.toString().intern();
+    }
+
+    // ================ helper method ============
+
+    // identity by default; dialects override to add identifier quoting
+    protected String appendEscape(String columnName) {
+        return columnName;
+    }
+
+    // appends "?, ?, ..." — one bind marker per column
+    protected void appendColumnQuestions(StringBuilder sql, String[] columns) {
+        int size = columns.length;
+        for (int i = 0; i < size; i++) {
+            sql.append("?").append((i + 1 < size) ? " , " : "");
+        }
+    }
+
+    // appends "col = ? <separator> col = ? ..." for WHERE / SET clauses
+    protected void appendColumnEquals(StringBuilder sql, String[] columns, String separator) {
+        int size = columns.length;
+        for (int i = 0; i < size; i++) {
+            sql.append(" ").append(appendEscape(columns[i])).append(" = ").append("? ");
+            if (i != size - 1) {
+                sql.append(separator);
+            }
+        }
+    }
+}

+ 20 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/DbDialect.java

@@ -0,0 +1,20 @@
+package com.alibaba.otter.canal.example.db.dialect;
+
+import org.apache.ddlutils.model.Table;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.support.lob.LobHandler;
+import org.springframework.transaction.support.TransactionTemplate;
+
+/**
+ * Database dialect abstraction: exposes JDBC/transaction helpers and table
+ * model lookup for a target database.
+ */
+public interface DbDialect {
+
+    LobHandler getLobHandler();
+
+    JdbcTemplate getJdbcTemplate();
+
+    TransactionTemplate getTransactionTemplate();
+
+    // cached lookup of the table model for schema.table
+    Table findTable(String schema, String table);
+
+    // useCache=false forces a reload of the table model
+    Table findTable(String schema, String table, boolean useCache);
+
+}

+ 40 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/SqlTemplate.java

@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2010-2101 Alibaba Group Holding Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.otter.canal.example.db.dialect;
+
+/**
+ * SQL construction template operations.
+ * 
+ * @author jianghang 2011-10-27 下午01:31:15
+ * @version 4.0.0
+ */
+public interface SqlTemplate {
+
+    public String getSelectSql(String schemaName, String tableName, String[] pkNames, String[] columnNames);
+
+    public String getUpdateSql(String schemaName, String tableName, String[] pkNames, String[] columnNames);
+
+    public String getDeleteSql(String schemaName, String tableName, String[] pkNames);
+
+    public String getInsertSql(String schemaName, String tableName, String[] pkNames, String[] columnNames);
+
+    /**
+     * Builds the corresponding merge (upsert) SQL for this dialect.
+     */
+    public String getMergeSql(String schemaName, String tableName, String[] pkNames, String[] columnNames,
+                              String[] viewColumnNames, boolean updatePks);
+}

+ 93 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/TableType.java

@@ -0,0 +1,93 @@
+package com.alibaba.otter.canal.example.db.dialect;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+
/**
 * An enumeration wrapper around JDBC table types.
 */
public enum TableType {

    /** Unknown */
    unknown,

    /** System table */
    system_table,

    /** Global temporary */
    global_temporary,

    /** Local temporary */
    local_temporary,

    /** Table */
    table,

    /** View */
    view,

    /** Alias */
    alias,

    /** Synonym */
    synonym,;

    /**
     * Converts an array of table types to an array of their corresponding string values.
     *
     * @param tableTypes Array of table types
     * @return Array of string table types
     */
    public static String[] toStrings(final TableType[] tableTypes) {
        if (tableTypes == null || tableTypes.length == 0) {
            return new String[0];
        }

        // null elements are silently dropped
        final List<String> names = new ArrayList<String>(tableTypes.length);
        for (final TableType type : tableTypes) {
            if (type == null) {
                continue;
            }
            names.add(type.toString().toUpperCase(Locale.ENGLISH));
        }

        return names.toArray(new String[names.size()]);
    }

    /**
     * Converts an array of string table types to an array of their corresponding enumeration values.
     *
     * @param tableTypeStrings Array of string table types
     * @return Array of table types
     */
    public static TableType[] valueOf(final String[] tableTypeStrings) {
        if (tableTypeStrings == null || tableTypeStrings.length == 0) {
            return new TableType[0];
        }

        final List<TableType> result = new ArrayList<TableType>(tableTypeStrings.length);
        for (final String name : tableTypeStrings) {
            // constants are lower-case, so normalise before lookup
            result.add(valueOf(name.toLowerCase(Locale.ENGLISH)));
        }

        return result.toArray(new TableType[result.size()]);
    }
}

+ 32 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/mysql/MysqlDialect.java

@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2010-2101 Alibaba Group Holding Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.otter.canal.example.db.dialect.mysql;
+
+import com.alibaba.otter.canal.example.db.dialect.AbstractDbDialect;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.support.lob.LobHandler;
+
+/**
+ * MySQL-specific {@link AbstractDbDialect}.
+ */
+public class MysqlDialect extends AbstractDbDialect {
+
+    public MysqlDialect(JdbcTemplate jdbcTemplate, LobHandler lobHandler) {
+        super(jdbcTemplate, lobHandler);
+    }
+
+    // MySQL keeps empty strings as-is (it does not coerce them to NULL)
+    public boolean isEmptyStringNulled() {
+        return false;
+    }
+}

+ 84 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/mysql/MysqlSqlTemplate.java

@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2010-2101 Alibaba Group Holding Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.otter.canal.example.db.dialect.mysql;
+
+import com.alibaba.otter.canal.example.db.dialect.AbstractSqlTemplate;
+
+/**
+ * MySQL SQL generation template.
+ *
+ * @author jianghang 2011-10-27 下午01:41:20
+ * @version 4.0.0
+ */
+public class MysqlSqlTemplate extends AbstractSqlTemplate {
+
+    private static final String ESCAPE = "`";
+
+    /**
+     * Builds an "INSERT ... ON DUPLICATE KEY UPDATE" upsert statement.
+     * Column order is non-pk columns followed by pk columns, for both the
+     * column list and the bind markers.
+     */
+    public String getMergeSql(String schemaName, String tableName, String[] pkNames, String[] columnNames,
+                              String[] viewColumnNames, boolean includePks) {
+        StringBuilder sql = new StringBuilder("insert into " + getFullName(schemaName, tableName) + "(");
+        int size = columnNames.length;
+        for (int i = 0; i < size; i++) {
+            sql.append(appendEscape(columnNames[i])).append(" , ");
+        }
+        size = pkNames.length;
+        for (int i = 0; i < size; i++) {
+            sql.append(appendEscape(pkNames[i])).append((i + 1 < size) ? " , " : "");
+        }
+
+        sql.append(") values (");
+        size = columnNames.length;
+        for (int i = 0; i < size; i++) {
+            sql.append("?").append(" , ");
+        }
+        size = pkNames.length;
+        for (int i = 0; i < size; i++) {
+            sql.append("?").append((i + 1 < size) ? " , " : "");
+        }
+        sql.append(")");
+        sql.append(" on duplicate key update ");
+
+        size = columnNames.length;
+        for (int i = 0; i < size; i++) {
+            sql.append(appendEscape(columnNames[i]))
+                    .append("=values(")
+                    .append(appendEscape(columnNames[i]))
+                    .append(")");
+            if (includePks) {
+                sql.append(" , ");
+            } else {
+                sql.append((i + 1 < size) ? " , " : "");
+            }
+        }
+
+        if (includePks) {
+            // MySQL's upsert runs the UPDATE when either a unique or primary
+            // key matches, so the pk columns must be updated as well
+            size = pkNames.length;
+            for (int i = 0; i < size; i++) {
+                sql.append(appendEscape(pkNames[i])).append("=values(").append(appendEscape(pkNames[i])).append(")");
+                sql.append((i + 1 < size) ? " , " : "");
+            }
+        }
+
+        return sql.toString().intern();// intern to avoid keeping many identical SQL strings alive
+    }
+
+    // quote identifiers with backticks, MySQL's escape character
+    protected String appendEscape(String columnName) {
+        return ESCAPE + columnName + ESCAPE;
+    }
+
+}

+ 207 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/mysql/AbstractMysqlClient.java

@@ -0,0 +1,207 @@
+package com.alibaba.otter.canal.example.db.mysql;
+
+import com.alibaba.fastjson.JSON;
+import com.alibaba.otter.canal.example.db.AbstractDbClient;
+import com.alibaba.otter.canal.example.db.dialect.DbDialect;
+import com.alibaba.otter.canal.example.db.dialect.mysql.MysqlDialect;
+import com.alibaba.otter.canal.example.db.dialect.mysql.MysqlSqlTemplate;
+import com.alibaba.otter.canal.example.db.dialect.SqlTemplate;
+import com.alibaba.otter.canal.example.db.utils.SqlUtils;
+import com.alibaba.otter.canal.protocol.CanalEntry;
+import com.alibaba.otter.canal.protocol.exception.CanalClientException;
+import org.apache.commons.lang.StringUtils;
+import org.apache.ddlutils.model.Column;
+import org.apache.ddlutils.model.Table;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.core.PreparedStatementSetter;
+import org.springframework.jdbc.core.StatementCreatorUtils;
+import org.springframework.jdbc.support.lob.DefaultLobHandler;
+import org.springframework.jdbc.support.lob.LobCreator;
+import org.springframework.transaction.TransactionStatus;
+import org.springframework.transaction.support.TransactionCallback;
+
+import javax.sql.DataSource;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+
+public abstract class AbstractMysqlClient extends AbstractDbClient {
+
+    private DataSource dataSource;
+
+    private DbDialect dbDialect;
+    private SqlTemplate sqlTemplate;
+
+    protected Integer execute(final CanalEntry.Header header, final List<CanalEntry.Column> columns) {
+        final String sql = getSql(header, columns);
+        final LobCreator lobCreator = dbDialect.getLobHandler().getLobCreator();
+        dbDialect.getTransactionTemplate().execute(new TransactionCallback() {
+
+            public Object doInTransaction(TransactionStatus status) {
+                try {
+                    JdbcTemplate template = dbDialect.getJdbcTemplate();
+                    int affect = template.update(sql, new PreparedStatementSetter() {
+
+                        public void setValues(PreparedStatement ps) throws SQLException {
+                            doPreparedStatement(ps, dbDialect, lobCreator, header, columns);
+                        }
+                    });
+                    return affect;
+                } finally {
+                    lobCreator.close();
+                }
+            }
+        });
+        return 0;
+    }
+
+    private String getSql(CanalEntry.Header header, List<CanalEntry.Column> columns) {
+        List<String> pkNames = new ArrayList<>();
+        List<String> colNames = new ArrayList<>();
+        for (CanalEntry.Column column : columns) {
+            if (column.getIsKey()) {
+                pkNames.add(column.getName());
+            } else {
+                colNames.add(column.getName());
+            }
+        }
+        String sql = "";
+        CanalEntry.EventType eventType = header.getEventType();
+        switch (eventType) {
+            case INSERT:
+                sql = sqlTemplate.getInsertSql(header.getSchemaName(), header.getTableName(), pkNames.toArray(new String[]{}), colNames.toArray(new String[]{}));
+                break;
+            case UPDATE:
+                sql = sqlTemplate.getUpdateSql(header.getSchemaName(), header.getTableName(), pkNames.toArray(new String[]{}), colNames.toArray(new String[]{}));
+                break;
+            case DELETE:
+                sql = sqlTemplate.getDeleteSql(header.getSchemaName(), header.getTableName(), pkNames.toArray(new String[]{}));
+        }
+        logger.info("Execute sql: {}", sql);
+        return sql;
+    }
+
+    private void doPreparedStatement(PreparedStatement ps, DbDialect dbDialect, LobCreator lobCreator,
+                                     CanalEntry.Header header, List<CanalEntry.Column> columns) throws SQLException {
+
+        List<CanalEntry.Column> rebuildColumns = new ArrayList<>(columns.size());
+
+        List<CanalEntry.Column> keyColumns = new ArrayList<>(columns.size());
+        List<CanalEntry.Column> notKeyColumns = new ArrayList<>(columns.size());
+        for (CanalEntry.Column column : columns) {
+            if (column.getIsKey()) {
+                keyColumns.add(column);
+            } else {
+                notKeyColumns.add(column);
+            }
+        }
+        CanalEntry.EventType eventType = header.getEventType();
+        switch (eventType) {
+            case INSERT:
+            case UPDATE:
+                // insert/update语句对应的字段数序都是将主键排在后面
+                rebuildColumns.addAll(notKeyColumns);
+                rebuildColumns.addAll(keyColumns);
+                break;
+            case DELETE:
+                rebuildColumns.addAll(keyColumns);
+        }
+
+        // 获取一下当前字段名的数据是否必填
+        Table table = dbDialect.findTable(header.getSchemaName(), header.getTableName());
+        Map<String, Boolean> isRequiredMap = new HashMap();
+        for (Column tableColumn : table.getColumns()) {
+            isRequiredMap.put(StringUtils.lowerCase(tableColumn.getName()), tableColumn.isRequired());
+        }
+
+        List<Object> values = new ArrayList<>(rebuildColumns.size());
+        for (int i = 0; i < rebuildColumns.size(); i++) {
+            int paramIndex = i + 1;
+            CanalEntry.Column column = rebuildColumns.get(i);
+            int sqlType = column.getSqlType();
+
+            Boolean isRequired = isRequiredMap.get(StringUtils.lowerCase(column.getName()));
+            if (isRequired == null) {
+                // 清理一下目标库的表结构,二次检查一下
+                table = dbDialect.findTable(header.getSchemaName(), header.getTableName());
+
+                isRequiredMap = new HashMap<>();
+                for (Column tableColumn : table.getColumns()) {
+                    isRequiredMap.put(StringUtils.lowerCase(tableColumn.getName()), tableColumn.isRequired());
+                }
+
+                isRequired = isRequiredMap.get(StringUtils.lowerCase(column.getName()));
+                if (isRequired == null) {
+                    throw new CanalClientException(String.format("column name %s is not found in Table[%s]",
+                            column.getName(),
+                            table.toString()));
+                }
+            }
+
+            Object param;
+            if (sqlType == Types.TIME || sqlType == Types.TIMESTAMP || sqlType == Types.DATE) {
+                // 解决mysql的0000-00-00 00:00:00问题,直接依赖mysql
+                // driver进行处理,如果转化为Timestamp会出错
+                param = column.getValue();
+                if (param instanceof String && StringUtils.isEmpty(String.valueOf(param))) {
+                    param = null;
+                }
+            } else {
+                param = SqlUtils.stringToSqlValue(column.getValue(),
+                        sqlType,
+                        isRequired,
+                        column.getIsNull());
+            }
+
+            try {
+                switch (sqlType) {
+                    case Types.CLOB:
+                        lobCreator.setClobAsString(ps, paramIndex, (String) param);
+                        break;
+                    case Types.BLOB:
+                        lobCreator.setBlobAsBytes(ps, paramIndex, (byte[]) param);
+                        break;
+                    case Types.TIME:
+                    case Types.TIMESTAMP:
+                    case Types.DATE:
+                        ps.setObject(paramIndex, param);
+                        break;
+                    case Types.BIT:
+                        StatementCreatorUtils.setParameterValue(ps, paramIndex, Types.DECIMAL, null, param);
+                        break;
+                    default:
+                        StatementCreatorUtils.setParameterValue(ps, paramIndex, sqlType, null, param);
+                        break;
+                }
+                values.add(param);
+            } catch (SQLException ex) {
+                logger.error("## SetParam error , [sqltype={}, value={}]",
+                        new Object[]{sqlType, param});
+                throw ex;
+            }
+        }
+        logger.info("## sql values: {}", JSON.toJSONString(values));
+    }
+
+    @Override
+    public void afterPropertiesSet() {
+        JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
+        DefaultLobHandler lobHandler = new DefaultLobHandler();
+        lobHandler.setStreamAsLob(true);
+        dbDialect = new MysqlDialect(jdbcTemplate, lobHandler);
+        sqlTemplate = new MysqlSqlTemplate();
+    }
+
+    public DataSource getDataSource() {
+        return dataSource;
+    }
+
+    public void setDataSource(DataSource dataSource) {
+        this.dataSource = dataSource;
+    }
+}

+ 23 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/mysql/MysqlClient.java

@@ -0,0 +1,23 @@
+package com.alibaba.otter.canal.example.db.mysql;
+
+import com.alibaba.otter.canal.protocol.CanalEntry;
+
+import java.util.List;
+
/**
 * Concrete MySQL sink client: replays each canal row-change event against the
 * target database by delegating to {@code AbstractMysqlClient#execute}.
 */
public class MysqlClient extends AbstractMysqlClient {

    /** Replays an INSERT using the row's after-image columns. */
    @Override
    public void insert(CanalEntry.Header header, List<CanalEntry.Column> afterColumns) {
        execute(header, afterColumns);
    }

    /** Replays an UPDATE using the row's after-image columns. */
    @Override
    public void update(CanalEntry.Header header, List<CanalEntry.Column> afterColumns) {
        execute(header, afterColumns);
    }

    /** Replays a DELETE using the row's before-image columns (key lookup). */
    @Override
    public void delete(CanalEntry.Header header, List<CanalEntry.Column> beforeColumns) {
        execute(header, beforeColumns);
    }
}

+ 50 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/utils/ByteArrayConverter.java

@@ -0,0 +1,50 @@
+package com.alibaba.otter.canal.example.db.utils;
+
import java.nio.charset.StandardCharsets;

import org.apache.commons.beanutils.ConversionException;
import org.apache.commons.beanutils.Converter;
import org.apache.commons.beanutils.converters.ArrayConverter;
import org.apache.commons.beanutils.converters.ByteConverter;
+
+public class ByteArrayConverter implements Converter {
+
+    public static final Converter SQL_BYTES = new ByteArrayConverter(null);
+    private static final Converter converter = new ArrayConverter(byte[].class, new ByteConverter());
+
+    protected final Object defaultValue;
+    protected final boolean useDefault;
+
+    public ByteArrayConverter() {
+        this.defaultValue = null;
+        this.useDefault = false;
+    }
+
+    public ByteArrayConverter(Object defaultValue) {
+        this.defaultValue = defaultValue;
+        this.useDefault = true;
+    }
+
+    public Object convert(Class type, Object value) {
+        if (value == null) {
+            if (useDefault) {
+                return (defaultValue);
+            } else {
+                throw new ConversionException("No value specified");
+            }
+        }
+
+        if (value instanceof byte[]) {
+            return (value);
+        }
+
+        // BLOB类型,canal直接存储为String("ISO-8859-1")
+        if (value instanceof String) {
+            try {
+                return ((String) value).getBytes("ISO-8859-1");
+            } catch (Exception e) {
+                throw new ConversionException(e);
+            }
+        }
+
+        return converter.convert(type, value); // byteConvertor进行转化
+    }
+}

+ 326 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/utils/DdlUtils.java

@@ -0,0 +1,326 @@
+package com.alibaba.otter.canal.example.db.utils;
+
+import com.alibaba.otter.canal.example.db.dialect.TableType;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.builder.ToStringBuilder;
+import org.apache.commons.lang.builder.ToStringStyle;
+import org.apache.commons.lang.math.NumberUtils;
+import org.apache.ddlutils.model.Column;
+import org.apache.ddlutils.model.Table;
+import org.apache.ddlutils.platform.DatabaseMetaDataWrapper;
+import org.apache.ddlutils.platform.MetaDataColumnDescriptor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.ConnectionCallback;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.support.JdbcUtils;
+
+import java.sql.*;
+import java.util.*;
+
+
+public class DdlUtils {
+
+    private static final Logger logger = LoggerFactory.getLogger(DdlUtils.class);
+    private static TableType[] SUPPORTED_TABLE_TYPES = new TableType[]{TableType.view, TableType.table};
+    private final static Map<Integer, String> _defaultSizes = new HashMap<Integer, String>();
+
+    static {
+        _defaultSizes.put(new Integer(1), "254");
+        _defaultSizes.put(new Integer(12), "254");
+        _defaultSizes.put(new Integer(-1), "254");
+        _defaultSizes.put(new Integer(-2), "254");
+        _defaultSizes.put(new Integer(-3), "254");
+        _defaultSizes.put(new Integer(-4), "254");
+        _defaultSizes.put(new Integer(4), "32");
+        _defaultSizes.put(new Integer(-5), "64");
+        _defaultSizes.put(new Integer(7), "7,0");
+        _defaultSizes.put(new Integer(6), "15,0");
+        _defaultSizes.put(new Integer(8), "15,0");
+        _defaultSizes.put(new Integer(3), "15,15");
+        _defaultSizes.put(new Integer(2), "15,15");
+    }
+
+
+    public static Table findTable(final JdbcTemplate jdbcTemplate, final String catalogName, final String schemaName,
+                                  final String tableName) {
+        return (Table) jdbcTemplate.execute(new ConnectionCallback() {
+
+            public Object doInConnection(Connection con) throws SQLException, DataAccessException {
+                Table table = null;
+                DatabaseMetaDataWrapper metaData = new DatabaseMetaDataWrapper();
+                try {
+
+                    DatabaseMetaData databaseMetaData = con.getMetaData();
+
+                    metaData.setMetaData(databaseMetaData);
+                    metaData.setTableTypes(TableType.toStrings(SUPPORTED_TABLE_TYPES));
+                    metaData.setCatalog(catalogName);
+                    metaData.setSchemaPattern(schemaName);
+
+                    String convertTableName = tableName;
+                    if (databaseMetaData.storesUpperCaseIdentifiers()) {
+                        metaData.setCatalog(catalogName.toUpperCase());
+                        metaData.setSchemaPattern(schemaName.toUpperCase());
+                        convertTableName = tableName.toUpperCase();
+                    }
+                    if (databaseMetaData.storesLowerCaseIdentifiers()) {
+                        metaData.setCatalog(catalogName.toLowerCase());
+                        metaData.setSchemaPattern(schemaName.toLowerCase());
+                        convertTableName = tableName.toLowerCase();
+                    }
+
+                    ResultSet tableData = null;
+                    try {
+                        tableData = metaData.getTables(convertTableName);
+
+                        while ((tableData != null) && tableData.next()) {
+                            Map<String, Object> values = readColumns(tableData, initColumnsForTable());
+
+                            table = readTable(metaData, values);
+                            if (table.getName().equalsIgnoreCase(tableName)) {
+                                break;
+                            }
+                        }
+                    } finally {
+                        JdbcUtils.closeResultSet(tableData);
+                    }
+                } catch (Exception e) {
+                    logger.error(e.getMessage(), e);
+                }
+
+                makeAllColumnsPrimaryKeysIfNoPrimaryKeysFound(table);
+                return table;
+            }
+        });
+    }
+
+    /**
+     * Treat tables with no primary keys as a table with all primary keys.
+     */
+    private static void makeAllColumnsPrimaryKeysIfNoPrimaryKeysFound(Table table) {
+        if ((table != null) && (table.getPrimaryKeyColumns() != null) && (table.getPrimaryKeyColumns().length == 0)) {
+            Column[] allCoumns = table.getColumns();
+
+            for (Column column : allCoumns) {
+                column.setPrimaryKey(true);
+            }
+        }
+    }
+
+    private static Table readTable(DatabaseMetaDataWrapper metaData, Map<String, Object> values) throws SQLException {
+        String tableName = (String) values.get("TABLE_NAME");
+        Table table = null;
+
+        if ((tableName != null) && (tableName.length() > 0)) {
+            table = new Table();
+            table.setName(tableName);
+            table.setType((String) values.get("TABLE_TYPE"));
+            table.setCatalog((String) values.get("TABLE_CAT"));
+            table.setSchema((String) values.get("TABLE_SCHEM"));
+            table.setDescription((String) values.get("REMARKS"));
+            table.addColumns(readColumns(metaData, tableName));
+
+            Collection<String> primaryKeys = readPrimaryKeyNames(metaData, tableName);
+
+            for (Object key : primaryKeys) {
+                Column col = table.findColumn((String) key, true);
+
+                if (col != null) {
+                    col.setPrimaryKey(true);
+                } else {
+                    throw new NullPointerException(String.format("%s pk %s is null - %s %s",
+                            tableName,
+                            key,
+                            ToStringBuilder.reflectionToString(metaData, ToStringStyle.SIMPLE_STYLE),
+                            ToStringBuilder.reflectionToString(values, ToStringStyle.SIMPLE_STYLE)));
+                }
+            }
+        }
+
+        return table;
+    }
+
+    private static List<MetaDataColumnDescriptor> initColumnsForTable() {
+        List<MetaDataColumnDescriptor> result = new ArrayList<MetaDataColumnDescriptor>();
+
+        result.add(new MetaDataColumnDescriptor("TABLE_NAME", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("TABLE_TYPE", Types.VARCHAR, "UNKNOWN"));
+        result.add(new MetaDataColumnDescriptor("TABLE_CAT", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("TABLE_SCHEM", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("REMARKS", Types.VARCHAR));
+
+        return result;
+    }
+
+    private static List<MetaDataColumnDescriptor> initColumnsForColumn() {
+        List<MetaDataColumnDescriptor> result = new ArrayList<MetaDataColumnDescriptor>();
+
+        // As suggested by Alexandre Borgoltz, we're reading the COLUMN_DEF
+        // first because Oracle
+        // has problems otherwise (it seemingly requires a LONG column to be the
+        // first to be read)
+        // See also DDLUTILS-29
+        result.add(new MetaDataColumnDescriptor("COLUMN_DEF", Types.VARCHAR));
+
+        // we're also reading the table name so that a model reader impl can
+        // filter manually
+        result.add(new MetaDataColumnDescriptor("TABLE_NAME", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("COLUMN_NAME", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("TYPE_NAME", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("DATA_TYPE", Types.INTEGER, new Integer(Types.OTHER)));
+        result.add(new MetaDataColumnDescriptor("NUM_PREC_RADIX", Types.INTEGER, new Integer(10)));
+        result.add(new MetaDataColumnDescriptor("DECIMAL_DIGITS", Types.INTEGER, new Integer(0)));
+        result.add(new MetaDataColumnDescriptor("COLUMN_SIZE", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("IS_NULLABLE", Types.VARCHAR, "YES"));
+        result.add(new MetaDataColumnDescriptor("REMARKS", Types.VARCHAR));
+
+        return result;
+    }
+
+    private static List<MetaDataColumnDescriptor> initColumnsForPK() {
+        List<MetaDataColumnDescriptor> result = new ArrayList<MetaDataColumnDescriptor>();
+
+        result.add(new MetaDataColumnDescriptor("COLUMN_NAME", Types.VARCHAR));
+
+        // we're also reading the table name so that a model reader impl can
+        // filter manually
+        result.add(new MetaDataColumnDescriptor("TABLE_NAME", Types.VARCHAR));
+
+        // the name of the primary key is currently only interesting to the pk
+        // index name resolution
+        result.add(new MetaDataColumnDescriptor("PK_NAME", Types.VARCHAR));
+
+        return result;
+    }
+
+    private static List<Column> readColumns(DatabaseMetaDataWrapper metaData, String tableName) throws SQLException {
+        ResultSet columnData = null;
+
+        try {
+            columnData = metaData.getColumns(tableName, null);
+
+            List<Column> columns = new ArrayList<Column>();
+            Map<String, Object> values;
+
+            for (; columnData.next(); columns.add(readColumn(metaData, values))) {
+                Map<String, Object> tmp = readColumns(columnData, initColumnsForColumn());
+                if (tableName.equalsIgnoreCase((String) tmp.get("TABLE_NAME"))) {
+                    values = tmp;
+                } else {
+                    break;
+                }
+            }
+
+            return columns;
+        } finally {
+            JdbcUtils.closeResultSet(columnData);
+        }
+    }
+
+    private static Column readColumn(DatabaseMetaDataWrapper metaData, Map<String, Object> values) throws SQLException {
+        Column column = new Column();
+
+        column.setName((String) values.get("COLUMN_NAME"));
+        column.setDefaultValue((String) values.get("COLUMN_DEF"));
+        column.setTypeCode(((Integer) values.get("DATA_TYPE")).intValue());
+
+        String typeName = (String) values.get("TYPE_NAME");
+        // column.setType(typeName);
+
+        if ((typeName != null) && typeName.startsWith("TIMESTAMP")) {
+            column.setTypeCode(Types.TIMESTAMP);
+        }
+        // modify 2013-09-25,处理下unsigned
+        if ((typeName != null) && StringUtils.containsIgnoreCase(typeName, "UNSIGNED")) {
+            // 如果为unsigned,往上调大一个量级,避免数据溢出
+            switch (column.getTypeCode()) {
+                case Types.TINYINT:
+                    column.setTypeCode(Types.SMALLINT);
+                    break;
+                case Types.SMALLINT:
+                    column.setTypeCode(Types.INTEGER);
+                    break;
+                case Types.INTEGER:
+                    column.setTypeCode(Types.BIGINT);
+                    break;
+                case Types.BIGINT:
+                    column.setTypeCode(Types.DECIMAL);
+                    break;
+                default:
+                    break;
+            }
+        }
+
+        Integer precision = (Integer) values.get("NUM_PREC_RADIX");
+
+        if (precision != null) {
+            column.setPrecisionRadix(precision.intValue());
+        }
+
+        String size = (String) values.get("COLUMN_SIZE");
+
+        if (size == null) {
+            size = (String) _defaultSizes.get(new Integer(column.getTypeCode()));
+        }
+
+        // we're setting the size after the precision and radix in case
+        // the database prefers to return them in the size value
+        column.setSize(size);
+
+        int scale = 0;
+        Object dec_digits = values.get("DECIMAL_DIGITS");
+
+        if (dec_digits instanceof String) {
+            scale = (dec_digits == null) ? 0 : NumberUtils.toInt(dec_digits.toString());
+        } else if (dec_digits instanceof Integer) {
+            scale = (dec_digits == null) ? 0 : (Integer) dec_digits;
+        }
+
+        if (scale != 0) {
+            column.setScale(scale);
+        }
+
+        column.setRequired("NO".equalsIgnoreCase(((String) values.get("IS_NULLABLE")).trim()));
+        column.setDescription((String) values.get("REMARKS"));
+        return column;
+    }
+
+    private static Map<String, Object> readColumns(ResultSet resultSet, List<MetaDataColumnDescriptor> columnDescriptors)
+            throws SQLException {
+        Map<String, Object> values = new HashMap<String, Object>();
+        MetaDataColumnDescriptor descriptor;
+
+        for (Iterator<MetaDataColumnDescriptor> it = columnDescriptors.iterator(); it.hasNext(); values.put(descriptor.getName(),
+                descriptor.readColumn(resultSet))) {
+            descriptor = (MetaDataColumnDescriptor) it.next();
+        }
+
+        return values;
+    }
+
+    private static Collection<String> readPrimaryKeyNames(DatabaseMetaDataWrapper metaData, String tableName)
+            throws SQLException {
+        ResultSet pkData = null;
+
+        try {
+            List<String> pks = new ArrayList<String>();
+            Map<String, Object> values;
+
+            for (pkData = metaData.getPrimaryKeys(tableName); pkData.next(); pks.add(readPrimaryKeyName(metaData,
+                    values))) {
+                values = readColumns(pkData, initColumnsForPK());
+            }
+
+            return pks;
+        } finally {
+            JdbcUtils.closeResultSet(pkData);
+        }
+    }
+
+    private static String readPrimaryKeyName(DatabaseMetaDataWrapper metaData, Map<String, Object> values)
+            throws SQLException {
+        return (String) values.get("COLUMN_NAME");
+    }
+}

+ 140 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/utils/SqlTimestampConverter.java

@@ -0,0 +1,140 @@
+package com.alibaba.otter.canal.example.db.utils;
+
+import org.apache.commons.beanutils.ConversionException;
+import org.apache.commons.beanutils.Converter;
+import org.apache.commons.lang.time.DateFormatUtils;
+
+import java.sql.Timestamp;
+import java.text.ParseException;
+import java.text.ParsePosition;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.Locale;
+
+public class SqlTimestampConverter implements Converter {
+
+    /**
+     * Field description
+     */
+    public static final String[] DATE_FORMATS = new String[]{"yyyy-MM-dd", "HH:mm:ss", "yyyy-MM-dd HH:mm:ss",
+            "yyyy-MM-dd hh:mm:ss.fffffffff", "EEE MMM dd HH:mm:ss zzz yyyy",
+            DateFormatUtils.ISO_DATETIME_FORMAT.getPattern(),
+            DateFormatUtils.ISO_DATETIME_TIME_ZONE_FORMAT.getPattern(),
+            DateFormatUtils.SMTP_DATETIME_FORMAT.getPattern(),};
+
+    public static final Converter SQL_TIMESTAMP = new SqlTimestampConverter(null);
+
+    /**
+     * The default value specified to our Constructor, if any.
+     */
+    private final Object defaultValue;
+
+    /**
+     * Should we return the default value on conversion errors?
+     */
+    private final boolean useDefault;
+
+    /**
+     * Create a {@link Converter} that will throw a {@link ConversionException} if a conversion error occurs.
+     */
+    public SqlTimestampConverter() {
+        this.defaultValue = null;
+        this.useDefault = false;
+    }
+
+    /**
+     * Create a {@link Converter} that will return the specified default value if a conversion error occurs.
+     *
+     * @param defaultValue The default value to be returned
+     */
+    public SqlTimestampConverter(Object defaultValue) {
+        this.defaultValue = defaultValue;
+        this.useDefault = true;
+    }
+
+    /**
+     * Convert the specified input object into an output object of the specified type.
+     *
+     * @param type  Data type to which this value should be converted
+     * @param value The input value to be converted
+     * @throws ConversionException if conversion cannot be performed successfully
+     */
+    public Object convert(Class type, Object value) {
+        if (value == null) {
+            if (useDefault) {
+                return (defaultValue);
+            } else {
+                throw new ConversionException("No value specified");
+            }
+        }
+
+        if (value instanceof java.sql.Date && java.sql.Date.class.equals(type)) {
+            return value;
+        } else if (value instanceof java.sql.Time && java.sql.Time.class.equals(type)) {
+            return value;
+        } else if (value instanceof Timestamp && Timestamp.class.equals(type)) {
+            return value;
+        } else {
+            try {
+                if (java.sql.Date.class.equals(type)) {
+                    return new java.sql.Date(convertTimestamp2TimeMillis(value.toString()));
+                } else if (java.sql.Time.class.equals(type)) {
+                    return new java.sql.Time(convertTimestamp2TimeMillis(value.toString()));
+                } else if (Timestamp.class.equals(type)) {
+                    return new Timestamp(convertTimestamp2TimeMillis(value.toString()));
+                } else {
+                    return new Timestamp(convertTimestamp2TimeMillis(value.toString()));
+                }
+            } catch (Exception e) {
+                throw new ConversionException("Value format invalid: " + e.getMessage(), e);
+            }
+        }
+
+    }
+
+    private Long convertTimestamp2TimeMillis(String input) {
+        if (input == null) {
+            return null;
+        }
+
+        try {
+            // 先处理Timestamp类型
+            return Timestamp.valueOf(input).getTime();
+        } catch (Exception nfe) {
+            try {
+                try {
+                    return parseDate(input, DATE_FORMATS, Locale.ENGLISH).getTime();
+                } catch (Exception err) {
+                    return parseDate(input, DATE_FORMATS, Locale.getDefault()).getTime();
+                }
+            } catch (Exception err) {
+                // 最后处理long time的情况
+                return Long.parseLong(input);
+            }
+        }
+    }
+
+    private Date parseDate(String str, String[] parsePatterns, Locale locale) throws ParseException {
+        if ((str == null) || (parsePatterns == null)) {
+            throw new IllegalArgumentException("Date and Patterns must not be null");
+        }
+
+        SimpleDateFormat parser = null;
+        ParsePosition pos = new ParsePosition(0);
+
+        for (int i = 0; i < parsePatterns.length; i++) {
+            if (i == 0) {
+                parser = new SimpleDateFormat(parsePatterns[0], locale);
+            } else {
+                parser.applyPattern(parsePatterns[i]);
+            }
+            pos.setIndex(0);
+            Date date = parser.parse(str, pos);
+            if ((date != null) && (pos.getIndex() == str.length())) {
+                return date;
+            }
+        }
+
+        throw new ParseException("Unable to parse the date: " + str, -1);
+    }
+}

+ 315 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/utils/SqlUtils.java

@@ -0,0 +1,315 @@
+package com.alibaba.otter.canal.example.db.utils;
+
+import org.apache.commons.beanutils.ConvertUtilsBean;
+import org.apache.commons.lang.StringUtils;
+
+import java.io.UnsupportedEncodingException;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.sql.*;
+import java.util.HashMap;
+import java.util.Map;
+
+public class SqlUtils {
+
+    public static final String REQUIRED_FIELD_NULL_SUBSTITUTE = " ";
+    public static final String SQLDATE_FORMAT = "yyyy-MM-dd";
+    public static final String TIMESTAMP_FORMAT = "yyyy-MM-dd HH:mm:ss";
+    private static final Map<Integer, Class<?>> sqlTypeToJavaTypeMap = new HashMap<Integer, Class<?>>();
+    private static final ConvertUtilsBean convertUtilsBean = new ConvertUtilsBean();
+
+    static {
+        // regist Converter
+        convertUtilsBean.register(SqlTimestampConverter.SQL_TIMESTAMP, Date.class);
+        convertUtilsBean.register(SqlTimestampConverter.SQL_TIMESTAMP, Time.class);
+        convertUtilsBean.register(SqlTimestampConverter.SQL_TIMESTAMP, Timestamp.class);
+        convertUtilsBean.register(ByteArrayConverter.SQL_BYTES, byte[].class);
+
+        // bool
+        sqlTypeToJavaTypeMap.put(Types.BOOLEAN, Boolean.class);
+
+        // int
+        sqlTypeToJavaTypeMap.put(Types.TINYINT, Integer.class);
+        sqlTypeToJavaTypeMap.put(Types.SMALLINT, Integer.class);
+        sqlTypeToJavaTypeMap.put(Types.INTEGER, Integer.class);
+
+        // long
+        sqlTypeToJavaTypeMap.put(Types.BIGINT, Long.class);
+        // mysql bit最多64位,无符号
+        sqlTypeToJavaTypeMap.put(Types.BIT, BigInteger.class);
+
+        // decimal
+        sqlTypeToJavaTypeMap.put(Types.REAL, Float.class);
+        sqlTypeToJavaTypeMap.put(Types.FLOAT, Float.class);
+        sqlTypeToJavaTypeMap.put(Types.DOUBLE, Double.class);
+        sqlTypeToJavaTypeMap.put(Types.NUMERIC, BigDecimal.class);
+        sqlTypeToJavaTypeMap.put(Types.DECIMAL, BigDecimal.class);
+
+        // date
+        sqlTypeToJavaTypeMap.put(Types.DATE, Date.class);
+        sqlTypeToJavaTypeMap.put(Types.TIME, Time.class);
+        sqlTypeToJavaTypeMap.put(Types.TIMESTAMP, Timestamp.class);
+
+        // blob
+        sqlTypeToJavaTypeMap.put(Types.BLOB, byte[].class);
+
+        // byte[]
+        sqlTypeToJavaTypeMap.put(Types.REF, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.OTHER, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.ARRAY, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.STRUCT, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.SQLXML, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.BINARY, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.DATALINK, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.DISTINCT, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.VARBINARY, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.JAVA_OBJECT, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.LONGVARBINARY, byte[].class);
+
+        // String
+        sqlTypeToJavaTypeMap.put(Types.CHAR, String.class);
+        sqlTypeToJavaTypeMap.put(Types.VARCHAR, String.class);
+        sqlTypeToJavaTypeMap.put(Types.LONGVARCHAR, String.class);
+        sqlTypeToJavaTypeMap.put(Types.LONGNVARCHAR, String.class);
+        sqlTypeToJavaTypeMap.put(Types.NCHAR, String.class);
+        sqlTypeToJavaTypeMap.put(Types.NVARCHAR, String.class);
+        sqlTypeToJavaTypeMap.put(Types.NCLOB, String.class);
+        sqlTypeToJavaTypeMap.put(Types.CLOB, String.class);
+    }
+
+    /**
+     * 将指定java.sql.Types的ResultSet value转换成相应的String
+     *
+     * @param rs
+     * @param index
+     * @param sqlType
+     * @return
+     * @throws SQLException
+     */
+    public static String sqlValueToString(ResultSet rs, int index, int sqlType) throws SQLException {
+        Class<?> requiredType = sqlTypeToJavaTypeMap.get(sqlType);
+        if (requiredType == null) {
+            throw new IllegalArgumentException("unknow java.sql.Types - " + sqlType);
+        }
+
+        return getResultSetValue(rs, index, requiredType);
+    }
+
+    /**
+     * sqlValueToString方法的逆向过程
+     *
+     * @param value
+     * @param sqlType
+     * @param isTextRequired
+     * @param isEmptyStringNulled
+     * @return
+     */
+    public static Object stringToSqlValue(String value, int sqlType, boolean isRequired, boolean isEmptyStringNulled) {
+        // 设置变量
+        String sourceValue = value;
+        if (SqlUtils.isTextType(sqlType)) {
+            if ((sourceValue == null) || (StringUtils.isEmpty(sourceValue) && isEmptyStringNulled)) {
+                return isRequired ? REQUIRED_FIELD_NULL_SUBSTITUTE : null;
+            } else {
+                return sourceValue;
+            }
+        } else {
+            if (StringUtils.isEmpty(sourceValue)) {
+                return null;
+            } else {
+                Class<?> requiredType = sqlTypeToJavaTypeMap.get(sqlType);
+                if (requiredType == null) {
+                    throw new IllegalArgumentException("unknow java.sql.Types - " + sqlType);
+                } else if (requiredType.equals(String.class)) {
+                    return sourceValue;
+                } else if (isNumeric(sqlType)) {
+                    return convertUtilsBean.convert(sourceValue.trim(), requiredType);
+                } else {
+                    return convertUtilsBean.convert(sourceValue, requiredType);
+                }
+            }
+        }
+    }
+
+    public static String encoding(String source, int sqlType, String sourceEncoding, String targetEncoding) {
+        switch (sqlType) {
+            case Types.CHAR:
+            case Types.VARCHAR:
+            case Types.LONGVARCHAR:
+            case Types.NCHAR:
+            case Types.NVARCHAR:
+            case Types.LONGNVARCHAR:
+            case Types.CLOB:
+            case Types.NCLOB:
+                if (false == StringUtils.isEmpty(source)) {
+                    String fromEncoding = StringUtils.isBlank(sourceEncoding) ? "UTF-8" : sourceEncoding;
+                    String toEncoding = StringUtils.isBlank(targetEncoding) ? "UTF-8" : targetEncoding;
+
+                    // if (false == StringUtils.equalsIgnoreCase(fromEncoding,
+                    // toEncoding)) {
+                    try {
+                        return new String(source.getBytes(fromEncoding), toEncoding);
+                    } catch (UnsupportedEncodingException e) {
+                        throw new IllegalArgumentException(e.getMessage(), e);
+                    }
+                    // }
+                }
+        }
+
+        return source;
+    }
+
+    /**
+     * Retrieve a JDBC column value from a ResultSet, using the specified value
+     * type.
+     * <p>
+     * Uses the specifically typed ResultSet accessor methods, falling back to
+     * {@link #getResultSetValue(ResultSet, int)} for unknown types.
+     * <p>
+     * Note that the returned value may not be assignable to the specified
+     * required type, in case of an unknown type. Calling code needs to deal
+     * with this case appropriately, e.g. throwing a corresponding exception.
+     *
+     * @param rs           is the ResultSet holding the data
+     * @param index        is the column index
+     * @param requiredType the required value type (may be <code>null</code>)
+     * @return the value object
+     * @throws SQLException if thrown by the JDBC API
+     */
+    private static String getResultSetValue(ResultSet rs, int index, Class<?> requiredType) throws SQLException {
+        if (requiredType == null) {
+            return getResultSetValue(rs, index);
+        }
+
+        Object value = null;
+        boolean wasNullCheck = false;
+
+        // Explicitly extract typed value, as far as possible.
+        if (String.class.equals(requiredType)) {
+            value = rs.getString(index);
+        } else if (boolean.class.equals(requiredType) || Boolean.class.equals(requiredType)) {
+            value = Boolean.valueOf(rs.getBoolean(index));
+            wasNullCheck = true;
+        } else if (byte.class.equals(requiredType) || Byte.class.equals(requiredType)) {
+            value = new Byte(rs.getByte(index));
+            wasNullCheck = true;
+        } else if (short.class.equals(requiredType) || Short.class.equals(requiredType)) {
+            value = new Short(rs.getShort(index));
+            wasNullCheck = true;
+        } else if (int.class.equals(requiredType) || Integer.class.equals(requiredType)) {
+            value = new Long(rs.getLong(index));
+            wasNullCheck = true;
+        } else if (long.class.equals(requiredType) || Long.class.equals(requiredType)) {
+            value = rs.getBigDecimal(index);
+            wasNullCheck = true;
+        } else if (float.class.equals(requiredType) || Float.class.equals(requiredType)) {
+            value = new Float(rs.getFloat(index));
+            wasNullCheck = true;
+        } else if (double.class.equals(requiredType) || Double.class.equals(requiredType)
+                || Number.class.equals(requiredType)) {
+            value = new Double(rs.getDouble(index));
+            wasNullCheck = true;
+        } else if (Time.class.equals(requiredType)) {
+            // try {
+            // value = rs.getTime(index);
+            // } catch (SQLException e) {
+            value = rs.getString(index);// 尝试拿为string对象,0000无法用Time表示
+            // if (value == null && !rs.wasNull()) {
+            // value = "00:00:00"; //
+            // mysql设置了zeroDateTimeBehavior=convertToNull,出现0值时返回为null
+            // }
+            // }
+        } else if (Timestamp.class.equals(requiredType) || Date.class.equals(requiredType)) {
+            // try {
+            // value = convertTimestamp(rs.getTimestamp(index));
+            // } catch (SQLException e) {
+            // 尝试拿为string对象,0000-00-00 00:00:00无法用Timestamp 表示
+            value = rs.getString(index);
+            // if (value == null && !rs.wasNull()) {
+            // value = "0000:00:00 00:00:00"; //
+            // mysql设置了zeroDateTimeBehavior=convertToNull,出现0值时返回为null
+            // }
+            // }
+        } else if (BigDecimal.class.equals(requiredType)) {
+            value = rs.getBigDecimal(index);
+        } else if (BigInteger.class.equals(requiredType)) {
+            value = rs.getBigDecimal(index);
+        } else if (Blob.class.equals(requiredType)) {
+            value = rs.getBlob(index);
+        } else if (Clob.class.equals(requiredType)) {
+            value = rs.getClob(index);
+        } else if (byte[].class.equals(requiredType)) {
+            try {
+                byte[] bytes = rs.getBytes(index);
+                if (bytes == null) {
+                    value = null;
+                } else {
+                    value = new String(bytes, "ISO-8859-1");// 将binary转化为iso-8859-1的字符串
+                }
+            } catch (UnsupportedEncodingException e) {
+                throw new SQLException(e);
+            }
+        } else {
+            // Some unknown type desired -> rely on getObject.
+            value = getResultSetValue(rs, index);
+        }
+
+        // Perform was-null check if demanded (for results that the
+        // JDBC driver returns as primitives).
+        if (wasNullCheck && (value != null) && rs.wasNull()) {
+            value = null;
+        }
+
+        return (value == null) ? null : convertUtilsBean.convert(value);
+    }
+
+    /**
+     * Retrieve a JDBC column value from a ResultSet, using the most appropriate
+     * value type. The returned value should be a detached value object, not
+     * having any ties to the active ResultSet: in particular, it should not be
+     * a Blob or Clob object but rather a byte array respectively String
+     * representation.
+     * <p>
+     * Uses the <code>getObject(index)</code> method, but includes additional
+     * "hacks" to get around Oracle 10g returning a non-standard object for its
+     * TIMESTAMP datatype and a <code>java.sql.Date</code> for DATE columns
+     * leaving out the time portion: These columns will explicitly be extracted
+     * as standard <code>java.sql.Timestamp</code> object.
+     *
+     * @param rs    is the ResultSet holding the data
+     * @param index is the column index
+     * @return the value object
+     * @throws SQLException if thrown by the JDBC API
+     * @see Blob
+     * @see Clob
+     * @see Timestamp
+     */
+    private static String getResultSetValue(ResultSet rs, int index) throws SQLException {
+        Object obj = rs.getObject(index);
+        return (obj == null) ? null : convertUtilsBean.convert(obj);
+    }
+
+    // private static Object convertTimestamp(Timestamp timestamp) {
+    // return (timestamp == null) ? null : timestamp.getTime();
+    // }
+
+    /**
+     * Check whether the given SQL type is numeric.
+     */
+    public static boolean isNumeric(int sqlType) {
+        return (Types.BIT == sqlType) || (Types.BIGINT == sqlType) || (Types.DECIMAL == sqlType)
+                || (Types.DOUBLE == sqlType) || (Types.FLOAT == sqlType) || (Types.INTEGER == sqlType)
+                || (Types.NUMERIC == sqlType) || (Types.REAL == sqlType) || (Types.SMALLINT == sqlType)
+                || (Types.TINYINT == sqlType);
+    }
+
+    public static boolean isTextType(int sqlType) {
+        if (sqlType == Types.CHAR || sqlType == Types.VARCHAR || sqlType == Types.CLOB || sqlType == Types.LONGVARCHAR
+                || sqlType == Types.NCHAR || sqlType == Types.NVARCHAR || sqlType == Types.NCLOB
+                || sqlType == Types.LONGNVARCHAR) {
+            return true;
+        } else {
+            return false;
+        }
+    }
+}

+ 53 - 0
example/src/main/resources/client-spring.xml

@@ -0,0 +1,53 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-2.0.xsd"
+       default-autowire="byName">
+
+    <bean class="com.alibaba.otter.canal.example.db.PropertyPlaceholderConfigurer" lazy-init="false">
+        <property name="ignoreResourceNotFound" value="true"/>
+        <property name="systemPropertiesModeName" value="SYSTEM_PROPERTIES_MODE_OVERRIDE"/><!-- 允许system覆盖 -->
+        <property name="locationNames">
+            <list>
+                <value>classpath:client.properties</value>
+            </list>
+        </property>
+    </bean>
+
+    <bean id="dataSource" class="com.alibaba.druid.pool.DruidDataSource" destroy-method="close">
+        <property name="driverClassName" value="com.mysql.jdbc.Driver"/>
+        <property name="url" value="${target.mysql.url:}"/>
+        <property name="username" value="${target.mysql.dbUsername:canal}"/>
+        <property name="password" value="${target.mysql.dbPassword:canal}"/>
+        <property name="maxActive" value="30"/>
+        <property name="initialSize" value="0"/>
+        <property name="minIdle" value="1"/>
+        <property name="maxWait" value="10000"/>
+        <property name="timeBetweenEvictionRunsMillis" value="60000"/>
+        <property name="minEvictableIdleTimeMillis" value="300000"/>
+        <property name="validationQuery" value="SELECT 1"/>
+        <property name="exceptionSorterClassName" value="com.alibaba.druid.pool.vendor.MySqlExceptionSorter"/>
+        <property name="validConnectionCheckerClassName" value="com.alibaba.druid.pool.vendor.MySqlValidConnectionChecker"/>
+        <property name="testWhileIdle" value="true"/>
+        <property name="testOnBorrow" value="false"/>
+        <property name="testOnReturn" value="false"/>
+        <property name="useUnfairLock" value="true"/>
+    </bean>
+
+    <bean name="canalConnectorClient" class="com.alibaba.otter.canal.example.db.CanalConnectorClient" abstract="true">
+        <property name="zkServers" value="${zk.servers:127.0.0.1:2181}"/>
+        <property name="debug" value="${client.debug:true}"/>
+        <property name="destination" value="${client.destination:example}"/>
+        <property name="username" value="${client.username:canal}"/>
+        <property name="password" value="${client.password:canal}"/>
+        <property name="exceptionStrategy" value="${client.exceptionstrategy:1}"/>
+        <property name="retryTimes" value="${client.retrytimes:3}"/>
+        <property name="filter" value="${client.filter:.*\\..*}"/>
+        <property name="waitingTime" value="${client.waiting.time:10}"/>
+    </bean>
+
+
+    <bean id="mysqlClient" class="com.alibaba.otter.canal.example.db.mysql.MysqlClient" lazy-init="true" parent="canalConnectorClient">
+        <property name="dataSource" ref="dataSource"/>
+    </bean>
+</beans>

+ 16 - 0
example/src/main/resources/client.properties

@@ -0,0 +1,16 @@
+# client 配置
+zk.servers=127.0.0.1:2181
+# 5 * 1024
+client.batch.size=5120
+client.debug=false
+client.destination=example
+client.username=canal
+client.password=canal
+client.exceptionstrategy=1
+client.retrytimes=3
+client.filter=.*\\..*
+
+# 同步目标: mysql 配置
+target.mysql.url=jdbc:mysql://127.0.0.1:4306
+target.mysql.dbUsername=root
+target.mysql.dbPassword=123456

+ 11 - 13
instance/manager/src/main/java/com/alibaba/otter/canal/instance/manager/CanalInstanceWithManager.java

@@ -6,7 +6,11 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
+
 import com.alibaba.otter.canal.parse.inbound.mysql.tablemeta.HistoryTableMetaCache;
+
+import com.alibaba.otter.canal.meta.FileMixedMetaManager;
+
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -21,13 +25,7 @@ import com.alibaba.otter.canal.filter.aviater.AviaterRegexFilter;
 import com.alibaba.otter.canal.instance.core.AbstractCanalInstance;
 import com.alibaba.otter.canal.instance.manager.model.Canal;
 import com.alibaba.otter.canal.instance.manager.model.CanalParameter;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.DataSourcing;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.HAMode;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.IndexMode;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.MetaMode;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.SourcingType;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.StorageMode;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.StorageScavengeMode;
+import com.alibaba.otter.canal.instance.manager.model.CanalParameter.*;
 import com.alibaba.otter.canal.meta.MemoryMetaManager;
 import com.alibaba.otter.canal.meta.PeriodMixedMetaManager;
 import com.alibaba.otter.canal.meta.ZooKeeperMetaManager;
@@ -38,12 +36,7 @@ import com.alibaba.otter.canal.parse.inbound.AbstractEventParser;
 import com.alibaba.otter.canal.parse.inbound.group.GroupEventParser;
 import com.alibaba.otter.canal.parse.inbound.mysql.LocalBinlogEventParser;
 import com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser;
-import com.alibaba.otter.canal.parse.index.CanalLogPositionManager;
-import com.alibaba.otter.canal.parse.index.FailbackLogPositionManager;
-import com.alibaba.otter.canal.parse.index.MemoryLogPositionManager;
-import com.alibaba.otter.canal.parse.index.MetaLogPositionManager;
-import com.alibaba.otter.canal.parse.index.PeriodMixedLogPositionManager;
-import com.alibaba.otter.canal.parse.index.ZooKeeperLogPositionManager;
+import com.alibaba.otter.canal.parse.index.*;
 import com.alibaba.otter.canal.parse.support.AuthenticationInfo;
 import com.alibaba.otter.canal.protocol.position.EntryPosition;
 import com.alibaba.otter.canal.sink.entry.EntryEventSink;
@@ -121,6 +114,11 @@ public class CanalInstanceWithManager extends AbstractCanalInstance {
             ZooKeeperMetaManager zooKeeperMetaManager = new ZooKeeperMetaManager();
             zooKeeperMetaManager.setZkClientx(getZkclientx());
             ((PeriodMixedMetaManager) metaManager).setZooKeeperMetaManager(zooKeeperMetaManager);
+        } else if (mode.isLocalFile()){
+            FileMixedMetaManager fileMixedMetaManager = new FileMixedMetaManager();
+            fileMixedMetaManager.setDataDir(parameters.getDataDir());
+            fileMixedMetaManager.setPeriod(parameters.getMetaFileFlushPeriod());
+            metaManager = fileMixedMetaManager;
         } else {
             throw new CanalException("unsupport MetaMode for " + mode);
         }

+ 25 - 1
instance/manager/src/main/java/com/alibaba/otter/canal/instance/manager/model/CanalParameter.java

@@ -29,8 +29,10 @@ public class CanalParameter implements Serializable {
     private Long                     zkClusterId;                                                    // zk集群id,为管理方便
     private List<String>             zkClusters;                                                     // zk集群地址
 
+    private String                   dataDir                            = "../conf";                 // 默认本地文件数据的目录默认是conf
     // meta相关参数
     private MetaMode                 metaMode                           = MetaMode.MEMORY;           // meta机制
+    private Integer                  metaFileFlushPeriod                = 1000;                      // meta刷新间隔
 
     // storage存储
     private Integer                  transactionSize                    = 1024;                      // 支持处理的transaction事务大小
@@ -247,7 +249,9 @@ public class CanalParameter implements Serializable {
         /** 文件存储模式 */
         ZOOKEEPER,
         /** 混合模式,内存+文件 */
-        MIXED;
+        MIXED,
+        /** 本地文件存储模式*/
+        LOCAL_FILE;
 
         public boolean isMemory() {
             return this.equals(MetaMode.MEMORY);
@@ -260,6 +264,10 @@ public class CanalParameter implements Serializable {
         public boolean isMixed() {
             return this.equals(MetaMode.MIXED);
         }
+
+        public boolean isLocalFile(){
+            return this.equals(MetaMode.LOCAL_FILE);
+        }
     }
 
     public static enum IndexMode {
@@ -394,6 +402,22 @@ public class CanalParameter implements Serializable {
         return storageMode;
     }
 
+    public String getDataDir() {
+        return dataDir;
+    }
+
+    public void setDataDir(String dataDir) {
+        this.dataDir = dataDir;
+    }
+
+    public Integer getMetaFileFlushPeriod() {
+        return metaFileFlushPeriod;
+    }
+
+    public void setMetaFileFlushPeriod(Integer metaFileFlushPeriod) {
+        this.metaFileFlushPeriod = metaFileFlushPeriod;
+    }
+
     public void setStorageMode(StorageMode storageMode) {
         this.storageMode = storageMode;
     }

+ 111 - 0
kafka-client/pom.xml

@@ -0,0 +1,111 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <artifactId>canal</artifactId>
+        <groupId>com.alibaba.otter</groupId>
+        <version>1.0.26-SNAPSHOT</version>
+        <relativePath>../pom.xml</relativePath>
+    </parent>
+    <groupId>com.alibaba.otter</groupId>
+    <artifactId>canal.kafka.client</artifactId>
+    <packaging>jar</packaging>
+    <name>canal kafka client module for otter ${project.version}</name>
+
+
+    <dependencies>
+        <dependency>
+            <groupId>com.alibaba.otter</groupId>
+            <artifactId>canal.protocol</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-clients</artifactId>
+            <version>0.9.0.1</version>
+        </dependency>
+
+        <!-- junit -->
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+        </dependency>
+    </dependencies>
+
+    <profiles>
+        <profile>
+            <id>dev</id>
+            <activation>
+                <activeByDefault>true</activeByDefault>
+                <property>
+                    <name>env</name>
+                    <value>!javadoc</value>
+                </property>
+            </activation>
+        </profile>
+
+        <profile>
+            <id>javadoc</id>
+            <activation>
+                <property>
+                    <name>env</name>
+                    <value>javadoc</value>
+                </property>
+            </activation>
+            <build>
+                <plugins>
+                    <plugin>
+                        <groupId>org.apache.maven.plugins</groupId>
+                        <artifactId>maven-javadoc-plugin</artifactId>
+                        <version>2.9.1</version>
+                        <executions>
+                            <execution>
+                                <id>attach-javadocs</id>
+                                <phase>package</phase>
+                                <goals>
+                                    <goal>jar</goal>
+                                </goals>
+                            </execution>
+                        </executions>
+                        <configuration>
+                            <aggregate>true</aggregate>
+                            <show>public</show>
+                            <nohelp>true</nohelp>
+                            <header>${project.artifactId}-${project.version}</header>
+                            <footer>${project.artifactId}-${project.version}</footer>
+                            <doctitle>${project.artifactId}-${project.version}</doctitle>
+                            <links>
+                                <link>https://github.com/alibaba/canal</link>
+                            </links>
+                            <outputDirectory>${project.build.directory}/apidocs/apidocs/${project.version}</outputDirectory>
+                        </configuration>
+                    </plugin>
+                    <plugin>
+                        <groupId>org.apache.maven.plugins</groupId>
+                        <artifactId>maven-scm-publish-plugin</artifactId>
+                        <version>1.0-beta-2</version>
+                        <executions>
+                            <execution>
+                                <id>attach-javadocs</id>
+                                <phase>package</phase>
+                                <goals>
+                                    <goal>publish-scm</goal>
+                                </goals>
+                            </execution>
+                        </executions>
+                        <configuration>
+                            <checkoutDirectory>${project.build.directory}/scmpublish</checkoutDirectory>
+                            <checkinComment>Publishing javadoc for ${project.artifactId}:${project.version}</checkinComment>
+                            <content>${project.build.directory}/apidocs</content>
+                            <skipDeletedFiles>true</skipDeletedFiles>
+                            <pubScmUrl>scm:git:git@github.com:alibaba/canal.git</pubScmUrl>
+                            <scmBranch>gh-pages</scmBranch>
+                        </configuration>
+                    </plugin>
+                </plugins>
+            </build>
+        </profile>
+    </profiles>
+</project>

+ 242 - 0
kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/KafkaCanalConnector.java

@@ -0,0 +1,242 @@
+package com.alibaba.otter.canal.kafka.client;
+
+import com.alibaba.otter.canal.common.utils.AddressUtils;
+import com.alibaba.otter.canal.common.utils.BooleanMutex;
+import com.alibaba.otter.canal.common.zookeeper.ZkClientx;
+import com.alibaba.otter.canal.kafka.client.running.ClientRunningData;
+import com.alibaba.otter.canal.kafka.client.running.ClientRunningListener;
+import com.alibaba.otter.canal.kafka.client.running.ClientRunningMonitor;
+import com.alibaba.otter.canal.protocol.Message;
+import com.alibaba.otter.canal.protocol.exception.CanalClientException;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.serialization.StringDeserializer;
+
+import java.util.Collections;
+import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * canal kafka 数据操作客户端
+ *
+ * @author machengyuan @ 2018-6-12
+ * @version 1.0.0
+ */
+public class KafkaCanalConnector {
+
+    private KafkaConsumer<String, Message> kafkaConsumer;
+    private String topic;
+    private Integer partition;
+    private Properties properties;
+    private ClientRunningMonitor runningMonitor;  // 运行控制
+    private ZkClientx zkClientx;
+    private BooleanMutex mutex = new BooleanMutex(false);
+    private volatile boolean connected = false;
+    private volatile boolean running = false;
+
+    public KafkaCanalConnector(String zkServers, String servers, String topic, Integer partition, String groupId) {
+        this.topic = topic;
+        this.partition = partition;
+
+        properties = new Properties();
+        properties.put("bootstrap.servers", servers);
+        properties.put("group.id", groupId);
+        properties.put("enable.auto.commit", false);
+        properties.put("auto.commit.interval.ms", "1000");
+        properties.put("auto.offset.reset", "latest"); //如果没有offset则从最后的offset开始读
+        properties.put("request.timeout.ms", "40000"); // 必须大于session.timeout.ms的设置
+        properties.put("session.timeout.ms", "30000"); // 默认为30秒
+        properties.put("max.poll.records", "1"); // 所以一次只取一条数据
+        properties.put("key.deserializer", StringDeserializer.class.getName());
+        properties.put("value.deserializer", MessageDeserializer.class.getName());
+
+        if (zkServers != null) {
+            zkClientx = new ZkClientx(zkServers);
+
+            ClientRunningData clientData = new ClientRunningData();
+            clientData.setGroupId(groupId);
+            clientData.setAddress(AddressUtils.getHostIp());
+
+            runningMonitor = new ClientRunningMonitor();
+            runningMonitor.setTopic(topic);
+            runningMonitor.setZkClient(zkClientx);
+            runningMonitor.setClientData(clientData);
+            runningMonitor.setListener(new ClientRunningListener() {
+                public void processActiveEnter() {
+                    mutex.set(true);
+                }
+
+                public void processActiveExit() {
+                    mutex.set(false);
+                }
+            });
+        }
+
+    }
+
+    /**
+     * 重新设置sessionTime
+     *
+     * @param timeout
+     * @param unit
+     */
+    public void setSessionTimeout(Long timeout, TimeUnit unit) {
+        long t = unit.toMillis(timeout);
+        properties.put("request.timeout.ms", String.valueOf(t + 60000));
+        properties.put("session.timeout.ms", String.valueOf(t));
+    }
+
+    /**
+     * 打开连接
+     */
+    public void connect() {
+        if (connected) {
+            return;
+        }
+
+        if (runningMonitor != null) {
+            if (!runningMonitor.isStart()) {
+                runningMonitor.start();
+            }
+        }
+
+        connected = true;
+
+        if (kafkaConsumer == null) {
+            kafkaConsumer = new KafkaConsumer<String, Message>(properties);
+        }
+    }
+
+    /**
+     * 关闭链接
+     */
+    public void disconnnect() {
+        kafkaConsumer.close();
+
+        connected = false;
+        if (runningMonitor.isStart()) {
+            runningMonitor.stop();
+        }
+    }
+
+    private void waitClientRunning() {
+        try {
+            if (zkClientx != null) {
+                if (!connected) {// 未调用connect
+                    throw new CanalClientException("should connect first");
+                }
+
+                running = true;
+                mutex.get();// 阻塞等待
+            } else {
+                // 单机模式直接设置为running
+                running = true;
+            }
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            throw new CanalClientException(e);
+        }
+    }
+
+    public boolean checkValid() {
+        if (zkClientx != null) {
+            return mutex.state();
+        } else {
+            return true;// 默认都放过
+        }
+    }
+
+    /**
+     * 订阅topic
+     */
+    public void subscribe() {
+        waitClientRunning();
+        if (!running) {
+            return;
+        }
+
+        if (partition == null) {
+            kafkaConsumer.subscribe(Collections.singletonList(topic));
+        } else {
+            TopicPartition topicPartition = new TopicPartition(topic, partition);
+            kafkaConsumer.assign(Collections.singletonList(topicPartition));
+        }
+    }
+
+    /**
+     * 取消订阅
+     */
+    public void unsubscribe() {
+        waitClientRunning();
+        if (!running) {
+            return;
+        }
+
+        kafkaConsumer.unsubscribe();
+    }
+
+    /**
+     * 获取数据,自动进行确认
+     *
+     * @return
+     */
+    public Message get() {
+        return get(100L, TimeUnit.MILLISECONDS);
+    }
+
+    public Message get(Long timeout, TimeUnit unit) {
+        waitClientRunning();
+        if (!running) {
+            return null;
+        }
+
+        Message message = getWithoutAck(timeout, unit);
+        this.ack();
+        return message;
+    }
+
+    public Message getWithoutAck() {
+        return getWithoutAck(100L, TimeUnit.MILLISECONDS);
+    }
+
+    /**
+     * 获取数据,不进行确认,等待处理完成手工确认
+     *
+     * @return
+     */
+    public Message getWithoutAck(Long timeout, TimeUnit unit) {
+        waitClientRunning();
+        if (!running) {
+            return null;
+        }
+
+        ConsumerRecords<String, Message> records = kafkaConsumer.poll(unit.toMillis(timeout)); // 基于配置,最多只能poll到一条数据
+
+        if (!records.isEmpty()) {
+            return records.iterator().next().value();
+        }
+        return null;
+    }
+
+    /**
+     * 提交offset,如果超过 session.timeout.ms 设置的时间没有ack则会抛出异常,ack失败
+     */
+    public void ack() {
+        waitClientRunning();
+        if (!running) {
+            return;
+        }
+
+        kafkaConsumer.commitSync();
+    }
+
+    public void stopRunning() {
+        if (running) {
+            running = false; // 设置为非running状态
+            if (!mutex.state()) {
+                mutex.set(true); // 中断阻塞
+            }
+        }
+    }
+}

+ 62 - 0
kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/KafkaCanalConnectors.java

@@ -0,0 +1,62 @@
+package com.alibaba.otter.canal.kafka.client;
+
+/**
+ * canal kafka connectors创建工具类
+ *
+ * @author machengyuan @ 2018-6-12
+ * @version 1.0.0
+ */
+public class KafkaCanalConnectors {
+
+    /**
+     * 创建kafka客户端链接,独立运行不注册zk信息
+     *
+     * @param servers
+     * @param topic
+     * @param partition
+     * @param groupId
+     * @return
+     */
+    public static KafkaCanalConnector newKafkaConnector(String servers, String topic, Integer partition, String groupId) {
+        return new KafkaCanalConnector(null, servers, topic, partition, groupId);
+    }
+
+    /**
+     * 创建kafka客户端链接,独立运行不注册zk信息
+     *
+     * @param servers
+     * @param topic
+     * @param groupId
+     * @return
+     */
+    public static KafkaCanalConnector newKafkaConnector(String servers, String topic, String groupId) {
+        return new KafkaCanalConnector(null, servers, topic, null, groupId);
+    }
+
+    /**
+     * 创建kafka客户端链接
+     *
+     * @param zkServers
+     * @param servers
+     * @param topic
+     * @param partition
+     * @param groupId
+     * @return
+     */
+    public static KafkaCanalConnector newKafkaConnector(String zkServers, String servers, String topic, Integer partition, String groupId) {
+        return new KafkaCanalConnector(zkServers, servers, topic, partition, groupId);
+    }
+
+    /**
+     * 创建kafka客户端链接
+     *
+     * @param zkServers
+     * @param servers
+     * @param topic
+     * @param groupId
+     * @return
+     */
+    public static KafkaCanalConnector newKafkaConnector(String zkServers, String servers, String topic, String groupId) {
+        return new KafkaCanalConnector(zkServers, servers, topic, null, groupId);
+    }
+}

+ 64 - 0
kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/MessageDeserializer.java

@@ -0,0 +1,64 @@
+package com.alibaba.otter.canal.kafka.client;
+
+import java.util.Map;
+
+import org.apache.kafka.common.serialization.Deserializer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.alibaba.otter.canal.protocol.CanalEntry;
+import com.alibaba.otter.canal.protocol.CanalPacket;
+import com.alibaba.otter.canal.protocol.Message;
+import com.alibaba.otter.canal.protocol.exception.CanalClientException;
+import com.google.protobuf.ByteString;
+
+/**
+ * Kafka Message类的反序列化
+ *
+ * @author machengyuan @ 2018-6-12
+ * @version 1.0.0
+ */
+public class MessageDeserializer implements Deserializer<Message> {
+
+    private static Logger logger = LoggerFactory.getLogger(MessageDeserializer.class);
+
+    @Override
+    public void configure(Map<String, ?> configs, boolean isKey) {
+    }
+
+    @Override
+    public Message deserialize(String topic, byte[] data) {
+        try {
+            if (data == null) {
+                return null;
+            }
+            else {
+                CanalPacket.Packet p = CanalPacket.Packet.parseFrom(data);
+                switch (p.getType()) {
+                    case MESSAGES: {
+                        if (!p.getCompression().equals(CanalPacket.Compression.NONE)) {
+                            throw new CanalClientException("compression is not supported in this connector");
+                        }
+
+                        CanalPacket.Messages messages = CanalPacket.Messages.parseFrom(p.getBody());
+                        Message result = new Message(messages.getBatchId());
+                        for (ByteString byteString : messages.getMessagesList()) {
+                            result.addEntry(CanalEntry.Entry.parseFrom(byteString));
+                        }
+                        return result;
+                    }
+                    default:
+                        break;
+                }
+            }
+        } catch (Exception e) {
+            logger.error("Error when deserializing byte[] to message ");
+        }
+        return null;
+    }
+
+    @Override
+    public void close() {
+        // nothing to do
+    }
+}

+ 39 - 0
kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/running/ClientRunningData.java

@@ -0,0 +1,39 @@
+package com.alibaba.otter.canal.kafka.client.running;
+
+/**
+ * client running状态信息
+ *
+ * @author machengyuan 2018-06-20 下午04:10:12
+ * @version 1.0.0
+ */
public class ClientRunningData {

    // consumer group this client belongs to
    private String  groupId;
    // "host:port" identity of the client holding the running node
    private String  address;
    // whether this client currently holds the active role (defaults to true)
    private boolean active = true;

    public String getGroupId() {
        return this.groupId;
    }

    public void setGroupId(String groupId) {
        this.groupId = groupId;
    }

    public String getAddress() {
        return this.address;
    }

    public void setAddress(String address) {
        this.address = address;
    }

    public boolean isActive() {
        return this.active;
    }

    public void setActive(boolean active) {
        this.active = active;
    }

}

+ 21 - 0
kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/running/ClientRunningListener.java

@@ -0,0 +1,21 @@
+package com.alibaba.otter.canal.kafka.client.running;
+
+/**
+ * client running状态信息
+ *
+ * @author machengyuan 2018-06-20 下午04:10:12
+ * @version 1.0.0
+ */
public interface ClientRunningListener {

    /**
     * Invoked when this client has just become the active one.
     */
    void processActiveEnter();

    /**
     * Invoked when this client loses or gives up the active role.
     */
    void processActiveExit();

}

+ 281 - 0
kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/running/ClientRunningMonitor.java

@@ -0,0 +1,281 @@
+package com.alibaba.otter.canal.kafka.client.running;
+
+import java.text.MessageFormat;
+import java.util.Random;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import org.I0Itec.zkclient.IZkDataListener;
+import org.I0Itec.zkclient.exception.ZkException;
+import org.I0Itec.zkclient.exception.ZkInterruptedException;
+import org.I0Itec.zkclient.exception.ZkNoNodeException;
+import org.I0Itec.zkclient.exception.ZkNodeExistsException;
+import org.apache.zookeeper.CreateMode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
+
+import com.alibaba.otter.canal.common.AbstractCanalLifeCycle;
+import com.alibaba.otter.canal.common.utils.AddressUtils;
+import com.alibaba.otter.canal.common.utils.BooleanMutex;
+import com.alibaba.otter.canal.common.utils.JsonUtils;
+import com.alibaba.otter.canal.common.zookeeper.ZkClientx;
+import com.alibaba.otter.canal.common.zookeeper.ZookeeperPathUtils;
+import com.alibaba.otter.canal.protocol.exception.CanalClientException;
+
+
+/**
+ * kafka client running状态信息
+ *
+ * @author machengyuan 2018-06-20 下午04:10:12
+ * @version 1.0.0
+ */
public class ClientRunningMonitor extends AbstractCanalLifeCycle {

    // zk path templates: /otter/canal/topics/{topic}/{groupId}/running
    private static final String TOPIC_ROOT_NODE             = ZookeeperPathUtils.CANAL_ROOT_NODE
                                                              + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR + "topics";

    private static final String TOPIC_NODE                  = TOPIC_ROOT_NODE + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR
                                                              + "{0}";

    private static final String TOPIC_CLIENTID_NODE         = TOPIC_NODE + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR
                                                              + "{1}";

    private static final String TOPIC_CLIENTID_RUNNING_NODE = TOPIC_CLIENTID_NODE
                                                              + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR
                                                              + ZookeeperPathUtils.RUNNING_NODE;

    // Path of the ephemeral "running" node for a topic/groupId pair.
    private static String getTopicClientRunning(String topic, String groupId) {
        return MessageFormat.format(TOPIC_CLIENTID_RUNNING_NODE, topic, groupId);
    }

    // Path of the persistent parent node for a topic/groupId pair.
    private static String getClientIdNodePath(String topic, String groupId) {
        return MessageFormat.format(TOPIC_CLIENTID_NODE, topic, groupId);
    }

    private static final Logger        logger       = LoggerFactory.getLogger(ClientRunningMonitor.class);
    private ZkClientx                  zkClient;
    private String                     topic;
    private ClientRunningData          clientData;
    private IZkDataListener            dataListener;
    private BooleanMutex               mutex        = new BooleanMutex(false); // true while this client is active
    private volatile boolean           release      = false;
    private volatile ClientRunningData activeData;   // last known owner of the running node
    private ScheduledExecutorService   delayExector = Executors.newScheduledThreadPool(1);
    private ClientRunningListener      listener;
    private int                        delayTime    = 5; // seconds to wait before re-contending

    // Random port used only to build a unique client address; shared per JVM.
    private static Integer             virtualPort;

    public ClientRunningMonitor(){
        if (virtualPort == null) {
            Random rand = new Random();
            virtualPort = rand.nextInt(9000) + 1000;
        }

        dataListener = new IZkDataListener() {

            public void handleDataChange(String dataPath, Object data) throws Exception {
                MDC.put("kafkaTopic", topic);
                ClientRunningData runningData = JsonUtils.unmarshalFromByte((byte[]) data, ClientRunningData.class);
                if (!isMine(runningData.getAddress())) {
                    mutex.set(false);
                }

                if (!runningData.isActive() && isMine(runningData.getAddress())) { // an explicit release happened and this node was the active one
                    release = true;
                    releaseRunning();// fully release the mainstem
                }

                activeData = (ClientRunningData) runningData;
            }

            public void handleDataDeleted(String dataPath) throws Exception {
                MDC.put("kafkaTopic", topic);

                mutex.set(false);
                // Trigger an exit: may be a manual release or a session-expired
                // timeout caused by a network blip.
                processActiveExit();
                if (!release && activeData != null && isMine(activeData.getAddress())) {
                    // If the last active node was this one, re-contend immediately.
                    initRunning();
                } else {
                    // Otherwise wait delayTime to avoid frequent flip-flopping
                    // caused by transient network or zk issues.
                    delayExector.schedule(new Runnable() {

                        public void run() {
                            initRunning();
                        }
                    }, delayTime, TimeUnit.SECONDS);
                }
            }

        };

    }

    // Start watching the running node and contend for the active role.
    public void start() {
        super.start();

        String path = getTopicClientRunning(this.topic, clientData.getGroupId());

        zkClient.subscribeDataChanges(path, dataListener);
        initRunning();
    }

    // Stop watching and release the running node if held.
    public void stop() {
        super.stop();
        String path = getTopicClientRunning(this.topic, clientData.getGroupId());
        zkClient.unsubscribeDataChanges(path, dataListener);
        releaseRunning(); // best-effort release
        // Fix issue #697
        if (delayExector != null) {
            delayExector.shutdown();
        }
    }

    // Change log:
    // 1. Method made synchronized to guarantee sequential execution.
    // 2. If the activeData already on zk belongs to this node, reset the mutex
    //    to true; otherwise a deadlock would occur.
    // 3. Exception handling added so the running node is always deleted on
    //    failure; otherwise a deadlock would occur.
    public synchronized void initRunning() {
        if (!isStart()) {
            return;
        }

        String path = getTopicClientRunning(this.topic, clientData.getGroupId());
        // serialize
        byte[] bytes = JsonUtils.marshalToByte(clientData);
        try {
            mutex.set(false);
            zkClient.create(path, bytes, CreateMode.EPHEMERAL);
            processActiveEnter();// fire the activation callback
            activeData = clientData;
            mutex.set(true);
        } catch (ZkNodeExistsException e) {
            bytes = zkClient.readData(path, true);
            if (bytes == null) {// node vanished in the meantime, retry immediately
                initRunning();
            } else {
                activeData = JsonUtils.unmarshalFromByte(bytes, ClientRunningData.class);
                // A running node already exists; check whether it is ours to
                // avoid a livelock.
                if (activeData.getAddress().contains(":") && isMine(activeData.getAddress())) {
                    mutex.set(true);
                }
            }
        } catch (ZkNoNodeException e) {
            zkClient.createPersistent(getClientIdNodePath(this.topic, clientData.getGroupId()), true); // create the parent node first
            initRunning();
        } catch (Throwable t) {
            logger.error(MessageFormat.format("There is an error when execute initRunning method, with destination [{0}].",
                topic),
                t);
            // on any failure, try to release so the node does not leak
            releaseRunning();
            throw new CanalClientException("something goes wrong in initRunning method. ", t);
        }
    }

    /**
     * Block until this client becomes active; returns immediately if it
     * already is.
     *
     * @throws InterruptedException
     */
    public void waitForActive() throws InterruptedException {
        initRunning();
        mutex.get();
    }

    /**
     * Check whether the running node is currently owned by this client.
     */
    public boolean check() {
        String path = getTopicClientRunning(this.topic, clientData.getGroupId());
        // ZookeeperPathUtils.getDestinationClientRunning(this.destination,
        // clientData.getClientId());
        try {
            byte[] bytes = zkClient.readData(path);
            ClientRunningData eventData = JsonUtils.unmarshalFromByte(bytes, ClientRunningData.class);
            activeData = eventData;// refresh to the latest value
            // verify the owner address is this node
            boolean result = isMine(activeData.getAddress());
            if (!result) {
                logger.warn("canal is running in [{}] , but not in [{}]",
                    activeData.getAddress(),
                    clientData.getAddress());
            }
            return result;
        } catch (ZkNoNodeException e) {
            logger.warn("canal is not run any in node");
            return false;
        } catch (ZkInterruptedException e) {
            logger.warn("canal check is interrupt");
            Thread.interrupted();// clear the interrupt flag
            return check();
        } catch (ZkException e) {
            logger.warn("canal check is failed");
            return false;
        }
    }

    // Delete the running node if this client owns it; returns true on release.
    public boolean releaseRunning() {
        if (check()) {
            String path = getTopicClientRunning(this.topic, clientData.getGroupId());
            zkClient.delete(path);
            mutex.set(false);
            processActiveExit();
            return true;
        }

        return false;
    }

    // ====================== helper method ======================

    private boolean isMine(String address) {
        return address.equals(clientData.getAddress());
    }

    private void processActiveEnter() {
        if (listener != null) {
            // fire the callback, then publish this node's address on zk
            listener.processActiveEnter();
            this.clientData.setAddress(/* address */AddressUtils.getHostIp() + ":" + virtualPort);

            String path = getTopicClientRunning(this.topic, clientData.getGroupId());
            // serialize
            byte[] bytes = JsonUtils.marshalToByte(clientData);
            zkClient.writeData(path, bytes);
        }
    }

    private void processActiveExit() {
        if (listener != null) {
            listener.processActiveExit();
        }
    }

    public void setListener(ClientRunningListener listener) {
        this.listener = listener;
    }

    // ===================== setter / getter =======================

    public void setTopic(String topic) {
        this.topic = topic;
    }

    public void setClientData(ClientRunningData clientData) {
        this.clientData = clientData;
    }

    public void setDelayTime(int delayTime) {
        this.delayTime = delayTime;
    }

    public void setZkClient(ZkClientx zkClient) {
        this.zkClient = zkClient;
    }

}

+ 26 - 0
kafka-client/src/test/java/com/alibaba/otter/canal/kafka/client/running/AbstractKafkaTest.java

@@ -0,0 +1,26 @@
+package com.alibaba.otter.canal.kafka.client.running;
+
+import org.junit.Assert;
+
+/**
+ * Kafka 测试基类
+ *
+ * @author machengyuan @ 2018-6-12
+ * @version 1.0.0
+ */
+public abstract class AbstractKafkaTest {
+
+    public static String  topic     = "example";
+    public static Integer partition = null;
+    public static String  groupId   = "g4";
+    public static String  servers   = "slave1:6667,slave2:6667,slave3:6667";
+    public static String  zkServers = "slave1:2181,slave2:2181,slave3:2181";
+
+    public void sleep(long time) {
+        try {
+            Thread.sleep(time);
+        } catch (InterruptedException e) {
+            Assert.fail(e.getMessage());
+        }
+    }
+}

+ 144 - 0
kafka-client/src/test/java/com/alibaba/otter/canal/kafka/client/running/CanalKafkaClientExample.java

@@ -0,0 +1,144 @@
+package com.alibaba.otter.canal.kafka.client.running;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.kafka.common.errors.WakeupException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.util.Assert;
+
+import com.alibaba.otter.canal.kafka.client.KafkaCanalConnector;
+import com.alibaba.otter.canal.kafka.client.KafkaCanalConnectors;
+import com.alibaba.otter.canal.protocol.Message;
+
+/**
+ * Kafka client example
+ *
+ * @author machengyuan @ 2018-6-12
+ * @version 1.0.0
+ */
+public class CanalKafkaClientExample {
+
+    protected final static Logger logger = LoggerFactory.getLogger(CanalKafkaClientExample.class);
+
+    private KafkaCanalConnector connector;
+
+    private static volatile boolean running = false;
+
+    private Thread thread = null;
+
+    private Thread.UncaughtExceptionHandler handler = new Thread.UncaughtExceptionHandler() {
+
+        public void uncaughtException(Thread t, Throwable e) {
+            logger.error("parse events has an error", e);
+        }
+    };
+
+    public CanalKafkaClientExample(String zkServers, String servers, String topic, Integer partition, String groupId) {
+        connector = KafkaCanalConnectors.newKafkaConnector(zkServers, servers, topic, partition, groupId);
+    }
+
+    public static void main(String[] args) {
+        try {
+            final CanalKafkaClientExample kafkaCanalClientExample = new CanalKafkaClientExample(AbstractKafkaTest.zkServers,
+                    AbstractKafkaTest.servers,
+                    AbstractKafkaTest.topic,
+                    AbstractKafkaTest.partition,
+                    AbstractKafkaTest.groupId);
+            logger.info("## start the kafka consumer: {}-{}", AbstractKafkaTest.topic, AbstractKafkaTest.groupId);
+            kafkaCanalClientExample.start();
+            logger.info("## the canal kafka consumer is running now ......");
+            Runtime.getRuntime().addShutdownHook(new Thread() {
+
+                public void run() {
+                    try {
+                        logger.info("## stop the kafka consumer");
+                        kafkaCanalClientExample.stop();
+                    } catch (Throwable e) {
+                        logger.warn("##something goes wrong when stopping kafka consumer:", e);
+                    } finally {
+                        logger.info("## kafka consumer is down.");
+                    }
+                }
+
+            });
+            while (running)
+                ;
+        } catch (Throwable e) {
+            logger.error("## Something goes wrong when starting up the kafka consumer:", e);
+            System.exit(0);
+        }
+    }
+
+    public void start() {
+        Assert.notNull(connector, "connector is null");
+        thread = new Thread(new Runnable() {
+
+            public void run() {
+                process();
+            }
+        });
+        thread.setUncaughtExceptionHandler(handler);
+        thread.start();
+        running = true;
+    }
+
+    public void stop() {
+        if (!running) {
+            return;
+        }
+        connector.stopRunning();
+        running = false;
+        if (thread != null) {
+            try {
+                thread.join();
+            } catch (InterruptedException e) {
+                // ignore
+            }
+        }
+    }
+
+    private void process() {
+        while (!running)
+            ;
+        while (running) {
+            try {
+                connector.connect();
+                connector.subscribe();
+                while (running) {
+                    try {
+                        Message message = connector.getWithoutAck(1L, TimeUnit.SECONDS); // 获取message
+                        if (message == null) {
+                            continue;
+                        }
+                        long batchId = message.getId();
+                        int size = message.getEntries().size();
+                        if (batchId == -1 || size == 0) {
+                            // try {
+                            // Thread.sleep(1000);
+                            // } catch (InterruptedException e) {
+                            // }
+                        } else {
+                            // printSummary(message, batchId, size);
+                            // printEntry(message.getEntries());
+                            logger.info(message.toString());
+                        }
+
+                        connector.ack(); // 提交确认
+                    } catch (Exception e) {
+                        logger.error(e.getMessage(), e);
+                    }
+                }
+            } catch (Exception e) {
+                logger.error(e.getMessage(), e);
+            }
+        }
+
+        try {
+            connector.unsubscribe();
+        } catch (WakeupException e) {
+            // No-op. Continue process
+        }
+        connector.disconnnect();
+    }
+}

+ 62 - 0
kafka-client/src/test/java/com/alibaba/otter/canal/kafka/client/running/KafkaClientRunningTest.java

@@ -0,0 +1,62 @@
+package com.alibaba.otter.canal.kafka.client.running;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.kafka.common.errors.WakeupException;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.alibaba.otter.canal.kafka.client.KafkaCanalConnector;
+import com.alibaba.otter.canal.kafka.client.KafkaCanalConnectors;
+import com.alibaba.otter.canal.protocol.Message;
+
+/**
+ * Kafka consumer获取Message的测试例子
+ *
+ * @author machengyuan @ 2018-6-12
+ * @version 1.0.0
+ */
+public class KafkaClientRunningTest extends AbstractKafkaTest {
+
+    private Logger  logger  = LoggerFactory.getLogger(KafkaClientRunningTest.class);
+
+    private boolean running = true;
+
+    @Test
+    public void testKafkaConsumer() {
+        final ExecutorService executor = Executors.newFixedThreadPool(1);
+
+        final KafkaCanalConnector connector = KafkaCanalConnectors.newKafkaConnector(servers, topic, partition, groupId);
+
+        executor.submit(new Runnable() {
+
+            @Override
+            public void run() {
+                connector.connect();
+                connector.subscribe();
+                while (running) {
+                    try {
+                        Message message = connector.getWithoutAck(3L, TimeUnit.SECONDS);
+                        if (message != null) {
+                            System.out.println(message);
+                        }
+                        connector.ack();
+                    } catch (WakeupException e) {
+                        // ignore
+                    }
+                }
+                connector.unsubscribe();
+                connector.disconnnect();
+            }
+        });
+
+        sleep(60000);
+        running = false;
+        executor.shutdown();
+        logger.info("shutdown completed");
+    }
+
+}

+ 19 - 0
kafka-client/src/test/resources/logback.xml

@@ -0,0 +1,19 @@
+<configuration scan="true" scanPeriod=" 5 seconds">
+
+	<jmxConfigurator />
+	<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+		<encoder>
+			<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{56} - %msg%n
+			</pattern>
+		</encoder>
+	</appender>
+
+	<logger name="org.apache.kafka" additivity="false">
+		<level value="ERROR" />
+		<appender-ref ref="STDOUT" />
+	</logger>
+
+	<root level="INFO">
+		<appender-ref ref="STDOUT"/>
+	</root>
+</configuration>

+ 140 - 0
kafka/pom.xml

@@ -0,0 +1,140 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <artifactId>canal</artifactId>
+        <groupId>com.alibaba.otter</groupId>
+        <version>1.0.26-SNAPSHOT</version>
+        <relativePath>../pom.xml</relativePath>
+    </parent>
+    <groupId>com.alibaba.otter</groupId>
+    <artifactId>canal.kafka</artifactId>
+    <packaging>jar</packaging>
+    <name>canal kafka module for otter ${project.version}</name>
+    <dependencies>
+        <dependency>
+            <groupId>com.alibaba.otter</groupId>
+            <artifactId>canal.deployer</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.yaml</groupId>
+            <artifactId>snakeyaml</artifactId>
+            <version>1.17</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka_2.11</artifactId>
+            <version>0.9.0.1</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-log4j12</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <!-- deploy模块的packaging通常是jar,如果项目中没有java 源代码或资源文件,加上这一段配置使项目能通过构建 -->
+            <plugin>
+                <artifactId>maven-jar-plugin</artifactId>
+                <configuration>
+                    <archive>
+                        <addMavenDescriptor>true</addMavenDescriptor>
+                    </archive>
+                    <excludes>
+                        <exclude>**/logback.xml</exclude>
+                        <exclude>**/canal.properties</exclude>
+                        <exclude>**/spring/**</exclude>
+                        <exclude>**/example/**</exclude>
+                        <exclude>**/kafka.yml</exclude>
+                    </excludes>
+                </configuration>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-assembly-plugin</artifactId>
+                <!-- 这是最新版本,推荐使用这个版本 -->
+                <version>2.2.1</version>
+                <executions>
+                    <execution>
+                        <id>assemble</id>
+                        <goals>
+                            <goal>single</goal>
+                        </goals>
+                        <phase>package</phase>
+                    </execution>
+                </executions>
+                <configuration>
+                    <appendAssemblyId>false</appendAssemblyId>
+                    <attach>false</attach>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+
+    <profiles>
+        <profile>
+            <id>dev</id>
+            <activation>
+                <activeByDefault>true</activeByDefault>
+                <property>
+                    <name>env</name>
+                    <value>!release</value>
+                </property>
+            </activation>
+
+            <build>
+                <plugins>
+                    <plugin>
+                        <artifactId>maven-assembly-plugin</artifactId>
+                        <configuration>
+                            <!-- maven assembly插件需要一个描述文件 来告诉插件包的结构以及打包所需的文件来自哪里 -->
+                            <descriptors>
+                                <descriptor>${basedir}/src/main/assembly/dev.xml</descriptor>
+                            </descriptors>
+                            <finalName>canal</finalName>
+                            <outputDirectory>${project.build.directory}</outputDirectory>
+                        </configuration>
+                    </plugin>
+                </plugins>
+            </build>
+
+        </profile>
+
+        <profile>
+            <id>release</id>
+            <activation>
+                <property>
+                    <name>env</name>
+                    <value>release</value>
+                </property>
+            </activation>
+
+            <build>
+                <plugins>
+                    <plugin>
+                        <artifactId>maven-assembly-plugin</artifactId>
+                        <configuration>
+                            <!-- 发布模式使用的maven assembly插件描述文件 -->
+                            <descriptors>
+                                <descriptor>${basedir}/src/main/assembly/release.xml</descriptor>
+                            </descriptors>
+                            <!-- 如果一个应用的包含多个deploy模块,如果使用同样的包名, 如果把它们复制的一个目录中可能会失败,所以包名加了 artifactId以示区分 -->
+                            <finalName>${project.artifactId}-${project.version}</finalName>
+                            <!-- scm 要求 release 模式打出的包放到顶级目录下的target子目录中 -->
+                            <outputDirectory>${project.parent.build.directory}</outputDirectory>
+                        </configuration>
+                    </plugin>
+                </plugins>
+            </build>
+        </profile>
+    </profiles>
+</project>

+ 64 - 0
kafka/src/main/assembly/dev.xml

@@ -0,0 +1,64 @@
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+	xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+	<id>dist</id>
+	<formats>
+		<format>dir</format>
+	</formats>
+	<includeBaseDirectory>false</includeBaseDirectory>
+	<fileSets>
+		<fileSet>
+			<directory>.</directory>
+			<outputDirectory>/</outputDirectory>
+			<includes>
+				<include>README*</include>
+			</includes>
+		</fileSet>
+		<fileSet>
+			<directory>./src/main/bin</directory>
+			<outputDirectory>bin</outputDirectory>
+			<includes>
+				<include>**/*</include>
+			</includes>
+			<fileMode>0755</fileMode>
+		</fileSet>
+		<fileSet>
+			<directory>../deployer/src/main/conf</directory>
+			<outputDirectory>/conf</outputDirectory>
+			<includes>
+				<include>**/*</include>
+			</includes>
+		</fileSet>
+		<fileSet>
+			<directory>../deployer/src/main/resources</directory>
+			<outputDirectory>/conf</outputDirectory>
+			<includes>
+				<include>**/*</include>
+			</includes>
+			<excludes>
+				<exclude>logback.xml</exclude>
+			</excludes>
+		</fileSet>
+		<fileSet>
+			<directory>./src/main/resources</directory>
+			<outputDirectory>/conf</outputDirectory>
+			<includes>
+				<include>**/*</include>
+			</includes>
+		</fileSet>
+		<fileSet>
+			<directory>target</directory>
+			<outputDirectory>logs</outputDirectory>
+			<excludes>
+				<exclude>**/*</exclude>
+			</excludes>
+		</fileSet>
+	</fileSets>
+	<dependencySets>
+		<dependencySet>
+			<outputDirectory>lib</outputDirectory>
+			<excludes>
+				<exclude>junit:junit</exclude>
+			</excludes>
+		</dependencySet>
+	</dependencySets>
+</assembly>

+ 64 - 0
kafka/src/main/assembly/release.xml

@@ -0,0 +1,64 @@
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+	xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+	<id>dist</id>
+	<formats>
+		<format>tar.gz</format>
+	</formats>
+	<includeBaseDirectory>false</includeBaseDirectory>
+	<fileSets>
+		<fileSet>
+			<directory>.</directory>
+			<outputDirectory>/</outputDirectory>
+			<includes>
+				<include>README*</include>
+			</includes>
+		</fileSet>
+		<fileSet>
+			<directory>./src/main/bin</directory>
+			<outputDirectory>bin</outputDirectory>
+			<includes>
+				<include>**/*</include>
+			</includes>
+			<fileMode>0755</fileMode>
+		</fileSet>
+		<fileSet>
+			<directory>../deployer/src/main/conf</directory>
+			<outputDirectory>/conf</outputDirectory>
+			<includes>
+				<include>**/*</include>
+			</includes>
+		</fileSet>
+		<fileSet>
+			<directory>../deployer/src/main/resources</directory>
+			<outputDirectory>/conf</outputDirectory>
+			<includes>
+				<include>**/*</include>
+			</includes>
+			<excludes>
+				<exclude>logback.xml</exclude>
+			</excludes>
+		</fileSet>
+		<fileSet>
+			<directory>./src/main/resources</directory>
+			<outputDirectory>/conf</outputDirectory>
+			<includes>
+				<include>**/*</include>
+			</includes>
+		</fileSet>
+		<fileSet>
+			<directory>target</directory>
+			<outputDirectory>logs</outputDirectory>
+			<excludes>
+				<exclude>**/*</exclude>
+			</excludes>
+		</fileSet>
+	</fileSets>
+	<dependencySets>
+		<dependencySet>
+			<outputDirectory>lib</outputDirectory>
+			<excludes>
+				<exclude>junit:junit</exclude>
+			</excludes>
+		</dependencySet>
+	</dependencySets>
+</assembly>

Some files were not shown because too many files changed in this diff