瀏覽代碼

rds binlog 下载支持

charles.lin 6 年之前
父節點
當前提交
61c7f7738d
共有 100 個文件被更改,包括 5453 次插入和 280 次刪除
  1. 2 0
      .gitignore
  2. 10 0
      README.md
  3. 2 1
      client/src/main/java/com/alibaba/otter/canal/client/impl/SimpleCanalConnector.java
  4. 3 2
      dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/GtidLogEvent.java
  5. 52 0
      dbsync/src/test/java/com/taobao/tddl/dbsync/FetcherPerformanceTest.java
  6. 41 52
      dbsync/src/test/java/com/taobao/tddl/dbsync/binlog/DirectLogFetcherTest.java
  7. 8 0
      deployer/pom.xml
  8. 15 0
      deployer/src/main/bin/metrics_env.sh
  9. 6 1
      deployer/src/main/bin/startup.sh
  10. 14 2
      deployer/src/main/java/com/alibaba/otter/canal/deployer/CanalController.java
  11. 9 1
      deployer/src/main/resources/canal.properties
  12. 9 10
      deployer/src/main/resources/example/instance.properties
  13. 3 2
      deployer/src/main/resources/spring/default-instance.xml
  14. 3 2
      deployer/src/main/resources/spring/file-instance.xml
  15. 5 4
      deployer/src/main/resources/spring/group-instance.xml
  16. 2 1
      deployer/src/main/resources/spring/local-instance.xml
  17. 3 2
      deployer/src/main/resources/spring/memory-instance.xml
  18. 67 0
      docker/Dockerfile
  19. 30 0
      docker/build.sh
  20. 117 0
      docker/image/admin/app.sh
  21. 2 0
      docker/image/admin/bin/clean_log
  22. 45 0
      docker/image/admin/bin/clean_log.sh
  23. 13 0
      docker/image/admin/health.sh
  24. 11 0
      docker/image/alidata/bin/exec_rc_local.sh
  25. 6 0
      docker/image/alidata/bin/lark-wait
  26. 27 0
      docker/image/alidata/bin/main.sh
  27. 19 0
      docker/image/alidata/init/02init-sshd.sh
  28. 66 0
      docker/image/alidata/init/fix-hosts.py
  29. 40 0
      docker/image/alidata/lib/proc.sh
  30. 92 0
      docker/run.sh
  31. 1 1
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/MysqlConnector.java
  32. 1 1
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/packets/client/BinlogDumpCommandPacket.java
  33. 2 2
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/socket/BioSocketChannel.java
  34. 76 0
      example/pom.xml
  35. 1 0
      example/src/main/java/com/alibaba/otter/canal/example/AbstractCanalClientTest.java
  36. 68 0
      example/src/main/java/com/alibaba/otter/canal/example/SimpleCanalClientPermanceTest.java
  37. 144 0
      example/src/main/java/com/alibaba/otter/canal/example/db/AbstractDbClient.java
  38. 488 0
      example/src/main/java/com/alibaba/otter/canal/example/db/CanalConnectorClient.java
  39. 35 0
      example/src/main/java/com/alibaba/otter/canal/example/db/MysqlLoadLauncher.java
  40. 169 0
      example/src/main/java/com/alibaba/otter/canal/example/db/PropertyPlaceholderConfigurer.java
  41. 44 0
      example/src/main/java/com/alibaba/otter/canal/example/db/ServiceLocator.java
  42. 121 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/AbstractDbDialect.java
  43. 105 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/AbstractSqlTemplate.java
  44. 20 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/DbDialect.java
  45. 40 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/SqlTemplate.java
  46. 93 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/TableType.java
  47. 32 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/mysql/MysqlDialect.java
  48. 84 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/mysql/MysqlSqlTemplate.java
  49. 207 0
      example/src/main/java/com/alibaba/otter/canal/example/db/mysql/AbstractMysqlClient.java
  50. 23 0
      example/src/main/java/com/alibaba/otter/canal/example/db/mysql/MysqlClient.java
  51. 50 0
      example/src/main/java/com/alibaba/otter/canal/example/db/utils/ByteArrayConverter.java
  52. 326 0
      example/src/main/java/com/alibaba/otter/canal/example/db/utils/DdlUtils.java
  53. 140 0
      example/src/main/java/com/alibaba/otter/canal/example/db/utils/SqlTimestampConverter.java
  54. 315 0
      example/src/main/java/com/alibaba/otter/canal/example/db/utils/SqlUtils.java
  55. 53 0
      example/src/main/resources/client-spring.xml
  56. 16 0
      example/src/main/resources/client.properties
  57. 19 13
      instance/manager/src/main/java/com/alibaba/otter/canal/instance/manager/CanalInstanceWithManager.java
  58. 37 1
      instance/manager/src/main/java/com/alibaba/otter/canal/instance/manager/model/CanalParameter.java
  59. 3 1
      kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/MessageDeserializer.java
  60. 2 2
      kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/running/ClientRunningData.java
  61. 44 38
      kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/running/ClientRunningMonitor.java
  62. 3 3
      kafka/src/main/java/com/alibaba/otter/canal/kafka/CanalServerStarter.java
  63. 27 15
      kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/CanalKafkaProducer.java
  64. 5 2
      kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/CanalKafkaStarter.java
  65. 17 8
      kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/KafkaProperties.java
  66. 52 10
      kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/MessageSerializer.java
  67. 1 0
      kafka/src/main/resources/kafka.yml
  68. 30 0
      parse/src/main/java/com/alibaba/otter/canal/parse/exception/PositionNotFoundException.java
  69. 30 0
      parse/src/main/java/com/alibaba/otter/canal/parse/exception/ServerIdNotMatchException.java
  70. 40 6
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/AbstractEventParser.java
  71. 2 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/ErosaConnection.java
  72. 2 2
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/MultiStageCoprocessor.java
  73. 9 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/ParserExceptionHandler.java
  74. 11 2
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/AbstractMysqlEventParser.java
  75. 53 3
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/LocalBinLogConnection.java
  76. 18 2
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlConnection.java
  77. 29 9
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlEventParser.java
  78. 38 34
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlMultiStageCoprocessor.java
  79. 0 3
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/DirectLogFetcher.java
  80. 10 4
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/LogEventConvert.java
  81. 6 1
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/TableMetaCache.java
  82. 260 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/BinlogDownloadQueue.java
  83. 149 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/RdsBinlogEventParserProxy.java
  84. 53 8
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/RdsBinlogOpenApi.java
  85. 130 29
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/RdsLocalBinlogEventParser.java
  86. 72 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/data/BinlogFile.java
  87. 62 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/data/DescribeBinlogFileResult.java
  88. 69 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/data/RdsBackupPolicy.java
  89. 19 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/data/RdsItem.java
  90. 250 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/request/AbstractRequest.java
  91. 41 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/request/DescribeBackupPolicyRequest.java
  92. 56 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/request/DescribeBinlogFilesRequest.java
  93. 191 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tablemeta/HistoryTableMetaCache.java
  94. 20 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tablemeta/TableMetaCacheInterface.java
  95. 105 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tablemeta/TableMetaCacheWithStorage.java
  96. 55 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tablemeta/TableMetaEntry.java
  97. 18 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tablemeta/TableMetaStorage.java
  98. 9 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tablemeta/TableMetaStorageFactory.java
  99. 9 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tablemeta/exception/CacheConnectionNull.java
  100. 21 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tablemeta/exception/NoHistoryException.java

+ 2 - 0
.gitignore

@@ -14,3 +14,5 @@ jtester.properties
 .idea/
 *.iml
 .DS_Store
+*.tar.gz
+*.rpm

+ 10 - 0
README.md

@@ -5,6 +5,8 @@
 <ol>
 <li>canal QQ讨论群已经建立,群号:161559791 ,欢迎加入进行技术讨论。</li>
 <li>canal消费端项目开源: Otter(分布式数据库同步系统),地址:<a href="https://github.com/alibaba/otter">https://github.com/alibaba/otter</a></li>
+<li>Canal已在阿里云推出商业化版本 <a href="https://www.aliyun.com/product/dts?spm=a2c4g.11186623.cloudEssentials.80.srdwr7">数据传输服务DTS</a>, 开通即用,免去部署维护的昂贵使用成本。DTS针对阿里云RDS、DRDS等产品进行了适配,解决了Binlog日志回收,主备切换、VPC网络切换等场景下的订阅高可用问题。同时,针对RDS进行了针对性的性能优化。出于稳定性、性能及成本的考虑,强烈推荐阿里云用户使用DTS产品。<a href="https://help.aliyun.com/document_detail/26592.html?spm=a2c4g.11174283.6.539.t1Y91E">DTS产品使用文档</a></li>
+DTS支持阿里云RDS&DRDS的Binlog日志实时订阅,现推出首月免费体验,限时限量,<a href="https://common-buy.aliyun.com/?commodityCode=dtspre&request=%7b%22dts_function%22%3a%22data_subscribe%22%7d">立即体验>>></a>
 </ol>
 
 <h1>背景</h1>
@@ -73,6 +75,14 @@ See the wiki page for : <a href="https://github.com/alibaba/canal/wiki" >wiki文
 <li>阿里巴巴去Oracle数据迁移同步工具(目标支持MySQL/DRDS):<a href="http://github.com/alibaba/yugong">http://github.com/alibaba/yugong</a></li>
 </ol>
 
+<h1>相关产品</h1>
+<ol>
+<li><a href="https://www.aliyun.com/product/drds?spm=5176.55326.cloudEssentials.71.69fd227dRPZj9K">阿里云分布式数据库DRDS</a></li>
+<li><a href="https://www.aliyun.com/product/dts?spm=5176.7947010.cloudEssentials.80.33f734f4JOAxSP">阿里云数据传输服务DTS</a></li>
+<li><a href="https://www.aliyun.com/product/dbs?spm=5176.54487.cloudEssentials.83.34b851a8GmVZg6">阿里云数据库备份服务DBS</a></li>
+<li><a href="https://www.aliyun.com/product/dms?spm=5176.169464.cloudEssentials.81.2e1066feC1sBBL">阿里云数据管理服务DMS</a></li>
+</ol>
+
 <h1>问题反馈</h1>
 <ol>
 <li>qq交流群: 161559791 </li>

+ 2 - 1
client/src/main/java/com/alibaba/otter/canal/client/impl/SimpleCanalConnector.java

@@ -319,7 +319,8 @@ public class SimpleCanalConnector implements CanalConnector {
     }
 
     private Message receiveMessages() throws IOException {
-        Packet p = Packet.parseFrom(readNextPacket());
+        byte[] data = readNextPacket();
+        Packet p = Packet.parseFrom(data);
         switch (p.getType()) {
             case MESSAGES: {
                 if (!p.getCompression().equals(Compression.NONE)) {

+ 3 - 2
dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/GtidLogEvent.java

@@ -44,13 +44,14 @@ public class GtidLogEvent extends LogEvent {
         gno = buffer.getLong64();
 
         // support gtid lastCommitted and sequenceNumber
-        // 42 = 1+16+8+1+8+8
-        if (buffer.capacity() > 42 && buffer.getUint8() == LOGICAL_TIMESTAMP_TYPE_CODE) {
+        // fix bug #776
+        if (buffer.hasRemaining() && buffer.remaining() > 16 && buffer.getUint8() == LOGICAL_TIMESTAMP_TYPE_CODE) {
             lastCommitted = buffer.getLong64();
             sequenceNumber = buffer.getLong64();
         }
 
 
+
         // ignore gtid info read
         // sid.copy_from((uchar *)ptr_buffer);
         // ptr_buffer+= ENCODED_SID_LENGTH;

+ 52 - 0
dbsync/src/test/java/com/taobao/tddl/dbsync/FetcherPerformanceTest.java

@@ -0,0 +1,52 @@
+package com.taobao.tddl.dbsync;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.Statement;
+import java.util.concurrent.atomic.AtomicLong;
+
+import com.taobao.tddl.dbsync.binlog.DirectLogFetcher;
+import com.taobao.tddl.dbsync.binlog.LogEvent;
+
+public class FetcherPerformanceTest {
+
+    public static void main(String args[]) {
+        DirectLogFetcher fetcher = new DirectLogFetcher();
+        try {
+            Class.forName("com.mysql.jdbc.Driver");
+            Connection connection = DriverManager.getConnection("jdbc:mysql://127.0.0.1:3306",
+                "root",
+                "hello");
+            Statement statement = connection.createStatement();
+            statement.execute("SET @master_binlog_checksum='@@global.binlog_checksum'");
+            statement.execute("SET @mariadb_slave_capability='" + LogEvent.MARIA_SLAVE_CAPABILITY_MINE + "'");
+
+            fetcher.open(connection, "mysql-bin.000006", 120L, 2);
+
+            AtomicLong sum = new AtomicLong(0);
+            long start = System.currentTimeMillis();
+            long last = 0;
+            long end = 0;
+
+            while (fetcher.fetch()) {
+                sum.incrementAndGet();
+                long current = sum.get();
+                if (current - last >= 100000) {
+                    end = System.currentTimeMillis();
+                    long tps = ((current - last) * 1000) / (end - start);
+                    System.out.println(" total : " + sum + " , cost : " + (end - start) + " , tps : " + tps);
+                    last = current;
+                    start = end;
+                }
+            }
+        } catch (Exception e) {
+            e.printStackTrace();
+        } finally {
+            try {
+                fetcher.close();
+            } catch (IOException e) {
+            }
+        }
+    }
+}

+ 41 - 52
dbsync/src/test/java/com/taobao/tddl/dbsync/binlog/DirectLogFetcherTest.java

@@ -8,15 +8,6 @@ import java.sql.Statement;
 import org.junit.Assert;
 import org.junit.Test;
 
-import com.taobao.tddl.dbsync.binlog.event.DeleteRowsLogEvent;
-import com.taobao.tddl.dbsync.binlog.event.QueryLogEvent;
-import com.taobao.tddl.dbsync.binlog.event.RotateLogEvent;
-import com.taobao.tddl.dbsync.binlog.event.RowsQueryLogEvent;
-import com.taobao.tddl.dbsync.binlog.event.UpdateRowsLogEvent;
-import com.taobao.tddl.dbsync.binlog.event.WriteRowsLogEvent;
-import com.taobao.tddl.dbsync.binlog.event.XidLogEvent;
-import com.taobao.tddl.dbsync.binlog.event.mariadb.AnnotateRowsEvent;
-
 public class DirectLogFetcherTest extends BaseLogFetcherTest {
 
     @Test
@@ -24,56 +15,54 @@ public class DirectLogFetcherTest extends BaseLogFetcherTest {
         DirectLogFetcher fecther = new DirectLogFetcher();
         try {
             Class.forName("com.mysql.jdbc.Driver");
-            Connection connection = DriverManager.getConnection("jdbc:mysql://127.0.0.1:3306", "root", "hello");
+            Connection connection = DriverManager.getConnection("jdbc:mysql://100.81.154.142:3306", "root", "hello");
             Statement statement = connection.createStatement();
             statement.execute("SET @master_binlog_checksum='@@global.binlog_checksum'");
             statement.execute("SET @mariadb_slave_capability='" + LogEvent.MARIA_SLAVE_CAPABILITY_MINE + "'");
 
-            fecther.open(connection, "mysql-bin.000001", 4L, 2);
+            fecther.open(connection, "mysql-bin.000006", 120L, 2);
 
-            LogDecoder decoder = new LogDecoder(LogEvent.UNKNOWN_EVENT, LogEvent.ENUM_END_EVENT);
+            LogDecoder decoder = new LogDecoder(LogEvent.UNKNOWN_EVENT, LogEvent.UNKNOWN_EVENT);
             LogContext context = new LogContext();
             while (fecther.fetch()) {
-                LogEvent event = null;
-                event = decoder.decode(fecther, context);
-
-                if (event == null) {
-                    continue;
-                    // throw new RuntimeException("parse failed");
-                }
-
-                int eventType = event.getHeader().getType();
-                switch (eventType) {
-                    case LogEvent.ROTATE_EVENT:
-                        binlogFileName = ((RotateLogEvent) event).getFilename();
-                        break;
-                    case LogEvent.WRITE_ROWS_EVENT_V1:
-                    case LogEvent.WRITE_ROWS_EVENT:
-                        parseRowsEvent((WriteRowsLogEvent) event);
-                        break;
-                    case LogEvent.UPDATE_ROWS_EVENT_V1:
-                    case LogEvent.UPDATE_ROWS_EVENT:
-                        parseRowsEvent((UpdateRowsLogEvent) event);
-                        break;
-                    case LogEvent.DELETE_ROWS_EVENT_V1:
-                    case LogEvent.DELETE_ROWS_EVENT:
-                        parseRowsEvent((DeleteRowsLogEvent) event);
-                        break;
-                    case LogEvent.QUERY_EVENT:
-                        parseQueryEvent((QueryLogEvent) event);
-                        break;
-                    case LogEvent.ROWS_QUERY_LOG_EVENT:
-                        parseRowsQueryEvent((RowsQueryLogEvent) event);
-                        break;
-                    case LogEvent.ANNOTATE_ROWS_EVENT:
-                        parseAnnotateRowsEvent((AnnotateRowsEvent) event);
-                        break;
-                    case LogEvent.XID_EVENT:
-                        parseXidEvent((XidLogEvent) event);
-                        break;
-                    default:
-                        break;
-                }
+                decoder.decode(fecther, context);
+                continue;
+                // if (event == null) {
+                // continue;
+                // }
+                //
+                // int eventType = event.getHeader().getType();
+                // switch (eventType) {
+                // case LogEvent.ROTATE_EVENT:
+                // binlogFileName = ((RotateLogEvent) event).getFilename();
+                // break;
+                // case LogEvent.WRITE_ROWS_EVENT_V1:
+                // case LogEvent.WRITE_ROWS_EVENT:
+                // parseRowsEvent((WriteRowsLogEvent) event);
+                // break;
+                // case LogEvent.UPDATE_ROWS_EVENT_V1:
+                // case LogEvent.UPDATE_ROWS_EVENT:
+                // parseRowsEvent((UpdateRowsLogEvent) event);
+                // break;
+                // case LogEvent.DELETE_ROWS_EVENT_V1:
+                // case LogEvent.DELETE_ROWS_EVENT:
+                // parseRowsEvent((DeleteRowsLogEvent) event);
+                // break;
+                // case LogEvent.QUERY_EVENT:
+                // parseQueryEvent((QueryLogEvent) event);
+                // break;
+                // case LogEvent.ROWS_QUERY_LOG_EVENT:
+                // parseRowsQueryEvent((RowsQueryLogEvent) event);
+                // break;
+                // case LogEvent.ANNOTATE_ROWS_EVENT:
+                // parseAnnotateRowsEvent((AnnotateRowsEvent) event);
+                // break;
+                // case LogEvent.XID_EVENT:
+                // parseXidEvent((XidLogEvent) event);
+                // break;
+                // default:
+                // break;
+                // }
             }
         } catch (Exception e) {
             e.printStackTrace();

+ 8 - 0
deployer/pom.xml

@@ -16,6 +16,14 @@
 			<artifactId>canal.server</artifactId>
 			<version>${project.version}</version>
 		</dependency>
+
+		<!-- 这里指定runtime的metrics provider-->
+		<!--<dependency>-->
+			<!--<groupId>com.alibaba.otter</groupId>-->
+			<!--<artifactId>canal.prometheus</artifactId>-->
+			<!--<version>${project.version}</version>-->
+			<!--<scope>runtime</scope>-->
+		<!--</dependency>-->
 	</dependencies>
 	
 	<build>

+ 15 - 0
deployer/src/main/bin/metrics_env.sh

@@ -0,0 +1,15 @@
+#!/bin/bash
+# Additional line arg for current prometheus solution
+case "`uname`" in
+Linux)
+    bin_abs_path=$(readlink -f $(dirname $0))
+	;;
+*)
+	bin_abs_path=`cd $(dirname $0); pwd`
+	;;
+esac
+base=${bin_abs_path}/..
+if [ $(ls $base/lib/aspectjweaver*.jar | wc -l) -eq 1 ]; then
+    WEAVER=$(ls $base/lib/aspectjweaver*.jar)
+    METRICS_OPTS=" -javaagent:"${WEAVER}" "
+fi

+ 6 - 1
deployer/src/main/bin/startup.sh

@@ -94,7 +94,12 @@ then
 	echo LOG CONFIGURATION : $logback_configurationFile
 	echo canal conf : $canal_conf 
 	echo CLASSPATH :$CLASSPATH
-	$JAVA $JAVA_OPTS $JAVA_DEBUG_OPT $CANAL_OPTS -classpath .:$CLASSPATH com.alibaba.otter.canal.deployer.CanalLauncher 1>>$base/logs/canal/canal.log 2>&1 &
+#   metrics support options
+#	if [ -x $base/bin/metrics_env.sh ]; then
+#	    . $base/bin/metrics_env.sh
+#	    echo METRICS_OPTS $METRICS_OPTS
+#	fi
+	$JAVA $JAVA_OPTS $METRICS_OPTS $JAVA_DEBUG_OPT $CANAL_OPTS -classpath .:$CLASSPATH com.alibaba.otter.canal.deployer.CanalLauncher 1>>$base/logs/canal/canal.log 2>&1 &
 	echo $! > $base/bin/canal.pid 
 	
 	echo "cd to $current_path for continue"

+ 14 - 2
deployer/src/main/java/com/alibaba/otter/canal/deployer/CanalController.java

@@ -34,6 +34,7 @@ import com.alibaba.otter.canal.instance.core.CanalInstanceGenerator;
 import com.alibaba.otter.canal.instance.manager.CanalConfigClient;
 import com.alibaba.otter.canal.instance.manager.ManagerCanalInstanceGenerator;
 import com.alibaba.otter.canal.instance.spring.SpringCanalInstanceGenerator;
+import com.alibaba.otter.canal.parse.CanalEventParser;
 import com.alibaba.otter.canal.server.embedded.CanalServerWithEmbedded;
 import com.alibaba.otter.canal.server.exception.CanalServerException;
 import com.alibaba.otter.canal.server.netty.CanalServerWithNetty;
@@ -303,7 +304,7 @@ public class CanalController {
                     return instanceGenerator.generate(destination);
                 } else if (config.getMode().isSpring()) {
                     SpringCanalInstanceGenerator instanceGenerator = new SpringCanalInstanceGenerator();
-                    synchronized (this) {
+                    synchronized (CanalEventParser.class) {
                         try {
                             // 设置当前正在加载的通道,加载spring查找文件时会用到该变量
                             System.setProperty(CanalConstants.CANAL_DESTINATION_PROPERTY, destination);
@@ -379,7 +380,18 @@ public class CanalController {
     }
 
     private String getProperty(Properties properties, String key) {
-        return StringUtils.trim(properties.getProperty(StringUtils.trim(key)));
+        key = StringUtils.trim(key);
+        String value = System.getProperty(key);
+
+        if (value == null) {
+            value = System.getenv(key);
+        }
+
+        if (value == null) {
+            value = properties.getProperty(key);
+        }
+
+        return StringUtils.trim(value);
     }
 
     public void start() throws Throwable {

+ 9 - 1
deployer/src/main/resources/canal.properties

@@ -3,7 +3,7 @@
 #################################################
 canal.id= 1
 canal.ip=
-canal.port= 11111
+canal.port=11111
 canal.zkServers=
 # flush data to zk
 canal.zookeeper.flush.period = 1000
@@ -43,6 +43,7 @@ canal.instance.filter.query.dml = false
 canal.instance.filter.query.ddl = false
 canal.instance.filter.table.error = false
 canal.instance.filter.rows = false
+canal.instance.filter.transaction.entry = false
 
 # binlog format/image check
 canal.instance.binlog.format = ROW,STATEMENT,MIXED 
@@ -58,6 +59,13 @@ canal.instance.parser.parallel = true
 ## disruptor ringbuffer size, must be power of 2
 canal.instance.parser.parallelBufferSize = 256
 
+# table meta tsdb info
+canal.instance.tsdb.enable=true
+canal.instance.tsdb.dir=${canal.file.data.dir:../conf}/${canal.instance.destination:}
+canal.instance.tsdb.url=jdbc:h2:${canal.instance.tsdb.dir}/h2;CACHE_SIZE=1000;MODE=MYSQL;
+canal.instance.tsdb.dbUsername=canal
+canal.instance.tsdb.dbPassword=canal
+
 #################################################
 ######### 		destinations		############# 
 #################################################

+ 9 - 10
deployer/src/main/resources/example/instance.properties

@@ -1,11 +1,12 @@
 #################################################
-## mysql serverId
-canal.instance.mysql.slaveId=0
+## mysql serverId , v1.0.26+ will autoGen 
+# canal.instance.mysql.slaveId=0
 
-# position info
-canal.instance.master.address=127.0.0.1:3306
 # enable gtid use true/false
 canal.instance.gtidon=false
+
+# position info
+canal.instance.master.address=127.0.0.1:3306
 canal.instance.master.journal.name=
 canal.instance.master.position=
 canal.instance.master.timestamp=
@@ -13,23 +14,21 @@ canal.instance.master.gtid=
 
 # table meta tsdb info
 canal.instance.tsdb.enable=true
-canal.instance.tsdb.dir=${canal.file.data.dir:../conf}/${canal.instance.destination:}
-canal.instance.tsdb.url=jdbc:h2:${canal.instance.tsdb.dir}/h2;CACHE_SIZE=1000;MODE=MYSQL;
 #canal.instance.tsdb.url=jdbc:mysql://127.0.0.1:3306/canal_tsdb
-canal.instance.tsdb.dbUsername=canal
-canal.instance.tsdb.dbPassword=canal
-
+#canal.instance.tsdb.dbUsername=canal
+#canal.instance.tsdb.dbPassword=canal
 
 #canal.instance.standby.address =
 #canal.instance.standby.journal.name =
 #canal.instance.standby.position = 
 #canal.instance.standby.timestamp =
 #canal.instance.standby.gtid=
+
 # username/password
 canal.instance.dbUsername=canal
 canal.instance.dbPassword=canal
-canal.instance.defaultDatabaseName=test
 canal.instance.connectionCharset=UTF-8
+
 # table regex
 canal.instance.filter.regex=.*\\..*
 # table black regex

+ 3 - 2
deployer/src/main/resources/spring/default-instance.xml

@@ -81,6 +81,7 @@
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
 		<property name="eventStore" ref="eventStore" />
+		<property name="filterTransactionEntry" value="${canal.instance.filter.transaction.entry:false}"/>
 	</bean>
 
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
@@ -148,7 +149,7 @@
 				<property name="address" value="${canal.instance.master.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
@@ -156,7 +157,7 @@
 				<property name="address" value="${canal.instance.standby.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		

+ 3 - 2
deployer/src/main/resources/spring/file-instance.xml

@@ -67,6 +67,7 @@
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
 		<property name="eventStore" ref="eventStore" />
+		<property name="filterTransactionEntry" value="${canal.instance.filter.transaction.entry:false}"/>
 	</bean>
 
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
@@ -133,7 +134,7 @@
 				<property name="address" value="${canal.instance.master.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
@@ -141,7 +142,7 @@
 				<property name="address" value="${canal.instance.standby.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		

+ 5 - 4
deployer/src/main/resources/spring/group-instance.xml

@@ -64,6 +64,7 @@
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
 		<property name="eventStore" ref="eventStore" />
+		<property name="filterTransactionEntry" value="${canal.instance.filter.transaction.entry:false}"/>
 	</bean>
 	
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.group.GroupEventParser">
@@ -130,7 +131,7 @@
 				<property name="address" value="${canal.instance.master1.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
@@ -138,7 +139,7 @@
 				<property name="address" value="${canal.instance.standby1.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		
@@ -228,7 +229,7 @@
 				<property name="address" value="${canal.instance.master2.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
@@ -236,7 +237,7 @@
 				<property name="address" value="${canal.instance.standby2.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		

+ 2 - 1
deployer/src/main/resources/spring/local-instance.xml

@@ -67,6 +67,7 @@
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
 		<property name="eventStore" ref="eventStore" />
+		<property name="filterTransactionEntry" value="${canal.instance.filter.transaction.entry:false}"/>
 	</bean>
 
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.rds.RdsLocalBinlogEventParser">
@@ -113,7 +114,7 @@
 				<property name="address" value="${canal.instance.master.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		

+ 3 - 2
deployer/src/main/resources/spring/memory-instance.xml

@@ -64,6 +64,7 @@
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
 		<property name="eventStore" ref="eventStore" />
+		<property name="filterTransactionEntry" value="${canal.instance.filter.transaction.entry:false}"/>
 	</bean>
 
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
@@ -121,7 +122,7 @@
 				<property name="address" value="${canal.instance.master.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
@@ -129,7 +130,7 @@
 				<property name="address" value="${canal.instance.standby.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		

+ 67 - 0
docker/Dockerfile

@@ -0,0 +1,67 @@
+FROM centos:centos6.7
+
+MAINTAINER agapple (jianghang115@gmail.com)
+
+# install system
+RUN \
+    /bin/cp -f /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
+    echo 'root:Hello1234' | chpasswd && \
+    groupadd -r admin && useradd -g admin admin && \
+    yum install -y man && \
+    yum install -y dstat && \
+    yum install -y unzip && \
+    yum install -y nc && \
+    yum install -y openssh-server && \
+    yum install -y tar && \
+    yum install -y which && \
+    yum install -y wget && \
+    yum install -y perl && \
+    yum install -y file && \
+    ssh-keygen -q -N "" -t dsa -f /etc/ssh/ssh_host_dsa_key && \
+    ssh-keygen -q -N "" -t rsa -f /etc/ssh/ssh_host_rsa_key && \
+    sed -ri 's/session    required     pam_loginuid.so/#session    required     pam_loginuid.so/g' /etc/pam.d/sshd && \
+    sed -i -e 's/^#Port 22$/Port 2222/' /etc/ssh/sshd_config && \
+    mkdir -p /root/.ssh && chown root.root /root && chmod 700 /root/.ssh && \
+    yum install -y cronie && \
+    sed -i '/session required pam_loginuid.so/d' /etc/pam.d/crond && \
+    yum clean all && \
+    true
+
+# install canal
+COPY image/ /tmp/docker/
+COPY canal.deployer-*.tar.gz /home/admin/
+COPY jdk-8-linux-x64.rpm /tmp/
+
+RUN \
+    cp -R /tmp/docker/alidata /alidata && \
+    chmod +x /alidata/bin/* && \
+    mkdir -p /home/admin && \
+    cp -R /tmp/docker/admin/* /home/admin/  && \
+    /bin/cp -f alidata/bin/lark-wait /usr/bin/lark-wait && \
+
+    touch /var/lib/rpm/* && \ 
+    yum -y install /tmp/jdk-8-linux-x64.rpm && \
+    /bin/rm -f /tmp/jdk-8-linux-x64.rpm && \
+
+    echo "export JAVA_HOME=/usr/java/latest" >> /etc/profile && \
+    echo "export PATH=\$JAVA_HOME/bin:\$PATH" >> /etc/profile && \
+    /bin/mv /home/admin/bin/clean_log /etc/cron.d && \
+
+    mkdir -p /home/admin/canal-server && \
+    tar -xzvf /home/admin/canal.deployer-*.tar.gz -C /home/admin/canal-server && \
+    /bin/rm -f /home/admin/canal.deployer-*.tar.gz && \
+
+    mkdir -p home/admin/canal-server/logs  && \
+    chmod +x /home/admin/*.sh  && \
+    chmod +x /home/admin/bin/*.sh  && \
+    chown admin: -R /home/admin && \
+    yum clean all && \
+    true
+
+# 2222 sys , 8080 web , 8000 debug , 11111 canal
+EXPOSE 2222 11111 8000 8080
+
+WORKDIR /home/admin
+
+ENTRYPOINT [ "/alidata/bin/main.sh" ]
+CMD [ "/home/admin/app.sh" ]

+ 30 - 0
docker/build.sh

@@ -0,0 +1,30 @@
+#!/bin/bash
+
+current_path=`pwd`
+case "`uname`" in
+    Darwin)
+        bin_abs_path=`cd $(dirname $0); pwd`
+        ;;
+    Linux)
+        bin_abs_path=$(readlink -f $(dirname $0))
+        ;;
+    *)
+        bin_abs_path=`cd $(dirname $0); pwd`
+        ;;
+esac
+BASE=${bin_abs_path}
+
+if [ ! -f $BASE/jdk*.rpm ] ; then
+    DOWNLOAD_LINK="http://download.oracle.com/otn-pub/java/jdk/8u181-b13/96a7b8442fe848ef90c96a2fad6ed6d1/jdk-8u181-linux-x64.tar.gz"
+    wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=xxx; oraclelicense=accept-securebackup-cookie" "$DOWNLOAD_LINK" -O $BASE/jdk-8-linux-x64.rpm
+fi
+
+cd $BASE/../ && mvn clean package -Dmaven.test.skip -Denv=release && cd $current_path ;
+
+if [ "$1" == "kafka" ] ; then
+	cp $BASE/../target/canal-kafka-*.tar.gz $BASE/
+	docker build --no-cache -t canal/canal-server $BASE/
+else 
+	cp $BASE/../target/canal.deployer-*.tar.gz $BASE/
+	docker build --no-cache -t canal/canal-server $BASE/
+fi

+ 117 - 0
docker/image/admin/app.sh

@@ -0,0 +1,117 @@
+#!/bin/bash
+set -e
+
+source /etc/profile
+export JAVA_HOME=/usr/java/latest
+export PATH=$JAVA_HOME/bin:$PATH
+touch /tmp/start.log
+chown admin: /tmp/start.log
+chown -R admin: /home/admin/canal-server
+host=`hostname -i`
+
+# waitterm
+#   wait TERM/INT signal.
+#   see: http://veithen.github.io/2014/11/16/sigterm-propagation.html
+waitterm() {
+        local PID
+        # any process to block
+        tail -f /dev/null &
+        PID="$!"
+        # setup trap, could do nothing, or just kill the blocker
+        trap "kill -TERM ${PID}" TERM INT
+        # wait for signal, ignore wait exit code
+        wait "${PID}" || true
+        # clear trap
+        trap - TERM INT
+        # wait blocker, ignore blocker exit code
+        wait "${PID}" 2>/dev/null || true
+}
+
+# waittermpid "${PIDFILE}".
+#   monitor process by pidfile && wait TERM/INT signal.
+#   if the process disappeared, return 1, means exit with ERROR.
+#   if TERM or INT signal received, return 0, means OK to exit.
+waittermpid() {
+        local PIDFILE PID do_run error
+        PIDFILE="${1?}"
+        do_run=true
+        error=0
+        trap "do_run=false" TERM INT
+        while "${do_run}" ; do
+                PID="$(cat "${PIDFILE}")"
+                if ! ps -p "${PID}" >/dev/null 2>&1 ; then
+                        do_run=false
+                        error=1
+                else
+                        sleep 1
+                fi
+        done
+        trap - TERM INT
+        return "${error}"
+}
+
+
+function checkStart() {
+    local name=$1
+    local cmd=$2
+    local timeout=$3
+    cost=5
+    while [ $timeout -gt 0 ]; do
+        ST=`eval $cmd`
+        if [ "$ST" == "0" ]; then
+            sleep 1
+            let timeout=timeout-1
+            let cost=cost+1
+        elif [ "$ST" == "" ]; then
+            sleep 1
+            let timeout=timeout-1
+            let cost=cost+1
+        else
+            break
+        fi
+    done
+    echo "start $name successful"
+}
+
+
+function start_canal() {
+    echo "start canal ..."
+    serverPort=`perl -le 'print $ENV{"canal.port"}'`
+    if [ -z "$serverPort" ] ; then
+        serverPort=11111
+    fi
+
+    destination=`perl -le 'print $ENV{"canal.destinations"}'`
+    if [[ "$destination" =~ ',' ]]; then
+        echo "multi destination:$destination is not support"
+        exit 1;
+    else
+        mv /home/admin/canal-server/conf/example /home/admin/canal-server/conf/$destination
+    fi
+    su admin -c 'cd /home/admin/canal-server/bin/ && sh restart.sh 1>>/tmp/start.log 2>&1'
+    sleep 5
+    #check start
+    checkStart "canal" "nc 127.0.0.1 $serverPort -w 1 -z | wc -l" 30
+}
+
+function stop_canal() {
+    echo "stop canal"
+    su admin -c 'cd /home/admin/canal-server/bin/ && sh stop.sh 1>>/tmp/start.log 2>&1'
+    echo "stop canal successful ..."
+}
+
+echo "==> START ..."
+
+start_canal
+
+echo "==> START SUCCESSFUL ..."
+
+tail -f /dev/null &
+# wait TERM signal
+waitterm
+
+echo "==> STOP"
+
+stop_canal
+
+echo "==> STOP SUCCESSFUL ..."

+ 2 - 0
docker/image/admin/bin/clean_log

@@ -0,0 +1,2 @@
+# cron: clean logs once every 2 minutes
+*/2 * * * * admin /home/admin/bin/clean_log.sh >>/tmp/clean_log.log 2>&1

+ 45 - 0
docker/image/admin/bin/clean_log.sh

@@ -0,0 +1,45 @@
+#!/bin/bash
+
+# Global Settings
+PATH="$HOME/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/usr/X11R6/bin:/root/bin"
+export PATH
+
+CUTOFF="85"
+# get the highest disk-usage percentage among all partitions
+USAGE=$(df -h|awk 'NR>1 {gsub(/%$/,"",$5);print $5 }'|sort -nr|head -1)
+before=$USAGE
+
+baseClean(){
+    # delete files under /tmp older than 15 days
+    # refresh the hsperfdata timestamps first so they are not swept away
+    if [ -d /tmp/hsperfdata_admin ]
+    then
+        touch /tmp/hsperfdata_admin
+        touch /tmp/hsperfdata_admin/*
+    fi
+
+    find /tmp/ -type f -mtime +15 | xargs -t rm -rf >/dev/null 2>&1
+
+
+    now=$(df -h|awk 'NR>1 {gsub(/%$/,"",$5);print $5 }'|sort -nr|head -1)
+    echo "before:$before; now:$now"
+}
+
+CANAL_DIR="/home/admin/canal-server/logs"
+if [[ -d $CANAL_DIR ]]; then
+  USAGE=$(df -h|awk 'NR>1 {gsub(/%$/,"",$5);print $5 }'|sort -nr|head -1)
+  if [[ $USAGE -ge 90 ]]; then
+        find $CANAL_DIR -type f -mtime +7 | xargs rm -rf {}
+  fi
+  USAGE=$(df -h|awk 'NR>1 {gsub(/%$/,"",$5);print $5 }'|sort -nr|head -1)
+  if [[ $USAGE -ge 80 ]]; then
+        find $CANAL_DIR -type f -mtime +3 | xargs rm -rf {}
+  fi
+  USAGE=$(df -h|awk 'NR>1 {gsub(/%$/,"",$5);print $5 }'|sort -nr|head -1)
+  if [[ $USAGE -ge 80 ]]; then
+        find $CANAL_DIR -type d -empty -mtime +3 | grep -v canal | xargs rm -rf {}
+        find $CANAL_DIR -type f -iname '*.tmp' | xargs rm -rf {}
+  fi
+  baseClean
+  exit 0
+fi

+ 13 - 0
docker/image/admin/health.sh

@@ -0,0 +1,13 @@
+#!/bin/sh
+CHECK_URL="http://127.0.0.1:8080/metrics"
+CHECK_POINT="success"
+CHECK_COUNT=`curl -s --connect-timeout 7 --max-time 7 $CHECK_URL | grep -c $CHECK_POINT`
+if [ $CHECK_COUNT -eq 0 ]; then
+    echo "[FAILED]"
+    status=0
+	error=1
+else
+    echo "[  OK  ]"
+    status=1
+	error=0
+fi

+ 11 - 0
docker/image/alidata/bin/exec_rc_local.sh

@@ -0,0 +1,11 @@
+#!/bin/bash
+
+if [ "${SKIP_EXEC_RC_LOCAL}" = "YES" ] ; then
+	echo "skip /etc/rc.local: SKIP_EXEC_RC_LOCAL=${SKIP_EXEC_RC_LOCAL}"
+	exit
+fi
+
+if [ "${DOCKER_DEPLOY_TYPE}" = "HOST" ] ; then
+	echo "skip /etc/rc.local: DOCKER_DEPLOY_TYPE=${DOCKER_DEPLOY_TYPE}"
+	exit
+fi

+ 6 - 0
docker/image/alidata/bin/lark-wait

@@ -0,0 +1,6 @@
+#!/bin/bash
+set -e
+
+chown admin: -R /home/admin/
+source /alidata/lib/proc.sh
+waitterm

+ 27 - 0
docker/image/alidata/bin/main.sh

@@ -0,0 +1,27 @@
+#!/bin/bash
+
+[ -n "${DOCKER_DEPLOY_TYPE}" ] || DOCKER_DEPLOY_TYPE="VM"
+echo "DOCKER_DEPLOY_TYPE=${DOCKER_DEPLOY_TYPE}"
+
+# run init scripts
+for e in $(ls /alidata/init/*) ; do
+	[ -x "${e}" ] || continue
+	echo "==> INIT $e"
+	$e
+	echo "==> EXIT CODE: $?"
+done
+
+echo "==> INIT DEFAULT"
+service sshd start
+service crond start
+
+#echo "check hostname -i: `hostname -i`"
+#hti_num=`hostname -i|awk '{print NF}'`
+#if [ $hti_num -gt 1 ];then
+#    echo "hostname -i result error:`hostname -i`"
+#    exit 120
+#fi
+
+echo "==> INIT DONE"
+echo "==> RUN ${*}"
+exec "${@}"

+ 19 - 0
docker/image/alidata/init/02init-sshd.sh

@@ -0,0 +1,19 @@
+#!/bin/bash
+
+# set port
+if [ -z "${SSHD_PORT}" ] ; then
+	SSHD_PORT=22
+	[ "${DOCKER_DEPLOY_TYPE}" = "HOST" ] && SSHD_PORT=2222
+fi
+
+sed -r -i '/^OPTIONS=/ d' /etc/sysconfig/sshd
+echo 'OPTIONS="-p '"${SSHD_PORT}"'"' >> /etc/sysconfig/sshd
+
+# set admin ssh public key
+if [ "${USE_ADMIN_PASSAGE}" = "YES" ] ; then
+    echo "set admin passage"
+    mkdir -p /home/admin/.ssh
+    chown admin:admin /home/admin/.ssh
+    chown admin:admin /home/admin/.ssh/authorized_keys
+    chmod 644 /home/admin/.ssh/authorized_keys
+fi

+ 66 - 0
docker/image/alidata/init/fix-hosts.py

@@ -0,0 +1,66 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#****************************************************************#
+# Create Date: 2017-01-06 17:58
+#***************************************************************#
+
+import socket
+import shutil
+from time import gmtime, strftime
+
+# get host_name
+host_name = socket.gethostname()
+tmp_file = "/tmp/.lark-fix-host.hosts"
+host_file = "/etc/hosts"
+bak_file_name = "/tmp/hosts-fix-bak.%s" % ( strftime("%Y-%m-%d_%H-%M-%S", gmtime()) )
+
+# load /etc/hosts file context
+FH = open(host_file,"r")
+file_lines = [ i.rstrip() for i in FH.readlines()]
+FH.close()
+file_lines_reverse = file_lines[::-1]
+new_lines = []
+bad_lines = []
+last_match_line = ""
+
+for line in file_lines_reverse:
+    if line.find(host_name) < 0:  # 不匹配的行直接跳过
+        new_lines.append(line + "\n")
+        continue
+
+    cols = line.split()
+    new_cols = []
+    if cols[0].startswith("#"): # 跳过已经注释掉的行
+        new_lines.append(line + "\n")
+        continue
+    for col in cols:
+        if not col == host_name: # 跳过不匹配的列
+            new_cols.append(col)
+            continue
+
+        if cols[0] == "127.0.0.1": # 如果第一列是 127.0.0.1 就跳过匹配的列, 防止 hostname -i 返回 127.0.0.1
+            continue
+
+        # 如果已经发现过匹配的列, 就丢掉重复的列
+        if not len(last_match_line) == 0:
+            continue
+
+        new_cols.append(col)
+        last_match_line = line
+
+    # 跳过 xx.xx.xx.xx hostname 这样的重复列
+    if len(new_cols) == 1:
+        continue
+
+    new_l = "%s\n" % " ".join(new_cols)
+    new_lines.append(new_l)
+
+# save tmp hosts
+
+FH2=file(tmp_file,"w+")
+FH2.writelines( new_lines[::-1])
+FH2.close()
+
+# mv to /etc/hosts
+shutil.copy(host_file, bak_file_name)
+shutil.move(tmp_file, host_file)

+ 40 - 0
docker/image/alidata/lib/proc.sh

@@ -0,0 +1,40 @@
+# waitterm
+#   wait TERM/INT signal.
+#   see: http://veithen.github.io/2014/11/16/sigterm-propagation.html
+waitterm() {
+	local PID
+	# any process to block
+	tail -f /dev/null &
+	PID="$!"
+	# setup trap, could do nothing, or just kill the blocker
+	trap "kill -TERM ${PID}" TERM INT
+	# wait for signal, ignore wait exit code
+	wait "${PID}" || true
+	# clear trap
+	trap - TERM INT
+	# wait blocker, ignore blocker exit code
+	wait "${PID}" 2>/dev/null || true
+}
+
+# waittermpid "${PIDFILE}".
+#   monitor process by pidfile && wait TERM/INT signal.
+#   if the process disappeared, return 1, means exit with ERROR.
+#   if TERM or INT signal received, return 0, means OK to exit.
+waittermpid() {
+	local PIDFILE PID do_run error
+	PIDFILE="${1?}"
+	do_run=true
+	error=0
+	trap "do_run=false" TERM INT
+	while "${do_run}" ; do
+		PID="$(cat "${PIDFILE}")"
+		if ! ps -p "${PID}" >/dev/null 2>&1 ; then
+			do_run=false
+			error=1
+		else
+			sleep 1
+		fi
+	done
+	trap - TERM INT
+	return "${error}"
+}

+ 92 - 0
docker/run.sh

@@ -0,0 +1,92 @@
+#!/bin/bash
+
+function usage() {
+    echo "Usage:"
+    echo "  run.sh [CONFIG]"
+    echo "example:"
+    echo "  run.sh -e canal.instance.master.address=127.0.0.1:3306 \\"
+    echo "         -e canal.instance.dbUsername=canal \\"
+    echo "         -e canal.instance.dbPassword=canal \\"
+    echo "         -e canal.instance.connectionCharset=UTF-8 \\"
+    echo "         -e canal.instance.tsdb.enable=true \\"
+    echo "         -e canal.instance.gtidon=false \\"
+    echo "         -e canal.instance.filter.regex=.*\\..* "
+    exit
+}
+
+function check_port() {
+    local port=$1
+    local TL=$(which telnet)
+    if [ -f $TL ]; then
+        data=`echo quit | telnet 127.0.0.1 $port| grep -ic connected`
+        echo $data
+        return
+    fi
+
+    local NC=$(which nc)
+    if [ -f $NC ]; then
+        data=`nc -z -w 1 127.0.0.1 $port | grep -ic succeeded`
+        echo $data
+        return
+    fi
+    echo "0"
+    return
+}
+
+function getMyIp() {
+    case "`uname`" in
+        Darwin)
+         myip=`echo "show State:/Network/Global/IPv4" | scutil | grep PrimaryInterface | awk '{print $3}' | xargs ifconfig | grep inet | grep -v inet6 | awk '{print $2}'`
+         ;;
+        *)
+         myip=`ip route get 1 | awk '{print $NF;exit}'`
+         ;;
+  esac
+  echo $myip
+}
+
+NET_MODE=""
+case "`uname`" in
+    Darwin)
+        bin_abs_path=`cd $(dirname $0); pwd`
+        ;;
+    Linux)
+        bin_abs_path=$(readlink -f $(dirname $0))
+        NET_MODE="--net=host"
+        ;;
+    *)
+        NET_MODE="--net=host"
+        bin_abs_path=`cd $(dirname $0); pwd`
+        ;;
+esac
+BASE=${bin_abs_path}
+if [ $# -eq 0 ]; then
+    usage
+elif [ "$1" == "-h" ] ; then
+    usage
+elif [ "$1" == "help" ] ; then
+    usage
+fi
+
+DATA="$BASE/data"
+mkdir -p $DATA
+CONFIG=${@:1}
+#VOLUMNS="-v $DATA:/home/admin/canal-server/logs"
+PORTLIST="8000 8080 2222 11111"
+PORTS=""
+for PORT in $PORTLIST ; do
+    #exist=`check_port $PORT`
+    exist="0"
+    if [ "$exist" == "0" ]; then
+        PORTS="$PORTS -p $PORT:$PORT"
+    else
+        echo "port $PORT is used , pls check"
+        exit 1
+    fi
+done
+
+MEMORY="-m 4096m"
+LOCALHOST=`getMyIp`
+cmd="docker run -it -h $LOCALHOST $CONFIG --name=canal-server $VOLUMNS $NET_MODE $PORTS $MEMORY canal/canal-server"
+echo $cmd
+eval $cmd

+ 1 - 1
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/MysqlConnector.java

@@ -32,7 +32,7 @@ public class MysqlConnector {
     private String              password;
 
     private byte                charsetNumber     = 33;
-    private String              defaultSchema     = "retl";
+    private String              defaultSchema     = "test";
     private int                 soTimeout         = 30 * 1000;
     private int                 connTimeout       = 5 * 1000;
     private int                 receiveBufferSize = 16 * 1024;

+ 1 - 1
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/packets/client/BinlogDumpCommandPacket.java

@@ -54,7 +54,7 @@ public class BinlogDumpCommandPacket extends CommandPacket {
         // 1. write 4 bytes bin-log position to start at
         ByteHelper.writeUnsignedIntLittleEndian(binlogPosition, out);
         // 2. write 2 bytes bin-log flags
-        int binlog_flags = BINLOG_DUMP_NON_BLOCK;
+        int binlog_flags = 0;
         binlog_flags |= BINLOG_SEND_ANNOTATE_ROWS_EVENT;
         out.write(binlog_flags);
         out.write(0x00);

+ 2 - 2
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/socket/BioSocketChannel.java

@@ -1,5 +1,6 @@
 package com.alibaba.otter.canal.parse.driver.mysql.socket;
 
+import java.io.BufferedInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -24,7 +25,7 @@ public class BioSocketChannel implements SocketChannel {
 
     BioSocketChannel(Socket socket) throws IOException{
         this.socket = socket;
-        this.input = socket.getInputStream();
+        this.input = new BufferedInputStream(socket.getInputStream(), 16384);
         this.output = socket.getOutputStream();
     }
 
@@ -164,5 +165,4 @@ public class BioSocketChannel implements SocketChannel {
         this.socket = null;
     }
 
-
 }

+ 76 - 0
example/pom.xml

@@ -21,6 +21,82 @@
 			<artifactId>canal.protocol</artifactId>
 			<version>${project.version}</version>
 		</dependency>
+		<dependency>
+			<groupId>com.alibaba</groupId>
+			<artifactId>druid</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>mysql</groupId>
+			<artifactId>mysql-connector-java</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.ddlutils</groupId>
+			<artifactId>ddlutils</artifactId>
+			<version>1.0</version>
+			<exclusions>
+				<exclusion>
+					<groupId>commons-beanutils</groupId>
+					<artifactId>commons-beanutils-core</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-lang</groupId>
+					<artifactId>commons-lang</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-dbcp</groupId>
+					<artifactId>commons-dbcp</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-pool</groupId>
+					<artifactId>commons-pool</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-logging</groupId>
+					<artifactId>commons-logging-api</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>dom4j</groupId>
+					<artifactId>dom4j</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>stax</groupId>
+					<artifactId>stax-api</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-collections</groupId>
+					<artifactId>commons-collections</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-digester</groupId>
+					<artifactId>commons-digester</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-betwixt</groupId>
+					<artifactId>commons-betwixt</artifactId>
+				</exclusion>
+			</exclusions>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.commons</groupId>
+			<artifactId>commons-pool2</artifactId>
+			<version>2.5.0</version>
+		</dependency>
+		<dependency>
+			<groupId>commons-beanutils</groupId>
+			<artifactId>commons-beanutils</artifactId>
+			<version>1.8.2</version>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.commons</groupId>
+			<artifactId>commons-lang3</artifactId>
+			<version>3.7</version>
+		</dependency>
+		<dependency>
+			<groupId>commons-collections</groupId>
+			<artifactId>commons-collections</artifactId>
+			<version>3.2</version>
+		</dependency>
+
 		<!-- test dependency -->
 		<dependency>
 			<groupId>junit</groupId>

+ 1 - 0
example/src/main/java/com/alibaba/otter/canal/example/AbstractCanalClientTest.java

@@ -3,6 +3,7 @@ package com.alibaba.otter.canal.example;
 import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.SystemUtils;

+ 68 - 0
example/src/main/java/com/alibaba/otter/canal/example/SimpleCanalClientPermanceTest.java

@@ -0,0 +1,68 @@
+package com.alibaba.otter.canal.example;
+import java.net.InetSocketAddress;
+import java.util.concurrent.ArrayBlockingQueue;
+
+import com.alibaba.otter.canal.client.CanalConnector;
+import com.alibaba.otter.canal.client.CanalConnectors;
+import com.alibaba.otter.canal.client.impl.SimpleCanalConnector;
+import com.alibaba.otter.canal.protocol.Message;
+
+public class SimpleCanalClientPermanceTest {
+
+    public static void main(String args[]) {
+        String destination = "example";
+        String ip = "127.0.0.1";
+        int batchSize = 1024;
+        int count = 0;
+        int sum = 0;
+        int perSum = 0;
+        long start = System.currentTimeMillis();
+        long end = 0;
+        final ArrayBlockingQueue<Long> queue = new ArrayBlockingQueue<Long>(100);
+        try {
+            final CanalConnector connector = CanalConnectors.newSingleConnector(new InetSocketAddress(ip, 11111),
+                destination,
+                "",
+                "");
+
+            Thread ackThread = new Thread(new Runnable() {
+
+                @Override
+                public void run() {
+                    while (true) {
+                        try {
+                            long batchId = queue.take();
+                            connector.ack(batchId);
+                        } catch (InterruptedException e) {
+                        }
+                    }
+                }
+            });
+            ackThread.start();
+
+            ((SimpleCanalConnector) connector).setLazyParseEntry(true);
+            connector.connect();
+            connector.subscribe();
+            while (true) {
+                Message message = connector.getWithoutAck(batchSize);
+                long batchId = message.getId();
+                int size = message.getRawEntries().size();
+                sum += size;
+                perSum += size;
+                count++;
+                queue.add(batchId);
+                if (count % 10 == 0) {
+                    end = System.currentTimeMillis();
+                    long tps = (perSum * 1000) / (end - start);
+                    System.out.println(" total : " + sum + " , current : " + perSum + " , cost : " + (end - start)
+                                       + " , tps : " + tps);
+                    start = end;
+                    perSum = 0;
+                }
+            }
+        } catch (Throwable e) {
+            e.printStackTrace();
+        }
+    }
+
+}

+ 144 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/AbstractDbClient.java

@@ -0,0 +1,144 @@
+package com.alibaba.otter.canal.example.db;
+
+import com.alibaba.otter.canal.protocol.CanalEntry;
+import com.alibaba.otter.canal.protocol.Message;
+import org.slf4j.MDC;
+
+import java.util.Date;
+import java.util.List;
+
+public abstract class AbstractDbClient extends CanalConnectorClient {
+
+
+    public abstract void insert(CanalEntry.Header header, List<CanalEntry.Column> afterColumns);
+
+    public abstract void update(CanalEntry.Header header, List<CanalEntry.Column> afterColumns);
+
+    public abstract void delete(CanalEntry.Header header, List<CanalEntry.Column> beforeColumns);
+
+
+    @Override
+    public synchronized void start() {
+        if (running) {
+            return;
+        }
+        super.start();
+    }
+
+    @Override
+    public synchronized void stop() {
+        if (!running) {
+            return;
+        }
+        super.stop();
+        MDC.remove("destination");
+    }
+
+    @Override
+    protected void processMessage(Message message) {
+        long batchId = message.getId();
+        // iterate over every entry in the message
+        for (CanalEntry.Entry entry : message.getEntries()) {
+            session(entry);//no exception
+        }
+        // always ack the batch, regardless of processing outcome
+        connector.ack(batchId);
+    }
+
+    private void session(CanalEntry.Entry entry) {
+        CanalEntry.EntryType entryType = entry.getEntryType();
+        int times = 0;
+        boolean success = false;
+        while (!success) {
+            if (times > 0) {
+                /**
+                 * 1: retry — retry the entry, 3 times by default as decided by the
+                 *    retryTimes parameter; once the threshold is reached, skip it and log.
+                 * 2: ignore — skip immediately without retrying, and log it.
+                 */
+                if (exceptionStrategy == ExceptionStrategy.RETRY.code) {
+                    if (times >= retryTimes) {
+                        break;
+                    }
+                } else {
+                    break;
+                }
+            }
+            try {
+                switch (entryType) {
+                    case TRANSACTIONBEGIN:
+                        transactionBegin(entry);
+                        break;
+                    case TRANSACTIONEND:
+                        transactionEnd(entry);
+                        break;
+                    case ROWDATA:
+                        rowData(entry);
+                        break;
+                    default:
+                        break;
+                }
+                success = true;
+            } catch (Exception e) {
+                times++;
+                logger.error("parse event has an error ,times: + " + times + ", data:" + entry.toString(), e);
+            }
+
+        }
+    }
+
+    private void rowData(CanalEntry.Entry entry) throws Exception {
+        CanalEntry.RowChange rowChange = CanalEntry.RowChange.parseFrom(entry.getStoreValue());
+        CanalEntry.EventType eventType = rowChange.getEventType();
+        CanalEntry.Header header = entry.getHeader();
+        long executeTime = header.getExecuteTime();
+        long delayTime = new Date().getTime() - executeTime;
+        String sql = rowChange.getSql();
+
+        try {
+            if (!isDML(eventType) || rowChange.getIsDdl()) {
+                processDDL(header, eventType, sql);
+                return;
+            }
+            //处理DML数据
+            processDML(header, eventType, rowChange, sql);
+        } catch (Exception e) {
+            logger.error("process event error ,", e);
+            logger.error(rowFormat,
+                    new Object[]{header.getLogfileName(), String.valueOf(header.getLogfileOffset()),
+                            header.getSchemaName(), header.getTableName(), eventType,
+                            String.valueOf(executeTime), String.valueOf(delayTime)});
+            throw e;//重新抛出
+        }
+    }
+
+    /**
+     * 处理 dml 数据
+     *
+     * @param header
+     * @param eventType
+     * @param rowChange
+     * @param sql
+     */
+    protected void processDML(CanalEntry.Header header, CanalEntry.EventType eventType, CanalEntry.RowChange rowChange, String sql) {
+        for (CanalEntry.RowData rowData : rowChange.getRowDatasList()) {
+            switch (eventType) {
+                case DELETE:
+                    delete(header, rowData.getBeforeColumnsList());
+                    break;
+                case INSERT:
+                    insert(header, rowData.getAfterColumnsList());
+                    break;
+                case UPDATE:
+                    update(header, rowData.getAfterColumnsList());
+                    break;
+                default:
+                    whenOthers(header, sql);
+            }
+        }
+    }
+
+}
+
+
+
+

+ 488 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/CanalConnectorClient.java

@@ -0,0 +1,488 @@
+package com.alibaba.otter.canal.example.db;
+
+import com.alibaba.otter.canal.client.CanalConnector;
+import com.alibaba.otter.canal.client.CanalConnectors;
+import com.alibaba.otter.canal.common.AbstractCanalLifeCycle;
+import com.alibaba.otter.canal.protocol.CanalEntry;
+import com.alibaba.otter.canal.protocol.Message;
+import org.apache.commons.lang.SystemUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
+import org.springframework.beans.factory.InitializingBean;
+import org.springframework.util.CollectionUtils;
+
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.List;
+
+/**
+ * Skeleton canal consumer: owns the {@link CanalConnector} lifecycle and runs
+ * the fetch loop on a background thread, delegating per-batch handling to the
+ * abstract {@link #processMessage(Message)}. Started by Spring through
+ * {@link InitializingBean#afterPropertiesSet()}.
+ */
+public abstract class CanalConnectorClient extends AbstractCanalLifeCycle implements InitializingBean {
+
+    protected static final Logger logger = LoggerFactory.getLogger(CanalConnectorClient.class);
+    protected static final String SEP = SystemUtils.LINE_SEPARATOR;
+    // log message templates, assembled once in the static initializer below
+    protected static String contextFormat;
+    protected static String rowFormat;
+    protected static String transactionFormat;
+    protected static final String DATE_FORMAT = "yyyy-MM-dd HH:mm:ss";
+
+    static {
+        StringBuilder sb = new StringBuilder();
+        sb.append(SEP)
+                .append("-------------Batch-------------")
+                .append(SEP)
+                .append("* Batch Id: [{}] ,count : [{}] , Mem size : [{}] , Time : {}")
+                .append(SEP)
+                .append("* Start : [{}] ")
+                .append(SEP)
+                .append("* End : [{}] ")
+                .append(SEP)
+                .append("-------------------------------")
+                .append(SEP);
+        contextFormat = sb.toString();
+
+        sb = new StringBuilder();
+        sb.append(SEP)
+                .append("+++++++++++++Row+++++++++++++>>>")
+                .append("binlog[{}:{}] , name[{},{}] , eventType : {} , executeTime : {} , delay : {}ms")
+                .append(SEP);
+        rowFormat = sb.toString();
+
+        sb = new StringBuilder();
+        sb.append(SEP)
+                .append("===========Transaction {} : {}=======>>>")
+                .append("binlog[{}:{}] , executeTime : {} , delay : {}ms")
+                .append(SEP);
+        transactionFormat = sb.toString();
+    }
+
+    private String zkServers;// cluster mode: zookeeper server list
+    private String address;// standalone mode: "ip:port"
+    private String destination;
+    private String username;
+    private String password;
+    private int batchSize = 5 * 1024;
+    private String filter = "";// same syntax as the canal filter; restricts which databases/tables are consumed
+    protected boolean debug = false;// when true, details of every message are logged
+
+    // exception strategy:
+    //   1: retry  - retry up to retryTimes; once the threshold is reached, skip and log.
+    //   2: ignore - skip without retrying, log only.
+    // NOTE(review): validated in afterPropertiesSet() but not consulted inside process();
+    // presumably meant for subclasses - confirm.
+    protected int exceptionStrategy = 1;
+    protected int retryTimes = 3;
+    protected int waitingTime = 100;// main-loop sleep (ms) when the binlog has no data; must be > 0
+
+
+    protected CanalConnector connector;
+    protected Thread thread;
+
+    // last-resort handler: logs anything the worker thread failed to catch
+    protected Thread.UncaughtExceptionHandler handler = new Thread.UncaughtExceptionHandler() {
+
+        public void uncaughtException(Thread t, Throwable e) {
+            logger.error("process message has an error", e);
+        }
+    };
+
+    /**
+     * Validates configuration and starts the consumer once Spring has injected
+     * all properties.
+     */
+    @Override
+    public void afterPropertiesSet() {
+        if (waitingTime <= 0) {
+            throw new IllegalArgumentException("waitingTime must be greater than 0");
+        }
+        if (ExceptionStrategy.codeOf(exceptionStrategy) == null) {
+            throw new IllegalArgumentException("exceptionStrategy is not valid,1 or 2");
+        }
+        start();
+    }
+
+    /** Builds the connector and launches the background fetch thread (idempotent). */
+    @Override
+    public void start() {
+        if (running) {
+            return;
+        }
+        super.start();
+        initConnector();
+
+        thread = new Thread(new Runnable() {
+
+            public void run() {
+                process();
+            }
+        });
+
+        thread.setUncaughtExceptionHandler(handler);
+        thread.start();
+    }
+
+    /** Stops the consumer and waits for the worker thread to terminate (idempotent). */
+    @Override
+    public void stop() {
+        if (!running) {
+            return;
+        }
+        super.stop();
+        quietlyStop(thread);
+    }
+
+    // interrupt the worker and join it; the joiner's own interrupt is swallowed
+    protected void quietlyStop(Thread task) {
+        if (task != null) {
+            task.interrupt();
+            try {
+                task.join();
+            } catch (InterruptedException e) {
+                // ignore
+            }
+        }
+    }
+
+    /**
+     * Main fetch loop: (re)connects, subscribes, rolls back any un-acked batch,
+     * then repeatedly pulls batches without ack and hands them to
+     * {@link #processMessage(Message)}. On error it backs off (see
+     * {@link #sleepWhenFailed(int)}) and reconnects.
+     * NOTE(review): no ack/rollback per batch here - presumably processMessage
+     * implementations acknowledge; confirm against subclasses.
+     */
+    public void process() {
+        int times = 0;
+        while (running) {
+            try {
+                sleepWhenFailed(times);
+                //after block, should check the status of thread.
+                if (!running) {
+                    break;
+                }
+                MDC.put("destination", destination);
+                connector.connect();
+                connector.subscribe(filter);
+                connector.rollback();
+                times = 0;//reset;
+
+                while (running) {
+                    // fetch up to batchSize entries without acknowledging
+                    Message message = connector.getWithoutAck(batchSize);
+
+                    long batchId = message.getId();
+                    int size = message.getEntries().size();
+
+                    if (batchId == -1 || size == 0) {
+                        // nothing to consume yet: back off briefly
+                        try {
+                            Thread.sleep(waitingTime);
+                        } catch (InterruptedException e) {
+                            //
+                        }
+                        continue;
+                    }
+                    //logger
+                    printBatch(message, batchId);
+
+                    processMessage(message);
+
+                }
+            } catch (Exception e) {
+                logger.error("process error!", e);
+                // wrap the failure counter so the back-off stays bounded
+                if (times > 20) {
+                    times = 0;
+                }
+                times++;
+            } finally {
+                connector.disconnect();
+                MDC.remove("destination");
+            }
+        }
+    }
+
+    /** Handles one fetched batch; implementations own ack/rollback semantics. */
+    protected abstract void processMessage(Message message);
+
+
+    // cluster connector when zkServers is set, otherwise a single-node connector
+    private void initConnector() {
+        if (zkServers != null && zkServers.length() > 0) {
+            connector = CanalConnectors.newClusterConnector(zkServers, destination, username, password);
+        } else if (address != null) {
+            String[] segments = address.split(":");
+            SocketAddress socketAddress = new InetSocketAddress(segments[0], Integer.valueOf(segments[1]));
+            connector = CanalConnectors.newSingleConnector(socketAddress, destination, username, password);
+        } else {
+            throw new IllegalArgumentException("zkServers or address cant be null at same time,you should specify one of them!");
+        }
+
+    }
+
+    /**
+     * Back-off used when the connection keeps failing: we should not retry
+     * immediately, or a persistent failure would busy-spin the CPU and flood
+     * the log. Sleep grows linearly with the failure count.
+     *
+     * @param times consecutive failure count; 0 or less means no sleep
+     */
+    private void sleepWhenFailed(int times) {
+        if (times <= 0) {
+            return;
+        }
+        try {
+            int sleepTime = 1000 + times * 100;// ~3s at most, since the caller wraps times after 20
+            Thread.sleep(sleepTime);
+        } catch (Exception ex) {
+            //
+        }
+    }
+
+    /**
+     * Logs a summary of the current batch (debug mode only).
+     *
+     * @param message the fetched batch
+     * @param batchId batch id reported by the server
+     */
+    protected void printBatch(Message message, long batchId) {
+        if (!debug) {
+            return;
+        }
+        List<CanalEntry.Entry> entries = message.getEntries();
+        if (CollectionUtils.isEmpty(entries)) {
+            return;
+        }
+
+        long memSize = 0;
+        for (CanalEntry.Entry entry : entries) {
+            memSize += entry.getHeader().getEventLength();
+        }
+        int size = entries.size();
+        String startPosition = buildPosition(entries.get(0));
+        String endPosition = buildPosition(message.getEntries().get(size - 1));
+
+        SimpleDateFormat format = new SimpleDateFormat(DATE_FORMAT);
+        logger.info(contextFormat, new Object[]{batchId, size, memSize, format.format(new Date()), startPosition, endPosition});
+    }
+
+    // renders "logfile:offset:executeTime(formatted date)" for one entry
+    protected String buildPosition(CanalEntry.Entry entry) {
+        CanalEntry.Header header = entry.getHeader();
+        long time = header.getExecuteTime();
+        Date date = new Date(time);
+        SimpleDateFormat format = new SimpleDateFormat(DATE_FORMAT);
+        StringBuilder sb = new StringBuilder();
+        sb.append(header.getLogfileName())
+                .append(":")
+                .append(header.getLogfileOffset())
+                .append(":")
+                .append(header.getExecuteTime())
+                .append("(")
+                .append(format.format(date))
+                .append(")");
+        return sb.toString();
+    }
+
+    /**
+     * Default handling of a transaction-begin entry: logs it (debug mode only).
+     *
+     * @param entry transaction-begin entry
+     */
+    protected void transactionBegin(CanalEntry.Entry entry) {
+        if (!debug) {
+            return;
+        }
+        try {
+            CanalEntry.TransactionBegin begin = CanalEntry.TransactionBegin.parseFrom(entry.getStoreValue());
+            // log the transaction header: binlog position and replication delay
+            CanalEntry.Header header = entry.getHeader();
+            long executeTime = header.getExecuteTime();
+            long delayTime = new Date().getTime() - executeTime;
+            logger.info(transactionFormat,
+                    new Object[]{"begin", begin.getTransactionId(), header.getLogfileName(),
+                            String.valueOf(header.getLogfileOffset()),
+                            String.valueOf(header.getExecuteTime()), String.valueOf(delayTime)});
+        } catch (Exception e) {
+            logger.error("parse event has an error , data:" + entry.toString(), e);
+        }
+    }
+
+    /** Default handling of a transaction-end entry: logs the transaction id (debug mode only). */
+    protected void transactionEnd(CanalEntry.Entry entry) {
+        if (!debug) {
+            return;
+        }
+        try {
+            CanalEntry.TransactionEnd end = CanalEntry.TransactionEnd.parseFrom(entry.getStoreValue());
+            // log the commit information: transaction id, position, delay
+            CanalEntry.Header header = entry.getHeader();
+            long executeTime = header.getExecuteTime();
+            long delayTime = new Date().getTime() - executeTime;
+
+            logger.info(transactionFormat,
+                    new Object[]{"end", end.getTransactionId(), header.getLogfileName(),
+                            String.valueOf(header.getLogfileOffset()),
+                            String.valueOf(header.getExecuteTime()), String.valueOf(delayTime)});
+        } catch (Exception e) {
+            logger.error("parse event has an error , data:" + entry.toString(), e);
+        }
+    }
+
+    /**
+     * @param eventType entry event type
+     * @return true when the event is row-level DML (INSERT/UPDATE/DELETE)
+     */
+    protected boolean isDML(CanalEntry.EventType eventType) {
+        switch (eventType) {
+            case INSERT:
+            case UPDATE:
+            case DELETE:
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    /**
+     * Logs DDL events (debug mode only).
+     *
+     * @param header    entry header (schema/table)
+     * @param eventType the DDL event type
+     * @param sql       the DDL statement text
+     */
+
+    protected void processDDL(CanalEntry.Header header, CanalEntry.EventType eventType, String sql) {
+        if (!debug) {
+            return;
+        }
+        String table = header.getSchemaName() + "." + header.getTableName();
+        // DDL carries no row-change payload, so only the statement itself is reported
+        switch (eventType) {
+            case CREATE:
+                logger.warn("parse create table event, table: {}, sql: {}", table, sql);
+                return;
+            case ALTER:
+                logger.warn("parse alter table event, table: {}, sql: {}", table, sql);
+                return;
+            case TRUNCATE:
+                logger.warn("parse truncate table event, table: {}, sql: {}", table, sql);
+                return;
+            case ERASE:
+            case QUERY:
+                logger.warn("parse event : {}, sql: {} . ignored!", eventType.name(), sql);
+                return;
+            case RENAME:
+                logger.warn("parse rename table event, table: {}, sql: {}", table, sql);
+                return;
+            case CINDEX:
+                logger.warn("parse create index event, table: {}, sql: {}", table, sql);
+                return;
+            case DINDEX:
+                logger.warn("parse delete index event, table: {}, sql: {}", table, sql);
+                return;
+            default:
+                logger.warn("parse unknown event: {}, table: {}, sql: {}", new String[]{eventType.name(), table, sql});
+                break;
+        }
+    }
+
+    /**
+     * Fallback for operations other than insert/update/delete; the default
+     * implementation only logs. Overriders are strongly advised to catch
+     * their own exceptions - this path is non-critical.
+     *
+     * @param header entry header - source of the schema and table names
+     * @param sql    SQL attached to the event
+     */
+    public void whenOthers(CanalEntry.Header header, String sql) {
+        String schema = header.getSchemaName();
+        String table = header.getTableName();
+        logger.error("ignore event,schema: {},table: {},SQL: {}", new String[]{schema, table, sql});
+    }
+
+    /** Failure-handling policies selectable via {@link #setExceptionStrategy(int)}. */
+    public enum ExceptionStrategy {
+        RETRY(1), IGNORE(2);
+        public int code;
+
+        ExceptionStrategy(int code) {
+            this.code = code;
+        }
+
+        // returns the matching strategy, or null when the code is unknown
+        public static ExceptionStrategy codeOf(Integer code) {
+            if (code != null) {
+                for (ExceptionStrategy e : ExceptionStrategy.values()) {
+                    if (e.code == code) {
+                        return e;
+                    }
+                }
+            }
+            return null;
+        }
+    }
+
+    // ---------------- plain getters / setters ----------------
+
+    public String getZkServers() {
+        return zkServers;
+    }
+
+    public void setZkServers(String zkServers) {
+        this.zkServers = zkServers;
+    }
+
+    public String getAddress() {
+        return address;
+    }
+
+    public void setAddress(String address) {
+        this.address = address;
+    }
+
+    public String getDestination() {
+        return destination;
+    }
+
+    public void setDestination(String destination) {
+        this.destination = destination;
+    }
+
+    public String getUsername() {
+        return username;
+    }
+
+    public void setUsername(String username) {
+        this.username = username;
+    }
+
+    public String getPassword() {
+        return password;
+    }
+
+    public void setPassword(String password) {
+        this.password = password;
+    }
+
+    public int getBatchSize() {
+        return batchSize;
+    }
+
+    public void setBatchSize(int batchSize) {
+        this.batchSize = batchSize;
+    }
+
+    public String getFilter() {
+        return filter;
+    }
+
+    public void setFilter(String filter) {
+        this.filter = filter;
+    }
+
+    public boolean isDebug() {
+        return debug;
+    }
+
+    public void setDebug(boolean debug) {
+        this.debug = debug;
+    }
+
+    public int getExceptionStrategy() {
+        return exceptionStrategy;
+    }
+
+    public void setExceptionStrategy(int exceptionStrategy) {
+        this.exceptionStrategy = exceptionStrategy;
+    }
+
+    public int getRetryTimes() {
+        return retryTimes;
+    }
+
+    public void setRetryTimes(int retryTimes) {
+        this.retryTimes = retryTimes;
+    }
+
+    public int getWaitingTime() {
+        return waitingTime;
+    }
+
+    public void setWaitingTime(int waitingTime) {
+        this.waitingTime = waitingTime;
+    }
+}

+ 35 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/MysqlLoadLauncher.java

@@ -0,0 +1,35 @@
+package com.alibaba.otter.canal.example.db;
+
+import com.alibaba.otter.canal.example.db.mysql.MysqlClient;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class MysqlLoadLauncher {
+    private static final Logger logger = LoggerFactory.getLogger(MysqlLoadLauncher.class);
+
+    public static void main(String[] args) {
+        try {
+            logger.info("## start the canal mysql client.");
+            final MysqlClient client = ServiceLocator.getMysqlClient();
+            logger.info("## the canal consumer is running now ......");
+            client.start();
+            Runtime.getRuntime().addShutdownHook(new Thread() {
+
+                public void run() {
+                    try {
+                        logger.info("## stop the canal consumer");
+                        client.stop();
+                    } catch (Throwable e) {
+                        logger.warn("##something goes wrong when stopping canal consumer:\n{}", e);
+                    } finally {
+                        logger.info("## canal consumer is down.");
+                    }
+                }
+
+            });
+        } catch (Throwable e) {
+            logger.error("## Something goes wrong when starting up the canal consumer:\n{}", e);
+            System.exit(0);
+        }
+    }
+}

+ 169 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/PropertyPlaceholderConfigurer.java

@@ -0,0 +1,169 @@
+package com.alibaba.otter.canal.example.db;
+
+import org.springframework.beans.factory.InitializingBean;
+import org.springframework.context.ResourceLoaderAware;
+import org.springframework.core.io.Resource;
+import org.springframework.core.io.ResourceLoader;
+import org.springframework.util.Assert;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+
+/**
+ * 扩展Spring的
+ * {@linkplain org.springframework.beans.factory.config.PropertyPlaceholderConfigurer}
+ * ,增加默认值的功能。 例如:${placeholder:defaultValue},假如placeholder的值不存在,则默认取得
+ * defaultValue。
+ * 
+ * @author jianghang 2013-1-24 下午03:37:56
+ * @version 1.0.0
+ */
+public class PropertyPlaceholderConfigurer extends org.springframework.beans.factory.config.PropertyPlaceholderConfigurer implements ResourceLoaderAware, InitializingBean {
+
+    private static final String PLACEHOLDER_PREFIX = "${";
+    private static final String PLACEHOLDER_SUFFIX = "}";
+    private ResourceLoader      loader;
+    private String[]            locationNames;
+
+    public PropertyPlaceholderConfigurer(){
+        setIgnoreUnresolvablePlaceholders(true);
+    }
+
+    public void setResourceLoader(ResourceLoader loader) {
+        this.loader = loader;
+    }
+
+    public void setLocationNames(String[] locations) {
+        this.locationNames = locations;
+    }
+
+    public void afterPropertiesSet() throws Exception {
+        Assert.notNull(loader, "no resourceLoader");
+
+        if (locationNames != null) {
+            for (int i = 0; i < locationNames.length; i++) {
+                locationNames[i] = resolveSystemPropertyPlaceholders(locationNames[i]);
+            }
+        }
+
+        if (locationNames != null) {
+            List<Resource> resources = new ArrayList<Resource>(locationNames.length);
+
+            for (String location : locationNames) {
+                location = trimToNull(location);
+
+                if (location != null) {
+                    resources.add(loader.getResource(location));
+                }
+            }
+
+            super.setLocations(resources.toArray(new Resource[resources.size()]));
+        }
+    }
+
+    private String resolveSystemPropertyPlaceholders(String text) {
+        StringBuilder buf = new StringBuilder(text);
+
+        for (int startIndex = buf.indexOf(PLACEHOLDER_PREFIX); startIndex >= 0;) {
+            int endIndex = buf.indexOf(PLACEHOLDER_SUFFIX, startIndex + PLACEHOLDER_PREFIX.length());
+
+            if (endIndex != -1) {
+                String placeholder = buf.substring(startIndex + PLACEHOLDER_PREFIX.length(), endIndex);
+                int nextIndex = endIndex + PLACEHOLDER_SUFFIX.length();
+
+                try {
+                    String value = resolveSystemPropertyPlaceholder(placeholder);
+
+                    if (value != null) {
+                        buf.replace(startIndex, endIndex + PLACEHOLDER_SUFFIX.length(), value);
+                        nextIndex = startIndex + value.length();
+                    } else {
+                        System.err.println("Could not resolve placeholder '"
+                                           + placeholder
+                                           + "' in ["
+                                           + text
+                                           + "] as system property: neither system property nor environment variable found");
+                    }
+                } catch (Throwable ex) {
+                    System.err.println("Could not resolve placeholder '" + placeholder + "' in [" + text
+                                       + "] as system property: " + ex);
+                }
+
+                startIndex = buf.indexOf(PLACEHOLDER_PREFIX, nextIndex);
+            } else {
+                startIndex = -1;
+            }
+        }
+
+        return buf.toString();
+    }
+
+    private String resolveSystemPropertyPlaceholder(String placeholder) {
+        DefaultablePlaceholder dp = new DefaultablePlaceholder(placeholder);
+        String value = System.getProperty(dp.placeholder);
+
+        if (value == null) {
+            value = System.getenv(dp.placeholder);
+        }
+
+        if (value == null) {
+            value = dp.defaultValue;
+        }
+
+        return value;
+    }
+
+    @Override
+    protected String resolvePlaceholder(String placeholder, Properties props, int systemPropertiesMode) {
+        DefaultablePlaceholder dp = new DefaultablePlaceholder(placeholder);
+        String value = super.resolvePlaceholder(dp.placeholder, props, systemPropertiesMode);
+
+        if (value == null) {
+            value = dp.defaultValue;
+        }
+
+        return trimToEmpty(value);
+    }
+
+    private static class DefaultablePlaceholder {
+
+        private final String defaultValue;
+        private final String placeholder;
+
+        public DefaultablePlaceholder(String placeholder){
+            int commaIndex = placeholder.indexOf(":");
+            String defaultValue = null;
+
+            if (commaIndex >= 0) {
+                defaultValue = trimToEmpty(placeholder.substring(commaIndex + 1));
+                placeholder = trimToEmpty(placeholder.substring(0, commaIndex));
+            }
+
+            this.placeholder = placeholder;
+            this.defaultValue = defaultValue;
+        }
+    }
+
+    private String trimToNull(String str) {
+        if (str == null) {
+            return null;
+        }
+
+        String result = str.trim();
+
+        if (result == null || result.length() == 0) {
+            return null;
+        }
+
+        return result;
+    }
+
+    public static String trimToEmpty(String str) {
+        if (str == null) {
+            return "";
+        }
+
+        return str.trim();
+    }
+}

+ 44 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/ServiceLocator.java

@@ -0,0 +1,44 @@
+package com.alibaba.otter.canal.example.db;
+
+import com.alibaba.otter.canal.example.db.mysql.MysqlClient;
+import org.springframework.beans.factory.DisposableBean;
+import org.springframework.context.ApplicationContext;
+import org.springframework.context.support.ClassPathXmlApplicationContext;
+import org.springframework.util.Assert;
+
+/**
+ * Static locator over the example's Spring context ("client-spring.xml").
+ * Also registered as a bean so {@link #destroy()} clears the static reference
+ * on context shutdown.
+ */
+public class ServiceLocator implements DisposableBean {
+
+    private static ApplicationContext applicationContext = null;
+
+    static {
+        // removed the original catch(RuntimeException){throw e;} - it was a no-op;
+        // a failure here still propagates (as ExceptionInInitializerError)
+        applicationContext = new ClassPathXmlApplicationContext("classpath:client-spring.xml");
+    }
+
+    // cast is unavoidable with the non-generic getBean(String); scoped suppression
+    @SuppressWarnings("unchecked")
+    private static <T> T getBean(String name) {
+        assertContextInjected();
+        return (T) applicationContext.getBean(name);
+    }
+
+
+    private static void clearHolder() {
+        ServiceLocator.applicationContext = null;
+    }
+
+    @Override
+    public void destroy() throws Exception {
+        ServiceLocator.clearHolder();
+    }
+
+    private static void assertContextInjected() {
+        Assert.state(applicationContext != null, "ApplicationContext not set");
+    }
+
+
+    /** @return the example MySQL client bean ("mysqlClient"). */
+    public static MysqlClient getMysqlClient() {
+        return getBean("mysqlClient");
+    }
+}

+ 121 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/AbstractDbDialect.java

@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2010-2101 Alibaba Group Holding Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.otter.canal.example.db.dialect;
+
+import com.alibaba.otter.canal.example.db.utils.DdlUtils;
+import com.google.common.base.Function;
+import com.google.common.collect.MigrateMap;
+import org.apache.commons.lang.exception.NestableRuntimeException;
+import org.apache.ddlutils.model.Table;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.ConnectionCallback;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.datasource.DataSourceTransactionManager;
+import org.springframework.jdbc.support.lob.LobHandler;
+import org.springframework.transaction.TransactionDefinition;
+import org.springframework.transaction.support.TransactionTemplate;
+import org.springframework.util.Assert;
+
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+public abstract class AbstractDbDialect implements DbDialect {
+
+    protected int databaseMajorVersion;
+    protected int databaseMinorVersion;
+    protected String databaseName;
+    protected JdbcTemplate jdbcTemplate;
+    protected TransactionTemplate transactionTemplate;
+    protected LobHandler lobHandler;
+    // computing cache: (schema, table) -> Table model; populated lazily by initTables()
+    protected Map<List<String>, Table> tables;
+
+    /**
+     * Captures database metadata and prepares the lazy table-model cache.
+     *
+     * @param jdbcTemplate template bound to the target datasource
+     * @param lobHandler   LOB strategy matching the target database
+     */
+    public AbstractDbDialect(final JdbcTemplate jdbcTemplate, LobHandler lobHandler) {
+        this.jdbcTemplate = jdbcTemplate;
+        this.lobHandler = lobHandler;
+        // initialize the transaction template; each operation runs in a new transaction
+        this.transactionTemplate = new TransactionTemplate();
+        transactionTemplate.setTransactionManager(new DataSourceTransactionManager(jdbcTemplate.getDataSource()));
+        transactionTemplate.setPropagationBehavior(TransactionDefinition.PROPAGATION_REQUIRES_NEW);
+
+        // probe product name and version from the connection metadata
+        jdbcTemplate.execute(new ConnectionCallback() {
+
+            public Object doInConnection(Connection c) throws SQLException, DataAccessException {
+                DatabaseMetaData meta = c.getMetaData();
+                databaseName = meta.getDatabaseProductName();
+                databaseMajorVersion = meta.getDatabaseMajorVersion();
+                databaseMinorVersion = meta.getDatabaseMinorVersion();
+
+                return null;
+            }
+        });
+
+        initTables(jdbcTemplate);
+    }
+
+    /**
+     * Looks up the table model for schema.table.
+     *
+     * @param useCache false evicts the cached entry first, forcing a reload
+     */
+    public Table findTable(String schema, String table, boolean useCache) {
+        List<String> key = Arrays.asList(schema, table);
+        if (useCache == false) {
+            tables.remove(key);
+        }
+
+        return tables.get(key);
+    }
+
+    public Table findTable(String schema, String table) {
+        return findTable(schema, table, true);
+    }
+
+    public LobHandler getLobHandler() {
+        return lobHandler;
+    }
+
+    public JdbcTemplate getJdbcTemplate() {
+        return jdbcTemplate;
+    }
+
+    public TransactionTemplate getTransactionTemplate() {
+        return transactionTemplate;
+    }
+
+    // builds the computing map that loads table models on demand via DdlUtils
+    private void initTables(final JdbcTemplate jdbcTemplate) {
+        this.tables = MigrateMap.makeComputingMap(new Function<List<String>, Table>() {
+
+            public Table apply(List<String> names) {
+                Assert.isTrue(names.size() == 2);
+                try {
+                    // NOTE(review): catalog and schema are both names.get(0) - confirm intended
+                    Table table = DdlUtils.findTable(jdbcTemplate, names.get(0), names.get(0), names.get(1));
+                    if (table == null) {
+                        throw new NestableRuntimeException("no found table [" + names.get(0) + "." + names.get(1)
+                                + "] , pls check");
+                    } else {
+                        return table;
+                    }
+                } catch (Exception e) {
+                    throw new NestableRuntimeException("find table [" + names.get(0) + "." + names.get(1) + "] error",
+                            e);
+                }
+            }
+        });
+    }
+
+
+}

+ 105 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/AbstractSqlTemplate.java

@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2010-2101 Alibaba Group Holding Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.otter.canal.example.db.dialect;
+
+/**
+ * Standard-SQL implementations of the CRUD statement builders.
+ *
+ * @author jianghang 2011-10-27 下午01:37:00
+ * @version 4.0.0
+ */
+public abstract class AbstractSqlTemplate implements SqlTemplate {
+
+    private static final String DOT = ".";
+
+    /** select &lt;columns&gt; from schema.table where ( pk = ? and ... ) */
+    public String getSelectSql(String schemaName, String tableName, String[] pkNames, String[] columnNames) {
+        StringBuilder sql = new StringBuilder("select ");
+        int size = columnNames.length;
+        for (int i = 0; i < size; i++) {
+            sql.append(appendEscape(columnNames[i])).append((i + 1 < size) ? " , " : "");
+        }
+
+        sql.append(" from ").append(getFullName(schemaName, tableName)).append(" where ( ");
+        appendColumnEquals(sql, pkNames, "and");
+        sql.append(" ) ");
+        return sql.toString().intern();// intern(): identical SQL is generated repeatedly, share one instance
+    }
+
+    /** update schema.table set col = ? ... where ( pk = ? and ... ) */
+    public String getUpdateSql(String schemaName, String tableName, String[] pkNames, String[] columnNames) {
+        StringBuilder sql = new StringBuilder("update " + getFullName(schemaName, tableName) + " set ");
+        appendColumnEquals(sql, columnNames, ",");
+        sql.append(" where (");
+        appendColumnEquals(sql, pkNames, "and");
+        sql.append(")");
+        return sql.toString().intern(); // intern(): identical SQL is generated repeatedly, share one instance
+    }
+
+    /**
+     * insert into schema.table(cols..., pks...) values (?, ...).
+     * Non-pk columns come first, then pk columns; the placeholder list is built
+     * from the same combined array, so names and "?" positions stay aligned.
+     */
+    public String getInsertSql(String schemaName, String tableName, String[] pkNames, String[] columnNames) {
+        StringBuilder sql = new StringBuilder("insert into " + getFullName(schemaName, tableName) + "(");
+        String[] allColumns = new String[pkNames.length + columnNames.length];
+        System.arraycopy(columnNames, 0, allColumns, 0, columnNames.length);
+        System.arraycopy(pkNames, 0, allColumns, columnNames.length, pkNames.length);
+
+        int size = allColumns.length;
+        for (int i = 0; i < size; i++) {
+            sql.append(appendEscape(allColumns[i])).append((i + 1 < size) ? "," : "");
+        }
+
+        sql.append(") values (");
+        appendColumnQuestions(sql, allColumns);
+        sql.append(")");
+        return sql.toString().intern();// intern(): avoids keeping many copies of the same SQL string
+    }
+
+    /** delete from schema.table where pk = ? and ... */
+    public String getDeleteSql(String schemaName, String tableName, String[] pkNames) {
+        StringBuilder sql = new StringBuilder("delete from " + getFullName(schemaName, tableName) + " where ");
+        appendColumnEquals(sql, pkNames, "and");
+        return sql.toString().intern();// intern(): avoids keeping many copies of the same SQL string
+    }
+
+    // "schema.table" (schema optional), each part passed through appendEscape
+    protected String getFullName(String schemaName, String tableName) {
+        StringBuilder sb = new StringBuilder();
+        if (schemaName != null) {
+            sb.append(appendEscape(schemaName)).append(DOT);
+        }
+        sb.append(appendEscape(tableName));
+        return sb.toString().intern();
+    }
+
+    // ================ helper method ============
+
+    // identity by default; dialect subclasses may quote/escape identifiers here
+    protected String appendEscape(String columnName) {
+        return columnName;
+    }
+
+    // appends "? , ? , ..." - one placeholder per column
+    protected void appendColumnQuestions(StringBuilder sql, String[] columns) {
+        int size = columns.length;
+        for (int i = 0; i < size; i++) {
+            sql.append("?").append((i + 1 < size) ? " , " : "");
+        }
+    }
+
+    // appends "col = ? <separator> col = ? ..." for each column
+    protected void appendColumnEquals(StringBuilder sql, String[] columns, String separator) {
+        int size = columns.length;
+        for (int i = 0; i < size; i++) {
+            sql.append(" ").append(appendEscape(columns[i])).append(" = ").append("? ");
+            if (i != size - 1) {
+                sql.append(separator);
+            }
+        }
+    }
+}

+ 20 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/DbDialect.java

@@ -0,0 +1,20 @@
+package com.alibaba.otter.canal.example.db.dialect;
+
+import org.apache.ddlutils.model.Table;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.support.lob.LobHandler;
+import org.springframework.transaction.support.TransactionTemplate;
+
/**
 * Minimal facade over the target database used by the example DB clients:
 * exposes the Spring JDBC plumbing plus table-metadata lookups.
 */
public interface DbDialect {

    /** Handler used to read/write LOB (BLOB/CLOB) parameters. */
    LobHandler getLobHandler();

    /** JDBC template bound to the target datasource. */
    JdbcTemplate getJdbcTemplate();

    /** Transaction boundary used when applying row changes. */
    TransactionTemplate getTransactionTemplate();

    /** Looks up the table model for schema.table (implementations may cache). */
    Table findTable(String schema, String table);

    /**
     * As {@link #findTable(String, String)}; {@code useCache=false} forces a
     * fresh metadata read instead of any cached model.
     */
    Table findTable(String schema, String table, boolean useCache);

}

+ 40 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/SqlTemplate.java

@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2010-2101 Alibaba Group Holding Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.otter.canal.example.db.dialect;
+
/**
 * SQL template factory: builds dialect-specific DML statements (select,
 * insert, update, delete, merge/upsert) for a given table and its
 * primary-key / column layout. Implementations return interned strings.
 *
 * @author jianghang 2011-10-27 下午01:31:15
 * @version 4.0.0
 */
public interface SqlTemplate {

    /** SELECT of the given columns with a where clause on the pk columns. */
    public String getSelectSql(String schemaName, String tableName, String[] pkNames, String[] columnNames);

    /** UPDATE of the non-pk columns with a where clause on the pk columns. */
    public String getUpdateSql(String schemaName, String tableName, String[] pkNames, String[] columnNames);

    /** DELETE with a where clause on the pk columns. */
    public String getDeleteSql(String schemaName, String tableName, String[] pkNames);

    /** INSERT listing all columns; placeholder order is columns then pks. */
    public String getInsertSql(String schemaName, String tableName, String[] pkNames, String[] columnNames);

    /**
     * Builds the dialect's merge ("upsert") statement, e.g. MySQL's
     * INSERT ... ON DUPLICATE KEY UPDATE.
     *
     * @param updatePks whether pk columns are also rewritten on conflict
     */
    public String getMergeSql(String schemaName, String tableName, String[] pkNames, String[] columnNames,
                              String[] viewColumnNames, boolean updatePks);
}

+ 93 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/TableType.java

@@ -0,0 +1,93 @@
+package com.alibaba.otter.canal.example.db.dialect;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+
+/**
+ * An enumeration wrapper around JDBC table types.
+ */
/**
 * An enumeration wrapper around JDBC table types (the TABLE_TYPE strings
 * reported by {@code DatabaseMetaData.getTables}).
 */
public enum TableType {

    /** Unknown */
    unknown,

    /** System table */
    system_table,

    /** Global temporary */
    global_temporary,

    /** Local temporary */
    local_temporary,

    /** Table */
    table,

    /** View */
    view,

    /** Alias */
    alias,

    /** Synonym */
    synonym;

    /**
     * Maps table types to the upper-case strings JDBC metadata expects.
     * Null entries are skipped; a null/empty input yields an empty array.
     *
     * @param tableTypes Array of table types
     * @return Array of string table types
     */
    public static String[] toStrings(final TableType[] tableTypes) {
        if ((tableTypes == null) || (tableTypes.length == 0)) {
            return new String[0];
        }

        final List<String> names = new ArrayList<String>(tableTypes.length);
        for (int i = 0; i < tableTypes.length; i++) {
            final TableType current = tableTypes[i];
            if (current == null) {
                continue;
            }
            names.add(current.toString().toUpperCase(Locale.ENGLISH));
        }

        return names.toArray(new String[names.size()]);
    }

    /**
     * Parses JDBC metadata type strings back into enumeration values.
     * A null/empty input yields an empty array.
     *
     * @param tableTypeStrings Array of string table types
     * @return Array of table types
     */
    public static TableType[] valueOf(final String[] tableTypeStrings) {
        if ((tableTypeStrings == null) || (tableTypeStrings.length == 0)) {
            return new TableType[0];
        }

        final List<TableType> parsed = new ArrayList<TableType>(tableTypeStrings.length);
        for (int i = 0; i < tableTypeStrings.length; i++) {
            parsed.add(valueOf(tableTypeStrings[i].toLowerCase(Locale.ENGLISH)));
        }

        return parsed.toArray(new TableType[parsed.size()]);
    }
}

+ 32 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/mysql/MysqlDialect.java

@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2010-2101 Alibaba Group Holding Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.otter.canal.example.db.dialect.mysql;
+
+import com.alibaba.otter.canal.example.db.dialect.AbstractDbDialect;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.support.lob.LobHandler;
+
/**
 * MySQL flavour of {@link AbstractDbDialect}: wires the JDBC template and
 * LOB handler to the base class.
 */
public class MysqlDialect extends AbstractDbDialect {

    public MysqlDialect(JdbcTemplate jdbcTemplate, LobHandler lobHandler) {
        super(jdbcTemplate, lobHandler);
    }

    // MySQL keeps empty strings as-is (unlike e.g. Oracle, which stores '' as NULL)
    public boolean isEmptyStringNulled() {
        return false;
    }
}

+ 84 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/mysql/MysqlSqlTemplate.java

@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2010-2101 Alibaba Group Holding Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.otter.canal.example.db.dialect.mysql;
+
+import com.alibaba.otter.canal.example.db.dialect.AbstractSqlTemplate;
+
+/**
+ * mysql sql生成模板
+ *
+ * @author jianghang 2011-10-27 下午01:41:20
+ * @version 4.0.0
+ */
+public class MysqlSqlTemplate extends AbstractSqlTemplate {
+
+    private static final String ESCAPE = "`";
+
+    public String getMergeSql(String schemaName, String tableName, String[] pkNames, String[] columnNames,
+                              String[] viewColumnNames, boolean includePks) {
+        StringBuilder sql = new StringBuilder("insert into " + getFullName(schemaName, tableName) + "(");
+        int size = columnNames.length;
+        for (int i = 0; i < size; i++) {
+            sql.append(appendEscape(columnNames[i])).append(" , ");
+        }
+        size = pkNames.length;
+        for (int i = 0; i < size; i++) {
+            sql.append(appendEscape(pkNames[i])).append((i + 1 < size) ? " , " : "");
+        }
+
+        sql.append(") values (");
+        size = columnNames.length;
+        for (int i = 0; i < size; i++) {
+            sql.append("?").append(" , ");
+        }
+        size = pkNames.length;
+        for (int i = 0; i < size; i++) {
+            sql.append("?").append((i + 1 < size) ? " , " : "");
+        }
+        sql.append(")");
+        sql.append(" on duplicate key update ");
+
+        size = columnNames.length;
+        for (int i = 0; i < size; i++) {
+            sql.append(appendEscape(columnNames[i]))
+                    .append("=values(")
+                    .append(appendEscape(columnNames[i]))
+                    .append(")");
+            if (includePks) {
+                sql.append(" , ");
+            } else {
+                sql.append((i + 1 < size) ? " , " : "");
+            }
+        }
+
+        if (includePks) {
+            // mysql merge sql匹配了uniqe / primary key时都会执行update,所以需要更新pk信息
+            size = pkNames.length;
+            for (int i = 0; i < size; i++) {
+                sql.append(appendEscape(pkNames[i])).append("=values(").append(appendEscape(pkNames[i])).append(")");
+                sql.append((i + 1 < size) ? " , " : "");
+            }
+        }
+
+        return sql.toString().intern();// intern优化,避免出现大量相同的字符串
+    }
+
+    protected String appendEscape(String columnName) {
+        return ESCAPE + columnName + ESCAPE;
+    }
+
+}

+ 207 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/mysql/AbstractMysqlClient.java

@@ -0,0 +1,207 @@
+package com.alibaba.otter.canal.example.db.mysql;
+
+import com.alibaba.fastjson.JSON;
+import com.alibaba.otter.canal.example.db.AbstractDbClient;
+import com.alibaba.otter.canal.example.db.dialect.DbDialect;
+import com.alibaba.otter.canal.example.db.dialect.mysql.MysqlDialect;
+import com.alibaba.otter.canal.example.db.dialect.mysql.MysqlSqlTemplate;
+import com.alibaba.otter.canal.example.db.dialect.SqlTemplate;
+import com.alibaba.otter.canal.example.db.utils.SqlUtils;
+import com.alibaba.otter.canal.protocol.CanalEntry;
+import com.alibaba.otter.canal.protocol.exception.CanalClientException;
+import org.apache.commons.lang.StringUtils;
+import org.apache.ddlutils.model.Column;
+import org.apache.ddlutils.model.Table;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.core.PreparedStatementSetter;
+import org.springframework.jdbc.core.StatementCreatorUtils;
+import org.springframework.jdbc.support.lob.DefaultLobHandler;
+import org.springframework.jdbc.support.lob.LobCreator;
+import org.springframework.transaction.TransactionStatus;
+import org.springframework.transaction.support.TransactionCallback;
+
+import javax.sql.DataSource;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+
+public abstract class AbstractMysqlClient extends AbstractDbClient {
+
+    private DataSource dataSource;
+
+    private DbDialect dbDialect;
+    private SqlTemplate sqlTemplate;
+
+    protected Integer execute(final CanalEntry.Header header, final List<CanalEntry.Column> columns) {
+        final String sql = getSql(header, columns);
+        final LobCreator lobCreator = dbDialect.getLobHandler().getLobCreator();
+        dbDialect.getTransactionTemplate().execute(new TransactionCallback() {
+
+            public Object doInTransaction(TransactionStatus status) {
+                try {
+                    JdbcTemplate template = dbDialect.getJdbcTemplate();
+                    int affect = template.update(sql, new PreparedStatementSetter() {
+
+                        public void setValues(PreparedStatement ps) throws SQLException {
+                            doPreparedStatement(ps, dbDialect, lobCreator, header, columns);
+                        }
+                    });
+                    return affect;
+                } finally {
+                    lobCreator.close();
+                }
+            }
+        });
+        return 0;
+    }
+
+    private String getSql(CanalEntry.Header header, List<CanalEntry.Column> columns) {
+        List<String> pkNames = new ArrayList<>();
+        List<String> colNames = new ArrayList<>();
+        for (CanalEntry.Column column : columns) {
+            if (column.getIsKey()) {
+                pkNames.add(column.getName());
+            } else {
+                colNames.add(column.getName());
+            }
+        }
+        String sql = "";
+        CanalEntry.EventType eventType = header.getEventType();
+        switch (eventType) {
+            case INSERT:
+                sql = sqlTemplate.getInsertSql(header.getSchemaName(), header.getTableName(), pkNames.toArray(new String[]{}), colNames.toArray(new String[]{}));
+                break;
+            case UPDATE:
+                sql = sqlTemplate.getUpdateSql(header.getSchemaName(), header.getTableName(), pkNames.toArray(new String[]{}), colNames.toArray(new String[]{}));
+                break;
+            case DELETE:
+                sql = sqlTemplate.getDeleteSql(header.getSchemaName(), header.getTableName(), pkNames.toArray(new String[]{}));
+        }
+        logger.info("Execute sql: {}", sql);
+        return sql;
+    }
+
+    private void doPreparedStatement(PreparedStatement ps, DbDialect dbDialect, LobCreator lobCreator,
+                                     CanalEntry.Header header, List<CanalEntry.Column> columns) throws SQLException {
+
+        List<CanalEntry.Column> rebuildColumns = new ArrayList<>(columns.size());
+
+        List<CanalEntry.Column> keyColumns = new ArrayList<>(columns.size());
+        List<CanalEntry.Column> notKeyColumns = new ArrayList<>(columns.size());
+        for (CanalEntry.Column column : columns) {
+            if (column.getIsKey()) {
+                keyColumns.add(column);
+            } else {
+                notKeyColumns.add(column);
+            }
+        }
+        CanalEntry.EventType eventType = header.getEventType();
+        switch (eventType) {
+            case INSERT:
+            case UPDATE:
+                // insert/update语句对应的字段数序都是将主键排在后面
+                rebuildColumns.addAll(notKeyColumns);
+                rebuildColumns.addAll(keyColumns);
+                break;
+            case DELETE:
+                rebuildColumns.addAll(keyColumns);
+        }
+
+        // 获取一下当前字段名的数据是否必填
+        Table table = dbDialect.findTable(header.getSchemaName(), header.getTableName());
+        Map<String, Boolean> isRequiredMap = new HashMap();
+        for (Column tableColumn : table.getColumns()) {
+            isRequiredMap.put(StringUtils.lowerCase(tableColumn.getName()), tableColumn.isRequired());
+        }
+
+        List<Object> values = new ArrayList<>(rebuildColumns.size());
+        for (int i = 0; i < rebuildColumns.size(); i++) {
+            int paramIndex = i + 1;
+            CanalEntry.Column column = rebuildColumns.get(i);
+            int sqlType = column.getSqlType();
+
+            Boolean isRequired = isRequiredMap.get(StringUtils.lowerCase(column.getName()));
+            if (isRequired == null) {
+                // 清理一下目标库的表结构,二次检查一下
+                table = dbDialect.findTable(header.getSchemaName(), header.getTableName());
+
+                isRequiredMap = new HashMap<>();
+                for (Column tableColumn : table.getColumns()) {
+                    isRequiredMap.put(StringUtils.lowerCase(tableColumn.getName()), tableColumn.isRequired());
+                }
+
+                isRequired = isRequiredMap.get(StringUtils.lowerCase(column.getName()));
+                if (isRequired == null) {
+                    throw new CanalClientException(String.format("column name %s is not found in Table[%s]",
+                            column.getName(),
+                            table.toString()));
+                }
+            }
+
+            Object param;
+            if (sqlType == Types.TIME || sqlType == Types.TIMESTAMP || sqlType == Types.DATE) {
+                // 解决mysql的0000-00-00 00:00:00问题,直接依赖mysql
+                // driver进行处理,如果转化为Timestamp会出错
+                param = column.getValue();
+                if (param instanceof String && StringUtils.isEmpty(String.valueOf(param))) {
+                    param = null;
+                }
+            } else {
+                param = SqlUtils.stringToSqlValue(column.getValue(),
+                        sqlType,
+                        isRequired,
+                        column.getIsNull());
+            }
+
+            try {
+                switch (sqlType) {
+                    case Types.CLOB:
+                        lobCreator.setClobAsString(ps, paramIndex, (String) param);
+                        break;
+                    case Types.BLOB:
+                        lobCreator.setBlobAsBytes(ps, paramIndex, (byte[]) param);
+                        break;
+                    case Types.TIME:
+                    case Types.TIMESTAMP:
+                    case Types.DATE:
+                        ps.setObject(paramIndex, param);
+                        break;
+                    case Types.BIT:
+                        StatementCreatorUtils.setParameterValue(ps, paramIndex, Types.DECIMAL, null, param);
+                        break;
+                    default:
+                        StatementCreatorUtils.setParameterValue(ps, paramIndex, sqlType, null, param);
+                        break;
+                }
+                values.add(param);
+            } catch (SQLException ex) {
+                logger.error("## SetParam error , [sqltype={}, value={}]",
+                        new Object[]{sqlType, param});
+                throw ex;
+            }
+        }
+        logger.info("## sql values: {}", JSON.toJSONString(values));
+    }
+
+    @Override
+    public void afterPropertiesSet() {
+        JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
+        DefaultLobHandler lobHandler = new DefaultLobHandler();
+        lobHandler.setStreamAsLob(true);
+        dbDialect = new MysqlDialect(jdbcTemplate, lobHandler);
+        sqlTemplate = new MysqlSqlTemplate();
+    }
+
+    public DataSource getDataSource() {
+        return dataSource;
+    }
+
+    public void setDataSource(DataSource dataSource) {
+        this.dataSource = dataSource;
+    }
+}

+ 23 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/mysql/MysqlClient.java

@@ -0,0 +1,23 @@
+package com.alibaba.otter.canal.example.db.mysql;
+
+import com.alibaba.otter.canal.protocol.CanalEntry;
+
+import java.util.List;
+
/**
 * Concrete sink that mirrors canal row events into a target MySQL database.
 * Each event type simply delegates to the inherited execute() with the
 * relevant column image (after-image for insert/update, before-image for
 * delete, whose key columns drive the where clause).
 */
public class MysqlClient extends AbstractMysqlClient {

    @Override
    public void insert(CanalEntry.Header header, List<CanalEntry.Column> afterColumns) {
        execute(header, afterColumns);
    }

    @Override
    public void update(CanalEntry.Header header, List<CanalEntry.Column> afterColumns) {
        execute(header, afterColumns);
    }

    @Override
    public void delete(CanalEntry.Header header, List<CanalEntry.Column> beforeColumns) {
        execute(header, beforeColumns);
    }
}

+ 50 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/utils/ByteArrayConverter.java

@@ -0,0 +1,50 @@
+package com.alibaba.otter.canal.example.db.utils;
+
+import org.apache.commons.beanutils.ConversionException;
+import org.apache.commons.beanutils.Converter;
+import org.apache.commons.beanutils.converters.ArrayConverter;
+import org.apache.commons.beanutils.converters.ByteConverter;
+
+public class ByteArrayConverter implements Converter {
+
+    public static final Converter SQL_BYTES = new ByteArrayConverter(null);
+    private static final Converter converter = new ArrayConverter(byte[].class, new ByteConverter());
+
+    protected final Object defaultValue;
+    protected final boolean useDefault;
+
+    public ByteArrayConverter() {
+        this.defaultValue = null;
+        this.useDefault = false;
+    }
+
+    public ByteArrayConverter(Object defaultValue) {
+        this.defaultValue = defaultValue;
+        this.useDefault = true;
+    }
+
+    public Object convert(Class type, Object value) {
+        if (value == null) {
+            if (useDefault) {
+                return (defaultValue);
+            } else {
+                throw new ConversionException("No value specified");
+            }
+        }
+
+        if (value instanceof byte[]) {
+            return (value);
+        }
+
+        // BLOB类型,canal直接存储为String("ISO-8859-1")
+        if (value instanceof String) {
+            try {
+                return ((String) value).getBytes("ISO-8859-1");
+            } catch (Exception e) {
+                throw new ConversionException(e);
+            }
+        }
+
+        return converter.convert(type, value); // byteConvertor进行转化
+    }
+}

+ 326 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/utils/DdlUtils.java

@@ -0,0 +1,326 @@
+package com.alibaba.otter.canal.example.db.utils;
+
+import com.alibaba.otter.canal.example.db.dialect.TableType;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.builder.ToStringBuilder;
+import org.apache.commons.lang.builder.ToStringStyle;
+import org.apache.commons.lang.math.NumberUtils;
+import org.apache.ddlutils.model.Column;
+import org.apache.ddlutils.model.Table;
+import org.apache.ddlutils.platform.DatabaseMetaDataWrapper;
+import org.apache.ddlutils.platform.MetaDataColumnDescriptor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.ConnectionCallback;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.support.JdbcUtils;
+
+import java.sql.*;
+import java.util.*;
+
+
+public class DdlUtils {
+
+    private static final Logger logger = LoggerFactory.getLogger(DdlUtils.class);
+    private static TableType[] SUPPORTED_TABLE_TYPES = new TableType[]{TableType.view, TableType.table};
+    private final static Map<Integer, String> _defaultSizes = new HashMap<Integer, String>();
+
+    static {
+        _defaultSizes.put(new Integer(1), "254");
+        _defaultSizes.put(new Integer(12), "254");
+        _defaultSizes.put(new Integer(-1), "254");
+        _defaultSizes.put(new Integer(-2), "254");
+        _defaultSizes.put(new Integer(-3), "254");
+        _defaultSizes.put(new Integer(-4), "254");
+        _defaultSizes.put(new Integer(4), "32");
+        _defaultSizes.put(new Integer(-5), "64");
+        _defaultSizes.put(new Integer(7), "7,0");
+        _defaultSizes.put(new Integer(6), "15,0");
+        _defaultSizes.put(new Integer(8), "15,0");
+        _defaultSizes.put(new Integer(3), "15,15");
+        _defaultSizes.put(new Integer(2), "15,15");
+    }
+
+
+    public static Table findTable(final JdbcTemplate jdbcTemplate, final String catalogName, final String schemaName,
+                                  final String tableName) {
+        return (Table) jdbcTemplate.execute(new ConnectionCallback() {
+
+            public Object doInConnection(Connection con) throws SQLException, DataAccessException {
+                Table table = null;
+                DatabaseMetaDataWrapper metaData = new DatabaseMetaDataWrapper();
+                try {
+
+                    DatabaseMetaData databaseMetaData = con.getMetaData();
+
+                    metaData.setMetaData(databaseMetaData);
+                    metaData.setTableTypes(TableType.toStrings(SUPPORTED_TABLE_TYPES));
+                    metaData.setCatalog(catalogName);
+                    metaData.setSchemaPattern(schemaName);
+
+                    String convertTableName = tableName;
+                    if (databaseMetaData.storesUpperCaseIdentifiers()) {
+                        metaData.setCatalog(catalogName.toUpperCase());
+                        metaData.setSchemaPattern(schemaName.toUpperCase());
+                        convertTableName = tableName.toUpperCase();
+                    }
+                    if (databaseMetaData.storesLowerCaseIdentifiers()) {
+                        metaData.setCatalog(catalogName.toLowerCase());
+                        metaData.setSchemaPattern(schemaName.toLowerCase());
+                        convertTableName = tableName.toLowerCase();
+                    }
+
+                    ResultSet tableData = null;
+                    try {
+                        tableData = metaData.getTables(convertTableName);
+
+                        while ((tableData != null) && tableData.next()) {
+                            Map<String, Object> values = readColumns(tableData, initColumnsForTable());
+
+                            table = readTable(metaData, values);
+                            if (table.getName().equalsIgnoreCase(tableName)) {
+                                break;
+                            }
+                        }
+                    } finally {
+                        JdbcUtils.closeResultSet(tableData);
+                    }
+                } catch (Exception e) {
+                    logger.error(e.getMessage(), e);
+                }
+
+                makeAllColumnsPrimaryKeysIfNoPrimaryKeysFound(table);
+                return table;
+            }
+        });
+    }
+
+    /**
+     * Treat tables with no primary keys as a table with all primary keys.
+     */
+    private static void makeAllColumnsPrimaryKeysIfNoPrimaryKeysFound(Table table) {
+        if ((table != null) && (table.getPrimaryKeyColumns() != null) && (table.getPrimaryKeyColumns().length == 0)) {
+            Column[] allCoumns = table.getColumns();
+
+            for (Column column : allCoumns) {
+                column.setPrimaryKey(true);
+            }
+        }
+    }
+
+    private static Table readTable(DatabaseMetaDataWrapper metaData, Map<String, Object> values) throws SQLException {
+        String tableName = (String) values.get("TABLE_NAME");
+        Table table = null;
+
+        if ((tableName != null) && (tableName.length() > 0)) {
+            table = new Table();
+            table.setName(tableName);
+            table.setType((String) values.get("TABLE_TYPE"));
+            table.setCatalog((String) values.get("TABLE_CAT"));
+            table.setSchema((String) values.get("TABLE_SCHEM"));
+            table.setDescription((String) values.get("REMARKS"));
+            table.addColumns(readColumns(metaData, tableName));
+
+            Collection<String> primaryKeys = readPrimaryKeyNames(metaData, tableName);
+
+            for (Object key : primaryKeys) {
+                Column col = table.findColumn((String) key, true);
+
+                if (col != null) {
+                    col.setPrimaryKey(true);
+                } else {
+                    throw new NullPointerException(String.format("%s pk %s is null - %s %s",
+                            tableName,
+                            key,
+                            ToStringBuilder.reflectionToString(metaData, ToStringStyle.SIMPLE_STYLE),
+                            ToStringBuilder.reflectionToString(values, ToStringStyle.SIMPLE_STYLE)));
+                }
+            }
+        }
+
+        return table;
+    }
+
+    private static List<MetaDataColumnDescriptor> initColumnsForTable() {
+        List<MetaDataColumnDescriptor> result = new ArrayList<MetaDataColumnDescriptor>();
+
+        result.add(new MetaDataColumnDescriptor("TABLE_NAME", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("TABLE_TYPE", Types.VARCHAR, "UNKNOWN"));
+        result.add(new MetaDataColumnDescriptor("TABLE_CAT", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("TABLE_SCHEM", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("REMARKS", Types.VARCHAR));
+
+        return result;
+    }
+
+    private static List<MetaDataColumnDescriptor> initColumnsForColumn() {
+        List<MetaDataColumnDescriptor> result = new ArrayList<MetaDataColumnDescriptor>();
+
+        // As suggested by Alexandre Borgoltz, we're reading the COLUMN_DEF
+        // first because Oracle
+        // has problems otherwise (it seemingly requires a LONG column to be the
+        // first to be read)
+        // See also DDLUTILS-29
+        result.add(new MetaDataColumnDescriptor("COLUMN_DEF", Types.VARCHAR));
+
+        // we're also reading the table name so that a model reader impl can
+        // filter manually
+        result.add(new MetaDataColumnDescriptor("TABLE_NAME", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("COLUMN_NAME", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("TYPE_NAME", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("DATA_TYPE", Types.INTEGER, new Integer(Types.OTHER)));
+        result.add(new MetaDataColumnDescriptor("NUM_PREC_RADIX", Types.INTEGER, new Integer(10)));
+        result.add(new MetaDataColumnDescriptor("DECIMAL_DIGITS", Types.INTEGER, new Integer(0)));
+        result.add(new MetaDataColumnDescriptor("COLUMN_SIZE", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("IS_NULLABLE", Types.VARCHAR, "YES"));
+        result.add(new MetaDataColumnDescriptor("REMARKS", Types.VARCHAR));
+
+        return result;
+    }
+
+    private static List<MetaDataColumnDescriptor> initColumnsForPK() {
+        List<MetaDataColumnDescriptor> result = new ArrayList<MetaDataColumnDescriptor>();
+
+        result.add(new MetaDataColumnDescriptor("COLUMN_NAME", Types.VARCHAR));
+
+        // we're also reading the table name so that a model reader impl can
+        // filter manually
+        result.add(new MetaDataColumnDescriptor("TABLE_NAME", Types.VARCHAR));
+
+        // the name of the primary key is currently only interesting to the pk
+        // index name resolution
+        result.add(new MetaDataColumnDescriptor("PK_NAME", Types.VARCHAR));
+
+        return result;
+    }
+
+    private static List<Column> readColumns(DatabaseMetaDataWrapper metaData, String tableName) throws SQLException {
+        ResultSet columnData = null;
+
+        try {
+            columnData = metaData.getColumns(tableName, null);
+
+            List<Column> columns = new ArrayList<Column>();
+            Map<String, Object> values;
+
+            for (; columnData.next(); columns.add(readColumn(metaData, values))) {
+                Map<String, Object> tmp = readColumns(columnData, initColumnsForColumn());
+                if (tableName.equalsIgnoreCase((String) tmp.get("TABLE_NAME"))) {
+                    values = tmp;
+                } else {
+                    break;
+                }
+            }
+
+            return columns;
+        } finally {
+            JdbcUtils.closeResultSet(columnData);
+        }
+    }
+
+    private static Column readColumn(DatabaseMetaDataWrapper metaData, Map<String, Object> values) throws SQLException {
+        Column column = new Column();
+
+        column.setName((String) values.get("COLUMN_NAME"));
+        column.setDefaultValue((String) values.get("COLUMN_DEF"));
+        column.setTypeCode(((Integer) values.get("DATA_TYPE")).intValue());
+
+        String typeName = (String) values.get("TYPE_NAME");
+        // column.setType(typeName);
+
+        if ((typeName != null) && typeName.startsWith("TIMESTAMP")) {
+            column.setTypeCode(Types.TIMESTAMP);
+        }
+        // modify 2013-09-25,处理下unsigned
+        if ((typeName != null) && StringUtils.containsIgnoreCase(typeName, "UNSIGNED")) {
+            // 如果为unsigned,往上调大一个量级,避免数据溢出
+            switch (column.getTypeCode()) {
+                case Types.TINYINT:
+                    column.setTypeCode(Types.SMALLINT);
+                    break;
+                case Types.SMALLINT:
+                    column.setTypeCode(Types.INTEGER);
+                    break;
+                case Types.INTEGER:
+                    column.setTypeCode(Types.BIGINT);
+                    break;
+                case Types.BIGINT:
+                    column.setTypeCode(Types.DECIMAL);
+                    break;
+                default:
+                    break;
+            }
+        }
+
+        Integer precision = (Integer) values.get("NUM_PREC_RADIX");
+
+        if (precision != null) {
+            column.setPrecisionRadix(precision.intValue());
+        }
+
+        String size = (String) values.get("COLUMN_SIZE");
+
+        if (size == null) {
+            size = (String) _defaultSizes.get(new Integer(column.getTypeCode()));
+        }
+
+        // we're setting the size after the precision and radix in case
+        // the database prefers to return them in the size value
+        column.setSize(size);
+
+        int scale = 0;
+        Object dec_digits = values.get("DECIMAL_DIGITS");
+
+        if (dec_digits instanceof String) {
+            scale = (dec_digits == null) ? 0 : NumberUtils.toInt(dec_digits.toString());
+        } else if (dec_digits instanceof Integer) {
+            scale = (dec_digits == null) ? 0 : (Integer) dec_digits;
+        }
+
+        if (scale != 0) {
+            column.setScale(scale);
+        }
+
+        column.setRequired("NO".equalsIgnoreCase(((String) values.get("IS_NULLABLE")).trim()));
+        column.setDescription((String) values.get("REMARKS"));
+        return column;
+    }
+
+    private static Map<String, Object> readColumns(ResultSet resultSet, List<MetaDataColumnDescriptor> columnDescriptors)
+            throws SQLException {
+        Map<String, Object> values = new HashMap<String, Object>();
+        MetaDataColumnDescriptor descriptor;
+
+        for (Iterator<MetaDataColumnDescriptor> it = columnDescriptors.iterator(); it.hasNext(); values.put(descriptor.getName(),
+                descriptor.readColumn(resultSet))) {
+            descriptor = (MetaDataColumnDescriptor) it.next();
+        }
+
+        return values;
+    }
+
+    private static Collection<String> readPrimaryKeyNames(DatabaseMetaDataWrapper metaData, String tableName)
+            throws SQLException {
+        ResultSet pkData = null;
+
+        try {
+            List<String> pks = new ArrayList<String>();
+            Map<String, Object> values;
+
+            for (pkData = metaData.getPrimaryKeys(tableName); pkData.next(); pks.add(readPrimaryKeyName(metaData,
+                    values))) {
+                values = readColumns(pkData, initColumnsForPK());
+            }
+
+            return pks;
+        } finally {
+            JdbcUtils.closeResultSet(pkData);
+        }
+    }
+
+    private static String readPrimaryKeyName(DatabaseMetaDataWrapper metaData, Map<String, Object> values)
+            throws SQLException {
+        return (String) values.get("COLUMN_NAME");
+    }
+}

+ 140 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/utils/SqlTimestampConverter.java

@@ -0,0 +1,140 @@
+package com.alibaba.otter.canal.example.db.utils;
+
+import org.apache.commons.beanutils.ConversionException;
+import org.apache.commons.beanutils.Converter;
+import org.apache.commons.lang.time.DateFormatUtils;
+
+import java.sql.Timestamp;
+import java.text.ParseException;
+import java.text.ParsePosition;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.Locale;
+
+/**
+ * Commons-BeanUtils {@link Converter} that turns arbitrary input values into
+ * {@link java.sql.Date}, {@link java.sql.Time} or {@link Timestamp} instances.
+ * Values that already have the requested java.sql type are passed through
+ * unchanged; everything else is converted via its {@code toString()} form.
+ */
+public class SqlTimestampConverter implements Converter {
+
+    /**
+     * Candidate date/time patterns, tried in order when parsing a string.
+     * NOTE(review): 'f' is not a valid {@link SimpleDateFormat} pattern letter,
+     * so applying "yyyy-MM-dd hh:mm:ss.fffffffff" throws at parse time and the
+     * patterns after it are effectively never tried — confirm whether
+     * fractional seconds should use "SSS" instead.
+     */
+    public static final String[] DATE_FORMATS = new String[]{"yyyy-MM-dd", "HH:mm:ss", "yyyy-MM-dd HH:mm:ss",
+            "yyyy-MM-dd hh:mm:ss.fffffffff", "EEE MMM dd HH:mm:ss zzz yyyy",
+            DateFormatUtils.ISO_DATETIME_FORMAT.getPattern(),
+            DateFormatUtils.ISO_DATETIME_TIME_ZONE_FORMAT.getPattern(),
+            DateFormatUtils.SMTP_DATETIME_FORMAT.getPattern(),};
+
+    /** Shared instance that falls back to a {@code null} default on conversion errors. */
+    public static final Converter SQL_TIMESTAMP = new SqlTimestampConverter(null);
+
+    /**
+     * The default value specified to our Constructor, if any.
+     */
+    private final Object defaultValue;
+
+    /**
+     * Should we return the default value on conversion errors?
+     */
+    private final boolean useDefault;
+
+    /**
+     * Create a {@link Converter} that will throw a {@link ConversionException} if a conversion error occurs.
+     */
+    public SqlTimestampConverter() {
+        this.defaultValue = null;
+        this.useDefault = false;
+    }
+
+    /**
+     * Create a {@link Converter} that will return the specified default value if a conversion error occurs.
+     *
+     * @param defaultValue The default value to be returned
+     */
+    public SqlTimestampConverter(Object defaultValue) {
+        this.defaultValue = defaultValue;
+        this.useDefault = true;
+    }
+
+    /**
+     * Convert the specified input object into an output object of the specified type.
+     * Unknown target types fall through to {@link Timestamp}.
+     *
+     * @param type  Data type to which this value should be converted
+     * @param value The input value to be converted
+     * @throws ConversionException if conversion cannot be performed successfully
+     */
+    public Object convert(Class type, Object value) {
+        if (value == null) {
+            if (useDefault) {
+                return (defaultValue);
+            } else {
+                throw new ConversionException("No value specified");
+            }
+        }
+
+        // Pass through values that already carry the requested java.sql type.
+        if (value instanceof java.sql.Date && java.sql.Date.class.equals(type)) {
+            return value;
+        } else if (value instanceof java.sql.Time && java.sql.Time.class.equals(type)) {
+            return value;
+        } else if (value instanceof Timestamp && Timestamp.class.equals(type)) {
+            return value;
+        } else {
+            try {
+                if (java.sql.Date.class.equals(type)) {
+                    return new java.sql.Date(convertTimestamp2TimeMillis(value.toString()));
+                } else if (java.sql.Time.class.equals(type)) {
+                    return new java.sql.Time(convertTimestamp2TimeMillis(value.toString()));
+                } else if (Timestamp.class.equals(type)) {
+                    return new Timestamp(convertTimestamp2TimeMillis(value.toString()));
+                } else {
+                    // Unknown target type: default to Timestamp.
+                    return new Timestamp(convertTimestamp2TimeMillis(value.toString()));
+                }
+            } catch (Exception e) {
+                throw new ConversionException("Value format invalid: " + e.getMessage(), e);
+            }
+        }
+
+    }
+
+    /**
+     * Parses {@code input} into epoch milliseconds, trying in order: the JDBC
+     * timestamp escape format, the {@link #DATE_FORMATS} patterns (English
+     * locale first, then the default locale), and finally a raw long value.
+     */
+    private Long convertTimestamp2TimeMillis(String input) {
+        if (input == null) {
+            return null;
+        }
+
+        try {
+            // First try the JDBC timestamp escape format (yyyy-[m]m-[d]d hh:mm:ss[.f...]).
+            return Timestamp.valueOf(input).getTime();
+        } catch (Exception nfe) {
+            try {
+                try {
+                    return parseDate(input, DATE_FORMATS, Locale.ENGLISH).getTime();
+                } catch (Exception err) {
+                    return parseDate(input, DATE_FORMATS, Locale.getDefault()).getTime();
+                }
+            } catch (Exception err) {
+                // Last resort: treat the input as epoch milliseconds written as a long.
+                return Long.parseLong(input);
+            }
+        }
+    }
+
+    /**
+     * Tries each pattern in turn and returns the first full-string match.
+     *
+     * @throws ParseException if no pattern matches the complete input
+     */
+    private Date parseDate(String str, String[] parsePatterns, Locale locale) throws ParseException {
+        if ((str == null) || (parsePatterns == null)) {
+            throw new IllegalArgumentException("Date and Patterns must not be null");
+        }
+
+        SimpleDateFormat parser = null;
+        ParsePosition pos = new ParsePosition(0);
+
+        for (int i = 0; i < parsePatterns.length; i++) {
+            if (i == 0) {
+                parser = new SimpleDateFormat(parsePatterns[0], locale);
+            } else {
+                parser.applyPattern(parsePatterns[i]);
+            }
+            pos.setIndex(0);
+            Date date = parser.parse(str, pos);
+            // Accept only if the pattern consumed the entire input string.
+            if ((date != null) && (pos.getIndex() == str.length())) {
+                return date;
+            }
+        }
+
+        throw new ParseException("Unable to parse the date: " + str, -1);
+    }
+}

+ 315 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/utils/SqlUtils.java

@@ -0,0 +1,315 @@
+package com.alibaba.otter.canal.example.db.utils;
+
+import org.apache.commons.beanutils.ConvertUtilsBean;
+import org.apache.commons.lang.StringUtils;
+
+import java.io.UnsupportedEncodingException;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.sql.*;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Helpers for converting between JDBC column values (java.sql.Types) and their
+ * String representations. {@code Date}/{@code Time}/{@code Timestamp} in this
+ * class are the java.sql classes (imported via {@code java.sql.*}).
+ */
+public class SqlUtils {
+
+    // Placeholder stored instead of null for NOT NULL text columns.
+    public static final String REQUIRED_FIELD_NULL_SUBSTITUTE = " ";
+    public static final String SQLDATE_FORMAT = "yyyy-MM-dd";
+    public static final String TIMESTAMP_FORMAT = "yyyy-MM-dd HH:mm:ss";
+    // Maps each java.sql.Types code to the Java class used to read/convert it.
+    private static final Map<Integer, Class<?>> sqlTypeToJavaTypeMap = new HashMap<Integer, Class<?>>();
+    private static final ConvertUtilsBean convertUtilsBean = new ConvertUtilsBean();
+
+    static {
+        // register converters for date/time and binary types
+        convertUtilsBean.register(SqlTimestampConverter.SQL_TIMESTAMP, Date.class);
+        convertUtilsBean.register(SqlTimestampConverter.SQL_TIMESTAMP, Time.class);
+        convertUtilsBean.register(SqlTimestampConverter.SQL_TIMESTAMP, Timestamp.class);
+        convertUtilsBean.register(ByteArrayConverter.SQL_BYTES, byte[].class);
+
+        // bool
+        sqlTypeToJavaTypeMap.put(Types.BOOLEAN, Boolean.class);
+
+        // int
+        sqlTypeToJavaTypeMap.put(Types.TINYINT, Integer.class);
+        sqlTypeToJavaTypeMap.put(Types.SMALLINT, Integer.class);
+        sqlTypeToJavaTypeMap.put(Types.INTEGER, Integer.class);
+
+        // long
+        sqlTypeToJavaTypeMap.put(Types.BIGINT, Long.class);
+        // MySQL BIT is at most 64 bits and unsigned, so map it to BigInteger
+        sqlTypeToJavaTypeMap.put(Types.BIT, BigInteger.class);
+
+        // decimal
+        sqlTypeToJavaTypeMap.put(Types.REAL, Float.class);
+        sqlTypeToJavaTypeMap.put(Types.FLOAT, Float.class);
+        sqlTypeToJavaTypeMap.put(Types.DOUBLE, Double.class);
+        sqlTypeToJavaTypeMap.put(Types.NUMERIC, BigDecimal.class);
+        sqlTypeToJavaTypeMap.put(Types.DECIMAL, BigDecimal.class);
+
+        // date
+        sqlTypeToJavaTypeMap.put(Types.DATE, Date.class);
+        sqlTypeToJavaTypeMap.put(Types.TIME, Time.class);
+        sqlTypeToJavaTypeMap.put(Types.TIMESTAMP, Timestamp.class);
+
+        // blob
+        sqlTypeToJavaTypeMap.put(Types.BLOB, byte[].class);
+
+        // byte[] — everything else opaque is treated as raw bytes
+        sqlTypeToJavaTypeMap.put(Types.REF, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.OTHER, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.ARRAY, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.STRUCT, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.SQLXML, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.BINARY, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.DATALINK, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.DISTINCT, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.VARBINARY, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.JAVA_OBJECT, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.LONGVARBINARY, byte[].class);
+
+        // String
+        sqlTypeToJavaTypeMap.put(Types.CHAR, String.class);
+        sqlTypeToJavaTypeMap.put(Types.VARCHAR, String.class);
+        sqlTypeToJavaTypeMap.put(Types.LONGVARCHAR, String.class);
+        sqlTypeToJavaTypeMap.put(Types.LONGNVARCHAR, String.class);
+        sqlTypeToJavaTypeMap.put(Types.NCHAR, String.class);
+        sqlTypeToJavaTypeMap.put(Types.NVARCHAR, String.class);
+        sqlTypeToJavaTypeMap.put(Types.NCLOB, String.class);
+        sqlTypeToJavaTypeMap.put(Types.CLOB, String.class);
+    }
+
+    /**
+     * Converts the ResultSet value at {@code index}, declared with the given
+     * java.sql.Types code, into its String representation.
+     *
+     * @param rs      the ResultSet holding the data
+     * @param index   1-based column index
+     * @param sqlType the java.sql.Types code of the column
+     * @return the String form of the value (may be null)
+     * @throws SQLException if thrown by the JDBC API
+     * @throws IllegalArgumentException if the sqlType is not mapped
+     */
+    public static String sqlValueToString(ResultSet rs, int index, int sqlType) throws SQLException {
+        Class<?> requiredType = sqlTypeToJavaTypeMap.get(sqlType);
+        if (requiredType == null) {
+            throw new IllegalArgumentException("unknow java.sql.Types - " + sqlType);
+        }
+
+        return getResultSetValue(rs, index, requiredType);
+    }
+
+    /**
+     * Inverse of {@link #sqlValueToString}: converts a String back into the
+     * Java object matching the SQL type.
+     *
+     * @param value               the String form of the value
+     * @param sqlType             the java.sql.Types code of the target column
+     * @param isRequired          whether the column is NOT NULL (text columns get
+     *                            {@link #REQUIRED_FIELD_NULL_SUBSTITUTE} instead of null)
+     * @param isEmptyStringNulled whether an empty text value should be treated as null
+     * @return the converted value, or null
+     */
+    public static Object stringToSqlValue(String value, int sqlType, boolean isRequired, boolean isEmptyStringNulled) {
+        String sourceValue = value;
+        if (SqlUtils.isTextType(sqlType)) {
+            if ((sourceValue == null) || (StringUtils.isEmpty(sourceValue) && isEmptyStringNulled)) {
+                return isRequired ? REQUIRED_FIELD_NULL_SUBSTITUTE : null;
+            } else {
+                return sourceValue;
+            }
+        } else {
+            if (StringUtils.isEmpty(sourceValue)) {
+                return null;
+            } else {
+                Class<?> requiredType = sqlTypeToJavaTypeMap.get(sqlType);
+                if (requiredType == null) {
+                    throw new IllegalArgumentException("unknow java.sql.Types - " + sqlType);
+                } else if (requiredType.equals(String.class)) {
+                    return sourceValue;
+                } else if (isNumeric(sqlType)) {
+                    // trim so stray whitespace does not break numeric parsing
+                    return convertUtilsBean.convert(sourceValue.trim(), requiredType);
+                } else {
+                    return convertUtilsBean.convert(sourceValue, requiredType);
+                }
+            }
+        }
+    }
+
+    /**
+     * Re-encodes a character-typed value from {@code sourceEncoding} to
+     * {@code targetEncoding} (both default to UTF-8 when blank). Non-character
+     * SQL types are returned unchanged.
+     */
+    public static String encoding(String source, int sqlType, String sourceEncoding, String targetEncoding) {
+        switch (sqlType) {
+            case Types.CHAR:
+            case Types.VARCHAR:
+            case Types.LONGVARCHAR:
+            case Types.NCHAR:
+            case Types.NVARCHAR:
+            case Types.LONGNVARCHAR:
+            case Types.CLOB:
+            case Types.NCLOB:
+                if (false == StringUtils.isEmpty(source)) {
+                    String fromEncoding = StringUtils.isBlank(sourceEncoding) ? "UTF-8" : sourceEncoding;
+                    String toEncoding = StringUtils.isBlank(targetEncoding) ? "UTF-8" : targetEncoding;
+
+                    // if (false == StringUtils.equalsIgnoreCase(fromEncoding,
+                    // toEncoding)) {
+                    try {
+                        return new String(source.getBytes(fromEncoding), toEncoding);
+                    } catch (UnsupportedEncodingException e) {
+                        throw new IllegalArgumentException(e.getMessage(), e);
+                    }
+                    // }
+                }
+        }
+
+        return source;
+    }
+
+    /**
+     * Retrieve a JDBC column value from a ResultSet, using the specified value
+     * type.
+     * <p>
+     * Uses the specifically typed ResultSet accessor methods, falling back to
+     * {@link #getResultSetValue(ResultSet, int)} for unknown types.
+     * <p>
+     * Note that the returned value may not be assignable to the specified
+     * required type, in case of an unknown type. Calling code needs to deal
+     * with this case appropriately, e.g. throwing a corresponding exception.
+     *
+     * @param rs           is the ResultSet holding the data
+     * @param index        is the column index
+     * @param requiredType the required value type (may be <code>null</code>)
+     * @return the value object, converted to String
+     * @throws SQLException if thrown by the JDBC API
+     */
+    private static String getResultSetValue(ResultSet rs, int index, Class<?> requiredType) throws SQLException {
+        if (requiredType == null) {
+            return getResultSetValue(rs, index);
+        }
+
+        Object value = null;
+        boolean wasNullCheck = false;
+
+        // Explicitly extract typed value, as far as possible.
+        if (String.class.equals(requiredType)) {
+            value = rs.getString(index);
+        } else if (boolean.class.equals(requiredType) || Boolean.class.equals(requiredType)) {
+            value = Boolean.valueOf(rs.getBoolean(index));
+            wasNullCheck = true;
+        } else if (byte.class.equals(requiredType) || Byte.class.equals(requiredType)) {
+            value = new Byte(rs.getByte(index));
+            wasNullCheck = true;
+        } else if (short.class.equals(requiredType) || Short.class.equals(requiredType)) {
+            value = new Short(rs.getShort(index));
+            wasNullCheck = true;
+        } else if (int.class.equals(requiredType) || Integer.class.equals(requiredType)) {
+            // NOTE(review): widened read (getLong for an Integer type), presumably
+            // to accommodate MySQL unsigned INT columns — confirm
+            value = new Long(rs.getLong(index));
+            wasNullCheck = true;
+        } else if (long.class.equals(requiredType) || Long.class.equals(requiredType)) {
+            // NOTE(review): widened read (getBigDecimal for a Long type), presumably
+            // to accommodate MySQL unsigned BIGINT columns — confirm
+            value = rs.getBigDecimal(index);
+            wasNullCheck = true;
+        } else if (float.class.equals(requiredType) || Float.class.equals(requiredType)) {
+            value = new Float(rs.getFloat(index));
+            wasNullCheck = true;
+        } else if (double.class.equals(requiredType) || Double.class.equals(requiredType)
+                || Number.class.equals(requiredType)) {
+            value = new Double(rs.getDouble(index));
+            wasNullCheck = true;
+        } else if (Time.class.equals(requiredType)) {
+            // try {
+            // value = rs.getTime(index);
+            // } catch (SQLException e) {
+            value = rs.getString(index);// read as String: zero values ("00:00:00") cannot be represented as java.sql.Time
+            // if (value == null && !rs.wasNull()) {
+            // value = "00:00:00"; //
+            // with zeroDateTimeBehavior=convertToNull MySQL returns null for zero values
+            // }
+            // }
+        } else if (Timestamp.class.equals(requiredType) || Date.class.equals(requiredType)) {
+            // try {
+            // value = convertTimestamp(rs.getTimestamp(index));
+            // } catch (SQLException e) {
+            // read as String: "0000-00-00 00:00:00" cannot be represented as Timestamp
+            value = rs.getString(index);
+            // if (value == null && !rs.wasNull()) {
+            // value = "0000:00:00 00:00:00"; //
+            // with zeroDateTimeBehavior=convertToNull MySQL returns null for zero values
+            // }
+            // }
+        } else if (BigDecimal.class.equals(requiredType)) {
+            value = rs.getBigDecimal(index);
+        } else if (BigInteger.class.equals(requiredType)) {
+            value = rs.getBigDecimal(index);
+        } else if (Blob.class.equals(requiredType)) {
+            value = rs.getBlob(index);
+        } else if (Clob.class.equals(requiredType)) {
+            value = rs.getClob(index);
+        } else if (byte[].class.equals(requiredType)) {
+            try {
+                byte[] bytes = rs.getBytes(index);
+                if (bytes == null) {
+                    value = null;
+                } else {
+                    value = new String(bytes, "ISO-8859-1");// decode binary as ISO-8859-1 so bytes round-trip losslessly through String
+                }
+            } catch (UnsupportedEncodingException e) {
+                throw new SQLException(e);
+            }
+        } else {
+            // Some unknown type desired -> rely on getObject.
+            value = getResultSetValue(rs, index);
+        }
+
+        // Perform was-null check if demanded (for results that the
+        // JDBC driver returns as primitives).
+        if (wasNullCheck && (value != null) && rs.wasNull()) {
+            value = null;
+        }
+
+        return (value == null) ? null : convertUtilsBean.convert(value);
+    }
+
+    /**
+     * Retrieve a JDBC column value from a ResultSet, using the most appropriate
+     * value type. The returned value should be a detached value object, not
+     * having any ties to the active ResultSet: in particular, it should not be
+     * a Blob or Clob object but rather a byte array respectively String
+     * representation.
+     * <p>
+     * Uses the <code>getObject(index)</code> method, but includes additional
+     * "hacks" to get around Oracle 10g returning a non-standard object for its
+     * TIMESTAMP datatype and a <code>java.sql.Date</code> for DATE columns
+     * leaving out the time portion: These columns will explicitly be extracted
+     * as standard <code>java.sql.Timestamp</code> object.
+     *
+     * @param rs    is the ResultSet holding the data
+     * @param index is the column index
+     * @return the value object
+     * @throws SQLException if thrown by the JDBC API
+     * @see Blob
+     * @see Clob
+     * @see Timestamp
+     */
+    private static String getResultSetValue(ResultSet rs, int index) throws SQLException {
+        Object obj = rs.getObject(index);
+        return (obj == null) ? null : convertUtilsBean.convert(obj);
+    }
+
+    // private static Object convertTimestamp(Timestamp timestamp) {
+    // return (timestamp == null) ? null : timestamp.getTime();
+    // }
+
+    /**
+     * Check whether the given SQL type is numeric.
+     */
+    public static boolean isNumeric(int sqlType) {
+        return (Types.BIT == sqlType) || (Types.BIGINT == sqlType) || (Types.DECIMAL == sqlType)
+                || (Types.DOUBLE == sqlType) || (Types.FLOAT == sqlType) || (Types.INTEGER == sqlType)
+                || (Types.NUMERIC == sqlType) || (Types.REAL == sqlType) || (Types.SMALLINT == sqlType)
+                || (Types.TINYINT == sqlType);
+    }
+
+    /**
+     * Check whether the given SQL type is a character/text type.
+     */
+    public static boolean isTextType(int sqlType) {
+        if (sqlType == Types.CHAR || sqlType == Types.VARCHAR || sqlType == Types.CLOB || sqlType == Types.LONGVARCHAR
+                || sqlType == Types.NCHAR || sqlType == Types.NVARCHAR || sqlType == Types.NCLOB
+                || sqlType == Types.LONGNVARCHAR) {
+            return true;
+        } else {
+            return false;
+        }
+    }
+}

+ 53 - 0
example/src/main/resources/client-spring.xml

@@ -0,0 +1,53 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-2.0.xsd"
+       default-autowire="byName">
+
+    <!-- Loads client.properties; the ${key:default} placeholders below fall back to the default when a key is absent. -->
+    <bean class="com.alibaba.otter.canal.example.db.PropertyPlaceholderConfigurer" lazy-init="false">
+        <property name="ignoreResourceNotFound" value="true"/>
+        <property name="systemPropertiesModeName" value="SYSTEM_PROPERTIES_MODE_OVERRIDE"/><!-- allow System properties to override file values -->
+        <property name="locationNames">
+            <list>
+                <value>classpath:client.properties</value>
+            </list>
+        </property>
+    </bean>
+
+    <!-- Druid connection pool for the target (sink) MySQL database written to by MysqlClient. -->
+    <bean id="dataSource" class="com.alibaba.druid.pool.DruidDataSource" destroy-method="close">
+        <property name="driverClassName" value="com.mysql.jdbc.Driver"/>
+        <property name="url" value="${target.mysql.url:}"/>
+        <property name="username" value="${target.mysql.dbUsername:canal}"/>
+        <property name="password" value="${target.mysql.dbPassword:canal}"/>
+        <property name="maxActive" value="30"/>
+        <property name="initialSize" value="0"/>
+        <property name="minIdle" value="1"/>
+        <property name="maxWait" value="10000"/>
+        <property name="timeBetweenEvictionRunsMillis" value="60000"/>
+        <property name="minEvictableIdleTimeMillis" value="300000"/>
+        <property name="validationQuery" value="SELECT 1"/>
+        <property name="exceptionSorterClassName" value="com.alibaba.druid.pool.vendor.MySqlExceptionSorter"/>
+        <property name="validConnectionCheckerClassName" value="com.alibaba.druid.pool.vendor.MySqlValidConnectionChecker"/>
+        <property name="testWhileIdle" value="true"/>
+        <property name="testOnBorrow" value="false"/>
+        <property name="testOnReturn" value="false"/>
+        <property name="useUnfairLock" value="true"/>
+    </bean>
+
+    <!-- Abstract parent carrying the shared canal client settings (zookeeper address, destination, auth, retry policy). -->
+    <bean name="canalConnectorClient" class="com.alibaba.otter.canal.example.db.CanalConnectorClient" abstract="true">
+        <property name="zkServers" value="${zk.servers:127.0.0.1:2181}"/>
+        <property name="debug" value="${client.debug:true}"/>
+        <property name="destination" value="${client.destination:example}"/>
+        <property name="username" value="${client.username:canal}"/>
+        <property name="password" value="${client.password:canal}"/>
+        <property name="exceptionStrategy" value="${client.exceptionstrategy:1}"/>
+        <property name="retryTimes" value="${client.retrytimes:3}"/>
+        <property name="filter" value="${client.filter:.*\\..*}"/>
+        <property name="waitingTime" value="${client.waiting.time:10}"/>
+    </bean>
+
+    <!-- Concrete client that replays canal entries into the target MySQL via the data source above. -->
+    <bean id="mysqlClient" class="com.alibaba.otter.canal.example.db.mysql.MysqlClient" lazy-init="true" parent="canalConnectorClient">
+        <property name="dataSource" ref="dataSource"/>
+    </bean>
+</beans>

+ 16 - 0
example/src/main/resources/client.properties

@@ -0,0 +1,16 @@
+# client 配置
+zk.servers=127.0.0.1:2181
+# 5 * 1024
+client.batch.size=5120
+client.debug=false
+client.destination=example
+client.username=canal
+client.password=canal
+client.exceptionstrategy=1
+client.retrytimes=3
+client.filter=.*\\..*
+
+# 同步目标: mysql 配置
+target.mysql.url=jdbc:mysql://127.0.0.1:4306
+target.mysql.dbUsername=root
+target.mysql.dbPassword=123456

+ 19 - 13
instance/manager/src/main/java/com/alibaba/otter/canal/instance/manager/CanalInstanceWithManager.java

@@ -6,6 +6,11 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
+
+import com.alibaba.otter.canal.parse.inbound.mysql.tablemeta.HistoryTableMetaCache;
+
+import com.alibaba.otter.canal.meta.FileMixedMetaManager;
+
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -20,13 +25,7 @@ import com.alibaba.otter.canal.filter.aviater.AviaterRegexFilter;
 import com.alibaba.otter.canal.instance.core.AbstractCanalInstance;
 import com.alibaba.otter.canal.instance.manager.model.Canal;
 import com.alibaba.otter.canal.instance.manager.model.CanalParameter;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.DataSourcing;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.HAMode;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.IndexMode;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.MetaMode;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.SourcingType;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.StorageMode;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.StorageScavengeMode;
+import com.alibaba.otter.canal.instance.manager.model.CanalParameter.*;
 import com.alibaba.otter.canal.meta.MemoryMetaManager;
 import com.alibaba.otter.canal.meta.PeriodMixedMetaManager;
 import com.alibaba.otter.canal.meta.ZooKeeperMetaManager;
@@ -37,12 +36,7 @@ import com.alibaba.otter.canal.parse.inbound.AbstractEventParser;
 import com.alibaba.otter.canal.parse.inbound.group.GroupEventParser;
 import com.alibaba.otter.canal.parse.inbound.mysql.LocalBinlogEventParser;
 import com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser;
-import com.alibaba.otter.canal.parse.index.CanalLogPositionManager;
-import com.alibaba.otter.canal.parse.index.FailbackLogPositionManager;
-import com.alibaba.otter.canal.parse.index.MemoryLogPositionManager;
-import com.alibaba.otter.canal.parse.index.MetaLogPositionManager;
-import com.alibaba.otter.canal.parse.index.PeriodMixedLogPositionManager;
-import com.alibaba.otter.canal.parse.index.ZooKeeperLogPositionManager;
+import com.alibaba.otter.canal.parse.index.*;
 import com.alibaba.otter.canal.parse.support.AuthenticationInfo;
 import com.alibaba.otter.canal.protocol.position.EntryPosition;
 import com.alibaba.otter.canal.sink.entry.EntryEventSink;
@@ -120,6 +114,11 @@ public class CanalInstanceWithManager extends AbstractCanalInstance {
             ZooKeeperMetaManager zooKeeperMetaManager = new ZooKeeperMetaManager();
             zooKeeperMetaManager.setZkClientx(getZkclientx());
             ((PeriodMixedMetaManager) metaManager).setZooKeeperMetaManager(zooKeeperMetaManager);
+        } else if (mode.isLocalFile()){
+            FileMixedMetaManager fileMixedMetaManager = new FileMixedMetaManager();
+            fileMixedMetaManager.setDataDir(parameters.getDataDir());
+            fileMixedMetaManager.setPeriod(parameters.getMetaFileFlushPeriod());
+            metaManager = fileMixedMetaManager;
         } else {
             throw new CanalException("unsupport MetaMode for " + mode);
         }
@@ -242,6 +241,13 @@ public class CanalInstanceWithManager extends AbstractCanalInstance {
             mysqlEventParser.setDetectingIntervalInSeconds(parameters.getDetectingIntervalInSeconds());
             // 数据库信息参数
             mysqlEventParser.setSlaveId(parameters.getSlaveId());
+            mysqlEventParser.setTableMetaStorageFactory(parameters.getTableMetaStorageFactory());
+            // Ctrip callback
+//            mysqlEventParser.setCallback(parameters.getCallback());
+//            HistoryTableMetaCache cache = new HistoryTableMetaCache();
+//            cache.init(parameters.getEntries());
+//            mysqlEventParser.setHistoryTableMetaCache(cache);
+
             if (!CollectionUtils.isEmpty(dbAddresses)) {
                 mysqlEventParser.setMasterInfo(new AuthenticationInfo(dbAddresses.get(0),
                     parameters.getDbUsername(),

+ 37 - 1
instance/manager/src/main/java/com/alibaba/otter/canal/instance/manager/model/CanalParameter.java

@@ -5,6 +5,7 @@ import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.List;
 
+import com.alibaba.otter.canal.parse.inbound.mysql.tablemeta.TableMetaStorageFactory;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.builder.ToStringBuilder;
 
@@ -28,8 +29,10 @@ public class CanalParameter implements Serializable {
     private Long                     zkClusterId;                                                    // zk集群id,为管理方便
     private List<String>             zkClusters;                                                     // zk集群地址
 
+    private String                   dataDir                            = "../conf";                 // 默认本地文件数据的目录默认是conf
     // meta相关参数
     private MetaMode                 metaMode                           = MetaMode.MEMORY;           // meta机制
+    private Integer                  metaFileFlushPeriod                = 1000;                      // meta刷新间隔
 
     // storage存储
     private Integer                  transactionSize                    = 1024;                      // 支持处理的transaction事务大小
@@ -106,6 +109,9 @@ public class CanalParameter implements Serializable {
     private Long                     standbyLogfileOffest               = null;
     private Long                     standbyTimestamp                   = null;
 
+    // Ctrip Table Meta
+    TableMetaStorageFactory tableMetaStorageFactory;
+
     public static enum RunMode {
 
         /** 嵌入式 */
@@ -243,7 +249,9 @@ public class CanalParameter implements Serializable {
         /** 文件存储模式 */
         ZOOKEEPER,
         /** 混合模式,内存+文件 */
-        MIXED;
+        MIXED,
+        /** 本地文件存储模式*/
+        LOCAL_FILE;
 
         public boolean isMemory() {
             return this.equals(MetaMode.MEMORY);
@@ -256,6 +264,10 @@ public class CanalParameter implements Serializable {
         public boolean isMixed() {
             return this.equals(MetaMode.MIXED);
         }
+
+        public boolean isLocalFile(){
+            return this.equals(MetaMode.LOCAL_FILE);
+        }
     }
 
     public static enum IndexMode {
@@ -390,6 +402,22 @@ public class CanalParameter implements Serializable {
         return storageMode;
     }
 
+    public String getDataDir() {
+        return dataDir;
+    }
+
+    public void setDataDir(String dataDir) {
+        this.dataDir = dataDir;
+    }
+
+    public Integer getMetaFileFlushPeriod() {
+        return metaFileFlushPeriod;
+    }
+
+    public void setMetaFileFlushPeriod(Integer metaFileFlushPeriod) {
+        this.metaFileFlushPeriod = metaFileFlushPeriod;
+    }
+
     public void setStorageMode(StorageMode storageMode) {
         this.storageMode = storageMode;
     }
@@ -859,6 +887,14 @@ public class CanalParameter implements Serializable {
         this.blackFilter = blackFilter;
     }
 
+    public TableMetaStorageFactory getTableMetaStorageFactory() {
+        return tableMetaStorageFactory;
+    }
+
+    public void setTableMetaStorageFactory(TableMetaStorageFactory tableMetaStorageFactory) {
+        this.tableMetaStorageFactory = tableMetaStorageFactory;
+    }
+
     public String toString() {
         return ToStringBuilder.reflectionToString(this, CanalToStringStyle.DEFAULT_STYLE);
     }

+ 3 - 1
kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/MessageDeserializer.java

@@ -29,7 +29,9 @@ public class MessageDeserializer implements Deserializer<Message> {
     @Override
     public Message deserialize(String topic, byte[] data) {
         try {
-            if (data == null) return null;
+            if (data == null) {
+                return null;
+            }
             else {
                 CanalPacket.Packet p = CanalPacket.Packet.parseFrom(data);
                 switch (p.getType()) {

+ 2 - 2
kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/running/ClientRunningData.java

@@ -8,8 +8,8 @@ package com.alibaba.otter.canal.kafka.client.running;
  */
 public class ClientRunningData {
 
-    private String groupId;
-    private String address;
+    private String  groupId;
+    private String  address;
     private boolean active = true;
 
     public String getGroupId() {

+ 44 - 38
kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/running/ClientRunningMonitor.java

@@ -1,12 +1,11 @@
 package com.alibaba.otter.canal.kafka.client.running;
 
-import com.alibaba.otter.canal.common.AbstractCanalLifeCycle;
-import com.alibaba.otter.canal.common.utils.AddressUtils;
-import com.alibaba.otter.canal.common.utils.BooleanMutex;
-import com.alibaba.otter.canal.common.utils.JsonUtils;
-import com.alibaba.otter.canal.common.zookeeper.ZkClientx;
-import com.alibaba.otter.canal.common.zookeeper.ZookeeperPathUtils;
-import com.alibaba.otter.canal.protocol.exception.CanalClientException;
+import java.text.MessageFormat;
+import java.util.Random;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
 import org.I0Itec.zkclient.IZkDataListener;
 import org.I0Itec.zkclient.exception.ZkException;
 import org.I0Itec.zkclient.exception.ZkInterruptedException;
@@ -17,11 +16,14 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.MDC;
 
-import java.text.MessageFormat;
-import java.util.Random;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
+import com.alibaba.otter.canal.common.AbstractCanalLifeCycle;
+import com.alibaba.otter.canal.common.utils.AddressUtils;
+import com.alibaba.otter.canal.common.utils.BooleanMutex;
+import com.alibaba.otter.canal.common.utils.JsonUtils;
+import com.alibaba.otter.canal.common.zookeeper.ZkClientx;
+import com.alibaba.otter.canal.common.zookeeper.ZookeeperPathUtils;
+import com.alibaba.otter.canal.protocol.exception.CanalClientException;
+
 
 /**
  * kafka client running状态信息
@@ -31,13 +33,18 @@ import java.util.concurrent.TimeUnit;
  */
 public class ClientRunningMonitor extends AbstractCanalLifeCycle {
 
-    private static final String TOPIC_ROOT_NODE = ZookeeperPathUtils.CANAL_ROOT_NODE + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR + "topics";
+    private static final String TOPIC_ROOT_NODE             = ZookeeperPathUtils.CANAL_ROOT_NODE
+                                                              + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR + "topics";
 
-    private static final String TOPIC_NODE = TOPIC_ROOT_NODE + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR + "{0}";
+    private static final String TOPIC_NODE                  = TOPIC_ROOT_NODE + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR
+                                                              + "{0}";
 
-    private static final String TOPIC_CLIENTID_NODE = TOPIC_NODE + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR + "{1}";
+    private static final String TOPIC_CLIENTID_NODE         = TOPIC_NODE + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR
+                                                              + "{1}";
 
-    private static final String TOPIC_CLIENTID_RUNNING_NODE = TOPIC_CLIENTID_NODE + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR + ZookeeperPathUtils.RUNNING_NODE;
+    private static final String TOPIC_CLIENTID_RUNNING_NODE = TOPIC_CLIENTID_NODE
+                                                              + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR
+                                                              + ZookeeperPathUtils.RUNNING_NODE;
 
     private static String getTopicClientRunning(String topic, String groupId) {
         return MessageFormat.format(TOPIC_CLIENTID_RUNNING_NODE, topic, groupId);
@@ -47,21 +54,21 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
         return MessageFormat.format(TOPIC_CLIENTID_NODE, topic, groupId);
     }
 
-    private static final Logger logger = LoggerFactory.getLogger(ClientRunningMonitor.class);
-    private ZkClientx zkClient;
-    private String topic;
-    private ClientRunningData clientData;
-    private IZkDataListener dataListener;
-    private BooleanMutex mutex = new BooleanMutex(false);
-    private volatile boolean release = false;
+    private static final Logger        logger       = LoggerFactory.getLogger(ClientRunningMonitor.class);
+    private ZkClientx                  zkClient;
+    private String                     topic;
+    private ClientRunningData          clientData;
+    private IZkDataListener            dataListener;
+    private BooleanMutex               mutex        = new BooleanMutex(false);
+    private volatile boolean           release      = false;
     private volatile ClientRunningData activeData;
-    private ScheduledExecutorService delayExector = Executors.newScheduledThreadPool(1);
-    private ClientRunningListener listener;
-    private int delayTime = 5;
+    private ScheduledExecutorService   delayExector = Executors.newScheduledThreadPool(1);
+    private ClientRunningListener      listener;
+    private int                        delayTime    = 5;
 
-    private static Integer virtualPort;
+    private static Integer             virtualPort;
 
-    public ClientRunningMonitor() {
+    public ClientRunningMonitor(){
         if (virtualPort == null) {
             Random rand = new Random();
             virtualPort = rand.nextInt(9000) + 1000;
@@ -108,7 +115,6 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
 
     }
 
-
     public void start() {
         super.start();
 
@@ -123,7 +129,7 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
         String path = getTopicClientRunning(this.topic, clientData.getGroupId());
         zkClient.unsubscribeDataChanges(path, dataListener);
         releaseRunning(); // 尝试一下release
-        //Fix issue #697
+        // Fix issue #697
         if (delayExector != null) {
             delayExector.shutdown();
         }
@@ -159,13 +165,12 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
                 }
             }
         } catch (ZkNoNodeException e) {
-            zkClient.createPersistent(getClientIdNodePath(this.topic, clientData.getGroupId()),
-                    true); // 尝试创建父节点
+            zkClient.createPersistent(getClientIdNodePath(this.topic, clientData.getGroupId()), true); // 尝试创建父节点
             initRunning();
         } catch (Throwable t) {
             logger.error(MessageFormat.format("There is an error when execute initRunning method, with destination [{0}].",
-                    topic),
-                    t);
+                topic),
+                t);
             // 出现任何异常尝试release
             releaseRunning();
             throw new CanalClientException("something goes wrong in initRunning method. ", t);
@@ -187,7 +192,8 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
      */
     public boolean check() {
         String path = getTopicClientRunning(this.topic, clientData.getGroupId());
-        //ZookeeperPathUtils.getDestinationClientRunning(this.destination, clientData.getClientId());
+        // ZookeeperPathUtils.getDestinationClientRunning(this.destination,
+        // clientData.getClientId());
         try {
             byte[] bytes = zkClient.readData(path);
             ClientRunningData eventData = JsonUtils.unmarshalFromByte(bytes, ClientRunningData.class);
@@ -196,8 +202,8 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
             boolean result = isMine(activeData.getAddress());
             if (!result) {
                 logger.warn("canal is running in [{}] , but not in [{}]",
-                        activeData.getAddress(),
-                        clientData.getAddress());
+                    activeData.getAddress(),
+                    clientData.getAddress());
             }
             return result;
         } catch (ZkNoNodeException e) {
@@ -235,7 +241,7 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
         if (listener != null) {
             // 触发回调
             listener.processActiveEnter();
-            this.clientData.setAddress(/*address*/AddressUtils.getHostIp() + ":" + virtualPort);
+            this.clientData.setAddress(/* address */AddressUtils.getHostIp() + ":" + virtualPort);
 
             String path = getTopicClientRunning(this.topic, clientData.getGroupId());
             // 序列化

+ 3 - 3
kafka/src/main/java/com/alibaba/otter/canal/kafka/CanalServerStarter.java

@@ -17,9 +17,9 @@ import com.alibaba.otter.canal.deployer.CanalController;
  */
 public class CanalServerStarter {
 
-    private static final String CLASSPATH_URL_PREFIX = "classpath:";
-    private static final Logger logger               = LoggerFactory.getLogger(CanalServerStarter.class);
-    private volatile static boolean running          = false;
+    private static final String     CLASSPATH_URL_PREFIX = "classpath:";
+    private static final Logger     logger               = LoggerFactory.getLogger(CanalServerStarter.class);
+    private volatile static boolean running              = false;
 
     public static void init() {
         try {

+ 27 - 15
kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/CanalKafkaProducer.java

@@ -1,5 +1,6 @@
 package com.alibaba.otter.canal.kafka.producer;
 
+import java.io.IOException;
 import java.util.Properties;
 
 import org.apache.kafka.clients.producer.KafkaProducer;
@@ -10,7 +11,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.alibaba.otter.canal.kafka.producer.KafkaProperties.Topic;
-import com.alibaba.otter.canal.protocol.CanalEntry;
 import com.alibaba.otter.canal.protocol.Message;
 
 /**
@@ -49,21 +49,33 @@ public class CanalKafkaProducer {
         }
     }
 
-    public void send(Topic topic, Message message) {
-        boolean valid = false;
-        if (message != null && !message.getEntries().isEmpty()) {
-            for (CanalEntry.Entry entry : message.getEntries()) {
-                if (entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONBEGIN
-                    && entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONEND) {
-                    valid = true;
-                    break;
-                }
-            }
-        }
-        if (!valid) {
-            return;
-        }
+    public void send(Topic topic, Message message) throws IOException {
+        // set canal.instance.filter.transaction.entry = true
 
+        // boolean valid = false;
+        // if (message != null) {
+        // if (message.isRaw() && !message.getRawEntries().isEmpty()) {
+        // for (ByteString byteString : message.getRawEntries()) {
+        // CanalEntry.Entry entry = CanalEntry.Entry.parseFrom(byteString);
+        // if (entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONBEGIN
+        // && entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONEND) {
+        // valid = true;
+        // break;
+        // }
+        // }
+        // } else if (!message.getEntries().isEmpty()){
+        // for (CanalEntry.Entry entry : message.getEntries()) {
+        // if (entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONBEGIN
+        // && entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONEND) {
+        // valid = true;
+        // break;
+        // }
+        // }
+        // }
+        // }
+        // if (!valid) {
+        // return;
+        // }
         ProducerRecord<String, Message> record;
         if (topic.getPartition() != null) {
             record = new ProducerRecord<String, Message>(topic.getTopic(), topic.getPartition(), null, message);

+ 5 - 2
kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/CanalKafkaStarter.java

@@ -52,7 +52,10 @@ public class CanalKafkaStarter {
             // 初始化 kafka producer
             canalKafkaProducer = new CanalKafkaProducer();
             canalKafkaProducer.init(kafkaProperties);
-
+            // set filterTransactionEntry
+            if (kafkaProperties.isFilterTransactionEntry()) {
+                System.setProperty("canal.instance.filter.transaction.entry", "true");
+            }
             // 对应每个instance启动一个worker线程
             List<CanalDestination> destinations = kafkaProperties.getCanalDestinations();
 
@@ -118,7 +121,7 @@ public class CanalKafkaStarter {
                     Message message = server.getWithoutAck(clientIdentity, kafkaProperties.getCanalBatchSize()); // 获取指定数量的数据
                     long batchId = message.getId();
                     try {
-                        int size = message.getEntries().size();
+                        int size = message.isRaw() ?  message.getRawEntries().size() : message.getEntries().size();
                         if (batchId != -1 && size != 0) {
                             if (!StringUtils.isEmpty(destination.getTopic())) {
                                 Topic topic = new Topic();

+ 17 - 8
kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/KafkaProperties.java

@@ -13,15 +13,15 @@ import java.util.Set;
  */
 public class KafkaProperties {
 
-    private String                 servers           = "localhost:6667";
-    private int                    retries           = 0;
-    private int                    batchSize         = 16384;
-    private int                    lingerMs          = 1;
-    private long                   bufferMemory      = 33554432L;
+    private String                 servers                = "localhost:6667";
+    private int                    retries                = 0;
+    private int                    batchSize              = 16384;
+    private int                    lingerMs               = 1;
+    private long                   bufferMemory           = 33554432L;
+    private boolean                filterTransactionEntry = true;
+    private int                    canalBatchSize         = 5;
 
-    private int                    canalBatchSize    = 5;
-
-    private List<CanalDestination> canalDestinations = new ArrayList<CanalDestination>();
+    private List<CanalDestination> canalDestinations      = new ArrayList<CanalDestination>();
 
     public static class CanalDestination {
 
@@ -158,4 +158,13 @@ public class KafkaProperties {
     public void setCanalDestinations(List<CanalDestination> canalDestinations) {
         this.canalDestinations = canalDestinations;
     }
+
+    public boolean isFilterTransactionEntry() {
+        return filterTransactionEntry;
+    }
+
+    public void setFilterTransactionEntry(boolean filterTransactionEntry) {
+        this.filterTransactionEntry = filterTransactionEntry;
+    }
+
 }

+ 52 - 10
kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/MessageSerializer.java

@@ -1,5 +1,6 @@
 package com.alibaba.otter.canal.kafka.producer;
 
+import java.util.List;
 import java.util.Map;
 
 import org.apache.kafka.common.errors.SerializationException;
@@ -8,7 +9,11 @@ import org.springframework.util.CollectionUtils;
 
 import com.alibaba.otter.canal.protocol.CanalEntry;
 import com.alibaba.otter.canal.protocol.CanalPacket;
+import com.alibaba.otter.canal.protocol.CanalPacket.PacketType;
 import com.alibaba.otter.canal.protocol.Message;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.CodedOutputStream;
+import com.google.protobuf.WireFormat;
 
 /**
  * Kafka Message类的序列化
@@ -25,22 +30,59 @@ public class MessageSerializer implements Serializer<Message> {
     @Override
     public byte[] serialize(String topic, Message data) {
         try {
-            if (data == null) return null;
-            else {
-                CanalPacket.Messages.Builder messageBuilder = CanalPacket.Messages.newBuilder();
-                if (data.getId() != -1 && !CollectionUtils.isEmpty(data.getEntries())) {
-                    for (CanalEntry.Entry entry : data.getEntries()) {
-                        messageBuilder.addMessages(entry.toByteString());
+            if (data != null) {
+                if (data.getId() != -1) {
+                    if (data.isRaw() && !CollectionUtils.isEmpty(data.getRawEntries())) {
+                        // for performance
+                        List<ByteString> rowEntries = data.getRawEntries();
+                        // message size
+                        int messageSize = 0;
+                        messageSize += com.google.protobuf.CodedOutputStream.computeInt64Size(1, data.getId());
+
+                        int dataSize = 0;
+                        for (int i = 0; i < rowEntries.size(); i++) {
+                            dataSize += com.google.protobuf.CodedOutputStream.computeBytesSizeNoTag(rowEntries.get(i));
+                        }
+                        messageSize += dataSize;
+                        messageSize += 1 * rowEntries.size();
+                        // packet size
+                        int size = 0;
+                        size += com.google.protobuf.CodedOutputStream.computeEnumSize(3,
+                            PacketType.MESSAGES.getNumber());
+                        size += com.google.protobuf.CodedOutputStream.computeTagSize(5)
+                                + com.google.protobuf.CodedOutputStream.computeRawVarint32Size(messageSize)
+                                + messageSize;
+                        // build data
+                        byte[] body = new byte[size];
+                        CodedOutputStream output = CodedOutputStream.newInstance(body);
+                        output.writeEnum(3, PacketType.MESSAGES.getNumber());
+
+                        output.writeTag(5, WireFormat.WIRETYPE_LENGTH_DELIMITED);
+                        output.writeRawVarint32(messageSize);
+                        // message
+                        output.writeInt64(1, data.getId());
+                        for (int i = 0; i < rowEntries.size(); i++) {
+                            output.writeBytes(2, rowEntries.get(i));
+                        }
+                        output.checkNoSpaceLeft();
+                        return body;
+                    } else if (!CollectionUtils.isEmpty(data.getEntries())) {
+                        CanalPacket.Messages.Builder messageBuilder = CanalPacket.Messages.newBuilder();
+                        for (CanalEntry.Entry entry : data.getEntries()) {
+                            messageBuilder.addMessages(entry.toByteString());
+                        }
+
+                        CanalPacket.Packet.Builder packetBuilder = CanalPacket.Packet.newBuilder();
+                        packetBuilder.setType(CanalPacket.PacketType.MESSAGES);
+                        packetBuilder.setBody(messageBuilder.build().toByteString());
+                        return packetBuilder.build().toByteArray();
                     }
                 }
-                CanalPacket.Packet.Builder packetBuilder = CanalPacket.Packet.newBuilder();
-                packetBuilder.setType(CanalPacket.PacketType.MESSAGES);
-                packetBuilder.setBody(messageBuilder.build().toByteString());
-                return packetBuilder.build().toByteArray();
             }
         } catch (Exception e) {
             throw new SerializationException("Error when serializing message to byte[] ");
         }
+        return null;
     }
 
     @Override

+ 1 - 0
kafka/src/main/resources/kafka.yml

@@ -5,6 +5,7 @@ lingerMs: 1
 bufferMemory: 33554432
 # canal的批次大小,单位 k
 canalBatchSize: 50
+filterTransactionEntry: true
 
 canalDestinations:
   - canalDestination: example

+ 30 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/exception/PositionNotFoundException.java

@@ -0,0 +1,30 @@
+package com.alibaba.otter.canal.parse.exception;
+
+/**
+ * @author chengjin.lyf on 2018/7/20 下午2:54
+ * @since 1.0.25
+ */
+public class PositionNotFoundException extends CanalParseException {
+
+    private static final long serialVersionUID = -7382448928116244017L;
+
+    public PositionNotFoundException(String errorCode) {
+        super(errorCode);
+    }
+
+    public PositionNotFoundException(String errorCode, Throwable cause) {
+        super(errorCode, cause);
+    }
+
+    public PositionNotFoundException(String errorCode, String errorDesc) {
+        super(errorCode, errorDesc);
+    }
+
+    public PositionNotFoundException(String errorCode, String errorDesc, Throwable cause) {
+        super(errorCode, errorDesc, cause);
+    }
+
+    public PositionNotFoundException(Throwable cause) {
+        super(cause);
+    }
+}

+ 30 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/exception/ServerIdNotMatchException.java

@@ -0,0 +1,30 @@
+package com.alibaba.otter.canal.parse.exception;
+
+import com.alibaba.otter.canal.common.CanalException;
+
+/**
+ * @author chengjin.lyf on 2018/8/8 下午1:07
+ * @since 1.0.25
+ */
+public class ServerIdNotMatchException extends CanalException{
+
+    public ServerIdNotMatchException(String errorCode) {
+        super(errorCode);
+    }
+
+    public ServerIdNotMatchException(String errorCode, Throwable cause) {
+        super(errorCode, cause);
+    }
+
+    public ServerIdNotMatchException(String errorCode, String errorDesc) {
+        super(errorCode, errorDesc);
+    }
+
+    public ServerIdNotMatchException(String errorCode, String errorDesc, Throwable cause) {
+        super(errorCode, errorDesc, cause);
+    }
+
+    public ServerIdNotMatchException(Throwable cause) {
+        super(cause);
+    }
+}

+ 40 - 6
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/AbstractEventParser.java

@@ -8,6 +8,7 @@ import java.util.TimerTask;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 
+import com.alibaba.otter.canal.parse.exception.PositionNotFoundException;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.exception.ExceptionUtils;
 import org.apache.commons.lang.math.RandomUtils;
@@ -94,6 +95,10 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
                                                                                     .availableProcessors() * 60 / 100;     // 60%的能力跑解析,剩余部分处理网络
     protected int                                    parallelBufferSize         = 256;                                     // 必须为2的幂
     protected MultiStageCoprocessor                  multiStageCoprocessor;
+    protected ParserExceptionHandler                 parserExceptionHandler;
+    protected long serverId;
+
+
 
     protected abstract BinlogParser buildParser();
 
@@ -170,11 +175,16 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
                         preDump(erosaConnection);
 
                         erosaConnection.connect();// 链接
+
+                        long queryServerId = erosaConnection.queryServerId();
+                        if (queryServerId != 0){
+                            serverId = queryServerId;
+                        }
                         // 4. 获取最后的位置信息
                         EntryPosition position = findStartPosition(erosaConnection);
                         final EntryPosition startPosition = position;
                         if (startPosition == null) {
-                            throw new CanalParseException("can't find start position for " + destination);
+                            throw new PositionNotFoundException("can't find start position for " + destination);
                         }
 
                         if (!processTableMeta(startPosition)) {
@@ -277,6 +287,9 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
                                 runningInfo.getAddress().toString()), e);
                             sendAlarm(destination, ExceptionUtils.getFullStackTrace(e));
                         }
+                        if (parserExceptionHandler!=null){
+                            parserExceptionHandler.handle(e);
+                        }
                     } finally {
                         // 重新置为中断状态
                         Thread.interrupted();
@@ -303,7 +316,12 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
                     transactionBuffer.reset();// 重置一下缓冲队列,重新记录数据
                     binlogParser.reset();// 重新置位
                     if (multiStageCoprocessor != null) {
-                        multiStageCoprocessor.reset();
+                        // 处理 RejectedExecutionException
+                        try {
+                            multiStageCoprocessor.reset();
+                        } catch (Throwable t) {
+                            logger.debug("multi processor rejected:", t);
+                        }
                     }
 
                     if (running) {
@@ -331,6 +349,11 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
         stopHeartBeat(); // 先停止心跳
         parseThread.interrupt(); // 尝试中断
         eventSink.interrupt();
+
+        if (multiStageCoprocessor != null && multiStageCoprocessor.isStart()) {
+            multiStageCoprocessor.stop();
+        }
+
         try {
             parseThread.join();// 等待其结束
         } catch (InterruptedException e) {
@@ -343,10 +366,6 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
         if (transactionBuffer.isStart()) {
             transactionBuffer.stop();
         }
-
-        if (multiStageCoprocessor != null && multiStageCoprocessor.isStart()) {
-            multiStageCoprocessor.stop();
-        }
     }
 
     protected boolean consumeTheEventAndProfilingIfNecessary(List<CanalEntry.Entry> entrys) throws CanalSinkException,
@@ -609,4 +628,19 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
         this.parallelBufferSize = parallelBufferSize;
     }
 
+    public ParserExceptionHandler getParserExceptionHandler() {
+        return parserExceptionHandler;
+    }
+
+    public void setParserExceptionHandler(ParserExceptionHandler parserExceptionHandler) {
+        this.parserExceptionHandler = parserExceptionHandler;
+    }
+
+    public long getServerId() {
+        return serverId;
+    }
+
+    public void setServerId(long serverId) {
+        this.serverId = serverId;
+    }
 }

+ 2 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/ErosaConnection.java

@@ -40,4 +40,6 @@ public interface ErosaConnection {
     public void dump(GTIDSet gtidSet, MultiStageCoprocessor coprocessor) throws IOException;
 
     ErosaConnection fork();
+
+    public long queryServerId() throws IOException;
 }

+ 2 - 2
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/MultiStageCoprocessor.java

@@ -21,9 +21,9 @@ public interface MultiStageCoprocessor extends CanalLifeCycle {
     /**
      * 网络数据投递
      */
-    public void publish(LogBuffer buffer);
+    public boolean publish(LogBuffer buffer);
 
-    public void publish(LogBuffer buffer, String binlogFileName);
+    public boolean publish(LogBuffer buffer, String binlogFileName);
 
     public void reset();
 }

+ 9 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/ParserExceptionHandler.java

@@ -0,0 +1,9 @@
+package com.alibaba.otter.canal.parse.inbound;
+
+/**
+ * @author chengjin.lyf on 2018/7/20 下午3:55
+ * @since 1.0.25
+ */
+public interface ParserExceptionHandler {
+    void handle(Throwable e);
+}

+ 11 - 2
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/AbstractMysqlEventParser.java

@@ -7,6 +7,7 @@ import org.slf4j.LoggerFactory;
 
 import com.alibaba.otter.canal.filter.CanalEventFilter;
 import com.alibaba.otter.canal.filter.aviater.AviaterRegexFilter;
+import com.alibaba.otter.canal.parse.CanalEventParser;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.MysqlGTIDSet;
 import com.alibaba.otter.canal.parse.exception.CanalParseException;
 import com.alibaba.otter.canal.parse.inbound.AbstractEventParser;
@@ -91,8 +92,16 @@ public abstract class AbstractMysqlEventParser extends AbstractEventParser {
     public void start() throws CanalParseException {
         if (enableTsdb) {
             if (tableMetaTSDB == null) {
-                // 初始化
-                tableMetaTSDB = TableMetaTSDBBuilder.build(destination, tsdbSpringXml);
+                synchronized (CanalEventParser.class) {
+                    try {
+                        // 设置当前正在加载的通道,加载spring查找文件时会用到该变量
+                        System.setProperty("canal.instance.destination", destination);
+                        // 初始化
+                        tableMetaTSDB = TableMetaTSDBBuilder.build(destination, tsdbSpringXml);
+                    } finally {
+                        System.setProperty("canal.instance.destination", "");
+                    }
+                }
             }
         }
 

+ 53 - 3
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/LocalBinLogConnection.java

@@ -4,6 +4,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.List;
 
+import com.alibaba.otter.canal.parse.exception.ServerIdNotMatchException;
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
@@ -36,6 +37,9 @@ public class LocalBinLogConnection implements ErosaConnection {
     private String              directory;
     private int                 bufferSize = 16 * 1024;
     private boolean             running    = false;
+    private long                serverId;
+    private FileParserListener  parserListener;
+
 
     public LocalBinLogConnection(){
     }
@@ -96,6 +100,9 @@ public class LocalBinLogConnection implements ErosaConnection {
                     if (event == null) {
                         continue;
                     }
+                    if (serverId != 0 && event.getServerId() != serverId){
+                        throw new ServerIdNotMatchException("unexpected serverId "+serverId + " in binlog file !");
+                    }
 
                     if (!func.sink(event)) {
                         needContinue = false;
@@ -103,8 +110,9 @@ public class LocalBinLogConnection implements ErosaConnection {
                     }
                 }
 
+                fetcher.close(); // 关闭上一个文件
+                parserFinish(current.getName());
                 if (needContinue) {// 读取下一个
-                    fetcher.close(); // 关闭上一个文件
 
                     File nextFile;
                     if (needWait) {
@@ -160,6 +168,11 @@ public class LocalBinLogConnection implements ErosaConnection {
                 while (fetcher.fetch()) {
                     LogEvent event = decoder.decode(fetcher, context);
                     if (event != null) {
+
+                        if (serverId != 0 && event.getServerId() != serverId){
+                            throw new ServerIdNotMatchException("unexpected serverId "+serverId + " in binlog file !");
+                        }
+
                         if (event.getWhen() > timestampSeconds) {
                             break;
                         }
@@ -221,11 +234,16 @@ public class LocalBinLogConnection implements ErosaConnection {
                 while (fetcher.fetch()) {
                     LogBuffer buffer = fetcher.duplicate();
                     fetcher.consume(fetcher.limit());
-                    coprocessor.publish(buffer, binlogfilename); // set filename
+                    // set filename
+                    if (!coprocessor.publish(buffer, binlogfilename)) {
+                        needContinue = false;
+                        break;
+                    }
                 }
 
+                fetcher.close(); // 关闭上一个文件
+                parserFinish(binlogfilename);
                 if (needContinue) {// 读取下一个
-                    fetcher.close(); // 关闭上一个文件
 
                     File nextFile;
                     if (needWait) {
@@ -254,6 +272,12 @@ public class LocalBinLogConnection implements ErosaConnection {
         }
     }
 
+    private void parserFinish(String fileName){
+        if (parserListener != null){
+            parserListener.onFinish(fileName);
+        }
+    }
+
     @Override
     public void dump(long timestampMills, MultiStageCoprocessor coprocessor) throws IOException {
         List<File> currentBinlogs = binlogs.currentBinlogs();
@@ -282,6 +306,11 @@ public class LocalBinLogConnection implements ErosaConnection {
                 while (fetcher.fetch()) {
                     LogEvent event = decoder.decode(fetcher, context);
                     if (event != null) {
+
+                        if (serverId != 0 && event.getServerId() != serverId){
+                            throw new ServerIdNotMatchException("unexpected serverId "+serverId + " in binlog file !");
+                        }
+
                         if (event.getWhen() > timestampSeconds) {
                             break;
                         }
@@ -340,6 +369,11 @@ public class LocalBinLogConnection implements ErosaConnection {
         return connection;
     }
 
+    @Override
+    public long queryServerId() {
+        return 0;
+    }
+
     public boolean isNeedWait() {
         return needWait;
     }
@@ -364,4 +398,20 @@ public class LocalBinLogConnection implements ErosaConnection {
         this.bufferSize = bufferSize;
     }
 
+    public long getServerId() {
+        return serverId;
+    }
+
+    public void setServerId(long serverId) {
+        this.serverId = serverId;
+    }
+
+    public void setParserListener(FileParserListener parserListener) {
+        this.parserListener = parserListener;
+    }
+
+    public interface FileParserListener{
+        void onFinish(String fileName);
+    }
+
 }

+ 18 - 2
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlConnection.java

@@ -9,8 +9,10 @@ import java.util.List;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.math.NumberUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.springframework.util.CollectionUtils;
 
 import com.alibaba.otter.canal.parse.driver.mysql.MysqlConnector;
 import com.alibaba.otter.canal.parse.driver.mysql.MysqlQueryExecutor;
@@ -206,7 +208,9 @@ public class MysqlConnection implements ErosaConnection {
             while (fetcher.fetch()) {
                 LogBuffer buffer = fetcher.duplicate();
                 fetcher.consume(fetcher.limit());
-                coprocessor.publish(buffer);
+                if (!coprocessor.publish(buffer)) {
+                    break;
+                }
             }
         } finally {
             fetcher.close();
@@ -230,7 +234,9 @@ public class MysqlConnection implements ErosaConnection {
             while (fetcher.fetch()) {
                 LogBuffer buffer = fetcher.duplicate();
                 fetcher.consume(fetcher.limit());
-                coprocessor.publish(buffer);
+                if (!coprocessor.publish(buffer)) {
+                    break;
+                }
             }
         } finally {
             fetcher.close();
@@ -320,6 +326,16 @@ public class MysqlConnection implements ErosaConnection {
         return connection;
     }
 
+    @Override
+    public long queryServerId() throws IOException {
+        ResultSetPacket resultSetPacket = query("show variables like 'server_id'");
+        List<String> fieldValues = resultSetPacket.getFieldValues();
+        if (fieldValues == null || fieldValues.size() != 2){
+            return 0;
+        }
+        return NumberUtils.toLong(fieldValues.get(1));
+    }
+
     // ====================== help method ====================
 
     /**

+ 29 - 9
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlEventParser.java

@@ -10,6 +10,11 @@ import java.util.Map;
 import java.util.TimerTask;
 import java.util.concurrent.atomic.AtomicLong;
 
+import com.alibaba.otter.canal.parse.inbound.*;
+import com.alibaba.otter.canal.parse.inbound.mysql.tablemeta.TableMetaCacheInterface;
+import com.alibaba.otter.canal.parse.inbound.mysql.tablemeta.TableMetaCacheWithStorage;
+import com.alibaba.otter.canal.parse.inbound.mysql.tablemeta.TableMetaStorage;
+import com.alibaba.otter.canal.parse.inbound.mysql.tablemeta.TableMetaStorageFactory;
 import org.apache.commons.lang.StringUtils;
 import org.springframework.util.CollectionUtils;
 
@@ -20,9 +25,6 @@ import com.alibaba.otter.canal.parse.driver.mysql.packets.server.FieldPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.server.ResultSetPacket;
 import com.alibaba.otter.canal.parse.exception.CanalParseException;
 import com.alibaba.otter.canal.parse.ha.CanalHAController;
-import com.alibaba.otter.canal.parse.inbound.ErosaConnection;
-import com.alibaba.otter.canal.parse.inbound.HeartBeatCallback;
-import com.alibaba.otter.canal.parse.inbound.SinkFunction;
 import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection.BinlogFormat;
 import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection.BinlogImage;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.LogEventConvert;
@@ -53,16 +55,16 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
     private int                receiveBufferSize                 = 64 * 1024;
     private int                sendBufferSize                    = 64 * 1024;
     // 数据库信息
-    private AuthenticationInfo masterInfo;                                   // 主库
-    private AuthenticationInfo standbyInfo;                                  // 备库
+    protected AuthenticationInfo masterInfo;                                   // 主库
+    protected AuthenticationInfo standbyInfo;                                  // 备库
     // binlog信息
-    private EntryPosition      masterPosition;
-    private EntryPosition      standbyPosition;
+    protected EntryPosition      masterPosition;
+    protected EntryPosition      standbyPosition;
     private long               slaveId;                                      // 链接到mysql的slave
     // 心跳检查信息
     private String             detectingSQL;                                 // 心跳sql
     private MysqlConnection    metaConnection;                               // 查询meta信息的链接
-    private TableMetaCache     tableMetaCache;                               // 对应meta
+    private TableMetaCacheInterface tableMetaCache;                               // 对应meta
                                                                               // cache
     private int                fallbackIntervalInSeconds         = 60;       // 切换回退时间
     private BinlogFormat[]     supportBinlogFormats;                         // 支持的binlogFormat,如果设置会执行强校验
@@ -72,6 +74,8 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
     private int                dumpErrorCount                    = 0;        // binlogDump失败异常计数
     private int                dumpErrorCountThreshold           = 2;        // binlogDump失败异常计数阀值
 
+    private TableMetaStorageFactory tableMetaStorageFactory;
+
     protected ErosaConnection buildErosaConnection() {
         return buildMysqlConnection(this.runningInfo);
     }
@@ -125,7 +129,13 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
                 ((DatabaseTableMeta) tableMetaTSDB).setBlackFilter(eventBlackFilter);
             }
 
-            tableMetaCache = new TableMetaCache(metaConnection, tableMetaTSDB);
+
+            TableMetaStorage storage = null;
+            if (tableMetaStorageFactory != null) {
+                storage = tableMetaStorageFactory.getTableMetaStorage();
+            }
+
+            tableMetaCache = new TableMetaCacheWithStorage(metaConnection, storage);
             ((LogEventConvert) binlogParser).setTableMetaCache(tableMetaCache);
         }
     }
@@ -643,6 +653,9 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
                 throw new CanalParseException("command : 'show master status' has an error! pls check. you need (at least one of) the SUPER,REPLICATION CLIENT privilege(s) for this operation");
             }
             EntryPosition endPosition = new EntryPosition(fields.get(0), Long.valueOf(fields.get(1)));
+            if (isGTIDMode && fields.size() > 4) {
+                endPosition.setGtid(fields.get(4));
+            }
             return endPosition;
         } catch (IOException e) {
             throw new CanalParseException("command : 'show master status' has an error!", e);
@@ -908,4 +921,11 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
         this.dumpErrorCountThreshold = dumpErrorCountThreshold;
     }
 
+    public TableMetaStorageFactory getTableMetaStorageFactory() {
+        return tableMetaStorageFactory;
+    }
+
+    public void setTableMetaStorageFactory(TableMetaStorageFactory tableMetaStorageFactory) {
+        this.tableMetaStorageFactory = tableMetaStorageFactory;
+    }
 }

+ 38 - 34
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlMultiStageCoprocessor.java

@@ -52,17 +52,17 @@ import com.taobao.tddl.dbsync.binlog.event.WriteRowsLogEvent;
  */
 public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implements MultiStageCoprocessor {
 
-    private LogEventConvert          logEventConvert;
-    private EventTransactionBuffer   transactionBuffer;
-    private ErosaConnection          connection;
-
-    private int                      parserThreadCount;
-    private int                      ringBufferSize;
-    private RingBuffer<MessageEvent> disruptorMsgBuffer;
-    private ExecutorService          parserExecutor;
-    private ExecutorService          stageExecutor;
-    private String                   destination;
-    private CanalParseException      exception;
+    private LogEventConvert              logEventConvert;
+    private EventTransactionBuffer       transactionBuffer;
+    private ErosaConnection              connection;
+
+    private int                          parserThreadCount;
+    private int                          ringBufferSize;
+    private RingBuffer<MessageEvent>     disruptorMsgBuffer;
+    private ExecutorService              parserExecutor;
+    private ExecutorService              stageExecutor;
+    private String                       destination;
+    private volatile CanalParseException exception;
 
     public MysqlMultiStageCoprocessor(int ringBufferSize, int parserThreadCount, LogEventConvert logEventConvert,
                                       EventTransactionBuffer transactionBuffer, String destination){
@@ -135,22 +135,31 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
         } catch (Throwable e) {
             // ignore
         }
-        disruptorMsgBuffer = null;
         super.stop();
     }
 
     /**
      * 网络数据投递
      */
-    public void publish(LogBuffer buffer) {
-        publish(buffer, null);
+    public boolean publish(LogBuffer buffer) {
+        return publish(buffer, null);
     }
 
-    public void publish(LogBuffer buffer, String binlogFileName) {
-        if (!isStart() && exception != null) {
-            throw exception;
+    public boolean publish(LogBuffer buffer, String binlogFileName) {
+        if (!isStart()) {
+            if (exception != null) {
+                throw exception;
+            }
+            return false;
         }
 
+        /**
+         * 由于改为processor仅终止自身stage而不是stop,那么需要由incident标识coprocessor是否正常工作。
+         * 让dump线程能够及时感知
+         */
+        if (exception != null) {
+            throw exception;
+        }
         boolean interupted = false;
         do {
             try {
@@ -168,10 +177,7 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
                 interupted = Thread.interrupted();
             }
         } while (!interupted && isStart());
-
-        if (exception != null) {
-            throw exception;
-        }
+        return isStart();
     }
 
     @Override
@@ -197,10 +203,15 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
             try {
                 LogBuffer buffer = event.getBuffer();
                 if (StringUtils.isNotEmpty(event.getBinlogFileName())
-                    && !context.getLogPosition().getFileName().equals(event.getBinlogFileName())) {
+                    && (context.getLogPosition() == null
+                    || !context.getLogPosition().getFileName().equals(event.getBinlogFileName()))) {
                     // set roate binlog file name
-                    context.setLogPosition(new LogPosition(event.getBinlogFileName(), context.getLogPosition()
-                        .getPosition()));
+                    if (context.getLogPosition() == null){
+                        context.setLogPosition(new LogPosition(event.getBinlogFileName(), 0));
+                    }else{
+                        context.setLogPosition(new LogPosition(event.getBinlogFileName(), context.getLogPosition()
+                                .getPosition()));
+                    }
                 }
 
                 LogEvent logEvent = decoder.decode(buffer, context);
@@ -249,9 +260,7 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
 
         @Override
         public void onShutdown() {
-            if (isStart()) {
-                stop();
-            }
+
         }
     }
 
@@ -287,9 +296,7 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
 
         @Override
         public void onShutdown() {
-            if (isStart()) {
-                stop();
-            }
+
         }
     }
 
@@ -328,9 +335,7 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
 
         @Override
         public void onShutdown() {
-            if (isStart()) {
-                stop();
-            }
+
         }
     }
 
@@ -397,7 +402,6 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
 
         @Override
         public void handleEventException(final Throwable ex, final long sequence, final Object event) {
-            throw new RuntimeException(ex);
         }
 
         @Override

+ 0 - 3
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/DirectLogFetcher.java

@@ -64,9 +64,6 @@ public class DirectLogFetcher extends LogFetcher {
         if ("1".equals(dbsemi)) {
             issemi = true;
         }
-        // 和mysql driver一样,提供buffer机制,提升读取binlog速度
-        // this.input = new
-        // BufferedInputStream(channel.socket().getInputStream(), 16384);
     }
 
     /**

+ 10 - 4
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/LogEventConvert.java

@@ -10,6 +10,9 @@ import java.util.Arrays;
 import java.util.BitSet;
 import java.util.List;
 
+import com.alibaba.otter.canal.parse.inbound.mysql.tablemeta.TableMetaCacheInterface;
+import com.alibaba.otter.canal.parse.inbound.mysql.tablemeta.TableMetaStorage;
+import com.alibaba.otter.canal.parse.inbound.mysql.tablemeta.exception.NoHistoryException;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.exception.ExceptionUtils;
 import org.slf4j.Logger;
@@ -87,7 +90,10 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
     private volatile AviaterRegexFilter nameFilter;                                                          // 运行时引用可能会有变化,比如规则发生变化时
     private volatile AviaterRegexFilter nameBlackFilter;
 
-    private TableMetaCache              tableMetaCache;
+
+    private TableMetaCacheInterface tableMetaCache;
+    private String                      binlogFileName      = "mysql-bin.000001";
+
     private Charset                     charset             = Charset.defaultCharset();
     private boolean                     filterQueryDcl      = false;
     private boolean                     filterQueryDml      = false;
@@ -262,7 +268,8 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
             if (!isSeek) {
                 // 使用新的表结构元数据管理方式
                 EntryPosition position = createPosition(event.getHeader());
-                tableMetaCache.apply(position, event.getDbName(), queryString, null);
+                String fulltbName = schemaName+"."+tableName;
+                tableMetaCache.apply(position, fulltbName, queryString, null);
             }
 
             Header header = createHeader(event.getHeader(), schemaName, tableName, type);
@@ -933,7 +940,7 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
         this.nameBlackFilter = nameBlackFilter;
     }
 
-    public void setTableMetaCache(TableMetaCache tableMetaCache) {
+    public void setTableMetaCache(TableMetaCacheInterface tableMetaCache) {
         this.tableMetaCache = tableMetaCache;
     }
 
@@ -960,5 +967,4 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
     public void setGtidSet(GTIDSet gtidSet) {
         this.gtidSet = gtidSet;
     }
-
 }

+ 6 - 1
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/TableMetaCache.java

@@ -6,6 +6,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import com.alibaba.otter.canal.parse.inbound.mysql.tablemeta.TableMetaCacheInterface;
 import org.apache.commons.lang.StringUtils;
 
 import com.alibaba.otter.canal.parse.driver.mysql.packets.server.FieldPacket;
@@ -29,7 +30,7 @@ import com.google.common.cache.LoadingCache;
  * @author jianghang 2013-1-17 下午10:15:16
  * @version 1.0.0
  */
-public class TableMetaCache {
+public class TableMetaCache implements TableMetaCacheInterface {
 
     public static final String              COLUMN_NAME    = "COLUMN_NAME";
     public static final String              COLUMN_TYPE    = "COLUMN_TYPE";
@@ -99,6 +100,10 @@ public class TableMetaCache {
             String createDDL = packet.getFieldValues().get(1);
             MemoryTableMeta memoryTableMeta = new MemoryTableMeta();
             memoryTableMeta.apply(DatabaseTableMeta.INIT_POSITION, schema, createDDL, null);
+            String[] strings = table.split("\\.");
+            if (strings.length > 1) {
+                table = strings[1];
+            }
             TableMeta tableMeta = memoryTableMeta.find(schema, table);
             return tableMeta.getFields();
         } else {

+ 260 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/BinlogDownloadQueue.java

@@ -0,0 +1,260 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.rds;
+
+import java.io.*;
+import java.util.*;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
+import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.config.RequestConfig;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.alibaba.otter.canal.parse.inbound.mysql.rds.data.BinlogFile;
+
+import io.netty.handler.codec.http.HttpResponseStatus;
+
+/**
+ * @author chengjin.lyf on 2018/8/7 下午3:10
+ * @since 1.0.25
+ */
+public class BinlogDownloadQueue {
+
+    private static final Logger logger = LoggerFactory.getLogger(BinlogDownloadQueue.class);
+    private static final int      TIMEOUT             = 10000;
+
+    private LinkedBlockingQueue<BinlogFile> downloadQueue = new LinkedBlockingQueue<BinlogFile>();
+    private LinkedBlockingQueue<Runnable> taskQueue = new LinkedBlockingQueue<Runnable>();
+    private LinkedList<BinlogFile> binlogList;
+    private final int batchSize;
+    private Thread downloadThread;
+    public boolean running = true;
+    private final String destDir;
+    private String hostId;
+    private int currentSize;
+    private String lastDownload;
+
+    public BinlogDownloadQueue(List<BinlogFile> downloadQueue, int batchSize, String destDir) throws IOException {
+        this.binlogList = new LinkedList(downloadQueue);
+        this.batchSize = batchSize;
+        this.destDir = destDir;
+        this.currentSize = 0;
+        prepareBinlogList();
+        cleanDir();
+    }
+
+    private void prepareBinlogList(){
+        for (BinlogFile binlog : this.binlogList) {
+            String fileName = StringUtils.substringBetween(binlog.getDownloadLink(), "mysql-bin.", "?");
+            binlog.setFileName(fileName);
+        }
+        Collections.sort(this.binlogList, new Comparator<BinlogFile>() {
+            @Override
+            public int compare(BinlogFile o1, BinlogFile o2) {
+                return o1.getFileName().compareTo(o2.getFileName());
+            }
+        });
+    }
+
+    public void cleanDir() throws IOException {
+        File destDirFile = new File(destDir);
+        FileUtils.forceMkdir(destDirFile);
+        FileUtils.cleanDirectory(destDirFile);
+    }
+
+    public void silenceDownload() {
+        if (downloadThread != null) {
+            return;
+        }
+        downloadThread = new Thread(new DownloadThread());
+        downloadThread.start();
+    }
+
+
+    public BinlogFile tryOne() throws IOException {
+        BinlogFile binlogFile = binlogList.poll();
+        download(binlogFile);
+        hostId = binlogFile.getHostInstanceID();
+        this.currentSize ++;
+        return binlogFile;
+    }
+
+    public void notifyNotMatch(){
+        this.currentSize --;
+        filter(hostId);
+    }
+
+    private void filter(String hostInstanceId){
+        Iterator<BinlogFile> it = binlogList.iterator();
+        while (it.hasNext()){
+            BinlogFile bf = it.next();
+            if(bf.getHostInstanceID().equalsIgnoreCase(hostInstanceId)){
+                it.remove();
+            }else{
+                hostId = bf.getHostInstanceID();
+            }
+        }
+    }
+
+    public boolean isLastFile(String fileName){
+        String needCompareName = lastDownload;
+        if (StringUtils.isNotEmpty(needCompareName) && StringUtils.endsWith(needCompareName, "tar")){
+            needCompareName = needCompareName.substring(0, needCompareName.indexOf("."));
+        }
+        return fileName.equalsIgnoreCase(needCompareName) && binlogList.isEmpty();
+    }
+
+    public void prepare() throws InterruptedException {
+        for (int i = this.currentSize; i < batchSize && !binlogList.isEmpty(); i++) {
+            BinlogFile binlogFile = null;
+            while (!binlogList.isEmpty()){
+                binlogFile = binlogList.poll();
+                if (!binlogFile.getHostInstanceID().equalsIgnoreCase(hostId)){
+                    continue;
+                }
+                break;
+            }
+            if (binlogFile == null){
+                break;
+            }
+            this.downloadQueue.put(binlogFile);
+            this.lastDownload = "mysql-bin." + binlogFile.getFileName();
+            this.currentSize ++;
+        }
+    }
+
+    public void downOne(){
+        this.currentSize --;
+    }
+
+    public void release(){
+        running = false;
+        this.currentSize = 0;
+        binlogList.clear();
+        downloadQueue.clear();
+    }
+
+    private void download(BinlogFile binlogFile) throws IOException {
+        String downloadLink = binlogFile.getDownloadLink();
+        String fileName = binlogFile.getFileName();
+        HttpGet httpGet = new HttpGet(downloadLink);
+        CloseableHttpClient httpClient = HttpClientBuilder.create()
+                .setMaxConnPerRoute(50)
+                .setMaxConnTotal(100)
+                .build();
+        RequestConfig requestConfig = RequestConfig.custom()
+                .setConnectTimeout(TIMEOUT)
+                .setConnectionRequestTimeout(TIMEOUT)
+                .setSocketTimeout(TIMEOUT)
+                .build();
+        httpGet.setConfig(requestConfig);
+        HttpResponse response = httpClient.execute(httpGet);
+        int statusCode = response.getStatusLine().getStatusCode();
+        if (statusCode != HttpResponseStatus.OK.code()) {
+            throw new RuntimeException("download failed , url:" + downloadLink + " , statusCode:"
+                                       + statusCode);
+        }
+        saveFile(new File(destDir), "mysql-bin." + fileName, response);
+    }
+
+    private static void saveFile(File parentFile, String fileName, HttpResponse response) throws IOException {
+        InputStream is = response.getEntity().getContent();
+        long totalSize = Long.parseLong(response.getFirstHeader("Content-Length").getValue());
+        if(response.getFirstHeader("Content-Disposition")!=null){
+            fileName = response.getFirstHeader("Content-Disposition").getValue();
+            fileName = StringUtils.substringAfter(fileName, "filename=");
+        }
+        boolean isTar = StringUtils.endsWith(fileName, ".tar");
+        FileUtils.forceMkdir(parentFile);
+        FileOutputStream fos = null;
+        try {
+            if (isTar) {
+                TarArchiveInputStream tais = new TarArchiveInputStream(is);
+                TarArchiveEntry tarArchiveEntry = null;
+                while ((tarArchiveEntry = tais.getNextTarEntry()) != null) {
+                    String name = tarArchiveEntry.getName();
+                    File tarFile = new File(parentFile, name + ".tmp");
+                    logger.info("start to download file " + tarFile.getName());
+                    BufferedOutputStream bos = null;
+                    try {
+                        bos = new BufferedOutputStream(new FileOutputStream(tarFile));
+                        int read = -1;
+                        byte[] buffer = new byte[1024];
+                        while ((read = tais.read(buffer)) != -1) {
+                            bos.write(buffer, 0, read);
+                        }
+                        logger.info("download file " + tarFile.getName() + " end!");
+                        tarFile.renameTo(new File(parentFile, name));
+                    } finally {
+                        IOUtils.closeQuietly(bos);
+                    }
+                }
+                tais.close();
+            } else {
+                File file = new File(parentFile, fileName + ".tmp");
+                if (!file.isFile()) {
+                    file.createNewFile();
+                }
+                try {
+                    fos = new FileOutputStream(file);
+                    byte[] buffer = new byte[1024];
+                    int len;
+                    long copySize = 0;
+                    long nextPrintProgress = 0;
+                    logger.info("start to download file " + file.getName());
+                    while ((len = is.read(buffer)) != -1) {
+                        fos.write(buffer, 0, len);
+                        copySize += len;
+                        long progress = copySize * 100 / totalSize;
+                        if (progress >= nextPrintProgress) {
+                            logger.info("download " + file.getName() + " progress : " + progress
+                                        + "% , download size : " + copySize + ", total size : " + totalSize);
+                            nextPrintProgress += 10;
+                        }
+                    }
+                    logger.info("download file " + file.getName() + " end!");
+                    fos.flush();
+                } finally {
+                    IOUtils.closeQuietly(fos);
+                }
+                file.renameTo(new File(parentFile, fileName));
+            }
+        } finally {
+            IOUtils.closeQuietly(fos);
+        }
+    }
+
+    public void execute(Runnable runnable) throws InterruptedException {
+        taskQueue.put(runnable);
+    }
+
+    private class DownloadThread implements Runnable {
+
+        @Override
+        public void run() {
+            while (running) {
+                try {
+                    BinlogFile binlogFile = downloadQueue.poll(5000, TimeUnit.MILLISECONDS);
+                    if (binlogFile != null){
+                        download(binlogFile);
+                    }
+                    Runnable runnable = taskQueue.poll(5000, TimeUnit.MILLISECONDS);
+                    if (runnable != null){
+                        runnable.run();
+                    }
+                } catch (Exception e) {
+                    e.printStackTrace();
+                }
+            }
+
+        }
+    }
+}

+ 149 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/RdsBinlogEventParserProxy.java

@@ -0,0 +1,149 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.rds;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+
+import com.alibaba.otter.canal.parse.exception.PositionNotFoundException;
+import com.alibaba.otter.canal.parse.inbound.ParserExceptionHandler;
+import com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser;
+
+/**
+ * @author chengjin.lyf on 2018/7/20 上午10:52
+ * @since 1.0.25
+ */
+public class RdsBinlogEventParserProxy extends MysqlEventParser {
+
+    private String rdsOpenApiUrl = "https://rds.aliyuncs.com/"; // openapi地址
+    private String accesskey; // 云账号的ak
+    private String secretkey; // 云账号sk
+    private String instanceId; // rds实例id
+    private Long startTime;
+    private Long endTime;
+    private String directory; //binlog 目录
+    private int batchSize = 4; //最多下载的binlog文件数量
+
+    private RdsLocalBinlogEventParser rdsBinlogEventParser = new RdsLocalBinlogEventParser();
+    private ExecutorService executorService = Executors.newSingleThreadExecutor(new ThreadFactory() {
+
+        @Override
+        public Thread newThread(Runnable r) {
+            Thread t = new Thread(r, "rds-binlog-daemon-thread");
+            t.setDaemon(true);
+            return t;
+        }
+    });
+
+    @Override
+    public void start() {
+        final ParserExceptionHandler targetHandler = this.getParserExceptionHandler();
+        rdsBinlogEventParser.setLogPositionManager(this.getLogPositionManager());
+        rdsBinlogEventParser.setDestination(destination);
+        rdsBinlogEventParser.setAlarmHandler(this.getAlarmHandler());
+        rdsBinlogEventParser.setConnectionCharset(this.connectionCharset);
+        rdsBinlogEventParser.setConnectionCharsetNumber(this.connectionCharsetNumber);
+        rdsBinlogEventParser.setEnableTsdb(this.enableTsdb);
+        rdsBinlogEventParser.setEventBlackFilter(this.eventBlackFilter);
+        rdsBinlogEventParser.setFilterQueryDcl(this.filterQueryDcl);
+        rdsBinlogEventParser.setFilterQueryDdl(this.filterQueryDdl);
+        rdsBinlogEventParser.setFilterQueryDml(this.filterQueryDml);
+        rdsBinlogEventParser.setFilterRows(this.filterRows);
+        rdsBinlogEventParser.setFilterTableError(this.filterTableError);
+        rdsBinlogEventParser.setIsGTIDMode(this.isGTIDMode);
+        rdsBinlogEventParser.setMasterInfo(this.masterInfo);
+        rdsBinlogEventParser.setEventFilter(this.eventFilter);
+        rdsBinlogEventParser.setMasterPosition(this.masterPosition);
+        rdsBinlogEventParser.setTransactionSize(this.transactionSize);
+        rdsBinlogEventParser.setUrl(this.rdsOpenApiUrl);
+        rdsBinlogEventParser.setAccesskey(this.accesskey);
+        rdsBinlogEventParser.setSecretkey(this.secretkey);
+        rdsBinlogEventParser.setInstanceId(this.instanceId);
+        rdsBinlogEventParser.setEventSink(eventSink);
+        rdsBinlogEventParser.setDirectory(directory);
+        rdsBinlogEventParser.setBatchSize(batchSize);
+        rdsBinlogEventParser.setFinishListener(new RdsLocalBinlogEventParser.ParseFinishListener() {
+            @Override
+            public void onFinish() {
+                executorService.execute(new Runnable() {
+                    @Override
+                    public void run() {
+                        rdsBinlogEventParser.stop();
+                        RdsBinlogEventParserProxy.this.start();
+                    }
+                });
+
+            }
+        });
+        this.setParserExceptionHandler(new ParserExceptionHandler() {
+
+            @Override
+            public void handle(Throwable e) {
+                handleMysqlParserException(e);
+                if (targetHandler != null) {
+                    targetHandler.handle(e);
+                }
+            }
+        });
+        super.start();
+    }
+
+    public void handleMysqlParserException(Throwable throwable) {
+        if (throwable instanceof PositionNotFoundException) {
+            logger.info("remove rds not found position, try download rds binlog!");
+            executorService.execute(new Runnable() {
+
+                @Override
+                public void run() {
+                    try {
+                        logger.info("stop mysql parser!");
+                        RdsBinlogEventParserProxy rdsBinlogEventParserProxy = RdsBinlogEventParserProxy.this;
+                        long serverId = rdsBinlogEventParserProxy.getServerId();
+                        rdsBinlogEventParser.setServerId(serverId);
+                        rdsBinlogEventParserProxy.stop();
+                        logger.info("start rds mysql binlog parser!");
+                        rdsBinlogEventParser.start();
+                    } catch (Exception e) {
+                        e.printStackTrace();
+                    }
+                }
+            });
+        }
+    }
+
+    @Override
+    public void stop() {
+        super.stop();
+    }
+
+    @Override
+    public boolean isStart() {
+        return super.isStart();
+    }
+
+    public void setRdsOpenApiUrl(String rdsOpenApiUrl) {
+        this.rdsOpenApiUrl = rdsOpenApiUrl;
+    }
+
+
+    public void setAccesskey(String accesskey) {
+        this.accesskey = accesskey;
+    }
+
+
+    public void setSecretkey(String secretkey) {
+        this.secretkey = secretkey;
+    }
+
+
+    public void setInstanceId(String instanceId) {
+        this.instanceId = instanceId;
+    }
+
+    public void setDirectory(String directory) {
+        this.directory = directory;
+    }
+
+    public void setBatchSize(int batchSize) {
+        this.batchSize = batchSize;
+    }
+}

+ 53 - 8
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/RdsBinlogOpenApi.java

@@ -1,5 +1,9 @@
 package com.alibaba.otter.canal.parse.inbound.mysql.rds;
 
+import com.alibaba.otter.canal.parse.inbound.mysql.rds.data.BinlogFile;
+import com.alibaba.otter.canal.parse.inbound.mysql.rds.data.DescribeBinlogFileResult;
+import com.alibaba.otter.canal.parse.inbound.mysql.rds.data.RdsItem;
+import com.alibaba.otter.canal.parse.inbound.mysql.rds.request.DescribeBinlogFilesRequest;
 import io.netty.handler.codec.http.HttpResponseStatus;
 
 import java.io.BufferedOutputStream;
@@ -9,16 +13,11 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.UnsupportedEncodingException;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.net.URLEncoder;
 import java.text.SimpleDateFormat;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TimeZone;
-import java.util.TreeMap;
-import java.util.UUID;
+import java.util.*;
 
 import javax.crypto.Mac;
 import javax.crypto.SecretKey;
@@ -56,6 +55,52 @@ public class RdsBinlogOpenApi {
     private static final String   API_VERSION         = "2014-08-15";
     private static final String   SIGNATURE_VERSION   = "1.0";
 
+
+    /**
+     * Lists binlog backup files of an RDS instance in the given time range via
+     * the DescribeBinlogFiles OpenAPI (first page only, up to 100 entries).
+     *
+     * @param url          OpenAPI url; only its host is used as endpoint, falls
+     *                     back to the default endpoint if unparsable
+     * @param ak           Aliyun AccessKey id
+     * @param sk           Aliyun AccessKey secret
+     * @param dbInstanceId RDS instance id
+     * @param startTime    range start (inclusive)
+     * @param endTime      range end
+     * @return binlog file descriptors, or an empty list when none were returned
+     */
+    public static List<BinlogFile> listBinlogFiles(String url, String ak, String sk, String dbInstanceId, Date startTime,
+                                                   Date endTime) {
+        DescribeBinlogFilesRequest request = new DescribeBinlogFilesRequest();
+        if (StringUtils.isNotEmpty(url)){
+            try {
+                URI uri = new URI(url);
+                request.setEndPoint(uri.getHost());
+            } catch (URISyntaxException e) {
+                logger.error("resolve url host failed, will use default rds endpoint!");
+            }
+        }
+        request.setStartDate(startTime);
+        request.setEndDate(endTime);
+        request.setPageNumber(1);
+        request.setPageSize(100);
+        request.setRdsInstanceId(dbInstanceId);
+        request.setAccessKeyId(ak);
+        request.setAccessKeySecret(sk);
+        DescribeBinlogFileResult result = null;
+        int retryTime = 3;
+        // up to 4 attempts in total (initial call + 3 retries), 100ms apart
+        while (true){
+            try{
+                result = request.doAction();
+                break;
+            }catch (Exception e){
+                if(retryTime-- <= 0){
+                    throw new RuntimeException(e);
+                }
+                try {
+                    Thread.sleep(100L);
+                } catch (InterruptedException e1) {
+                    // NOTE(review): interrupt is swallowed; should call
+                    // Thread.currentThread().interrupt() to preserve the flag.
+                }
+            }
+        }
+        // defensive: result cannot actually be null here (loop breaks only after assignment)
+        if (result == null){
+            return Collections.EMPTY_LIST;
+        }
+        RdsItem rdsItem = result.getItems();
+        if (rdsItem != null){
+            return rdsItem.getBinLogFile();
+        }
+        // NOTE(review): raw Collections.EMPTY_LIST — Collections.<BinlogFile>emptyList()
+        // would avoid the unchecked conversion.
+        return Collections.EMPTY_LIST;
+    }
+
+
     public static void downloadBinlogFiles(String url, String ak, String sk, String dbInstanceId, Date startTime,
                                            Date endTime, File destDir) throws Throwable {
         int pageSize = 100;

+ 130 - 29
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/RdsLocalBinlogEventParser.java

@@ -2,14 +2,23 @@ package com.alibaba.otter.canal.parse.inbound.mysql.rds;
 
 import java.io.File;
 import java.util.Date;
+import java.util.List;
 
 import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.math.NumberUtils;
 import org.springframework.util.Assert;
 
 import com.alibaba.otter.canal.parse.CanalEventParser;
 import com.alibaba.otter.canal.parse.exception.CanalParseException;
+import com.alibaba.otter.canal.parse.exception.PositionNotFoundException;
+import com.alibaba.otter.canal.parse.exception.ServerIdNotMatchException;
+import com.alibaba.otter.canal.parse.inbound.ErosaConnection;
+import com.alibaba.otter.canal.parse.inbound.ParserExceptionHandler;
+import com.alibaba.otter.canal.parse.inbound.mysql.LocalBinLogConnection;
 import com.alibaba.otter.canal.parse.inbound.mysql.LocalBinlogEventParser;
+import com.alibaba.otter.canal.parse.inbound.mysql.rds.data.BinlogFile;
 import com.alibaba.otter.canal.protocol.position.EntryPosition;
+import com.alibaba.otter.canal.protocol.position.LogPosition;
 
 /**
  * 基于rds binlog备份文件的复制
@@ -17,47 +26,102 @@ import com.alibaba.otter.canal.protocol.position.EntryPosition;
  * @author agapple 2017年10月15日 下午1:27:36
  * @since 1.0.25
  */
-public class RdsLocalBinlogEventParser extends LocalBinlogEventParser implements CanalEventParser {
+public class RdsLocalBinlogEventParser extends LocalBinlogEventParser implements CanalEventParser, LocalBinLogConnection.FileParserListener {
 
     private String url = "https://rds.aliyuncs.com/"; // openapi地址
-    private String accesskey;                        // 云账号的ak
-    private String secretkey;                        // 云账号sk
-    private String instanceId;                       // rds实例id
-    private Long   startTime;
-    private Long   endTime;
+    private String accesskey; // 云账号的ak
+    private String secretkey; // 云账号sk
+    private String instanceId; // rds实例id
+    private Long startTime;
+    private Long endTime;
+    private BinlogDownloadQueue binlogDownloadQueue;
+    private ParseFinishListener finishListener;
+    private int batchSize;
 
     public RdsLocalBinlogEventParser(){
     }
 
     public void start() throws CanalParseException {
         try {
-            Assert.notNull(startTime);
             Assert.notNull(accesskey);
             Assert.notNull(secretkey);
             Assert.notNull(instanceId);
             Assert.notNull(url);
+            Assert.notNull(directory);
+
             if (endTime == null) {
                 endTime = System.currentTimeMillis();
             }
 
-            RdsBinlogOpenApi.downloadBinlogFiles(url,
-                accesskey,
+            // derive the download window start from the persisted consume position
+            // instead of a user-supplied startTime
+            EntryPosition entryPosition = findStartPosition(null);
+            if (entryPosition == null) {
+                throw new PositionNotFoundException("position not found!");
+            }
+            long startTimeInMill = entryPosition.getTimestamp();
+            startTime = startTimeInMill;
+            List<BinlogFile> binlogFiles = RdsBinlogOpenApi.listBinlogFiles(url, accesskey,
                 secretkey,
                 instanceId,
                 new Date(startTime),
-                new Date(endTime),
-                new File(directory));
-
-            // 更新一下时间戳
-            masterPosition = new EntryPosition(startTime);
+                new Date(endTime));
+            // queue downloads lazily in batches rather than fetching everything up front
+            binlogDownloadQueue = new BinlogDownloadQueue(binlogFiles, batchSize, directory);
+            binlogDownloadQueue.silenceDownload();
+            needWait = true;
+            parallel = false;
+            // download one file first so the server id can be validated
+            binlogDownloadQueue.tryOne();
         } catch (Throwable e) {
             logger.error("download binlog failed", e);
             throw new CanalParseException(e);
         }
+        // route parse failures (e.g. server-id mismatch) back into this class
+        setParserExceptionHandler(new ParserExceptionHandler() {
 
+            @Override
+            public void handle(Throwable e) {
+                handleMysqlParserException(e);
+            }
+        });
         super.start();
     }
 
+    /**
+     * On a server-id mismatch the downloaded file belongs to another RDS host
+     * instance: discard it, re-prepare the download queue, then restart the
+     * parser asynchronously via the queue's executor.
+     */
+    private void handleMysqlParserException(Throwable throwable) {
+        if (throwable instanceof ServerIdNotMatchException) {
+            logger.error("server id not match, try download another rds binlog!");
+            binlogDownloadQueue.notifyNotMatch();
+            try {
+                binlogDownloadQueue.cleanDir();
+                binlogDownloadQueue.prepare();
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+            try {
+                // restart off-thread; stop() cannot run on the parser's own thread
+                binlogDownloadQueue.execute(new Runnable() {
+
+                    @Override
+                    public void run() {
+                        RdsLocalBinlogEventParser.super.stop();
+                        RdsLocalBinlogEventParser.super.start();
+                    }
+                });
+            } catch (InterruptedException e) {
+                // NOTE(review): interrupt flag is not restored before wrapping;
+                // consider Thread.currentThread().interrupt() first.
+                throw new RuntimeException(e);
+            }
+
+        }
+    }
+
+    /**
+     * Decorates the local-binlog connection so it waits for files still being
+     * downloaded, verifies the expected server id, and notifies this parser
+     * (as FileParserListener) when a file has been fully parsed.
+     */
+    @Override
+    protected ErosaConnection buildErosaConnection() {
+        ErosaConnection connection = super.buildErosaConnection();
+        if (connection instanceof LocalBinLogConnection) {
+            LocalBinLogConnection localBinLogConnection = (LocalBinLogConnection) connection;
+            localBinLogConnection.setNeedWait(true);
+            localBinLogConnection.setServerId(serverId);
+            localBinLogConnection.setParserListener(this);
+        }
+        return connection;
+    }
+
     public String getUrl() {
         return url;
     }
@@ -68,44 +132,81 @@ public class RdsLocalBinlogEventParser extends LocalBinlogEventParser implements
         }
     }
 
-    public String getAccesskey() {
-        return accesskey;
-    }
 
     public void setAccesskey(String accesskey) {
         this.accesskey = accesskey;
     }
 
-    public String getSecretkey() {
-        return secretkey;
-    }
 
     public void setSecretkey(String secretkey) {
         this.secretkey = secretkey;
     }
 
-    public String getInstanceId() {
-        return instanceId;
-    }
 
     public void setInstanceId(String instanceId) {
         this.instanceId = instanceId;
     }
 
-    public Long getStartTime() {
-        return startTime;
-    }
 
     public void setStartTime(Long startTime) {
         this.startTime = startTime;
     }
 
-    public Long getEndTime() {
-        return endTime;
-    }
 
     public void setEndTime(Long endTime) {
         this.endTime = endTime;
     }
 
+    /**
+     * Callback fired when one downloaded binlog file has been fully parsed:
+     * deletes the consumed file, advances the persisted position to the start
+     * (offset 4) of the next binlog file, and either hands control back to the
+     * live MySQL parser (last file) or prepares the next download batch.
+     *
+     * @param fileName name of the binlog file that was just parsed
+     */
+    @Override
+    public void onFinish(String fileName) {
+        try {
+            binlogDownloadQueue.downOne();
+            File needDeleteFile = new File(directory + File.separator + fileName);
+            if (needDeleteFile.exists()){
+                needDeleteFile.delete();
+            }
+            // advance the logPositionManager position past the consumed file
+            LogPosition logPosition = logPositionManager.getLatestIndexBy(destination);
+            EntryPosition position = logPosition.getPostion();
+            if (position != null){
+                LogPosition newLogPosition = new LogPosition();
+                String journalName = position.getJournalName();
+                // NOTE(review): assumes journalName contains '.', e.g. "mysql-bin.000123";
+                // indexOf returning -1 would corrupt the substring math — confirm upstream guarantees.
+                int sepIdx = journalName.indexOf(".");
+                String fileIndex = journalName.substring(sepIdx+1);
+                // next file = numeric suffix + 1, zero-padded to the original width
+                int index = NumberUtils.toInt(fileIndex) + 1;
+                String newJournalName = journalName.substring(0, sepIdx) + "." + StringUtils.leftPad(String.valueOf(index), fileIndex.length(), "0");
+                newLogPosition.setPostion(new EntryPosition(newJournalName, 4L, position.getTimestamp(), position.getServerId()));
+                newLogPosition.setIdentity(logPosition.getIdentity());
+                logPositionManager.persistLogPosition(destination, newLogPosition);
+            }
+
+            if (binlogDownloadQueue.isLastFile(fileName)) {
+                logger.info("all file parse complete, switch to mysql parser!");
+                finishListener.onFinish();
+                return;
+            }
+            binlogDownloadQueue.prepare();
+        } catch (Exception e) {
+            logger.error("prepare download binlog file failed!", e);
+            throw new RuntimeException(e);
+        }
+    }
+
+    /** Releases the download queue (unblocks waiters) before stopping the parser. */
+    @Override
+    public void stop() {
+        // NOTE(review): binlogDownloadQueue is only assigned in start(); stop()
+        // before a successful start() would NPE here — confirm lifecycle ordering.
+        this.binlogDownloadQueue.release();
+        super.stop();
+    }
+
+    // callback invoked once all downloaded files are parsed (switch back to live parsing)
+    public void setFinishListener(ParseFinishListener finishListener) {
+        this.finishListener = finishListener;
+    }
+
+    /** Notified when every queued binlog backup file has been parsed. */
+    public interface ParseFinishListener{
+        void onFinish();
+    }
+
+    // number of binlog files downloaded/kept per batch
+    public void setBatchSize(int batchSize) {
+        this.batchSize = batchSize;
+    }
 }

+ 72 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/data/BinlogFile.java

@@ -0,0 +1,72 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.rds.data;
+
+/**
+ * DTO for one binlog backup file entry in a DescribeBinlogFiles OpenAPI
+ * response. Field names are deliberately PascalCase to mirror the JSON keys
+ * of the API payload (presumably matched by fastjson through the setters —
+ * verify against the deserialization config). {@code fileName} is the only
+ * locally-assigned property; it is not part of the API response.
+ *
+ * @author chengjin.lyf on 2018/8/7 14:26
+ * @since 1.0.25
+ */
+public class BinlogFile {
+
+    private Long FileSize;
+    private String LogBeginTime;
+    private String LogEndTime;
+    private String DownloadLink;
+    private String HostInstanceID;
+    private String LinkExpiredTime;
+    private String fileName;
+
+    public Long getFileSize() {
+        return FileSize;
+    }
+
+    public void setFileSize(Long fileSize) {
+        FileSize = fileSize;
+    }
+
+    public String getLogBeginTime() {
+        return LogBeginTime;
+    }
+
+    public void setLogBeginTime(String logBeginTime) {
+        LogBeginTime = logBeginTime;
+    }
+
+    public String getLogEndTime() {
+        return LogEndTime;
+    }
+
+    public void setLogEndTime(String logEndTime) {
+        LogEndTime = logEndTime;
+    }
+
+    public String getDownloadLink() {
+        return DownloadLink;
+    }
+
+    public void setDownloadLink(String downloadLink) {
+        DownloadLink = downloadLink;
+    }
+
+    public String getHostInstanceID() {
+        return HostInstanceID;
+    }
+
+    public void setHostInstanceID(String hostInstanceID) {
+        HostInstanceID = hostInstanceID;
+    }
+
+    public String getLinkExpiredTime() {
+        return LinkExpiredTime;
+    }
+
+    public void setLinkExpiredTime(String linkExpiredTime) {
+        LinkExpiredTime = linkExpiredTime;
+    }
+
+    public String getFileName() {
+        return fileName;
+    }
+
+    public void setFileName(String fileName) {
+        this.fileName = fileName;
+    }
+}

+ 62 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/data/DescribeBinlogFileResult.java

@@ -0,0 +1,62 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.rds.data;
+
+/**
+ * Top-level response of the DescribeBinlogFiles OpenAPI call. PascalCase
+ * field names mirror the JSON keys of the API payload so fastjson can bind
+ * them directly.
+ *
+ * @author chengjin.lyf on 2018/8/7 14:26
+ * @since 1.0.25
+ */
+public class DescribeBinlogFileResult {
+    private RdsItem Items;
+    private long PageNumber;
+    private long TotalRecordCount;
+    private long TotalFileSize;
+    private String RequestId;
+    private long PageRecordCount;
+
+    public RdsItem getItems() {
+        return Items;
+    }
+
+    public void setItems(RdsItem items) {
+        Items = items;
+    }
+
+    public long getPageNumber() {
+        return PageNumber;
+    }
+
+    public void setPageNumber(long pageNumber) {
+        PageNumber = pageNumber;
+    }
+
+    public long getTotalRecordCount() {
+        return TotalRecordCount;
+    }
+
+    public void setTotalRecordCount(long totalRecordCount) {
+        TotalRecordCount = totalRecordCount;
+    }
+
+    public long getTotalFileSize() {
+        return TotalFileSize;
+    }
+
+    public void setTotalFileSize(long totalFileSize) {
+        TotalFileSize = totalFileSize;
+    }
+
+    public String getRequestId() {
+        return RequestId;
+    }
+
+    public void setRequestId(String requestId) {
+        RequestId = requestId;
+    }
+
+    public long getPageRecordCount() {
+        return PageRecordCount;
+    }
+
+    public void setPageRecordCount(long pageRecordCount) {
+        PageRecordCount = pageRecordCount;
+    }
+}

+ 69 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/data/RdsBackupPolicy.java

@@ -0,0 +1,69 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.rds.data;
+
+/**
+ * RDS instance backup policy as returned by the DescribeBackupPolicy OpenAPI.
+ * PascalCase field names mirror the JSON keys of the API payload.
+ *
+ * @author chengjin.lyf on 2018/8/7 14:26
+ * @since 1.0.25
+ */
+public class RdsBackupPolicy {
+
+    /**
+     * Number of days data backups are retained (7 to 730 days).
+     */
+    private String BackupRetentionPeriod;
+    /**
+     * Data backup window, format: HH:mmZ-HH:mmZ.
+     */
+    private String PreferredBackupTime;
+    /**
+     * Data backup days of week: Monday, Tuesday, Wednesday, Thursday,
+     * Friday, Saturday, Sunday.
+     */
+    private String PreferredBackupPeriod;
+    /**
+     * Log backup status: true when the API reports "Enable", false for "Disabled".
+     */
+    private boolean BackupLog;
+    /**
+     * Number of days log backups are retained (7 to 730 days).
+     */
+    private int LogBackupRetentionPeriod;
+
+    public String getBackupRetentionPeriod() {
+        return BackupRetentionPeriod;
+    }
+
+    public void setBackupRetentionPeriod(String backupRetentionPeriod) {
+        BackupRetentionPeriod = backupRetentionPeriod;
+    }
+
+    public String getPreferredBackupTime() {
+        return PreferredBackupTime;
+    }
+
+    public void setPreferredBackupTime(String preferredBackupTime) {
+        PreferredBackupTime = preferredBackupTime;
+    }
+
+    public String getPreferredBackupPeriod() {
+        return PreferredBackupPeriod;
+    }
+
+    public void setPreferredBackupPeriod(String preferredBackupPeriod) {
+        PreferredBackupPeriod = preferredBackupPeriod;
+    }
+
+    public boolean isBackupLog() {
+        return BackupLog;
+    }
+
+    public void setBackupLog(boolean backupLog) {
+        BackupLog = backupLog;
+    }
+
+    public int getLogBackupRetentionPeriod() {
+        return LogBackupRetentionPeriod;
+    }
+
+    public void setLogBackupRetentionPeriod(int logBackupRetentionPeriod) {
+        LogBackupRetentionPeriod = logBackupRetentionPeriod;
+    }
+}

+ 19 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/data/RdsItem.java

@@ -0,0 +1,19 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.rds.data;
+
+import java.util.List;
+
+/**
+ * Wrapper for the "Items" node of a DescribeBinlogFiles response; holds the
+ * "BinLogFile" array. PascalCase field name mirrors the JSON key.
+ *
+ * @author chengjin.lyf on 2018/8/7 14:26
+ * @since 1.0.25
+ */
+public class RdsItem {
+    private List<BinlogFile> BinLogFile;
+
+    public List<BinlogFile> getBinLogFile() {
+        return BinLogFile;
+    }
+
+    public void setBinLogFile(List<BinlogFile> binLogFile) {
+        BinLogFile = binLogFile;
+    }
+}

+ 250 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/request/AbstractRequest.java

@@ -0,0 +1,250 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.rds.request;
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URLEncoder;
+import java.security.cert.CertificateException;
+import java.security.cert.X509Certificate;
+import java.text.SimpleDateFormat;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+
+import javax.crypto.Mac;
+import javax.crypto.SecretKey;
+import javax.crypto.spec.SecretKeySpec;
+import javax.net.ssl.SSLContext;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.lang.StringUtils;
+import org.apache.http.HttpResponse;
+import org.apache.http.HttpStatus;
+import org.apache.http.client.config.RequestConfig;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.config.Registry;
+import org.apache.http.config.RegistryBuilder;
+import org.apache.http.conn.HttpClientConnectionManager;
+import org.apache.http.conn.socket.PlainConnectionSocketFactory;
+import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
+import org.apache.http.conn.ssl.TrustStrategy;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
+import org.apache.http.ssl.SSLContexts;
+import org.apache.http.util.EntityUtils;
+
+import io.netty.handler.codec.http.HttpResponseStatus;
+
+/**
+ * Base class for signed GET requests against the Aliyun RDS OpenAPI.
+ * Subclasses contribute query parameters via {@link #putQueryString} and
+ * deserialize the HTTP response in {@link #processResult}; this class handles
+ * common parameters, HMAC-SHA1 request signing, and HTTP execution.
+ *
+ * @author chengjin.lyf on 2018/8/7 14:26
+ * @since 1.0.25
+ */
+public abstract class AbstractRequest<T> {
+
+    /**
+     * Character encoding required by the API for signing and URL encoding.
+     */
+    private static final String ENCODING = "UTF-8";
+    /**
+     * Signature algorithm required by the API.
+     */
+    private static final String MAC_NAME = "HmacSHA1";
+
+    private String accessKeyId;
+
+    private String accessKeySecret;
+
+    /**
+     *  OpenAPI version, e.g. "2014-08-15"
+     *
+     */
+    private String version;
+
+    private String endPoint = "rds.aliyuncs.com";
+
+    // NOTE(review): defaults to plain http, so the signed request (including
+    // AccessKeyId) travels unencrypted unless setProtocol("https") is called.
+    private String protocol = "http";
+
+    public void setProtocol(String protocol) {
+        this.protocol = protocol;
+    }
+
+    private int timeout = (int) TimeUnit.MINUTES.toMillis(1);
+
+
+    // sorted query parameters; TreeMap order is required by the canonical
+    // signature string. NOTE(review): raw TreeMap — use new TreeMap<String, String>().
+    private Map<String, String> treeMap = new TreeMap();
+
+    // silently ignores blank names/values so optional params can be passed unconditionally
+    public void putQueryString(String name, String value){
+        if (StringUtils.isBlank(name) || StringUtils.isBlank(value)){
+            return;
+        }
+        treeMap.put(name, value);
+    }
+
+
+    public void setVersion(String version) {
+        this.version = version;
+    }
+
+
+    public void setEndPoint(String endPoint) {
+        this.endPoint = endPoint;
+    }
+
+    public void setAccessKeyId(String accessKeyId) {
+        this.accessKeyId = accessKeyId;
+    }
+
+    public void setAccessKeySecret(String accessKeySecret) {
+        this.accessKeySecret = accessKeySecret;
+    }
+
+    /**
+     * Signs encryptText with the HMAC-SHA1 algorithm.
+     *
+     * @param encryptText the string to sign
+     * @param encryptKey the signing key
+     * @return raw HMAC bytes
+     * @throws Exception
+     */
+    private byte[] HmacSHA1Encrypt(String encryptText, String encryptKey) throws Exception {
+        byte[] data = encryptKey.getBytes(ENCODING);
+        // build a key from the raw bytes; second argument names the key algorithm
+        SecretKey secretKey = new SecretKeySpec(data, MAC_NAME);
+        // obtain a Mac instance for the given algorithm
+        Mac mac = Mac.getInstance(MAC_NAME);
+        // initialize the Mac with the key
+        mac.init(secretKey);
+
+        byte[] text = encryptText.getBytes(ENCODING);
+        // compute the MAC
+        return mac.doFinal(text);
+    }
+
+    private String base64(byte input[]) throws UnsupportedEncodingException {
+        return new String(Base64.encodeBase64(input), ENCODING);
+    }
+
+    // joins parameters as key=value&... with both sides URL-encoded
+    private String concatQueryString(Map<String, String> parameters) throws UnsupportedEncodingException {
+        if (null == parameters) {
+            return null;
+        }
+        StringBuilder urlBuilder = new StringBuilder("");
+        for (Map.Entry<String, String> entry : parameters.entrySet()) {
+            String key = entry.getKey();
+            String val = entry.getValue();
+            urlBuilder.append(encode(key));
+            if (val != null) {
+                urlBuilder.append("=").append(encode(val));
+            }
+            urlBuilder.append("&");
+        }
+        int strIndex = urlBuilder.length();
+        if (parameters.size() > 0) {
+            urlBuilder.deleteCharAt(strIndex - 1);
+        }
+        return urlBuilder.toString();
+    }
+
+    private String encode(String value) throws UnsupportedEncodingException {
+        return URLEncoder.encode(value, "UTF-8");
+    }
+
+    // builds the canonical "GET&%2F&<encoded query>" string-to-sign and signs it
+    // with the secret + "&" suffix; the +/*/~ replacements apply the API's
+    // percent-encoding rules on top of URLEncoder's output
+    private String makeSignature(TreeMap<String, String> paramMap) throws Exception {
+        String cqs = concatQueryString(paramMap);
+        cqs = encode(cqs);
+        cqs = cqs.replaceAll("\\+", "%20");
+        cqs = cqs.replaceAll("\\*", "%2A");
+        cqs = cqs.replaceAll("%7E", "~");
+        StringBuilder stringBuilder = new StringBuilder();
+        stringBuilder.append("GET").append("&").append(encode("/")).append("&").append(cqs);
+        return base64(HmacSHA1Encrypt(stringBuilder.toString(), accessKeySecret + "&"));
+    }
+
+    // NOTE(review): "YYYY" is the week-based year in SimpleDateFormat and yields a
+    // wrong year around New Year (e.g. Dec 31 formatted into the next year),
+    // which would break the Timestamp/StartTime parameters; should be "yyyy".
+    public final String formatUTCTZ(Date date) {
+        SimpleDateFormat sdf = new SimpleDateFormat("YYYY-MM-dd'T'HH:mm:ss'Z'");
+        sdf.setTimeZone(TimeZone.getTimeZone("UTC"));
+        return sdf.format(date);
+    }
+
+    // common parameters shared by every OpenAPI call
+    private void fillCommonParam(Map<String, String> p) {
+        p.put("Format", "JSON");
+        p.put("Version", version);
+        p.put("AccessKeyId", accessKeyId);
+        p.put("SignatureMethod", "HMAC-SHA1"); // must be the literal "HMAC-SHA1", not MAC_NAME ("HmacSHA1")
+        p.put("Timestamp", formatUTCTZ(new Date()));
+        p.put("SignatureVersion", "1.0");
+        p.put("SignatureNonce", UUID.randomUUID().toString());
+    }
+
+    // final query string: all params plus the computed Signature
+    private String makeRequestString(Map<String, String> param) throws Exception {
+        fillCommonParam(param);
+        String sign = makeSignature(new TreeMap<String, String>(param));
+        StringBuilder builder = new StringBuilder();
+        for (Map.Entry<String, String> entry : param.entrySet()) {
+            builder.append(encode(entry.getKey())).append("=").append(encode(entry.getValue())).append("&");
+        }
+        builder.append("Signature").append("=").append(sign);
+        return builder.toString();
+    }
+
+    /**
+     * Executes the HTTP request.
+     *
+     * NOTE(review): the trust-all TrustStrategy plus ALLOW_ALL_HOSTNAME_VERIFIER
+     * disables TLS certificate and hostname verification (MITM risk), and only
+     * TLSv1 is enabled; also a new connection manager + client is built per call
+     * and never closed — consider a shared, properly configured static client.
+     *
+     * @param getMethod
+     * @return
+     * @throws IOException
+     */
+    private final HttpResponse executeHttpRequest(HttpGet getMethod, String host) throws Exception {
+        SSLContext sslContext = SSLContexts.custom().loadTrustMaterial(null, new TrustStrategy() {
+
+            @Override
+            public boolean isTrusted(X509Certificate[] arg0, String arg1) throws CertificateException {
+                return true;
+            }
+        }).build();
+        SSLConnectionSocketFactory sslsf = new SSLConnectionSocketFactory(sslContext,
+                new String[] { "TLSv1" },
+                null,
+                SSLConnectionSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
+        Registry registry = RegistryBuilder.create()
+                .register("http", PlainConnectionSocketFactory.INSTANCE)
+                .register("https", sslsf)
+                .build();
+        HttpClientConnectionManager httpClientConnectionManager = new PoolingHttpClientConnectionManager(registry);
+        CloseableHttpClient httpClient = HttpClientBuilder.create()
+                .setMaxConnPerRoute(50)
+                .setMaxConnTotal(100)
+                .setConnectionManager(httpClientConnectionManager)
+                .build();
+        RequestConfig requestConfig = RequestConfig.custom()
+                .setConnectTimeout(timeout)
+                .setConnectionRequestTimeout(timeout)
+                .setSocketTimeout(timeout)
+                .build();
+        getMethod.setConfig(requestConfig);
+        HttpResponse response = httpClient.execute(getMethod);
+        int statusCode = response.getStatusLine().getStatusCode();
+        if (statusCode != HttpResponseStatus.OK.code() && statusCode != HttpResponseStatus.PARTIAL_CONTENT.code()) {
+            String result = EntityUtils.toString(response.getEntity());
+            throw new RuntimeException("return error !" + response.getStatusLine().getReasonPhrase() + ", " + result);
+        }
+        return response;
+    }
+
+    // subclasses deserialize the response entity into the result type
+    protected abstract T processResult(HttpResponse response) throws Exception;
+
+    // optional hook for subclasses to add parameters just before execution
+    protected void processBefore(){
+
+    }
+
+    /**
+     * Builds the signed URL, executes the request and delegates parsing to the
+     * subclass. The extra SC_OK check here is redundant with executeHttpRequest
+     * (which already throws on non-200/206), but kept for safety.
+     */
+    public final T  doAction() throws Exception {
+        processBefore();
+        String requestStr = makeRequestString(treeMap);
+        HttpGet httpGet = new HttpGet(protocol + "://" +endPoint + "?" + requestStr);
+        HttpResponse response = executeHttpRequest(httpGet, endPoint);
+        if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) {
+            String result = EntityUtils.toString(response.getEntity());
+            throw new RuntimeException("http request failed! " + result);
+        }
+        return processResult(response);
+    }
+}

+ 41 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/request/DescribeBackupPolicyRequest.java

@@ -0,0 +1,41 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.rds.request;
+
+import com.alibaba.otter.canal.parse.inbound.mysql.rds.data.RdsBackupPolicy;
+import org.apache.http.HttpResponse;
+import org.apache.http.util.EntityUtils;
+
+import com.alibaba.fastjson.JSON;
+import com.alibaba.fastjson.JSONObject;
+
+/**
+ * Queries the RDS backup policy (DescribeBackupPolicy OpenAPI) and maps the
+ * JSON response onto {@link RdsBackupPolicy}.
+ *
+ * @author chengjin.lyf on 2018/8/7 15:41
+ * @since 1.0.25
+ */
+public class DescribeBackupPolicyRequest extends AbstractRequest<RdsBackupPolicy> {
+
+
+    public DescribeBackupPolicyRequest() {
+        setVersion("2014-08-15");
+        putQueryString("Action", "DescribeBackupPolicy");
+
+    }
+
+
+    public void setRdsInstanceId(String rdsInstanceId) {
+        putQueryString("DBInstanceId", rdsInstanceId);
+    }
+
+    /**
+     * Maps the top-level JSON fields of the response onto the policy bean.
+     * "BackupLog" is reported as "Enable"/"Disabled" and converted to boolean.
+     * NOTE(review): getString("BackupLog") would NPE on equalsIgnoreCase if the
+     * field is absent — confirm the API always returns it.
+     */
+    @Override
+    protected RdsBackupPolicy processResult(HttpResponse response) throws Exception {
+        String result = EntityUtils.toString(response.getEntity());
+        JSONObject jsonObj = JSON.parseObject(result);
+        RdsBackupPolicy policy = new RdsBackupPolicy();
+        policy.setBackupRetentionPeriod(jsonObj.getString("BackupRetentionPeriod"));
+        policy.setBackupLog(jsonObj.getString("BackupLog").equalsIgnoreCase("Enable"));
+        policy.setLogBackupRetentionPeriod(jsonObj.getIntValue("LogBackupRetentionPeriod"));
+        policy.setPreferredBackupPeriod(jsonObj.getString("PreferredBackupPeriod"));
+        policy.setPreferredBackupTime(jsonObj.getString("PreferredBackupTime"));
+        return policy;
+    }
+}

+ 56 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/request/DescribeBinlogFilesRequest.java

@@ -0,0 +1,56 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.rds.request;
+
+import java.util.Date;
+
+import com.alibaba.otter.canal.parse.inbound.mysql.rds.data.DescribeBinlogFileResult;
+import org.apache.http.HttpResponse;
+import org.apache.http.util.EntityUtils;
+
+import com.alibaba.fastjson.JSONObject;
+import com.alibaba.fastjson.TypeReference;
+
+/**
+ * Lists binlog backup files of an RDS instance (DescribeBinlogFiles OpenAPI).
+ * All setters translate to query-string parameters; dates are formatted as
+ * UTC timestamps via {@link AbstractRequest#formatUTCTZ}.
+ *
+ * @author chengjin.lyf on 2018/8/7 15:41
+ * @since 1.0.25
+ */
+public class DescribeBinlogFilesRequest extends AbstractRequest<DescribeBinlogFileResult> {
+
+
+    public DescribeBinlogFilesRequest() {
+        setVersion("2014-08-15");
+        putQueryString("Action", "DescribeBinlogFiles");
+
+    }
+
+    public void setRdsInstanceId(String rdsInstanceId) {
+        putQueryString("DBInstanceId", rdsInstanceId);
+    }
+
+    public void setPageSize(int pageSize) {
+        putQueryString("PageSize", String.valueOf(pageSize));
+    }
+
+    public void setPageNumber(int pageNumber) {
+        putQueryString("PageNumber", String.valueOf(pageNumber));
+    }
+
+    public void setStartDate(Date startDate) {
+        putQueryString("StartTime" , formatUTCTZ(startDate));
+    }
+
+    public void setEndDate(Date endDate) {
+        putQueryString("EndTime" , formatUTCTZ(endDate));
+    }
+
+    public void setResourceOwnerId(Long resourceOwnerId) {
+        putQueryString("ResourceOwnerId", String.valueOf(resourceOwnerId));
+    }
+
+    // deserializes the JSON body straight into DescribeBinlogFileResult
+    @Override
+    protected DescribeBinlogFileResult processResult(HttpResponse response) throws Exception {
+        String result = EntityUtils.toString(response.getEntity());
+        DescribeBinlogFileResult describeBinlogFileResult = JSONObject.parseObject(result, new TypeReference<DescribeBinlogFileResult>() {
+        });
+        return describeBinlogFileResult;
+    }
+}

+ 191 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tablemeta/HistoryTableMetaCache.java

@@ -0,0 +1,191 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tablemeta;
+
+import com.alibaba.otter.canal.parse.driver.mysql.packets.server.ResultSetPacket;
+import com.alibaba.otter.canal.parse.inbound.TableMeta;
+import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection;
+import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.TableMetaCache;
+import com.alibaba.otter.canal.parse.inbound.mysql.tablemeta.exception.CacheConnectionNull;
+import com.alibaba.otter.canal.parse.inbound.mysql.tablemeta.exception.NoHistoryException;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+
+import java.io.IOException;
+import java.util.*;
+
+public class HistoryTableMetaCache {
+    private TableMetaStorage tableMetaStorage;
+    private MysqlConnection metaConnection;
+    private LoadingCache<String, Map<Long, TableMeta>> cache; // 第一层:数据库名.表名,第二层时间戳,TableMeta
+
+    public HistoryTableMetaCache() {
+        cache = CacheBuilder.newBuilder().build(new CacheLoader<String, Map<Long, TableMeta>>() {
+            @Override
+            public Map<Long, TableMeta> load(String tableName) throws Exception {
+                Long timestamp = new Date().getTime();
+                String[] strs = tableName.split("\\.");
+                String schema = strs[0];
+                if (tableMetaStorage != null) {
+                    init(tableMetaStorage.fetchByTableName(tableName)); // 从存储中读取表的历史ddl
+                }
+                ResultSetPacket resultSetPacket = connectionQuery("show create table " + tableName); // 获取当前ddl
+                String currentDdl = resultSetPacket.getFieldValues().get(1);
+                if (cache.asMap().containsKey(tableName)) {
+                    Map<Long, TableMeta> tableMetaMap = cache.getUnchecked(tableName);
+                    if (tableMetaMap.isEmpty()) {
+                        put(schema, tableName, currentDdl, timestamp - 1000L); // 放入当前schema,取时间为当前时间-1s
+                    } else {                                               // 如果table存在历史
+                        Iterator<Long> iterator = tableMetaMap.keySet().iterator();
+                        Long firstTimestamp = iterator.next();
+                        TableMeta first = tableMetaMap.get(firstTimestamp); // 拿第一条ddl
+                        if (!first.getDdl().equalsIgnoreCase(currentDdl)) { // 当前ddl与历史第一条不一致,放入当前ddl
+                            put(schema, tableName, currentDdl, calculateNewTimestamp(firstTimestamp)); // 计算放入的timestamp,设为第一条时间+1s
+                        }
+                    }
+                } else {
+                    put(schema, tableName, currentDdl, timestamp - 1000L); // 放入当前schema
+                }
+                return cache.get(tableName);
+            }
+        });
+    }
+
+    public void init(List<TableMetaEntry> entries) throws IOException {
+        if (entries == null) {
+            return;
+        }
+        for (TableMetaEntry entry : entries) {
+            try {
+                put(entry.getSchema(), entry.getTable(), entry.getDdl(), entry.getTimestamp());
+            } catch (CacheConnectionNull cacheConnectionNull) {
+                cacheConnectionNull.printStackTrace();
+            }
+        }
+    }
+
+    public TableMeta put(String schema, String table, String ddl, Long timestamp) throws CacheConnectionNull, IOException {
+        ResultSetPacket resultSetPacket;
+        if (!(ddl.contains("CREATE TABLE") || ddl.contains("create table"))) { // 尝试直接从数据库拉取CREATE TABLE的DDL
+            resultSetPacket = connectionQuery("show create table " + table);
+            ddl = resultSetPacket.getFieldValues().get(1);
+        } else { // CREATE TABLE 的 DDL
+            resultSetPacket = new ResultSetPacket();
+            List<String> fields = new ArrayList<String>();
+            String[] strings = table.split("\\.");
+            String shortTable = table;
+            if (strings.length > 1) {
+                shortTable = strings[1];
+            }
+            fields.add(0, shortTable);
+            fields.add(1, ddl);
+            resultSetPacket.setFieldValues(fields);
+            if (metaConnection != null) {
+                resultSetPacket.setSourceAddress(metaConnection.getAddress());
+            }
+        }
+        Map<Long, TableMeta> tableMetaMap;
+        if (!cache.asMap().containsKey(table)) {
+            tableMetaMap = new TreeMap<Long, TableMeta>(new Comparator<Long>() {
+                @Override
+                public int compare(Long o1, Long o2) {
+                    return o2.compareTo(o1);
+                }
+            });
+            cache.put(table, tableMetaMap);
+        } else {
+            tableMetaMap = cache.getUnchecked(table);
+        }
+        eliminate(tableMetaMap); // 淘汰旧的TableMeta
+        TableMeta tableMeta = new TableMeta(schema, table, TableMetaCache.parseTableMeta(schema, table, resultSetPacket));
+        if (tableMeta.getDdl() == null) { // 生成的TableMeta有时DDL为null
+            tableMeta.setDdl(ddl);
+        }
+        tableMetaMap.put(timestamp, tableMeta);
+        return tableMeta;
+    }
+
+    public TableMeta get(String schema, String table, Long timestamp) throws NoHistoryException, CacheConnectionNull {
+        Map<Long, TableMeta> tableMetaMap = cache.getUnchecked(table);
+        Iterator<Long> iterator = tableMetaMap.keySet().iterator();
+        Long selected = null;
+        while(iterator.hasNext()) {
+            Long temp = iterator.next();
+            if (timestamp > temp) {
+                selected = temp;
+                break;
+            }
+        }
+
+        if (selected == null) {
+            iterator = tableMetaMap.keySet().iterator();
+            if (iterator.hasNext()) {
+                selected = iterator.next();
+            } else {
+                throw new NoHistoryException(schema, table);
+            }
+        }
+
+        return tableMetaMap.get(selected);
+    }
+
+    public void clearTableMeta() {
+        cache.invalidateAll();
+    }
+
+    public void clearTableMetaWithSchemaName(String schema) {
+        for (String tableName : cache.asMap().keySet()) {
+            String[] strs = tableName.split("\\.");
+            if (schema.equalsIgnoreCase(strs[0])) {
+                cache.invalidate(tableName);
+            }
+        }
+    }
+
+    public void clearTableMeta(String schema, String table) {
+        if (!table.contains(".")) {
+            table = schema+"."+table;
+        }
+        cache.invalidate(table);
+    }
+
+    // eliminate older table meta in cache
+    private void eliminate(Map<Long, TableMeta> tableMetaMap) {
+        int MAX_CAPABILITY = 20;
+        if (tableMetaMap.keySet().size() < MAX_CAPABILITY) {
+            return;
+        }
+        Iterator<Long> iterator = tableMetaMap.keySet().iterator();
+        while(iterator.hasNext()) {
+            iterator.next();
+        }
+        iterator.remove();
+    }
+
+    private Long calculateNewTimestamp(Long oldTimestamp) {
+        return oldTimestamp + 1000;
+    }
+
+    private ResultSetPacket connectionQuery(String query) throws CacheConnectionNull, IOException {
+        if (metaConnection == null) {
+            throw new CacheConnectionNull();
+        }
+        try {
+            return metaConnection.query(query);
+        } catch (IOException e) {
+            try {
+                metaConnection.reconnect();
+                return metaConnection.query(query);
+            } catch (IOException e1) {
+                throw e1;
+            }
+        }
+    }
+
+    public void setMetaConnection(MysqlConnection metaConnection) {
+        this.metaConnection = metaConnection;
+    }
+
+    public void setTableMetaStorage(TableMetaStorage tableMetaStorage) {
+        this.tableMetaStorage = tableMetaStorage;
+    }
+}

+ 20 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tablemeta/TableMetaCacheInterface.java

@@ -0,0 +1,20 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tablemeta;
+
+import com.alibaba.otter.canal.parse.inbound.TableMeta;
+import com.alibaba.otter.canal.protocol.position.EntryPosition;
+
+public interface TableMetaCacheInterface {
+
+    TableMeta getTableMeta(String schema, String table, boolean useCache, EntryPosition position);
+
+    void clearTableMeta();
+
+    void clearTableMetaWithSchemaName(String schema);
+
+    void clearTableMeta(String schema, String table);
+
+    boolean apply(EntryPosition position, String schema, String ddl, String extra);
+
+    boolean isOnRDS();
+
+}

+ 105 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tablemeta/TableMetaCacheWithStorage.java

@@ -0,0 +1,105 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tablemeta;
+
+import com.alibaba.otter.canal.parse.inbound.TableMeta;
+import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection;
+import com.alibaba.otter.canal.protocol.position.EntryPosition;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+public class TableMetaCacheWithStorage implements TableMetaCacheInterface {
+
+    private static Logger logger = LoggerFactory.getLogger(TableMetaCacheWithStorage.class);
+    private TableMetaStorage tableMetaStorage; // TableMeta存储
+    private HistoryTableMetaCache cache = new HistoryTableMetaCache(); // cache
+
+    public TableMetaCacheWithStorage(MysqlConnection con, TableMetaStorage tableMetaStorage) {
+        this.tableMetaStorage = tableMetaStorage;
+        InetSocketAddress address = con.getAddress();
+        this.tableMetaStorage.setDbAddress(address.getHostName()+":"+address.getPort());
+        cache.setMetaConnection(con);
+        cache.setTableMetaStorage(tableMetaStorage);
+        if (tableMetaStorage != null) {
+            try {
+                cache.init(tableMetaStorage.fetch()); // 初始化,从存储拉取TableMeta
+            } catch (IOException e) {
+                logger.error(e.getMessage());
+            }
+        }
+    }
+
+    @Override
+    public boolean apply(EntryPosition position, String fullTableName, String ddl, String extra) {
+        String[] strs = fullTableName.split("\\.");
+        String schema = strs[0];
+        if (schema.equalsIgnoreCase("null")) { // ddl schema为null,放弃处理
+            return false;
+        }
+        try {
+            TableMeta tableMeta = cache.get(schema, fullTableName, position.getTimestamp());
+            if (!compare(tableMeta, ddl)) { // 获取最近的TableMeta,进行比对
+                TableMeta result = cache.put(schema, fullTableName, ddl, calTimestamp(position.getTimestamp()));
+                if (tableMetaStorage != null && result != null) { // 储存
+                    tableMetaStorage.store(schema, fullTableName, result.getDdl(), calTimestamp(position.getTimestamp()));
+                }
+            }
+            return true;
+        } catch (Exception e) {
+            logger.error(e.toString());
+        }
+
+        return false;
+    }
+
+    @Override
+    public boolean isOnRDS() {
+        return false;
+    }
+
+    /***
+     *
+     * @param schema dbname
+     * @param table tablename
+     * @param useCache unused
+     * @param position timestamp
+     * @return
+     */
+    @Override
+    public TableMeta getTableMeta(String schema, String table, boolean useCache, EntryPosition position) {
+        String fulltbName = schema + "." + table;
+        try {
+            return cache.get(schema, fulltbName, position.getTimestamp());
+        } catch (Exception e) {
+            logger.error(e.toString());
+        }
+        return null;
+    }
+
+    @Override
+    public void clearTableMeta() {
+        cache.clearTableMeta();
+    }
+
+    @Override
+    public void clearTableMetaWithSchemaName(String schema) {
+        cache.clearTableMetaWithSchemaName(schema);
+    }
+
+    @Override
+    public void clearTableMeta(String schema, String table) {
+        cache.clearTableMeta(schema, table);
+    }
+
+    private boolean compare(TableMeta tableMeta, String ddl) {
+        if (tableMeta == null) {
+            return false;
+        }
+        return tableMeta.getDdl().equalsIgnoreCase(ddl);
+    }
+
+    private Long calTimestamp(Long timestamp) {
+        return timestamp;
+    }
+}

+ 55 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tablemeta/TableMetaEntry.java

@@ -0,0 +1,55 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tablemeta;
+
+import java.io.Serializable;
+
/**
 * Serializable value object describing one persisted table-meta history record:
 * which database instance it came from, the table it belongs to, the DDL text,
 * and the timestamp from which that DDL was effective.
 */
public class TableMetaEntry implements Serializable {

    private static final long serialVersionUID = -1350200637109107904L;

    private String dbAddress; // "host:port" of the source database
    private String schema;    // database name
    private String table;     // table name
    private String ddl;       // DDL text of this version
    private Long   timestamp; // effective-from timestamp

    public String getDbAddress() {
        return dbAddress;
    }

    public void setDbAddress(String dbAddress) {
        this.dbAddress = dbAddress;
    }

    public String getSchema() {
        return schema;
    }

    public void setSchema(String schema) {
        this.schema = schema;
    }

    public String getTable() {
        return table;
    }

    public void setTable(String table) {
        this.table = table;
    }

    public String getDdl() {
        return ddl;
    }

    public void setDdl(String ddl) {
        this.ddl = ddl;
    }

    public Long getTimestamp() {
        return timestamp;
    }

    public void setTimestamp(Long timestamp) {
        this.timestamp = timestamp;
    }
}

+ 18 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tablemeta/TableMetaStorage.java

@@ -0,0 +1,18 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tablemeta;
+
+import java.util.List;
+
/**
 * Persistence abstraction for table-meta history: stores DDL versions and reads
 * them back, scoped to a single database instance identified by its address.
 */
public interface TableMetaStorage {

    /**
     * Persists one DDL version.
     *
     * @param schema    database name
     * @param table     table name
     * @param ddl       DDL text
     * @param timestamp effective-from timestamp of this version
     */
    void store(String schema, String table, String ddl, Long timestamp);

    /** Fetches all persisted history entries for this instance. */
    List<TableMetaEntry> fetch();

    /** Fetches the persisted history entries of a single table. */
    List<TableMetaEntry> fetchByTableName(String tableName);

    /** @return the database name this storage is bound to. */
    String getDbName();

    /** @return the "host:port" address of the source database. */
    String getDbAddress();

    /** Sets the "host:port" address of the source database. */
    void setDbAddress(String address);
}

+ 9 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tablemeta/TableMetaStorageFactory.java

@@ -0,0 +1,9 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tablemeta;
+
/**
 * Factory for {@link TableMetaStorage} instances, allowing the storage backend
 * to be chosen/configured outside the parser.
 */
public interface TableMetaStorageFactory {

    /** @return a storage instance, presumably bound to {@link #getDbName()} — verify with implementations. */
    TableMetaStorage getTableMetaStorage();

    /** @return the database name the produced storage is associated with. */
    String getDbName();

}

+ 9 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tablemeta/exception/CacheConnectionNull.java

@@ -0,0 +1,9 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tablemeta.exception;
+
/**
 * Thrown by the table-meta cache when a database round-trip is required but no
 * meta connection has been configured (see HistoryTableMetaCache#setMetaConnection).
 */
public class CacheConnectionNull extends Exception {

    // FIX: Exception is Serializable; declare an explicit serialVersionUID
    private static final long serialVersionUID = 1L;

    @Override
    public String toString() {
        return "CacheConnectionNull";
    }
}

+ 21 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tablemeta/exception/NoHistoryException.java

@@ -0,0 +1,21 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tablemeta.exception;
+
/**
 * Thrown by the table-meta cache when no historical version exists at all for
 * the requested table.
 */
public class NoHistoryException extends Exception {

    // FIX: Exception is Serializable; declare an explicit serialVersionUID
    private static final long serialVersionUID = 1L;

    private String dbName; // database name of the missing table
    private String tbName; // table name of the missing table

    public NoHistoryException(String dbName, String tbName) {
        this.dbName = dbName;
        this.tbName = tbName;
    }

    /** Prints "db.table" to stdout (kept for compatibility with existing callers). */
    public void printTableName() {
        System.out.println(dbName + "." + tbName);
    }

    @Override
    public String toString() {
        // FIX: message previously read "NioHistoryException" (typo)
        return "NoHistoryException: " + dbName + " " + tbName;
    }
}

部分文件因文件數量過多而無法顯示