Ver código fonte

Merge pull request #1 from alibaba/master

merge
Payon 7 anos atrás
pai
commit
84f3d92790
100 arquivos alterados com 4877 adições e 892 exclusões
  1. 2 0
      .gitignore
  2. 1 1
      client/pom.xml
  3. 1 1
      common/pom.xml
  4. 1 1
      dbsync/pom.xml
  5. 3 1
      dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/FileLogFetcher.java
  6. 16 7
      dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/RowsLogBuffer.java
  7. 7 7
      deployer/pom.xml
  8. 0 15
      deployer/src/main/bin/metrics_env.sh
  9. 1 6
      deployer/src/main/bin/startup.sh
  10. 14 2
      deployer/src/main/java/com/alibaba/otter/canal/deployer/CanalController.java
  11. 13 2
      deployer/src/main/resources/canal.properties
  12. 14 10
      deployer/src/main/resources/example/instance.properties
  13. 0 32
      deployer/src/main/resources/example/rds_instance.properties
  14. 39 0
      deployer/src/main/resources/spring/base-instance.xml
  15. 5 23
      deployer/src/main/resources/spring/default-instance.xml
  16. 5 23
      deployer/src/main/resources/spring/file-instance.xml
  17. 8 26
      deployer/src/main/resources/spring/group-instance.xml
  18. 0 146
      deployer/src/main/resources/spring/local-instance.xml
  19. 5 23
      deployer/src/main/resources/spring/memory-instance.xml
  20. 33 0
      docker/Dockerfile
  21. 41 0
      docker/base/Dockerfile
  22. 29 0
      docker/build.sh
  23. 119 0
      docker/image/admin/app.sh
  24. 2 0
      docker/image/admin/bin/clean_log
  25. 45 0
      docker/image/admin/bin/clean_log.sh
  26. 18 0
      docker/image/admin/health.sh
  27. 11 0
      docker/image/alidata/bin/exec_rc_local.sh
  28. 6 0
      docker/image/alidata/bin/lark-wait
  29. 27 0
      docker/image/alidata/bin/main.sh
  30. 19 0
      docker/image/alidata/init/02init-sshd.sh
  31. 66 0
      docker/image/alidata/init/fix-hosts.py
  32. 40 0
      docker/image/alidata/lib/proc.sh
  33. 92 0
      docker/run.sh
  34. 1 1
      driver/pom.xml
  35. 1 1
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/MysqlConnector.java
  36. 77 1
      example/pom.xml
  37. 12 2
      example/src/main/java/com/alibaba/otter/canal/example/AbstractCanalClientTest.java
  38. 144 0
      example/src/main/java/com/alibaba/otter/canal/example/db/AbstractDbClient.java
  39. 488 0
      example/src/main/java/com/alibaba/otter/canal/example/db/CanalConnectorClient.java
  40. 35 0
      example/src/main/java/com/alibaba/otter/canal/example/db/MysqlLoadLauncher.java
  41. 169 0
      example/src/main/java/com/alibaba/otter/canal/example/db/PropertyPlaceholderConfigurer.java
  42. 44 0
      example/src/main/java/com/alibaba/otter/canal/example/db/ServiceLocator.java
  43. 121 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/AbstractDbDialect.java
  44. 105 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/AbstractSqlTemplate.java
  45. 20 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/DbDialect.java
  46. 40 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/SqlTemplate.java
  47. 93 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/TableType.java
  48. 32 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/mysql/MysqlDialect.java
  49. 84 0
      example/src/main/java/com/alibaba/otter/canal/example/db/dialect/mysql/MysqlSqlTemplate.java
  50. 207 0
      example/src/main/java/com/alibaba/otter/canal/example/db/mysql/AbstractMysqlClient.java
  51. 23 0
      example/src/main/java/com/alibaba/otter/canal/example/db/mysql/MysqlClient.java
  52. 50 0
      example/src/main/java/com/alibaba/otter/canal/example/db/utils/ByteArrayConverter.java
  53. 326 0
      example/src/main/java/com/alibaba/otter/canal/example/db/utils/DdlUtils.java
  54. 140 0
      example/src/main/java/com/alibaba/otter/canal/example/db/utils/SqlTimestampConverter.java
  55. 315 0
      example/src/main/java/com/alibaba/otter/canal/example/db/utils/SqlUtils.java
  56. 53 0
      example/src/main/resources/client-spring.xml
  57. 16 0
      example/src/main/resources/client.properties
  58. 1 1
      filter/pom.xml
  59. 1 1
      instance/core/pom.xml
  60. 1 1
      instance/manager/pom.xml
  61. 8 6
      instance/manager/src/main/java/com/alibaba/otter/canal/instance/manager/CanalInstanceWithManager.java
  62. 61 1
      instance/manager/src/main/java/com/alibaba/otter/canal/instance/manager/model/CanalParameter.java
  63. 1 1
      instance/pom.xml
  64. 1 1
      instance/spring/pom.xml
  65. 2 2
      kafka-client/pom.xml
  66. 3 1
      kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/MessageDeserializer.java
  67. 2 2
      kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/running/ClientRunningData.java
  68. 44 38
      kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/running/ClientRunningMonitor.java
  69. 2 2
      kafka/pom.xml
  70. 3 3
      kafka/src/main/java/com/alibaba/otter/canal/kafka/CanalServerStarter.java
  71. 25 26
      kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/CanalKafkaProducer.java
  72. 5 2
      kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/CanalKafkaStarter.java
  73. 17 8
      kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/KafkaProperties.java
  74. 49 10
      kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/MessageSerializer.java
  75. 1 0
      kafka/src/main/resources/kafka.yml
  76. 1 1
      meta/pom.xml
  77. 1 1
      parse/pom.xml
  78. 30 0
      parse/src/main/java/com/alibaba/otter/canal/parse/exception/PositionNotFoundException.java
  79. 32 0
      parse/src/main/java/com/alibaba/otter/canal/parse/exception/ServerIdNotMatchException.java
  80. 29 2
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/AbstractEventParser.java
  81. 2 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/ErosaConnection.java
  82. 5 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/EventTransactionBuffer.java
  83. 2 1
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/MultiStageCoprocessor.java
  84. 9 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/ParserExceptionHandler.java
  85. 44 20
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/AbstractMysqlEventParser.java
  86. 69 12
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/LocalBinLogConnection.java
  87. 29 1
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlConnection.java
  88. 32 29
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlEventParser.java
  89. 50 30
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlMultiStageCoprocessor.java
  90. 26 6
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/LogEventConvert.java
  91. 46 16
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/local/BinLogFileQueue.java
  92. 339 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/BinlogDownloadQueue.java
  93. 1 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/HttpHelper.java
  94. 166 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/RdsBinlogEventParserProxy.java
  95. 67 301
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/RdsBinlogOpenApi.java
  96. 144 35
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/RdsLocalBinlogEventParser.java
  97. 80 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/data/BinlogFile.java
  98. 63 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/data/DescribeBinlogFileResult.java
  99. 78 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/data/RdsBackupPolicy.java
  100. 26 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/data/RdsItem.java

+ 2 - 0
.gitignore

@@ -14,3 +14,5 @@ jtester.properties
 .idea/
 *.iml
 .DS_Store
+*.tar.gz
+*.rpm

+ 1 - 1
client/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
-		<version>1.0.26-SNAPSHOT</version>
+		<version>1.1.0-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 	</parent>
 	<groupId>com.alibaba.otter</groupId>

+ 1 - 1
common/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
-		<version>1.0.26-SNAPSHOT</version>
+		<version>1.1.0-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 	</parent>
 	<artifactId>canal.common</artifactId>

+ 1 - 1
dbsync/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
-		<version>1.0.26-SNAPSHOT</version>
+		<version>1.1.0-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 	</parent>
 	<groupId>com.alibaba.otter</groupId>

+ 3 - 1
dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/FileLogFetcher.java

@@ -168,7 +168,9 @@ public final class FileLogFetcher extends LogFetcher {
      * @see com.taobao.tddl.dbsync.binlog.LogFetcher#close()
      */
     public void close() throws IOException {
-        if (fin != null) fin.close();
+        if (fin != null) {
+            fin.close();
+        }
 
         fin = null;
     }

+ 16 - 7
dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/RowsLogBuffer.java

@@ -490,7 +490,6 @@ public final class RowsLogBuffer {
                     // t % 100);
 
                     StringBuilder builder = new StringBuilder();
-                    builder.append(26);
                     appendNumber4(builder, d / 10000);
                     builder.append('-');
                     appendNumber2(builder, (d % 10000) / 100);
@@ -615,7 +614,13 @@ public final class RowsLogBuffer {
                     if (i32 < 0) {
                         builder.append('-');
                     }
-                    appendNumber2(builder, u32 / 10000);
+
+                    int d = u32 / 10000;
+                    if (d > 100) {
+                        builder.append(String.valueOf(d));
+                    } else {
+                        appendNumber2(builder, d);
+                    }
                     builder.append(':');
                     appendNumber2(builder, (u32 % 10000) / 100);
                     builder.append(':');
@@ -724,7 +729,13 @@ public final class RowsLogBuffer {
                     if (ltime < 0) {
                         builder.append('-');
                     }
-                    appendNumber2(builder, (int) ((intpart >> 12) % (1 << 10)));
+
+                    int d = (int) ((intpart >> 12) % (1 << 10));
+                    if (d > 100) {
+                        builder.append(String.valueOf(d));
+                    } else {
+                        appendNumber2(builder, d);
+                    }
                     builder.append(':');
                     appendNumber2(builder, (int) ((intpart >> 6) % (1 << 6)));
                     builder.append(':');
@@ -1134,7 +1145,7 @@ public final class RowsLogBuffer {
                 .append(digits[(d / 100) % 10])
                 .append(digits[(d / 10) % 10])
                 .append(digits[d % 10]);
-        } else if (d >= 100) {
+        } else {
             builder.append('0');
             appendNumber3(builder, d);
         }
@@ -1142,9 +1153,7 @@ public final class RowsLogBuffer {
 
     private void appendNumber3(StringBuilder builder, int d) {
         if (d >= 100) {
-            builder.append(digits[d / 100])
-                .append(digits[(d / 10) % 10])
-                .append(digits[d % 10]);
+            builder.append(digits[d / 100]).append(digits[(d / 10) % 10]).append(digits[d % 10]);
         } else {
             builder.append('0');
             appendNumber2(builder, d);

+ 7 - 7
deployer/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
-		<version>1.0.26-SNAPSHOT</version>
+		<version>1.1.0-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 	</parent>
 	<groupId>com.alibaba.otter</groupId>
@@ -18,12 +18,12 @@
 		</dependency>
 
 		<!-- 这里指定runtime的metrics provider-->
-		<!--<dependency>-->
-			<!--<groupId>com.alibaba.otter</groupId>-->
-			<!--<artifactId>canal.prometheus</artifactId>-->
-			<!--<version>${project.version}</version>-->
-			<!--<scope>runtime</scope>-->
-		<!--</dependency>-->
+		<dependency>
+			<groupId>com.alibaba.otter</groupId>
+			<artifactId>canal.prometheus</artifactId>
+			<version>${project.version}</version>
+			<scope>runtime</scope>
+		</dependency>
 	</dependencies>
 	
 	<build>

+ 0 - 15
deployer/src/main/bin/metrics_env.sh

@@ -1,15 +0,0 @@
-#!/bin/bash
-# Additional line arg for current prometheus solution
-case "`uname`" in
-Linux)
-    bin_abs_path=$(readlink -f $(dirname $0))
-	;;
-*)
-	bin_abs_path=`cd $(dirname $0); pwd`
-	;;
-esac
-base=${bin_abs_path}/..
-if [ $(ls $base/lib/aspectjweaver*.jar | wc -l) -eq 1 ]; then
-    WEAVER=$(ls $base/lib/aspectjweaver*.jar)
-    METRICS_OPTS=" -javaagent:"${WEAVER}" "
-fi

+ 1 - 6
deployer/src/main/bin/startup.sh

@@ -94,12 +94,7 @@ then
 	echo LOG CONFIGURATION : $logback_configurationFile
 	echo canal conf : $canal_conf 
 	echo CLASSPATH :$CLASSPATH
-#   metrics support options
-#	if [ -x $base/bin/metrics_env.sh ]; then
-#	    . $base/bin/metrics_env.sh
-#	    echo METRICS_OPTS $METRICS_OPTS
-#	fi
-	$JAVA $JAVA_OPTS $METRICS_OPTS $JAVA_DEBUG_OPT $CANAL_OPTS -classpath .:$CLASSPATH com.alibaba.otter.canal.deployer.CanalLauncher 1>>$base/logs/canal/canal.log 2>&1 &
+	$JAVA $JAVA_OPTS $JAVA_DEBUG_OPT $CANAL_OPTS -classpath .:$CLASSPATH com.alibaba.otter.canal.deployer.CanalLauncher 1>>$base/logs/canal/canal.log 2>&1 &
 	echo $! > $base/bin/canal.pid 
 	
 	echo "cd to $current_path for continue"

+ 14 - 2
deployer/src/main/java/com/alibaba/otter/canal/deployer/CanalController.java

@@ -34,6 +34,7 @@ import com.alibaba.otter.canal.instance.core.CanalInstanceGenerator;
 import com.alibaba.otter.canal.instance.manager.CanalConfigClient;
 import com.alibaba.otter.canal.instance.manager.ManagerCanalInstanceGenerator;
 import com.alibaba.otter.canal.instance.spring.SpringCanalInstanceGenerator;
+import com.alibaba.otter.canal.parse.CanalEventParser;
 import com.alibaba.otter.canal.server.embedded.CanalServerWithEmbedded;
 import com.alibaba.otter.canal.server.exception.CanalServerException;
 import com.alibaba.otter.canal.server.netty.CanalServerWithNetty;
@@ -303,7 +304,7 @@ public class CanalController {
                     return instanceGenerator.generate(destination);
                 } else if (config.getMode().isSpring()) {
                     SpringCanalInstanceGenerator instanceGenerator = new SpringCanalInstanceGenerator();
-                    synchronized (this) {
+                    synchronized (CanalEventParser.class) {
                         try {
                             // 设置当前正在加载的通道,加载spring查找文件时会用到该变量
                             System.setProperty(CanalConstants.CANAL_DESTINATION_PROPERTY, destination);
@@ -379,7 +380,18 @@ public class CanalController {
     }
 
     private String getProperty(Properties properties, String key) {
-        return StringUtils.trim(properties.getProperty(StringUtils.trim(key)));
+        key = StringUtils.trim(key);
+        String value = System.getProperty(key);
+
+        if (value == null) {
+            value = System.getenv(key);
+        }
+
+        if (value == null) {
+            value = properties.getProperty(key);
+        }
+
+        return StringUtils.trim(value);
     }
 
     public void start() throws Throwable {

+ 13 - 2
deployer/src/main/resources/canal.properties

@@ -3,7 +3,7 @@
 #################################################
 canal.id= 1
 canal.ip=
-canal.port= 11111
+canal.port=11111
 canal.zkServers=
 # flush data to zk
 canal.zookeeper.flush.period = 1000
@@ -43,6 +43,7 @@ canal.instance.filter.query.dml = false
 canal.instance.filter.query.ddl = false
 canal.instance.filter.table.error = false
 canal.instance.filter.rows = false
+canal.instance.filter.transaction.entry = false
 
 # binlog format/image check
 canal.instance.binlog.format = ROW,STATEMENT,MIXED 
@@ -58,6 +59,17 @@ canal.instance.parser.parallel = true
 ## disruptor ringbuffer size, must be power of 2
 canal.instance.parser.parallelBufferSize = 256
 
+# table meta tsdb info
+canal.instance.tsdb.enable=true
+canal.instance.tsdb.dir=${canal.file.data.dir:../conf}/${canal.instance.destination:}
+canal.instance.tsdb.url=jdbc:h2:${canal.instance.tsdb.dir}/h2;CACHE_SIZE=1000;MODE=MYSQL;
+canal.instance.tsdb.dbUsername=canal
+canal.instance.tsdb.dbPassword=canal
+
+# rds oss binlog account
+canal.instance.rds.accesskey =
+canal.instance.rds.secretkey =
+
 #################################################
 ######### 		destinations		############# 
 #################################################
@@ -74,7 +86,6 @@ canal.instance.tsdb.spring.xml=classpath:spring/tsdb/h2-tsdb.xml
 canal.instance.global.mode = spring 
 canal.instance.global.lazy = false
 #canal.instance.global.manager.address = 127.0.0.1:1099
-#canal.instance.global.spring.xml = classpath:spring/local-instance.xml
 #canal.instance.global.spring.xml = classpath:spring/memory-instance.xml
 canal.instance.global.spring.xml = classpath:spring/file-instance.xml
 #canal.instance.global.spring.xml = classpath:spring/default-instance.xml

+ 14 - 10
deployer/src/main/resources/example/instance.properties

@@ -1,35 +1,39 @@
 #################################################
-## mysql serverId
-canal.instance.mysql.slaveId=0
+## mysql serverId , v1.0.26+ will autoGen 
+# canal.instance.mysql.slaveId=0
 
-# position info
-canal.instance.master.address=127.0.0.1:3306
 # enable gtid use true/false
 canal.instance.gtidon=false
+
+# position info
+canal.instance.master.address=127.0.0.1:3306
 canal.instance.master.journal.name=
 canal.instance.master.position=
 canal.instance.master.timestamp=
 canal.instance.master.gtid=
 
+# rds oss binlog
+canal.instance.rds.accesskey=
+canal.instance.rds.secretkey=
+canal.instance.rds.instanceId=
+
 # table meta tsdb info
 canal.instance.tsdb.enable=true
-canal.instance.tsdb.dir=${canal.file.data.dir:../conf}/${canal.instance.destination:}
-canal.instance.tsdb.url=jdbc:h2:${canal.instance.tsdb.dir}/h2;CACHE_SIZE=1000;MODE=MYSQL;
 #canal.instance.tsdb.url=jdbc:mysql://127.0.0.1:3306/canal_tsdb
-canal.instance.tsdb.dbUsername=canal
-canal.instance.tsdb.dbPassword=canal
-
+#canal.instance.tsdb.dbUsername=canal
+#canal.instance.tsdb.dbPassword=canal
 
 #canal.instance.standby.address =
 #canal.instance.standby.journal.name =
 #canal.instance.standby.position = 
 #canal.instance.standby.timestamp =
 #canal.instance.standby.gtid=
+
 # username/password
 canal.instance.dbUsername=canal
 canal.instance.dbPassword=canal
-canal.instance.defaultDatabaseName=test
 canal.instance.connectionCharset=UTF-8
+
 # table regex
 canal.instance.filter.regex=.*\\..*
 # table black regex

+ 0 - 32
deployer/src/main/resources/example/rds_instance.properties

@@ -1,32 +0,0 @@
-#################################################
-# rds openapi binlog
-canal.instance.rds.open.url=https://rds.aliyuncs.com/
-canal.instance.rds.open.accesskey=
-canal.instance.rds.open.secretkey=
-canal.instance.rds.instanceId=
-canal.instance.rds.startTime=
-canal.instance.rds.endTime=
-
-# local binlog dir
-canal.instance.parser.directory=${canal.file.data.dir:../conf}/${canal.instance.destination:}/binlog
-# position info
-canal.instance.master.address=127.0.0.1:3306
-
-# table meta tsdb info
-canal.instance.tsdb.enable=true
-canal.instance.tsdb.dir=${canal.file.data.dir:../conf}/${canal.instance.destination:}
-canal.instance.tsdb.url=jdbc:h2:${canal.instance.tsdb.dir}/h2;CACHE_SIZE=1000;MODE=MYSQL;
-#canal.instance.tsdb.url=jdbc:mysql://127.0.0.1:3306/canal_tsdb
-#canal.instance.tsdb.dbUsername=canal
-#canal.instance.tsdb.dbPassword=canal
-
-# username/password
-canal.instance.dbUsername=canal
-canal.instance.dbPassword=canal
-canal.instance.defaultDatabaseName=test
-canal.instance.connectionCharset=UTF-8
-# table regex
-canal.instance.filter.regex=test\\..*
-# table black regex
-canal.instance.filter.black.regex=
-#################################################

+ 39 - 0
deployer/src/main/resources/spring/base-instance.xml

@@ -0,0 +1,39 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<beans xmlns="http://www.springframework.org/schema/beans"
+	xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:tx="http://www.springframework.org/schema/tx"
+	xmlns:aop="http://www.springframework.org/schema/aop" xmlns:lang="http://www.springframework.org/schema/lang"
+	xmlns:context="http://www.springframework.org/schema/context"
+	xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-2.0.xsd
+           http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop-2.0.xsd
+           http://www.springframework.org/schema/lang http://www.springframework.org/schema/lang/spring-lang-2.0.xsd
+           http://www.springframework.org/schema/tx http://www.springframework.org/schema/tx/spring-tx-2.0.xsd
+           http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-2.5.xsd"
+	default-autowire="byName">
+
+	<!-- properties -->
+	<bean class="com.alibaba.otter.canal.instance.spring.support.PropertyPlaceholderConfigurer" lazy-init="false">
+		<property name="ignoreResourceNotFound" value="true" />
+		<property name="systemPropertiesModeName" value="SYSTEM_PROPERTIES_MODE_OVERRIDE"/><!-- 允许system覆盖 -->
+		<property name="locationNames">
+			<list>
+				<value>classpath:canal.properties</value>
+				<value>classpath:${canal.instance.destination:}/instance.properties</value>
+			</list>
+		</property>
+	</bean>
+	
+	<bean id="socketAddressEditor" class="com.alibaba.otter.canal.instance.spring.support.SocketAddressEditor" />
+	<bean class="org.springframework.beans.factory.config.CustomEditorConfigurer"> 
+		<property name="propertyEditorRegistrars">
+			<list>
+				<ref bean="socketAddressEditor" />
+			</list>
+		</property>
+	</bean>
+	
+	<bean id="baseEventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.rds.RdsBinlogEventParserProxy" abstract="true">
+		<property name="accesskey" value="${canal.instance.rds.accesskey:}" />
+		<property name="secretkey" value="${canal.instance.rds.secretkey:}" />
+		<property name="instanceId" value="${canal.instance.rds.instanceId:}" />
+	</bean>
+</beans>

+ 5 - 23
deployer/src/main/resources/spring/default-instance.xml

@@ -10,26 +10,7 @@
            http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-2.5.xsd"
 	default-autowire="byName">
 
-	<!-- properties -->
-	<bean class="com.alibaba.otter.canal.instance.spring.support.PropertyPlaceholderConfigurer" lazy-init="false">
-		<property name="ignoreResourceNotFound" value="true" />
-		<property name="systemPropertiesModeName" value="SYSTEM_PROPERTIES_MODE_OVERRIDE"/><!-- 允许system覆盖 -->
-		<property name="locationNames">
-			<list>
-				<value>classpath:canal.properties</value>
-				<value>classpath:${canal.instance.destination:}/instance.properties</value>
-			</list>
-		</property>
-	</bean>
-	
-	<bean id="socketAddressEditor" class="com.alibaba.otter.canal.instance.spring.support.SocketAddressEditor" />
-	<bean class="org.springframework.beans.factory.config.CustomEditorConfigurer"> 
-		<property name="propertyEditorRegistrars">
-			<list>
-				<ref bean="socketAddressEditor" />
-			</list>
-		</property>
-	</bean>
+	<import resource="classpath:spring/base-instance.xml" />
 	
 	<bean id="instance" class="com.alibaba.otter.canal.instance.spring.CanalInstanceWithSpring">
 		<property name="destination" value="${canal.instance.destination}" />
@@ -81,9 +62,10 @@
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
 		<property name="eventStore" ref="eventStore" />
+		<property name="filterTransactionEntry" value="${canal.instance.filter.transaction.entry:false}"/>
 	</bean>
 
-	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
+	<bean id="eventParser" parent="baseEventParser" >
 		<property name="destination" value="${canal.instance.destination}" />
 		<property name="slaveId" value="${canal.instance.mysql.slaveId:0}" />
 		<!-- 心跳配置 -->
@@ -148,7 +130,7 @@
 				<property name="address" value="${canal.instance.master.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
@@ -156,7 +138,7 @@
 				<property name="address" value="${canal.instance.standby.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		

+ 5 - 23
deployer/src/main/resources/spring/file-instance.xml

@@ -10,26 +10,7 @@
            http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-2.5.xsd"
 	default-autowire="byName">
 
-	<!-- properties -->
-	<bean class="com.alibaba.otter.canal.instance.spring.support.PropertyPlaceholderConfigurer" lazy-init="false">
-		<property name="ignoreResourceNotFound" value="true" />
-		<property name="systemPropertiesModeName" value="SYSTEM_PROPERTIES_MODE_OVERRIDE"/><!-- 允许system覆盖 -->
-		<property name="locationNames">
-			<list>
-				<value>classpath:canal.properties</value>
-				<value>classpath:${canal.instance.destination:}/instance.properties</value>
-			</list>
-		</property>
-	</bean>
-	
-	<bean id="socketAddressEditor" class="com.alibaba.otter.canal.instance.spring.support.SocketAddressEditor" />
-	<bean class="org.springframework.beans.factory.config.CustomEditorConfigurer"> 
-		<property name="propertyEditorRegistrars">
-			<list>
-				<ref bean="socketAddressEditor" />
-			</list>
-		</property>
-	</bean>
+	<import resource="classpath:spring/base-instance.xml" />
 	
 	<bean id="instance" class="com.alibaba.otter.canal.instance.spring.CanalInstanceWithSpring">
 		<property name="destination" value="${canal.instance.destination}" />
@@ -67,9 +48,10 @@
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
 		<property name="eventStore" ref="eventStore" />
+		<property name="filterTransactionEntry" value="${canal.instance.filter.transaction.entry:false}"/>
 	</bean>
 
-	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
+	<bean id="eventParser" parent="baseEventParser">
 		<property name="destination" value="${canal.instance.destination}" />
 		<property name="slaveId" value="${canal.instance.mysql.slaveId:0}" />
 		<!-- 心跳配置 -->
@@ -133,7 +115,7 @@
 				<property name="address" value="${canal.instance.master.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
@@ -141,7 +123,7 @@
 				<property name="address" value="${canal.instance.standby.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		

+ 8 - 26
deployer/src/main/resources/spring/group-instance.xml

@@ -10,26 +10,7 @@
            http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-2.5.xsd"
 	default-autowire="byName">
 	
-	<!-- properties -->
-	<bean class="com.alibaba.otter.canal.instance.spring.support.PropertyPlaceholderConfigurer" lazy-init="false">
-		<property name="ignoreResourceNotFound" value="true" />
-		<property name="systemPropertiesModeName" value="SYSTEM_PROPERTIES_MODE_OVERRIDE"/><!-- 允许system覆盖 -->
-		<property name="locationNames">
-			<list>
-				<value>classpath:canal.properties</value>
-				<value>classpath:${canal.instance.destination:}/instance.properties</value>
-			</list>
-		</property>
-	</bean>
-	
-	<bean id="socketAddressEditor" class="com.alibaba.otter.canal.instance.spring.support.SocketAddressEditor" />
-	<bean class="org.springframework.beans.factory.config.CustomEditorConfigurer"> 
-		<property name="propertyEditorRegistrars">
-			<list>
-				<ref bean="socketAddressEditor" />
-			</list>
-		</property>
-	</bean>
+	<import resource="classpath:spring/base-instance.xml" />
 
 	<bean id="instance" class="com.alibaba.otter.canal.instance.spring.CanalInstanceWithSpring">
 		<property name="destination" value="${canal.instance.destination}" />
@@ -64,6 +45,7 @@
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
 		<property name="eventStore" ref="eventStore" />
+		<property name="filterTransactionEntry" value="${canal.instance.filter.transaction.entry:false}"/>
 	</bean>
 	
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.group.GroupEventParser">
@@ -75,7 +57,7 @@
 		</property>
 	</bean>
 
-	<bean id="eventParser1" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
+	<bean id="eventParser1" parent="baseEventParser">
 		<property name="destination" value="${canal.instance.destination}" />
 		<property name="slaveId" value="${canal.instance.mysql.slaveId:0}" />
 		<!-- 心跳配置 -->
@@ -130,7 +112,7 @@
 				<property name="address" value="${canal.instance.master1.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
@@ -138,7 +120,7 @@
 				<property name="address" value="${canal.instance.standby1.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		
@@ -173,7 +155,7 @@
 		<property name="parallelBufferSize" value="${canal.instance.parser.parallelBufferSize:256}" />
 	</bean>
 	
-	<bean id="eventParser2" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
+	<bean id="eventParser2" parent="baseEventParser">
 		<property name="destination" value="${canal.instance.destination}" />
 		<property name="slaveId" value="${canal.instance.mysql.slaveId:0}" />
 		<!-- 心跳配置 -->
@@ -228,7 +210,7 @@
 				<property name="address" value="${canal.instance.master2.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
@@ -236,7 +218,7 @@
 				<property name="address" value="${canal.instance.standby2.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		

+ 0 - 146
deployer/src/main/resources/spring/local-instance.xml

@@ -1,146 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<beans xmlns="http://www.springframework.org/schema/beans"
-	xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:tx="http://www.springframework.org/schema/tx"
-	xmlns:aop="http://www.springframework.org/schema/aop" xmlns:lang="http://www.springframework.org/schema/lang"
-	xmlns:context="http://www.springframework.org/schema/context"
-	xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-2.0.xsd
-           http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop-2.0.xsd
-           http://www.springframework.org/schema/lang http://www.springframework.org/schema/lang/spring-lang-2.0.xsd
-           http://www.springframework.org/schema/tx http://www.springframework.org/schema/tx/spring-tx-2.0.xsd
-           http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-2.5.xsd"
-	default-autowire="byName">
-
-	<!-- properties -->
-	<bean class="com.alibaba.otter.canal.instance.spring.support.PropertyPlaceholderConfigurer" lazy-init="false">
-		<property name="ignoreResourceNotFound" value="true" />
-		<property name="systemPropertiesModeName" value="SYSTEM_PROPERTIES_MODE_OVERRIDE"/><!-- 允许system覆盖 -->
-		<property name="locationNames">
-			<list>
-				<value>classpath:canal.properties</value>
-				<value>classpath:${canal.instance.destination:}/instance.properties</value>
-			</list>
-		</property>
-	</bean>
-	
-	<bean id="socketAddressEditor" class="com.alibaba.otter.canal.instance.spring.support.SocketAddressEditor" />
-	<bean class="org.springframework.beans.factory.config.CustomEditorConfigurer"> 
-		<property name="propertyEditorRegistrars">
-			<list>
-				<ref bean="socketAddressEditor" />
-			</list>
-		</property>
-	</bean>
-	
-	<bean id="instance" class="com.alibaba.otter.canal.instance.spring.CanalInstanceWithSpring">
-		<property name="destination" value="${canal.instance.destination}" />
-		<property name="eventParser">
-			<ref local="eventParser" />
-		</property>
-		<property name="eventSink">
-			<ref local="eventSink" />
-		</property>
-		<property name="eventStore">
-			<ref local="eventStore" />
-		</property>
-		<property name="metaManager">
-			<ref local="metaManager" />
-		</property>
-		<property name="alarmHandler">
-			<ref local="alarmHandler" />
-		</property>
-	</bean>
-	
-	<!-- 报警处理类 -->
-	<bean id="alarmHandler" class="com.alibaba.otter.canal.common.alarm.LogAlarmHandler" />
-	
-	<bean id="metaManager" class="com.alibaba.otter.canal.meta.FileMixedMetaManager">
-		<property name="dataDir" value="${canal.file.data.dir:../conf}" />
-		<property name="period" value="${canal.file.flush.period:1000}" />
-	</bean>
-	
-	<bean id="eventStore" class="com.alibaba.otter.canal.store.memory.MemoryEventStoreWithBuffer">
-		<property name="bufferSize" value="${canal.instance.memory.buffer.size:16384}" />
-		<property name="bufferMemUnit" value="${canal.instance.memory.buffer.memunit:1024}" />
-		<property name="batchMode" value="${canal.instance.memory.batch.mode:MEMSIZE}" />
-		<property name="ddlIsolation" value="${canal.instance.get.ddl.isolation:false}" />
-	</bean>
-	
-	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
-		<property name="eventStore" ref="eventStore" />
-	</bean>
-
-	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.rds.RdsLocalBinlogEventParser">
-		<property name="destination" value="${canal.instance.destination}" />			
-		<property name="alarmHandler" ref="alarmHandler" />
-
-		<!-- 解析过滤处理 -->
-		<property name="eventFilter">
-			<bean class="com.alibaba.otter.canal.filter.aviater.AviaterRegexFilter" >
-				<constructor-arg index="0" value="${canal.instance.filter.regex:.*\..*}" />
-			</bean>
-		</property>
-		
-		<property name="eventBlackFilter">
-			<bean class="com.alibaba.otter.canal.filter.aviater.AviaterRegexFilter" >
-				<constructor-arg index="0" value="${canal.instance.filter.black.regex:}" />
-				<constructor-arg index="1" value="false" />
-			</bean>
-		</property>
-		
-		<!-- 最大事务解析大小,超过该大小后事务将被切分为多个事务投递 -->
-		<property name="transactionSize" value="${canal.instance.transaction.size:1024}" />
-			
-		<!-- 解析编码 -->
-		<property name="connectionCharset" value="${canal.instance.connectionCharset:UTF-8}" />
-	
-		<!-- 解析位点记录 -->
-		<property name="logPositionManager">
-			<bean class="com.alibaba.otter.canal.parse.index.FailbackLogPositionManager">
-				<constructor-arg>
-					<bean class="com.alibaba.otter.canal.parse.index.MemoryLogPositionManager" />
-				</constructor-arg>
-				<constructor-arg>
-					<bean class="com.alibaba.otter.canal.parse.index.MetaLogPositionManager">
-						<constructor-arg ref="metaManager"/>
-					</bean>
-				</constructor-arg>
-			</bean>
-		</property>
-		
-		<!-- 解析数据库信息 -->
-		<property name="masterInfo">
-			<bean class="com.alibaba.otter.canal.parse.support.AuthenticationInfo">
-				<property name="address" value="${canal.instance.master.address}" />
-				<property name="username" value="${canal.instance.dbUsername:retl}" />
-				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
-			</bean>
-		</property>
-		
-		<property name="filterQueryDml" value="${canal.instance.filter.query.dml:false}" />
-		<property name="filterQueryDcl" value="${canal.instance.filter.query.dcl:false}" />
-		<property name="filterQueryDdl" value="${canal.instance.filter.query.ddl:false}" />
-		<property name="useDruidDdlFilter" value="${canal.instance.filter.druid.ddl:true}" />
-		<property name="filterRows" value="${canal.instance.filter.rows:false}" />
-		<property name="filterTableError" value="${canal.instance.filter.table.error:false}" />
-		<property name="needWait" value="${canal.instance.parser.needWait:false}"/>
-		<property name="directory" value="${canal.instance.parser.directory:}"/>
-		
-		<!-- rds相关 -->
-		<property name="url" value="${canal.instance.rds.open.url:}"/>
-		<property name="accesskey" value="${canal.instance.rds.open.accesskey:}"/>
-		<property name="secretkey" value="${canal.instance.rds.open.secretkey:}"/>
-		<property name="instanceId" value="${canal.instance.rds.instanceId:}"/>
-		<property name="startTime" value="${canal.instance.rds.startTime:}"/>
-		<property name="endTime" value="${canal.instance.rds.endTime:}"/>
-		
-		<!--表结构相关-->
-		<property name="enableTsdb" value="${canal.instance.tsdb.enable:true}"/>
-		<property name="tsdbSpringXml" value="${canal.instance.tsdb.spring.xml:}"/>
-		
-		<!-- parallel parser -->
-		<property name="parallel" value="${canal.instance.parser.parallel:true}" />
-		<property name="parallelThreadSize" value="${canal.instance.parser.parallelThreadSize}" />
-		<property name="parallelBufferSize" value="${canal.instance.parser.parallelBufferSize:256}" />
-	</bean>
-</beans>

+ 5 - 23
deployer/src/main/resources/spring/memory-instance.xml

@@ -10,26 +10,7 @@
            http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-2.5.xsd"
 	default-autowire="byName">
 	
-	<!-- properties -->
-	<bean class="com.alibaba.otter.canal.instance.spring.support.PropertyPlaceholderConfigurer" lazy-init="false">
-		<property name="ignoreResourceNotFound" value="true" />
-		<property name="systemPropertiesModeName" value="SYSTEM_PROPERTIES_MODE_OVERRIDE"/><!-- 允许system覆盖 -->
-		<property name="locationNames">
-			<list>
-				<value>classpath:canal.properties</value>
-				<value>classpath:${canal.instance.destination:}/instance.properties</value>
-			</list>
-		</property>
-	</bean>
-	
-	<bean id="socketAddressEditor" class="com.alibaba.otter.canal.instance.spring.support.SocketAddressEditor" />
-	<bean class="org.springframework.beans.factory.config.CustomEditorConfigurer"> 
-		<property name="propertyEditorRegistrars">
-			<list>
-				<ref bean="socketAddressEditor" />
-			</list>
-		</property>
-	</bean>
+	<import resource="classpath:spring/base-instance.xml" />
 
 	<bean id="instance" class="com.alibaba.otter.canal.instance.spring.CanalInstanceWithSpring">
 		<property name="destination" value="${canal.instance.destination}" />
@@ -64,9 +45,10 @@
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
 		<property name="eventStore" ref="eventStore" />
+		<property name="filterTransactionEntry" value="${canal.instance.filter.transaction.entry:false}"/>
 	</bean>
 
-	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
+	<bean id="eventParser" parent="baseEventParser">
 		<property name="destination" value="${canal.instance.destination}" />
 		<property name="slaveId" value="${canal.instance.mysql.slaveId:0}" />
 		<!-- 心跳配置 -->
@@ -121,7 +103,7 @@
 				<property name="address" value="${canal.instance.master.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
@@ -129,7 +111,7 @@
 				<property name="address" value="${canal.instance.standby.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		

+ 33 - 0
docker/Dockerfile

@@ -0,0 +1,33 @@
+FROM canal/osbase:v1
+
+MAINTAINER agapple (jianghang115@gmail.com)
+
+# install canal
+COPY image/ /tmp/docker/
+COPY canal.deployer-*.tar.gz /home/admin/
+
+RUN \
+    cp -R /tmp/docker/alidata /alidata && \
+    chmod +x /alidata/bin/* && \
+    mkdir -p /home/admin && \
+    cp -R /tmp/docker/admin/* /home/admin/  && \
+    /bin/cp -f /alidata/bin/lark-wait /usr/bin/lark-wait && \
+
+    mkdir -p /home/admin/canal-server && \
+    tar -xzvf /home/admin/canal.deployer-*.tar.gz -C /home/admin/canal-server && \
+    /bin/rm -f /home/admin/canal.deployer-*.tar.gz && \
+
+    mkdir -p /home/admin/canal-server/logs  && \
+    chmod +x /home/admin/*.sh  && \
+    chmod +x /home/admin/bin/*.sh  && \
+    chown admin: -R /home/admin && \
+    yum clean all && \
+    true
+
+# 2222 sys , 8080 web , 8000 debug , 11111 canal
+EXPOSE 2222 11111 8000 8080
+
+WORKDIR /home/admin
+
+ENTRYPOINT [ "/alidata/bin/main.sh" ]
+CMD [ "/home/admin/app.sh" ]

+ 41 - 0
docker/base/Dockerfile

@@ -0,0 +1,41 @@
+FROM centos:centos6.10
+
+MAINTAINER agapple (jianghang115@gmail.com)
+
+ENV DOWNLOAD_LINK="http://download.oracle.com/otn-pub/java/jdk/8u181-b13/96a7b8442fe848ef90c96a2fad6ed6d1/jdk-8u181-linux-x64.rpm"
+# install system
+RUN \
+    /bin/cp -f /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
+    echo 'root:Hello1234' | chpasswd && \
+    groupadd -r admin && useradd -g admin admin && \
+    yum install -y man && \
+    yum install -y dstat && \
+    yum install -y unzip && \
+    yum install -y nc && \
+    yum install -y openssh-server && \
+    yum install -y tar && \
+    yum install -y which && \
+    yum install -y wget && \
+    yum install -y perl && \
+    yum install -y file && \
+    ssh-keygen -q -N "" -t dsa -f /etc/ssh/ssh_host_dsa_key && \
+    ssh-keygen -q -N "" -t rsa -f /etc/ssh/ssh_host_rsa_key && \
+    sed -ri 's/session    required     pam_loginuid.so/#session    required     pam_loginuid.so/g' /etc/pam.d/sshd && \
+    sed -i -e 's/^#Port 22$/Port 2222/' /etc/ssh/sshd_config && \
+    mkdir -p /root/.ssh && chown root.root /root && chmod 700 /root/.ssh && \
+    yum install -y cronie && \
+    sed -i '/session required pam_loginuid.so/d' /etc/pam.d/crond && \
+    true
+
+RUN \
+    touch /var/lib/rpm/* && \ 
+    wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=xxx; oraclelicense=accept-securebackup-cookie" "$DOWNLOAD_LINK" -O /tmp/jdk-8-linux-x64.rpm && \
+    yum -y install /tmp/jdk-8-linux-x64.rpm && \
+    /bin/rm -f /tmp/jdk-8-linux-x64.rpm && \
+
+    echo "export JAVA_HOME=/usr/java/latest" >> /etc/profile && \
+    echo "export PATH=\$JAVA_HOME/bin:\$PATH" >> /etc/profile && \
+    yum clean all && \
+    true
+
+CMD ["/bin/bash"]

+ 29 - 0
docker/build.sh

@@ -0,0 +1,29 @@
+#!/bin/bash
+
+current_path=`pwd`
+case "`uname`" in
+    Darwin)
+        bin_abs_path=`cd $(dirname $0); pwd`
+        ;;
+    Linux)
+        bin_abs_path=$(readlink -f $(dirname $0))
+        ;;
+    *)
+        bin_abs_path=`cd $(dirname $0); pwd`
+        ;;
+esac
+BASE=${bin_abs_path}
+
+if [ "$1" == "base" ] ; then
+    docker build --no-cache -t canal/osbase $BASE/base
+else 
+    rm -rf $BASE/canal.*.tar.gz ; 
+    cd $BASE/../ && mvn clean package -Dmaven.test.skip -Denv=release && cd $current_path ;
+    if [ "$1" == "kafka" ] ; then
+	   cp $BASE/../target/canal-kafka-*.tar.gz $BASE/
+	   docker build --no-cache -t canal/canal-server $BASE/
+    else 
+	   cp $BASE/../target/canal.deployer-*.tar.gz $BASE/
+	   docker build --no-cache -t canal/canal-server $BASE/
+    fi
+fi

+ 119 - 0
docker/image/admin/app.sh

@@ -0,0 +1,119 @@
+#!/bin/bash
+set -e
+
+source /etc/profile
+export JAVA_HOME=/usr/java/latest
+export PATH=$JAVA_HOME/bin:$PATH
+touch /tmp/start.log
+chown admin: /tmp/start.log
+chown -R admin: /home/admin/canal-server
+host=`hostname -i`
+
+# waitterm
+#   wait TERM/INT signal.
+#   see: http://veithen.github.io/2014/11/16/sigterm-propagation.html
+waitterm() {
+        local PID
+        # any process to block
+        tail -f /dev/null &
+        PID="$!"
+        # setup trap, could do nothing, or just kill the blocker
+        trap "kill -TERM ${PID}" TERM INT
+        # wait for signal, ignore wait exit code
+        wait "${PID}" || true
+        # clear trap
+        trap - TERM INT
+        # wait blocker, ignore blocker exit code
+        wait "${PID}" 2>/dev/null || true
+}
+
+# waittermpid "${PIDFILE}".
+#   monitor process by pidfile && wait TERM/INT signal.
+#   if the process disappeared, return 1, means exit with ERROR.
+#   if TERM or INT signal received, return 0, means OK to exit.
+waittermpid() {
+        local PIDFILE PID do_run error
+        PIDFILE="${1?}"
+        do_run=true
+        error=0
+        trap "do_run=false" TERM INT
+        while "${do_run}" ; do
+                PID="$(cat "${PIDFILE}")"
+                if ! ps -p "${PID}" >/dev/null 2>&1 ; then
+                        do_run=false
+                        error=1
+                else
+                        sleep 1
+                fi
+        done
+        trap - TERM INT
+        return "${error}"
+}
+
+
+function checkStart() {
+    local name=$1
+    local cmd=$2
+    local timeout=$3
+    cost=5
+    while [ $timeout -gt 0 ]; do
+        ST=`eval $cmd`
+        if [ "$ST" == "0" ]; then
+            sleep 1
+            let timeout=timeout-1
+            let cost=cost+1
+        elif [ "$ST" == "" ]; then
+            sleep 1
+            let timeout=timeout-1
+            let cost=cost+1
+        else
+            break
+        fi
+    done
+    echo "start $name successful"
+}
+
+
+function start_canal() {
+    echo "start canal ..."
+    serverPort=`perl -le 'print $ENV{"canal.port"}'`
+    if [ -z "$serverPort" ] ; then
+        serverPort=11111
+    fi
+
+    destination=`perl -le 'print $ENV{"canal.destinations"}'`
+    if [[ "$destination" =~ ',' ]]; then
+        echo "multi destination:$destination is not support"
+        exit 1;
+    else
+        if [ "$destination" != "" ] && [ "$destination" != "example" ] ; then
+            mv /home/admin/canal-server/conf/example /home/admin/canal-server/conf/$destination
+        fi 
+    fi
+    su admin -c 'cd /home/admin/canal-server/bin/ && sh restart.sh 1>>/tmp/start.log 2>&1'
+    sleep 5
+    #check start
+    checkStart "canal" "nc 127.0.0.1 $serverPort -w 1 -z | wc -l" 30
+}
+
+function stop_canal() {
+    echo "stop canal"
+    su admin -c 'cd /home/admin/canal-server/bin/ && sh stop.sh 1>>/tmp/start.log 2>&1'
+    echo "stop canal successful ..."
+}
+
+echo "==> START ..."
+
+start_canal
+
+echo "==> START SUCCESSFUL ..."
+
+tail -f /dev/null &
+# wait TERM signal
+waitterm
+
+echo "==> STOP"
+
+stop_canal
+
+echo "==> STOP SUCCESSFUL ..."

+ 2 - 0
docker/image/admin/bin/clean_log

@@ -0,0 +1,2 @@
+# cron clean log once every 2 minutes
+*/2 * * * * admin /home/admin/bin/clean_log.sh >>/tmp/clean_log.log 2>&1

+ 45 - 0
docker/image/admin/bin/clean_log.sh

@@ -0,0 +1,45 @@
+#!/bin/bash
+
+# Global Settings
+PATH="$HOME/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/usr/X11R6/bin:/root/bin"
+export PATH
+
+CUTOFF="85"
+#获取磁盘使用率最高的分区
+USAGE=$(df -h|awk 'NR>1 {gsub(/%$/,"",$5);print $5 }'|sort -nr|head -1)
+before=$USAGE
+
+baseClean(){
+    #删除tmp目录15天前的文件。
+    #更新文档时间戳
+    if [ -d /tmp/hsperfdata_admin ]
+    then
+        touch /tmp/hsperfdata_admin
+        touch /tmp/hsperfdata_admin/*
+    fi
+
+    find /tmp/ -type f -mtime +15 | xargs -t rm -rf >/dev/null 2>&1
+
+
+    now=$(df -h|awk 'NR>1 {gsub(/%$/,"",$5);print $5 }'|sort -nr|head -1)
+    echo "before:$before; now:$now"
+}
+
+CANAL_DIR="/home/admin/canal-server/logs"
+if [[ -d $CANAL_DIR ]]; then
+  USAGE=$(df -h|awk 'NR>1 {gsub(/%$/,"",$5);print $5 }'|sort -nr|head -1)
+  if [[ $USAGE -ge 90 ]]; then
+        find $CANAL_DIR -type f -mtime +7 | xargs rm -rf {}
+  fi
+  USAGE=$(df -h|awk 'NR>1 {gsub(/%$/,"",$5);print $5 }'|sort -nr|head -1)
+  if [[ $USAGE -ge 80 ]]; then
+        find $CANAL_DIR -type f -mtime +3 | xargs rm -rf {}
+  fi
+  USAGE=$(df -h|awk 'NR>1 {gsub(/%$/,"",$5);print $5 }'|sort -nr|head -1)
+  if [[ $USAGE -ge 80 ]]; then
+        find $CANAL_DIR -type d -empty -mtime +3 | grep -v canal | xargs rm -rf {}
+        find $CANAL_DIR -type f -iname '*.tmp' | xargs rm -rf {}
+  fi
+  baseClean
+  exit 0
+fi

+ 18 - 0
docker/image/admin/health.sh

@@ -0,0 +1,18 @@
+#!/bin/sh
+metrics_port=`perl -le 'print $ENV{"canal.metrics.pull.port"}'`
+if [ "$metrics_port" == "" ]; then
+	metrics_port="11112"
+fi
+
+CHECK_URL="http://127.0.0.1:$metrics_port/metrics"
+CHECK_POINT="canal"
+CHECK_COUNT=`curl -s --connect-timeout 7 --max-time 7 $CHECK_URL | grep -c $CHECK_POINT`
+if [ $CHECK_COUNT -eq 0 ]; then
+    echo "[FAILED]"
+    status=0
+	error=1
+else
+    echo "[  OK  ]"
+    status=1
+	error=0
+fi

+ 11 - 0
docker/image/alidata/bin/exec_rc_local.sh

@@ -0,0 +1,11 @@
+#!/bin/bash
+
+if [ "${SKIP_EXEC_RC_LOCAL}" = "YES" ] ; then
+	echo "skip /etc/rc.local: SKIP_EXEC_RC_LOCAL=${SKIP_EXEC_RC_LOCAL}"
+	exit
+fi
+
+if [ "${DOCKER_DEPLOY_TYPE}" = "HOST" ] ; then
+	echo "skip /etc/rc.local: DOCKER_DEPLOY_TYPE=${DOCKER_DEPLOY_TYPE}"
+	exit
+fi

+ 6 - 0
docker/image/alidata/bin/lark-wait

@@ -0,0 +1,6 @@
+#!/bin/bash
+set -e
+
+chown admin: -R /home/admin/
+source /alidata/lib/proc.sh
+waitterm

+ 27 - 0
docker/image/alidata/bin/main.sh

@@ -0,0 +1,27 @@
+#!/bin/bash
+
+[ -n "${DOCKER_DEPLOY_TYPE}" ] || DOCKER_DEPLOY_TYPE="VM"
+echo "DOCKER_DEPLOY_TYPE=${DOCKER_DEPLOY_TYPE}"
+
+# run init scripts
+for e in $(ls /alidata/init/*) ; do
+	[ -x "${e}" ] || continue
+	echo "==> INIT $e"
+	$e
+	echo "==> EXIT CODE: $?"
+done
+
+echo "==> INIT DEFAULT"
+service sshd start
+service crond start
+
+#echo "check hostname -i: `hostname -i`"
+#hti_num=`hostname -i|awk '{print NF}'`
+#if [ $hti_num -gt 1 ];then
+#    echo "hostname -i result error:`hostname -i`"
+#    exit 120
+#fi
+
+echo "==> INIT DONE"
+echo "==> RUN ${*}"
+exec "${@}"

+ 19 - 0
docker/image/alidata/init/02init-sshd.sh

@@ -0,0 +1,19 @@
+#!/bin/bash
+
+# set port
+if [ -z "${SSHD_PORT}" ] ; then
+	SSHD_PORT=22
+	[ "${DOCKER_DEPLOY_TYPE}" = "HOST" ] && SSHD_PORT=2222
+fi
+
+sed -r -i '/^OPTIONS=/ d' /etc/sysconfig/sshd
+echo 'OPTIONS="-p '"${SSHD_PORT}"'"' >> /etc/sysconfig/sshd
+
+# set admin ssh public key
+if [ "${USE_ADMIN_PASSAGE}" = "YES" ] ; then
+    echo "set admin passage"
+    mkdir -p /home/admin/.ssh
+    chown admin:admin /home/admin/.ssh
+    chown admin:admin /home/admin/.ssh/authorized_keys
+    chmod 644 /home/admin/.ssh/authorized_keys
+fi

+ 66 - 0
docker/image/alidata/init/fix-hosts.py

@@ -0,0 +1,66 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#****************************************************************#
+# Create Date: 2017-01-06 17:58
+#***************************************************************#
+
+import socket
+import shutil
+from time import gmtime, strftime
+
+# get host_name
+host_name = socket.gethostname()
+tmp_file = "/tmp/.lark-fix-host.hosts"
+host_file = "/etc/hosts"
+bak_file_name = "/tmp/hosts-fix-bak.%s" % ( strftime("%Y-%m-%d_%H-%M-%S", gmtime()) )
+
+# load /etc/hosts file context
+FH = open(host_file,"r")
+file_lines = [ i.rstrip() for i in FH.readlines()]
+FH.close()
+file_lines_reverse = file_lines[::-1]
+new_lines = []
+bad_lines = []
+last_match_line = ""
+
+for line in file_lines_reverse:
+    if line.find(host_name) < 0:  # 不匹配的行直接跳过
+        new_lines.append(line + "\n")
+        continue
+
+    cols = line.split()
+    new_cols = []
+    if cols[0].startswith("#"): # 跳过已经注释掉的行
+        new_lines.append(line + "\n")
+        continue
+    for col in cols:
+        if not col == host_name: # 跳过不匹配的列
+            new_cols.append(col)
+            continue
+
+        if cols[0] == "127.0.0.1": # 如果第一列是 127.0.0.1 就跳过匹配的列, 防止 hostname -i 返回 127.0.0.1
+            continue
+
+        # 如果已经发现过匹配的列, 就丢掉重复的列
+        if not len(last_match_line) == 0:
+            continue
+
+        new_cols.append(col)
+        last_match_line = line
+
+    # 跳过 xx.xx.xx.xx hostname 这样的重复列
+    if len(new_cols) == 1:
+        continue
+
+    new_l = "%s\n" % " ".join(new_cols)
+    new_lines.append(new_l)
+
+# save tmp hosts
+
+FH2=file(tmp_file,"w+")
+FH2.writelines( new_lines[::-1])
+FH2.close()
+
+# mv to /etc/hosts
+shutil.copy(host_file, bak_file_name)
+shutil.move(tmp_file, host_file)

+ 40 - 0
docker/image/alidata/lib/proc.sh

@@ -0,0 +1,40 @@
+# waitterm
+#   wait TERM/INT signal.
+#   see: http://veithen.github.io/2014/11/16/sigterm-propagation.html
+waitterm() {
+	local PID
+	# any process to block
+	tail -f /dev/null &
+	PID="$!"
+	# setup trap, could do nothing, or just kill the blocker
+	trap "kill -TERM ${PID}" TERM INT
+	# wait for signal, ignore wait exit code
+	wait "${PID}" || true
+	# clear trap
+	trap - TERM INT
+	# wait blocker, ignore blocker exit code
+	wait "${PID}" 2>/dev/null || true
+}
+
+# waittermpid "${PIDFILE}".
+#   monitor process by pidfile && wait TERM/INT signal.
+#   if the process disappeared, return 1, means exit with ERROR.
+#   if TERM or INT signal received, return 0, means OK to exit.
+waittermpid() {
+	local PIDFILE PID do_run error
+	PIDFILE="${1?}"
+	do_run=true
+	error=0
+	trap "do_run=false" TERM INT
+	while "${do_run}" ; do
+		PID="$(cat "${PIDFILE}")"
+		if ! ps -p "${PID}" >/dev/null 2>&1 ; then
+			do_run=false
+			error=1
+		else
+			sleep 1
+		fi
+	done
+	trap - TERM INT
+	return "${error}"
+}

+ 92 - 0
docker/run.sh

@@ -0,0 +1,92 @@
+#!/bin/bash
+
+function usage() {
+    echo "Usage:"
+    echo "  run.sh [CONFIG]"
+    echo "example:"
+    echo "  run.sh -e canal.instance.master.address=127.0.0.1:3306 \\"
+    echo "         -e canal.instance.dbUsername=canal \\"
+    echo "         -e canal.instance.dbPassword=canal \\"
+    echo "         -e canal.instance.connectionCharset=UTF-8 \\"
+    echo "         -e canal.instance.tsdb.enable=true \\"
+    echo "         -e canal.instance.gtidon=false \\"
+    echo "         -e canal.instance.filter.regex=.*\\\\\\..* "
+    exit
+}
+
+function check_port() {
+    local port=$1
+    local TL=$(which telnet)
+    if [ -f $TL ]; then
+        data=`echo quit | telnet 127.0.0.1 $port| grep -ic connected`
+        echo $data
+        return
+    fi
+
+    local NC=$(which nc)
+    if [ -f $NC ]; then
+        data=`nc -z -w 1 127.0.0.1 $port | grep -ic succeeded`
+        echo $data
+        return
+    fi
+    echo "0"
+    return
+}
+
+function getMyIp() {
+    case "`uname`" in
+        Darwin)
+         myip=`echo "show State:/Network/Global/IPv4" | scutil | grep PrimaryInterface | awk '{print $3}' | xargs ifconfig | grep inet | grep -v inet6 | awk '{print $2}'`
+         ;;
+        *)
+         myip=`ip route get 1 | awk '{print $NF;exit}'`
+         ;;
+  esac
+  echo $myip
+}
+
+NET_MODE=""
+case "`uname`" in
+    Darwin)
+        bin_abs_path=`cd $(dirname $0); pwd`
+        ;;
+    Linux)
+        bin_abs_path=$(readlink -f $(dirname $0))
+        NET_MODE="--net=host"
+        ;;
+    *)
+        NET_MODE="--net=host"
+        bin_abs_path=`cd $(dirname $0); pwd`
+        ;;
+esac
+BASE=${bin_abs_path}
+if [ $# -eq 0 ]; then
+    usage
+elif [ "$1" == "-h" ] ; then
+    usage
+elif [ "$1" == "help" ] ; then
+    usage
+fi
+
+DATA="$BASE/data"
+mkdir -p $DATA
+CONFIG=${@:1}
+#VOLUMNS="-v $DATA:/home/admin/canal-server/logs"
+PORTLIST="8000 8080 2222 11111"
+PORTS=""
+for PORT in $PORTLIST ; do
+    #exist=`check_port $PORT`
+    exist="0"
+    if [ "$exist" == "0" ]; then
+        PORTS="$PORTS -p $PORT:$PORT"
+    else
+        echo "port $PORT is used , pls check"
+        exit 1
+    fi
+done
+
+MEMORY="-m 4096m"
+LOCALHOST=`getMyIp`
+cmd="docker run -it -h $LOCALHOST $CONFIG --name=canal-server $VOLUMNS $NET_MODE $PORTS $MEMORY canal/canal-server"
+echo $cmd
+eval $cmd

+ 1 - 1
driver/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
-		<version>1.0.26-SNAPSHOT</version>
+		<version>1.1.0-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 	</parent>
 	<groupId>com.alibaba.otter</groupId>

+ 1 - 1
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/MysqlConnector.java

@@ -32,7 +32,7 @@ public class MysqlConnector {
     private String              password;
 
     private byte                charsetNumber     = 33;
-    private String              defaultSchema     = "retl";
+    private String              defaultSchema     = "test";
     private int                 soTimeout         = 30 * 1000;
     private int                 connTimeout       = 5 * 1000;
     private int                 receiveBufferSize = 16 * 1024;

+ 77 - 1
example/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
-		<version>1.0.26-SNAPSHOT</version>
+		<version>1.1.0-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 	</parent>
 	<groupId>com.alibaba.otter</groupId>
@@ -21,6 +21,82 @@
 			<artifactId>canal.protocol</artifactId>
 			<version>${project.version}</version>
 		</dependency>
+		<dependency>
+			<groupId>com.alibaba</groupId>
+			<artifactId>druid</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>mysql</groupId>
+			<artifactId>mysql-connector-java</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.ddlutils</groupId>
+			<artifactId>ddlutils</artifactId>
+			<version>1.0</version>
+			<exclusions>
+				<exclusion>
+					<groupId>commons-beanutils</groupId>
+					<artifactId>commons-beanutils-core</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-lang</groupId>
+					<artifactId>commons-lang</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-dbcp</groupId>
+					<artifactId>commons-dbcp</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-pool</groupId>
+					<artifactId>commons-pool</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-logging</groupId>
+					<artifactId>commons-logging-api</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>dom4j</groupId>
+					<artifactId>dom4j</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>stax</groupId>
+					<artifactId>stax-api</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-collections</groupId>
+					<artifactId>commons-collections</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-digester</groupId>
+					<artifactId>commons-digester</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>commons-betwixt</groupId>
+					<artifactId>commons-betwixt</artifactId>
+				</exclusion>
+			</exclusions>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.commons</groupId>
+			<artifactId>commons-pool2</artifactId>
+			<version>2.5.0</version>
+		</dependency>
+		<dependency>
+			<groupId>commons-beanutils</groupId>
+			<artifactId>commons-beanutils</artifactId>
+			<version>1.8.2</version>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.commons</groupId>
+			<artifactId>commons-lang3</artifactId>
+			<version>3.7</version>
+		</dependency>
+		<dependency>
+			<groupId>commons-collections</groupId>
+			<artifactId>commons-collections</artifactId>
+			<version>3.2</version>
+		</dependency>
+
 		<!-- test dependency -->
 		<dependency>
 			<groupId>junit</groupId>

+ 12 - 2
example/src/main/java/com/alibaba/otter/canal/example/AbstractCanalClientTest.java

@@ -1,9 +1,9 @@
 package com.alibaba.otter.canal.example;
 
+import java.io.UnsupportedEncodingException;
 import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.List;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.SystemUtils;
@@ -254,7 +254,17 @@ public class AbstractCanalClientTest {
     protected void printColumn(List<Column> columns) {
         for (Column column : columns) {
             StringBuilder builder = new StringBuilder();
-            builder.append(column.getName() + " : " + column.getValue());
+            try {
+                if (StringUtils.containsIgnoreCase(column.getMysqlType(), "BLOB")
+                    || StringUtils.containsIgnoreCase(column.getMysqlType(), "BINARY")) {
+                    // get value bytes
+                    builder.append(column.getName() + " : "
+                                   + new String(column.getValue().getBytes("ISO-8859-1"), "UTF-8"));
+                } else {
+                    builder.append(column.getName() + " : " + column.getValue());
+                }
+            } catch (UnsupportedEncodingException e) {
+            }
             builder.append("    type=" + column.getMysqlType());
             if (column.getUpdated()) {
                 builder.append("    update=" + column.getUpdated());

+ 144 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/AbstractDbClient.java

@@ -0,0 +1,144 @@
+package com.alibaba.otter.canal.example.db;
+
+import com.alibaba.otter.canal.protocol.CanalEntry;
+import com.alibaba.otter.canal.protocol.Message;
+import org.slf4j.MDC;
+
+import java.util.Date;
+import java.util.List;
+
+/**
+ * Template client for replaying canal binlog events into a database.
+ * Subclasses implement the three DML callbacks; this class handles the
+ * message loop, per-entry retry policy and batch acknowledgement.
+ */
+public abstract class AbstractDbClient extends CanalConnectorClient {
+
+
+    /** Apply an INSERT row; {@code afterColumns} holds the new column values. */
+    public abstract void insert(CanalEntry.Header header, List<CanalEntry.Column> afterColumns);
+
+    /** Apply an UPDATE row; {@code afterColumns} holds the post-update column values. */
+    public abstract void update(CanalEntry.Header header, List<CanalEntry.Column> afterColumns);
+
+    /** Apply a DELETE row; {@code beforeColumns} holds the pre-delete column values. */
+    public abstract void delete(CanalEntry.Header header, List<CanalEntry.Column> beforeColumns);
+
+
+    @Override
+    public synchronized void start() {
+        // idempotent: a second start() while running is a no-op
+        if (running) {
+            return;
+        }
+        super.start();
+    }
+
+    @Override
+    public synchronized void stop() {
+        // idempotent: stop() when not running is a no-op
+        if (!running) {
+            return;
+        }
+        super.stop();
+        MDC.remove("destination");
+    }
+
+    @Override
+    protected void processMessage(Message message) {
+        long batchId = message.getId();
+        // iterate over every entry in the batch
+        for (CanalEntry.Entry entry : message.getEntries()) {
+            session(entry);//no exception
+        }
+        // always ack the whole batch.
+        // NOTE(review): ack is sent even when session() exhausted its retries
+        // for an entry, so permanently-failing entries are skipped — confirm
+        // this at-most-once behaviour is intended.
+        connector.ack(batchId);
+    }
+
+    /**
+     * Process a single entry, retrying according to {@code exceptionStrategy}:
+     * RETRY attempts up to {@code retryTimes}, IGNORE gives up after the first
+     * failure. Failures are logged; this method never throws.
+     */
+    private void session(CanalEntry.Entry entry) {
+        CanalEntry.EntryType entryType = entry.getEntryType();
+        int times = 0;
+        boolean success = false;
+        while (!success) {
+            if (times > 0) {
+                /**
+                 * 1: retry — retries up to retryTimes (default 3); once the
+                 *    threshold is reached the entry is skipped and logged.
+                 * 2: ignore — no retry at all; the failure is only logged.
+                 */
+                if (exceptionStrategy == ExceptionStrategy.RETRY.code) {
+                    if (times >= retryTimes) {
+                        break;
+                    }
+                } else {
+                    break;
+                }
+            }
+            try {
+                switch (entryType) {
+                    case TRANSACTIONBEGIN:
+                        transactionBegin(entry);
+                        break;
+                    case TRANSACTIONEND:
+                        transactionEnd(entry);
+                        break;
+                    case ROWDATA:
+                        rowData(entry);
+                        break;
+                    default:
+                        break;
+                }
+                success = true;
+            } catch (Exception e) {
+                times++;
+                logger.error("parse event has an error ,times: + " + times + ", data:" + entry.toString(), e);
+            }
+
+        }
+    }
+
+    /**
+     * Decode a ROWDATA entry and route it: DDL (or non-DML events) go to
+     * processDDL, row changes go to processDML. Rethrows on failure so the
+     * retry loop in session() can apply its policy.
+     */
+    private void rowData(CanalEntry.Entry entry) throws Exception {
+        CanalEntry.RowChange rowChange = CanalEntry.RowChange.parseFrom(entry.getStoreValue());
+        CanalEntry.EventType eventType = rowChange.getEventType();
+        CanalEntry.Header header = entry.getHeader();
+        long executeTime = header.getExecuteTime();
+        long delayTime = new Date().getTime() - executeTime;
+        String sql = rowChange.getSql();
+
+        try {
+            if (!isDML(eventType) || rowChange.getIsDdl()) {
+                processDDL(header, eventType, sql);
+                return;
+            }
+            // dispatch DML row data to the subclass callbacks
+            processDML(header, eventType, rowChange, sql);
+        } catch (Exception e) {
+            logger.error("process event error ,", e);
+            logger.error(rowFormat,
+                    new Object[]{header.getLogfileName(), String.valueOf(header.getLogfileOffset()),
+                            header.getSchemaName(), header.getTableName(), eventType,
+                            String.valueOf(executeTime), String.valueOf(delayTime)});
+            throw e;// rethrow so the caller's retry policy applies
+        }
+    }
+
+    /**
+     * Dispatch each row of a DML change to the matching callback.
+     *
+     * @param header    entry header (schema/table/binlog position)
+     * @param eventType INSERT / UPDATE / DELETE (anything else falls through to whenOthers)
+     * @param rowChange decoded row data
+     * @param sql       original SQL, passed to whenOthers for unexpected types
+     */
+    protected void processDML(CanalEntry.Header header, CanalEntry.EventType eventType, CanalEntry.RowChange rowChange, String sql) {
+        for (CanalEntry.RowData rowData : rowChange.getRowDatasList()) {
+            switch (eventType) {
+                case DELETE:
+                    delete(header, rowData.getBeforeColumnsList());
+                    break;
+                case INSERT:
+                    insert(header, rowData.getAfterColumnsList());
+                    break;
+                case UPDATE:
+                    update(header, rowData.getAfterColumnsList());
+                    break;
+                default:
+                    whenOthers(header, sql);
+            }
+        }
+    }
+
+}
+
+
+
+

+ 488 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/CanalConnectorClient.java

@@ -0,0 +1,488 @@
+package com.alibaba.otter.canal.example.db;
+
+import com.alibaba.otter.canal.client.CanalConnector;
+import com.alibaba.otter.canal.client.CanalConnectors;
+import com.alibaba.otter.canal.common.AbstractCanalLifeCycle;
+import com.alibaba.otter.canal.protocol.CanalEntry;
+import com.alibaba.otter.canal.protocol.Message;
+import org.apache.commons.lang.SystemUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
+import org.springframework.beans.factory.InitializingBean;
+import org.springframework.util.CollectionUtils;
+
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.List;
+
+/**
+ * Base lifecycle for a canal consumer: builds a single-node or ZooKeeper
+ * cluster connector, runs the fetch loop on a dedicated thread, and leaves
+ * batch handling to subclasses via {@link #processMessage(Message)}.
+ * Started automatically by Spring through afterPropertiesSet().
+ */
+public abstract class CanalConnectorClient extends AbstractCanalLifeCycle implements InitializingBean {
+
+    protected static final Logger logger = LoggerFactory.getLogger(CanalConnectorClient.class);
+    protected static final String SEP = SystemUtils.LINE_SEPARATOR;
+    // log layout templates, built once in the static initializer below
+    protected static String contextFormat;
+    protected static String rowFormat;
+    protected static String transactionFormat;
+    protected static final String DATE_FORMAT = "yyyy-MM-dd HH:mm:ss";
+
+    static {
+        StringBuilder sb = new StringBuilder();
+        sb.append(SEP)
+                .append("-------------Batch-------------")
+                .append(SEP)
+                .append("* Batch Id: [{}] ,count : [{}] , Mem size : [{}] , Time : {}")
+                .append(SEP)
+                .append("* Start : [{}] ")
+                .append(SEP)
+                .append("* End : [{}] ")
+                .append(SEP)
+                .append("-------------------------------")
+                .append(SEP);
+        contextFormat = sb.toString();
+
+        sb = new StringBuilder();
+        sb.append(SEP)
+                .append("+++++++++++++Row+++++++++++++>>>")
+                .append("binlog[{}:{}] , name[{},{}] , eventType : {} , executeTime : {} , delay : {}ms")
+                .append(SEP);
+        rowFormat = sb.toString();
+
+        sb = new StringBuilder();
+        sb.append(SEP)
+                .append("===========Transaction {} : {}=======>>>")
+                .append("binlog[{}:{}] , executeTime : {} , delay : {}ms")
+                .append(SEP);
+        transactionFormat = sb.toString();
+    }
+
+    private String zkServers;//cluster mode: ZooKeeper address list
+    private String address;//single mode: "ip:port"
+    private String destination;
+    private String username;
+    private String password;
+    private int batchSize = 5 * 1024;
+    private String filter = "";// same semantics as the canal server-side filter: restricts which databases/tables are consumed
+    protected boolean debug = false;// when true, every message's details are printed
+
+    // 1: retry — retry up to retryTimes; once the threshold is reached the entry is skipped and logged.
+    // 2: ignore — no retry, just log.
+    protected int exceptionStrategy = 1;
+    protected int retryTimes = 3;
+    protected int waitingTime = 100;// sleep (ms) for the main loop when the binlog has no data; must be > 0
+
+
+    protected CanalConnector connector;
+    protected Thread thread;
+
+    // last-resort handler: the worker thread should not die silently
+    protected Thread.UncaughtExceptionHandler handler = new Thread.UncaughtExceptionHandler() {
+
+        public void uncaughtException(Thread t, Throwable e) {
+            logger.error("process message has an error", e);
+        }
+    };
+
+    @Override
+    public void afterPropertiesSet() {
+        // validate configuration before the worker thread is spawned
+        if (waitingTime <= 0) {
+            throw new IllegalArgumentException("waitingTime must be greater than 0");
+        }
+        if (ExceptionStrategy.codeOf(exceptionStrategy) == null) {
+            throw new IllegalArgumentException("exceptionStrategy is not valid,1 or 2");
+        }
+        start();
+    }
+
+    @Override
+    public void start() {
+        if (running) {
+            return;
+        }
+        super.start();
+        initConnector();
+
+        thread = new Thread(new Runnable() {
+
+            public void run() {
+                process();
+            }
+        });
+
+        thread.setUncaughtExceptionHandler(handler);
+        thread.start();
+    }
+
+    @Override
+    public void stop() {
+        if (!running) {
+            return;
+        }
+        super.stop();
+        quietlyStop(thread);
+    }
+
+    /** Interrupt the worker and wait for it to finish; swallows the join interrupt. */
+    protected void quietlyStop(Thread task) {
+        if (task != null) {
+            task.interrupt();
+            try {
+                task.join();
+            } catch (InterruptedException e) {
+                // ignore
+            }
+        }
+    }
+
+    /**
+     * Main worker loop: (re)connect, subscribe, then repeatedly fetch batches
+     * without acking and hand them to processMessage(). On any error the
+     * connection is dropped and re-established after a backoff proportional
+     * to the consecutive failure count.
+     */
+    public void process() {
+        int times = 0;
+        while (running) {
+            try {
+                sleepWhenFailed(times);
+                //after block, should check the status of thread.
+                if (!running) {
+                    break;
+                }
+                MDC.put("destination", destination);
+                connector.connect();
+                connector.subscribe(filter);
+                connector.rollback();
+                times = 0;//reset;
+
+                while (running) {
+                    // fetch up to batchSize entries WITHOUT acking; ack/rollback is the subclass's job
+                    Message message = connector.getWithoutAck(batchSize);
+
+                    long batchId = message.getId();
+                    int size = message.getEntries().size();
+
+                    if (batchId == -1 || size == 0) {
+                        // nothing to consume — back off briefly
+                        try {
+                            Thread.sleep(waitingTime);
+                        } catch (InterruptedException e) {
+                            //
+                        }
+                        continue;
+                    }
+                    //logger
+                    printBatch(message, batchId);
+
+                    processMessage(message);
+
+                }
+            } catch (Exception e) {
+                logger.error("process error!", e);
+                if (times > 20) {
+                    times = 0;
+                }
+                times++;
+            } finally {
+                connector.disconnect();
+                MDC.remove("destination");
+            }
+        }
+    }
+
+    /** Handle one fetched batch; implementations must ack or rollback {@code message}. */
+    protected abstract void processMessage(Message message);
+
+
+    /** Build the connector: cluster mode if zkServers is set, otherwise single-node from "ip:port". */
+    private void initConnector() {
+        if (zkServers != null && zkServers.length() > 0) {
+            connector = CanalConnectors.newClusterConnector(zkServers, destination, username, password);
+        } else if (address != null) {
+            String[] segments = address.split(":");
+            SocketAddress socketAddress = new InetSocketAddress(segments[0], Integer.valueOf(segments[1]));
+            connector = CanalConnectors.newSingleConnector(socketAddress, destination, username, password);
+        } else {
+            throw new IllegalArgumentException("zkServers or address cant be null at same time,you should specify one of them!");
+        }
+
+    }
+
+    /**
+     * Backoff used when the connection keeps failing: we must not retry
+     * immediately, or a dead server would cause an error flood and a busy
+     * spin. The sleep grows linearly with the failure count.
+     *
+     * @param times consecutive failure count; 0 means no sleep
+     */
+    private void sleepWhenFailed(int times) {
+        if (times <= 0) {
+            return;
+        }
+        try {
+            int sleepTime = 1000 + times * 100;// caps near 3s because the caller resets times after 20
+            Thread.sleep(sleepTime);
+        } catch (Exception ex) {
+            //
+        }
+    }
+
+    /**
+     * Log a summary of the current batch (debug mode only).
+     *
+     * @param message the fetched batch
+     * @param batchId its canal batch id
+     */
+    protected void printBatch(Message message, long batchId) {
+        if (!debug) {
+            return;
+        }
+        List<CanalEntry.Entry> entries = message.getEntries();
+        if (CollectionUtils.isEmpty(entries)) {
+            return;
+        }
+
+        long memSize = 0;
+        for (CanalEntry.Entry entry : entries) {
+            memSize += entry.getHeader().getEventLength();
+        }
+        int size = entries.size();
+        String startPosition = buildPosition(entries.get(0));
+        String endPosition = buildPosition(message.getEntries().get(size - 1));
+
+        // SimpleDateFormat is not thread-safe, so a fresh instance is created per call
+        SimpleDateFormat format = new SimpleDateFormat(DATE_FORMAT);
+        logger.info(contextFormat, new Object[]{batchId, size, memSize, format.format(new Date()), startPosition, endPosition});
+    }
+
+    /** Render an entry's binlog position as "file:offset:executeTime(formatted)". */
+    protected String buildPosition(CanalEntry.Entry entry) {
+        CanalEntry.Header header = entry.getHeader();
+        long time = header.getExecuteTime();
+        Date date = new Date(time);
+        SimpleDateFormat format = new SimpleDateFormat(DATE_FORMAT);
+        StringBuilder sb = new StringBuilder();
+        sb.append(header.getLogfileName())
+                .append(":")
+                .append(header.getLogfileOffset())
+                .append(":")
+                .append(header.getExecuteTime())
+                .append("(")
+                .append(format.format(date))
+                .append(")");
+        return sb.toString();
+    }
+
+    /**
+     * default,only logging information
+     *
+     * @param entry transaction-begin entry
+     */
+    protected void transactionBegin(CanalEntry.Entry entry) {
+        if (!debug) {
+            return;
+        }
+        try {
+            CanalEntry.TransactionBegin begin = CanalEntry.TransactionBegin.parseFrom(entry.getStoreValue());
+            // log the transaction header: executing thread id and latency
+            CanalEntry.Header header = entry.getHeader();
+            long executeTime = header.getExecuteTime();
+            long delayTime = new Date().getTime() - executeTime;
+            logger.info(transactionFormat,
+                    new Object[]{"begin", begin.getTransactionId(), header.getLogfileName(),
+                            String.valueOf(header.getLogfileOffset()),
+                            String.valueOf(header.getExecuteTime()), String.valueOf(delayTime)});
+        } catch (Exception e) {
+            logger.error("parse event has an error , data:" + entry.toString(), e);
+        }
+    }
+
+    protected void transactionEnd(CanalEntry.Entry entry) {
+        if (!debug) {
+            return;
+        }
+        try {
+            CanalEntry.TransactionEnd end = CanalEntry.TransactionEnd.parseFrom(entry.getStoreValue());
+            // log the transaction commit: transaction id and latency
+            CanalEntry.Header header = entry.getHeader();
+            long executeTime = header.getExecuteTime();
+            long delayTime = new Date().getTime() - executeTime;
+
+            logger.info(transactionFormat,
+                    new Object[]{"end", end.getTransactionId(), header.getLogfileName(),
+                            String.valueOf(header.getLogfileOffset()),
+                            String.valueOf(header.getExecuteTime()), String.valueOf(delayTime)});
+        } catch (Exception e) {
+            logger.error("parse event has an error , data:" + entry.toString(), e);
+        }
+    }
+
+    /**
+     * Is this event type a DML row change?
+     *
+     * @param eventType canal event type
+     * @return true for INSERT/UPDATE/DELETE, false otherwise
+     */
+    protected boolean isDML(CanalEntry.EventType eventType) {
+        switch (eventType) {
+            case INSERT:
+            case UPDATE:
+            case DELETE:
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    /**
+     * Handle DDL events. Default implementation only logs (debug mode);
+     * there is no row data to apply for DDL.
+     *
+     * @param header    entry header (schema/table)
+     * @param eventType DDL event type
+     * @param sql       the DDL statement
+     */
+
+    protected void processDDL(CanalEntry.Header header, CanalEntry.EventType eventType, String sql) {
+        if (!debug) {
+            return;
+        }
+        String table = header.getSchemaName() + "." + header.getTableName();
+        // DDL carries no row-change data, so it is only reported here
+        switch (eventType) {
+            case CREATE:
+                logger.warn("parse create table event, table: {}, sql: {}", table, sql);
+                return;
+            case ALTER:
+                logger.warn("parse alter table event, table: {}, sql: {}", table, sql);
+                return;
+            case TRUNCATE:
+                logger.warn("parse truncate table event, table: {}, sql: {}", table, sql);
+                return;
+            case ERASE:
+            case QUERY:
+                logger.warn("parse event : {}, sql: {} . ignored!", eventType.name(), sql);
+                return;
+            case RENAME:
+                logger.warn("parse rename table event, table: {}, sql: {}", table, sql);
+                return;
+            case CINDEX:
+                logger.warn("parse create index event, table: {}, sql: {}", table, sql);
+                return;
+            case DINDEX:
+                logger.warn("parse delete index event, table: {}, sql: {}", table, sql);
+                return;
+            default:
+                logger.warn("parse unknown event: {}, table: {}, sql: {}", new String[]{eventType.name(), table, sql});
+                break;
+        }
+    }
+
+    /**
+     * Hook for event types other than insert/update/delete (non-core).
+     * Overriders are strongly advised to catch their own exceptions.
+     * Default implementation is a no-op apart from logging.
+     *
+     * @param header entry header; schema and table names can be read from it
+     * @param sql    the original SQL statement
+     */
+    public void whenOthers(CanalEntry.Header header, String sql) {
+        String schema = header.getSchemaName();
+        String table = header.getTableName();
+        logger.error("ignore event,schema: {},table: {},SQL: {}", new String[]{schema, table, sql});
+    }
+
+    /** Policy applied when processing an entry throws; see exceptionStrategy. */
+    public enum ExceptionStrategy {
+        RETRY(1), IGNORE(2);
+        public int code;
+
+        ExceptionStrategy(int code) {
+            this.code = code;
+        }
+
+        /** Resolve a strategy from its numeric code; null when unknown. */
+        public static ExceptionStrategy codeOf(Integer code) {
+            if (code != null) {
+                for (ExceptionStrategy e : ExceptionStrategy.values()) {
+                    if (e.code == code) {
+                        return e;
+                    }
+                }
+            }
+            return null;
+        }
+    }
+
+    public String getZkServers() {
+        return zkServers;
+    }
+
+    public void setZkServers(String zkServers) {
+        this.zkServers = zkServers;
+    }
+
+    public String getAddress() {
+        return address;
+    }
+
+    public void setAddress(String address) {
+        this.address = address;
+    }
+
+    public String getDestination() {
+        return destination;
+    }
+
+    public void setDestination(String destination) {
+        this.destination = destination;
+    }
+
+    public String getUsername() {
+        return username;
+    }
+
+    public void setUsername(String username) {
+        this.username = username;
+    }
+
+    public String getPassword() {
+        return password;
+    }
+
+    public void setPassword(String password) {
+        this.password = password;
+    }
+
+    public int getBatchSize() {
+        return batchSize;
+    }
+
+    public void setBatchSize(int batchSize) {
+        this.batchSize = batchSize;
+    }
+
+    public String getFilter() {
+        return filter;
+    }
+
+    public void setFilter(String filter) {
+        this.filter = filter;
+    }
+
+    public boolean isDebug() {
+        return debug;
+    }
+
+    public void setDebug(boolean debug) {
+        this.debug = debug;
+    }
+
+    public int getExceptionStrategy() {
+        return exceptionStrategy;
+    }
+
+    public void setExceptionStrategy(int exceptionStrategy) {
+        this.exceptionStrategy = exceptionStrategy;
+    }
+
+    public int getRetryTimes() {
+        return retryTimes;
+    }
+
+    public void setRetryTimes(int retryTimes) {
+        this.retryTimes = retryTimes;
+    }
+
+    public int getWaitingTime() {
+        return waitingTime;
+    }
+
+    public void setWaitingTime(int waitingTime) {
+        this.waitingTime = waitingTime;
+    }
+}

+ 35 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/MysqlLoadLauncher.java

@@ -0,0 +1,35 @@
+package com.alibaba.otter.canal.example.db;
+
+import com.alibaba.otter.canal.example.db.mysql.MysqlClient;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class MysqlLoadLauncher {
+    private static final Logger logger = LoggerFactory.getLogger(MysqlLoadLauncher.class);
+
+    public static void main(String[] args) {
+        try {
+            logger.info("## start the canal mysql client.");
+            final MysqlClient client = ServiceLocator.getMysqlClient();
+            logger.info("## the canal consumer is running now ......");
+            client.start();
+            Runtime.getRuntime().addShutdownHook(new Thread() {
+
+                public void run() {
+                    try {
+                        logger.info("## stop the canal consumer");
+                        client.stop();
+                    } catch (Throwable e) {
+                        logger.warn("##something goes wrong when stopping canal consumer:\n{}", e);
+                    } finally {
+                        logger.info("## canal consumer is down.");
+                    }
+                }
+
+            });
+        } catch (Throwable e) {
+            logger.error("## Something goes wrong when starting up the canal consumer:\n{}", e);
+            System.exit(0);
+        }
+    }
+}

+ 169 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/PropertyPlaceholderConfigurer.java

@@ -0,0 +1,169 @@
+package com.alibaba.otter.canal.example.db;
+
+import org.springframework.beans.factory.InitializingBean;
+import org.springframework.context.ResourceLoaderAware;
+import org.springframework.core.io.Resource;
+import org.springframework.core.io.ResourceLoader;
+import org.springframework.util.Assert;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+
+/**
+ * 扩展Spring的
+ * {@linkplain org.springframework.beans.factory.config.PropertyPlaceholderConfigurer}
+ * ,增加默认值的功能。 例如:${placeholder:defaultValue},假如placeholder的值不存在,则默认取得
+ * defaultValue。
+ * 
+ * @author jianghang 2013-1-24 下午03:37:56
+ * @version 1.0.0
+ */
+public class PropertyPlaceholderConfigurer extends org.springframework.beans.factory.config.PropertyPlaceholderConfigurer implements ResourceLoaderAware, InitializingBean {
+
+    private static final String PLACEHOLDER_PREFIX = "${";
+    private static final String PLACEHOLDER_SUFFIX = "}";
+    private ResourceLoader      loader;
+    private String[]            locationNames;
+
+    public PropertyPlaceholderConfigurer(){
+        setIgnoreUnresolvablePlaceholders(true);
+    }
+
+    public void setResourceLoader(ResourceLoader loader) {
+        this.loader = loader;
+    }
+
+    public void setLocationNames(String[] locations) {
+        this.locationNames = locations;
+    }
+
+    public void afterPropertiesSet() throws Exception {
+        Assert.notNull(loader, "no resourceLoader");
+
+        if (locationNames != null) {
+            for (int i = 0; i < locationNames.length; i++) {
+                locationNames[i] = resolveSystemPropertyPlaceholders(locationNames[i]);
+            }
+        }
+
+        if (locationNames != null) {
+            List<Resource> resources = new ArrayList<Resource>(locationNames.length);
+
+            for (String location : locationNames) {
+                location = trimToNull(location);
+
+                if (location != null) {
+                    resources.add(loader.getResource(location));
+                }
+            }
+
+            super.setLocations(resources.toArray(new Resource[resources.size()]));
+        }
+    }
+
+    private String resolveSystemPropertyPlaceholders(String text) {
+        StringBuilder buf = new StringBuilder(text);
+
+        for (int startIndex = buf.indexOf(PLACEHOLDER_PREFIX); startIndex >= 0;) {
+            int endIndex = buf.indexOf(PLACEHOLDER_SUFFIX, startIndex + PLACEHOLDER_PREFIX.length());
+
+            if (endIndex != -1) {
+                String placeholder = buf.substring(startIndex + PLACEHOLDER_PREFIX.length(), endIndex);
+                int nextIndex = endIndex + PLACEHOLDER_SUFFIX.length();
+
+                try {
+                    String value = resolveSystemPropertyPlaceholder(placeholder);
+
+                    if (value != null) {
+                        buf.replace(startIndex, endIndex + PLACEHOLDER_SUFFIX.length(), value);
+                        nextIndex = startIndex + value.length();
+                    } else {
+                        System.err.println("Could not resolve placeholder '"
+                                           + placeholder
+                                           + "' in ["
+                                           + text
+                                           + "] as system property: neither system property nor environment variable found");
+                    }
+                } catch (Throwable ex) {
+                    System.err.println("Could not resolve placeholder '" + placeholder + "' in [" + text
+                                       + "] as system property: " + ex);
+                }
+
+                startIndex = buf.indexOf(PLACEHOLDER_PREFIX, nextIndex);
+            } else {
+                startIndex = -1;
+            }
+        }
+
+        return buf.toString();
+    }
+
+    private String resolveSystemPropertyPlaceholder(String placeholder) {
+        DefaultablePlaceholder dp = new DefaultablePlaceholder(placeholder);
+        String value = System.getProperty(dp.placeholder);
+
+        if (value == null) {
+            value = System.getenv(dp.placeholder);
+        }
+
+        if (value == null) {
+            value = dp.defaultValue;
+        }
+
+        return value;
+    }
+
+    @Override
+    protected String resolvePlaceholder(String placeholder, Properties props, int systemPropertiesMode) {
+        DefaultablePlaceholder dp = new DefaultablePlaceholder(placeholder);
+        String value = super.resolvePlaceholder(dp.placeholder, props, systemPropertiesMode);
+
+        if (value == null) {
+            value = dp.defaultValue;
+        }
+
+        return trimToEmpty(value);
+    }
+
+    private static class DefaultablePlaceholder {
+
+        private final String defaultValue;
+        private final String placeholder;
+
+        public DefaultablePlaceholder(String placeholder){
+            int commaIndex = placeholder.indexOf(":");
+            String defaultValue = null;
+
+            if (commaIndex >= 0) {
+                defaultValue = trimToEmpty(placeholder.substring(commaIndex + 1));
+                placeholder = trimToEmpty(placeholder.substring(0, commaIndex));
+            }
+
+            this.placeholder = placeholder;
+            this.defaultValue = defaultValue;
+        }
+    }
+
+    private String trimToNull(String str) {
+        if (str == null) {
+            return null;
+        }
+
+        String result = str.trim();
+
+        if (result == null || result.length() == 0) {
+            return null;
+        }
+
+        return result;
+    }
+
+    public static String trimToEmpty(String str) {
+        if (str == null) {
+            return "";
+        }
+
+        return str.trim();
+    }
+}

+ 44 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/ServiceLocator.java

@@ -0,0 +1,44 @@
+package com.alibaba.otter.canal.example.db;
+
+import com.alibaba.otter.canal.example.db.mysql.MysqlClient;
+import org.springframework.beans.factory.DisposableBean;
+import org.springframework.context.ApplicationContext;
+import org.springframework.context.support.ClassPathXmlApplicationContext;
+import org.springframework.util.Assert;
+
+/**
+ * Static locator for beans defined in client-spring.xml. The context is
+ * created eagerly at class-load time; registering this class as a Spring
+ * bean lets destroy() clear the static reference on shutdown.
+ */
+public class ServiceLocator implements DisposableBean {
+
+    private static ApplicationContext applicationContext = null;
+
+    static {
+        // any failure here surfaces as an ExceptionInInitializerError at first use
+        // (the original wrapped this in a catch(RuntimeException){throw e;} no-op)
+        applicationContext = new ClassPathXmlApplicationContext("classpath:client-spring.xml");
+    }
+
+    /**
+     * Fetch a bean by name, cast to the caller's expected type.
+     * The cast is unchecked by nature of the name-based lookup; a mismatch
+     * surfaces as a ClassCastException at the call site.
+     */
+    @SuppressWarnings("unchecked")
+    private static <T> T getBean(String name) {
+        assertContextInjected();
+        return (T) applicationContext.getBean(name);
+    }
+
+
+    private static void clearHolder() {
+        ServiceLocator.applicationContext = null;
+    }
+
+    @Override
+    public void destroy() throws Exception {
+        ServiceLocator.clearHolder();
+    }
+
+    private static void assertContextInjected() {
+        Assert.state(applicationContext != null, "ApplicationContext not set");
+    }
+
+
+    /** @return the "mysqlClient" bean from client-spring.xml */
+    public static MysqlClient getMysqlClient() {
+        return getBean("mysqlClient");
+    }
+}

+ 121 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/AbstractDbDialect.java

@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2010-2101 Alibaba Group Holding Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.otter.canal.example.db.dialect;
+
+import com.alibaba.otter.canal.example.db.utils.DdlUtils;
+import com.google.common.base.Function;
+import com.google.common.collect.MigrateMap;
+import org.apache.commons.lang.exception.NestableRuntimeException;
+import org.apache.ddlutils.model.Table;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.ConnectionCallback;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.datasource.DataSourceTransactionManager;
+import org.springframework.jdbc.support.lob.LobHandler;
+import org.springframework.transaction.TransactionDefinition;
+import org.springframework.transaction.support.TransactionTemplate;
+import org.springframework.util.Assert;
+
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+public abstract class AbstractDbDialect implements DbDialect {
+
+    protected int databaseMajorVersion;
+    protected int databaseMinorVersion;
+    protected String databaseName;
+    protected JdbcTemplate jdbcTemplate;
+    protected TransactionTemplate transactionTemplate;
+    protected LobHandler lobHandler;
+    protected Map<List<String>, Table> tables;
+
+    public AbstractDbDialect(final JdbcTemplate jdbcTemplate, LobHandler lobHandler) {
+        this.jdbcTemplate = jdbcTemplate;
+        this.lobHandler = lobHandler;
+        // 初始化transction
+        this.transactionTemplate = new TransactionTemplate();
+        transactionTemplate.setTransactionManager(new DataSourceTransactionManager(jdbcTemplate.getDataSource()));
+        transactionTemplate.setPropagationBehavior(TransactionDefinition.PROPAGATION_REQUIRES_NEW);
+
+        // 初始化一些数据
+        jdbcTemplate.execute(new ConnectionCallback() {
+
+            public Object doInConnection(Connection c) throws SQLException, DataAccessException {
+                DatabaseMetaData meta = c.getMetaData();
+                databaseName = meta.getDatabaseProductName();
+                databaseMajorVersion = meta.getDatabaseMajorVersion();
+                databaseMinorVersion = meta.getDatabaseMinorVersion();
+
+                return null;
+            }
+        });
+
+        initTables(jdbcTemplate);
+    }
+
+    public Table findTable(String schema, String table, boolean useCache) {
+        List<String> key = Arrays.asList(schema, table);
+        if (useCache == false) {
+            tables.remove(key);
+        }
+
+        return tables.get(key);
+    }
+
+    public Table findTable(String schema, String table) {
+        return findTable(schema, table, true);
+    }
+
+    public LobHandler getLobHandler() {
+        return lobHandler;
+    }
+
+    public JdbcTemplate getJdbcTemplate() {
+        return jdbcTemplate;
+    }
+
+    public TransactionTemplate getTransactionTemplate() {
+        return transactionTemplate;
+    }
+
+    private void initTables(final JdbcTemplate jdbcTemplate) {
+        this.tables = MigrateMap.makeComputingMap(new Function<List<String>, Table>() {
+
+            public Table apply(List<String> names) {
+                Assert.isTrue(names.size() == 2);
+                try {
+                    Table table = DdlUtils.findTable(jdbcTemplate, names.get(0), names.get(0), names.get(1));
+                    if (table == null) {
+                        throw new NestableRuntimeException("no found table [" + names.get(0) + "." + names.get(1)
+                                + "] , pls check");
+                    } else {
+                        return table;
+                    }
+                } catch (Exception e) {
+                    throw new NestableRuntimeException("find table [" + names.get(0) + "." + names.get(1) + "] error",
+                            e);
+                }
+            }
+        });
+    }
+
+
+}

+ 105 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/AbstractSqlTemplate.java

@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2010-2101 Alibaba Group Holding Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.otter.canal.example.db.dialect;
+
+/**
+ * 默认的基于标准SQL实现的CRUD sql封装
+ * 
+ * @author jianghang 2011-10-27 下午01:37:00
+ * @version 4.0.0
+ */
+public abstract class AbstractSqlTemplate implements SqlTemplate {
+
+    private static final String DOT = ".";
+
+    public String getSelectSql(String schemaName, String tableName, String[] pkNames, String[] columnNames) {
+        StringBuilder sql = new StringBuilder("select ");
+        int size = columnNames.length;
+        for (int i = 0; i < size; i++) {
+            sql.append(appendEscape(columnNames[i])).append((i + 1 < size) ? " , " : "");
+        }
+
+        sql.append(" from ").append(getFullName(schemaName, tableName)).append(" where ( ");
+        appendColumnEquals(sql, pkNames, "and");
+        sql.append(" ) ");
+        return sql.toString().intern();// 不使用intern,避免方法区内存消耗过多
+    }
+
+    public String getUpdateSql(String schemaName, String tableName, String[] pkNames, String[] columnNames) {
+        StringBuilder sql = new StringBuilder("update " + getFullName(schemaName, tableName) + " set ");
+        appendColumnEquals(sql, columnNames, ",");
+        sql.append(" where (");
+        appendColumnEquals(sql, pkNames, "and");
+        sql.append(")");
+        return sql.toString().intern(); // 不使用intern,避免方法区内存消耗过多
+    }
+
+    public String getInsertSql(String schemaName, String tableName, String[] pkNames, String[] columnNames) {
+        StringBuilder sql = new StringBuilder("insert into " + getFullName(schemaName, tableName) + "(");
+        String[] allColumns = new String[pkNames.length + columnNames.length];
+        System.arraycopy(columnNames, 0, allColumns, 0, columnNames.length);
+        System.arraycopy(pkNames, 0, allColumns, columnNames.length, pkNames.length);
+
+        int size = allColumns.length;
+        for (int i = 0; i < size; i++) {
+            sql.append(appendEscape(allColumns[i])).append((i + 1 < size) ? "," : "");
+        }
+
+        sql.append(") values (");
+        appendColumnQuestions(sql, allColumns);
+        sql.append(")");
+        return sql.toString().intern();// intern优化,避免出现大量相同的字符串
+    }
+
+    public String getDeleteSql(String schemaName, String tableName, String[] pkNames) {
+        StringBuilder sql = new StringBuilder("delete from " + getFullName(schemaName, tableName) + " where ");
+        appendColumnEquals(sql, pkNames, "and");
+        return sql.toString().intern();// intern优化,避免出现大量相同的字符串
+    }
+
+    protected String getFullName(String schemaName, String tableName) {
+        StringBuilder sb = new StringBuilder();
+        if (schemaName != null) {
+            sb.append(appendEscape(schemaName)).append(DOT);
+        }
+        sb.append(appendEscape(tableName));
+        return sb.toString().intern();
+    }
+
+    // ================ helper method ============
+
+    protected String appendEscape(String columnName) {
+        return columnName;
+    }
+
+    protected void appendColumnQuestions(StringBuilder sql, String[] columns) {
+        int size = columns.length;
+        for (int i = 0; i < size; i++) {
+            sql.append("?").append((i + 1 < size) ? " , " : "");
+        }
+    }
+
+    protected void appendColumnEquals(StringBuilder sql, String[] columns, String separator) {
+        int size = columns.length;
+        for (int i = 0; i < size; i++) {
+            sql.append(" ").append(appendEscape(columns[i])).append(" = ").append("? ");
+            if (i != size - 1) {
+                sql.append(separator);
+            }
+        }
+    }
+}

+ 20 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/DbDialect.java

@@ -0,0 +1,20 @@
+package com.alibaba.otter.canal.example.db.dialect;
+
+import org.apache.ddlutils.model.Table;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.support.lob.LobHandler;
+import org.springframework.transaction.support.TransactionTemplate;
+
/**
 * Minimal database-dialect abstraction used by the example DB client: exposes
 * the Spring JDBC helpers and (cached) table metadata for the target database.
 */
public interface DbDialect {

    /** @return handler used for reading/writing LOB column values */
    LobHandler getLobHandler();

    /** @return JDBC template bound to the target datasource */
    JdbcTemplate getJdbcTemplate();

    /** @return transaction template wrapping DML execution */
    TransactionTemplate getTransactionTemplate();

    /** Looks up table metadata, serving from the cache when available. */
    Table findTable(String schema, String table);

    /**
     * Looks up table metadata; when {@code useCache} is false the cached entry
     * is evicted first so fresh metadata is loaded.
     */
    Table findTable(String schema, String table, boolean useCache);

}

+ 40 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/SqlTemplate.java

@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2010-2101 Alibaba Group Holding Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.otter.canal.example.db.dialect;
+
/**
 * SQL statement construction template: builds the CRUD and merge statements
 * executed against the target database.
 *
 * @author jianghang 2011-10-27
 * @version 4.0.0
 */
public interface SqlTemplate {

    /** Builds a SELECT keyed by the primary-key columns. */
    String getSelectSql(String schemaName, String tableName, String[] pkNames, String[] columnNames);

    /** Builds an UPDATE of {@code columnNames} keyed by the primary-key columns. */
    String getUpdateSql(String schemaName, String tableName, String[] pkNames, String[] columnNames);

    /** Builds a DELETE keyed by the primary-key columns. */
    String getDeleteSql(String schemaName, String tableName, String[] pkNames);

    /** Builds an INSERT covering both value and primary-key columns. */
    String getInsertSql(String schemaName, String tableName, String[] pkNames, String[] columnNames);

    /**
     * Builds the dialect-specific merge (upsert) statement; when
     * {@code updatePks} is true the primary-key columns are rewritten on
     * conflict as well.
     */
    String getMergeSql(String schemaName, String tableName, String[] pkNames, String[] columnNames,
                       String[] viewColumnNames, boolean updatePks);
}

+ 93 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/TableType.java

@@ -0,0 +1,93 @@
+package com.alibaba.otter.canal.example.db.dialect;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+
/**
 * An enumeration wrapper around JDBC table types. Constant names are the
 * lower-case form of the JDBC type strings (e.g. {@code TABLE} → {@link #table}).
 */
public enum TableType {

    /** Unknown */
    unknown,

    /** System table */
    system_table,

    /** Global temporary */
    global_temporary,

    /** Local temporary */
    local_temporary,

    /** Table */
    table,

    /** View */
    view,

    /** Alias */
    alias,

    /** Synonym */
    synonym;

    /**
     * Converts table types to their upper-case JDBC string form.
     * Null array/empty input yields an empty array; null elements are skipped.
     *
     * @param tableTypes Array of table types
     * @return Array of string table types
     */
    public static String[] toStrings(final TableType[] tableTypes) {
        if (tableTypes == null || tableTypes.length == 0) {
            return new String[0];
        }

        final List<String> names = new ArrayList<String>(tableTypes.length);
        for (final TableType type : tableTypes) {
            if (type == null) {
                continue;
            }
            names.add(type.toString().toUpperCase(Locale.ENGLISH));
        }

        return names.toArray(new String[names.size()]);
    }

    /**
     * Converts JDBC string table types (case-insensitive) to enum values.
     * Null array/empty input yields an empty array; a string with no matching
     * constant raises {@link IllegalArgumentException}.
     *
     * @param tableTypeStrings Array of string table types
     * @return Array of table types
     */
    public static TableType[] valueOf(final String[] tableTypeStrings) {
        if (tableTypeStrings == null || tableTypeStrings.length == 0) {
            return new TableType[0];
        }

        final List<TableType> result = new ArrayList<TableType>(tableTypeStrings.length);
        for (int i = 0; i < tableTypeStrings.length; i++) {
            result.add(valueOf(tableTypeStrings[i].toLowerCase(Locale.ENGLISH)));
        }

        return result.toArray(new TableType[result.size()]);
    }
}

+ 32 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/mysql/MysqlDialect.java

@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2010-2101 Alibaba Group Holding Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.otter.canal.example.db.dialect.mysql;
+
+import com.alibaba.otter.canal.example.db.dialect.AbstractDbDialect;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.support.lob.LobHandler;
+
/**
 * MySQL dialect; inherits the JDBC helpers and table-metadata caching of
 * {@code AbstractDbDialect}.
 */
public class MysqlDialect extends AbstractDbDialect {

    public MysqlDialect(JdbcTemplate jdbcTemplate, LobHandler lobHandler) {
        super(jdbcTemplate, lobHandler);
    }

    // Empty strings are not mapped to NULL for this dialect.
    public boolean isEmptyStringNulled() {
        return false;
    }
}

+ 84 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/dialect/mysql/MysqlSqlTemplate.java

@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2010-2101 Alibaba Group Holding Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.otter.canal.example.db.dialect.mysql;
+
+import com.alibaba.otter.canal.example.db.dialect.AbstractSqlTemplate;
+
+/**
+ * mysql sql生成模板
+ *
+ * @author jianghang 2011-10-27 下午01:41:20
+ * @version 4.0.0
+ */
+public class MysqlSqlTemplate extends AbstractSqlTemplate {
+
+    private static final String ESCAPE = "`";
+
+    public String getMergeSql(String schemaName, String tableName, String[] pkNames, String[] columnNames,
+                              String[] viewColumnNames, boolean includePks) {
+        StringBuilder sql = new StringBuilder("insert into " + getFullName(schemaName, tableName) + "(");
+        int size = columnNames.length;
+        for (int i = 0; i < size; i++) {
+            sql.append(appendEscape(columnNames[i])).append(" , ");
+        }
+        size = pkNames.length;
+        for (int i = 0; i < size; i++) {
+            sql.append(appendEscape(pkNames[i])).append((i + 1 < size) ? " , " : "");
+        }
+
+        sql.append(") values (");
+        size = columnNames.length;
+        for (int i = 0; i < size; i++) {
+            sql.append("?").append(" , ");
+        }
+        size = pkNames.length;
+        for (int i = 0; i < size; i++) {
+            sql.append("?").append((i + 1 < size) ? " , " : "");
+        }
+        sql.append(")");
+        sql.append(" on duplicate key update ");
+
+        size = columnNames.length;
+        for (int i = 0; i < size; i++) {
+            sql.append(appendEscape(columnNames[i]))
+                    .append("=values(")
+                    .append(appendEscape(columnNames[i]))
+                    .append(")");
+            if (includePks) {
+                sql.append(" , ");
+            } else {
+                sql.append((i + 1 < size) ? " , " : "");
+            }
+        }
+
+        if (includePks) {
+            // mysql merge sql匹配了uniqe / primary key时都会执行update,所以需要更新pk信息
+            size = pkNames.length;
+            for (int i = 0; i < size; i++) {
+                sql.append(appendEscape(pkNames[i])).append("=values(").append(appendEscape(pkNames[i])).append(")");
+                sql.append((i + 1 < size) ? " , " : "");
+            }
+        }
+
+        return sql.toString().intern();// intern优化,避免出现大量相同的字符串
+    }
+
+    protected String appendEscape(String columnName) {
+        return ESCAPE + columnName + ESCAPE;
+    }
+
+}

+ 207 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/mysql/AbstractMysqlClient.java

@@ -0,0 +1,207 @@
+package com.alibaba.otter.canal.example.db.mysql;
+
+import com.alibaba.fastjson.JSON;
+import com.alibaba.otter.canal.example.db.AbstractDbClient;
+import com.alibaba.otter.canal.example.db.dialect.DbDialect;
+import com.alibaba.otter.canal.example.db.dialect.mysql.MysqlDialect;
+import com.alibaba.otter.canal.example.db.dialect.mysql.MysqlSqlTemplate;
+import com.alibaba.otter.canal.example.db.dialect.SqlTemplate;
+import com.alibaba.otter.canal.example.db.utils.SqlUtils;
+import com.alibaba.otter.canal.protocol.CanalEntry;
+import com.alibaba.otter.canal.protocol.exception.CanalClientException;
+import org.apache.commons.lang.StringUtils;
+import org.apache.ddlutils.model.Column;
+import org.apache.ddlutils.model.Table;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.core.PreparedStatementSetter;
+import org.springframework.jdbc.core.StatementCreatorUtils;
+import org.springframework.jdbc.support.lob.DefaultLobHandler;
+import org.springframework.jdbc.support.lob.LobCreator;
+import org.springframework.transaction.TransactionStatus;
+import org.springframework.transaction.support.TransactionCallback;
+
+import javax.sql.DataSource;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+
+public abstract class AbstractMysqlClient extends AbstractDbClient {
+
+    private DataSource dataSource;
+
+    private DbDialect dbDialect;
+    private SqlTemplate sqlTemplate;
+
+    protected Integer execute(final CanalEntry.Header header, final List<CanalEntry.Column> columns) {
+        final String sql = getSql(header, columns);
+        final LobCreator lobCreator = dbDialect.getLobHandler().getLobCreator();
+        dbDialect.getTransactionTemplate().execute(new TransactionCallback() {
+
+            public Object doInTransaction(TransactionStatus status) {
+                try {
+                    JdbcTemplate template = dbDialect.getJdbcTemplate();
+                    int affect = template.update(sql, new PreparedStatementSetter() {
+
+                        public void setValues(PreparedStatement ps) throws SQLException {
+                            doPreparedStatement(ps, dbDialect, lobCreator, header, columns);
+                        }
+                    });
+                    return affect;
+                } finally {
+                    lobCreator.close();
+                }
+            }
+        });
+        return 0;
+    }
+
+    private String getSql(CanalEntry.Header header, List<CanalEntry.Column> columns) {
+        List<String> pkNames = new ArrayList<>();
+        List<String> colNames = new ArrayList<>();
+        for (CanalEntry.Column column : columns) {
+            if (column.getIsKey()) {
+                pkNames.add(column.getName());
+            } else {
+                colNames.add(column.getName());
+            }
+        }
+        String sql = "";
+        CanalEntry.EventType eventType = header.getEventType();
+        switch (eventType) {
+            case INSERT:
+                sql = sqlTemplate.getInsertSql(header.getSchemaName(), header.getTableName(), pkNames.toArray(new String[]{}), colNames.toArray(new String[]{}));
+                break;
+            case UPDATE:
+                sql = sqlTemplate.getUpdateSql(header.getSchemaName(), header.getTableName(), pkNames.toArray(new String[]{}), colNames.toArray(new String[]{}));
+                break;
+            case DELETE:
+                sql = sqlTemplate.getDeleteSql(header.getSchemaName(), header.getTableName(), pkNames.toArray(new String[]{}));
+        }
+        logger.info("Execute sql: {}", sql);
+        return sql;
+    }
+
+    private void doPreparedStatement(PreparedStatement ps, DbDialect dbDialect, LobCreator lobCreator,
+                                     CanalEntry.Header header, List<CanalEntry.Column> columns) throws SQLException {
+
+        List<CanalEntry.Column> rebuildColumns = new ArrayList<>(columns.size());
+
+        List<CanalEntry.Column> keyColumns = new ArrayList<>(columns.size());
+        List<CanalEntry.Column> notKeyColumns = new ArrayList<>(columns.size());
+        for (CanalEntry.Column column : columns) {
+            if (column.getIsKey()) {
+                keyColumns.add(column);
+            } else {
+                notKeyColumns.add(column);
+            }
+        }
+        CanalEntry.EventType eventType = header.getEventType();
+        switch (eventType) {
+            case INSERT:
+            case UPDATE:
+                // insert/update语句对应的字段数序都是将主键排在后面
+                rebuildColumns.addAll(notKeyColumns);
+                rebuildColumns.addAll(keyColumns);
+                break;
+            case DELETE:
+                rebuildColumns.addAll(keyColumns);
+        }
+
+        // 获取一下当前字段名的数据是否必填
+        Table table = dbDialect.findTable(header.getSchemaName(), header.getTableName());
+        Map<String, Boolean> isRequiredMap = new HashMap();
+        for (Column tableColumn : table.getColumns()) {
+            isRequiredMap.put(StringUtils.lowerCase(tableColumn.getName()), tableColumn.isRequired());
+        }
+
+        List<Object> values = new ArrayList<>(rebuildColumns.size());
+        for (int i = 0; i < rebuildColumns.size(); i++) {
+            int paramIndex = i + 1;
+            CanalEntry.Column column = rebuildColumns.get(i);
+            int sqlType = column.getSqlType();
+
+            Boolean isRequired = isRequiredMap.get(StringUtils.lowerCase(column.getName()));
+            if (isRequired == null) {
+                // 清理一下目标库的表结构,二次检查一下
+                table = dbDialect.findTable(header.getSchemaName(), header.getTableName());
+
+                isRequiredMap = new HashMap<>();
+                for (Column tableColumn : table.getColumns()) {
+                    isRequiredMap.put(StringUtils.lowerCase(tableColumn.getName()), tableColumn.isRequired());
+                }
+
+                isRequired = isRequiredMap.get(StringUtils.lowerCase(column.getName()));
+                if (isRequired == null) {
+                    throw new CanalClientException(String.format("column name %s is not found in Table[%s]",
+                            column.getName(),
+                            table.toString()));
+                }
+            }
+
+            Object param;
+            if (sqlType == Types.TIME || sqlType == Types.TIMESTAMP || sqlType == Types.DATE) {
+                // 解决mysql的0000-00-00 00:00:00问题,直接依赖mysql
+                // driver进行处理,如果转化为Timestamp会出错
+                param = column.getValue();
+                if (param instanceof String && StringUtils.isEmpty(String.valueOf(param))) {
+                    param = null;
+                }
+            } else {
+                param = SqlUtils.stringToSqlValue(column.getValue(),
+                        sqlType,
+                        isRequired,
+                        column.getIsNull());
+            }
+
+            try {
+                switch (sqlType) {
+                    case Types.CLOB:
+                        lobCreator.setClobAsString(ps, paramIndex, (String) param);
+                        break;
+                    case Types.BLOB:
+                        lobCreator.setBlobAsBytes(ps, paramIndex, (byte[]) param);
+                        break;
+                    case Types.TIME:
+                    case Types.TIMESTAMP:
+                    case Types.DATE:
+                        ps.setObject(paramIndex, param);
+                        break;
+                    case Types.BIT:
+                        StatementCreatorUtils.setParameterValue(ps, paramIndex, Types.DECIMAL, null, param);
+                        break;
+                    default:
+                        StatementCreatorUtils.setParameterValue(ps, paramIndex, sqlType, null, param);
+                        break;
+                }
+                values.add(param);
+            } catch (SQLException ex) {
+                logger.error("## SetParam error , [sqltype={}, value={}]",
+                        new Object[]{sqlType, param});
+                throw ex;
+            }
+        }
+        logger.info("## sql values: {}", JSON.toJSONString(values));
+    }
+
+    @Override
+    public void afterPropertiesSet() {
+        JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
+        DefaultLobHandler lobHandler = new DefaultLobHandler();
+        lobHandler.setStreamAsLob(true);
+        dbDialect = new MysqlDialect(jdbcTemplate, lobHandler);
+        sqlTemplate = new MysqlSqlTemplate();
+    }
+
+    public DataSource getDataSource() {
+        return dataSource;
+    }
+
+    public void setDataSource(DataSource dataSource) {
+        this.dataSource = dataSource;
+    }
+}

+ 23 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/mysql/MysqlClient.java

@@ -0,0 +1,23 @@
+package com.alibaba.otter.canal.example.db.mysql;
+
+import com.alibaba.otter.canal.protocol.CanalEntry;
+
+import java.util.List;
+
/**
 * Concrete MySQL example client: every row change received from canal is
 * replayed against the target database via the inherited {@code execute}.
 */
public class MysqlClient extends AbstractMysqlClient {

    /** Replays an INSERT using the row image after the change. */
    @Override
    public void insert(CanalEntry.Header header, List<CanalEntry.Column> afterColumns) {
        execute(header, afterColumns);
    }

    /** Replays an UPDATE using the row image after the change. */
    @Override
    public void update(CanalEntry.Header header, List<CanalEntry.Column> afterColumns) {
        execute(header, afterColumns);
    }

    /** Replays a DELETE using the row image before the change. */
    @Override
    public void delete(CanalEntry.Header header, List<CanalEntry.Column> beforeColumns) {
        execute(header, beforeColumns);
    }
}

+ 50 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/utils/ByteArrayConverter.java

@@ -0,0 +1,50 @@
+package com.alibaba.otter.canal.example.db.utils;
+
+import org.apache.commons.beanutils.ConversionException;
+import org.apache.commons.beanutils.Converter;
+import org.apache.commons.beanutils.converters.ArrayConverter;
+import org.apache.commons.beanutils.converters.ByteConverter;
+
+public class ByteArrayConverter implements Converter {
+
+    public static final Converter SQL_BYTES = new ByteArrayConverter(null);
+    private static final Converter converter = new ArrayConverter(byte[].class, new ByteConverter());
+
+    protected final Object defaultValue;
+    protected final boolean useDefault;
+
+    public ByteArrayConverter() {
+        this.defaultValue = null;
+        this.useDefault = false;
+    }
+
+    public ByteArrayConverter(Object defaultValue) {
+        this.defaultValue = defaultValue;
+        this.useDefault = true;
+    }
+
+    public Object convert(Class type, Object value) {
+        if (value == null) {
+            if (useDefault) {
+                return (defaultValue);
+            } else {
+                throw new ConversionException("No value specified");
+            }
+        }
+
+        if (value instanceof byte[]) {
+            return (value);
+        }
+
+        // BLOB类型,canal直接存储为String("ISO-8859-1")
+        if (value instanceof String) {
+            try {
+                return ((String) value).getBytes("ISO-8859-1");
+            } catch (Exception e) {
+                throw new ConversionException(e);
+            }
+        }
+
+        return converter.convert(type, value); // byteConvertor进行转化
+    }
+}

+ 326 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/utils/DdlUtils.java

@@ -0,0 +1,326 @@
+package com.alibaba.otter.canal.example.db.utils;
+
+import com.alibaba.otter.canal.example.db.dialect.TableType;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.builder.ToStringBuilder;
+import org.apache.commons.lang.builder.ToStringStyle;
+import org.apache.commons.lang.math.NumberUtils;
+import org.apache.ddlutils.model.Column;
+import org.apache.ddlutils.model.Table;
+import org.apache.ddlutils.platform.DatabaseMetaDataWrapper;
+import org.apache.ddlutils.platform.MetaDataColumnDescriptor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.ConnectionCallback;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.support.JdbcUtils;
+
+import java.sql.*;
+import java.util.*;
+
+
+/**
+ * Reads table models (name, columns, primary keys) through JDBC
+ * {@link DatabaseMetaData}, building Apache DdlUtils {@link Table} objects.
+ */
+public class DdlUtils {
+
+    private static final Logger logger = LoggerFactory.getLogger(DdlUtils.class);
+    // table types we are willing to resolve via DatabaseMetaData.getTables()
+    private static TableType[] SUPPORTED_TABLE_TYPES = new TableType[]{TableType.view, TableType.table};
+    // fallback COLUMN_SIZE values keyed by raw java.sql.Types code, used when
+    // the driver does not report a size (see readColumn)
+    private final static Map<Integer, String> _defaultSizes = new HashMap<Integer, String>();
+
+    static {
+        _defaultSizes.put(new Integer(1), "254"); // Types.CHAR
+        _defaultSizes.put(new Integer(12), "254"); // Types.VARCHAR
+        _defaultSizes.put(new Integer(-1), "254"); // Types.LONGVARCHAR
+        _defaultSizes.put(new Integer(-2), "254"); // Types.BINARY
+        _defaultSizes.put(new Integer(-3), "254"); // Types.VARBINARY
+        _defaultSizes.put(new Integer(-4), "254"); // Types.LONGVARBINARY
+        _defaultSizes.put(new Integer(4), "32"); // Types.INTEGER
+        _defaultSizes.put(new Integer(-5), "64"); // Types.BIGINT
+        _defaultSizes.put(new Integer(7), "7,0"); // Types.REAL
+        _defaultSizes.put(new Integer(6), "15,0"); // Types.FLOAT
+        _defaultSizes.put(new Integer(8), "15,0"); // Types.DOUBLE
+        _defaultSizes.put(new Integer(3), "15,15"); // Types.DECIMAL
+        _defaultSizes.put(new Integer(2), "15,15"); // Types.NUMERIC
+    }
+
+
+    /**
+     * Looks up a single table's model via JDBC metadata.
+     *
+     * <p>Identifier case is adapted to the database's reported storage
+     * convention (upper/lower). Any exception during metadata access is
+     * logged and swallowed, in which case {@code null} is returned.
+     *
+     * @param jdbcTemplate template providing the connection
+     * @param catalogName  JDBC catalog to search
+     * @param schemaName   JDBC schema pattern to search
+     * @param tableName    table to resolve (matched case-insensitively)
+     * @return the table model, or null when not found or on metadata failure
+     */
+    public static Table findTable(final JdbcTemplate jdbcTemplate, final String catalogName, final String schemaName,
+                                  final String tableName) {
+        return (Table) jdbcTemplate.execute(new ConnectionCallback() {
+
+            public Object doInConnection(Connection con) throws SQLException, DataAccessException {
+                Table table = null;
+                DatabaseMetaDataWrapper metaData = new DatabaseMetaDataWrapper();
+                try {
+
+                    DatabaseMetaData databaseMetaData = con.getMetaData();
+
+                    metaData.setMetaData(databaseMetaData);
+                    metaData.setTableTypes(TableType.toStrings(SUPPORTED_TABLE_TYPES));
+                    metaData.setCatalog(catalogName);
+                    metaData.setSchemaPattern(schemaName);
+
+                    // align identifier case with how the database stores identifiers
+                    String convertTableName = tableName;
+                    if (databaseMetaData.storesUpperCaseIdentifiers()) {
+                        metaData.setCatalog(catalogName.toUpperCase());
+                        metaData.setSchemaPattern(schemaName.toUpperCase());
+                        convertTableName = tableName.toUpperCase();
+                    }
+                    if (databaseMetaData.storesLowerCaseIdentifiers()) {
+                        metaData.setCatalog(catalogName.toLowerCase());
+                        metaData.setSchemaPattern(schemaName.toLowerCase());
+                        convertTableName = tableName.toLowerCase();
+                    }
+
+                    ResultSet tableData = null;
+                    try {
+                        tableData = metaData.getTables(convertTableName);
+
+                        // scan candidate rows until the name matches (case-insensitive)
+                        while ((tableData != null) && tableData.next()) {
+                            Map<String, Object> values = readColumns(tableData, initColumnsForTable());
+
+                            table = readTable(metaData, values);
+                            if (table.getName().equalsIgnoreCase(tableName)) {
+                                break;
+                            }
+                        }
+                    } finally {
+                        JdbcUtils.closeResultSet(tableData);
+                    }
+                } catch (Exception e) {
+                    // NOTE(review): failures are logged and swallowed; caller sees null
+                    logger.error(e.getMessage(), e);
+                }
+
+                makeAllColumnsPrimaryKeysIfNoPrimaryKeysFound(table);
+                return table;
+            }
+        });
+    }
+
+    /**
+     * Treat tables with no primary keys as a table with all primary keys.
+     */
+    private static void makeAllColumnsPrimaryKeysIfNoPrimaryKeysFound(Table table) {
+        if ((table != null) && (table.getPrimaryKeyColumns() != null) && (table.getPrimaryKeyColumns().length == 0)) {
+            Column[] allCoumns = table.getColumns();
+
+            for (Column column : allCoumns) {
+                column.setPrimaryKey(true);
+            }
+        }
+    }
+
+    /**
+     * Builds a {@link Table} from one getTables() row, filling in columns and
+     * primary keys; returns null when the row has no TABLE_NAME.
+     *
+     * @throws NullPointerException when a reported primary key column does not
+     *                              exist among the table's columns
+     */
+    private static Table readTable(DatabaseMetaDataWrapper metaData, Map<String, Object> values) throws SQLException {
+        String tableName = (String) values.get("TABLE_NAME");
+        Table table = null;
+
+        if ((tableName != null) && (tableName.length() > 0)) {
+            table = new Table();
+            table.setName(tableName);
+            table.setType((String) values.get("TABLE_TYPE"));
+            table.setCatalog((String) values.get("TABLE_CAT"));
+            table.setSchema((String) values.get("TABLE_SCHEM"));
+            table.setDescription((String) values.get("REMARKS"));
+            table.addColumns(readColumns(metaData, tableName));
+
+            Collection<String> primaryKeys = readPrimaryKeyNames(metaData, tableName);
+
+            for (Object key : primaryKeys) {
+                Column col = table.findColumn((String) key, true);
+
+                if (col != null) {
+                    col.setPrimaryKey(true);
+                } else {
+                    throw new NullPointerException(String.format("%s pk %s is null - %s %s",
+                            tableName,
+                            key,
+                            ToStringBuilder.reflectionToString(metaData, ToStringStyle.SIMPLE_STYLE),
+                            ToStringBuilder.reflectionToString(values, ToStringStyle.SIMPLE_STYLE)));
+                }
+            }
+        }
+
+        return table;
+    }
+
+    /** Descriptors for the getTables() result columns we care about. */
+    private static List<MetaDataColumnDescriptor> initColumnsForTable() {
+        List<MetaDataColumnDescriptor> result = new ArrayList<MetaDataColumnDescriptor>();
+
+        result.add(new MetaDataColumnDescriptor("TABLE_NAME", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("TABLE_TYPE", Types.VARCHAR, "UNKNOWN"));
+        result.add(new MetaDataColumnDescriptor("TABLE_CAT", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("TABLE_SCHEM", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("REMARKS", Types.VARCHAR));
+
+        return result;
+    }
+
+    /** Descriptors for the getColumns() result columns we care about. */
+    private static List<MetaDataColumnDescriptor> initColumnsForColumn() {
+        List<MetaDataColumnDescriptor> result = new ArrayList<MetaDataColumnDescriptor>();
+
+        // As suggested by Alexandre Borgoltz, we're reading the COLUMN_DEF
+        // first because Oracle
+        // has problems otherwise (it seemingly requires a LONG column to be the
+        // first to be read)
+        // See also DDLUTILS-29
+        result.add(new MetaDataColumnDescriptor("COLUMN_DEF", Types.VARCHAR));
+
+        // we're also reading the table name so that a model reader impl can
+        // filter manually
+        result.add(new MetaDataColumnDescriptor("TABLE_NAME", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("COLUMN_NAME", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("TYPE_NAME", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("DATA_TYPE", Types.INTEGER, new Integer(Types.OTHER)));
+        result.add(new MetaDataColumnDescriptor("NUM_PREC_RADIX", Types.INTEGER, new Integer(10)));
+        result.add(new MetaDataColumnDescriptor("DECIMAL_DIGITS", Types.INTEGER, new Integer(0)));
+        result.add(new MetaDataColumnDescriptor("COLUMN_SIZE", Types.VARCHAR));
+        result.add(new MetaDataColumnDescriptor("IS_NULLABLE", Types.VARCHAR, "YES"));
+        result.add(new MetaDataColumnDescriptor("REMARKS", Types.VARCHAR));
+
+        return result;
+    }
+
+    /** Descriptors for the getPrimaryKeys() result columns we care about. */
+    private static List<MetaDataColumnDescriptor> initColumnsForPK() {
+        List<MetaDataColumnDescriptor> result = new ArrayList<MetaDataColumnDescriptor>();
+
+        result.add(new MetaDataColumnDescriptor("COLUMN_NAME", Types.VARCHAR));
+
+        // we're also reading the table name so that a model reader impl can
+        // filter manually
+        result.add(new MetaDataColumnDescriptor("TABLE_NAME", Types.VARCHAR));
+
+        // the name of the primary key is currently only interesting to the pk
+        // index name resolution
+        result.add(new MetaDataColumnDescriptor("PK_NAME", Types.VARCHAR));
+
+        return result;
+    }
+
+    /**
+     * Reads the column models of {@code tableName} from getColumns().
+     * Rows belonging to other tables terminate the scan (drivers may return
+     * more than the requested table when the name is a pattern).
+     */
+    private static List<Column> readColumns(DatabaseMetaDataWrapper metaData, String tableName) throws SQLException {
+        ResultSet columnData = null;
+
+        try {
+            columnData = metaData.getColumns(tableName, null);
+
+            List<Column> columns = new ArrayList<Column>();
+            Map<String, Object> values;
+
+            // note the comma-clause loop: the current row is converted and
+            // added on the NEXT iteration's update clause
+            for (; columnData.next(); columns.add(readColumn(metaData, values))) {
+                Map<String, Object> tmp = readColumns(columnData, initColumnsForColumn());
+                if (tableName.equalsIgnoreCase((String) tmp.get("TABLE_NAME"))) {
+                    values = tmp;
+                } else {
+                    break;
+                }
+            }
+
+            return columns;
+        } finally {
+            JdbcUtils.closeResultSet(columnData);
+        }
+    }
+
+    /**
+     * Builds one {@link Column} from a getColumns() row, normalizing the type
+     * code (TIMESTAMP variants, UNSIGNED widening) and filling size/scale
+     * defaults.
+     */
+    private static Column readColumn(DatabaseMetaDataWrapper metaData, Map<String, Object> values) throws SQLException {
+        Column column = new Column();
+
+        column.setName((String) values.get("COLUMN_NAME"));
+        column.setDefaultValue((String) values.get("COLUMN_DEF"));
+        column.setTypeCode(((Integer) values.get("DATA_TYPE")).intValue());
+
+        String typeName = (String) values.get("TYPE_NAME");
+        // column.setType(typeName);
+
+        if ((typeName != null) && typeName.startsWith("TIMESTAMP")) {
+            column.setTypeCode(Types.TIMESTAMP);
+        }
+        // modified 2013-09-25: handle UNSIGNED column types
+        if ((typeName != null) && StringUtils.containsIgnoreCase(typeName, "UNSIGNED")) {
+            // for UNSIGNED types, widen to the next larger type to avoid overflow
+            switch (column.getTypeCode()) {
+                case Types.TINYINT:
+                    column.setTypeCode(Types.SMALLINT);
+                    break;
+                case Types.SMALLINT:
+                    column.setTypeCode(Types.INTEGER);
+                    break;
+                case Types.INTEGER:
+                    column.setTypeCode(Types.BIGINT);
+                    break;
+                case Types.BIGINT:
+                    column.setTypeCode(Types.DECIMAL);
+                    break;
+                default:
+                    break;
+            }
+        }
+
+        Integer precision = (Integer) values.get("NUM_PREC_RADIX");
+
+        if (precision != null) {
+            column.setPrecisionRadix(precision.intValue());
+        }
+
+        String size = (String) values.get("COLUMN_SIZE");
+
+        if (size == null) {
+            // fall back to the per-type default declared in _defaultSizes
+            size = (String) _defaultSizes.get(new Integer(column.getTypeCode()));
+        }
+
+        // we're setting the size after the precision and radix in case
+        // the database prefers to return them in the size value
+        column.setSize(size);
+
+        int scale = 0;
+        Object dec_digits = values.get("DECIMAL_DIGITS");
+
+        // drivers may report DECIMAL_DIGITS as either String or Integer
+        if (dec_digits instanceof String) {
+            scale = (dec_digits == null) ? 0 : NumberUtils.toInt(dec_digits.toString());
+        } else if (dec_digits instanceof Integer) {
+            scale = (dec_digits == null) ? 0 : (Integer) dec_digits;
+        }
+
+        if (scale != 0) {
+            column.setScale(scale);
+        }
+
+        column.setRequired("NO".equalsIgnoreCase(((String) values.get("IS_NULLABLE")).trim()));
+        column.setDescription((String) values.get("REMARKS"));
+        return column;
+    }
+
+    /**
+     * Reads the current {@code resultSet} row into a name-to-value map, one
+     * entry per descriptor (reading in descriptor order; see DDLUTILS-29).
+     */
+    private static Map<String, Object> readColumns(ResultSet resultSet, List<MetaDataColumnDescriptor> columnDescriptors)
+            throws SQLException {
+        Map<String, Object> values = new HashMap<String, Object>();
+        MetaDataColumnDescriptor descriptor;
+
+        // comma-clause loop: the descriptor fetched in the body is stored into
+        // the map by the update clause of the following iteration
+        for (Iterator<MetaDataColumnDescriptor> it = columnDescriptors.iterator(); it.hasNext(); values.put(descriptor.getName(),
+                descriptor.readColumn(resultSet))) {
+            descriptor = (MetaDataColumnDescriptor) it.next();
+        }
+
+        return values;
+    }
+
+    /** Collects the primary-key column names of {@code tableName} via getPrimaryKeys(). */
+    private static Collection<String> readPrimaryKeyNames(DatabaseMetaDataWrapper metaData, String tableName)
+            throws SQLException {
+        ResultSet pkData = null;
+
+        try {
+            List<String> pks = new ArrayList<String>();
+            Map<String, Object> values;
+
+            // comma-clause loop: each row's name is appended by the update
+            // clause of the following iteration
+            for (pkData = metaData.getPrimaryKeys(tableName); pkData.next(); pks.add(readPrimaryKeyName(metaData,
+                    values))) {
+                values = readColumns(pkData, initColumnsForPK());
+            }
+
+            return pks;
+        } finally {
+            JdbcUtils.closeResultSet(pkData);
+        }
+    }
+
+    /** Extracts the COLUMN_NAME field from one getPrimaryKeys() row. */
+    private static String readPrimaryKeyName(DatabaseMetaDataWrapper metaData, Map<String, Object> values)
+            throws SQLException {
+        return (String) values.get("COLUMN_NAME");
+    }
+}

+ 140 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/utils/SqlTimestampConverter.java

@@ -0,0 +1,140 @@
package com.alibaba.otter.canal.example.db.utils;

import org.apache.commons.beanutils.ConversionException;
import org.apache.commons.beanutils.Converter;
import org.apache.commons.lang.time.DateFormatUtils;

import java.sql.Timestamp;
import java.text.ParseException;
import java.text.ParsePosition;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Locale;

/**
 * beanutils {@link Converter} that converts arbitrary input values into the
 * java.sql date/time types ({@link java.sql.Date}, {@link java.sql.Time},
 * {@link Timestamp}).
 *
 * <p>String inputs are parsed by trying, in order: {@link Timestamp#valueOf},
 * the patterns in {@link #DATE_FORMATS} (English then default locale), and
 * finally {@link Long#parseLong} for epoch-millisecond strings.
 */
public class SqlTimestampConverter implements Converter {

    /**
     * Candidate patterns tried in order by {@link #parseDate}.
     *
     * <p>Every entry must be a valid {@link SimpleDateFormat} pattern. The
     * fractional-second entry uses 'S': the previous value
     * "yyyy-MM-dd hh:mm:ss.fffffffff" (JDBC escape notation, not a pattern)
     * made SimpleDateFormat throw IllegalArgumentException, which silently
     * prevented every later pattern from ever being tried.
     */
    public static final String[] DATE_FORMATS = new String[]{"yyyy-MM-dd", "HH:mm:ss", "yyyy-MM-dd HH:mm:ss",
            "yyyy-MM-dd HH:mm:ss.SSS", "EEE MMM dd HH:mm:ss zzz yyyy",
            DateFormatUtils.ISO_DATETIME_FORMAT.getPattern(),
            DateFormatUtils.ISO_DATETIME_TIME_ZONE_FORMAT.getPattern(),
            DateFormatUtils.SMTP_DATETIME_FORMAT.getPattern(),};

    /** Shared instance that yields a null default instead of throwing on null input. */
    public static final Converter SQL_TIMESTAMP = new SqlTimestampConverter(null);

    /**
     * The default value specified to our Constructor, if any.
     */
    private final Object defaultValue;

    /**
     * Should we return the default value on conversion errors?
     */
    private final boolean useDefault;

    /**
     * Create a {@link Converter} that will throw a {@link ConversionException} if a conversion error occurs.
     */
    public SqlTimestampConverter() {
        this.defaultValue = null;
        this.useDefault = false;
    }

    /**
     * Create a {@link Converter} that will return the specified default value if a conversion error occurs.
     *
     * @param defaultValue The default value to be returned
     */
    public SqlTimestampConverter(Object defaultValue) {
        this.defaultValue = defaultValue;
        this.useDefault = true;
    }

    /**
     * Convert the specified input object into an output object of the specified type.
     *
     * @param type  Data type to which this value should be converted
     *              (java.sql.Date, java.sql.Time, or Timestamp; anything else
     *              falls back to Timestamp)
     * @param value The input value to be converted
     * @throws ConversionException if conversion cannot be performed successfully
     */
    public Object convert(Class type, Object value) {
        if (value == null) {
            if (useDefault) {
                return (defaultValue);
            } else {
                throw new ConversionException("No value specified");
            }
        }

        // already the requested type: pass through unchanged
        if (value instanceof java.sql.Date && java.sql.Date.class.equals(type)) {
            return value;
        } else if (value instanceof java.sql.Time && java.sql.Time.class.equals(type)) {
            return value;
        } else if (value instanceof Timestamp && Timestamp.class.equals(type)) {
            return value;
        } else {
            try {
                long millis = convertTimestamp2TimeMillis(value.toString());
                if (java.sql.Date.class.equals(type)) {
                    return new java.sql.Date(millis);
                } else if (java.sql.Time.class.equals(type)) {
                    return new java.sql.Time(millis);
                } else {
                    // Timestamp requested, or unknown target type: default to Timestamp
                    return new Timestamp(millis);
                }
            } catch (Exception e) {
                throw new ConversionException("Value format invalid: " + e.getMessage(), e);
            }
        }

    }

    /**
     * Parses {@code input} into epoch milliseconds, trying Timestamp.valueOf,
     * then {@link #DATE_FORMATS} (English locale, then default locale), and
     * finally interpreting the string as a raw long.
     *
     * @throws NumberFormatException when no strategy can parse the input
     */
    private Long convertTimestamp2TimeMillis(String input) {
        if (input == null) {
            return null;
        }

        try {
            // first try the JDBC timestamp escape format (handles nanoseconds)
            return Timestamp.valueOf(input).getTime();
        } catch (Exception nfe) {
            try {
                try {
                    return parseDate(input, DATE_FORMATS, Locale.ENGLISH).getTime();
                } catch (Exception err) {
                    return parseDate(input, DATE_FORMATS, Locale.getDefault()).getTime();
                }
            } catch (Exception err) {
                // last resort: treat the input as epoch milliseconds
                return Long.parseLong(input);
            }
        }
    }

    /**
     * Tries each pattern in turn and returns the first full-length match.
     *
     * @throws ParseException when no pattern matches the whole input
     */
    private Date parseDate(String str, String[] parsePatterns, Locale locale) throws ParseException {
        if ((str == null) || (parsePatterns == null)) {
            throw new IllegalArgumentException("Date and Patterns must not be null");
        }

        SimpleDateFormat parser = null;
        ParsePosition pos = new ParsePosition(0);

        for (int i = 0; i < parsePatterns.length; i++) {
            try {
                if (parser == null) {
                    parser = new SimpleDateFormat(parsePatterns[i], locale);
                } else {
                    parser.applyPattern(parsePatterns[i]);
                }
            } catch (IllegalArgumentException ignored) {
                // skip a pattern SimpleDateFormat cannot compile instead of
                // aborting the whole scan (defensive; DATE_FORMATS is shared
                // and mutable, so a caller could inject a bad entry)
                continue;
            }
            pos.setIndex(0);
            Date date = parser.parse(str, pos);
            // require the whole string to be consumed to avoid partial matches
            if ((date != null) && (pos.getIndex() == str.length())) {
                return date;
            }
        }

        throw new ParseException("Unable to parse the date: " + str, -1);
    }
}

+ 315 - 0
example/src/main/java/com/alibaba/otter/canal/example/db/utils/SqlUtils.java

@@ -0,0 +1,315 @@
+package com.alibaba.otter.canal.example.db.utils;
+
+import org.apache.commons.beanutils.ConvertUtilsBean;
+import org.apache.commons.lang.StringUtils;
+
+import java.io.UnsupportedEncodingException;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.sql.*;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Conversions between java.sql.Types-typed values and their String form, used
+ * when shuttling row data between canal events and JDBC.
+ */
+public class SqlUtils {
+
+    // substituted for null in NOT NULL text columns so inserts don't fail
+    public static final String REQUIRED_FIELD_NULL_SUBSTITUTE = " ";
+    public static final String SQLDATE_FORMAT = "yyyy-MM-dd";
+    public static final String TIMESTAMP_FORMAT = "yyyy-MM-dd HH:mm:ss";
+    // java.sql.Types code -> Java type used when reading/writing that column
+    private static final Map<Integer, Class<?>> sqlTypeToJavaTypeMap = new HashMap<Integer, Class<?>>();
+    private static final ConvertUtilsBean convertUtilsBean = new ConvertUtilsBean();
+
+    static {
+        // register converters for the sql date/time and binary types
+        convertUtilsBean.register(SqlTimestampConverter.SQL_TIMESTAMP, Date.class);
+        convertUtilsBean.register(SqlTimestampConverter.SQL_TIMESTAMP, Time.class);
+        convertUtilsBean.register(SqlTimestampConverter.SQL_TIMESTAMP, Timestamp.class);
+        convertUtilsBean.register(ByteArrayConverter.SQL_BYTES, byte[].class);
+
+        // bool
+        sqlTypeToJavaTypeMap.put(Types.BOOLEAN, Boolean.class);
+
+        // int
+        sqlTypeToJavaTypeMap.put(Types.TINYINT, Integer.class);
+        sqlTypeToJavaTypeMap.put(Types.SMALLINT, Integer.class);
+        sqlTypeToJavaTypeMap.put(Types.INTEGER, Integer.class);
+
+        // long
+        sqlTypeToJavaTypeMap.put(Types.BIGINT, Long.class);
+        // MySQL BIT holds at most 64 bits and is unsigned, so use BigInteger
+        sqlTypeToJavaTypeMap.put(Types.BIT, BigInteger.class);
+
+        // decimal
+        sqlTypeToJavaTypeMap.put(Types.REAL, Float.class);
+        sqlTypeToJavaTypeMap.put(Types.FLOAT, Float.class);
+        sqlTypeToJavaTypeMap.put(Types.DOUBLE, Double.class);
+        sqlTypeToJavaTypeMap.put(Types.NUMERIC, BigDecimal.class);
+        sqlTypeToJavaTypeMap.put(Types.DECIMAL, BigDecimal.class);
+
+        // date
+        sqlTypeToJavaTypeMap.put(Types.DATE, Date.class);
+        sqlTypeToJavaTypeMap.put(Types.TIME, Time.class);
+        sqlTypeToJavaTypeMap.put(Types.TIMESTAMP, Timestamp.class);
+
+        // blob
+        sqlTypeToJavaTypeMap.put(Types.BLOB, byte[].class);
+
+        // byte[]
+        sqlTypeToJavaTypeMap.put(Types.REF, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.OTHER, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.ARRAY, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.STRUCT, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.SQLXML, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.BINARY, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.DATALINK, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.DISTINCT, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.VARBINARY, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.JAVA_OBJECT, byte[].class);
+        sqlTypeToJavaTypeMap.put(Types.LONGVARBINARY, byte[].class);
+
+        // String
+        sqlTypeToJavaTypeMap.put(Types.CHAR, String.class);
+        sqlTypeToJavaTypeMap.put(Types.VARCHAR, String.class);
+        sqlTypeToJavaTypeMap.put(Types.LONGVARCHAR, String.class);
+        sqlTypeToJavaTypeMap.put(Types.LONGNVARCHAR, String.class);
+        sqlTypeToJavaTypeMap.put(Types.NCHAR, String.class);
+        sqlTypeToJavaTypeMap.put(Types.NVARCHAR, String.class);
+        sqlTypeToJavaTypeMap.put(Types.NCLOB, String.class);
+        sqlTypeToJavaTypeMap.put(Types.CLOB, String.class);
+    }
+
+    /**
+     * Converts the ResultSet value at {@code index}, declared with the given
+     * java.sql.Types code, into its String representation.
+     *
+     * @param rs      result set positioned on the row to read
+     * @param index   1-based column index
+     * @param sqlType java.sql.Types code of the column
+     * @return the String form of the value, or null for SQL NULL
+     * @throws SQLException             if thrown by the JDBC API
+     * @throws IllegalArgumentException for an unmapped sqlType
+     */
+    public static String sqlValueToString(ResultSet rs, int index, int sqlType) throws SQLException {
+        Class<?> requiredType = sqlTypeToJavaTypeMap.get(sqlType);
+        if (requiredType == null) {
+            throw new IllegalArgumentException("unknow java.sql.Types - " + sqlType);
+        }
+
+        return getResultSetValue(rs, index, requiredType);
+    }
+
+    /**
+     * Reverse of {@link #sqlValueToString}: converts a String back into the
+     * Java object matching the column's java.sql.Types code.
+     *
+     * @param value               source String (may be null/empty)
+     * @param sqlType             java.sql.Types code of the target column
+     * @param isRequired          whether the column is NOT NULL (text columns
+     *                            get {@link #REQUIRED_FIELD_NULL_SUBSTITUTE})
+     * @param isEmptyStringNulled whether an empty text value maps to null
+     * @return the converted value, or null
+     * @throws IllegalArgumentException for an unmapped sqlType
+     */
+    public static Object stringToSqlValue(String value, int sqlType, boolean isRequired, boolean isEmptyStringNulled) {
+        // keep the original value around; conversions below work on sourceValue
+        String sourceValue = value;
+        if (SqlUtils.isTextType(sqlType)) {
+            if ((sourceValue == null) || (StringUtils.isEmpty(sourceValue) && isEmptyStringNulled)) {
+                return isRequired ? REQUIRED_FIELD_NULL_SUBSTITUTE : null;
+            } else {
+                return sourceValue;
+            }
+        } else {
+            if (StringUtils.isEmpty(sourceValue)) {
+                return null;
+            } else {
+                Class<?> requiredType = sqlTypeToJavaTypeMap.get(sqlType);
+                if (requiredType == null) {
+                    throw new IllegalArgumentException("unknow java.sql.Types - " + sqlType);
+                } else if (requiredType.equals(String.class)) {
+                    return sourceValue;
+                } else if (isNumeric(sqlType)) {
+                    // numeric parsers choke on surrounding whitespace
+                    return convertUtilsBean.convert(sourceValue.trim(), requiredType);
+                } else {
+                    return convertUtilsBean.convert(sourceValue, requiredType);
+                }
+            }
+        }
+    }
+
+    /**
+     * Re-encodes a character-typed value from {@code sourceEncoding} to
+     * {@code targetEncoding} (both defaulting to UTF-8 when blank); non-text
+     * sqlTypes are returned unchanged.
+     */
+    public static String encoding(String source, int sqlType, String sourceEncoding, String targetEncoding) {
+        switch (sqlType) {
+            case Types.CHAR:
+            case Types.VARCHAR:
+            case Types.LONGVARCHAR:
+            case Types.NCHAR:
+            case Types.NVARCHAR:
+            case Types.LONGNVARCHAR:
+            case Types.CLOB:
+            case Types.NCLOB:
+                if (false == StringUtils.isEmpty(source)) {
+                    String fromEncoding = StringUtils.isBlank(sourceEncoding) ? "UTF-8" : sourceEncoding;
+                    String toEncoding = StringUtils.isBlank(targetEncoding) ? "UTF-8" : targetEncoding;
+
+                    // if (false == StringUtils.equalsIgnoreCase(fromEncoding,
+                    // toEncoding)) {
+                    try {
+                        return new String(source.getBytes(fromEncoding), toEncoding);
+                    } catch (UnsupportedEncodingException e) {
+                        throw new IllegalArgumentException(e.getMessage(), e);
+                    }
+                    // }
+                }
+        }
+
+        return source;
+    }
+
+    /**
+     * Retrieve a JDBC column value from a ResultSet, using the specified value
+     * type.
+     * <p>
+     * Uses the specifically typed ResultSet accessor methods, falling back to
+     * {@link #getResultSetValue(ResultSet, int)} for unknown types.
+     * <p>
+     * Note that the returned value may not be assignable to the specified
+     * required type, in case of an unknown type. Calling code needs to deal
+     * with this case appropriately, e.g. throwing a corresponding exception.
+     * <p>
+     * NOTE(review): several branches deliberately widen the accessor (INT read
+     * via getLong, BIGINT via getBigDecimal) — presumably to survive MySQL
+     * UNSIGNED values that overflow the signed Java type; confirm before
+     * "simplifying" them.
+     *
+     * @param rs           is the ResultSet holding the data
+     * @param index        is the column index
+     * @param requiredType the required value type (may be <code>null</code>)
+     * @return the value object
+     * @throws SQLException if thrown by the JDBC API
+     */
+    private static String getResultSetValue(ResultSet rs, int index, Class<?> requiredType) throws SQLException {
+        if (requiredType == null) {
+            return getResultSetValue(rs, index);
+        }
+
+        Object value = null;
+        boolean wasNullCheck = false;
+
+        // Explicitly extract typed value, as far as possible.
+        if (String.class.equals(requiredType)) {
+            value = rs.getString(index);
+        } else if (boolean.class.equals(requiredType) || Boolean.class.equals(requiredType)) {
+            value = Boolean.valueOf(rs.getBoolean(index));
+            wasNullCheck = true;
+        } else if (byte.class.equals(requiredType) || Byte.class.equals(requiredType)) {
+            value = new Byte(rs.getByte(index));
+            wasNullCheck = true;
+        } else if (short.class.equals(requiredType) || Short.class.equals(requiredType)) {
+            value = new Short(rs.getShort(index));
+            wasNullCheck = true;
+        } else if (int.class.equals(requiredType) || Integer.class.equals(requiredType)) {
+            // widened read: INT UNSIGNED does not fit in a Java int
+            value = new Long(rs.getLong(index));
+            wasNullCheck = true;
+        } else if (long.class.equals(requiredType) || Long.class.equals(requiredType)) {
+            // widened read: BIGINT UNSIGNED does not fit in a Java long
+            value = rs.getBigDecimal(index);
+            wasNullCheck = true;
+        } else if (float.class.equals(requiredType) || Float.class.equals(requiredType)) {
+            value = new Float(rs.getFloat(index));
+            wasNullCheck = true;
+        } else if (double.class.equals(requiredType) || Double.class.equals(requiredType)
+                || Number.class.equals(requiredType)) {
+            value = new Double(rs.getDouble(index));
+            wasNullCheck = true;
+        } else if (Time.class.equals(requiredType)) {
+            // try {
+            // value = rs.getTime(index);
+            // } catch (SQLException e) {
+            value = rs.getString(index);// read as String: "0000" values cannot be represented as Time
+            // if (value == null && !rs.wasNull()) {
+            // value = "00:00:00"; //
+            // with zeroDateTimeBehavior=convertToNull, MySQL returns null for zero values
+            // }
+            // }
+        } else if (Timestamp.class.equals(requiredType) || Date.class.equals(requiredType)) {
+            // try {
+            // value = convertTimestamp(rs.getTimestamp(index));
+            // } catch (SQLException e) {
+            // read as String: "0000-00-00 00:00:00" cannot be represented as Timestamp
+            value = rs.getString(index);
+            // if (value == null && !rs.wasNull()) {
+            // value = "0000:00:00 00:00:00"; //
+            // with zeroDateTimeBehavior=convertToNull, MySQL returns null for zero values
+            // }
+            // }
+        } else if (BigDecimal.class.equals(requiredType)) {
+            value = rs.getBigDecimal(index);
+        } else if (BigInteger.class.equals(requiredType)) {
+            value = rs.getBigDecimal(index);
+        } else if (Blob.class.equals(requiredType)) {
+            value = rs.getBlob(index);
+        } else if (Clob.class.equals(requiredType)) {
+            value = rs.getClob(index);
+        } else if (byte[].class.equals(requiredType)) {
+            try {
+                byte[] bytes = rs.getBytes(index);
+                if (bytes == null) {
+                    value = null;
+                } else {
+                    value = new String(bytes, "ISO-8859-1");// carry binary data as an ISO-8859-1 String
+                }
+            } catch (UnsupportedEncodingException e) {
+                throw new SQLException(e);
+            }
+        } else {
+            // Some unknown type desired -> rely on getObject.
+            value = getResultSetValue(rs, index);
+        }
+
+        // Perform was-null check if demanded (for results that the
+        // JDBC driver returns as primitives).
+        if (wasNullCheck && (value != null) && rs.wasNull()) {
+            value = null;
+        }
+
+        return (value == null) ? null : convertUtilsBean.convert(value);
+    }
+
+    /**
+     * Retrieve a JDBC column value from a ResultSet, using the most appropriate
+     * value type. The returned value should be a detached value object, not
+     * having any ties to the active ResultSet: in particular, it should not be
+     * a Blob or Clob object but rather a byte array respectively String
+     * representation.
+     * <p>
+     * Uses the <code>getObject(index)</code> method, but includes additional
+     * "hacks" to get around Oracle 10g returning a non-standard object for its
+     * TIMESTAMP datatype and a <code>java.sql.Date</code> for DATE columns
+     * leaving out the time portion: These columns will explicitly be extracted
+     * as standard <code>java.sql.Timestamp</code> object.
+     *
+     * @param rs    is the ResultSet holding the data
+     * @param index is the column index
+     * @return the value object
+     * @throws SQLException if thrown by the JDBC API
+     * @see Blob
+     * @see Clob
+     * @see Timestamp
+     */
+    private static String getResultSetValue(ResultSet rs, int index) throws SQLException {
+        Object obj = rs.getObject(index);
+        return (obj == null) ? null : convertUtilsBean.convert(obj);
+    }
+
+    // private static Object convertTimestamp(Timestamp timestamp) {
+    // return (timestamp == null) ? null : timestamp.getTime();
+    // }
+
+    /**
+     * Check whether the given SQL type is numeric.
+     */
+    public static boolean isNumeric(int sqlType) {
+        return (Types.BIT == sqlType) || (Types.BIGINT == sqlType) || (Types.DECIMAL == sqlType)
+                || (Types.DOUBLE == sqlType) || (Types.FLOAT == sqlType) || (Types.INTEGER == sqlType)
+                || (Types.NUMERIC == sqlType) || (Types.REAL == sqlType) || (Types.SMALLINT == sqlType)
+                || (Types.TINYINT == sqlType);
+    }
+
+    /** Check whether the given SQL type is a character/text type. */
+    public static boolean isTextType(int sqlType) {
+        if (sqlType == Types.CHAR || sqlType == Types.VARCHAR || sqlType == Types.CLOB || sqlType == Types.LONGVARCHAR
+                || sqlType == Types.NCHAR || sqlType == Types.NVARCHAR || sqlType == Types.NCLOB
+                || sqlType == Types.LONGNVARCHAR) {
+            return true;
+        } else {
+            return false;
+        }
+    }
+}

+ 53 - 0
example/src/main/resources/client-spring.xml

@@ -0,0 +1,53 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-2.0.xsd"
+       default-autowire="byName">
+
+    <bean class="com.alibaba.otter.canal.example.db.PropertyPlaceholderConfigurer" lazy-init="false">
+        <property name="ignoreResourceNotFound" value="true"/>
+        <property name="systemPropertiesModeName" value="SYSTEM_PROPERTIES_MODE_OVERRIDE"/><!-- 允许system覆盖 -->
+        <property name="locationNames">
+            <list>
+                <value>classpath:client.properties</value>
+            </list>
+        </property>
+    </bean>
+
+    <bean id="dataSource" class="com.alibaba.druid.pool.DruidDataSource" destroy-method="close">
+        <property name="driverClassName" value="com.mysql.jdbc.Driver"/>
+        <property name="url" value="${target.mysql.url:}"/>
+        <property name="username" value="${target.mysql.dbUsername:canal}"/>
+        <property name="password" value="${target.mysql.dbPassword:canal}"/>
+        <property name="maxActive" value="30"/>
+        <property name="initialSize" value="0"/>
+        <property name="minIdle" value="1"/>
+        <property name="maxWait" value="10000"/>
+        <property name="timeBetweenEvictionRunsMillis" value="60000"/>
+        <property name="minEvictableIdleTimeMillis" value="300000"/>
+        <property name="validationQuery" value="SELECT 1"/>
+        <property name="exceptionSorterClassName" value="com.alibaba.druid.pool.vendor.MySqlExceptionSorter"/>
+        <property name="validConnectionCheckerClassName" value="com.alibaba.druid.pool.vendor.MySqlValidConnectionChecker"/>
+        <property name="testWhileIdle" value="true"/>
+        <property name="testOnBorrow" value="false"/>
+        <property name="testOnReturn" value="false"/>
+        <property name="useUnfairLock" value="true"/>
+    </bean>
+
+    <bean name="canalConnectorClient" class="com.alibaba.otter.canal.example.db.CanalConnectorClient" abstract="true">
+        <property name="zkServers" value="${zk.servers:127.0.0.1:2181}"/>
+        <property name="debug" value="${client.debug:true}"/>
+        <property name="destination" value="${client.destination:example}"/>
+        <property name="username" value="${client.username:canal}"/>
+        <property name="password" value="${client.password:canal}"/>
+        <property name="exceptionStrategy" value="${client.exceptionstrategy:1}"/>
+        <property name="retryTimes" value="${client.retrytimes:3}"/>
+        <property name="filter" value="${client.filter:.*\\..*}"/>
+        <property name="waitingTime" value="${client.waiting.time:10}"/>
+    </bean>
+
+
+    <bean id="mysqlClient" class="com.alibaba.otter.canal.example.db.mysql.MysqlClient" lazy-init="true" parent="canalConnectorClient">
+        <property name="dataSource" ref="dataSource"/>
+    </bean>
+</beans>

+ 16 - 0
example/src/main/resources/client.properties

@@ -0,0 +1,16 @@
+# client 配置
+zk.servers=127.0.0.1:2181
+# 5 * 1024
+client.batch.size=5120
+client.debug=false
+client.destination=example
+client.username=canal
+client.password=canal
+client.exceptionstrategy=1
+client.retrytimes=3
+client.filter=.*\\..*
+
+# 同步目标: mysql 配置
+target.mysql.url=jdbc:mysql://127.0.0.1:4306
+target.mysql.dbUsername=root
+target.mysql.dbPassword=123456

+ 1 - 1
filter/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
-		<version>1.0.26-SNAPSHOT</version>
+		<version>1.1.0-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 	</parent>
 	<groupId>com.alibaba.otter</groupId>

+ 1 - 1
instance/core/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
-		<version>1.0.26-SNAPSHOT</version>
+		<version>1.1.0-SNAPSHOT</version>
 		<relativePath>../../pom.xml</relativePath>
 	</parent>
 	<artifactId>canal.instance.core</artifactId>

+ 1 - 1
instance/manager/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
-		<version>1.0.26-SNAPSHOT</version>
+		<version>1.1.0-SNAPSHOT</version>
 		<relativePath>../../pom.xml</relativePath>
 	</parent>
 	<groupId>com.alibaba.otter</groupId>

+ 8 - 6
instance/manager/src/main/java/com/alibaba/otter/canal/instance/manager/CanalInstanceWithManager.java

@@ -6,6 +6,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
+import com.alibaba.otter.canal.meta.FileMixedMetaManager;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -27,6 +28,7 @@ import com.alibaba.otter.canal.instance.manager.model.CanalParameter.MetaMode;
 import com.alibaba.otter.canal.instance.manager.model.CanalParameter.SourcingType;
 import com.alibaba.otter.canal.instance.manager.model.CanalParameter.StorageMode;
 import com.alibaba.otter.canal.instance.manager.model.CanalParameter.StorageScavengeMode;
+import com.alibaba.otter.canal.meta.FileMixedMetaManager;
 import com.alibaba.otter.canal.meta.MemoryMetaManager;
 import com.alibaba.otter.canal.meta.PeriodMixedMetaManager;
 import com.alibaba.otter.canal.meta.ZooKeeperMetaManager;
@@ -37,12 +39,7 @@ import com.alibaba.otter.canal.parse.inbound.AbstractEventParser;
 import com.alibaba.otter.canal.parse.inbound.group.GroupEventParser;
 import com.alibaba.otter.canal.parse.inbound.mysql.LocalBinlogEventParser;
 import com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser;
-import com.alibaba.otter.canal.parse.index.CanalLogPositionManager;
-import com.alibaba.otter.canal.parse.index.FailbackLogPositionManager;
-import com.alibaba.otter.canal.parse.index.MemoryLogPositionManager;
-import com.alibaba.otter.canal.parse.index.MetaLogPositionManager;
-import com.alibaba.otter.canal.parse.index.PeriodMixedLogPositionManager;
-import com.alibaba.otter.canal.parse.index.ZooKeeperLogPositionManager;
+import com.alibaba.otter.canal.parse.index.*;
 import com.alibaba.otter.canal.parse.support.AuthenticationInfo;
 import com.alibaba.otter.canal.protocol.position.EntryPosition;
 import com.alibaba.otter.canal.sink.entry.EntryEventSink;
@@ -120,6 +117,11 @@ public class CanalInstanceWithManager extends AbstractCanalInstance {
             ZooKeeperMetaManager zooKeeperMetaManager = new ZooKeeperMetaManager();
             zooKeeperMetaManager.setZkClientx(getZkclientx());
             ((PeriodMixedMetaManager) metaManager).setZooKeeperMetaManager(zooKeeperMetaManager);
+        } else if (mode.isLocalFile()) {
+            FileMixedMetaManager fileMixedMetaManager = new FileMixedMetaManager();
+            fileMixedMetaManager.setDataDir(parameters.getDataDir());
+            fileMixedMetaManager.setPeriod(parameters.getMetaFileFlushPeriod());
+            metaManager = fileMixedMetaManager;
         } else {
             throw new CanalException("unsupport MetaMode for " + mode);
         }

+ 61 - 1
instance/manager/src/main/java/com/alibaba/otter/canal/instance/manager/model/CanalParameter.java

@@ -28,8 +28,10 @@ public class CanalParameter implements Serializable {
     private Long                     zkClusterId;                                                    // zk集群id,为管理方便
     private List<String>             zkClusters;                                                     // zk集群地址
 
+    private String                   dataDir                            = "../conf";                 // 默认本地文件数据的目录默认是conf
     // meta相关参数
     private MetaMode                 metaMode                           = MetaMode.MEMORY;           // meta机制
+    private Integer                  metaFileFlushPeriod                = 1000;                      // meta刷新间隔
 
     // storage存储
     private Integer                  transactionSize                    = 1024;                      // 支持处理的transaction事务大小
@@ -91,6 +93,10 @@ public class CanalParameter implements Serializable {
     private Boolean                  filterTableError                   = Boolean.FALSE;             // 是否忽略表解析异常
     private String                   blackFilter                        = null;                      // 匹配黑名单,忽略解析
 
+    private Boolean                  tsdbEnable                         = Boolean.FALSE;             // 是否开启tableMetaTSDB
+    private String                   tsdbJdbcUrl;
+    private String                   tsdbJdbcUserName;
+    private String                   tsdbJdbcPassword;
     // ================================== 兼容字段处理
     private InetSocketAddress        masterAddress;                                                  // 主库信息
     private String                   masterUsername;                                                 // 帐号
@@ -243,7 +249,9 @@ public class CanalParameter implements Serializable {
         /** 文件存储模式 */
         ZOOKEEPER,
         /** 混合模式,内存+文件 */
-        MIXED;
+        MIXED,
+        /** 本地文件存储模式 */
+        LOCAL_FILE;
 
         public boolean isMemory() {
             return this.equals(MetaMode.MEMORY);
@@ -256,6 +264,10 @@ public class CanalParameter implements Serializable {
         public boolean isMixed() {
             return this.equals(MetaMode.MIXED);
         }
+
+        public boolean isLocalFile() {
+            return this.equals(MetaMode.LOCAL_FILE);
+        }
     }
 
     public static enum IndexMode {
@@ -390,6 +402,22 @@ public class CanalParameter implements Serializable {
         return storageMode;
     }
 
+    public String getDataDir() {
+        return dataDir;
+    }
+
+    public void setDataDir(String dataDir) {
+        this.dataDir = dataDir;
+    }
+
+    public Integer getMetaFileFlushPeriod() {
+        return metaFileFlushPeriod;
+    }
+
+    public void setMetaFileFlushPeriod(Integer metaFileFlushPeriod) {
+        this.metaFileFlushPeriod = metaFileFlushPeriod;
+    }
+
     public void setStorageMode(StorageMode storageMode) {
         this.storageMode = storageMode;
     }
@@ -859,6 +887,38 @@ public class CanalParameter implements Serializable {
         this.blackFilter = blackFilter;
     }
 
+    public Boolean getTsdbEnable() {
+        return tsdbEnable;
+    }
+
+    public void setTsdbEnable(Boolean tsdbEnable) {
+        this.tsdbEnable = tsdbEnable;
+    }
+
+    public String getTsdbJdbcUrl() {
+        return tsdbJdbcUrl;
+    }
+
+    public void setTsdbJdbcUrl(String tsdbJdbcUrl) {
+        this.tsdbJdbcUrl = tsdbJdbcUrl;
+    }
+
+    public String getTsdbJdbcUserName() {
+        return tsdbJdbcUserName;
+    }
+
+    public void setTsdbJdbcUserName(String tsdbJdbcUserName) {
+        this.tsdbJdbcUserName = tsdbJdbcUserName;
+    }
+
+    public String getTsdbJdbcPassword() {
+        return tsdbJdbcPassword;
+    }
+
+    public void setTsdbJdbcPassword(String tsdbJdbcPassword) {
+        this.tsdbJdbcPassword = tsdbJdbcPassword;
+    }
+
     public String toString() {
         return ToStringBuilder.reflectionToString(this, CanalToStringStyle.DEFAULT_STYLE);
     }

+ 1 - 1
instance/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
-		<version>1.0.26-SNAPSHOT</version>
+		<version>1.1.0-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 	</parent>
 	<groupId>com.alibaba.otter</groupId>

+ 1 - 1
instance/spring/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
-		<version>1.0.26-SNAPSHOT</version>
+		<version>1.1.0-SNAPSHOT</version>
 		<relativePath>../../pom.xml</relativePath>
 	</parent>
 	<groupId>com.alibaba.otter</groupId>

+ 2 - 2
kafka-client/pom.xml

@@ -6,7 +6,7 @@
     <parent>
         <artifactId>canal</artifactId>
         <groupId>com.alibaba.otter</groupId>
-        <version>1.0.26-SNAPSHOT</version>
+        <version>1.1.0-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
     <groupId>com.alibaba.otter</groupId>
@@ -108,4 +108,4 @@
             </build>
         </profile>
     </profiles>
-</project>
+</project>

+ 3 - 1
kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/MessageDeserializer.java

@@ -29,7 +29,9 @@ public class MessageDeserializer implements Deserializer<Message> {
     @Override
     public Message deserialize(String topic, byte[] data) {
         try {
-            if (data == null) return null;
+            if (data == null) {
+                return null;
+            }
             else {
                 CanalPacket.Packet p = CanalPacket.Packet.parseFrom(data);
                 switch (p.getType()) {

+ 2 - 2
kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/running/ClientRunningData.java

@@ -8,8 +8,8 @@ package com.alibaba.otter.canal.kafka.client.running;
  */
 public class ClientRunningData {
 
-    private String groupId;
-    private String address;
+    private String  groupId;
+    private String  address;
     private boolean active = true;
 
     public String getGroupId() {

+ 44 - 38
kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/running/ClientRunningMonitor.java

@@ -1,12 +1,11 @@
 package com.alibaba.otter.canal.kafka.client.running;
 
-import com.alibaba.otter.canal.common.AbstractCanalLifeCycle;
-import com.alibaba.otter.canal.common.utils.AddressUtils;
-import com.alibaba.otter.canal.common.utils.BooleanMutex;
-import com.alibaba.otter.canal.common.utils.JsonUtils;
-import com.alibaba.otter.canal.common.zookeeper.ZkClientx;
-import com.alibaba.otter.canal.common.zookeeper.ZookeeperPathUtils;
-import com.alibaba.otter.canal.protocol.exception.CanalClientException;
+import java.text.MessageFormat;
+import java.util.Random;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
 import org.I0Itec.zkclient.IZkDataListener;
 import org.I0Itec.zkclient.exception.ZkException;
 import org.I0Itec.zkclient.exception.ZkInterruptedException;
@@ -17,11 +16,14 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.MDC;
 
-import java.text.MessageFormat;
-import java.util.Random;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
+import com.alibaba.otter.canal.common.AbstractCanalLifeCycle;
+import com.alibaba.otter.canal.common.utils.AddressUtils;
+import com.alibaba.otter.canal.common.utils.BooleanMutex;
+import com.alibaba.otter.canal.common.utils.JsonUtils;
+import com.alibaba.otter.canal.common.zookeeper.ZkClientx;
+import com.alibaba.otter.canal.common.zookeeper.ZookeeperPathUtils;
+import com.alibaba.otter.canal.protocol.exception.CanalClientException;
+
 
 /**
  * kafka client running状态信息
@@ -31,13 +33,18 @@ import java.util.concurrent.TimeUnit;
  */
 public class ClientRunningMonitor extends AbstractCanalLifeCycle {
 
-    private static final String TOPIC_ROOT_NODE = ZookeeperPathUtils.CANAL_ROOT_NODE + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR + "topics";
+    private static final String TOPIC_ROOT_NODE             = ZookeeperPathUtils.CANAL_ROOT_NODE
+                                                              + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR + "topics";
 
-    private static final String TOPIC_NODE = TOPIC_ROOT_NODE + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR + "{0}";
+    private static final String TOPIC_NODE                  = TOPIC_ROOT_NODE + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR
+                                                              + "{0}";
 
-    private static final String TOPIC_CLIENTID_NODE = TOPIC_NODE + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR + "{1}";
+    private static final String TOPIC_CLIENTID_NODE         = TOPIC_NODE + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR
+                                                              + "{1}";
 
-    private static final String TOPIC_CLIENTID_RUNNING_NODE = TOPIC_CLIENTID_NODE + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR + ZookeeperPathUtils.RUNNING_NODE;
+    private static final String TOPIC_CLIENTID_RUNNING_NODE = TOPIC_CLIENTID_NODE
+                                                              + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR
+                                                              + ZookeeperPathUtils.RUNNING_NODE;
 
     private static String getTopicClientRunning(String topic, String groupId) {
         return MessageFormat.format(TOPIC_CLIENTID_RUNNING_NODE, topic, groupId);
@@ -47,21 +54,21 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
         return MessageFormat.format(TOPIC_CLIENTID_NODE, topic, groupId);
     }
 
-    private static final Logger logger = LoggerFactory.getLogger(ClientRunningMonitor.class);
-    private ZkClientx zkClient;
-    private String topic;
-    private ClientRunningData clientData;
-    private IZkDataListener dataListener;
-    private BooleanMutex mutex = new BooleanMutex(false);
-    private volatile boolean release = false;
+    private static final Logger        logger       = LoggerFactory.getLogger(ClientRunningMonitor.class);
+    private ZkClientx                  zkClient;
+    private String                     topic;
+    private ClientRunningData          clientData;
+    private IZkDataListener            dataListener;
+    private BooleanMutex               mutex        = new BooleanMutex(false);
+    private volatile boolean           release      = false;
     private volatile ClientRunningData activeData;
-    private ScheduledExecutorService delayExector = Executors.newScheduledThreadPool(1);
-    private ClientRunningListener listener;
-    private int delayTime = 5;
+    private ScheduledExecutorService   delayExector = Executors.newScheduledThreadPool(1);
+    private ClientRunningListener      listener;
+    private int                        delayTime    = 5;
 
-    private static Integer virtualPort;
+    private static Integer             virtualPort;
 
-    public ClientRunningMonitor() {
+    public ClientRunningMonitor(){
         if (virtualPort == null) {
             Random rand = new Random();
             virtualPort = rand.nextInt(9000) + 1000;
@@ -108,7 +115,6 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
 
     }
 
-
     public void start() {
         super.start();
 
@@ -123,7 +129,7 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
         String path = getTopicClientRunning(this.topic, clientData.getGroupId());
         zkClient.unsubscribeDataChanges(path, dataListener);
         releaseRunning(); // 尝试一下release
-        //Fix issue #697
+        // Fix issue #697
         if (delayExector != null) {
             delayExector.shutdown();
         }
@@ -159,13 +165,12 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
                 }
             }
         } catch (ZkNoNodeException e) {
-            zkClient.createPersistent(getClientIdNodePath(this.topic, clientData.getGroupId()),
-                    true); // 尝试创建父节点
+            zkClient.createPersistent(getClientIdNodePath(this.topic, clientData.getGroupId()), true); // 尝试创建父节点
             initRunning();
         } catch (Throwable t) {
             logger.error(MessageFormat.format("There is an error when execute initRunning method, with destination [{0}].",
-                    topic),
-                    t);
+                topic),
+                t);
             // 出现任何异常尝试release
             releaseRunning();
             throw new CanalClientException("something goes wrong in initRunning method. ", t);
@@ -187,7 +192,8 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
      */
     public boolean check() {
         String path = getTopicClientRunning(this.topic, clientData.getGroupId());
-        //ZookeeperPathUtils.getDestinationClientRunning(this.destination, clientData.getClientId());
+        // ZookeeperPathUtils.getDestinationClientRunning(this.destination,
+        // clientData.getClientId());
         try {
             byte[] bytes = zkClient.readData(path);
             ClientRunningData eventData = JsonUtils.unmarshalFromByte(bytes, ClientRunningData.class);
@@ -196,8 +202,8 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
             boolean result = isMine(activeData.getAddress());
             if (!result) {
                 logger.warn("canal is running in [{}] , but not in [{}]",
-                        activeData.getAddress(),
-                        clientData.getAddress());
+                    activeData.getAddress(),
+                    clientData.getAddress());
             }
             return result;
         } catch (ZkNoNodeException e) {
@@ -235,7 +241,7 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
         if (listener != null) {
             // 触发回调
             listener.processActiveEnter();
-            this.clientData.setAddress(/*address*/AddressUtils.getHostIp() + ":" + virtualPort);
+            this.clientData.setAddress(/* address */AddressUtils.getHostIp() + ":" + virtualPort);
 
             String path = getTopicClientRunning(this.topic, clientData.getGroupId());
             // 序列化

+ 2 - 2
kafka/pom.xml

@@ -6,7 +6,7 @@
     <parent>
         <artifactId>canal</artifactId>
         <groupId>com.alibaba.otter</groupId>
-        <version>1.0.26-SNAPSHOT</version>
+        <version>1.1.0-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
     <groupId>com.alibaba.otter</groupId>
@@ -137,4 +137,4 @@
             </build>
         </profile>
     </profiles>
-</project>
+</project>

+ 3 - 3
kafka/src/main/java/com/alibaba/otter/canal/kafka/CanalServerStarter.java

@@ -17,9 +17,9 @@ import com.alibaba.otter.canal.deployer.CanalController;
  */
 public class CanalServerStarter {
 
-    private static final String CLASSPATH_URL_PREFIX = "classpath:";
-    private static final Logger logger               = LoggerFactory.getLogger(CanalServerStarter.class);
-    private volatile static boolean running          = false;
+    private static final String     CLASSPATH_URL_PREFIX = "classpath:";
+    private static final Logger     logger               = LoggerFactory.getLogger(CanalServerStarter.class);
+    private volatile static boolean running              = false;
 
     public static void init() {
         try {

+ 25 - 26
kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/CanalKafkaProducer.java

@@ -3,7 +3,6 @@ package com.alibaba.otter.canal.kafka.producer;
 import java.io.IOException;
 import java.util.Properties;
 
-import com.google.protobuf.ByteString;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.Producer;
 import org.apache.kafka.clients.producer.ProducerRecord;
@@ -12,7 +11,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.alibaba.otter.canal.kafka.producer.KafkaProperties.Topic;
-import com.alibaba.otter.canal.protocol.CanalEntry;
 import com.alibaba.otter.canal.protocol.Message;
 
 /**
@@ -52,31 +50,32 @@ public class CanalKafkaProducer {
     }
 
     public void send(Topic topic, Message message) throws IOException {
-        boolean valid = false;
-        if (message != null) {
-            if (message.isRaw() && !message.getRawEntries().isEmpty()) {
-                for (ByteString byteString : message.getRawEntries()) {
-                    CanalEntry.Entry entry = CanalEntry.Entry.parseFrom(byteString);
-                    if (entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONBEGIN
-                            && entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONEND) {
-                        valid = true;
-                        break;
-                    }
-                }
-            } else if (!message.getEntries().isEmpty()){
-                for (CanalEntry.Entry entry : message.getEntries()) {
-                    if (entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONBEGIN
-                            && entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONEND) {
-                        valid = true;
-                        break;
-                    }
-                }
-            }
-        }
-        if (!valid) {
-            return;
-        }
+        // set canal.instance.filter.transaction.entry = true
 
+        // boolean valid = false;
+        // if (message != null) {
+        // if (message.isRaw() && !message.getRawEntries().isEmpty()) {
+        // for (ByteString byteString : message.getRawEntries()) {
+        // CanalEntry.Entry entry = CanalEntry.Entry.parseFrom(byteString);
+        // if (entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONBEGIN
+        // && entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONEND) {
+        // valid = true;
+        // break;
+        // }
+        // }
+        // } else if (!message.getEntries().isEmpty()){
+        // for (CanalEntry.Entry entry : message.getEntries()) {
+        // if (entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONBEGIN
+        // && entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONEND) {
+        // valid = true;
+        // break;
+        // }
+        // }
+        // }
+        // }
+        // if (!valid) {
+        // return;
+        // }
         ProducerRecord<String, Message> record;
         if (topic.getPartition() != null) {
             record = new ProducerRecord<String, Message>(topic.getTopic(), topic.getPartition(), null, message);

+ 5 - 2
kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/CanalKafkaStarter.java

@@ -52,7 +52,10 @@ public class CanalKafkaStarter {
             // 初始化 kafka producer
             canalKafkaProducer = new CanalKafkaProducer();
             canalKafkaProducer.init(kafkaProperties);
-
+            // set filterTransactionEntry
+            if (kafkaProperties.isFilterTransactionEntry()) {
+                System.setProperty("canal.instance.filter.transaction.entry", "true");
+            }
             // 对应每个instance启动一个worker线程
             List<CanalDestination> destinations = kafkaProperties.getCanalDestinations();
 
@@ -118,7 +121,7 @@ public class CanalKafkaStarter {
                     Message message = server.getWithoutAck(clientIdentity, kafkaProperties.getCanalBatchSize()); // 获取指定数量的数据
                     long batchId = message.getId();
                     try {
-                        int size = message.getEntries().size();
+                        int size = message.isRaw() ? message.getRawEntries().size() : message.getEntries().size();
                         if (batchId != -1 && size != 0) {
                             if (!StringUtils.isEmpty(destination.getTopic())) {
                                 Topic topic = new Topic();

+ 17 - 8
kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/KafkaProperties.java

@@ -13,15 +13,15 @@ import java.util.Set;
  */
 public class KafkaProperties {
 
-    private String                 servers           = "localhost:6667";
-    private int                    retries           = 0;
-    private int                    batchSize         = 16384;
-    private int                    lingerMs          = 1;
-    private long                   bufferMemory      = 33554432L;
+    private String                 servers                = "localhost:6667";
+    private int                    retries                = 0;
+    private int                    batchSize              = 16384;
+    private int                    lingerMs               = 1;
+    private long                   bufferMemory           = 33554432L;
+    private boolean                filterTransactionEntry = true;
+    private int                    canalBatchSize         = 5;
 
-    private int                    canalBatchSize    = 5;
-
-    private List<CanalDestination> canalDestinations = new ArrayList<CanalDestination>();
+    private List<CanalDestination> canalDestinations      = new ArrayList<CanalDestination>();
 
     public static class CanalDestination {
 
@@ -158,4 +158,13 @@ public class KafkaProperties {
     public void setCanalDestinations(List<CanalDestination> canalDestinations) {
         this.canalDestinations = canalDestinations;
     }
+
+    public boolean isFilterTransactionEntry() {
+        return filterTransactionEntry;
+    }
+
+    public void setFilterTransactionEntry(boolean filterTransactionEntry) {
+        this.filterTransactionEntry = filterTransactionEntry;
+    }
+
 }

+ 49 - 10
kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/MessageSerializer.java

@@ -1,13 +1,19 @@
 package com.alibaba.otter.canal.kafka.producer;
 
-import com.alibaba.otter.canal.protocol.CanalEntry;
-import com.alibaba.otter.canal.protocol.CanalPacket;
-import com.alibaba.otter.canal.protocol.Message;
+import java.util.List;
+import java.util.Map;
+
 import org.apache.kafka.common.errors.SerializationException;
 import org.apache.kafka.common.serialization.Serializer;
 import org.springframework.util.CollectionUtils;
 
-import java.util.Map;
+import com.alibaba.otter.canal.protocol.CanalEntry;
+import com.alibaba.otter.canal.protocol.CanalPacket;
+import com.alibaba.otter.canal.protocol.CanalPacket.PacketType;
+import com.alibaba.otter.canal.protocol.Message;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.CodedOutputStream;
+import com.google.protobuf.WireFormat;
 
 /**
  * Kafka Message类的序列化
@@ -25,20 +31,53 @@ public class MessageSerializer implements Serializer<Message> {
     public byte[] serialize(String topic, Message data) {
         try {
             if (data != null) {
-                CanalPacket.Messages.Builder messageBuilder = CanalPacket.Messages.newBuilder();
                 if (data.getId() != -1) {
                     if (data.isRaw() && !CollectionUtils.isEmpty(data.getRawEntries())) {
-                        messageBuilder.addAllMessages(data.getRawEntries());
+                        // for performance
+                        List<ByteString> rowEntries = data.getRawEntries();
+                        // message size
+                        int messageSize = 0;
+                        messageSize += com.google.protobuf.CodedOutputStream.computeInt64Size(1, data.getId());
+
+                        int dataSize = 0;
+                        for (int i = 0; i < rowEntries.size(); i++) {
+                            dataSize += com.google.protobuf.CodedOutputStream.computeBytesSizeNoTag(rowEntries.get(i));
+                        }
+                        messageSize += dataSize;
+                        messageSize += 1 * rowEntries.size();
+                        // packet size
+                        int size = 0;
+                        size += com.google.protobuf.CodedOutputStream.computeEnumSize(3,
+                            PacketType.MESSAGES.getNumber());
+                        size += com.google.protobuf.CodedOutputStream.computeTagSize(5)
+                                + com.google.protobuf.CodedOutputStream.computeRawVarint32Size(messageSize)
+                                + messageSize;
+                        // build data
+                        byte[] body = new byte[size];
+                        CodedOutputStream output = CodedOutputStream.newInstance(body);
+                        output.writeEnum(3, PacketType.MESSAGES.getNumber());
+
+                        output.writeTag(5, WireFormat.WIRETYPE_LENGTH_DELIMITED);
+                        output.writeRawVarint32(messageSize);
+                        // message
+                        output.writeInt64(1, data.getId());
+                        for (int i = 0; i < rowEntries.size(); i++) {
+                            output.writeBytes(2, rowEntries.get(i));
+                        }
+                        output.checkNoSpaceLeft();
+                        return body;
                     } else if (!CollectionUtils.isEmpty(data.getEntries())) {
+                        CanalPacket.Messages.Builder messageBuilder = CanalPacket.Messages.newBuilder();
                         for (CanalEntry.Entry entry : data.getEntries()) {
                             messageBuilder.addMessages(entry.toByteString());
                         }
+
+                        CanalPacket.Packet.Builder packetBuilder = CanalPacket.Packet.newBuilder();
+                        packetBuilder.setType(CanalPacket.PacketType.MESSAGES);
+                        packetBuilder.setBody(messageBuilder.build().toByteString());
+                        return packetBuilder.build().toByteArray();
                     }
                 }
-                CanalPacket.Packet.Builder packetBuilder = CanalPacket.Packet.newBuilder();
-                packetBuilder.setType(CanalPacket.PacketType.MESSAGES);
-                packetBuilder.setBody(messageBuilder.build().toByteString());
-                return packetBuilder.build().toByteArray();
             }
         } catch (Exception e) {
             throw new SerializationException("Error when serializing message to byte[] ");

+ 1 - 0
kafka/src/main/resources/kafka.yml

@@ -5,6 +5,7 @@ lingerMs: 1
 bufferMemory: 33554432
 # canal的批次大小,单位 k
 canalBatchSize: 50
+filterTransactionEntry: true
 
 canalDestinations:
   - canalDestination: example

+ 1 - 1
meta/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
-		<version>1.0.26-SNAPSHOT</version>
+		<version>1.1.0-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 	</parent>
 	<groupId>com.alibaba.otter</groupId>

+ 1 - 1
parse/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
-		<version>1.0.26-SNAPSHOT</version>
+		<version>1.1.0-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 	</parent>
 	<artifactId>canal.parse</artifactId>

+ 30 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/exception/PositionNotFoundException.java

@@ -0,0 +1,30 @@
+package com.alibaba.otter.canal.parse.exception;
+
+/**
+ * @author chengjin.lyf on 2018/7/20 下午2:54
+ * @since 1.0.25
+ */
+public class PositionNotFoundException extends CanalParseException {
+
+    private static final long serialVersionUID = -7382448928116244017L;
+
+    public PositionNotFoundException(String errorCode) {
+        super(errorCode);
+    }
+
+    public PositionNotFoundException(String errorCode, Throwable cause) {
+        super(errorCode, cause);
+    }
+
+    public PositionNotFoundException(String errorCode, String errorDesc) {
+        super(errorCode, errorDesc);
+    }
+
+    public PositionNotFoundException(String errorCode, String errorDesc, Throwable cause) {
+        super(errorCode, errorDesc, cause);
+    }
+
+    public PositionNotFoundException(Throwable cause) {
+        super(cause);
+    }
+}

+ 32 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/exception/ServerIdNotMatchException.java

@@ -0,0 +1,32 @@
+package com.alibaba.otter.canal.parse.exception;
+
+import com.alibaba.otter.canal.common.CanalException;
+
+/**
+ * @author chengjin.lyf on 2018/8/8 下午1:07
+ * @since 1.0.25
+ */
+public class ServerIdNotMatchException extends CanalException{
+
+    private static final long serialVersionUID = -6124989280379293953L;
+
+    public ServerIdNotMatchException(String errorCode) {
+        super(errorCode);
+    }
+
+    public ServerIdNotMatchException(String errorCode, Throwable cause) {
+        super(errorCode, cause);
+    }
+
+    public ServerIdNotMatchException(String errorCode, String errorDesc) {
+        super(errorCode, errorDesc);
+    }
+
+    public ServerIdNotMatchException(String errorCode, String errorDesc, Throwable cause) {
+        super(errorCode, errorDesc, cause);
+    }
+
+    public ServerIdNotMatchException(Throwable cause) {
+        super(cause);
+    }
+}

+ 29 - 2
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/AbstractEventParser.java

@@ -21,6 +21,7 @@ import com.alibaba.otter.canal.filter.CanalEventFilter;
 import com.alibaba.otter.canal.parse.CanalEventParser;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.MysqlGTIDSet;
 import com.alibaba.otter.canal.parse.exception.CanalParseException;
+import com.alibaba.otter.canal.parse.exception.PositionNotFoundException;
 import com.alibaba.otter.canal.parse.exception.TableIdNotFoundException;
 import com.alibaba.otter.canal.parse.inbound.EventTransactionBuffer.TransactionFlushCallback;
 import com.alibaba.otter.canal.parse.index.CanalLogPositionManager;
@@ -94,6 +95,8 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
                                                                                     .availableProcessors() * 60 / 100;     // 60%的能力跑解析,剩余部分处理网络
     protected int                                    parallelBufferSize         = 256;                                     // 必须为2的幂
     protected MultiStageCoprocessor                  multiStageCoprocessor;
+    protected ParserExceptionHandler                 parserExceptionHandler;
+    protected long                                   serverId;
 
     protected abstract BinlogParser buildParser();
 
@@ -170,11 +173,16 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
                         preDump(erosaConnection);
 
                         erosaConnection.connect();// 链接
+
+                        long queryServerId = erosaConnection.queryServerId();
+                        if (queryServerId != 0) {
+                            serverId = queryServerId;
+                        }
                         // 4. 获取最后的位置信息
                         EntryPosition position = findStartPosition(erosaConnection);
                         final EntryPosition startPosition = position;
                         if (startPosition == null) {
-                            throw new CanalParseException("can't find start position for " + destination);
+                            throw new PositionNotFoundException("can't find start position for " + destination);
                         }
 
                         if (!processTableMeta(startPosition)) {
@@ -277,6 +285,9 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
                                 runningInfo.getAddress().toString()), e);
                             sendAlarm(destination, ExceptionUtils.getFullStackTrace(e));
                         }
+                        if (parserExceptionHandler != null) {
+                            parserExceptionHandler.handle(e);
+                        }
                     } finally {
                         // 重新置为中断状态
                         Thread.interrupted();
@@ -426,7 +437,7 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
         return logPosition;
     }
 
-    protected void processSinkError(Throwable e, LogPosition lastPosition, String startBinlogFile, long startPosition) {
+    protected void processSinkError(Throwable e, LogPosition lastPosition, String startBinlogFile, Long startPosition) {
         if (lastPosition != null) {
             logger.warn(String.format("ERROR ## parse this event has an error , last position : [%s]",
                 lastPosition.getPostion()),
@@ -615,4 +626,20 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
         this.parallelBufferSize = parallelBufferSize;
     }
 
+    public ParserExceptionHandler getParserExceptionHandler() {
+        return parserExceptionHandler;
+    }
+
+    public void setParserExceptionHandler(ParserExceptionHandler parserExceptionHandler) {
+        this.parserExceptionHandler = parserExceptionHandler;
+    }
+
+    public long getServerId() {
+        return serverId;
+    }
+
+    public void setServerId(long serverId) {
+        this.serverId = serverId;
+    }
+
 }

+ 2 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/ErosaConnection.java

@@ -40,4 +40,6 @@ public interface ErosaConnection {
     public void dump(GTIDSet gtidSet, MultiStageCoprocessor coprocessor) throws IOException;
 
     ErosaConnection fork();
+
+    public long queryServerId() throws IOException;
 }

+ 5 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/EventTransactionBuffer.java

@@ -80,6 +80,11 @@ public class EventTransactionBuffer extends AbstractCanalLifeCycle {
                     flush();
                 }
                 break;
+            case HEARTBEAT:
+                // master过来的heartbeat,说明binlog已经读完了,是idle状态
+                put(entry);
+                flush();
+                break;
             default:
                 break;
         }

+ 2 - 1
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/MultiStageCoprocessor.java

@@ -2,6 +2,7 @@ package com.alibaba.otter.canal.parse.inbound;
 
 import com.alibaba.otter.canal.common.CanalLifeCycle;
 import com.taobao.tddl.dbsync.binlog.LogBuffer;
+import com.taobao.tddl.dbsync.binlog.LogEvent;
 
 /**
  * 针对解析器提供一个多阶段协同的处理
@@ -23,7 +24,7 @@ public interface MultiStageCoprocessor extends CanalLifeCycle {
      */
     public boolean publish(LogBuffer buffer);
 
-    public boolean publish(LogBuffer buffer, String binlogFileName);
+    public boolean publish(LogEvent event);
 
     public void reset();
 }

+ 9 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/ParserExceptionHandler.java

@@ -0,0 +1,9 @@
+package com.alibaba.otter.canal.parse.inbound;
+
+/**
+ * @author chengjin.lyf on 2018/7/20 下午3:55
+ * @since 1.0.25
+ */
+public interface ParserExceptionHandler {
+    void handle(Throwable e);
+}

+ 44 - 20
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/AbstractMysqlEventParser.java

@@ -1,39 +1,45 @@
 package com.alibaba.otter.canal.parse.inbound.mysql;
 
 import java.nio.charset.Charset;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.alibaba.otter.canal.filter.CanalEventFilter;
 import com.alibaba.otter.canal.filter.aviater.AviaterRegexFilter;
+import com.alibaba.otter.canal.parse.CanalEventParser;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.MysqlGTIDSet;
 import com.alibaba.otter.canal.parse.exception.CanalParseException;
 import com.alibaba.otter.canal.parse.inbound.AbstractEventParser;
 import com.alibaba.otter.canal.parse.inbound.BinlogParser;
 import com.alibaba.otter.canal.parse.inbound.MultiStageCoprocessor;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.LogEventConvert;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.DefaultTableMetaTSDBFactory;
 import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.TableMetaTSDB;
-import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.TableMetaTSDBBuilder;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.TableMetaTSDBFactory;
 import com.alibaba.otter.canal.protocol.position.EntryPosition;
 
 public abstract class AbstractMysqlEventParser extends AbstractEventParser {
 
-    protected final Logger      logger                  = LoggerFactory.getLogger(this.getClass());
-    protected static final long BINLOG_START_OFFEST     = 4L;
+    protected final Logger         logger                    = LoggerFactory.getLogger(this.getClass());
+    protected static final long    BINLOG_START_OFFEST       = 4L;
+
+    protected TableMetaTSDBFactory tableMetaTSDBFactory      = new DefaultTableMetaTSDBFactory();
+    protected boolean              enableTsdb                = false;
+    protected String               tsdbSpringXml;
+    protected TableMetaTSDB        tableMetaTSDB;
 
-    protected boolean           enableTsdb              = false;
-    protected String            tsdbSpringXml;
-    protected TableMetaTSDB     tableMetaTSDB;
     // 编码信息
-    protected byte              connectionCharsetNumber = (byte) 33;
-    protected Charset           connectionCharset       = Charset.forName("UTF-8");
-    protected boolean           filterQueryDcl          = false;
-    protected boolean           filterQueryDml          = false;
-    protected boolean           filterQueryDdl          = false;
-    protected boolean           filterRows              = false;
-    protected boolean           filterTableError        = false;
-    protected boolean           useDruidDdlFilter       = true;
+    protected byte                 connectionCharsetNumber   = (byte) 33;
+    protected Charset              connectionCharset         = Charset.forName("UTF-8");
+    protected boolean              filterQueryDcl            = false;
+    protected boolean              filterQueryDml            = false;
+    protected boolean              filterQueryDdl            = false;
+    protected boolean              filterRows                = false;
+    protected boolean              filterTableError          = false;
+    protected boolean              useDruidDdlFilter         = true;
+    private final AtomicLong       eventsPublishBlockingTime = new AtomicLong(0L);
 
     protected BinlogParser buildParser() {
         LogEventConvert convert = new LogEventConvert();
@@ -91,8 +97,16 @@ public abstract class AbstractMysqlEventParser extends AbstractEventParser {
     public void start() throws CanalParseException {
         if (enableTsdb) {
             if (tableMetaTSDB == null) {
-                // 初始化
-                tableMetaTSDB = TableMetaTSDBBuilder.build(destination, tsdbSpringXml);
+                synchronized (CanalEventParser.class) {
+                    try {
+                        // 设置当前正在加载的通道,加载spring查找文件时会用到该变量
+                        System.setProperty("canal.instance.destination", destination);
+                        // 初始化
+                        tableMetaTSDB = tableMetaTSDBFactory.build(destination, tsdbSpringXml);
+                    } finally {
+                        System.setProperty("canal.instance.destination", "");
+                    }
+                }
             }
         }
 
@@ -101,7 +115,7 @@ public abstract class AbstractMysqlEventParser extends AbstractEventParser {
 
     public void stop() throws CanalParseException {
         if (enableTsdb) {
-            TableMetaTSDBBuilder.destory(destination);
+            tableMetaTSDBFactory.destory(destination);
             tableMetaTSDB = null;
         }
 
@@ -119,11 +133,13 @@ public abstract class AbstractMysqlEventParser extends AbstractEventParser {
     }
 
     protected MultiStageCoprocessor buildMultiStageCoprocessor() {
-        return new MysqlMultiStageCoprocessor(parallelBufferSize,
+        MysqlMultiStageCoprocessor mysqlMultiStageCoprocessor = new MysqlMultiStageCoprocessor(parallelBufferSize,
             parallelThreadSize,
             (LogEventConvert) binlogParser,
             transactionBuffer,
             destination);
+        mysqlMultiStageCoprocessor.setEventsPublishBlockingTime(eventsPublishBlockingTime);
+        return mysqlMultiStageCoprocessor;
     }
 
     // ============================ setter / getter =========================
@@ -173,7 +189,7 @@ public abstract class AbstractMysqlEventParser extends AbstractEventParser {
         if (this.enableTsdb) {
             if (tableMetaTSDB == null) {
                 // 初始化
-                tableMetaTSDB = TableMetaTSDBBuilder.build(destination, tsdbSpringXml);
+                tableMetaTSDB = tableMetaTSDBFactory.build(destination, tsdbSpringXml);
             }
         }
     }
@@ -183,9 +199,17 @@ public abstract class AbstractMysqlEventParser extends AbstractEventParser {
         if (this.enableTsdb) {
             if (tableMetaTSDB == null) {
                 // 初始化
-                tableMetaTSDB = TableMetaTSDBBuilder.build(destination, tsdbSpringXml);
+                tableMetaTSDB = tableMetaTSDBFactory.build(destination, tsdbSpringXml);
             }
         }
     }
 
+    public void setTableMetaTSDBFactory(TableMetaTSDBFactory tableMetaTSDBFactory) {
+        this.tableMetaTSDBFactory = tableMetaTSDBFactory;
+    }
+
+    public AtomicLong getEventsPublishBlockingTime() {
+        return this.eventsPublishBlockingTime;
+    }
+
 }

+ 69 - 12
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/LocalBinLogConnection.java

@@ -10,18 +10,18 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.alibaba.otter.canal.parse.driver.mysql.packets.GTIDSet;
+import com.alibaba.otter.canal.parse.exception.CanalParseException;
+import com.alibaba.otter.canal.parse.exception.ServerIdNotMatchException;
 import com.alibaba.otter.canal.parse.inbound.ErosaConnection;
 import com.alibaba.otter.canal.parse.inbound.MultiStageCoprocessor;
 import com.alibaba.otter.canal.parse.inbound.SinkFunction;
 import com.alibaba.otter.canal.parse.inbound.mysql.local.BinLogFileQueue;
 import com.taobao.tddl.dbsync.binlog.FileLogFetcher;
-import com.taobao.tddl.dbsync.binlog.LogBuffer;
 import com.taobao.tddl.dbsync.binlog.LogContext;
 import com.taobao.tddl.dbsync.binlog.LogDecoder;
 import com.taobao.tddl.dbsync.binlog.LogEvent;
 import com.taobao.tddl.dbsync.binlog.LogPosition;
 import com.taobao.tddl.dbsync.binlog.event.QueryLogEvent;
-import com.taobao.tddl.dbsync.binlog.event.RotateLogEvent;
 
 /**
  * local bin log connection (not real connection)
@@ -36,6 +36,8 @@ public class LocalBinLogConnection implements ErosaConnection {
     private String              directory;
     private int                 bufferSize = 16 * 1024;
     private boolean             running    = false;
+    private long                serverId;
+    private FileParserListener  parserListener;
 
     public LocalBinLogConnection(){
     }
@@ -88,14 +90,14 @@ public class LocalBinLogConnection implements ErosaConnection {
             while (running) {
                 boolean needContinue = true;
                 LogEvent event = null;
-                // 处理一下binlog文件名
-                event = new RotateLogEvent(context.getLogPosition().getFileName(), 4);
-                func.sink(event);
                 while (fetcher.fetch()) {
                     event = decoder.decode(fetcher, context);
                     if (event == null) {
                         continue;
                     }
+                    if (serverId != 0 && event.getServerId() != serverId) {
+                        throw new ServerIdNotMatchException("unexpected serverId " + serverId + " in binlog file !");
+                    }
 
                     if (!func.sink(event)) {
                         needContinue = false;
@@ -103,8 +105,9 @@ public class LocalBinLogConnection implements ErosaConnection {
                     }
                 }
 
+                fetcher.close(); // 关闭上一个文件
+                parserFinish(current.getName());
                 if (needContinue) {// 读取下一个
-                    fetcher.close(); // 关闭上一个文件
 
                     File nextFile;
                     if (needWait) {
@@ -160,6 +163,10 @@ public class LocalBinLogConnection implements ErosaConnection {
                 while (fetcher.fetch()) {
                     LogEvent event = decoder.decode(fetcher, context);
                     if (event != null) {
+                        if (serverId != 0 && event.getServerId() != serverId) {
+                            throw new ServerIdNotMatchException("unexpected serverId " + serverId + " in binlog file !");
+                        }
+
                         if (event.getWhen() > timestampSeconds) {
                             break;
                         }
@@ -176,6 +183,9 @@ public class LocalBinLogConnection implements ErosaConnection {
                         } else if (LogEvent.XID_EVENT == event.getHeader().getType()) {
                             lastXidLogFilename = current.getName();
                             lastXidLogFileOffset = event.getLogPos();
+                        } else if (LogEvent.FORMAT_DESCRIPTION_EVENT == event.getHeader().getType()) {
+                            lastXidLogFilename = current.getName();
+                            lastXidLogFileOffset = event.getLogPos();
                         }
                     }
                 }
@@ -212,25 +222,37 @@ public class LocalBinLogConnection implements ErosaConnection {
     @Override
     public void dump(String binlogfilename, Long binlogPosition, MultiStageCoprocessor coprocessor) throws IOException {
         File current = new File(directory, binlogfilename);
+        if (!current.exists()) {
+            throw new CanalParseException("binlog:" + binlogfilename + " is not found");
+        }
 
         FileLogFetcher fetcher = new FileLogFetcher(bufferSize);
+        LogDecoder decoder = new LogDecoder(LogEvent.UNKNOWN_EVENT, LogEvent.ENUM_END_EVENT);
+        LogContext context = new LogContext();
         try {
             fetcher.open(current, binlogPosition);
+            context.setLogPosition(new LogPosition(binlogfilename, binlogPosition));
             while (running) {
                 boolean needContinue = true;
+                LogEvent event = null;
                 while (fetcher.fetch()) {
-                    LogBuffer buffer = fetcher.duplicate();
-                    fetcher.consume(fetcher.limit());
-                    // set filename
-                    if (!coprocessor.publish(buffer, binlogfilename)) {
+                    event = decoder.decode(fetcher, context);
+                    if (event == null) {
+                        continue;
+                    }
+                    if (serverId != 0 && event.getServerId() != serverId) {
+                        throw new ServerIdNotMatchException("unexpected serverId " + serverId + " in binlog file !");
+                    }
+
+                    if (!coprocessor.publish(event)) {
                         needContinue = false;
                         break;
                     }
                 }
 
+                fetcher.close(); // 关闭上一个文件
+                parserFinish(binlogfilename);
                 if (needContinue) {// 读取下一个
-                    fetcher.close(); // 关闭上一个文件
-
                     File nextFile;
                     if (needWait) {
                         nextFile = binlogs.waitForNextFile(current);
@@ -258,6 +280,12 @@ public class LocalBinLogConnection implements ErosaConnection {
         }
     }
 
+    private void parserFinish(String fileName) {
+        if (parserListener != null) {
+            parserListener.onFinish(fileName);
+        }
+    }
+
     @Override
     public void dump(long timestampMills, MultiStageCoprocessor coprocessor) throws IOException {
         List<File> currentBinlogs = binlogs.currentBinlogs();
@@ -286,6 +314,10 @@ public class LocalBinLogConnection implements ErosaConnection {
                 while (fetcher.fetch()) {
                     LogEvent event = decoder.decode(fetcher, context);
                     if (event != null) {
+                        if (serverId != 0 && event.getServerId() != serverId) {
+                            throw new ServerIdNotMatchException("unexpected serverId " + serverId + " in binlog file !");
+                        }
+
                         if (event.getWhen() > timestampSeconds) {
                             break;
                         }
@@ -302,6 +334,9 @@ public class LocalBinLogConnection implements ErosaConnection {
                         } else if (LogEvent.XID_EVENT == event.getHeader().getType()) {
                             lastXidLogFilename = current.getName();
                             lastXidLogFileOffset = event.getLogPos();
+                        } else if (LogEvent.FORMAT_DESCRIPTION_EVENT == event.getHeader().getType()) {
+                            lastXidLogFilename = current.getName();
+                            lastXidLogFileOffset = event.getLogPos();
                         }
                     }
                 }
@@ -344,6 +379,11 @@ public class LocalBinLogConnection implements ErosaConnection {
         return connection;
     }
 
+    @Override
+    public long queryServerId() {
+        return 0;
+    }
+
     public boolean isNeedWait() {
         return needWait;
     }
@@ -368,4 +408,21 @@ public class LocalBinLogConnection implements ErosaConnection {
         this.bufferSize = bufferSize;
     }
 
+    public long getServerId() {
+        return serverId;
+    }
+
+    public void setServerId(long serverId) {
+        this.serverId = serverId;
+    }
+
+    public void setParserListener(FileParserListener parserListener) {
+        this.parserListener = parserListener;
+    }
+
+    public interface FileParserListener {
+
+        void onFinish(String fileName);
+    }
+
 }

+ 29 - 1
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlConnection.java

@@ -7,8 +7,10 @@ import java.net.InetSocketAddress;
 import java.nio.charset.Charset;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.math.NumberUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -49,6 +51,8 @@ public class MysqlConnection implements ErosaConnection {
     private AuthenticationInfo  authInfo;
     protected int               connTimeout = 5 * 1000;                                      // 5秒
     protected int               soTimeout   = 60 * 60 * 1000;                                // 1小时
+    // dump binlog bytes, 暂不包括meta与TSDB
+    private AtomicLong          receivedBinlogBytes;
 
     public MysqlConnection(){
     }
@@ -124,6 +128,7 @@ public class MysqlConnection implements ErosaConnection {
         decoder.handle(LogEvent.XID_EVENT);
         LogContext context = new LogContext();
         while (fetcher.fetch()) {
+            accumulateReceivedBytes(fetcher.limit());
             LogEvent event = null;
             event = decoder.decode(fetcher, context);
 
@@ -146,6 +151,7 @@ public class MysqlConnection implements ErosaConnection {
         LogDecoder decoder = new LogDecoder(LogEvent.UNKNOWN_EVENT, LogEvent.ENUM_END_EVENT);
         LogContext context = new LogContext();
         while (fetcher.fetch()) {
+            accumulateReceivedBytes(fetcher.limit());
             LogEvent event = null;
             event = decoder.decode(fetcher, context);
 
@@ -174,6 +180,7 @@ public class MysqlConnection implements ErosaConnection {
             LogDecoder decoder = new LogDecoder(LogEvent.UNKNOWN_EVENT, LogEvent.ENUM_END_EVENT);
             LogContext context = new LogContext();
             while (fetcher.fetch()) {
+                accumulateReceivedBytes(fetcher.limit());
                 LogEvent event = null;
                 event = decoder.decode(fetcher, context);
 
@@ -204,6 +211,7 @@ public class MysqlConnection implements ErosaConnection {
         try {
             fetcher.start(connector.getChannel());
             while (fetcher.fetch()) {
+                accumulateReceivedBytes(fetcher.limit());
                 LogBuffer buffer = fetcher.duplicate();
                 fetcher.consume(fetcher.limit());
                 if (!coprocessor.publish(buffer)) {
@@ -230,6 +238,7 @@ public class MysqlConnection implements ErosaConnection {
         try {
             fetcher.start(connector.getChannel());
             while (fetcher.fetch()) {
+                accumulateReceivedBytes(fetcher.limit());
                 LogBuffer buffer = fetcher.duplicate();
                 fetcher.consume(fetcher.limit());
                 if (!coprocessor.publish(buffer)) {
@@ -324,6 +333,16 @@ public class MysqlConnection implements ErosaConnection {
         return connection;
     }
 
+    @Override
+    public long queryServerId() throws IOException {
+        ResultSetPacket resultSetPacket = query("show variables like 'server_id'");
+        List<String> fieldValues = resultSetPacket.getFieldValues();
+        if (fieldValues == null || fieldValues.size() != 2) {
+            return 0;
+        }
+        return NumberUtils.toLong(fieldValues.get(1));
+    }
+
     // ====================== help method ====================
 
     /**
@@ -334,7 +353,6 @@ public class MysqlConnection implements ErosaConnection {
      * <li>net_read_timeout</li>
      * </ol>
      * 
-     * @param channel
      * @throws IOException
      */
     private void updateSettings() throws IOException {
@@ -453,6 +471,12 @@ public class MysqlConnection implements ErosaConnection {
         }
     }
 
+    private void accumulateReceivedBytes(long x) {
+        if (receivedBinlogBytes != null) {
+            receivedBinlogBytes.addAndGet(x);
+        }
+    }
+
     public static enum BinlogFormat {
 
         STATEMENT("STATEMENT"), ROW("ROW"), MIXED("MIXED");
@@ -592,4 +616,8 @@ public class MysqlConnection implements ErosaConnection {
         this.authInfo = authInfo;
     }
 
+    public void setReceivedBinlogBytes(AtomicLong receivedBinlogBytes) {
+        this.receivedBinlogBytes = receivedBinlogBytes;
+    }
+
 }

+ 32 - 29
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlEventParser.java

@@ -47,30 +47,32 @@ import com.taobao.tddl.dbsync.binlog.LogEvent;
  */
 public class MysqlEventParser extends AbstractMysqlEventParser implements CanalEventParser, CanalHASwitchable {
 
-    private CanalHAController  haController                      = null;
+    private CanalHAController    haController                      = null;
 
-    private int                defaultConnectionTimeoutInSeconds = 30;       // sotimeout
-    private int                receiveBufferSize                 = 64 * 1024;
-    private int                sendBufferSize                    = 64 * 1024;
+    private int                  defaultConnectionTimeoutInSeconds = 30;                // sotimeout
+    private int                  receiveBufferSize                 = 64 * 1024;
+    private int                  sendBufferSize                    = 64 * 1024;
     // 数据库信息
-    private AuthenticationInfo masterInfo;                                   // 主库
-    private AuthenticationInfo standbyInfo;                                  // 备库
+    protected AuthenticationInfo masterInfo;                                            // 主库
+    protected AuthenticationInfo standbyInfo;                                           // 备库
     // binlog信息
-    private EntryPosition      masterPosition;
-    private EntryPosition      standbyPosition;
-    private long               slaveId;                                      // 链接到mysql的slave
+    protected EntryPosition      masterPosition;
+    protected EntryPosition      standbyPosition;
+    private long                 slaveId;                                               // 链接到mysql的slave
     // 心跳检查信息
-    private String             detectingSQL;                                 // 心跳sql
-    private MysqlConnection    metaConnection;                               // 查询meta信息的链接
-    private TableMetaCache     tableMetaCache;                               // 对应meta
-                                                                              // cache
-    private int                fallbackIntervalInSeconds         = 60;       // 切换回退时间
-    private BinlogFormat[]     supportBinlogFormats;                         // 支持的binlogFormat,如果设置会执行强校验
-    private BinlogImage[]      supportBinlogImages;                          // 支持的binlogImage,如果设置会执行强校验
+    private String               detectingSQL;                                          // 心跳sql
+    private MysqlConnection      metaConnection;                                        // 查询meta信息的链接
+    private TableMetaCache       tableMetaCache;                                        // 对应meta
+    private int                  fallbackIntervalInSeconds         = 60;                // 切换回退时间
+    private BinlogFormat[]       supportBinlogFormats;                                  // 支持的binlogFormat,如果设置会执行强校验
+    private BinlogImage[]        supportBinlogImages;                                   // 支持的binlogImage,如果设置会执行强校验
 
     // update by yishun.chen,特殊异常处理参数
-    private int                dumpErrorCount                    = 0;        // binlogDump失败异常计数
-    private int                dumpErrorCountThreshold           = 2;        // binlogDump失败异常计数阀值
+    private int                  dumpErrorCount                    = 0;                 // binlogDump失败异常计数
+    private int                  dumpErrorCountThreshold           = 2;                 // binlogDump失败异常计数阀值
+
+    // instance received binlog bytes
+    private final AtomicLong     receivedBinlogBytes               = new AtomicLong(0L);
 
     protected ErosaConnection buildErosaConnection() {
         return buildMysqlConnection(this.runningInfo);
@@ -314,6 +316,7 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
         connection.getConnector().setSendBufferSize(sendBufferSize);
         connection.getConnector().setSoTimeout(defaultConnectionTimeoutInSeconds * 1000);
         connection.setCharset(connectionCharset);
+        connection.setReceivedBinlogBytes(receivedBinlogBytes);
         // 随机生成slaveId
         if (this.slaveId <= 0) {
             this.slaveId = generateUniqueServerId();
@@ -512,7 +515,7 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
     private Long findTransactionBeginPosition(ErosaConnection mysqlConnection, final EntryPosition entryPosition)
                                                                                                                  throws IOException {
         // 针对开始的第一条为非Begin记录,需要从该binlog扫描
-        final AtomicLong preTransactionStartPosition = new AtomicLong(0L);
+        final java.util.concurrent.atomic.AtomicLong preTransactionStartPosition = new java.util.concurrent.atomic.AtomicLong(0L);
         mysqlConnection.reconnect();
         mysqlConnection.seek(entryPosition.getJournalName(), 4L, new SinkFunction<LogEvent>() {
 
@@ -643,6 +646,9 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
                 throw new CanalParseException("command : 'show master status' has an error! pls check. you need (at least one of) the SUPER,REPLICATION CLIENT privilege(s) for this operation");
             }
             EntryPosition endPosition = new EntryPosition(fields.get(0), Long.valueOf(fields.get(1)));
+            if (isGTIDMode && fields.size() > 4) {
+                endPosition.setGtid(fields.get(4));
+            }
             return endPosition;
         } catch (IOException e) {
             throw new CanalParseException("command : 'show master status' has an error!", e);
@@ -750,16 +756,9 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
                         Long logposTimestamp = entry.getHeader().getExecuteTime();
                         Long serverId = entry.getHeader().getServerId();
 
-                        if (CanalEntry.EntryType.TRANSACTIONBEGIN.equals(entry.getEntryType())
-                            || CanalEntry.EntryType.TRANSACTIONEND.equals(entry.getEntryType())) {
-                            if (logger.isDebugEnabled()) {
-                                logger.debug("compare exit condition:{},{},{}, startTimestamp={}...", new Object[] {
-                                        logfilename, logfileoffset, logposTimestamp, startTimestamp });
-                            }
-                            // 事务头和尾寻找第一条记录时间戳,如果最小的一条记录都不满足条件,可直接退出
-                            if (logposTimestamp >= startTimestamp) {
-                                return false;
-                            }
+                        // 如果最小的一条记录都不满足条件,可直接退出
+                        if (logposTimestamp >= startTimestamp) {
+                            return false;
                         }
 
                         if (StringUtils.equals(endPosition.getJournalName(), logfilename)
@@ -908,4 +907,8 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
         this.dumpErrorCountThreshold = dumpErrorCountThreshold;
     }
 
+    public AtomicLong getReceivedBinlogBytes() {
+        return this.receivedBinlogBytes;
+    }
+
 }

+ 50 - 30
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlMultiStageCoprocessor.java

@@ -2,10 +2,9 @@ package com.alibaba.otter.canal.parse.inbound.mysql;
 
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.LockSupport;
 
-import org.apache.commons.lang.StringUtils;
-
 import com.alibaba.otter.canal.common.AbstractCanalLifeCycle;
 import com.alibaba.otter.canal.common.utils.NamedThreadFactory;
 import com.alibaba.otter.canal.parse.exception.CanalParseException;
@@ -31,7 +30,6 @@ import com.taobao.tddl.dbsync.binlog.LogBuffer;
 import com.taobao.tddl.dbsync.binlog.LogContext;
 import com.taobao.tddl.dbsync.binlog.LogDecoder;
 import com.taobao.tddl.dbsync.binlog.LogEvent;
-import com.taobao.tddl.dbsync.binlog.LogPosition;
 import com.taobao.tddl.dbsync.binlog.event.DeleteRowsLogEvent;
 import com.taobao.tddl.dbsync.binlog.event.RowsLogEvent;
 import com.taobao.tddl.dbsync.binlog.event.UpdateRowsLogEvent;
@@ -52,6 +50,7 @@ import com.taobao.tddl.dbsync.binlog.event.WriteRowsLogEvent;
  */
 public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implements MultiStageCoprocessor {
 
+    private static final int             maxFullTimes = 10;
     private LogEventConvert              logEventConvert;
     private EventTransactionBuffer       transactionBuffer;
     private ErosaConnection              connection;
@@ -63,6 +62,7 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
     private ExecutorService              stageExecutor;
     private String                       destination;
     private volatile CanalParseException exception;
+    private AtomicLong                   eventsPublishBlockingTime;
 
     public MysqlMultiStageCoprocessor(int ringBufferSize, int parserThreadCount, LogEventConvert logEventConvert,
                                       EventTransactionBuffer transactionBuffer, String destination){
@@ -138,14 +138,18 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
         super.stop();
     }
 
+    public boolean publish(LogBuffer buffer) {
+        return this.publish(buffer, null);
+    }
+
     /**
      * 网络数据投递
      */
-    public boolean publish(LogBuffer buffer) {
-        return publish(buffer, null);
+    public boolean publish(LogEvent event) {
+        return this.publish(null, event);
     }
 
-    public boolean publish(LogBuffer buffer, String binlogFileName) {
+    private boolean publish(LogBuffer buffer, LogEvent event) {
         if (!isStart()) {
             if (exception != null) {
                 throw exception;
@@ -161,25 +165,51 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
             throw exception;
         }
         boolean interupted = false;
+        long blockingStart = 0L;
+        int fullTimes = 0;
         do {
             try {
                 long next = disruptorMsgBuffer.tryNext();
-                MessageEvent event = disruptorMsgBuffer.get(next);
-                event.setBuffer(buffer);
-                if (binlogFileName != null) {
-                    event.setBinlogFileName(binlogFileName);
+                MessageEvent data = disruptorMsgBuffer.get(next);
+                if (buffer != null) {
+                    data.setBuffer(buffer);
+                } else {
+                    data.setEvent(event);
                 }
                 disruptorMsgBuffer.publish(next);
+                if (fullTimes > 0) {
+                    eventsPublishBlockingTime.addAndGet(System.nanoTime() - blockingStart);
+                }
                 break;
             } catch (InsufficientCapacityException e) {
+                if (fullTimes == 0) {
+                    blockingStart = System.nanoTime();
+                }
                 // park
-                LockSupport.parkNanos(1L);
+                // LockSupport.parkNanos(1L);
+                applyWait(++fullTimes);
                 interupted = Thread.interrupted();
+                if (fullTimes % 1000 == 0) {
+                    long nextStart = System.nanoTime();
+                    eventsPublishBlockingTime.addAndGet(nextStart - blockingStart);
+                    blockingStart = nextStart;
+                }
             }
         } while (!interupted && isStart());
         return isStart();
     }
 
+    // 处理无数据的情况,避免空循环挂死
+    private void applyWait(int fullTimes) {
+        int newFullTimes = fullTimes > maxFullTimes ? maxFullTimes : fullTimes;
+        if (fullTimes <= 3) { // 3次以内
+            Thread.yield();
+        } else { // 超过3次,最多只sleep 1ms
+            LockSupport.parkNanos(100 * 1000L * newFullTimes);
+        }
+
+    }
+
     @Override
     public void reset() {
         if (isStart()) {
@@ -201,17 +231,13 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
 
         public void onEvent(MessageEvent event, long sequence, boolean endOfBatch) throws Exception {
             try {
-                LogBuffer buffer = event.getBuffer();
-                if (StringUtils.isNotEmpty(event.getBinlogFileName())
-                    && !context.getLogPosition().getFileName().equals(event.getBinlogFileName())) {
-                    // set roate binlog file name
-                    context.setLogPosition(new LogPosition(event.getBinlogFileName(), context.getLogPosition()
-                        .getPosition()));
+                LogEvent logEvent = event.getEvent();
+                if (logEvent == null) {
+                    LogBuffer buffer = event.getBuffer();
+                    logEvent = decoder.decode(buffer, context);
+                    event.setEvent(logEvent);
                 }
 
-                LogEvent logEvent = decoder.decode(buffer, context);
-                event.setEvent(logEvent);
-
                 int eventType = logEvent.getHeader().getType();
                 TableMeta tableMeta = null;
                 boolean needDmlParse = false;
@@ -312,7 +338,6 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
 
                 // clear for gc
                 event.setBuffer(null);
-                event.setBinlogFileName(null);
                 event.setEvent(null);
                 event.setTable(null);
                 event.setEntry(null);
@@ -336,21 +361,12 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
 
     class MessageEvent {
 
-        private String           binlogFileName;      // for local binlog parse
         private LogBuffer        buffer;
         private CanalEntry.Entry entry;
         private boolean          needDmlParse = false;
         private TableMeta        table;
         private LogEvent         event;
 
-        public String getBinlogFileName() {
-            return binlogFileName;
-        }
-
-        public void setBinlogFileName(String binlogFileName) {
-            this.binlogFileName = binlogFileName;
-        }
-
         public LogBuffer getBuffer() {
             return buffer;
         }
@@ -427,4 +443,8 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
         this.connection = connection;
     }
 
+    public void setEventsPublishBlockingTime(AtomicLong eventsPublishBlockingTime) {
+        this.eventsPublishBlockingTime = eventsPublishBlockingTime;
+    }
+
 }

+ 26 - 6
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/LogEventConvert.java

@@ -42,6 +42,7 @@ import com.google.protobuf.ByteString;
 import com.taobao.tddl.dbsync.binlog.LogEvent;
 import com.taobao.tddl.dbsync.binlog.event.DeleteRowsLogEvent;
 import com.taobao.tddl.dbsync.binlog.event.GtidLogEvent;
+import com.taobao.tddl.dbsync.binlog.event.HeartbeatLogEvent;
 import com.taobao.tddl.dbsync.binlog.event.IntvarLogEvent;
 import com.taobao.tddl.dbsync.binlog.event.LogHeader;
 import com.taobao.tddl.dbsync.binlog.event.QueryLogEvent;
@@ -144,6 +145,8 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
                 return parseRandLogEvent((RandLogEvent) logEvent);
             case LogEvent.GTID_LOG_EVENT:
                 return parseGTIDLogEvent((GtidLogEvent) logEvent);
+            case LogEvent.HEARTBEAT_LOG_EVENT:
+                return parseHeartbeatLogEvent((HeartbeatLogEvent) logEvent);
             default:
                 break;
         }
@@ -158,6 +161,15 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
         }
     }
 
+    private Entry parseHeartbeatLogEvent(HeartbeatLogEvent logEvent) {
+        Header.Builder headerBuilder = Header.newBuilder();
+        headerBuilder.setEventType(EventType.MHEARTBEAT);
+        Entry.Builder entryBuilder = Entry.newBuilder();
+        entryBuilder.setHeader(headerBuilder.build());
+        entryBuilder.setEntryType(EntryType.HEARTBEAT);
+        return entryBuilder.build();
+    }
+
     private Entry parseGTIDLogEvent(GtidLogEvent logEvent) {
         LogHeader logHeader = logEvent.getHeader();
         String value = logEvent.getSid().toString() + ":" + logEvent.getGno();
@@ -541,12 +553,16 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
                     tableError |= parseOneRow(rowDataBuilder, event, buffer, changeColumns, true, tableMeta);
                 }
 
-                rowsCount ++;
+                rowsCount++;
                 rowChangeBuider.addRowDatas(rowDataBuilder.build());
             }
 
             TableMapLogEvent table = event.getTable();
-            Header header = createHeader(event.getHeader(), table.getDbName(), table.getTableName(), eventType, rowsCount);
+            Header header = createHeader(event.getHeader(),
+                table.getDbName(),
+                table.getTableName(),
+                eventType,
+                rowsCount);
 
             RowChange rowChange = rowChangeBuider.build();
             if (tableError) {
@@ -755,6 +771,8 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
                         } else {
                             // byte数组,直接使用iso-8859-1保留对应编码,浪费内存
                             columnBuilder.setValue(new String((byte[]) value, ISO_8859_1));
+                            // columnBuilder.setValueBytes(ByteString.copyFrom((byte[])
+                            // value));
                             javaType = Types.BLOB;
                         }
                         break;
@@ -801,12 +819,12 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
         return createEntry(header, EntryType.ROWDATA, rowChangeBuider.build().toByteString());
     }
 
-
     private Header createHeader(LogHeader logHeader, String schemaName, String tableName, EventType eventType) {
         return createHeader(logHeader, schemaName, tableName, eventType, -1);
     }
 
-    private Header createHeader(LogHeader logHeader, String schemaName, String tableName, EventType eventType, Integer rowsCount) {
+    private Header createHeader(LogHeader logHeader, String schemaName, String tableName, EventType eventType,
+                                Integer rowsCount) {
         // header会做信息冗余,方便以后做检索或者过滤
         Header.Builder headerBuilder = Header.newBuilder();
         headerBuilder.setVersion(version);
@@ -869,11 +887,14 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
     private TableMeta getTableMeta(String dbName, String tbName, boolean useCache, EntryPosition position) {
         try {
             return tableMetaCache.getTableMeta(dbName, tbName, useCache, position);
-        } catch (Exception e) {
+        } catch (Throwable e) {
             String message = ExceptionUtils.getRootCauseMessage(e);
             if (filterTableError) {
                 if (StringUtils.contains(message, "errorNumber=1146") && StringUtils.contains(message, "doesn't exist")) {
                     return null;
+                } else if (StringUtils.contains(message, "errorNumber=1142")
+                           && StringUtils.contains(message, "command denied")) {
+                    return null;
                 }
             }
 
@@ -960,5 +981,4 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
     public void setGtidSet(GTIDSet gtidSet) {
         this.gtidSet = gtidSet;
     }
-
 }

+ 46 - 16
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/local/BinLogFileQueue.java

@@ -25,13 +25,14 @@ import com.alibaba.otter.canal.parse.exception.CanalParseException;
  */
 public class BinLogFileQueue {
 
-    private String        baseName       = "mysql-bin.";
-    private List<File>    binlogs        = new ArrayList<File>();
-    private File          directory;
-    private ReentrantLock lock           = new ReentrantLock();
-    private Condition     nextCondition  = lock.newCondition();
-    private Timer         timer          = new Timer(true);
-    private long          reloadInterval = 10 * 1000L;           // 10秒
+    private String              baseName       = "mysql-bin.";
+    private List<File>          binlogs        = new ArrayList<File>();
+    private File                directory;
+    private ReentrantLock       lock           = new ReentrantLock();
+    private Condition           nextCondition  = lock.newCondition();
+    private Timer               timer          = new Timer(true);
+    private long                reloadInterval = 10 * 1000L;           // 10秒
+    private CanalParseException exception      = null;
 
     public BinLogFileQueue(String directory){
         this(new File(directory));
@@ -52,9 +53,24 @@ public class BinLogFileQueue {
         timer.scheduleAtFixedRate(new TimerTask() {
 
             public void run() {
-                List<File> files = listBinlogFiles();
-                for (File file : files) {
-                    offer(file);
+                try {
+                    // File errorFile = new File(BinLogFileQueue.this.directory,
+                    // errorFileName);
+                    // if (errorFile.isFile() && errorFile.exists()) {
+                    // String text = StringUtils.join(IOUtils.readLines(new
+                    // FileInputStream(errorFile)), "\n");
+                    // exception = new CanalParseException(text);
+                    // }
+                    List<File> files = listBinlogFiles();
+                    for (File file : files) {
+                        offer(file);
+                    }
+                } catch (Throwable e) {
+                    exception = new CanalParseException(e);
+                }
+
+                if (exception != null) {
+                    offer(null);
                 }
             }
         }, reloadInterval, reloadInterval);
@@ -69,6 +85,10 @@ public class BinLogFileQueue {
     public File getNextFile(File pre) {
         try {
             lock.lockInterruptibly();
+            if (exception != null) {
+                throw exception;
+            }
+
             if (binlogs.size() == 0) {
                 return null;
             } else {
@@ -94,6 +114,10 @@ public class BinLogFileQueue {
     public File getBefore(File file) {
         try {
             lock.lockInterruptibly();
+            if (exception != null) {
+                throw exception;
+            }
+
             if (binlogs.size() == 0) {
                 return null;
             } else {
@@ -130,6 +154,9 @@ public class BinLogFileQueue {
                 nextCondition.await();// 等待新文件
             }
 
+            if (exception != null) {
+                throw exception;
+            }
             if (pre == null) {// 第一次
                 return binlogs.get(0);
             } else {
@@ -170,13 +197,16 @@ public class BinLogFileQueue {
     private boolean offer(File file) {
         try {
             lock.lockInterruptibly();
-            if (!binlogs.contains(file)) {
-                binlogs.add(file);
-                nextCondition.signalAll();// 唤醒
-                return true;
-            } else {
-                return false;
+            if (file != null) {
+                if (!binlogs.contains(file)) {
+                    binlogs.add(file);
+                    nextCondition.signalAll();// 唤醒
+                    return true;
+                }
             }
+
+            nextCondition.signalAll();// 唤醒
+            return false;
         } catch (InterruptedException e) {
             Thread.currentThread().interrupt();
             return false;

+ 339 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/BinlogDownloadQueue.java

@@ -0,0 +1,339 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.rds;
+
+import io.netty.handler.codec.http.HttpResponseStatus;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.security.cert.CertificateException;
+import java.security.cert.X509Certificate;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.LockSupport;
+
+import javax.net.ssl.SSLContext;
+
+import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
+import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.config.RequestConfig;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.config.RegistryBuilder;
+import org.apache.http.conn.socket.ConnectionSocketFactory;
+import org.apache.http.conn.socket.PlainConnectionSocketFactory;
+import org.apache.http.conn.ssl.NoopHostnameVerifier;
+import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
+import org.apache.http.ssl.SSLContextBuilder;
+import org.apache.http.ssl.TrustStrategy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.alibaba.otter.canal.parse.inbound.mysql.rds.data.BinlogFile;
+
+/**
+ * @author chengjin.lyf on 2018/8/7 下午3:10
+ * @since 1.0.25
+ */
+public class BinlogDownloadQueue {
+
+    private static final Logger             logger        = LoggerFactory.getLogger(BinlogDownloadQueue.class);
+    private static final int                TIMEOUT       = 10000;
+
+    private LinkedBlockingQueue<BinlogFile> downloadQueue = new LinkedBlockingQueue<BinlogFile>();
+    private LinkedBlockingQueue<Runnable>   taskQueue     = new LinkedBlockingQueue<Runnable>();
+    private LinkedList<BinlogFile>          binlogList;
+    private final int                       batchFileSize;
+    private Thread                          downloadThread;
+    public boolean                          running       = true;
+    private final String                    destDir;
+    private String                          hostId;
+    private int                             currentSize;
+    private String                          lastDownload;
+
+    public BinlogDownloadQueue(List<BinlogFile> downloadQueue, int batchFileSize, String destDir) throws IOException{
+        this.binlogList = new LinkedList(downloadQueue);
+        this.batchFileSize = batchFileSize;
+        this.destDir = destDir;
+        this.currentSize = 0;
+        prepareBinlogList();
+        cleanDir();
+    }
+
+    private void prepareBinlogList() {
+        for (BinlogFile binlog : this.binlogList) {
+            String fileName = StringUtils.substringBetween(binlog.getDownloadLink(), "mysql-bin.", "?");
+            binlog.setFileName(fileName);
+        }
+        Collections.sort(this.binlogList, new Comparator<BinlogFile>() {
+
+            @Override
+            public int compare(BinlogFile o1, BinlogFile o2) {
+                return o1.getFileName().compareTo(o2.getFileName());
+            }
+        });
+    }
+
+    public void cleanDir() throws IOException {
+        File destDirFile = new File(destDir);
+        FileUtils.forceMkdir(destDirFile);
+        FileUtils.cleanDirectory(destDirFile);
+    }
+
+    public void silenceDownload() {
+        if (downloadThread != null) {
+            return;
+        }
+        downloadThread = new Thread(new DownloadThread(), "download-" + destDir);
+        downloadThread.setDaemon(true);
+        downloadThread.start();
+    }
+
+    public BinlogFile tryOne() throws Throwable {
+        BinlogFile binlogFile = binlogList.poll();
+        download(binlogFile);
+        hostId = binlogFile.getHostInstanceID();
+        this.currentSize++;
+        return binlogFile;
+    }
+
+    public void notifyNotMatch() {
+        this.currentSize--;
+        filter(hostId);
+    }
+
+    private void filter(String hostInstanceId) {
+        Iterator<BinlogFile> it = binlogList.iterator();
+        while (it.hasNext()) {
+            BinlogFile bf = it.next();
+            if (bf.getHostInstanceID().equalsIgnoreCase(hostInstanceId)) {
+                it.remove();
+            } else {
+                hostId = bf.getHostInstanceID();
+            }
+        }
+    }
+
+    public boolean isLastFile(String fileName) {
+        String needCompareName = lastDownload;
+        if (StringUtils.isNotEmpty(needCompareName) && StringUtils.endsWith(needCompareName, "tar")) {
+            needCompareName = needCompareName.substring(0, needCompareName.indexOf("."));
+        }
+        return fileName.equalsIgnoreCase(needCompareName) && binlogList.isEmpty();
+    }
+
+    public void prepare() throws InterruptedException {
+        for (int i = this.currentSize; i < batchFileSize && !binlogList.isEmpty(); i++) {
+            BinlogFile binlogFile = null;
+            while (!binlogList.isEmpty()) {
+                binlogFile = binlogList.poll();
+                if (!binlogFile.getHostInstanceID().equalsIgnoreCase(hostId)) {
+                    continue;
+                }
+                break;
+            }
+            if (binlogFile == null) {
+                break;
+            }
+            this.downloadQueue.put(binlogFile);
+            this.lastDownload = "mysql-bin." + binlogFile.getFileName();
+            this.currentSize++;
+        }
+    }
+
+    public void downOne() {
+        this.currentSize--;
+    }
+
+    public void release() {
+        running = false;
+        this.currentSize = 0;
+        binlogList.clear();
+        downloadQueue.clear();
+    }
+
+    private void download(BinlogFile binlogFile) throws Throwable {
+        String downloadLink = binlogFile.getDownloadLink();
+        String fileName = binlogFile.getFileName();
+
+        downloadLink = downloadLink.trim();
+        CloseableHttpClient httpClient = null;
+        if (downloadLink.startsWith("https")) {
+            HttpClientBuilder builder = HttpClientBuilder.create();
+            builder.setMaxConnPerRoute(50);
+            builder.setMaxConnTotal(100);
+            // 创建支持忽略证书的https
+            final SSLContext sslContext = new SSLContextBuilder().loadTrustMaterial(null, new TrustStrategy() {
+
+                @Override
+                public boolean isTrusted(X509Certificate[] x509Certificates, String s) throws CertificateException {
+                    return true;
+                }
+            }).build();
+
+            httpClient = HttpClientBuilder.create()
+                .setSSLContext(sslContext)
+                .setConnectionManager(new PoolingHttpClientConnectionManager(RegistryBuilder.<ConnectionSocketFactory> create()
+                    .register("http", PlainConnectionSocketFactory.INSTANCE)
+                    .register("https", new SSLConnectionSocketFactory(sslContext, NoopHostnameVerifier.INSTANCE))
+                    .build()))
+                .build();
+        } else {
+            httpClient = HttpClientBuilder.create().setMaxConnPerRoute(50).setMaxConnTotal(100).build();
+        }
+
+        HttpGet httpGet = new HttpGet(downloadLink);
+        RequestConfig requestConfig = RequestConfig.custom()
+            .setConnectTimeout(TIMEOUT)
+            .setConnectionRequestTimeout(TIMEOUT)
+            .setSocketTimeout(TIMEOUT)
+            .build();
+        httpGet.setConfig(requestConfig);
+        HttpResponse response = httpClient.execute(httpGet);
+        int statusCode = response.getStatusLine().getStatusCode();
+        if (statusCode != HttpResponseStatus.OK.code()) {
+            throw new RuntimeException("download failed , url:" + downloadLink + " , statusCode:" + statusCode);
+        }
+        saveFile(new File(destDir), "mysql-bin." + fileName, response);
+    }
+
+    private static void saveFile(File parentFile, String fileName, HttpResponse response) throws IOException {
+        InputStream is = response.getEntity().getContent();
+        long totalSize = Long.parseLong(response.getFirstHeader("Content-Length").getValue());
+        if (response.getFirstHeader("Content-Disposition") != null) {
+            fileName = response.getFirstHeader("Content-Disposition").getValue();
+            fileName = StringUtils.substringAfter(fileName, "filename=");
+        }
+        boolean isTar = StringUtils.endsWith(fileName, ".tar");
+        FileUtils.forceMkdir(parentFile);
+        FileOutputStream fos = null;
+        try {
+            if (isTar) {
+                TarArchiveInputStream tais = new TarArchiveInputStream(is);
+                TarArchiveEntry tarArchiveEntry = null;
+                while ((tarArchiveEntry = tais.getNextTarEntry()) != null) {
+                    String name = tarArchiveEntry.getName();
+                    File tarFile = new File(parentFile, name + ".tmp");
+                    logger.info("start to download file " + tarFile.getName());
+                    if (tarFile.exists()) {
+                        tarFile.delete();
+                    }
+                    BufferedOutputStream bos = null;
+                    try {
+                        bos = new BufferedOutputStream(new FileOutputStream(tarFile));
+                        int read = -1;
+                        byte[] buffer = new byte[1024];
+                        while ((read = tais.read(buffer)) != -1) {
+                            bos.write(buffer, 0, read);
+                        }
+                        logger.info("download file " + tarFile.getName() + " end!");
+                        tarFile.renameTo(new File(parentFile, name));
+                    } finally {
+                        IOUtils.closeQuietly(bos);
+                    }
+                }
+                tais.close();
+            } else {
+                File file = new File(parentFile, fileName + ".tmp");
+                if (file.exists()) {
+                    file.delete();
+                }
+
+                if (!file.isFile()) {
+                    file.createNewFile();
+                }
+                try {
+                    fos = new FileOutputStream(file);
+                    byte[] buffer = new byte[1024];
+                    int len;
+                    long copySize = 0;
+                    long nextPrintProgress = 0;
+                    logger.info("start to download file " + file.getName());
+                    while ((len = is.read(buffer)) != -1) {
+                        fos.write(buffer, 0, len);
+                        copySize += len;
+                        long progress = copySize * 100 / totalSize;
+                        if (progress >= nextPrintProgress) {
+                            logger.info("download " + file.getName() + " progress : " + progress
+                                        + "% , download size : " + copySize + ", total size : " + totalSize);
+                            nextPrintProgress += 10;
+                        }
+                    }
+                    logger.info("download file " + file.getName() + " end!");
+                    fos.flush();
+                } finally {
+                    IOUtils.closeQuietly(fos);
+                }
+                file.renameTo(new File(parentFile, fileName));
+            }
+        } finally {
+            IOUtils.closeQuietly(fos);
+        }
+    }
+
+    public void execute(Runnable runnable) throws InterruptedException {
+        taskQueue.put(runnable);
+    }
+
+    private class DownloadThread implements Runnable {
+
+        @Override
+        public void run() {
+            while (running) {
+                BinlogFile binlogFile = null;
+                try {
+                    binlogFile = downloadQueue.poll(5000, TimeUnit.MILLISECONDS);
+                    if (binlogFile != null) {
+                        int retry = 1;
+                        while (true) {
+                            try {
+                                download(binlogFile);
+                                break;
+                            } catch (Throwable e) {
+                                if (retry % 10 == 0) {
+                                    retry = retry + 1;
+                                    try {
+                                        logger.warn("download failed + " + binlogFile.toString() + "], retry : "
+                                                    + retry, e);
+                                        // File errorFile = new File(destDir,
+                                        // "error.txt");
+                                        // FileWriter writer = new
+                                        // FileWriter(errorFile);
+                                        // writer.write(ExceptionUtils.getFullStackTrace(e));
+                                        // writer.flush();
+                                        // IOUtils.closeQuietly(writer);
+                                        LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100 * retry));
+                                    } catch (Throwable e1) {
+                                        logger.error("write error failed", e1);
+                                    }
+                                } else {
+                                    retry = retry + 1;
+                                }
+                            }
+                        }
+                    }
+
+                    Runnable runnable = taskQueue.poll(5000, TimeUnit.MILLISECONDS);
+                    if (runnable != null) {
+                        runnable.run();
+                    }
+                } catch (Throwable e) {
+                    logger.error("task process failed", e);
+                }
+            }
+
+        }
+    }
+}

+ 1 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/HttpHelper.java

@@ -155,6 +155,7 @@ public class HttpHelper {
                     .register("https", new SSLConnectionSocketFactory(sslContext, NoopHostnameVerifier.INSTANCE))
                     .build()))
                 .build();
+
             // ---------------- 创建支持https 的client成功---------
 
             URI uri = new URIBuilder(url).build();

+ 166 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/RdsBinlogEventParserProxy.java

@@ -0,0 +1,166 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.rds;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+
+import org.apache.commons.lang.StringUtils;
+
+import com.alibaba.otter.canal.parse.exception.PositionNotFoundException;
+import com.alibaba.otter.canal.parse.inbound.ParserExceptionHandler;
+import com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser;
+
+/**
+ * Binlog parser support for Aliyun RDS.
+ * 
+ * <pre>
+ * Note: Aliyun RDS purges its binlogs periodically and archives them (e.g. to OSS).
+ * This proxy adds an automatic fallback: when the requested position is no longer
+ * available on the live MySQL instance, it downloads and replays the archived
+ * RDS binlog files through a local parser, then switches back to the online parser.
+ * </pre>
+ * 
+ * @author chengjin.lyf on 2018/7/20 上午10:52
+ * @since 1.0.25
+ */
+public class RdsBinlogEventParserProxy extends MysqlEventParser {
+
+    private String                    rdsOpenApiUrl             = "https://rds.aliyuncs.com/";    // RDS OpenAPI endpoint
+    private String                    accesskey;                                                  // Aliyun account access key
+    private String                    secretkey;                                                  // Aliyun account secret key
+    private String                    instanceId;                                                 // RDS instance id
+    private String                    directory;                                                  // local directory for downloaded binlog files
+    private int                       batchFileSize             = 4;                              // max number of binlog files downloaded at once
+
+    // Delegate parser that replays the downloaded (local) RDS binlog files.
+    private RdsLocalBinlogEventParser rdsLocalBinlogEventParser = new RdsLocalBinlogEventParser();
+    // Single daemon thread used to stop one parser and start the other, so the
+    // switch never runs on the failing parser's own thread.
+    private ExecutorService           executorService           = Executors.newSingleThreadExecutor(new ThreadFactory() {
+
+                                                                    @Override
+                                                                    public Thread newThread(Runnable r) {
+                                                                        Thread t = new Thread(r,
+                                                                            "rds-binlog-daemon-thread");
+                                                                        t.setDaemon(true);
+                                                                        return t;
+                                                                    }
+                                                                });
+
+    /**
+     * Starts the online MySQL parser. When the Aliyun credentials and instance id
+     * are all configured, it first mirrors this parser's configuration onto the
+     * local RDS binlog parser and installs a {@link ParserExceptionHandler} so a
+     * {@link PositionNotFoundException} triggers the download-and-replay fallback.
+     */
+    @Override
+    public void start() {
+        if (StringUtils.isNotEmpty(accesskey) && StringUtils.isNotEmpty(secretkey)
+            && StringUtils.isNotEmpty(instanceId)) {
+            // keep any handler installed before us so it can still be notified
+            final ParserExceptionHandler targetHandler = this.getParserExceptionHandler();
+            if (directory == null) {
+                // default download location: <java.io.tmpdir>/<destination>
+                directory = System.getProperty("java.io.tmpdir", "/tmp") + "/" + destination;
+            }
+            // propagate this parser's configuration to the local RDS binlog parser
+            rdsLocalBinlogEventParser.setLogPositionManager(this.getLogPositionManager());
+            rdsLocalBinlogEventParser.setDestination(destination);
+            rdsLocalBinlogEventParser.setAlarmHandler(this.getAlarmHandler());
+            rdsLocalBinlogEventParser.setConnectionCharset(this.connectionCharset);
+            rdsLocalBinlogEventParser.setConnectionCharsetNumber(this.connectionCharsetNumber);
+            rdsLocalBinlogEventParser.setEnableTsdb(this.enableTsdb);
+            rdsLocalBinlogEventParser.setEventBlackFilter(this.eventBlackFilter);
+            rdsLocalBinlogEventParser.setFilterQueryDcl(this.filterQueryDcl);
+            rdsLocalBinlogEventParser.setFilterQueryDdl(this.filterQueryDdl);
+            rdsLocalBinlogEventParser.setFilterQueryDml(this.filterQueryDml);
+            rdsLocalBinlogEventParser.setFilterRows(this.filterRows);
+            rdsLocalBinlogEventParser.setFilterTableError(this.filterTableError);
+            // rdsLocalBinlogEventParser.setIsGTIDMode(this.isGTIDMode);
+            rdsLocalBinlogEventParser.setMasterInfo(this.masterInfo);
+            rdsLocalBinlogEventParser.setEventFilter(this.eventFilter);
+            rdsLocalBinlogEventParser.setMasterPosition(this.masterPosition);
+            rdsLocalBinlogEventParser.setTransactionSize(this.transactionSize);
+            rdsLocalBinlogEventParser.setUrl(this.rdsOpenApiUrl);
+            rdsLocalBinlogEventParser.setAccesskey(this.accesskey);
+            rdsLocalBinlogEventParser.setSecretkey(this.secretkey);
+            rdsLocalBinlogEventParser.setInstanceId(this.instanceId);
+            rdsLocalBinlogEventParser.setEventSink(eventSink);
+            rdsLocalBinlogEventParser.setDirectory(directory);
+            rdsLocalBinlogEventParser.setBatchFileSize(batchFileSize);
+            rdsLocalBinlogEventParser.setParallel(this.parallel);
+            rdsLocalBinlogEventParser.setParallelBufferSize(this.parallelBufferSize);
+            rdsLocalBinlogEventParser.setParallelThreadSize(this.parallelThreadSize);
+            // once the local parser has replayed every downloaded file, stop it and
+            // restart this proxy to resume parsing from the live instance
+            rdsLocalBinlogEventParser.setFinishListener(new RdsLocalBinlogEventParser.ParseFinishListener() {
+
+                @Override
+                public void onFinish() {
+                    executorService.execute(new Runnable() {
+
+                        @Override
+                        public void run() {
+                            rdsLocalBinlogEventParser.stop();
+                            RdsBinlogEventParserProxy.this.start();
+                        }
+                    });
+
+                }
+            });
+            // NOTE(review): this runs on every (re)start and wraps the handler
+            // returned by getParserExceptionHandler() — after a restart that is the
+            // handler installed here previously, so handlers chain and
+            // handleMysqlParserException may run more than once per error; confirm
+            // this is intended.
+            this.setParserExceptionHandler(new ParserExceptionHandler() {
+
+                @Override
+                public void handle(Throwable e) {
+                    handleMysqlParserException(e);
+                    if (targetHandler != null) {
+                        targetHandler.handle(e);
+                    }
+                }
+            });
+        }
+
+        super.start();
+    }
+
+    /**
+     * Fallback trigger: when the online parser fails with
+     * {@link PositionNotFoundException}, asynchronously stop this proxy and start
+     * the local RDS binlog parser (which downloads the archived binlog files).
+     * Other throwables are ignored here and left to the wrapped handler.
+     */
+    private void handleMysqlParserException(Throwable throwable) {
+        if (throwable instanceof PositionNotFoundException) {
+            logger.info("remove rds not found position, try download rds binlog!");
+            executorService.execute(new Runnable() {
+
+                @Override
+                public void run() {
+                    try {
+                        logger.info("stop mysql parser!");
+                        RdsBinlogEventParserProxy rdsBinlogEventParserProxy = RdsBinlogEventParserProxy.this;
+                        // carry the observed server id over so the local parser can
+                        // validate the downloaded files against it
+                        long serverId = rdsBinlogEventParserProxy.getServerId();
+                        rdsLocalBinlogEventParser.setServerId(serverId);
+                        rdsBinlogEventParserProxy.stop();
+                        logger.info("start rds mysql binlog parser!");
+                        rdsLocalBinlogEventParser.start();
+                    } catch (Throwable e) {
+                        // NOTE(review): logged at info level although this is a
+                        // failure path — consider logger.error.
+                        logger.info("handle exception failed", e);
+                    }
+                }
+            });
+        }
+    }
+
+    // The two overrides below add no behavior; they are kept as explicit
+    // pass-throughs to the MysqlEventParser lifecycle.
+    @Override
+    public void stop() {
+        super.stop();
+    }
+
+    @Override
+    public boolean isStart() {
+        return super.isStart();
+    }
+
+    public void setRdsOpenApiUrl(String rdsOpenApiUrl) {
+        this.rdsOpenApiUrl = rdsOpenApiUrl;
+    }
+
+    public void setAccesskey(String accesskey) {
+        this.accesskey = accesskey;
+    }
+
+    public void setSecretkey(String secretkey) {
+        this.secretkey = secretkey;
+    }
+
+    public void setInstanceId(String instanceId) {
+        this.instanceId = instanceId;
+    }
+
+    public void setDirectory(String directory) {
+        this.directory = directory;
+    }
+
+    public void setBatchFileSize(int batchFileSize) {
+        this.batchFileSize = batchFileSize;
+    }
+
+}

+ 67 - 301
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/RdsBinlogOpenApi.java

@@ -1,46 +1,21 @@
 package com.alibaba.otter.canal.parse.inbound.mysql.rds;
 
-import io.netty.handler.codec.http.HttpResponseStatus;
-
-import java.io.BufferedOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.UnsupportedEncodingException;
-import java.net.URLEncoder;
-import java.text.SimpleDateFormat;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.Collections;
 import java.util.Date;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
-import java.util.TimeZone;
-import java.util.TreeMap;
-import java.util.UUID;
-
-import javax.crypto.Mac;
-import javax.crypto.SecretKey;
-import javax.crypto.spec.SecretKeySpec;
 
-import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
-import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang.StringUtils;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.config.RequestConfig;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClientBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.alibaba.fastjson.JSON;
-import com.alibaba.fastjson.JSONArray;
-import com.alibaba.fastjson.JSONObject;
+import com.alibaba.otter.canal.parse.inbound.mysql.rds.data.BinlogFile;
+import com.alibaba.otter.canal.parse.inbound.mysql.rds.data.DescribeBinlogFileResult;
+import com.alibaba.otter.canal.parse.inbound.mysql.rds.data.RdsBackupPolicy;
+import com.alibaba.otter.canal.parse.inbound.mysql.rds.data.RdsItem;
+import com.alibaba.otter.canal.parse.inbound.mysql.rds.request.DescribeBackupPolicyRequest;
+import com.alibaba.otter.canal.parse.inbound.mysql.rds.request.DescribeBinlogFilesRequest;
 
 /**
  * @author agapple 2017年10月14日 下午1:53:52
@@ -48,287 +23,78 @@ import com.alibaba.fastjson.JSONObject;
  */
 public class RdsBinlogOpenApi {
 
-    protected static final Logger logger              = LoggerFactory.getLogger(RdsBinlogOpenApi.class);
-    private static final String   ISO8601_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss'Z'";
-    private static final int      TIMEOUT             = 10000;
-    private static final String   ENCODING            = "UTF-8";
-    private static final String   MAC_NAME            = "HmacSHA1";
-    private static final String   API_VERSION         = "2014-08-15";
-    private static final String   SIGNATURE_VERSION   = "1.0";
-
-    public static void downloadBinlogFiles(String url, String ak, String sk, String dbInstanceId, Date startTime,
-                                           Date endTime, File destDir) throws Throwable {
-        int pageSize = 100;
-        int pageNumber = 0;
-        int pageRecordCount = 1;
-        String hostInstanceID = null;
-        while (pageRecordCount > 0 && pageRecordCount <= pageSize) {
-            pageNumber += 1;
-            String result = describeBinlogFiles(url, ak, sk, dbInstanceId, startTime, endTime, pageSize, pageNumber);
-            JSONObject jsobObj = JSON.parseObject(result);
-            pageRecordCount = jsobObj.getInteger("PageRecordCount");
-
-            if (pageRecordCount > 0) {
-                FileUtils.forceMkdir(destDir);
-                File hostIdFile = new File(destDir, "hostId");
-                if (hostIdFile.exists()) {
-                    List<String> lines = IOUtils.readLines(new FileInputStream(hostIdFile));
-                    hostInstanceID = StringUtils.join(lines, "\n");
-                }
-
-                String itemStr = jsobObj.getString("Items");
-                JSONObject binLogFileObj = JSONObject.parseObject(itemStr);
-                JSONArray items = binLogFileObj.getJSONArray("BinLogFile");
-                if (items == null || items.isEmpty()) {
-                    continue;
-                }
-                for (int i = 0; i < items.size(); i++) {
-                    JSONObject item = (JSONObject) items.get(i);
-                    String oneHostInstanceID = item.getString("HostInstanceID");
-                    if (hostInstanceID == null) {
-                        hostInstanceID = oneHostInstanceID;
-                        FileOutputStream hostIdFileOut = null;
-                        try {
-                            hostIdFileOut = new FileOutputStream(hostIdFile);
-                            hostIdFileOut.write(oneHostInstanceID.getBytes());
-                            hostIdFileOut.flush();
-                        } finally {
-                            IOUtils.closeQuietly(hostIdFileOut);
-                        }
-                    }
-
-                    if (hostInstanceID.equals(oneHostInstanceID)) { // 只选择一个host下载
-                        String downloadLink = item.getString("DownloadLink");
-                        String fileName = StringUtils.substringBetween(downloadLink, "mysql-bin.", ".tar");
-                        if (StringUtils.isNotEmpty(fileName)) {
-                            File currentFile = new File(destDir, "mysql-bin." + fileName);
-                            if (currentFile.isFile() && currentFile.exists()) {
-                                // 检查一下文件是否存在,存在就就没必要下载了
-                                continue;
-                            }
-                        }
-
-                        HttpGet httpGet = new HttpGet(downloadLink);
-                        CloseableHttpClient httpClient = HttpClientBuilder.create()
-                            .setMaxConnPerRoute(50)
-                            .setMaxConnTotal(100)
-                            .build();
-                        RequestConfig requestConfig = RequestConfig.custom()
-                            .setConnectTimeout(TIMEOUT)
-                            .setConnectionRequestTimeout(TIMEOUT)
-                            .setSocketTimeout(TIMEOUT)
-                            .build();
-                        httpGet.setConfig(requestConfig);
-                        HttpResponse response = httpClient.execute(httpGet);
-                        int statusCode = response.getStatusLine().getStatusCode();
-                        if (statusCode != HttpResponseStatus.OK.code()) {
-                            throw new RuntimeException("download failed , url:" + downloadLink + " , statusCode:"
-                                                       + statusCode);
-                        }
-                        saveFile(destDir, response);
-                    }
-                }
+    protected static final Logger logger = LoggerFactory.getLogger(RdsBinlogOpenApi.class);
+
+    public static List<BinlogFile> listBinlogFiles(String url, String ak, String sk, String dbInstanceId,
+                                                   Date startTime, Date endTime) {
+        DescribeBinlogFilesRequest request = new DescribeBinlogFilesRequest();
+        if (StringUtils.isNotEmpty(url)) {
+            try {
+                URI uri = new URI(url);
+                request.setEndPoint(uri.getHost());
+            } catch (URISyntaxException e) {
+                logger.error("resolve url host failed, will use default rds endpoint!");
             }
         }
-    }
-
-    private static void saveFile(File parentFile, HttpResponse response) throws IOException {
-        InputStream is = response.getEntity().getContent();
-        long totalSize = Long.parseLong(response.getFirstHeader("Content-Length").getValue());
-        String fileName = response.getFirstHeader("Content-Disposition").getValue();
-        fileName = StringUtils.substringAfter(fileName, "filename=");
-        boolean isTar = StringUtils.endsWith(fileName, ".tar");
-        FileUtils.forceMkdir(parentFile);
-        FileOutputStream fos = null;
-        try {
-            if (isTar) {
-                TarArchiveInputStream tais = new TarArchiveInputStream(is);
-                TarArchiveEntry tarArchiveEntry = null;
-                while ((tarArchiveEntry = tais.getNextTarEntry()) != null) {
-                    String name = tarArchiveEntry.getName();
-                    File tarFile = new File(parentFile, name);
-                    logger.info("start to download file " + tarFile.getName());
-                    BufferedOutputStream bos = null;
-                    try {
-                        bos = new BufferedOutputStream(new FileOutputStream(tarFile));
-                        int read = -1;
-                        byte[] buffer = new byte[1024];
-                        while ((read = tais.read(buffer)) != -1) {
-                            bos.write(buffer, 0, read);
-                        }
-                        logger.info("download file " + tarFile.getName() + " end!");
-                    } finally {
-                        IOUtils.closeQuietly(bos);
-                    }
-                }
-                tais.close();
-            } else {
-                File file = new File(parentFile, fileName);
-                if (!file.isFile()) {
-                    file.createNewFile();
+        request.setStartDate(startTime);
+        request.setEndDate(endTime);
+        request.setPageNumber(1);
+        request.setPageSize(100);
+        request.setRdsInstanceId(dbInstanceId);
+        request.setAccessKeyId(ak);
+        request.setAccessKeySecret(sk);
+        DescribeBinlogFileResult result = null;
+        int retryTime = 3;
+        while (true) {
+            try {
+                result = request.doAction();
+                break;
+            } catch (Exception e) {
+                if (retryTime-- <= 0) {
+                    throw new RuntimeException(e);
                 }
                 try {
-                    fos = new FileOutputStream(file);
-                    byte[] buffer = new byte[1024];
-                    int len;
-                    long copySize = 0;
-                    long nextPrintProgress = 0;
-                    logger.info("start to download file " + file.getName());
-                    while ((len = is.read(buffer)) != -1) {
-                        fos.write(buffer, 0, len);
-                        copySize += len;
-                        long progress = copySize * 100 / totalSize;
-                        if (progress >= nextPrintProgress) {
-                            logger.info("download " + file.getName() + " progress : " + progress
-                                        + "% , download size : " + copySize + ", total size : " + totalSize);
-                            nextPrintProgress += 10;
-                        }
-                    }
-                    logger.info("download file " + file.getName() + " end!");
-                    fos.flush();
-                } finally {
-                    IOUtils.closeQuietly(fos);
+                    Thread.sleep(100L);
+                } catch (InterruptedException e1) {
                 }
             }
-        } finally {
-            IOUtils.closeQuietly(fos);
-        }
-    }
-
-    public static String describeBinlogFiles(String url, String ak, String sk, String dbInstanceId, Date startTime,
-                                             Date endTime, int pageSize, int pageNumber) throws Exception {
-        Map<String, String> paramMap = new HashMap<String, String>();
-        paramMap.put("Action", "DescribeBinlogFiles");
-        paramMap.put("DBInstanceId", dbInstanceId); // rds实例id
-        paramMap.put("StartTime", formatIso8601Date(startTime));
-        paramMap.put("EndTime", formatIso8601Date(endTime));
-        paramMap.put("PageSize", String.valueOf(pageSize));
-        paramMap.put("PageNumber", String.valueOf(pageNumber));
-        return doRequest(url, paramMap, ak, sk);
-    }
-
-    private static String doRequest(String domin, Map<String, String> param, String ak, String sk) throws Exception {
-        param.put("AccessKeyId", ak);
-        param.put("SignatureMethod", "HMAC-SHA1");
-        param.put("SignatureVersion", SIGNATURE_VERSION);
-        param.put("Version", API_VERSION);
-        param.put("SignatureNonce", UUID.randomUUID().toString());
-        param.put("Format", "JSON");
-        param.put("Timestamp", formatIso8601Date(new Date()));
-        String signStr = generate("POST", param, sk);
-        param.put("Signature", signStr);
-        String request = concatQueryString(param);
-        String url = domin + "?" + request;
-        String result = HttpHelper.post(url, null, Collections.EMPTY_MAP, TIMEOUT);
-        return result;
-    }
-
-    public static String concatQueryString(Map<String, String> parameters) throws UnsupportedEncodingException {
-        if (null == parameters) {
-            return null;
-        }
-        StringBuilder urlBuilder = new StringBuilder("");
-        for (Map.Entry<String, String> entry : parameters.entrySet()) {
-            String key = entry.getKey();
-            String val = entry.getValue();
-            urlBuilder.append(encode(key));
-            if (val != null) {
-                urlBuilder.append("=").append(encode(val));
-            }
-            urlBuilder.append("&");
         }
-        int strIndex = urlBuilder.length();
-        if (parameters.size() > 0) {
-            urlBuilder.deleteCharAt(strIndex - 1);
+        if (result == null) {
+            return Collections.EMPTY_LIST;
         }
-        return urlBuilder.toString();
-    }
-
-    public static String encode(String value) throws UnsupportedEncodingException {
-        return URLEncoder.encode(value, "UTF-8");
-    }
-
-    private static String formatIso8601Date(Date date) {
-        SimpleDateFormat df = new SimpleDateFormat(ISO8601_DATE_FORMAT);
-        df.setTimeZone(TimeZone.getTimeZone("GMT"));
-        return df.format(date);
-    }
-
-    /**
-     * 使用 HMAC-SHA1 签名方法对对encryptText进行签名
-     *
-     * @param encryptText 被签名的字符串
-     * @param encryptKey 密钥
-     * @return
-     * @throws Exception
-     */
-    public static byte[] HmacSHA1Encrypt(String encryptText, String encryptKey) throws Exception {
-        byte[] data = encryptKey.getBytes(ENCODING);
-        // 根据给定的字节数组构造一个密钥,第二参数指定一个密钥算法的名称
-        SecretKey secretKey = new SecretKeySpec(data, MAC_NAME);
-        // 生成一个指定 Mac 算法 的 Mac 对象
-        Mac mac = Mac.getInstance(MAC_NAME);
-        // 用给定密钥初始化 Mac 对象
-        mac.init(secretKey);
-
-        byte[] text = encryptText.getBytes(ENCODING);
-        // 完成 Mac 操作
-        return mac.doFinal(text);
-    }
-
-    private static String base64(byte input[]) throws UnsupportedEncodingException {
-        return new String(Base64.encodeBase64(input), ENCODING);
-    }
-
-    /** 对参数名称和参数值进行URL编码 **/
-    public static String generate(String method, Map<String, String> parameter, String accessKeySecret)
-                                                                                                       throws Exception {
-        String signString = generateSignString(method, parameter);
-        byte[] signBytes = HmacSHA1Encrypt(signString, accessKeySecret + "&");
-        String signature = base64(signBytes);
-        if ("POST".equals(method)) {
-            return signature;
+        RdsItem rdsItem = result.getItems();
+        if (rdsItem != null) {
+            return rdsItem.getBinLogFile();
         }
-        return URLEncoder.encode(signature, "UTF-8");
+        return Collections.EMPTY_LIST;
     }
 
-    private static String generateQueryString(TreeMap<String, String> treeMap) {
-        StringBuilder canonicalizedQueryString = new StringBuilder();
-        boolean first = true;
-        for (String key : treeMap.navigableKeySet()) {
-            String value = treeMap.get(key);
-            if (!first) {
-                canonicalizedQueryString.append("&");
+    public static RdsBackupPolicy queryBinlogBackupPolicy(String url, String ak, String sk, String dbInstanceId) {
+        DescribeBackupPolicyRequest request = new DescribeBackupPolicyRequest();
+        if (StringUtils.isNotEmpty(url)) {
+            try {
+                URI uri = new URI(url);
+                request.setEndPoint(uri.getHost());
+            } catch (URISyntaxException e) {
+                logger.error("resolve url host failed, will use default rds endpoint!");
             }
-            first = false;
-            canonicalizedQueryString.append(percentEncode(key)).append("=").append(percentEncode(value));
         }
-        return canonicalizedQueryString.toString();
-    }
-
-    public static String generateSignString(String httpMethod, Map<String, String> parameter) throws IOException {
-        TreeMap<String, String> sortParameter = new TreeMap<String, String>();
-        sortParameter.putAll(parameter);
-        String canonicalizedQueryString = generateQueryString(sortParameter);
-        if (null == httpMethod) {
-            throw new RuntimeException("httpMethod can not be empty");
-        }
-        /** 构造待签名的字符串* */
-        StringBuilder stringToSign = new StringBuilder();
-        stringToSign.append(httpMethod).append("&");
-        stringToSign.append(percentEncode("/")).append("&");
-        stringToSign.append(percentEncode(canonicalizedQueryString));
-        return stringToSign.toString();
-    }
-
-    public static String percentEncode(String value) {
-        try {
-            return value == null ? null : URLEncoder.encode(value, ENCODING)
-                .replaceAll("\\+", "%20")
-                .replaceAll("\\*", "%2A")
-                .replaceAll("%7E", "~");
-        } catch (Exception e) {
+        request.setRdsInstanceId(dbInstanceId);
+        request.setAccessKeyId(ak);
+        request.setAccessKeySecret(sk);
+        int retryTime = 3;
+        while (true) {
+            try {
+                return request.doAction();
+            } catch (Exception e) {
+                if (retryTime-- <= 0) {
+                    throw new RuntimeException(e);
+                }
+                try {
+                    Thread.sleep(100L);
+                } catch (InterruptedException e1) {
+                }
+            }
         }
-        return "";
     }
 }

+ 144 - 35
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/RdsLocalBinlogEventParser.java

@@ -2,14 +2,23 @@ package com.alibaba.otter.canal.parse.inbound.mysql.rds;
 
 import java.io.File;
 import java.util.Date;
+import java.util.List;
 
 import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.math.NumberUtils;
 import org.springframework.util.Assert;
 
 import com.alibaba.otter.canal.parse.CanalEventParser;
 import com.alibaba.otter.canal.parse.exception.CanalParseException;
+import com.alibaba.otter.canal.parse.exception.PositionNotFoundException;
+import com.alibaba.otter.canal.parse.exception.ServerIdNotMatchException;
+import com.alibaba.otter.canal.parse.inbound.ErosaConnection;
+import com.alibaba.otter.canal.parse.inbound.ParserExceptionHandler;
+import com.alibaba.otter.canal.parse.inbound.mysql.LocalBinLogConnection;
 import com.alibaba.otter.canal.parse.inbound.mysql.LocalBinlogEventParser;
+import com.alibaba.otter.canal.parse.inbound.mysql.rds.data.BinlogFile;
 import com.alibaba.otter.canal.protocol.position.EntryPosition;
+import com.alibaba.otter.canal.protocol.position.LogPosition;
 
 /**
  * 基于rds binlog备份文件的复制
@@ -17,47 +26,104 @@ import com.alibaba.otter.canal.protocol.position.EntryPosition;
  * @author agapple 2017年10月15日 下午1:27:36
  * @since 1.0.25
  */
-public class RdsLocalBinlogEventParser extends LocalBinlogEventParser implements CanalEventParser {
-
-    private String url = "https://rds.aliyuncs.com/"; // openapi地址
-    private String accesskey;                        // 云账号的ak
-    private String secretkey;                        // 云账号sk
-    private String instanceId;                       // rds实例id
-    private Long   startTime;
-    private Long   endTime;
+public class RdsLocalBinlogEventParser extends LocalBinlogEventParser implements CanalEventParser, LocalBinLogConnection.FileParserListener {
+
+    private String              url;                // openapi地址
+    private String              accesskey;          // 云账号的ak
+    private String              secretkey;          // 云账号sk
+    private String              instanceId;         // rds实例id
+    private Long                startTime;
+    private Long                endTime;
+    private BinlogDownloadQueue binlogDownloadQueue;
+    private ParseFinishListener finishListener;
+    private int                 batchFileSize;
 
     public RdsLocalBinlogEventParser(){
     }
 
     public void start() throws CanalParseException {
         try {
-            Assert.notNull(startTime);
             Assert.notNull(accesskey);
             Assert.notNull(secretkey);
             Assert.notNull(instanceId);
             Assert.notNull(url);
+            Assert.notNull(directory);
+
             if (endTime == null) {
                 endTime = System.currentTimeMillis();
             }
 
-            RdsBinlogOpenApi.downloadBinlogFiles(url,
+            EntryPosition entryPosition = findStartPosition(null);
+            if (entryPosition == null) {
+                throw new PositionNotFoundException("position not found!");
+            }
+            long startTimeInMill = entryPosition.getTimestamp();
+            startTime = startTimeInMill;
+            List<BinlogFile> binlogFiles = RdsBinlogOpenApi.listBinlogFiles(url,
                 accesskey,
                 secretkey,
                 instanceId,
                 new Date(startTime),
-                new Date(endTime),
-                new File(directory));
-
-            // 更新一下时间戳
-            masterPosition = new EntryPosition(startTime);
+                new Date(endTime));
+            binlogDownloadQueue = new BinlogDownloadQueue(binlogFiles, batchFileSize, directory);
+            binlogDownloadQueue.silenceDownload();
+            needWait = true;
+            // try to download one file,use to test server id
+            binlogDownloadQueue.tryOne();
         } catch (Throwable e) {
             logger.error("download binlog failed", e);
             throw new CanalParseException(e);
         }
+        setParserExceptionHandler(new ParserExceptionHandler() {
 
+            @Override
+            public void handle(Throwable e) {
+                handleMysqlParserException(e);
+            }
+        });
         super.start();
     }
 
+    private void handleMysqlParserException(Throwable throwable) {
+        if (throwable instanceof ServerIdNotMatchException) {
+            logger.error("server id not match, try download another rds binlog!");
+            binlogDownloadQueue.notifyNotMatch();
+            try {
+                binlogDownloadQueue.cleanDir();
+                binlogDownloadQueue.tryOne();
+                binlogDownloadQueue.prepare();
+            } catch (Throwable e) {
+                throw new RuntimeException(e);
+            }
+
+            try {
+                binlogDownloadQueue.execute(new Runnable() {
+
+                    @Override
+                    public void run() {
+                        RdsLocalBinlogEventParser.super.stop();
+                        RdsLocalBinlogEventParser.super.start();
+                    }
+                });
+            } catch (InterruptedException e) {
+                throw new RuntimeException(e);
+            }
+
+        }
+    }
+
+    @Override
+    protected ErosaConnection buildErosaConnection() {
+        ErosaConnection connection = super.buildErosaConnection();
+        if (connection instanceof LocalBinLogConnection) {
+            LocalBinLogConnection localBinLogConnection = (LocalBinLogConnection) connection;
+            localBinLogConnection.setNeedWait(true);
+            localBinLogConnection.setServerId(serverId);
+            localBinLogConnection.setParserListener(this);
+        }
+        return connection;
+    }
+
     public String getUrl() {
         return url;
     }
@@ -68,44 +134,87 @@ public class RdsLocalBinlogEventParser extends LocalBinlogEventParser implements
         }
     }
 
-    public String getAccesskey() {
-        return accesskey;
-    }
-
    // aliyun openapi access key used for rds binlog queries (setter injection)
    public void setAccesskey(String accesskey) {
        this.accesskey = accesskey;
    }
 
-    public String getSecretkey() {
-        return secretkey;
-    }
-
    // aliyun openapi secret key paired with the access key (setter injection)
    public void setSecretkey(String secretkey) {
        this.secretkey = secretkey;
    }
 
-    public String getInstanceId() {
-        return instanceId;
-    }
-
    // rds instance id whose binlog files are fetched (setter injection)
    public void setInstanceId(String instanceId) {
        this.instanceId = instanceId;
    }
 
-    public Long getStartTime() {
-        return startTime;
-    }
-
    // lower bound of the binlog time window — presumably epoch millis, confirm against caller
    public void setStartTime(Long startTime) {
        this.startTime = startTime;
    }
 
-    public Long getEndTime() {
-        return endTime;
-    }
-
    // upper bound of the binlog time window — presumably epoch millis, confirm against caller
    public void setEndTime(Long endTime) {
        this.endTime = endTime;
    }
 
    /**
     * Called when one local binlog file has been completely parsed.
     * <p>
     * Deletes the consumed file, rolls the persisted log position forward to
     * the start of the next binlog file, and then either signals the finish
     * listener (when this was the last downloaded file, to switch back to the
     * mysql parser) or prepares the next download.
     *
     * @param fileName name of the binlog file that was just finished
     */
    @Override
    public void onFinish(String fileName) {
        try {
            binlogDownloadQueue.downOne();
            // the file has been fully consumed; reclaim local disk space
            File needDeleteFile = new File(directory + File.separator + fileName);
            if (needDeleteFile.exists()) {
                needDeleteFile.delete();
            }
            // fix up the logPositionManager position: advance it to the next binlog file
            LogPosition logPosition = logPositionManager.getLatestIndexBy(destination);
            Long timestamp = 0L;
            if (logPosition != null && logPosition.getPostion() != null) {
                timestamp = logPosition.getPostion().getTimestamp();
                EntryPosition position = logPosition.getPostion();
                LogPosition newLogPosition = new LogPosition();
                // bump the numeric suffix after the first '.' and left-pad it back
                // to its original width — assumes names like "mysql-bin.000001"
                // with no earlier '.' in the base name (TODO confirm)
                String journalName = position.getJournalName();
                int sepIdx = journalName.indexOf(".");
                String fileIndex = journalName.substring(sepIdx + 1);
                int index = NumberUtils.toInt(fileIndex) + 1;
                String newJournalName = journalName.substring(0, sepIdx) + "."
                                        + StringUtils.leftPad(String.valueOf(index), fileIndex.length(), "0");
                // offset 4 = first event position of a binlog file (standard MySQL file header size)
                newLogPosition.setPostion(new EntryPosition(newJournalName,
                    4L,
                    position.getTimestamp(),
                    position.getServerId()));
                newLogPosition.setIdentity(logPosition.getIdentity());
                logPositionManager.persistLogPosition(destination, newLogPosition);
            }

            if (binlogDownloadQueue.isLastFile(fileName)) {
                logger.warn("last file : " + fileName + " , timestamp : " + timestamp
                            + " , all file parse complete, switch to mysql parser!");
                finishListener.onFinish();
                return;
            } else {
                logger.warn("parse local binlog file : " + fileName + " , timestamp : " + timestamp
                            + " , try the next binlog !");
            }
            binlogDownloadQueue.prepare();
        } catch (Exception e) {
            logger.error("prepare download binlog file failed!", e);
            throw new RuntimeException(e);
        }
    }
+
    /**
     * Releases the download queue (so any blocked download threads can exit)
     * before delegating to the parent stop logic.
     */
    @Override
    public void stop() {
        this.binlogDownloadQueue.release();
        super.stop();
    }
+
    // registers the callback fired after the last downloaded binlog file is parsed
    public void setFinishListener(ParseFinishListener finishListener) {
        this.finishListener = finishListener;
    }
+
    /**
     * Callback signalled once every downloaded binlog file has been parsed,
     * letting the owner switch back to a live mysql parser (see onFinish).
     */
    public interface ParseFinishListener {

        void onFinish();
    }
+
    // number of binlog files handled per download batch (setter injection)
    public void setBatchFileSize(int batchFileSize) {
        this.batchFileSize = batchFileSize;
    }
 }

+ 80 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/data/BinlogFile.java

@@ -0,0 +1,80 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.rds.data;
+
/**
 * Value object describing one binlog file entry of an RDS DescribeBinlogFiles
 * response. Field names intentionally start with an upper-case letter so they
 * match the PascalCase keys of the API's JSON payload — NOTE(review):
 * presumably mapped by reflection; confirm before renaming fields.
 *
 * @author chengjin.lyf on 2018/8/7 下午2:26
 * @since 1.0.25
 */
public class BinlogFile {

    private Long   FileSize;
    private String LogBeginTime;
    private String LogEndTime;
    private String DownloadLink;
    private String HostInstanceID;
    private String LinkExpiredTime;
    private String fileName;

    public Long getFileSize() {
        return this.FileSize;
    }

    public void setFileSize(Long fileSize) {
        this.FileSize = fileSize;
    }

    public String getLogBeginTime() {
        return this.LogBeginTime;
    }

    public void setLogBeginTime(String logBeginTime) {
        this.LogBeginTime = logBeginTime;
    }

    public String getLogEndTime() {
        return this.LogEndTime;
    }

    public void setLogEndTime(String logEndTime) {
        this.LogEndTime = logEndTime;
    }

    public String getDownloadLink() {
        return this.DownloadLink;
    }

    public void setDownloadLink(String downloadLink) {
        this.DownloadLink = downloadLink;
    }

    public String getHostInstanceID() {
        return this.HostInstanceID;
    }

    public void setHostInstanceID(String hostInstanceID) {
        this.HostInstanceID = hostInstanceID;
    }

    public String getLinkExpiredTime() {
        return this.LinkExpiredTime;
    }

    public void setLinkExpiredTime(String linkExpiredTime) {
        this.LinkExpiredTime = linkExpiredTime;
    }

    public String getFileName() {
        return this.fileName;
    }

    public void setFileName(String fileName) {
        this.fileName = fileName;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("BinlogFile [");
        sb.append("FileSize=").append(FileSize);
        sb.append(", LogBeginTime=").append(LogBeginTime);
        sb.append(", LogEndTime=").append(LogEndTime);
        sb.append(", DownloadLink=").append(DownloadLink);
        sb.append(", HostInstanceID=").append(HostInstanceID);
        sb.append(", LinkExpiredTime=").append(LinkExpiredTime);
        sb.append(", fileName=").append(fileName);
        return sb.append("]").toString();
    }

}

+ 63 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/data/DescribeBinlogFileResult.java

@@ -0,0 +1,63 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.rds.data;
+
+/**
+ * @author chengjin.lyf on 2018/8/7 下午2:26
+ * @since 1.0.25
+ */
+public class DescribeBinlogFileResult {
+
+    private RdsItem Items;
+    private long    PageNumber;
+    private long    TotalRecordCount;
+    private long    TotalFileSize;
+    private String  RequestId;
+    private long    PageRecordCount;
+
+    public RdsItem getItems() {
+        return Items;
+    }
+
+    public void setItems(RdsItem items) {
+        Items = items;
+    }
+
+    public long getPageNumber() {
+        return PageNumber;
+    }
+
+    public void setPageNumber(long pageNumber) {
+        PageNumber = pageNumber;
+    }
+
+    public long getTotalRecordCount() {
+        return TotalRecordCount;
+    }
+
+    public void setTotalRecordCount(long totalRecordCount) {
+        TotalRecordCount = totalRecordCount;
+    }
+
+    public long getTotalFileSize() {
+        return TotalFileSize;
+    }
+
+    public void setTotalFileSize(long totalFileSize) {
+        TotalFileSize = totalFileSize;
+    }
+
+    public String getRequestId() {
+        return RequestId;
+    }
+
+    public void setRequestId(String requestId) {
+        RequestId = requestId;
+    }
+
+    public long getPageRecordCount() {
+        return PageRecordCount;
+    }
+
+    public void setPageRecordCount(long pageRecordCount) {
+        PageRecordCount = pageRecordCount;
+    }
+}

+ 78 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/data/RdsBackupPolicy.java

@@ -0,0 +1,78 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.rds.data;
+
/**
 * Backup policy settings of an RDS instance as returned by the RDS OpenAPI.
 * Field names intentionally use PascalCase to match the API's JSON keys —
 * NOTE(review): presumably mapped by reflection; confirm before renaming.
 *
 * @author chengjin.lyf on 2018/8/7 下午2:26
 * @since 1.0.25
 */
public class RdsBackupPolicy {

    /**
     * Data backup retention in days (7 to 730).
     */
    private String  BackupRetentionPeriod;
    /**
     * Data backup time window, format: HH:mmZ-HH:mmZ.
     */
    private String  PreferredBackupTime;
    /**
     * Data backup weekdays: Monday, Tuesday, Wednesday, Thursday, Friday,
     * Saturday, Sunday.
     */
    private String  PreferredBackupPeriod;
    /**
     * Whether log backup is enabled (Enable) or disabled (Disabled).
     */
    private boolean BackupLog;
    /**
     * Log backup retention in days (7 to 730).
     */
    private int     LogBackupRetentionPeriod;

    public String getBackupRetentionPeriod() {
        return this.BackupRetentionPeriod;
    }

    public void setBackupRetentionPeriod(String backupRetentionPeriod) {
        this.BackupRetentionPeriod = backupRetentionPeriod;
    }

    public String getPreferredBackupTime() {
        return this.PreferredBackupTime;
    }

    public void setPreferredBackupTime(String preferredBackupTime) {
        this.PreferredBackupTime = preferredBackupTime;
    }

    public String getPreferredBackupPeriod() {
        return this.PreferredBackupPeriod;
    }

    public void setPreferredBackupPeriod(String preferredBackupPeriod) {
        this.PreferredBackupPeriod = preferredBackupPeriod;
    }

    public boolean isBackupLog() {
        return this.BackupLog;
    }

    public void setBackupLog(boolean backupLog) {
        this.BackupLog = backupLog;
    }

    public int getLogBackupRetentionPeriod() {
        return this.LogBackupRetentionPeriod;
    }

    public void setLogBackupRetentionPeriod(int logBackupRetentionPeriod) {
        this.LogBackupRetentionPeriod = logBackupRetentionPeriod;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("RdsBackupPolicy [");
        sb.append("BackupRetentionPeriod=").append(BackupRetentionPeriod);
        sb.append(", PreferredBackupTime=").append(PreferredBackupTime);
        sb.append(", PreferredBackupPeriod=").append(PreferredBackupPeriod);
        sb.append(", BackupLog=").append(BackupLog);
        sb.append(", LogBackupRetentionPeriod=").append(LogBackupRetentionPeriod);
        return sb.append("]").toString();
    }

}

+ 26 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/data/RdsItem.java

@@ -0,0 +1,26 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.rds.data;
+
+import java.util.List;
+
+/**
+ * @author chengjin.lyf on 2018/8/7 下午2:26
+ * @since 1.0.25
+ */
+public class RdsItem {
+
+    private List<BinlogFile> BinLogFile;
+
+    public List<BinlogFile> getBinLogFile() {
+        return BinLogFile;
+    }
+
+    public void setBinLogFile(List<BinlogFile> binLogFile) {
+        BinLogFile = binLogFile;
+    }
+
+    @Override
+    public String toString() {
+        return "RdsItem [BinLogFile=" + BinLogFile + "]";
+    }
+
+}

Alguns arquivos não foram mostrados porque muitos arquivos mudaram nesse diff