Browse Source

Merge pull request #1 from alibaba/master

pull & merge
WU Jianqiang 7 years ago
parent
commit
0b82ee8d10
100 changed files with 4614 additions and 618 deletions
  1. 1 1
      README.md
  2. 1 1
      client/pom.xml
  3. 1 1
      client/src/test/java/logback.xml
  4. 30 3
      common/pom.xml
  5. 2 0
      common/src/main/java/com/alibaba/otter/canal/common/utils/JsonUtils.java
  6. 1 1
      dbsync/pom.xml
  7. 1 0
      dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/LogBuffer.java
  8. 7 2
      dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/LogDecoder.java
  9. 21 1
      dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/LogEvent.java
  10. 21 0
      dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/mariadb/MariaGtidLogEvent.java
  11. 1 1
      deployer/pom.xml
  12. 1 3
      deployer/src/main/java/com/alibaba/otter/canal/deployer/monitor/SpringInstanceConfigMonitor.java
  13. 5 0
      deployer/src/main/resources/canal.properties
  14. 22 16
      deployer/src/main/resources/example/instance.properties
  15. 32 0
      deployer/src/main/resources/example/rds_instance.properties
  16. 1 1
      deployer/src/main/resources/logback.xml
  17. 6 1
      deployer/src/main/resources/spring/default-instance.xml
  18. 6 1
      deployer/src/main/resources/spring/file-instance.xml
  19. 4 2
      deployer/src/main/resources/spring/group-instance.xml
  20. 22 44
      deployer/src/main/resources/spring/local-instance.xml
  21. 6 1
      deployer/src/main/resources/spring/memory-instance.xml
  22. 60 0
      deployer/src/main/resources/spring/tsdb/h2-tsdb.xml
  23. 63 0
      deployer/src/main/resources/spring/tsdb/mysql-tsdb.xml
  24. 8 0
      deployer/src/main/resources/spring/tsdb/sql-map/sqlmap-config.xml
  25. 45 0
      deployer/src/main/resources/spring/tsdb/sql-map/sqlmap_history.xml
  26. 51 0
      deployer/src/main/resources/spring/tsdb/sql-map/sqlmap_snapshot.xml
  27. 39 0
      deployer/src/main/resources/spring/tsdb/sql/create_table.sql
  28. 1 1
      driver/pom.xml
  29. 21 4
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/MysqlConnector.java
  30. 56 1
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/MysqlQueryExecutor.java
  31. 4 3
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/packets/client/ClientAuthenticationPacket.java
  32. 57 0
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/packets/client/RegisterSlaveCommandPacket.java
  33. 55 0
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/packets/client/SemiAckCommandPacket.java
  34. 50 5
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/socket/SocketChannel.java
  35. 35 27
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/socket/SocketChannelPool.java
  36. 12 1
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/utils/ByteHelper.java
  37. 10 0
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/utils/PacketManager.java
  38. 1 1
      example/pom.xml
  39. 1 1
      filter/pom.xml
  40. 1 1
      instance/core/pom.xml
  41. 1 1
      instance/manager/pom.xml
  42. 1 1
      instance/pom.xml
  43. 1 1
      instance/spring/pom.xml
  44. 1 1
      meta/pom.xml
  45. 1 1
      meta/src/main/java/com/alibaba/otter/canal/meta/FileMixedMetaManager.java
  46. 5 5
      meta/src/test/java/com/alibaba/otter/canal/meta/FileMixedMetaManagerTest.java
  47. 31 1
      parse/pom.xml
  48. 12 4
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/AbstractEventParser.java
  49. 1 1
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/BinlogParser.java
  50. 0 2
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/ErosaConnection.java
  51. 104 49
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/TableMeta.java
  52. 70 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/AbstractMysqlEventParser.java
  53. 29 39
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/LocalBinLogConnection.java
  54. 15 8
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/LocalBinlogEventParser.java
  55. 101 2
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlConnection.java
  56. 131 79
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlEventParser.java
  57. 19 1
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/DirectLogFetcher.java
  58. 187 98
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/LogEventConvert.java
  59. 140 66
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/TableMetaCache.java
  60. 115 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/ddl/DdlResult.java
  61. 213 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/ddl/DruidDdlParser.java
  62. 1 99
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/ddl/SimpleDdlParser.java
  63. 368 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/HttpHelper.java
  64. 334 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/RdsBinlogOpenApi.java
  65. 111 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/RdsLocalBinlogEventParser.java
  66. 493 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/DatabaseTableMeta.java
  67. 244 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/MemoryTableMeta.java
  68. 41 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/TableMetaTSDB.java
  69. 51 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/TableMetaTSDBBuilder.java
  70. 59 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/dao/MetaBaseDAO.java
  71. 52 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/dao/MetaHistoryDAO.java
  72. 148 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/dao/MetaHistoryDO.java
  73. 56 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/dao/MetaSnapshotDAO.java
  74. 110 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/dao/MetaSnapshotDO.java
  75. 1 0
      parse/src/main/java/com/alibaba/otter/canal/parse/index/FileMixedLogPositionManager.java
  76. 1 0
      parse/src/main/java/com/alibaba/otter/canal/parse/index/PeriodMixedLogPositionManager.java
  77. 22 0
      parse/src/main/resources/ddl/derby/meta_history.sql
  78. 18 0
      parse/src/main/resources/ddl/derby/meta_snapshot.sql
  79. 21 0
      parse/src/main/resources/ddl/h2/meta_history.sql
  80. 17 0
      parse/src/main/resources/ddl/h2/meta_snapshot.sql
  81. 21 0
      parse/src/main/resources/ddl/mysql/meta_history.sql
  82. 17 0
      parse/src/main/resources/ddl/mysql/meta_snapshot.sql
  83. 100 2
      parse/src/test/java/com/alibaba/otter/canal/parse/DirectLogFetcherTest.java
  84. 19 11
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/TableMetaCacheTest.java
  85. 2 2
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/group/GroupEventPaserTest.java
  86. 3 3
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/LocalBinlogDumpTest.java
  87. 8 8
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/LocalBinlogEventParserTest.java
  88. 9 3
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlDumpTest.java
  89. 3 3
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlEventParserTest.java
  90. 27 0
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/RdsBinlogOpenApiTest.java
  91. 117 0
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/RdsLocalBinlogDumpTest.java
  92. 2 2
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/SimpleDdlParserTest.java
  93. 37 0
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/MemoryTableMetaTest.java
  94. 33 0
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/MetaHistoryDAOTest.java
  95. 18 0
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/TableMetaManagerBuilderTest.java
  96. 46 0
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/TableMetaManagerTest.java
  97. BIN
      parse/src/test/resources/binlog/tsdb/mysql-bin.000001
  98. BIN
      parse/src/test/resources/binlog/tsdb/mysql-bin.000002
  99. BIN
      parse/src/test/resources/binlog/tsdb/mysql-bin.000003
  100. 64 0
      parse/src/test/resources/ddl/create.sql

+ 1 - 1
README.md

@@ -36,7 +36,7 @@
 <li>slave重做中继日志中的事件,将改变反映它自己的数据。</li>
 <li>slave重做中继日志中的事件,将改变反映它自己的数据。</li>
 </ol>
 </ol>
 <h3>canal的工作原理:</h3>
 <h3>canal的工作原理:</h3>
-<p><img width="590" src="https://camo.githubusercontent.com/46c626b4cde399db43b2634a7911a04aecf273a0/687474703a2f2f646c2e69746579652e636f6d2f75706c6f61642f6174746163686d656e742f303038302f333130372f63383762363762612d333934632d333038362d393537372d3964623035626530346339352e6a7067" alt="" height="273">
+<p><img width="590" src="http://dl.iteye.com/upload/attachment/0080/3107/c87b67ba-394c-3086-9577-9db05be04c95.jpg" alt="" height="273">
 <p>原理相对比较简单:</p>
 <p>原理相对比较简单:</p>
 <ol>
 <ol>
 <li>canal模拟mysql slave的交互协议,伪装自己为mysql slave,向mysql master发送dump协议</li>
 <li>canal模拟mysql slave的交互协议,伪装自己为mysql slave,向mysql master发送dump协议</li>

+ 1 - 1
client/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
 		<artifactId>canal</artifactId>
-		<version>1.0.25-SNAPSHOT</version>
+		<version>1.0.26-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 		<relativePath>../pom.xml</relativePath>
 	</parent>
 	</parent>
 	<groupId>com.alibaba.otter</groupId>
 	<groupId>com.alibaba.otter</groupId>

+ 1 - 1
client/src/test/java/logback.xml

@@ -8,7 +8,7 @@
 		</encoder>
 		</encoder>
 	</appender>
 	</appender>
 	
 	
-	<root level="WARN">
+	<root level="INFO">
 		<appender-ref ref="STDOUT"/>
 		<appender-ref ref="STDOUT"/>
 	</root>
 	</root>
 </configuration>
 </configuration>

+ 30 - 3
common/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
 		<artifactId>canal</artifactId>
-		<version>1.0.25-SNAPSHOT</version>
+		<version>1.0.26-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 		<relativePath>../pom.xml</relativePath>
 	</parent>
 	</parent>
 	<artifactId>canal.common</artifactId>
 	<artifactId>canal.common</artifactId>
@@ -35,8 +35,8 @@
 			<artifactId>commons-lang</artifactId>
 			<artifactId>commons-lang</artifactId>
 		</dependency>
 		</dependency>
 		<dependency>
 		<dependency>
-			<groupId>org.springframework</groupId>
-			<artifactId>spring</artifactId>
+			<groupId>commons-codec</groupId>
+			<artifactId>commons-codec</artifactId>
 		</dependency>
 		</dependency>
 		<dependency>
 		<dependency>
 			<groupId>com.alibaba</groupId>
 			<groupId>com.alibaba</groupId>
@@ -63,10 +63,37 @@
 			<groupId>org.slf4j</groupId>
 			<groupId>org.slf4j</groupId>
 			<artifactId>slf4j-api</artifactId>
 			<artifactId>slf4j-api</artifactId>
 		</dependency>
 		</dependency>
+		<dependency>
+			<groupId>org.springframework</groupId>
+			<artifactId>spring-core</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.springframework</groupId>
+			<artifactId>spring-aop</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.springframework</groupId>
+			<artifactId>spring-context</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.springframework</groupId>
+			<artifactId>spring-jdbc</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.springframework</groupId>
+			<artifactId>spring-orm</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.springframework</groupId>
+			<artifactId>spring-test</artifactId>
+			<version>${spring_version}</version>
+			<scope>test</scope>
+		</dependency>
 		<!-- junit -->
 		<!-- junit -->
 		<dependency>
 		<dependency>
 			<groupId>junit</groupId>
 			<groupId>junit</groupId>
 			<artifactId>junit</artifactId>
 			<artifactId>junit</artifactId>
+			<scope>test</scope>
 		</dependency>
 		</dependency>
 	</dependencies>
 	</dependencies>
 </project>
 </project>

+ 2 - 0
common/src/main/java/com/alibaba/otter/canal/common/utils/JsonUtils.java

@@ -10,6 +10,7 @@ import java.util.List;
 
 
 import com.alibaba.fastjson.JSON;
 import com.alibaba.fastjson.JSON;
 import com.alibaba.fastjson.TypeReference;
 import com.alibaba.fastjson.TypeReference;
+import com.alibaba.fastjson.parser.ParserConfig;
 import com.alibaba.fastjson.serializer.JSONSerializer;
 import com.alibaba.fastjson.serializer.JSONSerializer;
 import com.alibaba.fastjson.serializer.ObjectSerializer;
 import com.alibaba.fastjson.serializer.ObjectSerializer;
 import com.alibaba.fastjson.serializer.PropertyFilter;
 import com.alibaba.fastjson.serializer.PropertyFilter;
@@ -28,6 +29,7 @@ public class JsonUtils {
         SerializeConfig.getGlobalInstance().put(InetAddress.class, InetAddressSerializer.instance);
         SerializeConfig.getGlobalInstance().put(InetAddress.class, InetAddressSerializer.instance);
         SerializeConfig.getGlobalInstance().put(Inet4Address.class, InetAddressSerializer.instance);
         SerializeConfig.getGlobalInstance().put(Inet4Address.class, InetAddressSerializer.instance);
         SerializeConfig.getGlobalInstance().put(Inet6Address.class, InetAddressSerializer.instance);
         SerializeConfig.getGlobalInstance().put(Inet6Address.class, InetAddressSerializer.instance);
+        ParserConfig.getGlobalInstance().setAutoTypeSupport(true);
     }
     }
 
 
     public static <T> T unmarshalFromByte(byte[] bytes, Class<T> targetClass) {
     public static <T> T unmarshalFromByte(byte[] bytes, Class<T> targetClass) {

+ 1 - 1
dbsync/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
 		<artifactId>canal</artifactId>
-		<version>1.0.25-SNAPSHOT</version>
+		<version>1.0.26-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 		<relativePath>../pom.xml</relativePath>
 	</parent>
 	</parent>
 	<groupId>com.alibaba.otter</groupId>
 	<groupId>com.alibaba.otter</groupId>

+ 1 - 0
dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/LogBuffer.java

@@ -20,6 +20,7 @@ public class LogBuffer {
 
 
     protected int    origin, limit;
     protected int    origin, limit;
     protected int    position;
     protected int    position;
+    protected int    semival;
 
 
     protected LogBuffer(){
     protected LogBuffer(){
     }
     }

+ 7 - 2
dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/LogDecoder.java

@@ -104,9 +104,14 @@ public final class LogDecoder {
                     try {
                     try {
                         /* Decoding binary-log to event */
                         /* Decoding binary-log to event */
                         event = decode(buffer, header, context);
                         event = decode(buffer, header, context);
+                        if (event != null) {
+                            event.setSemival(buffer.semival);
+                        }
                     } catch (IOException e) {
                     } catch (IOException e) {
-                        if (logger.isWarnEnabled()) logger.warn("Decoding " + LogEvent.getTypeName(header.getType())
-                                                                + " failed from: " + context.getLogPosition(), e);
+                        if (logger.isWarnEnabled()) {
+                            logger.warn("Decoding " + LogEvent.getTypeName(header.getType()) + " failed from: "
+                                        + context.getLogPosition(), e);
+                        }
                         throw e;
                         throw e;
                     } finally {
                     } finally {
                         buffer.limit(limit); /* Restore limit */
                         buffer.limit(limit); /* Restore limit */

+ 21 - 1
dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/LogEvent.java

@@ -359,8 +359,28 @@ public abstract class LogEvent {
     protected static final Log logger = LogFactory.getLog(LogEvent.class);
     protected static final Log logger = LogFactory.getLog(LogEvent.class);
 
 
     protected final LogHeader  header;
     protected final LogHeader  header;
+    
+    /**
+     * mysql半同步semi标识
+     * 
+     * <pre>
+     * 0不需要semi ack 给mysql
+     * 1需要semi ack给mysql
+     * </pre>
+     */
+    protected int              semival;
+    
+    
+
+    public int getSemival() {
+		return semival;
+	}
+
+	public void setSemival(int semival) {
+		this.semival = semival;
+	}
 
 
-    protected LogEvent(LogHeader header){
+	protected LogEvent(LogHeader header){
         this.header = header;
         this.header = header;
     }
     }
 
 

+ 21 - 0
dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/mariadb/MariaGtidLogEvent.java

@@ -13,9 +13,30 @@ import com.taobao.tddl.dbsync.binlog.event.LogHeader;
  */
  */
 public class MariaGtidLogEvent extends IgnorableLogEvent {
 public class MariaGtidLogEvent extends IgnorableLogEvent {
 
 
+    private long gtid;
+
+    /**
+     * <pre>
+     * mariadb gtidlog event format
+     *     uint<8> GTID sequence
+     *     uint<4> Replication Domain ID
+     *     uint<1> Flags
+     * 
+     * 	if flag & FL_GROUP_COMMIT_ID
+     * 	    uint<8> commit_id
+     * 	else
+     * 	    uint<6> 0
+     * </pre>
+     */
+
     public MariaGtidLogEvent(LogHeader header, LogBuffer buffer, FormatDescriptionLogEvent descriptionEvent){
     public MariaGtidLogEvent(LogHeader header, LogBuffer buffer, FormatDescriptionLogEvent descriptionEvent){
         super(header, buffer, descriptionEvent);
         super(header, buffer, descriptionEvent);
+        gtid = buffer.getUlong64().longValue();
         // do nothing , just ignore log event
         // do nothing , just ignore log event
     }
     }
 
 
+    public long getGtid() {
+        return gtid;
+    }
+
 }
 }

+ 1 - 1
deployer/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
 		<artifactId>canal</artifactId>
-		<version>1.0.25-SNAPSHOT</version>
+		<version>1.0.26-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 		<relativePath>../pom.xml</relativePath>
 	</parent>
 	</parent>
 	<groupId>com.alibaba.otter</groupId>
 	<groupId>com.alibaba.otter</groupId>

+ 1 - 3
deployer/src/main/java/com/alibaba/otter/canal/deployer/monitor/SpringInstanceConfigMonitor.java

@@ -208,8 +208,7 @@ public class SpringInstanceConfigMonitor extends AbstractCanalLifeCycle implemen
                 action.reload(destination);
                 action.reload(destination);
                 logger.info("auto notify reload {} successful.", destination);
                 logger.info("auto notify reload {} successful.", destination);
             } catch (Throwable e) {
             } catch (Throwable e) {
-                logger.error(String.format("scan reload found[%s] but reload failed",
-                    destination), e);
+                logger.error(String.format("scan reload found[%s] but reload failed", destination), e);
             }
             }
         }
         }
     }
     }
@@ -312,5 +311,4 @@ public class SpringInstanceConfigMonitor extends AbstractCanalLifeCycle implemen
         }
         }
 
 
     }
     }
-
 }
 }

+ 5 - 0
deployer/src/main/resources/canal.properties

@@ -36,6 +36,7 @@ canal.instance.network.sendBufferSize = 16384
 canal.instance.network.soTimeout = 30
 canal.instance.network.soTimeout = 30
 
 
 # binlog filter config
 # binlog filter config
+canal.instance.filter.druid.ddl = true
 canal.instance.filter.query.dcl = false
 canal.instance.filter.query.dcl = false
 canal.instance.filter.query.dml = false
 canal.instance.filter.query.dml = false
 canal.instance.filter.query.ddl = false
 canal.instance.filter.query.ddl = false
@@ -59,9 +60,13 @@ canal.conf.dir = ../conf
 canal.auto.scan = true
 canal.auto.scan = true
 canal.auto.scan.interval = 5
 canal.auto.scan.interval = 5
 
 
+canal.instance.tsdb.spring.xml=classpath:spring/tsdb/h2-tsdb.xml
+#canal.instance.tsdb.spring.xml=classpath:spring/tsdb/mysql-tsdb.xml
+
 canal.instance.global.mode = spring 
 canal.instance.global.mode = spring 
 canal.instance.global.lazy = false
 canal.instance.global.lazy = false
 #canal.instance.global.manager.address = 127.0.0.1:1099
 #canal.instance.global.manager.address = 127.0.0.1:1099
+#canal.instance.global.spring.xml = classpath:spring/local-instance.xml
 #canal.instance.global.spring.xml = classpath:spring/memory-instance.xml
 #canal.instance.global.spring.xml = classpath:spring/memory-instance.xml
 canal.instance.global.spring.xml = classpath:spring/file-instance.xml
 canal.instance.global.spring.xml = classpath:spring/file-instance.xml
 #canal.instance.global.spring.xml = classpath:spring/default-instance.xml
 #canal.instance.global.spring.xml = classpath:spring/default-instance.xml

+ 22 - 16
deployer/src/main/resources/example/instance.properties

@@ -1,27 +1,33 @@
 #################################################
 #################################################
 ## mysql serverId
 ## mysql serverId
-canal.instance.mysql.slaveId = 1234
-
+canal.instance.mysql.slaveId=0
 # position info
 # position info
-canal.instance.master.address = 127.0.0.1:3306
-canal.instance.master.journal.name = 
-canal.instance.master.position = 
-canal.instance.master.timestamp = 
+canal.instance.master.address=127.0.0.1:3306
+canal.instance.master.journal.name=
+canal.instance.master.position=
+canal.instance.master.timestamp=
+
+
+# table meta tsdb info
+canal.instance.tsdb.enable=true
+canal.instance.tsdb.dir=${canal.file.data.dir:../conf}/${canal.instance.destination:}
+canal.instance.tsdb.url=jdbc:h2:${canal.instance.tsdb.dir}/h2;CACHE_SIZE=1000;MODE=MYSQL;
+#canal.instance.tsdb.url=jdbc:mysql://127.0.0.1:3306/canal_tsdb
+canal.instance.tsdb.dbUsername=canal
+canal.instance.tsdb.dbPassword=canal
+
 
 
-#canal.instance.standby.address = 
+#canal.instance.standby.address =
 #canal.instance.standby.journal.name =
 #canal.instance.standby.journal.name =
 #canal.instance.standby.position = 
 #canal.instance.standby.position = 
 #canal.instance.standby.timestamp = 
 #canal.instance.standby.timestamp = 
-
 # username/password
 # username/password
-canal.instance.dbUsername = canal
-canal.instance.dbPassword = canal
-canal.instance.defaultDatabaseName =
-canal.instance.connectionCharset = UTF-8
-
+canal.instance.dbUsername=canal
+canal.instance.dbPassword=canal
+canal.instance.defaultDatabaseName=test
+canal.instance.connectionCharset=UTF-8
 # table regex
 # table regex
-canal.instance.filter.regex = .*\\..*
+canal.instance.filter.regex=.*\\..*
 # table black regex
 # table black regex
-canal.instance.filter.black.regex =  
-
+canal.instance.filter.black.regex=
 #################################################
 #################################################

+ 32 - 0
deployer/src/main/resources/example/rds_instance.properties

@@ -0,0 +1,32 @@
+#################################################
+# rds openapi binlog
+canal.instance.rds.open.url=https://rds.aliyuncs.com/
+canal.instance.rds.open.accesskey=
+canal.instance.rds.open.secretkey=
+canal.instance.rds.instanceId=
+canal.instance.rds.startTime=
+canal.instance.rds.endTime=
+
+# local binlog dir
+canal.instance.parser.directory=${canal.file.data.dir:../conf}/${canal.instance.destination:}/binlog
+# position info
+canal.instance.master.address=127.0.0.1:3306
+
+# table meta tsdb info
+canal.instance.tsdb.enable=true
+canal.instance.tsdb.dir=${canal.file.data.dir:../conf}/${canal.instance.destination:}
+canal.instance.tsdb.url=jdbc:h2:${canal.instance.tsdb.dir}/h2;CACHE_SIZE=1000;MODE=MYSQL;
+#canal.instance.tsdb.url=jdbc:mysql://127.0.0.1:3306/canal_tsdb
+#canal.instance.tsdb.dbUsername=canal
+#canal.instance.tsdb.dbPassword=canal
+
+# username/password
+canal.instance.dbUsername=canal
+canal.instance.dbPassword=canal
+canal.instance.defaultDatabaseName=test
+canal.instance.connectionCharset=UTF-8
+# table regex
+canal.instance.filter.regex=test\\..*
+# table black regex
+canal.instance.filter.black.regex=
+#################################################

+ 1 - 1
deployer/src/main/resources/logback.xml

@@ -75,7 +75,7 @@
     </logger>
     </logger>
     
     
 	<root level="WARN">
 	<root level="WARN">
-		<!--<appender-ref ref="STDOUT"/>-->
+		<appender-ref ref="STDOUT"/>
 		<appender-ref ref="CANAL-ROOT" />
 		<appender-ref ref="CANAL-ROOT" />
 	</root>
 	</root>
 </configuration>
 </configuration>

+ 6 - 1
deployer/src/main/resources/spring/default-instance.xml

@@ -85,7 +85,7 @@
 
 
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
 		<property name="destination" value="${canal.instance.destination}" />
 		<property name="destination" value="${canal.instance.destination}" />
-		<property name="slaveId" value="${canal.instance.mysql.slaveId:1234}" />
+		<property name="slaveId" value="${canal.instance.mysql.slaveId:0}" />
 		<!-- 心跳配置 -->
 		<!-- 心跳配置 -->
 		<property name="detectingEnable" value="${canal.instance.detecting.enable:false}" />
 		<property name="detectingEnable" value="${canal.instance.detecting.enable:false}" />
 		<property name="detectingSQL" value="${canal.instance.detecting.sql}" />
 		<property name="detectingSQL" value="${canal.instance.detecting.sql}" />
@@ -178,9 +178,14 @@
 		<property name="filterQueryDml" value="${canal.instance.filter.query.dml:false}" />
 		<property name="filterQueryDml" value="${canal.instance.filter.query.dml:false}" />
 		<property name="filterQueryDcl" value="${canal.instance.filter.query.dcl:false}" />
 		<property name="filterQueryDcl" value="${canal.instance.filter.query.dcl:false}" />
 		<property name="filterQueryDdl" value="${canal.instance.filter.query.ddl:false}" />
 		<property name="filterQueryDdl" value="${canal.instance.filter.query.ddl:false}" />
+		<property name="useDruidDdlFilter" value="${canal.instance.filter.druid.ddl:true}" />
 		<property name="filterRows" value="${canal.instance.filter.rows:false}" />
 		<property name="filterRows" value="${canal.instance.filter.rows:false}" />
 		<property name="filterTableError" value="${canal.instance.filter.table.error:false}" />
 		<property name="filterTableError" value="${canal.instance.filter.table.error:false}" />
 		<property name="supportBinlogFormats" value="${canal.instance.binlog.format}" />
 		<property name="supportBinlogFormats" value="${canal.instance.binlog.format}" />
 		<property name="supportBinlogImages" value="${canal.instance.binlog.image}" />
 		<property name="supportBinlogImages" value="${canal.instance.binlog.image}" />
+		
+		<!--表结构相关-->
+		<property name="enableTsdb" value="${canal.instance.tsdb.enable:true}"/>
+		<property name="tsdbSpringXml" value="${canal.instance.tsdb.spring.xml:}"/>
 	</bean>
 	</bean>
 </beans>
 </beans>

+ 6 - 1
deployer/src/main/resources/spring/file-instance.xml

@@ -71,7 +71,7 @@
 
 
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
 		<property name="destination" value="${canal.instance.destination}" />
 		<property name="destination" value="${canal.instance.destination}" />
-		<property name="slaveId" value="${canal.instance.mysql.slaveId:1234}" />
+		<property name="slaveId" value="${canal.instance.mysql.slaveId:0}" />
 		<!-- 心跳配置 -->
 		<!-- 心跳配置 -->
 		<property name="detectingEnable" value="${canal.instance.detecting.enable:false}" />
 		<property name="detectingEnable" value="${canal.instance.detecting.enable:false}" />
 		<property name="detectingSQL" value="${canal.instance.detecting.sql}" />
 		<property name="detectingSQL" value="${canal.instance.detecting.sql}" />
@@ -163,9 +163,14 @@
 		<property name="filterQueryDml" value="${canal.instance.filter.query.dml:false}" />
 		<property name="filterQueryDml" value="${canal.instance.filter.query.dml:false}" />
 		<property name="filterQueryDcl" value="${canal.instance.filter.query.dcl:false}" />
 		<property name="filterQueryDcl" value="${canal.instance.filter.query.dcl:false}" />
 		<property name="filterQueryDdl" value="${canal.instance.filter.query.ddl:false}" />
 		<property name="filterQueryDdl" value="${canal.instance.filter.query.ddl:false}" />
+		<property name="useDruidDdlFilter" value="${canal.instance.filter.druid.ddl:true}" />
 		<property name="filterRows" value="${canal.instance.filter.rows:false}" />
 		<property name="filterRows" value="${canal.instance.filter.rows:false}" />
 		<property name="filterTableError" value="${canal.instance.filter.table.error:false}" />
 		<property name="filterTableError" value="${canal.instance.filter.table.error:false}" />
 		<property name="supportBinlogFormats" value="${canal.instance.binlog.format}" />
 		<property name="supportBinlogFormats" value="${canal.instance.binlog.format}" />
 		<property name="supportBinlogImages" value="${canal.instance.binlog.image}" />
 		<property name="supportBinlogImages" value="${canal.instance.binlog.image}" />
+
+		<!--表结构相关-->
+		<property name="enableTsdb" value="${canal.instance.tsdb.enable:true}"/>
+		<property name="tsdbSpringXml" value="${canal.instance.tsdb.spring.xml:}"/>
 	</bean>
 	</bean>
 </beans>
 </beans>

+ 4 - 2
deployer/src/main/resources/spring/group-instance.xml

@@ -77,7 +77,7 @@
 
 
 	<bean id="eventParser1" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
 	<bean id="eventParser1" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
 		<property name="destination" value="${canal.instance.destination}" />
 		<property name="destination" value="${canal.instance.destination}" />
-		<property name="slaveId" value="${canal.instance.mysql.slaveId:1234}" />
+		<property name="slaveId" value="${canal.instance.mysql.slaveId:0}" />
 		<!-- 心跳配置 -->
 		<!-- 心跳配置 -->
 		<property name="detectingEnable" value="${canal.instance.detecting.enable:false}" />
 		<property name="detectingEnable" value="${canal.instance.detecting.enable:false}" />
 		<property name="detectingSQL" value="${canal.instance.detecting.sql}" />
 		<property name="detectingSQL" value="${canal.instance.detecting.sql}" />
@@ -160,6 +160,7 @@
 		<property name="filterQueryDml" value="${canal.instance.filter.query.dml:false}" />
 		<property name="filterQueryDml" value="${canal.instance.filter.query.dml:false}" />
 		<property name="filterQueryDcl" value="${canal.instance.filter.query.dcl:false}" />
 		<property name="filterQueryDcl" value="${canal.instance.filter.query.dcl:false}" />
 		<property name="filterQueryDdl" value="${canal.instance.filter.query.ddl:false}" />
 		<property name="filterQueryDdl" value="${canal.instance.filter.query.ddl:false}" />
+		<property name="useDruidDdlFilter" value="${canal.instance.filter.druid.ddl:true}" />
 		<property name="filterTableError" value="${canal.instance.filter.table.error:false}" />
 		<property name="filterTableError" value="${canal.instance.filter.table.error:false}" />
 		<property name="supportBinlogFormats" value="${canal.instance.binlog.format}" />
 		<property name="supportBinlogFormats" value="${canal.instance.binlog.format}" />
 		<property name="supportBinlogImages" value="${canal.instance.binlog.image}" />
 		<property name="supportBinlogImages" value="${canal.instance.binlog.image}" />
@@ -167,7 +168,7 @@
 	
 	
 	<bean id="eventParser2" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
 	<bean id="eventParser2" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
 		<property name="destination" value="${canal.instance.destination}" />
 		<property name="destination" value="${canal.instance.destination}" />
-		<property name="slaveId" value="${canal.instance.mysql.slaveId:1234}" />
+		<property name="slaveId" value="${canal.instance.mysql.slaveId:0}" />
 		<!-- 心跳配置 -->
 		<!-- 心跳配置 -->
 		<property name="detectingEnable" value="${canal.instance.detecting.enable:false}" />
 		<property name="detectingEnable" value="${canal.instance.detecting.enable:false}" />
 		<property name="detectingSQL" value="${canal.instance.detecting.sql}" />
 		<property name="detectingSQL" value="${canal.instance.detecting.sql}" />
@@ -250,6 +251,7 @@
 		<property name="filterQueryDml" value="${canal.instance.filter.query.dml:false}" />
 		<property name="filterQueryDml" value="${canal.instance.filter.query.dml:false}" />
 		<property name="filterQueryDcl" value="${canal.instance.filter.query.dcl:false}" />
 		<property name="filterQueryDcl" value="${canal.instance.filter.query.dcl:false}" />
 		<property name="filterQueryDdl" value="${canal.instance.filter.query.ddl:false}" />
 		<property name="filterQueryDdl" value="${canal.instance.filter.query.ddl:false}" />
+		<property name="useDruidDdlFilter" value="${canal.instance.filter.druid.ddl:true}" />
 		<property name="filterRows" value="${canal.instance.filter.rows:false}" />
 		<property name="filterRows" value="${canal.instance.filter.rows:false}" />
 		<property name="filterTableError" value="${canal.instance.filter.table.error:false}" />
 		<property name="filterTableError" value="${canal.instance.filter.table.error:false}" />
 		<property name="supportBinlogFormats" value="${canal.instance.binlog.format}" />
 		<property name="supportBinlogFormats" value="${canal.instance.binlog.format}" />

+ 22 - 44
deployer/src/main/resources/spring/local-instance.xml

@@ -48,41 +48,14 @@
 		<property name="alarmHandler">
 		<property name="alarmHandler">
 			<ref local="alarmHandler" />
 			<ref local="alarmHandler" />
 		</property>
 		</property>
-		<property name="eventFetcher">
-			<ref local="eventFetcher"/>
-		</property>
-	</bean>
-	
-	<bean id="eventFetcher" class="com.alibaba.otter.canal.mq.fetch.CanalEventFetcherWithMq">
-		<property name="enable" value="${canal.instance.mq.enable}"/>
-		<property name="sendMode" value="${canal.instance.mq.sendMode}"/>
 	</bean>
 	</bean>
 	
 	
 	<!-- 报警处理类 -->
 	<!-- 报警处理类 -->
-	<bean id="alarmHandler" class="com.alibaba.otter.canal.common.alarm.MsgAlarmHandler">
-		<property name="enabled" value="${canal.alarm.enable}" />
-		<property name="environment" value="${canal.alarm.environment}" />
-		<property name="intercenterApi" value="${canal.alarm.intercenter.api}" />
-		<property name="administrators" value="${canal.alarm.administrators}" />
-	</bean>
+	<bean id="alarmHandler" class="com.alibaba.otter.canal.common.alarm.LogAlarmHandler" />
 	
 	
-	<bean id="zkClientx" class="org.springframework.beans.factory.config.MethodInvokingFactoryBean" >
-		<property name="targetClass" value="com.alibaba.otter.canal.common.zookeeper.ZkClientx" />
-		<property name="targetMethod" value="getZkClient" />
-		<property name="arguments">
-			<list>
-				<value>${canal.zkServers:127.0.0.1:2181}</value>
-			</list>
-		</property>
-	</bean>
-	
-	<bean id="metaManager" class="com.alibaba.otter.canal.meta.PeriodMixedMetaManager">
-		<property name="zooKeeperMetaManager">
-			<bean class="com.alibaba.otter.canal.meta.ZooKeeperMetaManager">
-				<property name="zkClientx" ref="zkClientx" />
-			</bean>
-		</property>
-		<property name="period" value="${canal.zookeeper.flush.period:1000}" />
+	<bean id="metaManager" class="com.alibaba.otter.canal.meta.FileMixedMetaManager">
+		<property name="dataDir" value="${canal.file.data.dir:../conf}" />
+		<property name="period" value="${canal.file.flush.period:1000}" />
 	</bean>
 	</bean>
 	
 	
 	<bean id="eventStore" class="com.alibaba.otter.canal.store.memory.MemoryEventStoreWithBuffer">
 	<bean id="eventStore" class="com.alibaba.otter.canal.store.memory.MemoryEventStoreWithBuffer">
@@ -96,10 +69,10 @@
 		<property name="eventStore" ref="eventStore" />
 		<property name="eventStore" ref="eventStore" />
 	</bean>
 	</bean>
 
 
-	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.LocalBinlogEventParser">
+	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.rds.RdsLocalBinlogEventParser">
 		<property name="destination" value="${canal.instance.destination}" />			
 		<property name="destination" value="${canal.instance.destination}" />			
 		<property name="alarmHandler" ref="alarmHandler" />
 		<property name="alarmHandler" ref="alarmHandler" />
-		
+
 		<!-- 解析过滤处理 -->
 		<!-- 解析过滤处理 -->
 		<property name="eventFilter">
 		<property name="eventFilter">
 			<bean class="com.alibaba.otter.canal.filter.aviater.AviaterRegexFilter" >
 			<bean class="com.alibaba.otter.canal.filter.aviater.AviaterRegexFilter" >
@@ -144,20 +117,25 @@
 			</bean>
 			</bean>
 		</property>
 		</property>
 		
 		
-		<!-- 解析起始位点 -->
-		<property name="masterPosition">
-			<bean class="com.alibaba.otter.canal.protocol.position.EntryPosition">
-				<property name="journalName" value="${canal.instance.master.journal.name}" />
-				<property name="position" value="${canal.instance.master.position}" />
-				<property name="timestamp" value="${canal.instance.master.timestamp}" />
-			</bean>
-		</property>
 		<property name="filterQueryDml" value="${canal.instance.filter.query.dml:false}" />
 		<property name="filterQueryDml" value="${canal.instance.filter.query.dml:false}" />
 		<property name="filterQueryDcl" value="${canal.instance.filter.query.dcl:false}" />
 		<property name="filterQueryDcl" value="${canal.instance.filter.query.dcl:false}" />
 		<property name="filterQueryDdl" value="${canal.instance.filter.query.ddl:false}" />
 		<property name="filterQueryDdl" value="${canal.instance.filter.query.ddl:false}" />
+		<property name="useDruidDdlFilter" value="${canal.instance.filter.druid.ddl:true}" />
 		<property name="filterRows" value="${canal.instance.filter.rows:false}" />
 		<property name="filterRows" value="${canal.instance.filter.rows:false}" />
 		<property name="filterTableError" value="${canal.instance.filter.table.error:false}" />
 		<property name="filterTableError" value="${canal.instance.filter.table.error:false}" />
-		<property name="needWait" value="${canal.instance.parser.needWait:true}"/>
-		<property name="directory" value="${canal.instance.parser.directory}"/>
+		<property name="needWait" value="${canal.instance.parser.needWait:false}"/>
+		<property name="directory" value="${canal.instance.parser.directory:}"/>
+		
+		<!-- rds相关 -->
+		<property name="url" value="${canal.instance.rds.open.url:}"/>
+		<property name="accesskey" value="${canal.instance.rds.open.accesskey:}"/>
+		<property name="secretkey" value="${canal.instance.rds.open.secretkey:}"/>
+		<property name="instanceId" value="${canal.instance.rds.instanceId:}"/>
+		<property name="startTime" value="${canal.instance.rds.startTime:}"/>
+		<property name="endTime" value="${canal.instance.rds.endTime:}"/>
+		
+		<!--表结构相关-->
+		<property name="enableTsdb" value="${canal.instance.tsdb.enable:true}"/>
+		<property name="tsdbSpringXml" value="${canal.instance.tsdb.spring.xml:}"/>
 	</bean>
 	</bean>
-</beans>
+</beans>

+ 6 - 1
deployer/src/main/resources/spring/memory-instance.xml

@@ -68,7 +68,7 @@
 
 
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
 		<property name="destination" value="${canal.instance.destination}" />
 		<property name="destination" value="${canal.instance.destination}" />
-		<property name="slaveId" value="${canal.instance.mysql.slaveId:1234}" />
+		<property name="slaveId" value="${canal.instance.mysql.slaveId:0}" />
 		<!-- 心跳配置 -->
 		<!-- 心跳配置 -->
 		<property name="detectingEnable" value="${canal.instance.detecting.enable:false}" />
 		<property name="detectingEnable" value="${canal.instance.detecting.enable:false}" />
 		<property name="detectingSQL" value="${canal.instance.detecting.sql}" />
 		<property name="detectingSQL" value="${canal.instance.detecting.sql}" />
@@ -151,9 +151,14 @@
 		<property name="filterQueryDml" value="${canal.instance.filter.query.dml:false}" />
 		<property name="filterQueryDml" value="${canal.instance.filter.query.dml:false}" />
 		<property name="filterQueryDcl" value="${canal.instance.filter.query.dcl:false}" />
 		<property name="filterQueryDcl" value="${canal.instance.filter.query.dcl:false}" />
 		<property name="filterQueryDdl" value="${canal.instance.filter.query.ddl:false}" />
 		<property name="filterQueryDdl" value="${canal.instance.filter.query.ddl:false}" />
+		<property name="useDruidDdlFilter" value="${canal.instance.filter.druid.ddl:true}" />
 		<property name="filterRows" value="${canal.instance.filter.rows:false}" />
 		<property name="filterRows" value="${canal.instance.filter.rows:false}" />
 		<property name="filterTableError" value="${canal.instance.filter.table.error:false}" />
 		<property name="filterTableError" value="${canal.instance.filter.table.error:false}" />
 		<property name="supportBinlogFormats" value="${canal.instance.binlog.format}" />
 		<property name="supportBinlogFormats" value="${canal.instance.binlog.format}" />
 		<property name="supportBinlogImages" value="${canal.instance.binlog.image}" />
 		<property name="supportBinlogImages" value="${canal.instance.binlog.image}" />
+		
+		<!--表结构相关-->
+		<property name="enableTsdb" value="${canal.instance.tsdb.enable:false}"/>
+		<property name="tsdbSpringXml" value="${canal.instance.tsdb.spring.xml:}"/>
 	</bean>
 	</bean>
 </beans>
 </beans>

+ 60 - 0
deployer/src/main/resources/spring/tsdb/h2-tsdb.xml

@@ -0,0 +1,60 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<beans xmlns="http://www.springframework.org/schema/beans"
+	xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:tx="http://www.springframework.org/schema/tx"
+	xmlns:aop="http://www.springframework.org/schema/aop" xmlns:lang="http://www.springframework.org/schema/lang"
+	xmlns:context="http://www.springframework.org/schema/context"
+	xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-2.0.xsd
+           http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop-2.0.xsd
+           http://www.springframework.org/schema/lang http://www.springframework.org/schema/lang/spring-lang-2.0.xsd
+           http://www.springframework.org/schema/tx http://www.springframework.org/schema/tx/spring-tx-2.0.xsd
+           http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-2.5.xsd"
+	default-autowire="byName">
+	
+	<!-- properties -->
+	<bean class="com.alibaba.otter.canal.instance.spring.support.PropertyPlaceholderConfigurer" lazy-init="false">
+		<property name="ignoreResourceNotFound" value="true" />
+		<property name="systemPropertiesModeName" value="SYSTEM_PROPERTIES_MODE_OVERRIDE"/><!-- 允许system覆盖 -->
+		<property name="locationNames">
+			<list>
+				<value>classpath:canal.properties</value>
+				<value>classpath:${canal.instance.destination:}/instance.properties</value>
+			</list>
+		</property>
+	</bean>
+	
+	<!-- 基于db的实现 -->
+	<bean id="tableMetaTSDB" class="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.DatabaseTableMeta">
+		<property name="metaHistoryDAO" ref="metaHistoryDAO"/>
+		<property name="metaSnapshotDAO" ref="metaSnapshotDAO"/>
+	</bean>
+	
+    <bean id="dataSource" class="com.alibaba.druid.pool.DruidDataSource" destroy-method="close">
+        <property name="driverClassName" value="org.h2.Driver" />
+		<property name="url" value="${canal.instance.tsdb.url:}" />
+		<property name="username" value="${canal.instance.tsdb.dbUsername:canal}" />
+		<property name="password" value="${canal.instance.tsdb.dbPassword:canal}" />
+      	<property name="maxActive" value="30" />
+        <property name="initialSize" value="0" />
+        <property name="minIdle" value="1" />
+        <property name="maxWait" value="10000" />
+        <property name="timeBetweenEvictionRunsMillis" value="60000" />
+        <property name="minEvictableIdleTimeMillis" value="300000" />
+        <property name="testWhileIdle" value="true" />
+        <property name="testOnBorrow" value="false" />
+        <property name="testOnReturn" value="false" />
+        <property name="useUnfairLock" value="true" />
+	</bean>
+
+    <bean id="sqlMapClient" class="org.springframework.orm.ibatis.SqlMapClientFactoryBean">
+        <property name="dataSource" ref="dataSource"/>
+        <property name="configLocation" value="classpath:spring/tsdb/sql-map/sqlmap-config.xml"/>
+    </bean>
+
+    <bean id="metaHistoryDAO" class="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaHistoryDAO">
+        <property name="sqlMapClient" ref="sqlMapClient"/>
+    </bean>
+
+    <bean id="metaSnapshotDAO" class="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaSnapshotDAO">
+        <property name="sqlMapClient" ref="sqlMapClient"/>
+    </bean>
+</beans>

+ 63 - 0
deployer/src/main/resources/spring/tsdb/mysql-tsdb.xml

@@ -0,0 +1,63 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<beans xmlns="http://www.springframework.org/schema/beans"
+	xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:tx="http://www.springframework.org/schema/tx"
+	xmlns:aop="http://www.springframework.org/schema/aop" xmlns:lang="http://www.springframework.org/schema/lang"
+	xmlns:context="http://www.springframework.org/schema/context"
+	xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-2.0.xsd
+           http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop-2.0.xsd
+           http://www.springframework.org/schema/lang http://www.springframework.org/schema/lang/spring-lang-2.0.xsd
+           http://www.springframework.org/schema/tx http://www.springframework.org/schema/tx/spring-tx-2.0.xsd
+           http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-2.5.xsd"
+	default-autowire="byName">
+	
+	<!-- properties -->
+	<bean class="com.alibaba.otter.canal.instance.spring.support.PropertyPlaceholderConfigurer" lazy-init="false">
+		<property name="ignoreResourceNotFound" value="true" />
+		<property name="systemPropertiesModeName" value="SYSTEM_PROPERTIES_MODE_OVERRIDE"/><!-- 允许system覆盖 -->
+		<property name="locationNames">
+			<list>
+				<value>classpath:canal.properties</value>
+				<value>classpath:${canal.instance.destination:}/instance.properties</value>
+			</list>
+		</property>
+	</bean>
+	
+	<!-- 基于db的实现 -->
+	<bean id="tableMetaTSDB" class="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.DatabaseTableMeta">
+		<property name="metaHistoryDAO" ref="metaHistoryDAO"/>
+		<property name="metaSnapshotDAO" ref="metaSnapshotDAO"/>
+	</bean>
+	
+    <bean id="dataSource" class="com.alibaba.druid.pool.DruidDataSource" destroy-method="close">
+        <property name="driverClassName" value="com.mysql.jdbc.Driver" />
+		<property name="url" value="${canal.instance.tsdb.url:}" />
+		<property name="username" value="${canal.instance.tsdb.dbUsername:canal}" />
+		<property name="password" value="${canal.instance.tsdb.dbPassword:canal}" />
+        <property name="maxActive" value="30" />
+        <property name="initialSize" value="0" />
+        <property name="minIdle" value="1" />
+        <property name="maxWait" value="10000" />
+        <property name="timeBetweenEvictionRunsMillis" value="60000" />
+        <property name="minEvictableIdleTimeMillis" value="300000" />
+        <property name="validationQuery" value="SELECT 1" />
+        <property name="exceptionSorterClassName" value="com.alibaba.druid.pool.vendor.MySqlExceptionSorter" />
+        <property name="validConnectionCheckerClassName" value="com.alibaba.druid.pool.vendor.MySqlValidConnectionChecker" />
+        <property name="testWhileIdle" value="true" />
+        <property name="testOnBorrow" value="false" />
+        <property name="testOnReturn" value="false" />
+        <property name="useUnfairLock" value="true" />
+	</bean>
+
+    <bean id="sqlMapClient" class="org.springframework.orm.ibatis.SqlMapClientFactoryBean">
+        <property name="dataSource" ref="dataSource"/>
+        <property name="configLocation" value="classpath:spring/tsdb/sql-map/sqlmap-config.xml"/>
+    </bean>
+
+    <bean id="metaHistoryDAO" class="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaHistoryDAO">
+        <property name="sqlMapClient" ref="sqlMapClient"/>
+    </bean>
+
+    <bean id="metaSnapshotDAO" class="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaSnapshotDAO">
+        <property name="sqlMapClient" ref="sqlMapClient"/>
+    </bean>
+</beans>

+ 8 - 0
deployer/src/main/resources/spring/tsdb/sql-map/sqlmap-config.xml

@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!DOCTYPE sqlMapConfig PUBLIC "-//iBATIS.com//DTD SQL Map Config 2.0//EN"
+        "http://www.ibatis.com/dtd/sql-map-config-2.dtd">
+<sqlMapConfig>
+    <settings useStatementNamespaces="true"/>
+    <sqlMap resource="spring/tsdb/sql-map/sqlmap_history.xml"/>
+    <sqlMap resource="spring/tsdb/sql-map/sqlmap_snapshot.xml"/>
+</sqlMapConfig>

+ 45 - 0
deployer/src/main/resources/spring/tsdb/sql-map/sqlmap_history.xml

@@ -0,0 +1,45 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!DOCTYPE sqlMap PUBLIC "-//ibatis.apache.org//DTD SQL Map 2.0//EN" "http://ibatis.apache.org/dtd/sql-map-2.dtd" >
+<sqlMap namespace="meta_history">
+    <typeAlias alias="metaHistoryDO" type="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaHistoryDO"/>
+    <sql id="allColumns">
+        <![CDATA[
+		gmt_create,gmt_modified,destination,binlog_file,binlog_offest,binlog_master_id,binlog_timestamp,use_schema,sql_schema,sql_table,sql_text,sql_type,extra
+        ]]>
+    </sql>
+    <sql id="allVOColumns">
+        <![CDATA[
+		a.id as id,a.gmt_create as gmtCreate,a.gmt_modified as gmtModified,
+		a.destination as destination,a.binlog_file as binlogFile,a.binlog_offest as binlogOffest,a.binlog_master_id as binlogMasterId,a.binlog_timestamp as binlogTimestamp,
+		a.use_schema as useSchema,a.sql_schema as sqlSchema,a.sql_table as sqlTable,a.sql_text as sqlText,a.sql_type as sqlType,a.extra as extra
+        ]]>
+    </sql>
+
+    <select id="findByTimestamp" parameterClass="java.util.Map" resultClass="metaHistoryDO">
+        select
+        <include refid="allVOColumns"/>
+        from meta_history a
+        <![CDATA[
+        where destination = #destination# and binlog_timestamp >= #snapshotTimestamp# and binlog_timestamp <= #timestamp#
+        order by binlog_timestamp asc,id asc
+        ]]>
+    </select>
+
+    <insert id="insert" parameterClass="metaHistoryDO">
+        insert into meta_history (<include refid="allColumns"/>)
+        values(CURRENT_TIMESTAMP,CURRENT_TIMESTAMP,#destination#,#binlogFile#,#binlogOffest#,#binlogMasterId#,#binlogTimestamp#,#useSchema#,#sqlSchema#,#sqlTable#,#sqlText#,#sqlType#,#extra#)
+    </insert>
+    
+    <delete id="deleteByName" parameterClass="java.util.Map">
+        delete from meta_history 
+        where destination=#destination#
+    </delete>
+
+
+    <delete id="deleteByGmtModified" parameterClass="java.util.Map">
+        <![CDATA[
+		delete from meta_history
+		where gmt_modified < timestamp(#timestamp#)
+        ]]>
+    </delete>
+</sqlMap>

+ 51 - 0
deployer/src/main/resources/spring/tsdb/sql-map/sqlmap_snapshot.xml

@@ -0,0 +1,51 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!DOCTYPE sqlMap PUBLIC "-//ibatis.apache.org//DTD SQL Map 2.0//EN" "http://ibatis.apache.org/dtd/sql-map-2.dtd" >
+<sqlMap namespace="meta_snapshot">
+    <typeAlias alias="metaSnapshotDO" type="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaSnapshotDO"/>
+    <typeAlias alias="tableMetaSnapshotDO"
+               type="com.alibaba.middleware.jingwei.biz.dataobject.CanalTableMetaSnapshotDO"/>
+    <sql id="allColumns">
+        <![CDATA[
+		gmt_create,gmt_modified,destination,binlog_file,binlog_offest,binlog_master_id,binlog_timestamp,data,extra
+        ]]>
+    </sql>
+    <sql id="allVOColumns">
+        <![CDATA[
+		a.id as id,a.gmt_create as gmtCreate,a.gmt_modified as gmtModified,
+		a.destination as destination,a.binlog_file as binlogFile,a.binlog_offest as binlogOffest,a.binlog_master_id as binlogMasterId,a.binlog_timestamp as binlogTimestamp,a.data as data,a.extra as extra
+        ]]>
+    </sql>
+
+    <select id="findByTimestamp" parameterClass="java.util.Map" resultClass="metaSnapshotDO">
+    	select <include refid="allVOColumns"/>
+    	<![CDATA[
+        from meta_snapshot a
+        where destination = #destination# and binlog_timestamp < #timestamp#
+        order by binlog_timestamp desc,id desc
+        limit 1
+        ]]>
+    </select>
+    
+    <insert id="insert" parameterClass="metaSnapshotDO">
+        insert into meta_snapshot (<include refid="allColumns"/>)
+        values(CURRENT_TIMESTAMP,CURRENT_TIMESTAMP,#destination#,#binlogFile#,#binlogOffest#,#binlogMasterId#,#binlogTimestamp#,#data#,#extra#)
+    </insert>
+
+    <update id="update" parameterClass="metaSnapshotDO">
+        update meta_snapshot set gmt_modified=now(),
+        binlog_file=#binlogFile#,binlog_offest=#binlogOffest#,binlog_master_id=#binlogMasterId#,binlog_timestamp=#binlogTimestamp#,data=#data#,extra=#extra#
+        where destination=#destination# and binlog_timestamp=0
+    </update>
+
+ 	<delete id="deleteByName" parameterClass="java.util.Map">
+        delete from meta_snapshot 
+        where destination=#destination#
+    </delete>
+
+    <delete id="deleteByGmtModified" parameterClass="java.util.Map">
+        <![CDATA[
+		delete from meta_snapshot
+		where gmt_modified < timestamp(#timestamp#)
+        ]]>
+    </delete>
+</sqlMap>

+ 39 - 0
deployer/src/main/resources/spring/tsdb/sql/create_table.sql

@@ -0,0 +1,39 @@
+CREATE TABLE IF NOT EXISTS `meta_snapshot` (
+  `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键',
+  `gmt_create` datetime NOT NULL COMMENT '创建时间',
+  `gmt_modified` datetime NOT NULL COMMENT '修改时间',
+  `destination` varchar(128) DEFAULT NULL COMMENT '通道名称',
+  `binlog_file` varchar(64) DEFAULT NULL COMMENT 'binlog文件名',
+  `binlog_offest` bigint(20) DEFAULT NULL COMMENT 'binlog偏移量',
+  `binlog_master_id` varchar(64) DEFAULT NULL COMMENT 'binlog节点id',
+  `binlog_timestamp` bigint(20) DEFAULT NULL COMMENT 'binlog应用的时间戳',
+  `data` longtext DEFAULT NULL COMMENT '表结构数据',
+  `extra` text DEFAULT NULL COMMENT '额外的扩展信息',
+  PRIMARY KEY (`id`),
+  UNIQUE KEY binlog_file_offest(`destination`,`binlog_master_id`,`binlog_file`,`binlog_offest`),
+  KEY `destination` (`destination`),
+  KEY `destination_timestamp` (`destination`,`binlog_timestamp`),
+  KEY `gmt_modified` (`gmt_modified`)
+) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 COMMENT='表结构记录表快照表';
+
+CREATE TABLE IF NOT EXISTS `meta_history` (
+  `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键',
+  `gmt_create` datetime NOT NULL COMMENT '创建时间',
+  `gmt_modified` datetime NOT NULL COMMENT '修改时间',
+  `destination` varchar(128) DEFAULT NULL COMMENT '通道名称',
+  `binlog_file` varchar(64) DEFAULT NULL COMMENT 'binlog文件名',
+  `binlog_offest` bigint(20) DEFAULT NULL COMMENT 'binlog偏移量',
+  `binlog_master_id` varchar(64) DEFAULT NULL COMMENT 'binlog节点id',
+  `binlog_timestamp` bigint(20) DEFAULT NULL COMMENT 'binlog应用的时间戳',
+  `use_schema` varchar(1024) DEFAULT NULL COMMENT '执行sql时对应的schema',
+  `schema` varchar(1024) DEFAULT NULL COMMENT '对应的schema',
+  `table` varchar(1024) DEFAULT NULL COMMENT '对应的table',
+  `sql` longtext DEFAULT NULL COMMENT '执行的sql',
+  `type` varchar(256) DEFAULT NULL COMMENT 'sql类型',
+  `extra` text DEFAULT NULL COMMENT '额外的扩展信息',
+  PRIMARY KEY (`id`),
+  UNIQUE KEY binlog_file_offest(`destination`,`binlog_master_id`,`binlog_file`,`binlog_offest`),
+  KEY `destination` (`destination`),
+  KEY `destination_timestamp` (`destination`,`binlog_timestamp`),
+  KEY `gmt_modified` (`gmt_modified`)
+) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 COMMENT='表结构变化明细表';

+ 1 - 1
driver/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
 		<artifactId>canal</artifactId>
-		<version>1.0.25-SNAPSHOT</version>
+		<version>1.0.26-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 		<relativePath>../pom.xml</relativePath>
 	</parent>
 	</parent>
 	<groupId>com.alibaba.otter</groupId>
 	<groupId>com.alibaba.otter</groupId>

+ 21 - 4
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/MysqlConnector.java

@@ -34,6 +34,7 @@ public class MysqlConnector {
     private byte                charsetNumber     = 33;
     private byte                charsetNumber     = 33;
     private String              defaultSchema     = "retl";
     private String              defaultSchema     = "retl";
     private int                 soTimeout         = 30 * 1000;
     private int                 soTimeout         = 30 * 1000;
+    private int                 connTimeout       = 5 * 1000;
     private int                 receiveBufferSize = 16 * 1024;
     private int                 receiveBufferSize = 16 * 1024;
     private int                 sendBufferSize    = 16 * 1024;
     private int                 sendBufferSize    = 16 * 1024;
 
 
@@ -42,6 +43,8 @@ public class MysqlConnector {
     // mysql connectinnId
     // mysql connectinnId
     private long                connectionId      = -1;
     private long                connectionId      = -1;
     private AtomicBoolean       connected         = new AtomicBoolean(false);
     private AtomicBoolean       connected         = new AtomicBoolean(false);
+    
+    public static final int timeout = 3000; // 3s
 
 
     public MysqlConnector(){
     public MysqlConnector(){
     }
     }
@@ -100,7 +103,8 @@ public class MysqlConnector {
                     MysqlUpdateExecutor executor = new MysqlUpdateExecutor(connector);
                     MysqlUpdateExecutor executor = new MysqlUpdateExecutor(connector);
                     executor.update("KILL CONNECTION " + connectionId);
                     executor.update("KILL CONNECTION " + connectionId);
                 } catch (Exception e) {
                 } catch (Exception e) {
-                    throw new IOException("KILL DUMP " + connectionId + " failure", e);
+                    // 忽略具体异常
+                    logger.info("KILL DUMP " + connectionId + " failure", e);
                 } finally {
                 } finally {
                     if (connector != null) {
                     if (connector != null) {
                         connector.disconnect();
                         connector.disconnect();
@@ -128,6 +132,7 @@ public class MysqlConnector {
         connector.setReceiveBufferSize(getReceiveBufferSize());
         connector.setReceiveBufferSize(getReceiveBufferSize());
         connector.setSendBufferSize(getSendBufferSize());
         connector.setSendBufferSize(getSendBufferSize());
         connector.setSoTimeout(getSoTimeout());
         connector.setSoTimeout(getSoTimeout());
+        connector.setConnTimeout(connTimeout);
         return connector;
         return connector;
     }
     }
 
 
@@ -142,8 +147,8 @@ public class MysqlConnector {
     }
     }
 
 
     private void negotiate(SocketChannel channel) throws IOException {
     private void negotiate(SocketChannel channel) throws IOException {
-        HeaderPacket header = PacketManager.readHeader(channel, 4);
-        byte[] body = PacketManager.readBytes(channel, header.getPacketBodyLength());
+        HeaderPacket header = PacketManager.readHeader(channel, 4, timeout);
+        byte[] body = PacketManager.readBytes(channel, header.getPacketBodyLength(), timeout);
         if (body[0] < 0) {// check field_count
         if (body[0] < 0) {// check field_count
             if (body[0] == -1) {
             if (body[0] == -1) {
                 ErrorPacket error = new ErrorPacket();
                 ErrorPacket error = new ErrorPacket();
@@ -182,7 +187,7 @@ public class MysqlConnector {
         header = null;
         header = null;
         header = PacketManager.readHeader(channel, 4);
         header = PacketManager.readHeader(channel, 4);
         body = null;
         body = null;
-        body = PacketManager.readBytes(channel, header.getPacketBodyLength());
+        body = PacketManager.readBytes(channel, header.getPacketBodyLength(), timeout);
         assert body != null;
         assert body != null;
         if (body[0] < 0) {
         if (body[0] < 0) {
             if (body[0] == -1) {
             if (body[0] == -1) {
@@ -324,4 +329,16 @@ public class MysqlConnector {
         this.dumping = dumping;
         this.dumping = dumping;
     }
     }
 
 
+    public int getConnTimeout() {
+        return connTimeout;
+    }
+
+    public void setConnTimeout(int connTimeout) {
+        this.connTimeout = connTimeout;
+    }
+
+    public String getPassword() {
+        return password;
+    }
+
 }
 }

+ 56 - 1
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/MysqlQueryExecutor.java

@@ -6,6 +6,7 @@ import java.util.List;
 
 
 import com.alibaba.otter.canal.parse.driver.mysql.packets.HeaderPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.HeaderPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.client.QueryCommandPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.client.QueryCommandPacket;
+import com.alibaba.otter.canal.parse.driver.mysql.packets.server.EOFPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.server.ErrorPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.server.ErrorPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.server.FieldPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.server.FieldPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.server.ResultSetHeaderPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.server.ResultSetHeaderPacket;
@@ -93,11 +94,65 @@ public class MysqlQueryExecutor {
         return resultSet;
         return resultSet;
     }
     }
 
 
-    private void readEofPacket() throws IOException {
+    public List<ResultSetPacket> queryMulti(String queryString) throws IOException {
+        QueryCommandPacket cmd = new QueryCommandPacket();
+        cmd.setQueryString(queryString);
+        byte[] bodyBytes = cmd.toBytes();
+        PacketManager.writeBody(channel, bodyBytes);
+        List<ResultSetPacket> resultSets = new ArrayList<ResultSetPacket>();
+        boolean moreResult = true;
+        while (moreResult) {
+            byte[] body = readNextPacket();
+            if (body[0] < 0) {
+                ErrorPacket packet = new ErrorPacket();
+                packet.fromBytes(body);
+                throw new IOException(packet + "\n with command: " + queryString);
+            }
+
+            ResultSetHeaderPacket rsHeader = new ResultSetHeaderPacket();
+            rsHeader.fromBytes(body);
+
+            List<FieldPacket> fields = new ArrayList<FieldPacket>();
+            for (int i = 0; i < rsHeader.getColumnCount(); i++) {
+                FieldPacket fp = new FieldPacket();
+                fp.fromBytes(readNextPacket());
+                fields.add(fp);
+            }
+
+            moreResult = readEofPacket();
+
+            List<RowDataPacket> rowData = new ArrayList<RowDataPacket>();
+            while (true) {
+                body = readNextPacket();
+                if (body[0] == -2) {
+                    break;
+                }
+                RowDataPacket rowDataPacket = new RowDataPacket();
+                rowDataPacket.fromBytes(body);
+                rowData.add(rowDataPacket);
+            }
+
+            ResultSetPacket resultSet = new ResultSetPacket();
+            resultSet.getFieldDescriptors().addAll(fields);
+            for (RowDataPacket r : rowData) {
+                resultSet.getFieldValues().addAll(r.getColumns());
+            }
+            resultSet.setSourceAddress(channel.getRemoteSocketAddress());
+            resultSets.add(resultSet);
+        }
+
+        return resultSets;
+    }
+
+    private boolean readEofPacket() throws IOException {
         byte[] eofBody = readNextPacket();
         byte[] eofBody = readNextPacket();
+        EOFPacket packet = new EOFPacket();
+        packet.fromBytes(eofBody);
         if (eofBody[0] != -2) {
         if (eofBody[0] != -2) {
             throw new IOException("EOF Packet is expected, but packet with field_count=" + eofBody[0] + " is found.");
             throw new IOException("EOF Packet is expected, but packet with field_count=" + eofBody[0] + " is found.");
         }
         }
+
+        return (packet.statusFlag & 0x0008) != 0;
     }
     }
 
 
     protected byte[] readNextPacket() throws IOException {
     protected byte[] readNextPacket() throws IOException {

+ 4 - 3
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/packets/client/ClientAuthenticationPacket.java

@@ -47,10 +47,11 @@ public class ClientAuthenticationPacket extends PacketWithHeaderPacket {
         /**
         /**
          * CLIENT_LONG_PASSWORD CLIENT_LONG_FLAG CLIENT_PROTOCOL_41
          * CLIENT_LONG_PASSWORD CLIENT_LONG_FLAG CLIENT_PROTOCOL_41
          * CLIENT_INTERACTIVE CLIENT_TRANSACTIONS CLIENT_SECURE_CONNECTION
          * CLIENT_INTERACTIVE CLIENT_TRANSACTIONS CLIENT_SECURE_CONNECTION
+         * CLIENT_MULTI_STATEMENTS;
          */
          */
-        ByteHelper.writeUnsignedIntLittleEndian(1 | 4 | 512 | 8192 | 32768, out); // remove
-                                                                                  // client_interactive
-                                                                                  // feature
+        ByteHelper.writeUnsignedIntLittleEndian(1 | 4 | 512 | 8192 | 32768 | 0x00010000, out); // remove
+        // client_interactive
+        // feature
 
 
         // 2. write max_packet_size
         // 2. write max_packet_size
         ByteHelper.writeUnsignedIntLittleEndian(MSC.MAX_PACKET_LENGTH, out);
         ByteHelper.writeUnsignedIntLittleEndian(MSC.MAX_PACKET_LENGTH, out);

+ 57 - 0
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/packets/client/RegisterSlaveCommandPacket.java

@@ -0,0 +1,57 @@
+package com.alibaba.otter.canal.parse.driver.mysql.packets.client;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+
+import com.alibaba.otter.canal.parse.driver.mysql.packets.CommandPacket;
+import com.alibaba.otter.canal.parse.driver.mysql.utils.ByteHelper;
+
+/**
+ * COM_REGISTER_SLAVE
+ * 
+ * @author zhibinliu
+ * @since 1.0.24
+ */
+public class RegisterSlaveCommandPacket extends CommandPacket {
+
+    public String reportHost;
+    public int    reportPort;
+    public String reportUser;
+    public String reportPasswd;
+    public long   serverId;
+
+    public RegisterSlaveCommandPacket(){
+        setCommand((byte) 0x15);
+    }
+
+    public void fromBytes(byte[] data) {
+        // bypass
+    }
+
+    public static byte[] toLH(int n) {
+        byte[] b = new byte[4];
+        b[0] = (byte) (n & 0xff);
+        b[1] = (byte) (n >> 8 & 0xff);
+        b[2] = (byte) (n >> 16 & 0xff);
+        b[3] = (byte) (n >> 24 & 0xff);
+        return b;
+    }
+
+    public byte[] toBytes() throws IOException {
+        ByteArrayOutputStream out = new ByteArrayOutputStream();
+        out.write(getCommand());
+        ByteHelper.writeUnsignedIntLittleEndian(serverId, out);
+        out.write((byte) reportHost.getBytes().length);
+        ByteHelper.writeFixedLengthBytesFromStart(reportHost.getBytes(), reportHost.getBytes().length, out);
+        out.write((byte) reportUser.getBytes().length);
+        ByteHelper.writeFixedLengthBytesFromStart(reportUser.getBytes(), reportUser.getBytes().length, out);
+        out.write((byte) reportPasswd.getBytes().length);
+        ByteHelper.writeFixedLengthBytesFromStart(reportPasswd.getBytes(), reportPasswd.getBytes().length, out);
+        ByteHelper.writeUnsignedShortLittleEndian(reportPort, out);
+        ByteHelper.writeUnsignedIntLittleEndian(0, out);// Fake
+                                                        // rpl_recovery_rank
+        ByteHelper.writeUnsignedIntLittleEndian(0, out);// master id
+        return out.toByteArray();
+    }
+
+}

+ 55 - 0
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/packets/client/SemiAckCommandPacket.java

@@ -0,0 +1,55 @@
+package com.alibaba.otter.canal.parse.driver.mysql.packets.client;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+
+import org.apache.commons.lang.StringUtils;
+
+import com.alibaba.otter.canal.parse.driver.mysql.packets.CommandPacket;
+import com.alibaba.otter.canal.parse.driver.mysql.utils.ByteHelper;
+
+/**
+ * semi ack command
+ * 
+ * @author amos_chen
+ */
+public class SemiAckCommandPacket extends CommandPacket {
+
+    public long   binlogPosition;
+    public String binlogFileName;
+
+    public SemiAckCommandPacket(){
+
+    }
+
+    @Override
+    public void fromBytes(byte[] data) throws IOException {
+    }
+
+    /**
+     * <pre>
+     * Bytes                        Name
+     *  --------------------------------------------------------
+     *  Bytes                        Name
+     *  -----                        ----
+     *  1                            semi mark
+     *  8                            binlog position to start at (little endian)
+     *  n                            binlog file name
+     * 
+     * </pre>
+     */
+    public byte[] toBytes() throws IOException {
+        ByteArrayOutputStream out = new ByteArrayOutputStream();
+        // 0 write semi mark
+        out.write(0xef);
+        // 1 write 8 bytes for position
+        ByteHelper.write8ByteUnsignedIntLittleEndian(binlogPosition, out);
+
+        // 2 write binlog filename
+        if (StringUtils.isNotEmpty(binlogFileName)) {
+            out.write(binlogFileName.getBytes());
+        }
+        return out.toByteArray();
+    }
+
+}

+ 50 - 5
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/socket/SocketChannel.java

@@ -27,24 +27,69 @@ public class SocketChannel {
         this.channel = channel;
         this.channel = channel;
     }
     }
 
 
-    public void writeCache(ByteBuf buf) {
+    public void writeCache(ByteBuf buf) throws InterruptedException {
         synchronized (lock) {
         synchronized (lock) {
-            cache.discardReadBytes();// 回收内存
-            cache.writeBytes(buf);
+            while (true) {
+                cache.discardReadBytes();// 回收内存
+                //source buffer is empty.
+                if (!buf.isReadable()) {
+                    break;
+                }
+
+                if (cache.isWritable()) {
+                    cache.writeBytes(buf, Math.min(cache.writableBytes(), buf.readableBytes()));
+                } else {
+                    //dest buffer is full.
+                    lock.wait(100);
+                }
+            }
         }
         }
     }
     }
 
 
     public void writeChannel(byte[]... buf) throws IOException {
     public void writeChannel(byte[]... buf) throws IOException {
-        if (channel != null && channel.isWritable()) channel.writeAndFlush(Unpooled.copiedBuffer(buf));
-        else throw new IOException("write  failed  !  please checking !");
+        if (channel != null && channel.isWritable()) {
+            channel.writeAndFlush(Unpooled.copiedBuffer(buf));
+        } else {
+            throw new IOException("write  failed  !  please checking !");
+        }
     }
     }
 
 
     public byte[] read(int readSize) throws IOException {
     public byte[] read(int readSize) throws IOException {
+        do {
+            if (readSize > cache.readableBytes()) {
+                if (null == channel) {
+                    throw new java.nio.channels.ClosedByInterruptException();
+                }
+                synchronized (this) {
+                    try {
+                        wait(100);
+                    } catch (InterruptedException e) {
+                        throw new java.nio.channels.ClosedByInterruptException();
+                    }
+                }
+            } else {
+                byte[] back = new byte[readSize];
+                synchronized (lock) {
+                    cache.readBytes(back);
+                }
+                return back;
+            }
+        } while (true);
+    }
+    
+    public byte[] read(int readSize, int timeout) throws IOException {
+        int accumulatedWaitTime = 0;
         do {
         do {
             if (readSize > cache.readableBytes()) {
             if (readSize > cache.readableBytes()) {
                 if (null == channel) {
                 if (null == channel) {
                     throw new IOException("socket has Interrupted !");
                     throw new IOException("socket has Interrupted !");
                 }
                 }
+
+                accumulatedWaitTime += 100;
+                if (accumulatedWaitTime > timeout) {
+                    throw new IOException("socket read timeout occured !");
+                }
+
                 synchronized (this) {
                 synchronized (this) {
                     try {
                     try {
                         wait(100);
                         wait(100);

+ 35 - 27
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/socket/SocketChannelPool.java

@@ -6,9 +6,8 @@ import io.netty.buffer.PooledByteBufAllocator;
 import io.netty.channel.AdaptiveRecvByteBufAllocator;
 import io.netty.channel.AdaptiveRecvByteBufAllocator;
 import io.netty.channel.Channel;
 import io.netty.channel.Channel;
 import io.netty.channel.ChannelFuture;
 import io.netty.channel.ChannelFuture;
-import io.netty.channel.ChannelFutureListener;
 import io.netty.channel.ChannelHandlerContext;
 import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.ChannelInboundHandlerAdapter;
+import io.netty.channel.SimpleChannelInboundHandler;
 import io.netty.channel.ChannelInitializer;
 import io.netty.channel.ChannelInitializer;
 import io.netty.channel.ChannelOption;
 import io.netty.channel.ChannelOption;
 import io.netty.channel.EventLoopGroup;
 import io.netty.channel.EventLoopGroup;
@@ -20,8 +19,10 @@ import java.io.IOException;
 import java.net.SocketAddress;
 import java.net.SocketAddress;
 import java.util.Map;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
 
 
-import com.alibaba.otter.canal.common.utils.BooleanMutex;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 /**
 /**
  * @author luoyaogui 实现channel的管理(监听连接、读数据、回收) 2016-12-28
  * @author luoyaogui 实现channel的管理(监听连接、读数据、回收) 2016-12-28
@@ -29,9 +30,10 @@ import com.alibaba.otter.canal.common.utils.BooleanMutex;
 @SuppressWarnings({ "rawtypes", "deprecation" })
 @SuppressWarnings({ "rawtypes", "deprecation" })
 public abstract class SocketChannelPool {
 public abstract class SocketChannelPool {
 
 
-    private static EventLoopGroup              group     = new NioEventLoopGroup();                        // 非阻塞IO线程组
-    private static Bootstrap                   boot      = new Bootstrap();                                // 主
+    private static EventLoopGroup              group     = new NioEventLoopGroup();                         // 非阻塞IO线程组
+    private static Bootstrap                   boot      = new Bootstrap();                                 // 主
     private static Map<Channel, SocketChannel> chManager = new ConcurrentHashMap<Channel, SocketChannel>();
     private static Map<Channel, SocketChannel> chManager = new ConcurrentHashMap<Channel, SocketChannel>();
+    private static final Logger                logger    = LoggerFactory.getLogger(SocketChannelPool.class);
 
 
     static {
     static {
         boot.group(group)
         boot.group(group)
@@ -54,31 +56,25 @@ public abstract class SocketChannelPool {
     }
     }
 
 
     public static SocketChannel open(SocketAddress address) throws Exception {
     public static SocketChannel open(SocketAddress address) throws Exception {
-        final SocketChannel socket = new SocketChannel();
-        final BooleanMutex mutex = new BooleanMutex(false);
-        boot.connect(address).addListener(new ChannelFutureListener() {
-
-            @Override
-            public void operationComplete(ChannelFuture arg0) throws Exception {
-                if (arg0.isSuccess()) {
-                    socket.setChannel(arg0.channel());
-                }
+        SocketChannel socket = null;
+        ChannelFuture future = boot.connect(address).sync();
 
 
-                mutex.set(true);
-            }
-        });
-        // wait for complete
-        mutex.get();
-        if (null == socket.getChannel()) {
+        if (future.isSuccess()) {
+            future.channel().pipeline().get(BusinessHandler.class).latch.await();
+            socket = chManager.get(future.channel());
+        }
+
+        if (null == socket) {
             throw new IOException("can't create socket!");
             throw new IOException("can't create socket!");
         }
         }
-        chManager.put(socket.getChannel(), socket);
+
         return socket;
         return socket;
     }
     }
 
 
-    public static class BusinessHandler extends ChannelInboundHandlerAdapter {
+    public static class BusinessHandler extends SimpleChannelInboundHandler<ByteBuf> {
 
 
-        private SocketChannel socket = null;
+        private SocketChannel        socket = null;
+        private final CountDownLatch latch  = new CountDownLatch(1);
 
 
         @Override
         @Override
         public void channelInactive(ChannelHandlerContext ctx) throws Exception {
         public void channelInactive(ChannelHandlerContext ctx) throws Exception {
@@ -87,16 +83,28 @@ public abstract class SocketChannelPool {
         }
         }
 
 
         @Override
         @Override
-        public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
-            if (null == socket) socket = chManager.get(ctx.channel());
+        public void channelActive(ChannelHandlerContext ctx) throws Exception {
+            socket = new SocketChannel();
+            socket.setChannel(ctx.channel());
+            chManager.put(ctx.channel(), socket);
+            latch.countDown();
+            super.channelActive(ctx);
+        }
+
+        @Override
+        protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Exception {
             if (socket != null) {
             if (socket != null) {
-                socket.writeCache((ByteBuf) msg);
+                socket.writeCache(msg);
+            } else {
+                // TODO: need graceful error handler.
+                logger.error("no socket available.");
             }
             }
-            ReferenceCountUtil.release(msg);// 添加防止内存泄漏的
         }
         }
 
 
         @Override
         @Override
         public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
         public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
+            //need output error for troubeshooting.
+            logger.error("business error.", cause);
             ctx.close();
             ctx.close();
         }
         }
     }
     }

+ 12 - 1
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/utils/ByteHelper.java

@@ -108,7 +108,18 @@ public abstract class ByteHelper {
 
 
         return out.toByteArray();
         return out.toByteArray();
     }
     }
-
+    
+    public static void write8ByteUnsignedIntLittleEndian(long data, ByteArrayOutputStream out) {
+        out.write((byte) (data & 0xFF));
+        out.write((byte) (data >>> 8));
+        out.write((byte) (data >>> 16));
+        out.write((byte) (data >>> 24));
+        out.write((byte) (data >>> 32));
+        out.write((byte) (data >>> 40));
+        out.write((byte) (data >>> 48));
+        out.write((byte) (data >>> 56));
+    }
+    
     public static void writeUnsignedIntLittleEndian(long data, ByteArrayOutputStream out) {
     public static void writeUnsignedIntLittleEndian(long data, ByteArrayOutputStream out) {
         out.write((byte) (data & 0xFF));
         out.write((byte) (data & 0xFF));
         out.write((byte) (data >>> 8));
         out.write((byte) (data >>> 8));

+ 10 - 0
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/utils/PacketManager.java

@@ -13,9 +13,19 @@ public abstract class PacketManager {
         return header;
         return header;
     }
     }
 
 
+    public static HeaderPacket readHeader(SocketChannel ch, int len, int timeout) throws IOException {
+    	HeaderPacket header = new HeaderPacket();
+    	header.fromBytes(ch.read(len, timeout));
+    	return header;
+    }
+
     public static byte[] readBytes(SocketChannel ch, int len) throws IOException {
     public static byte[] readBytes(SocketChannel ch, int len) throws IOException {
         return ch.read(len);
         return ch.read(len);
     }
     }
+    
+    public static byte[] readBytes(SocketChannel ch, int len, int timeout) throws IOException {
+        return ch.read(len, timeout);
+    }
 
 
     public static void writePkg(SocketChannel ch, byte[]... srcs) throws IOException {
     public static void writePkg(SocketChannel ch, byte[]... srcs) throws IOException {
         ch.writeChannel(srcs);
         ch.writeChannel(srcs);

+ 1 - 1
example/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
 		<artifactId>canal</artifactId>
-		<version>1.0.25-SNAPSHOT</version>
+		<version>1.0.26-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 		<relativePath>../pom.xml</relativePath>
 	</parent>
 	</parent>
 	<groupId>com.alibaba.otter</groupId>
 	<groupId>com.alibaba.otter</groupId>

+ 1 - 1
filter/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
 		<artifactId>canal</artifactId>
-		<version>1.0.25-SNAPSHOT</version>
+		<version>1.0.26-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 		<relativePath>../pom.xml</relativePath>
 	</parent>
 	</parent>
 	<groupId>com.alibaba.otter</groupId>
 	<groupId>com.alibaba.otter</groupId>

+ 1 - 1
instance/core/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
 		<artifactId>canal</artifactId>
-		<version>1.0.25-SNAPSHOT</version>
+		<version>1.0.26-SNAPSHOT</version>
 		<relativePath>../../pom.xml</relativePath>
 		<relativePath>../../pom.xml</relativePath>
 	</parent>
 	</parent>
 	<artifactId>canal.instance.core</artifactId>
 	<artifactId>canal.instance.core</artifactId>

+ 1 - 1
instance/manager/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
 		<artifactId>canal</artifactId>
-		<version>1.0.25-SNAPSHOT</version>
+		<version>1.0.26-SNAPSHOT</version>
 		<relativePath>../../pom.xml</relativePath>
 		<relativePath>../../pom.xml</relativePath>
 	</parent>
 	</parent>
 	<groupId>com.alibaba.otter</groupId>
 	<groupId>com.alibaba.otter</groupId>

+ 1 - 1
instance/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
 		<artifactId>canal</artifactId>
-		<version>1.0.25-SNAPSHOT</version>
+		<version>1.0.26-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 		<relativePath>../pom.xml</relativePath>
 	</parent>
 	</parent>
 	<groupId>com.alibaba.otter</groupId>
 	<groupId>com.alibaba.otter</groupId>

+ 1 - 1
instance/spring/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
 		<artifactId>canal</artifactId>
-		<version>1.0.25-SNAPSHOT</version>
+		<version>1.0.26-SNAPSHOT</version>
 		<relativePath>../../pom.xml</relativePath>
 		<relativePath>../../pom.xml</relativePath>
 	</parent>
 	</parent>
 	<groupId>com.alibaba.otter</groupId>
 	<groupId>com.alibaba.otter</groupId>

+ 1 - 1
meta/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
 		<artifactId>canal</artifactId>
-		<version>1.0.25-SNAPSHOT</version>
+		<version>1.0.26-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 		<relativePath>../pom.xml</relativePath>
 	</parent>
 	</parent>
 	<groupId>com.alibaba.otter</groupId>
 	<groupId>com.alibaba.otter</groupId>

+ 1 - 1
meta/src/main/java/com/alibaba/otter/canal/meta/FileMixedMetaManager.java

@@ -367,7 +367,7 @@ public class FileMixedMetaManager extends MemoryMetaManager implements CanalMeta
         this.dataDir = new File(dataDir);
         this.dataDir = new File(dataDir);
     }
     }
 
 
-    public void setDataDir(File dataDir) {
+    public void setDataDirByFile(File dataDir) {
         this.dataDir = dataDir;
         this.dataDir = dataDir;
     }
     }
 
 

+ 5 - 5
meta/src/test/java/com/alibaba/otter/canal/meta/FileMixedMetaManagerTest.java

@@ -31,7 +31,7 @@ public class FileMixedMetaManagerTest extends AbstractMetaManagerTest {
     @Test
     @Test
     public void testSubscribeAll() {
     public void testSubscribeAll() {
         FileMixedMetaManager metaManager = new FileMixedMetaManager();
         FileMixedMetaManager metaManager = new FileMixedMetaManager();
-        metaManager.setDataDir(dataDir);
+        metaManager.setDataDirByFile(dataDir);
         metaManager.setPeriod(100);
         metaManager.setPeriod(100);
 
 
         metaManager.start();
         metaManager.start();
@@ -40,7 +40,7 @@ public class FileMixedMetaManagerTest extends AbstractMetaManagerTest {
         sleep(2000L);
         sleep(2000L);
         // 重新构建一次,能获得上一次zk上的记录
         // 重新构建一次,能获得上一次zk上的记录
         FileMixedMetaManager metaManager2 = new FileMixedMetaManager();
         FileMixedMetaManager metaManager2 = new FileMixedMetaManager();
-        metaManager2.setDataDir(dataDir);
+        metaManager2.setDataDirByFile(dataDir);
         metaManager2.setPeriod(100);
         metaManager2.setPeriod(100);
         metaManager2.start();
         metaManager2.start();
 
 
@@ -52,7 +52,7 @@ public class FileMixedMetaManagerTest extends AbstractMetaManagerTest {
     @Test
     @Test
     public void testBatchAll() {
     public void testBatchAll() {
         FileMixedMetaManager metaManager = new FileMixedMetaManager();
         FileMixedMetaManager metaManager = new FileMixedMetaManager();
-        metaManager.setDataDir(dataDir);
+        metaManager.setDataDirByFile(dataDir);
         metaManager.setPeriod(100);
         metaManager.setPeriod(100);
 
 
         metaManager.start();
         metaManager.start();
@@ -67,7 +67,7 @@ public class FileMixedMetaManagerTest extends AbstractMetaManagerTest {
     @Test
     @Test
     public void testCursorAll() {
     public void testCursorAll() {
         FileMixedMetaManager metaManager = new FileMixedMetaManager();
         FileMixedMetaManager metaManager = new FileMixedMetaManager();
-        metaManager.setDataDir(dataDir);
+        metaManager.setDataDirByFile(dataDir);
         metaManager.setPeriod(100);
         metaManager.setPeriod(100);
         metaManager.start();
         metaManager.start();
 
 
@@ -76,7 +76,7 @@ public class FileMixedMetaManagerTest extends AbstractMetaManagerTest {
         sleep(1000L);
         sleep(1000L);
         // 重新构建一次,能获得上一次zk上的记录
         // 重新构建一次,能获得上一次zk上的记录
         FileMixedMetaManager metaManager2 = new FileMixedMetaManager();
         FileMixedMetaManager metaManager2 = new FileMixedMetaManager();
-        metaManager2.setDataDir(dataDir);
+        metaManager2.setDataDirByFile(dataDir);
         metaManager2.setPeriod(100);
         metaManager2.setPeriod(100);
         metaManager2.start();
         metaManager2.start();
 
 

+ 31 - 1
parse/pom.xml

@@ -3,7 +3,7 @@
 	<parent>
 	<parent>
 		<groupId>com.alibaba.otter</groupId>
 		<groupId>com.alibaba.otter</groupId>
 		<artifactId>canal</artifactId>
 		<artifactId>canal</artifactId>
-		<version>1.0.25-SNAPSHOT</version>
+		<version>1.0.26-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 		<relativePath>../pom.xml</relativePath>
 	</parent>
 	</parent>
 	<artifactId>canal.parse</artifactId>
 	<artifactId>canal.parse</artifactId>
@@ -45,11 +45,41 @@
 			<artifactId>canal.parse.driver</artifactId>
 			<artifactId>canal.parse.driver</artifactId>
 			<version>${project.version}</version>
 			<version>${project.version}</version>
 		</dependency>
 		</dependency>
+		<dependency>
+			<groupId>com.alibaba</groupId>
+			<artifactId>druid</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>mysql</groupId>
+			<artifactId>mysql-connector-java</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.ibatis</groupId>
+			<artifactId>ibatis-sqlmap</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>com.h2database</groupId>
+			<artifactId>h2</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.httpcomponents</groupId>
+			<artifactId>httpclient</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.commons</groupId>
+			<artifactId>commons-compress</artifactId>
+		</dependency>
 		<!-- test dependency -->
 		<!-- test dependency -->
 		<dependency>
 		<dependency>
 			<groupId>junit</groupId>
 			<groupId>junit</groupId>
 			<artifactId>junit</artifactId>
 			<artifactId>junit</artifactId>
 			<scope>test</scope>
 			<scope>test</scope>
 		</dependency>
 		</dependency>
+		<dependency>
+			<groupId>org.springframework</groupId>
+			<artifactId>spring-test</artifactId>
+			<scope>test</scope>
+		</dependency>
+
 	</dependencies>
 	</dependencies>
 </project>
 </project>

+ 12 - 4
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/AbstractEventParser.java

@@ -96,6 +96,10 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
     protected void preDump(ErosaConnection connection) {
     protected void preDump(ErosaConnection connection) {
     }
     }
 
 
+    protected boolean processTableMeta(EntryPosition position) {
+        return true;
+    }
+
     protected void afterDump(ErosaConnection connection) {
     protected void afterDump(ErosaConnection connection) {
     }
     }
 
 
@@ -145,7 +149,6 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
                 ErosaConnection erosaConnection = null;
                 ErosaConnection erosaConnection = null;
                 while (running) {
                 while (running) {
                     try {
                     try {
-
                         // 开始执行replication
                         // 开始执行replication
                         // 1. 构造Erosa连接
                         // 1. 构造Erosa连接
                         erosaConnection = buildErosaConnection();
                         erosaConnection = buildErosaConnection();
@@ -163,6 +166,11 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
                         if (startPosition == null) {
                         if (startPosition == null) {
                             throw new CanalParseException("can't find start position for " + destination);
                             throw new CanalParseException("can't find start position for " + destination);
                         }
                         }
+
+                        if (!processTableMeta(startPosition)) {
+                            throw new CanalParseException("can't find init table meta for " + destination
+                                                          + " with position : " + startPosition);
+                        }
                         logger.info("find start position : {}", startPosition.toString());
                         logger.info("find start position : {}", startPosition.toString());
                         // 重新链接,因为在找position过程中可能有状态,需要断开后重建
                         // 重新链接,因为在找position过程中可能有状态,需要断开后重建
                         erosaConnection.reconnect();
                         erosaConnection.reconnect();
@@ -173,7 +181,7 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
 
 
                             public boolean sink(EVENT event) {
                             public boolean sink(EVENT event) {
                                 try {
                                 try {
-                                    CanalEntry.Entry entry = parseAndProfilingIfNecessary(event);
+                                    CanalEntry.Entry entry = parseAndProfilingIfNecessary(event, false);
 
 
                                     if (!running) {
                                     if (!running) {
                                         return false;
                                         return false;
@@ -320,13 +328,13 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
         return result;
         return result;
     }
     }
 
 
-    protected CanalEntry.Entry parseAndProfilingIfNecessary(EVENT bod) throws Exception {
+    protected CanalEntry.Entry parseAndProfilingIfNecessary(EVENT bod, boolean isSeek) throws Exception {
         long startTs = -1;
         long startTs = -1;
         boolean enabled = getProfilingEnabled();
         boolean enabled = getProfilingEnabled();
         if (enabled) {
         if (enabled) {
             startTs = System.currentTimeMillis();
             startTs = System.currentTimeMillis();
         }
         }
-        CanalEntry.Entry event = binlogParser.parse(bod);
+        CanalEntry.Entry event = binlogParser.parse(bod, isSeek);
         if (enabled) {
         if (enabled) {
             this.parsingInterval = System.currentTimeMillis() - startTs;
             this.parsingInterval = System.currentTimeMillis() - startTs;
         }
         }

+ 1 - 1
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/BinlogParser.java

@@ -11,7 +11,7 @@ import com.alibaba.otter.canal.protocol.CanalEntry;
  */
  */
 public interface BinlogParser<T> extends CanalLifeCycle {
 public interface BinlogParser<T> extends CanalLifeCycle {
 
 
-    CanalEntry.Entry parse(T event) throws CanalParseException;
+    CanalEntry.Entry parse(T event, boolean isSeek) throws CanalParseException;
 
 
     void reset();
     void reset();
 }
 }

+ 0 - 2
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/ErosaConnection.java

@@ -15,8 +15,6 @@ public interface ErosaConnection {
 
 
     public void disconnect() throws IOException;
     public void disconnect() throws IOException;
 
 
-    public boolean isConnected();
-
     /**
     /**
      * 用于快速数据查找,和dump的区别在于,seek会只给出部分的数据
      * 用于快速数据查找,和dump的区别在于,seek会只给出部分的数据
      */
      */

+ 104 - 49
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/TableMeta.java

@@ -3,51 +3,79 @@ package com.alibaba.otter.canal.parse.inbound;
 import java.util.ArrayList;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.List;
 
 
-import org.apache.commons.lang.StringUtils;
-
 import com.taobao.tddl.dbsync.binlog.event.TableMapLogEvent;
 import com.taobao.tddl.dbsync.binlog.event.TableMapLogEvent;
+import org.apache.commons.lang.StringUtils;
 
 
 /**
 /**
  * 描述数据meta对象,mysql binlog中对应的{@linkplain TableMapLogEvent}包含的信息不全
  * 描述数据meta对象,mysql binlog中对应的{@linkplain TableMapLogEvent}包含的信息不全
- * 
+ *
  * <pre>
  * <pre>
  * 1. 主键信息
  * 1. 主键信息
  * 2. column name
  * 2. column name
  * 3. unsigned字段
  * 3. unsigned字段
  * </pre>
  * </pre>
- * 
+ *
  * @author jianghang 2013-1-18 下午12:24:59
  * @author jianghang 2013-1-18 下午12:24:59
  * @version 1.0.0
  * @version 1.0.0
  */
  */
 public class TableMeta {
 public class TableMeta {
 
 
-    private String          fullName; // schema.table
-    private List<FieldMeta> fileds;
+    private String          schema;
+    private String          table;
+    private List<FieldMeta> fields = new ArrayList<TableMeta.FieldMeta>();
+    private String          ddl;                                          // 表结构的DDL语句
+
+    public TableMeta(){
 
 
-    public TableMeta(String fullName, List<FieldMeta> fileds){
-        this.fullName = fullName;
-        this.fileds = fileds;
+    }
+
+    public TableMeta(String schema, String table, List<FieldMeta> fields){
+        this.schema = schema;
+        this.table = table;
+        this.fields = fields;
     }
     }
 
 
     public String getFullName() {
     public String getFullName() {
-        return fullName;
+        return schema + "." + table;
+    }
+
+    public String getSchema() {
+        return schema;
     }
     }
 
 
-    public void setFullName(String fullName) {
-        this.fullName = fullName;
+    public void setSchema(String schema) {
+        this.schema = schema;
     }
     }
 
 
-    public List<FieldMeta> getFileds() {
-        return fileds;
+    public String getTable() {
+        return table;
     }
     }
 
 
-    public void setFileds(List<FieldMeta> fileds) {
-        this.fileds = fileds;
+    public void setTable(String table) {
+        this.table = table;
+    }
+
+    public List<FieldMeta> getFields() {
+        return fields;
+    }
+
+    public void setFields(List<FieldMeta> fileds) {
+        this.fields = fileds;
+    }
+
+    public FieldMeta getFieldMetaByName(String name) {
+        for (FieldMeta meta : fields) {
+            if (meta.getColumnName().equalsIgnoreCase(name)) {
+                return meta;
+            }
+        }
+
+        throw new RuntimeException("unknow column : " + name);
     }
     }
 
 
     public List<FieldMeta> getPrimaryFields() {
     public List<FieldMeta> getPrimaryFields() {
         List<FieldMeta> primarys = new ArrayList<TableMeta.FieldMeta>();
         List<FieldMeta> primarys = new ArrayList<TableMeta.FieldMeta>();
-        for (FieldMeta meta : fileds) {
+        for (FieldMeta meta : fields) {
             if (meta.isKey()) {
             if (meta.isKey()) {
                 primarys.add(meta);
                 primarys.add(meta);
             }
             }
@@ -56,14 +84,49 @@ public class TableMeta {
         return primarys;
         return primarys;
     }
     }
 
 
+    public String getDdl() {
+        return ddl;
+    }
+
+    public void setDdl(String ddl) {
+        this.ddl = ddl;
+    }
+
+    public void addFieldMeta(FieldMeta fieldMeta) {
+        this.fields.add(fieldMeta);
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder data = new StringBuilder();
+        data.append("TableMeta [schema=" + schema + ", table=" + table + ", fileds=");
+        for (FieldMeta field : fields) {
+            data.append("\n\t").append(field.toString());
+        }
+        data.append("\n]");
+        return data.toString();
+    }
+
     public static class FieldMeta {
     public static class FieldMeta {
 
 
-        private String columnName;
-        private String columnType;
-        private String isNullable;
-        private String iskey;
-        private String defaultValue;
-        private String extra;
+        public FieldMeta(){
+
+        }
+
+        public FieldMeta(String columnName, String columnType, boolean nullable, boolean key, String defaultValue){
+            this.columnName = columnName;
+            this.columnType = columnType;
+            this.nullable = nullable;
+            this.key = key;
+            this.defaultValue = defaultValue;
+        }
+
+        private String  columnName;
+        private String  columnType;
+        private boolean nullable;
+        private boolean key;
+        private String  defaultValue;
+        private String  extra;
 
 
         public String getColumnName() {
         public String getColumnName() {
             return columnName;
             return columnName;
@@ -81,20 +144,8 @@ public class TableMeta {
             this.columnType = columnType;
             this.columnType = columnType;
         }
         }
 
 
-        public String getIsNullable() {
-            return isNullable;
-        }
-
-        public void setIsNullable(String isNullable) {
-            this.isNullable = isNullable;
-        }
-
-        public String getIskey() {
-            return iskey;
-        }
-
-        public void setIskey(String iskey) {
-            this.iskey = iskey;
+        public void setNullable(boolean nullable) {
+            this.nullable = nullable;
         }
         }
 
 
         public String getDefaultValue() {
         public String getDefaultValue() {
@@ -105,30 +156,34 @@ public class TableMeta {
             this.defaultValue = defaultValue;
             this.defaultValue = defaultValue;
         }
         }
 
 
-        public String getExtra() {
-            return extra;
+        public boolean isUnsigned() {
+            return StringUtils.containsIgnoreCase(columnType, "unsigned");
         }
         }
 
 
-        public void setExtra(String extra) {
-            this.extra = extra;
+        public boolean isNullable() {
+            return nullable;
         }
         }
 
 
-        public boolean isUnsigned() {
-            return StringUtils.containsIgnoreCase(columnType, "unsigned");
+        public boolean isKey() {
+            return key;
         }
         }
 
 
-        public boolean isKey() {
-            return StringUtils.equalsIgnoreCase(iskey, "PRI");
+        public void setKey(boolean key) {
+            this.key = key;
         }
         }
 
 
-        public boolean isNullable() {
-            return StringUtils.equalsIgnoreCase(isNullable, "YES");
+        public String getExtra() {
+            return extra;
+        }
+
+        public void setExtra(String extra) {
+            this.extra = extra;
         }
         }
 
 
         public String toString() {
         public String toString() {
             return "FieldMeta [columnName=" + columnName + ", columnType=" + columnType + ", defaultValue="
             return "FieldMeta [columnName=" + columnName + ", columnType=" + columnType + ", defaultValue="
-                   + defaultValue + ", extra=" + extra + ", isNullable=" + isNullable + ", iskey=" + iskey + "]";
+                   + defaultValue + ", nullable=" + nullable + ", key=" + key + "]";
         }
         }
-
     }
     }
+
 }
 }

+ 70 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/AbstractMysqlEventParser.java

@@ -7,15 +7,22 @@ import org.slf4j.LoggerFactory;
 
 
 import com.alibaba.otter.canal.filter.CanalEventFilter;
 import com.alibaba.otter.canal.filter.CanalEventFilter;
 import com.alibaba.otter.canal.filter.aviater.AviaterRegexFilter;
 import com.alibaba.otter.canal.filter.aviater.AviaterRegexFilter;
+import com.alibaba.otter.canal.parse.exception.CanalParseException;
 import com.alibaba.otter.canal.parse.inbound.AbstractEventParser;
 import com.alibaba.otter.canal.parse.inbound.AbstractEventParser;
 import com.alibaba.otter.canal.parse.inbound.BinlogParser;
 import com.alibaba.otter.canal.parse.inbound.BinlogParser;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.LogEventConvert;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.LogEventConvert;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.TableMetaTSDB;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.TableMetaTSDBBuilder;
+import com.alibaba.otter.canal.protocol.position.EntryPosition;
 
 
 public abstract class AbstractMysqlEventParser extends AbstractEventParser {
 public abstract class AbstractMysqlEventParser extends AbstractEventParser {
 
 
     protected final Logger      logger                  = LoggerFactory.getLogger(this.getClass());
     protected final Logger      logger                  = LoggerFactory.getLogger(this.getClass());
     protected static final long BINLOG_START_OFFEST     = 4L;
     protected static final long BINLOG_START_OFFEST     = 4L;
 
 
+    protected boolean           enableTsdb              = false;
+    protected String            tsdbSpringXml;
+    protected TableMetaTSDB     tableMetaTSDB;
     // 编码信息
     // 编码信息
     protected byte              connectionCharsetNumber = (byte) 33;
     protected byte              connectionCharsetNumber = (byte) 33;
     protected Charset           connectionCharset       = Charset.forName("UTF-8");
     protected Charset           connectionCharset       = Charset.forName("UTF-8");
@@ -24,6 +31,7 @@ public abstract class AbstractMysqlEventParser extends AbstractEventParser {
     protected boolean           filterQueryDdl          = false;
     protected boolean           filterQueryDdl          = false;
     protected boolean           filterRows              = false;
     protected boolean           filterRows              = false;
     protected boolean           filterTableError        = false;
     protected boolean           filterTableError        = false;
+    protected boolean           useDruidDdlFilter       = true;
 
 
     protected BinlogParser buildParser() {
     protected BinlogParser buildParser() {
         LogEventConvert convert = new LogEventConvert();
         LogEventConvert convert = new LogEventConvert();
@@ -53,6 +61,40 @@ public abstract class AbstractMysqlEventParser extends AbstractEventParser {
         }
         }
     }
     }
 
 
+    /**
+     * 回滚到指定位点
+     * 
+     * @param position
+     * @return
+     */
+    protected boolean processTableMeta(EntryPosition position) {
+        if (tableMetaTSDB != null) {
+            return tableMetaTSDB.rollback(position);
+        }
+
+        return true;
+    }
+
+    public void start() throws CanalParseException {
+        if (enableTsdb) {
+            if (tableMetaTSDB == null) {
+                // 初始化
+                tableMetaTSDB = TableMetaTSDBBuilder.build(destination, tsdbSpringXml);
+            }
+        }
+
+        super.start();
+    }
+
+    public void stop() throws CanalParseException {
+        if (enableTsdb) {
+            TableMetaTSDBBuilder.destory(destination);
+            tableMetaTSDB = null;
+        }
+
+        super.stop();
+    }
+
     public void setEventBlackFilter(CanalEventFilter eventBlackFilter) {
     public void setEventBlackFilter(CanalEventFilter eventBlackFilter) {
         super.setEventBlackFilter(eventBlackFilter);
         super.setEventBlackFilter(eventBlackFilter);
 
 
@@ -97,4 +139,32 @@ public abstract class AbstractMysqlEventParser extends AbstractEventParser {
         this.filterTableError = filterTableError;
         this.filterTableError = filterTableError;
     }
     }
 
 
+    public boolean isUseDruidDdlFilter() {
+        return useDruidDdlFilter;
+    }
+
+    public void setUseDruidDdlFilter(boolean useDruidDdlFilter) {
+        this.useDruidDdlFilter = useDruidDdlFilter;
+    }
+
+    public void setEnableTsdb(boolean enableTsdb) {
+        this.enableTsdb = enableTsdb;
+        if (this.enableTsdb) {
+            if (tableMetaTSDB == null) {
+                // 初始化
+                tableMetaTSDB = TableMetaTSDBBuilder.build(destination, tsdbSpringXml);
+            }
+        }
+    }
+
+    public void setTsdbSpringXml(String tsdbSpringXml) {
+        this.tsdbSpringXml = tsdbSpringXml;
+        if (this.enableTsdb) {
+            if (tableMetaTSDB == null) {
+                // 初始化
+                tableMetaTSDB = TableMetaTSDBBuilder.build(destination, tsdbSpringXml);
+            }
+        }
+    }
+
 }
 }

+ 29 - 39
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/LocalBinLogConnection.java

@@ -8,6 +8,7 @@ import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
 
 
+import com.alibaba.otter.canal.parse.exception.CanalParseException;
 import com.alibaba.otter.canal.parse.inbound.ErosaConnection;
 import com.alibaba.otter.canal.parse.inbound.ErosaConnection;
 import com.alibaba.otter.canal.parse.inbound.SinkFunction;
 import com.alibaba.otter.canal.parse.inbound.SinkFunction;
 import com.alibaba.otter.canal.parse.inbound.mysql.local.BinLogFileQueue;
 import com.alibaba.otter.canal.parse.inbound.mysql.local.BinLogFileQueue;
@@ -84,26 +85,19 @@ public class LocalBinLogConnection implements ErosaConnection {
             while (running) {
             while (running) {
                 boolean needContinue = true;
                 boolean needContinue = true;
                 LogEvent event = null;
                 LogEvent event = null;
-                L: while (fetcher.fetch()) {
-                    /*
-                     * event = decoder.decode(fetcher, context); if (event ==
-                     * null) { throw new CanalParseException("parse failed"); }
-                     * if (!func.sink(event)) { needContinue = false; break; }
-                     */
-
-                    do {
-                        if (event == null) {
-                            event = new RotateLogEvent(context.getLogPosition().getFileName(), context.getLogPosition()
-                                .getPosition());
-                        } else {
-                            event = decoder.decode(fetcher, context);
-                        }
+                // 处理一下binlog文件名
+                event = new RotateLogEvent(context.getLogPosition().getFileName(), 4);
+                func.sink(event);
+                while (fetcher.fetch()) {
+                    event = decoder.decode(fetcher, context);
+                    if (event == null) {
+                        throw new CanalParseException("parse failed");
+                    }
 
 
-                        if (event != null && !func.sink(event)) {
-                            needContinue = false;
-                            break L;
-                        }
-                    } while (event != null);
+                    if (!func.sink(event)) {
+                        needContinue = false;
+                        break;
+                    }
                 }
                 }
 
 
                 if (needContinue) {// 读取下一个
                 if (needContinue) {// 读取下一个
@@ -121,7 +115,6 @@ public class LocalBinLogConnection implements ErosaConnection {
                     }
                     }
 
 
                     current = nextFile;
                     current = nextFile;
-
                     fetcher.open(current);
                     fetcher.open(current);
                     context.setLogPosition(new LogPosition(nextFile.getName()));
                     context.setLogPosition(new LogPosition(nextFile.getName()));
                 } else {
                 } else {
@@ -161,30 +154,27 @@ public class LocalBinLogConnection implements ErosaConnection {
 
 
                 binlogFilename = lastXidLogFilename;
                 binlogFilename = lastXidLogFilename;
                 binlogFileOffset = lastXidLogFileOffset;
                 binlogFileOffset = lastXidLogFileOffset;
-                L: while (fetcher.fetch()) {
-                    LogEvent event;
-                    do {
-                        event = decoder.decode(fetcher, context);
-                        if (event != null) {
-                            if (event.getWhen() > timestampSeconds) {
-                                break L;
-                            }
+                while (fetcher.fetch()) {
+                    LogEvent event = decoder.decode(fetcher, context);
+                    if (event != null) {
+                        if (event.getWhen() > timestampSeconds) {
+                            break;
+                        }
 
 
-                            needContinue = false;
-                            if (LogEvent.QUERY_EVENT == event.getHeader().getType()) {
-                                if (StringUtils.endsWithIgnoreCase(((QueryLogEvent) event).getQuery(), "BEGIN")) {
-                                    binlogFilename = lastXidLogFilename;
-                                    binlogFileOffset = lastXidLogFileOffset;
-                                } else if (StringUtils.endsWithIgnoreCase(((QueryLogEvent) event).getQuery(), "COMMIT")) {
-                                    lastXidLogFilename = current.getName();
-                                    lastXidLogFileOffset = event.getLogPos();
-                                }
-                            } else if (LogEvent.XID_EVENT == event.getHeader().getType()) {
+                        needContinue = false;
+                        if (LogEvent.QUERY_EVENT == event.getHeader().getType()) {
+                            if (StringUtils.endsWithIgnoreCase(((QueryLogEvent) event).getQuery(), "BEGIN")) {
+                                binlogFilename = lastXidLogFilename;
+                                binlogFileOffset = lastXidLogFileOffset;
+                            } else if (StringUtils.endsWithIgnoreCase(((QueryLogEvent) event).getQuery(), "COMMIT")) {
                                 lastXidLogFilename = current.getName();
                                 lastXidLogFilename = current.getName();
                                 lastXidLogFileOffset = event.getLogPos();
                                 lastXidLogFileOffset = event.getLogPos();
                             }
                             }
+                        } else if (LogEvent.XID_EVENT == event.getHeader().getType()) {
+                            lastXidLogFilename = current.getName();
+                            lastXidLogFileOffset = event.getLogPos();
                         }
                         }
-                    } while (event != null);
+                    }
                 }
                 }
 
 
                 if (needContinue) {// 读取下一个
                 if (needContinue) {// 读取下一个

+ 15 - 8
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/LocalBinlogEventParser.java

@@ -9,6 +9,7 @@ import com.alibaba.otter.canal.parse.exception.CanalParseException;
 import com.alibaba.otter.canal.parse.inbound.ErosaConnection;
 import com.alibaba.otter.canal.parse.inbound.ErosaConnection;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.LogEventConvert;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.LogEventConvert;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.TableMetaCache;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.TableMetaCache;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.DatabaseTableMeta;
 import com.alibaba.otter.canal.parse.index.CanalLogPositionManager;
 import com.alibaba.otter.canal.parse.index.CanalLogPositionManager;
 import com.alibaba.otter.canal.parse.support.AuthenticationInfo;
 import com.alibaba.otter.canal.parse.support.AuthenticationInfo;
 import com.alibaba.otter.canal.protocol.position.EntryPosition;
 import com.alibaba.otter.canal.protocol.position.EntryPosition;
@@ -23,14 +24,14 @@ import com.alibaba.otter.canal.protocol.position.LogPosition;
 public class LocalBinlogEventParser extends AbstractMysqlEventParser implements CanalEventParser {
 public class LocalBinlogEventParser extends AbstractMysqlEventParser implements CanalEventParser {
 
 
     // 数据库信息
     // 数据库信息
-    private AuthenticationInfo masterInfo;
-    private EntryPosition      masterPosition;        // binlog信息
-    private MysqlConnection    metaConnection;        // 查询meta信息的链接
-    private TableMetaCache     tableMetaCache;        // 对应meta
+    protected AuthenticationInfo masterInfo;
+    protected EntryPosition      masterPosition;        // binlog信息
+    protected MysqlConnection    metaConnection;        // 查询meta信息的链接
+    protected TableMetaCache     tableMetaCache;        // 对应meta
 
 
-    private String             directory;
-    private boolean            needWait   = false;
-    private int                bufferSize = 16 * 1024;
+    protected String             directory;
+    protected boolean            needWait   = false;
+    protected int                bufferSize = 16 * 1024;
 
 
     public LocalBinlogEventParser(){
     public LocalBinlogEventParser(){
         // this.runningInfo = new AuthenticationInfo();
         // this.runningInfo = new AuthenticationInfo();
@@ -50,7 +51,13 @@ public class LocalBinlogEventParser extends AbstractMysqlEventParser implements
             throw new CanalParseException(e);
             throw new CanalParseException(e);
         }
         }
 
 
-        tableMetaCache = new TableMetaCache(metaConnection);
+        if (tableMetaTSDB != null && tableMetaTSDB instanceof DatabaseTableMeta) {
+            ((DatabaseTableMeta) tableMetaTSDB).setConnection(metaConnection);
+            ((DatabaseTableMeta) tableMetaTSDB).setFilter(eventFilter);
+            ((DatabaseTableMeta) tableMetaTSDB).setBlackFilter(eventBlackFilter);
+        }
+
+        tableMetaCache = new TableMetaCache(metaConnection, tableMetaTSDB);
         ((LogEventConvert) binlogParser).setTableMetaCache(tableMetaCache);
         ((LogEventConvert) binlogParser).setTableMetaCache(tableMetaCache);
     }
     }
 
 

+ 101 - 2
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlConnection.java

@@ -14,36 +14,60 @@ import com.alibaba.otter.canal.parse.driver.mysql.MysqlQueryExecutor;
 import com.alibaba.otter.canal.parse.driver.mysql.MysqlUpdateExecutor;
 import com.alibaba.otter.canal.parse.driver.mysql.MysqlUpdateExecutor;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.HeaderPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.HeaderPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.client.BinlogDumpCommandPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.client.BinlogDumpCommandPacket;
+import com.alibaba.otter.canal.parse.driver.mysql.packets.client.RegisterSlaveCommandPacket;
+import com.alibaba.otter.canal.parse.driver.mysql.packets.client.SemiAckCommandPacket;
+import com.alibaba.otter.canal.parse.driver.mysql.packets.server.ErrorPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.server.ResultSetPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.server.ResultSetPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.utils.PacketManager;
 import com.alibaba.otter.canal.parse.driver.mysql.utils.PacketManager;
 import com.alibaba.otter.canal.parse.exception.CanalParseException;
 import com.alibaba.otter.canal.parse.exception.CanalParseException;
 import com.alibaba.otter.canal.parse.inbound.ErosaConnection;
 import com.alibaba.otter.canal.parse.inbound.ErosaConnection;
 import com.alibaba.otter.canal.parse.inbound.SinkFunction;
 import com.alibaba.otter.canal.parse.inbound.SinkFunction;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.DirectLogFetcher;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.DirectLogFetcher;
+import com.alibaba.otter.canal.parse.support.AuthenticationInfo;
 import com.taobao.tddl.dbsync.binlog.LogContext;
 import com.taobao.tddl.dbsync.binlog.LogContext;
 import com.taobao.tddl.dbsync.binlog.LogDecoder;
 import com.taobao.tddl.dbsync.binlog.LogDecoder;
 import com.taobao.tddl.dbsync.binlog.LogEvent;
 import com.taobao.tddl.dbsync.binlog.LogEvent;
 
 
 public class MysqlConnection implements ErosaConnection {
 public class MysqlConnection implements ErosaConnection {
 
 
-    private static final Logger logger  = LoggerFactory.getLogger(MysqlConnection.class);
+    private static final Logger logger      = LoggerFactory.getLogger(MysqlConnection.class);
 
 
     private MysqlConnector      connector;
     private MysqlConnector      connector;
     private long                slaveId;
     private long                slaveId;
-    private Charset             charset = Charset.forName("UTF-8");
+    private Charset             charset     = Charset.forName("UTF-8");
     private BinlogFormat        binlogFormat;
     private BinlogFormat        binlogFormat;
     private BinlogImage         binlogImage;
     private BinlogImage         binlogImage;
 
 
+    // tsdb releated
+    private AuthenticationInfo  authInfo;
+    protected int               connTimeout = 5 * 1000;                                      // 5秒
+    protected int               soTimeout   = 60 * 60 * 1000;                                // 1小时
+
     public MysqlConnection(){
     public MysqlConnection(){
     }
     }
 
 
     public MysqlConnection(InetSocketAddress address, String username, String password){
     public MysqlConnection(InetSocketAddress address, String username, String password){
+        authInfo = new AuthenticationInfo();
+        authInfo.setAddress(address);
+        authInfo.setUsername(username);
+        authInfo.setPassword(password);
         connector = new MysqlConnector(address, username, password);
         connector = new MysqlConnector(address, username, password);
+        // 将connection里面的参数透传下
+        connector.setSoTimeout(soTimeout);
+        connector.setConnTimeout(connTimeout);
     }
     }
 
 
     public MysqlConnection(InetSocketAddress address, String username, String password, byte charsetNumber,
     public MysqlConnection(InetSocketAddress address, String username, String password, byte charsetNumber,
                            String defaultSchema){
                            String defaultSchema){
+        authInfo = new AuthenticationInfo();
+        authInfo.setAddress(address);
+        authInfo.setUsername(username);
+        authInfo.setPassword(password);
+        authInfo.setDefaultDatabaseName(defaultSchema);
         connector = new MysqlConnector(address, username, password, charsetNumber, defaultSchema);
         connector = new MysqlConnector(address, username, password, charsetNumber, defaultSchema);
+        // 将connection里面的参数透传下
+        connector.setSoTimeout(soTimeout);
+        connector.setConnTimeout(connTimeout);
     }
     }
 
 
     public void connect() throws IOException {
     public void connect() throws IOException {
@@ -67,6 +91,11 @@ public class MysqlConnection implements ErosaConnection {
         return exector.query(cmd);
         return exector.query(cmd);
     }
     }
 
 
+    public List<ResultSetPacket> queryMulti(String cmd) throws IOException {
+        MysqlQueryExecutor exector = new MysqlQueryExecutor(connector);
+        return exector.queryMulti(cmd);
+    }
+
     public void update(String cmd) throws IOException {
     public void update(String cmd) throws IOException {
         MysqlUpdateExecutor exector = new MysqlUpdateExecutor(connector);
         MysqlUpdateExecutor exector = new MysqlUpdateExecutor(connector);
         exector.update(cmd);
         exector.update(cmd);
@@ -103,6 +132,7 @@ public class MysqlConnection implements ErosaConnection {
 
 
     public void dump(String binlogfilename, Long binlogPosition, SinkFunction func) throws IOException {
     public void dump(String binlogfilename, Long binlogPosition, SinkFunction func) throws IOException {
         updateSettings();
         updateSettings();
+        sendRegisterSlave();
         sendBinlogDump(binlogfilename, binlogPosition);
         sendBinlogDump(binlogfilename, binlogPosition);
         DirectLogFetcher fetcher = new DirectLogFetcher(connector.getReceiveBufferSize());
         DirectLogFetcher fetcher = new DirectLogFetcher(connector.getReceiveBufferSize());
         fetcher.start(connector.getChannel());
         fetcher.start(connector.getChannel());
@@ -119,6 +149,10 @@ public class MysqlConnection implements ErosaConnection {
             if (!func.sink(event)) {
             if (!func.sink(event)) {
                 break;
                 break;
             }
             }
+
+            if (event.getSemival() == 1) {
+                sendSemiAck(context.getLogPosition().getFileName(), binlogPosition);
+            }
         }
         }
     }
     }
 
 
@@ -126,6 +160,36 @@ public class MysqlConnection implements ErosaConnection {
         throw new NullPointerException("Not implement yet");
         throw new NullPointerException("Not implement yet");
     }
     }
 
 
+    private void sendRegisterSlave() throws IOException {
+        RegisterSlaveCommandPacket cmd = new RegisterSlaveCommandPacket();
+        cmd.reportHost = authInfo.getAddress().getAddress().getHostAddress();
+        cmd.reportPasswd = authInfo.getPassword();
+        cmd.reportUser = authInfo.getUsername();
+        cmd.reportPort = authInfo.getAddress().getPort(); // 暂时先用master节点的port
+        cmd.serverId = this.slaveId;
+        byte[] cmdBody = cmd.toBytes();
+
+        logger.info("Register slave {}", cmd);
+
+        HeaderPacket header = new HeaderPacket();
+        header.setPacketBodyLength(cmdBody.length);
+        header.setPacketSequenceNumber((byte) 0x00);
+        PacketManager.writePkg(connector.getChannel(), header.toBytes(), cmdBody);
+
+        header = PacketManager.readHeader(connector.getChannel(), 4);
+        byte[] body = PacketManager.readBytes(connector.getChannel(), header.getPacketBodyLength());
+        assert body != null;
+        if (body[0] < 0) {
+            if (body[0] == -1) {
+                ErrorPacket err = new ErrorPacket();
+                err.fromBytes(body);
+                throw new IOException("Error When doing Register slave:" + err.toString());
+            } else {
+                throw new IOException("unpexpected packet with field_count=" + body[0]);
+            }
+        }
+    }
+
     private void sendBinlogDump(String binlogfilename, Long binlogPosition) throws IOException {
     private void sendBinlogDump(String binlogfilename, Long binlogPosition) throws IOException {
         BinlogDumpCommandPacket binlogDumpCmd = new BinlogDumpCommandPacket();
         BinlogDumpCommandPacket binlogDumpCmd = new BinlogDumpCommandPacket();
         binlogDumpCmd.binlogFileName = binlogfilename;
         binlogDumpCmd.binlogFileName = binlogfilename;
@@ -141,11 +205,27 @@ public class MysqlConnection implements ErosaConnection {
         connector.setDumping(true);
         connector.setDumping(true);
     }
     }
 
 
+    private void sendSemiAck(String binlogfilename, Long binlogPosition) throws IOException {
+        SemiAckCommandPacket semiAckCmd = new SemiAckCommandPacket();
+        semiAckCmd.binlogFileName = binlogfilename;
+        semiAckCmd.binlogPosition = binlogPosition;
+
+        byte[] cmdBody = semiAckCmd.toBytes();
+
+        logger.info("SEMI ACK with position:{}", semiAckCmd);
+        HeaderPacket semiAckHeader = new HeaderPacket();
+        semiAckHeader.setPacketBodyLength(cmdBody.length);
+        semiAckHeader.setPacketSequenceNumber((byte) 0x00);
+        PacketManager.writePkg(connector.getChannel(), semiAckHeader.toBytes(), cmdBody);
+    }
+
     public MysqlConnection fork() {
     public MysqlConnection fork() {
         MysqlConnection connection = new MysqlConnection();
         MysqlConnection connection = new MysqlConnection();
         connection.setCharset(getCharset());
         connection.setCharset(getCharset());
         connection.setSlaveId(getSlaveId());
         connection.setSlaveId(getSlaveId());
         connection.setConnector(connector.fork());
         connection.setConnector(connector.fork());
+        // set authInfo
+        connection.setAuthInfo(authInfo);
         return connection;
         return connection;
     }
     }
 
 
@@ -381,4 +461,23 @@ public class MysqlConnection implements ErosaConnection {
         return binlogImage;
         return binlogImage;
     }
     }
 
 
+    public InetSocketAddress getAddress() {
+        return authInfo.getAddress();
+    }
+
+    public void setConnTimeout(int connTimeout) {
+        this.connTimeout = connTimeout;
+    }
+
+    public void setSoTimeout(int soTimeout) {
+        this.soTimeout = soTimeout;
+    }
+
+    public AuthenticationInfo getAuthInfo() {
+        return authInfo;
+    }
+
+    public void setAuthInfo(AuthenticationInfo authInfo) {
+        this.authInfo = authInfo;
+    }
 }
 }

+ 131 - 79
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlEventParser.java

@@ -1,12 +1,13 @@
 package com.alibaba.otter.canal.parse.inbound.mysql;
 package com.alibaba.otter.canal.parse.inbound.mysql;
 
 
 import java.io.IOException;
 import java.io.IOException;
+import java.net.InetAddress;
 import java.net.SocketTimeoutException;
 import java.net.SocketTimeoutException;
+import java.net.UnknownHostException;
 import java.util.HashMap;
 import java.util.HashMap;
 import java.util.List;
 import java.util.List;
 import java.util.Map;
 import java.util.Map;
 import java.util.TimerTask;
 import java.util.TimerTask;
-import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicLong;
 
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.StringUtils;
@@ -26,6 +27,7 @@ import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection.BinlogFormat;
 import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection.BinlogImage;
 import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection.BinlogImage;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.LogEventConvert;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.LogEventConvert;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.TableMetaCache;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.TableMetaCache;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.DatabaseTableMeta;
 import com.alibaba.otter.canal.parse.support.AuthenticationInfo;
 import com.alibaba.otter.canal.parse.support.AuthenticationInfo;
 import com.alibaba.otter.canal.protocol.CanalEntry;
 import com.alibaba.otter.canal.protocol.CanalEntry;
 import com.alibaba.otter.canal.protocol.position.EntryPosition;
 import com.alibaba.otter.canal.protocol.position.EntryPosition;
@@ -117,7 +119,13 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
                 }
                 }
             }
             }
 
 
-            tableMetaCache = new TableMetaCache(metaConnection);
+            if (tableMetaTSDB != null && tableMetaTSDB instanceof DatabaseTableMeta) {
+                ((DatabaseTableMeta) tableMetaTSDB).setConnection(metaConnection);
+                ((DatabaseTableMeta) tableMetaTSDB).setFilter(eventFilter);
+                ((DatabaseTableMeta) tableMetaTSDB).setBlackFilter(eventBlackFilter);
+            }
+
+            tableMetaCache = new TableMetaCache(metaConnection, tableMetaTSDB);
             ((LogEventConvert) binlogParser).setTableMetaCache(tableMetaCache);
             ((LogEventConvert) binlogParser).setTableMetaCache(tableMetaCache);
         }
         }
     }
     }
@@ -306,10 +314,35 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
         connection.getConnector().setSendBufferSize(sendBufferSize);
         connection.getConnector().setSendBufferSize(sendBufferSize);
         connection.getConnector().setSoTimeout(defaultConnectionTimeoutInSeconds * 1000);
         connection.getConnector().setSoTimeout(defaultConnectionTimeoutInSeconds * 1000);
         connection.setCharset(connectionCharset);
         connection.setCharset(connectionCharset);
+        // 随机生成slaveId
+        if (this.slaveId <= 0) {
+            this.slaveId = generateUniqueServerId();
+        }
         connection.setSlaveId(this.slaveId);
         connection.setSlaveId(this.slaveId);
         return connection;
         return connection;
     }
     }
 
 
+    private final long generateUniqueServerId() {
+        try {
+            // a=`echo $masterip|cut -d\. -f1`
+            // b=`echo $masterip|cut -d\. -f2`
+            // c=`echo $masterip|cut -d\. -f3`
+            // d=`echo $masterip|cut -d\. -f4`
+            // #server_id=`expr $a \* 256 \* 256 \* 256 + $b \* 256 \* 256 + $c
+            // \* 256 + $d `
+            // #server_id=$b$c$d
+            // server_id=`expr $b \* 256 \* 256 + $c \* 256 + $d `
+            InetAddress localHost = InetAddress.getLocalHost();
+            byte[] addr = localHost.getAddress();
+            int salt = (destination != null) ? destination.hashCode() : 0;
+            return ((0x7f & salt) << 24) + ((0xff & (int) addr[1]) << 16) // NL
+                   + ((0xff & (int) addr[2]) << 8) // NL
+                   + (0xff & (int) addr[3]);
+        } catch (UnknownHostException e) {
+            throw new CanalParseException("Unknown host", e);
+        }
+    }
+
     protected EntryPosition findStartPosition(ErosaConnection connection) throws IOException {
     protected EntryPosition findStartPosition(ErosaConnection connection) throws IOException {
         EntryPosition startPosition = findStartPositionInternal(connection);
         EntryPosition startPosition = findStartPositionInternal(connection);
         if (needTransactionPosition.get()) {
         if (needTransactionPosition.get()) {
@@ -332,6 +365,41 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
         return endPosition;
         return endPosition;
     }
     }
 
 
+    protected EntryPosition findEndPositionWithMasterIdAndTimestamp(MysqlConnection connection) {
+        MysqlConnection mysqlConnection = (MysqlConnection) connection;
+        final EntryPosition endPosition = findEndPosition(mysqlConnection);
+        if (tableMetaTSDB != null) {
+            long startTimestamp = System.currentTimeMillis();
+            return findAsPerTimestampInSpecificLogFile(mysqlConnection,
+                startTimestamp,
+                endPosition,
+                endPosition.getJournalName(),
+                true);
+        } else {
+            return endPosition;
+        }
+    }
+
+    protected EntryPosition findPositionWithMasterIdAndTimestamp(MysqlConnection connection, EntryPosition fixedPosition) {
+        MysqlConnection mysqlConnection = (MysqlConnection) connection;
+        if (tableMetaTSDB != null && (fixedPosition.getTimestamp() == null || fixedPosition.getTimestamp() <= 0)) {
+            // 使用一个未来极大的时间,基于位点进行定位
+            long startTimestamp = System.currentTimeMillis() + 102L * 365 * 24 * 3600 * 1000; // 当前时间的未来102年
+            EntryPosition entryPosition = findAsPerTimestampInSpecificLogFile(mysqlConnection,
+                startTimestamp,
+                fixedPosition,
+                fixedPosition.getJournalName(),
+                true);
+            if (entryPosition == null) {
+                throw new CanalParseException("[fixed timestamp] can't found begin/commit position before with fixed position"
+                                              + fixedPosition.getJournalName() + ":" + fixedPosition.getPosition());
+            }
+            return entryPosition;
+        } else {
+            return fixedPosition;
+        }
+    }
+
     protected EntryPosition findStartPositionInternal(ErosaConnection connection) {
     protected EntryPosition findStartPositionInternal(ErosaConnection connection) {
         MysqlConnection mysqlConnection = (MysqlConnection) connection;
         MysqlConnection mysqlConnection = (MysqlConnection) connection;
         LogPosition logPosition = logPositionManager.getLatestIndexBy(destination);
         LogPosition logPosition = logPositionManager.getLatestIndexBy(destination);
@@ -345,7 +413,7 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
             }
             }
 
 
             if (entryPosition == null) {
             if (entryPosition == null) {
-                entryPosition = findEndPosition(mysqlConnection); // 默认从当前最后一个位置进行消费
+                entryPosition = findEndPositionWithMasterIdAndTimestamp(mysqlConnection); // 默认从当前最后一个位置进行消费
             }
             }
 
 
             // 判断一下是否需要按时间订阅
             // 判断一下是否需要按时间订阅
@@ -357,13 +425,15 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
                     return findByStartTimeStamp(mysqlConnection, entryPosition.getTimestamp());
                     return findByStartTimeStamp(mysqlConnection, entryPosition.getTimestamp());
                 } else {
                 } else {
                     logger.warn("prepare to find start position just show master status");
                     logger.warn("prepare to find start position just show master status");
-                    return findEndPosition(mysqlConnection); // 默认从当前最后一个位置进行消费
+                    return findEndPositionWithMasterIdAndTimestamp(mysqlConnection); // 默认从当前最后一个位置进行消费
                 }
                 }
             } else {
             } else {
                 if (entryPosition.getPosition() != null && entryPosition.getPosition() > 0L) {
                 if (entryPosition.getPosition() != null && entryPosition.getPosition() > 0L) {
                     // 如果指定binlogName + offset,直接返回
                     // 如果指定binlogName + offset,直接返回
+                    entryPosition = findPositionWithMasterIdAndTimestamp(mysqlConnection, entryPosition);
                     logger.warn("prepare to find start position {}:{}:{}",
                     logger.warn("prepare to find start position {}:{}:{}",
-                        new Object[] { entryPosition.getJournalName(), entryPosition.getPosition(), "" });
+                        new Object[] { entryPosition.getJournalName(), entryPosition.getPosition(),
+                                entryPosition.getTimestamp() });
                     return entryPosition;
                     return entryPosition;
                 } else {
                 } else {
                     EntryPosition specificLogFilePosition = null;
                     EntryPosition specificLogFilePosition = null;
@@ -377,7 +447,8 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
                             specificLogFilePosition = findAsPerTimestampInSpecificLogFile(mysqlConnection,
                             specificLogFilePosition = findAsPerTimestampInSpecificLogFile(mysqlConnection,
                                 entryPosition.getTimestamp(),
                                 entryPosition.getTimestamp(),
                                 endPosition,
                                 endPosition,
-                                entryPosition.getJournalName());
+                                entryPosition.getJournalName(),
+                                true);
                         }
                         }
                     }
                     }
 
 
@@ -428,83 +499,47 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
     // 主要考虑一个事务执行时间可能会几秒种,如果仅仅按照timestamp相同,则可能会丢失事务的前半部分数据
     // 主要考虑一个事务执行时间可能会几秒种,如果仅仅按照timestamp相同,则可能会丢失事务的前半部分数据
     private Long findTransactionBeginPosition(ErosaConnection mysqlConnection, final EntryPosition entryPosition)
     private Long findTransactionBeginPosition(ErosaConnection mysqlConnection, final EntryPosition entryPosition)
                                                                                                                  throws IOException {
                                                                                                                  throws IOException {
-        // 尝试找到一个合适的位置
-        final AtomicBoolean reDump = new AtomicBoolean(false);
+        // 针对开始的第一条为非Begin记录,需要从该binlog扫描
+        final AtomicLong preTransactionStartPosition = new AtomicLong(0L);
         mysqlConnection.reconnect();
         mysqlConnection.reconnect();
-        mysqlConnection.seek(entryPosition.getJournalName(), entryPosition.getPosition(), new SinkFunction<LogEvent>() {
+        mysqlConnection.seek(entryPosition.getJournalName(), 4L, new SinkFunction<LogEvent>() {
 
 
             private LogPosition lastPosition;
             private LogPosition lastPosition;
 
 
             public boolean sink(LogEvent event) {
             public boolean sink(LogEvent event) {
                 try {
                 try {
-                    CanalEntry.Entry entry = parseAndProfilingIfNecessary(event);
+                    CanalEntry.Entry entry = parseAndProfilingIfNecessary(event, true);
                     if (entry == null) {
                     if (entry == null) {
                         return true;
                         return true;
                     }
                     }
 
 
-                    // 直接查询第一条业务数据,确认是否为事务Begin/End
-                    if (CanalEntry.EntryType.TRANSACTIONBEGIN == entry.getEntryType()
-                        || CanalEntry.EntryType.TRANSACTIONEND == entry.getEntryType()) {
-                        lastPosition = buildLastPosition(entry);
-                        return false;
-                    } else {
-                        reDump.set(true);
-                        lastPosition = buildLastPosition(entry);
-                        return false;
+                    // 直接查询第一条业务数据,确认是否为事务Begin
+                    // 记录一下transaction begin position
+                    if (entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONBEGIN
+                        && entry.getHeader().getLogfileOffset() < entryPosition.getPosition()) {
+                        preTransactionStartPosition.set(entry.getHeader().getLogfileOffset());
+                    }
+
+                    if (entry.getHeader().getLogfileOffset() >= entryPosition.getPosition()) {
+                        return false;// 退出
                     }
                     }
+
+                    lastPosition = buildLastPosition(entry);
                 } catch (Exception e) {
                 } catch (Exception e) {
-                    // 上一次记录的position可能为一条update/insert/delete变更事件,直接进行dump的话,会缺少tableMap事件,导致tableId未进行解析
                     processSinkError(e, lastPosition, entryPosition.getJournalName(), entryPosition.getPosition());
                     processSinkError(e, lastPosition, entryPosition.getJournalName(), entryPosition.getPosition());
-                    reDump.set(true);
                     return false;
                     return false;
                 }
                 }
+
+                return running;
             }
             }
         });
         });
-        // 针对开始的第一条为非Begin记录,需要从该binlog扫描
-        if (reDump.get()) {
-            final AtomicLong preTransactionStartPosition = new AtomicLong(0L);
-            mysqlConnection.reconnect();
-            mysqlConnection.seek(entryPosition.getJournalName(), 4L, new SinkFunction<LogEvent>() {
-
-                private LogPosition lastPosition;
-
-                public boolean sink(LogEvent event) {
-                    try {
-                        CanalEntry.Entry entry = parseAndProfilingIfNecessary(event);
-                        if (entry == null) {
-                            return true;
-                        }
-
-                        // 直接查询第一条业务数据,确认是否为事务Begin
-                        // 记录一下transaction begin position
-                        if (entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONBEGIN
-                            && entry.getHeader().getLogfileOffset() < entryPosition.getPosition()) {
-                            preTransactionStartPosition.set(entry.getHeader().getLogfileOffset());
-                        }
-
-                        if (entry.getHeader().getLogfileOffset() >= entryPosition.getPosition()) {
-                            return false;// 退出
-                        }
-
-                        lastPosition = buildLastPosition(entry);
-                    } catch (Exception e) {
-                        processSinkError(e, lastPosition, entryPosition.getJournalName(), entryPosition.getPosition());
-                        return false;
-                    }
 
 
-                    return running;
-                }
-            });
-
-            // 判断一下找到的最接近position的事务头的位置
-            if (preTransactionStartPosition.get() > entryPosition.getPosition()) {
-                logger.error("preTransactionEndPosition greater than startPosition from zk or localconf, maybe lost data");
-                throw new CanalParseException("preTransactionStartPosition greater than startPosition from zk or localconf, maybe lost data");
-            }
-            return preTransactionStartPosition.get();
-        } else {
-            return entryPosition.getPosition();
+        // 判断一下找到的最接近position的事务头的位置
+        if (preTransactionStartPosition.get() > entryPosition.getPosition()) {
+            logger.error("preTransactionEndPosition greater than startPosition from zk or localconf, maybe lost data");
+            throw new CanalParseException("preTransactionStartPosition greater than startPosition from zk or localconf, maybe lost data");
         }
         }
+        return preTransactionStartPosition.get();
     }
     }
 
 
     // 根据时间查找binlog位置
     // 根据时间查找binlog位置
@@ -521,7 +556,8 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
                 EntryPosition entryPosition = findAsPerTimestampInSpecificLogFile(mysqlConnection,
                 EntryPosition entryPosition = findAsPerTimestampInSpecificLogFile(mysqlConnection,
                     startTimestamp,
                     startTimestamp,
                     endPosition,
                     endPosition,
-                    startSearchBinlogFile);
+                    startSearchBinlogFile,
+                    false);
                 if (entryPosition == null) {
                 if (entryPosition == null) {
                     if (StringUtils.equalsIgnoreCase(minBinlogFileName, startSearchBinlogFile)) {
                     if (StringUtils.equalsIgnoreCase(minBinlogFileName, startSearchBinlogFile)) {
                         // 已经找到最早的一个binlog,没必要往前找了
                         // 已经找到最早的一个binlog,没必要往前找了
@@ -549,7 +585,8 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
                 }
                 }
             } catch (Exception e) {
             } catch (Exception e) {
                 logger.warn(String.format("the binlogfile:%s doesn't exist, to continue to search the next binlogfile , caused by",
                 logger.warn(String.format("the binlogfile:%s doesn't exist, to continue to search the next binlogfile , caused by",
-                    startSearchBinlogFile), e);
+                    startSearchBinlogFile),
+                    e);
                 int binlogSeqNum = Integer.parseInt(startSearchBinlogFile.substring(startSearchBinlogFile.indexOf(".") + 1));
                 int binlogSeqNum = Integer.parseInt(startSearchBinlogFile.substring(startSearchBinlogFile.indexOf(".") + 1));
                 if (binlogSeqNum <= 1) {
                 if (binlogSeqNum <= 1) {
                     logger.warn("Didn't find the corresponding binlog files");
                     logger.warn("Didn't find the corresponding binlog files");
@@ -668,7 +705,8 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
     private EntryPosition findAsPerTimestampInSpecificLogFile(MysqlConnection mysqlConnection,
     private EntryPosition findAsPerTimestampInSpecificLogFile(MysqlConnection mysqlConnection,
                                                               final Long startTimestamp,
                                                               final Long startTimestamp,
                                                               final EntryPosition endPosition,
                                                               final EntryPosition endPosition,
-                                                              final String searchBinlogFile) {
+                                                              final String searchBinlogFile,
+                                                              final Boolean justForPositionTimestamp) {
 
 
         final LogPosition logPosition = new LogPosition();
         final LogPosition logPosition = new LogPosition();
         try {
         try {
@@ -681,7 +719,16 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
                 public boolean sink(LogEvent event) {
                 public boolean sink(LogEvent event) {
                     EntryPosition entryPosition = null;
                     EntryPosition entryPosition = null;
                     try {
                     try {
-                        CanalEntry.Entry entry = parseAndProfilingIfNecessary(event);
+                        CanalEntry.Entry entry = parseAndProfilingIfNecessary(event, true);
+                        if (justForPositionTimestamp && logPosition.getPostion() == null && event.getWhen() > 0) {
+                            // 初始位点
+                            entryPosition = new EntryPosition(searchBinlogFile,
+                                event.getLogPos(),
+                                event.getWhen() * 1000,
+                                event.getServerId());
+                            logPosition.setPostion(entryPosition);
+                        }
+
                         if (entry == null) {
                         if (entry == null) {
                             return true;
                             return true;
                         }
                         }
@@ -689,11 +736,14 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
                         String logfilename = entry.getHeader().getLogfileName();
                         String logfilename = entry.getHeader().getLogfileName();
                         Long logfileoffset = entry.getHeader().getLogfileOffset();
                         Long logfileoffset = entry.getHeader().getLogfileOffset();
                         Long logposTimestamp = entry.getHeader().getExecuteTime();
                         Long logposTimestamp = entry.getHeader().getExecuteTime();
+                        Long serverId = entry.getHeader().getServerId();
 
 
                         if (CanalEntry.EntryType.TRANSACTIONBEGIN.equals(entry.getEntryType())
                         if (CanalEntry.EntryType.TRANSACTIONBEGIN.equals(entry.getEntryType())
                             || CanalEntry.EntryType.TRANSACTIONEND.equals(entry.getEntryType())) {
                             || CanalEntry.EntryType.TRANSACTIONEND.equals(entry.getEntryType())) {
-                            logger.debug("compare exit condition:{},{},{}, startTimestamp={}...", new Object[] {
-                                    logfilename, logfileoffset, logposTimestamp, startTimestamp });
+                            if (logger.isDebugEnabled()) {
+                                logger.debug("compare exit condition:{},{},{}, startTimestamp={}...", new Object[] {
+                                        logfilename, logfileoffset, logposTimestamp, startTimestamp });
+                            }
                             // 事务头和尾寻找第一条记录时间戳,如果最小的一条记录都不满足条件,可直接退出
                             // 事务头和尾寻找第一条记录时间戳,如果最小的一条记录都不满足条件,可直接退出
                             if (logposTimestamp >= startTimestamp) {
                             if (logposTimestamp >= startTimestamp) {
                                 return false;
                                 return false;
@@ -701,7 +751,7 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
                         }
                         }
 
 
                         if (StringUtils.equals(endPosition.getJournalName(), logfilename)
                         if (StringUtils.equals(endPosition.getJournalName(), logfilename)
-                            && endPosition.getPosition() <= (logfileoffset + event.getEventLen())) {
+                            && endPosition.getPosition() < logfileoffset) {
                             return false;
                             return false;
                         }
                         }
 
 
@@ -709,17 +759,19 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
                         // position = current +
                         // position = current +
                         // data.length,代表该事务的下一条offset,避免多余的事务重复
                         // data.length,代表该事务的下一条offset,避免多余的事务重复
                         if (CanalEntry.EntryType.TRANSACTIONEND.equals(entry.getEntryType())) {
                         if (CanalEntry.EntryType.TRANSACTIONEND.equals(entry.getEntryType())) {
-                            entryPosition = new EntryPosition(logfilename,
-                                logfileoffset + event.getEventLen(),
-                                logposTimestamp);
-                            logger.debug("set {} to be pending start position before finding another proper one...",
-                                entryPosition);
+                            entryPosition = new EntryPosition(logfilename, logfileoffset, logposTimestamp, serverId);
+                            if (logger.isDebugEnabled()) {
+                                logger.debug("set {} to be pending start position before finding another proper one...",
+                                    entryPosition);
+                            }
                             logPosition.setPostion(entryPosition);
                             logPosition.setPostion(entryPosition);
                         } else if (CanalEntry.EntryType.TRANSACTIONBEGIN.equals(entry.getEntryType())) {
                         } else if (CanalEntry.EntryType.TRANSACTIONBEGIN.equals(entry.getEntryType())) {
                             // 当前事务开始位点
                             // 当前事务开始位点
-                            entryPosition = new EntryPosition(logfilename, logfileoffset, logposTimestamp);
-                            logger.debug("set {} to be pending start position before finding another proper one...",
-                                entryPosition);
+                            entryPosition = new EntryPosition(logfilename, logfileoffset, logposTimestamp, serverId);
+                            if (logger.isDebugEnabled()) {
+                                logger.debug("set {} to be pending start position before finding another proper one...",
+                                    entryPosition);
+                            }
                             logPosition.setPostion(entryPosition);
                             logPosition.setPostion(entryPosition);
                         }
                         }
 
 

+ 19 - 1
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/DirectLogFetcher.java

@@ -37,6 +37,8 @@ public class DirectLogFetcher extends LogFetcher {
 
 
     private SocketChannel         channel;
     private SocketChannel         channel;
 
 
+    private boolean               issemi            = false;
+
     // private BufferedInputStream input;
     // private BufferedInputStream input;
 
 
     public DirectLogFetcher(){
     public DirectLogFetcher(){
@@ -53,6 +55,10 @@ public class DirectLogFetcher extends LogFetcher {
 
 
     public void start(SocketChannel channel) throws IOException {
     public void start(SocketChannel channel) throws IOException {
         this.channel = channel;
         this.channel = channel;
+        String dbsemi = System.getProperty("db.semi");
+        if ("1".equals(dbsemi)) {
+            issemi = true;
+        }
         // 和mysql driver一样,提供buffer机制,提升读取binlog速度
         // 和mysql driver一样,提供buffer机制,提升读取binlog速度
         // this.input = new
         // this.input = new
         // BufferedInputStream(channel.socket().getInputStream(), 16384);
         // BufferedInputStream(channel.socket().getInputStream(), 16384);
@@ -106,6 +112,14 @@ public class DirectLogFetcher extends LogFetcher {
                 }
                 }
             }
             }
 
 
+            // if mysql is in semi mode
+            if (issemi) {
+                // parse semi mark
+                int semimark = getUint8(NET_HEADER_SIZE + 1);
+                int semival = getUint8(NET_HEADER_SIZE + 2);
+                this.semival = semival;
+            }
+
             // The first packet is a multi-packet, concatenate the packets.
             // The first packet is a multi-packet, concatenate the packets.
             while (netlen == MAX_PACKET_LENGTH) {
             while (netlen == MAX_PACKET_LENGTH) {
                 if (!fetch0(0, NET_HEADER_SIZE)) {
                 if (!fetch0(0, NET_HEADER_SIZE)) {
@@ -122,7 +136,11 @@ public class DirectLogFetcher extends LogFetcher {
             }
             }
 
 
             // Preparing buffer variables to decoding.
             // Preparing buffer variables to decoding.
-            origin = NET_HEADER_SIZE + 1;
+            if (issemi) {
+                origin = NET_HEADER_SIZE + 3;
+            } else {
+                origin = NET_HEADER_SIZE + 1;
+            }
             position = origin;
             position = origin;
             limit -= origin;
             limit -= origin;
             return true;
             return true;

+ 187 - 98
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/LogEventConvert.java

@@ -6,6 +6,7 @@ import java.math.BigDecimal;
 import java.math.BigInteger;
 import java.math.BigInteger;
 import java.nio.charset.Charset;
 import java.nio.charset.Charset;
 import java.sql.Types;
 import java.sql.Types;
+import java.util.Arrays;
 import java.util.BitSet;
 import java.util.BitSet;
 import java.util.List;
 import java.util.List;
 
 
@@ -21,7 +22,9 @@ import com.alibaba.otter.canal.parse.exception.TableIdNotFoundException;
 import com.alibaba.otter.canal.parse.inbound.BinlogParser;
 import com.alibaba.otter.canal.parse.inbound.BinlogParser;
 import com.alibaba.otter.canal.parse.inbound.TableMeta;
 import com.alibaba.otter.canal.parse.inbound.TableMeta;
 import com.alibaba.otter.canal.parse.inbound.TableMeta.FieldMeta;
 import com.alibaba.otter.canal.parse.inbound.TableMeta.FieldMeta;
-import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.SimpleDdlParser.DdlResult;
+import com.alibaba.otter.canal.parse.inbound.mysql.ddl.DdlResult;
+import com.alibaba.otter.canal.parse.inbound.mysql.ddl.DruidDdlParser;
+import com.alibaba.otter.canal.parse.inbound.mysql.ddl.SimpleDdlParser;
 import com.alibaba.otter.canal.protocol.CanalEntry.Column;
 import com.alibaba.otter.canal.protocol.CanalEntry.Column;
 import com.alibaba.otter.canal.protocol.CanalEntry.Entry;
 import com.alibaba.otter.canal.protocol.CanalEntry.Entry;
 import com.alibaba.otter.canal.protocol.CanalEntry.EntryType;
 import com.alibaba.otter.canal.protocol.CanalEntry.EntryType;
@@ -33,6 +36,7 @@ import com.alibaba.otter.canal.protocol.CanalEntry.RowData;
 import com.alibaba.otter.canal.protocol.CanalEntry.TransactionBegin;
 import com.alibaba.otter.canal.protocol.CanalEntry.TransactionBegin;
 import com.alibaba.otter.canal.protocol.CanalEntry.TransactionEnd;
 import com.alibaba.otter.canal.protocol.CanalEntry.TransactionEnd;
 import com.alibaba.otter.canal.protocol.CanalEntry.Type;
 import com.alibaba.otter.canal.protocol.CanalEntry.Type;
+import com.alibaba.otter.canal.protocol.position.EntryPosition;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.ByteString;
 import com.taobao.tddl.dbsync.binlog.LogEvent;
 import com.taobao.tddl.dbsync.binlog.LogEvent;
 import com.taobao.tddl.dbsync.binlog.event.DeleteRowsLogEvent;
 import com.taobao.tddl.dbsync.binlog.event.DeleteRowsLogEvent;
@@ -86,8 +90,10 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
     private boolean                     filterTableError    = false;
     private boolean                     filterTableError    = false;
     // 新增rows过滤,用于仅订阅除rows以外的数据
     // 新增rows过滤,用于仅订阅除rows以外的数据
     private boolean                     filterRows          = false;
     private boolean                     filterRows          = false;
+    private boolean                     useDruidDdlFilter   = true;
 
 
-    public Entry parse(LogEvent logEvent) throws CanalParseException {
+    @Override
+    public Entry parse(LogEvent logEvent, boolean isSeek) throws CanalParseException {
         if (logEvent == null || logEvent instanceof UnknownLogEvent) {
         if (logEvent == null || logEvent instanceof UnknownLogEvent) {
             return null;
             return null;
         }
         }
@@ -98,7 +104,7 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
                 binlogFileName = ((RotateLogEvent) logEvent).getFilename();
                 binlogFileName = ((RotateLogEvent) logEvent).getFilename();
                 break;
                 break;
             case LogEvent.QUERY_EVENT:
             case LogEvent.QUERY_EVENT:
-                return parseQueryEvent((QueryLogEvent) logEvent);
+                return parseQueryEvent((QueryLogEvent) logEvent, isSeek);
             case LogEvent.XID_EVENT:
             case LogEvent.XID_EVENT:
                 return parseXidEvent((XidLogEvent) logEvent);
                 return parseXidEvent((XidLogEvent) logEvent);
             case LogEvent.TABLE_MAP_EVENT:
             case LogEvent.TABLE_MAP_EVENT:
@@ -137,7 +143,7 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
         }
         }
     }
     }
 
 
-    private Entry parseQueryEvent(QueryLogEvent event) {
+    private Entry parseQueryEvent(QueryLogEvent event, boolean isSeek) {
         String queryString = event.getQuery();
         String queryString = event.getQuery();
         if (StringUtils.endsWithIgnoreCase(queryString, BEGIN)) {
         if (StringUtils.endsWithIgnoreCase(queryString, BEGIN)) {
             TransactionBegin transactionBegin = createTransactionBegin(event.getSessionId());
             TransactionBegin transactionBegin = createTransactionBegin(event.getSessionId());
@@ -148,107 +154,139 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
             Header header = createHeader(binlogFileName, event.getHeader(), "", "", null);
             Header header = createHeader(binlogFileName, event.getHeader(), "", "", null);
             return createEntry(header, EntryType.TRANSACTIONEND, transactionEnd.toByteString());
             return createEntry(header, EntryType.TRANSACTIONEND, transactionEnd.toByteString());
         } else {
         } else {
-            // DDL语句处理
-            DdlResult result = SimpleDdlParser.parse(queryString, event.getDbName());
-
-            String schemaName = event.getDbName();
-            if (StringUtils.isNotEmpty(result.getSchemaName())) {
-                schemaName = result.getSchemaName();
-            }
-
-            String tableName = result.getTableName();
+            boolean notFilter = false;
             EventType type = EventType.QUERY;
             EventType type = EventType.QUERY;
-
-            // 更新下table meta cache
-            if (tableMetaCache != null
-                && (result.getType() == EventType.ALTER || result.getType() == EventType.ERASE || result.getType() == EventType.RENAME)) {
-                for (DdlResult renameResult = result; renameResult != null; renameResult = renameResult.getRenameTableResult()) {
-                    String schemaName0 = event.getDbName(); // 防止rename语句后产生schema变更带来影响
-                    if (StringUtils.isNotEmpty(renameResult.getSchemaName())) {
-                        schemaName0 = renameResult.getSchemaName();
-                    }
-
-                    tableName = renameResult.getTableName();
-                    if (StringUtils.isNotEmpty(tableName)) {
-                        // 如果解析到了正确的表信息,则根据全名进行清除
-                        tableMetaCache.clearTableMeta(schemaName0, tableName);
-                    } else {
-                        // 如果无法解析正确的表信息,则根据schema进行清除
-                        tableMetaCache.clearTableMetaWithSchemaName(schemaName0);
+            String tableName = null;
+            String schemaName = null;
+            if (useDruidDdlFilter) {
+                List<DdlResult> results = DruidDdlParser.parse(queryString, event.getDbName());
+                for (DdlResult result : results) {
+                    if (!processFilter(queryString, result)) {
+                        // 只要有一个数据不进行过滤
+                        notFilter = true;
                     }
                     }
                 }
                 }
-            }
-
-            // fixed issue https://github.com/alibaba/canal/issues/58
-            if (result.getType() == EventType.ALTER || result.getType() == EventType.ERASE
-                || result.getType() == EventType.CREATE || result.getType() == EventType.TRUNCATE
-                || result.getType() == EventType.RENAME || result.getType() == EventType.CINDEX
-                || result.getType() == EventType.DINDEX) { // 针对DDL类型
-
-                if (filterQueryDdl) {
-                    return null;
+                if (results.size() > 0) {
+                    // 如果针对多行的DDL,只能取第一条
+                    type = results.get(0).getType();
+                    schemaName = results.get(0).getSchemaName();
+                    tableName = results.get(0).getTableName();
+                }
+            } else {
+                DdlResult result = SimpleDdlParser.parse(queryString, event.getDbName());
+                if (!processFilter(queryString, result)) {
+                    notFilter = true;
                 }
                 }
 
 
                 type = result.getType();
                 type = result.getType();
-                if (StringUtils.isEmpty(tableName)
-                    || (result.getType() == EventType.RENAME && StringUtils.isEmpty(result.getOriTableName()))) {
-                    // 如果解析不出tableName,记录一下日志,方便bugfix,目前直接抛出异常,中断解析
-                    throw new CanalParseException("SimpleDdlParser process query failed. pls submit issue with this queryString: "
-                                                  + queryString + " , and DdlResult: " + result.toString());
-                    // return null;
-                } else {
-                    // check name filter
-                    String name = schemaName + "." + tableName;
-                    if (nameFilter != null && !nameFilter.filter(name)) {
-                        if (result.getType() == EventType.RENAME) {
-                            // rename校验只要源和目标满足一个就进行操作
-                            if (nameFilter != null
-                                && !nameFilter.filter(result.getOriSchemaName() + "." + result.getOriTableName())) {
-                                return null;
-                            }
-                        } else {
-                            // 其他情况返回null
-                            return null;
-                        }
-                    }
+                schemaName = result.getSchemaName();
+                tableName = result.getTableName();
+            }
 
 
-                    if (nameBlackFilter != null && nameBlackFilter.filter(name)) {
-                        if (result.getType() == EventType.RENAME) {
-                            // rename校验只要源和目标满足一个就进行操作
-                            if (nameBlackFilter != null
-                                && nameBlackFilter.filter(result.getOriSchemaName() + "." + result.getOriTableName())) {
-                                return null;
-                            }
-                        } else {
-                            // 其他情况返回null
-                            return null;
-                        }
-                    }
-                }
-            } else if (result.getType() == EventType.INSERT || result.getType() == EventType.UPDATE
-                       || result.getType() == EventType.DELETE) {
-                // 对外返回,保证兼容,还是返回QUERY类型,这里暂不解析tableName,所以无法支持过滤
-                if (filterQueryDml) {
-                    return null;
-                }
-            } else if (filterQueryDcl) {
+            if (!notFilter) {
+                // 如果是过滤的数据就不处理了
                 return null;
                 return null;
             }
             }
 
 
+            if (!isSeek) {
+                // 使用新的表结构元数据管理方式
+                EntryPosition position = createPosition(event.getHeader());
+                tableMetaCache.apply(position, event.getDbName(), queryString, null);
+            }
+
             Header header = createHeader(binlogFileName, event.getHeader(), schemaName, tableName, type);
             Header header = createHeader(binlogFileName, event.getHeader(), schemaName, tableName, type);
             RowChange.Builder rowChangeBuider = RowChange.newBuilder();
             RowChange.Builder rowChangeBuider = RowChange.newBuilder();
-            if (result.getType() != EventType.QUERY) {
+            if (type != EventType.QUERY) {
                 rowChangeBuider.setIsDdl(true);
                 rowChangeBuider.setIsDdl(true);
             }
             }
             rowChangeBuider.setSql(queryString);
             rowChangeBuider.setSql(queryString);
             if (StringUtils.isNotEmpty(event.getDbName())) {// 可能为空
             if (StringUtils.isNotEmpty(event.getDbName())) {// 可能为空
                 rowChangeBuider.setDdlSchemaName(event.getDbName());
                 rowChangeBuider.setDdlSchemaName(event.getDbName());
             }
             }
-            rowChangeBuider.setEventType(result.getType());
+            rowChangeBuider.setEventType(type);
             return createEntry(header, EntryType.ROWDATA, rowChangeBuider.build().toByteString());
             return createEntry(header, EntryType.ROWDATA, rowChangeBuider.build().toByteString());
         }
         }
     }
     }
 
 
+    private boolean processFilter(String queryString, DdlResult result) {
+        String schemaName = result.getSchemaName();
+        String tableName = result.getTableName();
+        // fixed issue https://github.com/alibaba/canal/issues/58
+        // 更新下table meta cache
+        if (tableMetaCache != null
+            && (result.getType() == EventType.ALTER || result.getType() == EventType.ERASE || result.getType() == EventType.RENAME)) {
+            // 对外返回,保证兼容,还是返回QUERY类型,这里暂不解析tableName,所以无法支持过滤
+            for (DdlResult renameResult = result; renameResult != null; renameResult = renameResult.getRenameTableResult()) {
+                String schemaName0 = renameResult.getSchemaName();
+                String tableName0 = renameResult.getTableName();
+                if (StringUtils.isNotEmpty(tableName0)) {
+                    // 如果解析到了正确的表信息,则根据全名进行清除
+                    tableMetaCache.clearTableMeta(schemaName0, tableName0);
+                } else {
+                    // 如果无法解析正确的表信息,则根据schema进行清除
+                    tableMetaCache.clearTableMetaWithSchemaName(schemaName0);
+                }
+            }
+        }
+
+        // fixed issue https://github.com/alibaba/canal/issues/58
+        if (result.getType() == EventType.ALTER || result.getType() == EventType.ERASE
+            || result.getType() == EventType.CREATE || result.getType() == EventType.TRUNCATE
+            || result.getType() == EventType.RENAME || result.getType() == EventType.CINDEX
+            || result.getType() == EventType.DINDEX) { // 针对DDL类型
+
+            if (filterQueryDdl) {
+                return true;
+            }
+
+            if (StringUtils.isEmpty(tableName)
+                || (result.getType() == EventType.RENAME && StringUtils.isEmpty(result.getOriTableName()))) {
+                // 如果解析不出tableName,记录一下日志,方便bugfix,目前直接抛出异常,中断解析
+                throw new CanalParseException("SimpleDdlParser process query failed. pls submit issue with this queryString: "
+                                              + queryString + " , and DdlResult: " + result.toString());
+                // return null;
+            } else {
+                // check name filter
+                String name = schemaName + "." + tableName;
+                if (nameFilter != null && !nameFilter.filter(name)) {
+                    if (result.getType() == EventType.RENAME) {
+                        // rename校验只要源和目标满足一个就进行操作
+                        if (nameFilter != null
+                            && !nameFilter.filter(result.getOriSchemaName() + "." + result.getOriTableName())) {
+                            return true;
+                        }
+                    } else {
+                        // 其他情况返回null
+                        return true;
+                    }
+                }
+
+                if (nameBlackFilter != null && nameBlackFilter.filter(name)) {
+                    if (result.getType() == EventType.RENAME) {
+                        // rename校验只要源和目标满足一个就进行操作
+                        if (nameBlackFilter != null
+                            && nameBlackFilter.filter(result.getOriSchemaName() + "." + result.getOriTableName())) {
+                            return true;
+                        }
+                    } else {
+                        // 其他情况返回null
+                        return true;
+                    }
+                }
+            }
+        } else if (result.getType() == EventType.INSERT || result.getType() == EventType.UPDATE
+                   || result.getType() == EventType.DELETE) {
+            // 对外返回,保证兼容,还是返回QUERY类型,这里暂不解析tableName,所以无法支持过滤
+            if (filterQueryDml) {
+                return true;
+            }
+        } else if (filterQueryDcl) {
+            return true;
+        }
+
+        return false;
+    }
+
     private Entry parseRowsQueryEvent(RowsQueryLogEvent event) {
     private Entry parseRowsQueryEvent(RowsQueryLogEvent event) {
         if (filterQueryDml) {
         if (filterQueryDml) {
             return null;
             return null;
@@ -257,7 +295,15 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
         String queryString = null;
         String queryString = null;
         try {
         try {
             queryString = new String(event.getRowsQuery().getBytes(ISO_8859_1), charset.name());
             queryString = new String(event.getRowsQuery().getBytes(ISO_8859_1), charset.name());
-            return buildQueryEntry(queryString, event.getHeader());
+            String tableName = null;
+            if (useDruidDdlFilter) {
+                List<DdlResult> results = DruidDdlParser.parse(queryString, null);
+                if (results.size() > 0) {
+                    tableName = results.get(0).getTableName();
+                }
+            }
+
+            return buildQueryEntry(queryString, event.getHeader(), tableName);
         } catch (UnsupportedEncodingException e) {
         } catch (UnsupportedEncodingException e) {
             throw new CanalParseException(e);
             throw new CanalParseException(e);
         }
         }
@@ -318,6 +364,10 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
                 throw new TableIdNotFoundException("not found tableId:" + event.getTableId());
                 throw new TableIdNotFoundException("not found tableId:" + event.getTableId());
             }
             }
 
 
+            boolean isHeartBeat = isAliSQLHeartBeat(table.getDbName(), table.getTableName());
+            boolean isRDSHeartBeat = tableMetaCache.isOnRDS()
+                                     && isRDSHeartBeat(table.getDbName(), table.getTableName());
+
             String fullname = table.getDbName() + "." + table.getTableName();
             String fullname = table.getDbName() + "." + table.getTableName();
             // check name filter
             // check name filter
             if (nameFilter != null && !nameFilter.filter(fullname)) {
             if (nameFilter != null && !nameFilter.filter(fullname)) {
@@ -327,9 +377,23 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
                 return null;
                 return null;
             }
             }
 
 
-            if (tableMetaCache.isOnRDS() && "mysql.ha_health_check".equals(fullname)) {
-                // 忽略rds模式的mysql.ha_health_check心跳数据
-                return null;
+            // if (isHeartBeat || isRDSHeartBeat) {
+            // // 忽略rds模式的mysql.ha_health_check心跳数据
+            // return null;
+            // }
+            TableMeta tableMeta = null;
+            if (isRDSHeartBeat) {
+                // 处理rds模式的mysql.ha_health_check心跳数据
+                // 主要RDS的心跳表基本无权限,需要mock一个tableMeta
+                FieldMeta idMeta = new FieldMeta("id", "bigint(20)", true, false, "0");
+                FieldMeta typeMeta = new FieldMeta("type", "char(1)", false, true, "0");
+                tableMeta = new TableMeta(table.getDbName(), table.getTableName(), Arrays.asList(idMeta, typeMeta));
+            } else if (isHeartBeat) {
+                // 处理alisql模式的test.heartbeat心跳数据
+                // 心跳表基本无权限,需要mock一个tableMeta
+                FieldMeta idMeta = new FieldMeta("id", "smallint(6)", false, true, null);
+                FieldMeta typeMeta = new FieldMeta("type", "int(11)", true, false, null);
+                tableMeta = new TableMeta(table.getDbName(), table.getTableName(), Arrays.asList(idMeta, typeMeta));
             }
             }
 
 
             EventType eventType = null;
             EventType eventType = null;
@@ -349,6 +413,7 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
                 table.getDbName(),
                 table.getDbName(),
                 table.getTableName(),
                 table.getTableName(),
                 eventType);
                 eventType);
+            EntryPosition position = createPosition(event.getHeader());
             RowChange.Builder rowChangeBuider = RowChange.newBuilder();
             RowChange.Builder rowChangeBuider = RowChange.newBuilder();
             rowChangeBuider.setTableId(event.getTableId());
             rowChangeBuider.setTableId(event.getTableId());
             rowChangeBuider.setIsDdl(false);
             rowChangeBuider.setIsDdl(false);
@@ -358,9 +423,9 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
             BitSet columns = event.getColumns();
             BitSet columns = event.getColumns();
             BitSet changeColumns = event.getChangeColumns();
             BitSet changeColumns = event.getChangeColumns();
             boolean tableError = false;
             boolean tableError = false;
-            TableMeta tableMeta = null;
-            if (tableMetaCache != null) {// 入错存在table meta cache
-                tableMeta = getTableMeta(table.getDbName(), table.getTableName(), true);
+            if (tableMetaCache != null && tableMeta == null) {// 入错存在table meta
+                                                              // cache
+                tableMeta = getTableMeta(table.getDbName(), table.getTableName(), true, position);
                 if (tableMeta == null) {
                 if (tableMeta == null) {
                     tableError = true;
                     tableError = true;
                     if (!filterTableError) {
                     if (!filterTableError) {
@@ -406,6 +471,13 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
         }
         }
     }
     }
 
 
+    private EntryPosition createPosition(LogHeader logHeader) {
+        return new EntryPosition(binlogFileName,
+            logHeader.getLogPos(),
+            logHeader.getWhen() * 1000L,
+            logHeader.getServerId()); // 记录到秒
+    }
+
     private boolean parseOneRow(RowData.Builder rowDataBuilder, RowsLogEvent event, RowsLogBuffer buffer, BitSet cols,
     private boolean parseOneRow(RowData.Builder rowDataBuilder, RowsLogEvent event, RowsLogBuffer buffer, BitSet cols,
                                 boolean isAfter, TableMeta tableMeta) throws UnsupportedEncodingException {
                                 boolean isAfter, TableMeta tableMeta) throws UnsupportedEncodingException {
         int columnCnt = event.getTable().getColumnCnt();
         int columnCnt = event.getTable().getColumnCnt();
@@ -414,18 +486,19 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
         boolean tableError = false;
         boolean tableError = false;
         // check table fileds count,只能处理加字段
         // check table fileds count,只能处理加字段
         boolean existRDSNoPrimaryKey = false;
         boolean existRDSNoPrimaryKey = false;
-        if (tableMeta != null && columnInfo.length > tableMeta.getFileds().size()) {
+        if (tableMeta != null && columnInfo.length > tableMeta.getFields().size()) {
             if (tableMetaCache.isOnRDS()) {
             if (tableMetaCache.isOnRDS()) {
                 // 特殊处理下RDS的场景
                 // 特殊处理下RDS的场景
                 List<FieldMeta> primaryKeys = tableMeta.getPrimaryFields();
                 List<FieldMeta> primaryKeys = tableMeta.getPrimaryFields();
                 if (primaryKeys == null || primaryKeys.isEmpty()) {
                 if (primaryKeys == null || primaryKeys.isEmpty()) {
-                    if (columnInfo.length == tableMeta.getFileds().size() + 1
+                    if (columnInfo.length == tableMeta.getFields().size() + 1
                         && columnInfo[columnInfo.length - 1].type == LogEvent.MYSQL_TYPE_LONGLONG) {
                         && columnInfo[columnInfo.length - 1].type == LogEvent.MYSQL_TYPE_LONGLONG) {
                         existRDSNoPrimaryKey = true;
                         existRDSNoPrimaryKey = true;
                     }
                     }
                 }
                 }
             }
             }
 
 
+            EntryPosition position = createPosition(event.getHeader());
             if (!existRDSNoPrimaryKey) {
             if (!existRDSNoPrimaryKey) {
                 // online ddl增加字段操作步骤:
                 // online ddl增加字段操作步骤:
                 // 1. 新增一张临时表,将需要做ddl表的数据全量导入
                 // 1. 新增一张临时表,将需要做ddl表的数据全量导入
@@ -433,7 +506,7 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
                 // 3. 锁住应用请求,将临时表rename为老表的名字,完成增加字段的操作
                 // 3. 锁住应用请求,将临时表rename为老表的名字,完成增加字段的操作
                 // 尝试做一次reload,可能因为ddl没有正确解析,或者使用了类似online ddl的操作
                 // 尝试做一次reload,可能因为ddl没有正确解析,或者使用了类似online ddl的操作
                 // 因为online ddl没有对应表名的alter语法,所以不会有clear cache的操作
                 // 因为online ddl没有对应表名的alter语法,所以不会有clear cache的操作
-                tableMeta = getTableMeta(event.getTable().getDbName(), event.getTable().getTableName(), false);// 强制重新获取一次
+                tableMeta = getTableMeta(event.getTable().getDbName(), event.getTable().getTableName(), false, position);// 强制重新获取一次
                 if (tableMeta == null) {
                 if (tableMeta == null) {
                     tableError = true;
                     tableError = true;
                     if (!filterTableError) {
                     if (!filterTableError) {
@@ -443,11 +516,11 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
                 }
                 }
 
 
                 // 在做一次判断
                 // 在做一次判断
-                if (tableMeta != null && columnInfo.length > tableMeta.getFileds().size()) {
+                if (tableMeta != null && columnInfo.length > tableMeta.getFields().size()) {
                     tableError = true;
                     tableError = true;
                     if (!filterTableError) {
                     if (!filterTableError) {
                         throw new CanalParseException("column size is not match for table:" + tableMeta.getFullName()
                         throw new CanalParseException("column size is not match for table:" + tableMeta.getFullName()
-                                                      + "," + columnInfo.length + " vs " + tableMeta.getFileds().size());
+                                                      + "," + columnInfo.length + " vs " + tableMeta.getFields().size());
                     }
                     }
                 }
                 }
             } else {
             } else {
@@ -474,7 +547,7 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
             FieldMeta fieldMeta = null;
             FieldMeta fieldMeta = null;
             if (tableMeta != null && !tableError) {
             if (tableMeta != null && !tableError) {
                 // 处理file meta
                 // 处理file meta
-                fieldMeta = tableMeta.getFileds().get(i);
+                fieldMeta = tableMeta.getFields().get(i);
                 columnBuilder.setName(fieldMeta.getColumnName());
                 columnBuilder.setName(fieldMeta.getColumnName());
                 columnBuilder.setIsKey(fieldMeta.isKey());
                 columnBuilder.setIsKey(fieldMeta.isKey());
                 // 增加mysql type类型,issue 73
                 // 增加mysql type类型,issue 73
@@ -615,6 +688,14 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
 
 
     }
     }
 
 
+    private Entry buildQueryEntry(String queryString, LogHeader logHeader, String tableName) {
+        Header header = createHeader(binlogFileName, logHeader, "", tableName, EventType.QUERY);
+        RowChange.Builder rowChangeBuider = RowChange.newBuilder();
+        rowChangeBuider.setSql(queryString);
+        rowChangeBuider.setEventType(EventType.QUERY);
+        return createEntry(header, EntryType.ROWDATA, rowChangeBuider.build().toByteString());
+    }
+
     private Entry buildQueryEntry(String queryString, LogHeader logHeader) {
     private Entry buildQueryEntry(String queryString, LogHeader logHeader) {
         Header header = createHeader(binlogFileName, logHeader, "", "", EventType.QUERY);
         Header header = createHeader(binlogFileName, logHeader, "", "", EventType.QUERY);
         RowChange.Builder rowChangeBuider = RowChange.newBuilder();
         RowChange.Builder rowChangeBuider = RowChange.newBuilder();
@@ -673,9 +754,9 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
         return true;
         return true;
     }
     }
 
 
-    private TableMeta getTableMeta(String dbName, String tbName, boolean useCache) {
+    private TableMeta getTableMeta(String dbName, String tbName, boolean useCache, EntryPosition position) {
         try {
         try {
-            return tableMetaCache.getTableMeta(dbName, tbName, useCache);
+            return tableMetaCache.getTableMeta(dbName, tbName, useCache, position);
         } catch (Exception e) {
         } catch (Exception e) {
             String message = ExceptionUtils.getRootCauseMessage(e);
             String message = ExceptionUtils.getRootCauseMessage(e);
             if (filterTableError) {
             if (filterTableError) {
@@ -692,6 +773,14 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
         return "LONGTEXT".equalsIgnoreCase(columnType) || "MEDIUMTEXT".equalsIgnoreCase(columnType)
         return "LONGTEXT".equalsIgnoreCase(columnType) || "MEDIUMTEXT".equalsIgnoreCase(columnType)
                || "TEXT".equalsIgnoreCase(columnType) || "TINYTEXT".equalsIgnoreCase(columnType);
                || "TEXT".equalsIgnoreCase(columnType) || "TINYTEXT".equalsIgnoreCase(columnType);
     }
     }
+    
+    private boolean isAliSQLHeartBeat(String schema, String table) {
+        return "test".equalsIgnoreCase(schema) && "heartbeat".equalsIgnoreCase(table);
+    }
+
+    private boolean isRDSHeartBeat(String schema, String table) {
+        return "mysql".equalsIgnoreCase(schema) && "ha_health_check".equalsIgnoreCase(table);
+    }
 
 
     public static TransactionBegin createTransactionBegin(long threadId) {
     public static TransactionBegin createTransactionBegin(long threadId) {
         TransactionBegin.Builder beginBuilder = TransactionBegin.newBuilder();
         TransactionBegin.Builder beginBuilder = TransactionBegin.newBuilder();

+ 140 - 66
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/TableMetaCache.java

@@ -14,48 +14,58 @@ import com.alibaba.otter.canal.parse.exception.CanalParseException;
 import com.alibaba.otter.canal.parse.inbound.TableMeta;
 import com.alibaba.otter.canal.parse.inbound.TableMeta;
 import com.alibaba.otter.canal.parse.inbound.TableMeta.FieldMeta;
 import com.alibaba.otter.canal.parse.inbound.TableMeta.FieldMeta;
 import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection;
 import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection;
-import com.google.common.base.Function;
-import com.google.common.collect.MigrateMap;
+import com.alibaba.otter.canal.parse.inbound.mysql.ddl.DruidDdlParser;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.TableMetaTSDB;
+import com.alibaba.otter.canal.protocol.position.EntryPosition;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
 
 
 /**
 /**
  * 处理table meta解析和缓存
  * 处理table meta解析和缓存
- * 
+ *
  * @author jianghang 2013-1-17 下午10:15:16
  * @author jianghang 2013-1-17 下午10:15:16
  * @version 1.0.0
  * @version 1.0.0
  */
  */
 public class TableMetaCache {
 public class TableMetaCache {
 
 
-    public static final String     COLUMN_NAME    = "COLUMN_NAME";
-    public static final String     COLUMN_TYPE    = "COLUMN_TYPE";
-    public static final String     IS_NULLABLE    = "IS_NULLABLE";
-    public static final String     COLUMN_KEY     = "COLUMN_KEY";
-    public static final String     COLUMN_DEFAULT = "COLUMN_DEFAULT";
-    public static final String     EXTRA          = "EXTRA";
-    private MysqlConnection        connection;
-    private boolean                isOnRDS        = false;
+    public static final String              COLUMN_NAME    = "COLUMN_NAME";
+    public static final String              COLUMN_TYPE    = "COLUMN_TYPE";
+    public static final String              IS_NULLABLE    = "IS_NULLABLE";
+    public static final String              COLUMN_KEY     = "COLUMN_KEY";
+    public static final String              COLUMN_DEFAULT = "COLUMN_DEFAULT";
+    public static final String              EXTRA          = "EXTRA";
+    private MysqlConnection                 connection;
+    private boolean                         isOnRDS        = false;
 
 
+    private TableMetaTSDB                   tableMetaTSDB;
     // 第一层tableId,第二层schema.table,解决tableId重复,对应多张表
     // 第一层tableId,第二层schema.table,解决tableId重复,对应多张表
-    private Map<String, TableMeta> tableMetaCache;
+    private LoadingCache<String, TableMeta> tableMetaDB;
 
 
-    public TableMetaCache(MysqlConnection con){
+    public TableMetaCache(MysqlConnection con, TableMetaTSDB tableMetaTSDB){
         this.connection = con;
         this.connection = con;
-        tableMetaCache = MigrateMap.makeComputingMap(new Function<String, TableMeta>() {
+        this.tableMetaTSDB = tableMetaTSDB;
+        // 如果持久存储的表结构为空,从db里面获取下
+        if (tableMetaTSDB == null) {
+            this.tableMetaDB = CacheBuilder.newBuilder().build(new CacheLoader<String, TableMeta>() {
 
 
-            public TableMeta apply(String name) {
-                try {
-                    return getTableMeta0(name);
-                } catch (IOException e) {
-                    // 尝试做一次retry操作
+                @Override
+                public TableMeta load(String name) throws Exception {
                     try {
                     try {
-                        connection.reconnect();
-                        return getTableMeta0(name);
-                    } catch (IOException e1) {
-                        throw new CanalParseException("fetch failed by table meta:" + name, e1);
+                        return getTableMetaByDB(name);
+                    } catch (Throwable e) {
+                        // 尝试做一次retry操作
+                        try {
+                            connection.reconnect();
+                            return getTableMetaByDB(name);
+                        } catch (IOException e1) {
+                            throw new CanalParseException("fetch failed by table meta:" + name, e1);
+                        }
                     }
                     }
                 }
                 }
-            }
 
 
-        });
+            });
+        }
 
 
         try {
         try {
             ResultSetPacket packet = connection.query("show global variables  like 'rds\\_%'");
             ResultSetPacket packet = connection.query("show global variables  like 'rds\\_%'");
@@ -66,47 +76,15 @@ public class TableMetaCache {
         }
         }
     }
     }
 
 
-    public TableMeta getTableMeta(String schema, String table) {
-        return getTableMeta(schema, table, true);
-    }
-
-    public TableMeta getTableMeta(String schema, String table, boolean useCache) {
-        if (!useCache) {
-            tableMetaCache.remove(getFullName(schema, table));
-        }
-
-        return tableMetaCache.get(getFullName(schema, table));
-    }
-
-    public void clearTableMeta(String schema, String table) {
-        tableMetaCache.remove(getFullName(schema, table));
-    }
-
-    public void clearTableMetaWithSchemaName(String schema) {
-        // Set<String> removeNames = new HashSet<String>(); //
-        // 存一份临时变量,避免在遍历的时候进行删除
-        for (String name : tableMetaCache.keySet()) {
-            if (StringUtils.startsWithIgnoreCase(name, schema + ".")) {
-                // removeNames.add(name);
-                tableMetaCache.remove(name);
-            }
-        }
-
-        // for (String name : removeNames) {
-        // tables.remove(name);
-        // }
-    }
-
-    public void clearTableMeta() {
-        tableMetaCache.clear();
-    }
-
-    private TableMeta getTableMeta0(String fullname) throws IOException {
+    private TableMeta getTableMetaByDB(String fullname) throws IOException {
         ResultSetPacket packet = connection.query("desc " + fullname);
         ResultSetPacket packet = connection.query("desc " + fullname);
-        return new TableMeta(fullname, parserTableMeta(packet));
+        String[] names = StringUtils.split(fullname, "`.`");
+        String schema = names[0];
+        String table = names[1].substring(0, names[1].length());
+        return new TableMeta(schema, table, parserTableMeta(packet));
     }
     }
 
 
-    private List<FieldMeta> parserTableMeta(ResultSetPacket packet) {
+    public static List<FieldMeta> parserTableMeta(ResultSetPacket packet) {
         Map<String, Integer> nameMaps = new HashMap<String, Integer>(6, 1f);
         Map<String, Integer> nameMaps = new HashMap<String, Integer>(6, 1f);
 
 
         int index = 0;
         int index = 0;
@@ -122,9 +100,13 @@ public class TableMetaCache {
             // 做一个优化,使用String.intern(),共享String对象,减少内存使用
             // 做一个优化,使用String.intern(),共享String对象,减少内存使用
             meta.setColumnName(packet.getFieldValues().get(nameMaps.get(COLUMN_NAME) + i * size).intern());
             meta.setColumnName(packet.getFieldValues().get(nameMaps.get(COLUMN_NAME) + i * size).intern());
             meta.setColumnType(packet.getFieldValues().get(nameMaps.get(COLUMN_TYPE) + i * size));
             meta.setColumnType(packet.getFieldValues().get(nameMaps.get(COLUMN_TYPE) + i * size));
-            meta.setIsNullable(packet.getFieldValues().get(nameMaps.get(IS_NULLABLE) + i * size));
-            meta.setIskey(packet.getFieldValues().get(nameMaps.get(COLUMN_KEY) + i * size));
-            meta.setDefaultValue(packet.getFieldValues().get(nameMaps.get(COLUMN_DEFAULT) + i * size));
+            meta.setNullable(StringUtils.equalsIgnoreCase(packet.getFieldValues().get(nameMaps.get(IS_NULLABLE) + i
+                                                                                      * size),
+                "YES"));
+            meta.setKey("PRI".equalsIgnoreCase(packet.getFieldValues().get(nameMaps.get(COLUMN_KEY) + i * size)));
+            // 特殊处理引号
+            meta.setDefaultValue(DruidDdlParser.unescapeQuotaName(packet.getFieldValues()
+                .get(nameMaps.get(COLUMN_DEFAULT) + i * size)));
             meta.setExtra(packet.getFieldValues().get(nameMaps.get(EXTRA) + i * size));
             meta.setExtra(packet.getFieldValues().get(nameMaps.get(EXTRA) + i * size));
 
 
             result.add(meta);
             result.add(meta);
@@ -133,6 +115,98 @@ public class TableMetaCache {
         return result;
         return result;
     }
     }
 
 
+    public TableMeta getTableMeta(String schema, String table) {
+        return getTableMeta(schema, table, true);
+    }
+
+    public TableMeta getTableMeta(String schema, String table, boolean useCache) {
+        if (!useCache) {
+            tableMetaDB.invalidate(getFullName(schema, table));
+        }
+
+        return tableMetaDB.getUnchecked(getFullName(schema, table));
+    }
+
+    public TableMeta getTableMeta(String schema, String table, EntryPosition position) {
+        return getTableMeta(schema, table, true, position);
+    }
+
+    public TableMeta getTableMeta(String schema, String table, boolean useCache, EntryPosition position) {
+        TableMeta tableMeta = null;
+        if (tableMetaTSDB != null) {
+            tableMeta = tableMetaTSDB.find(schema, table);
+            if (tableMeta == null) {
+                // 因为条件变化,可能第一次的tableMeta没取到,需要从db获取一次,并记录到snapshot中
+                String fullName = getFullName(schema, table);
+                try {
+                    ResultSetPacket packet = connection.query("show create table " + fullName);
+                    String createDDL = null;
+                    if (packet.getFieldValues().size() > 0) {
+                        createDDL = packet.getFieldValues().get(1);
+                    }
+                    // 强制覆盖掉内存值
+                    tableMetaTSDB.apply(position, schema, createDDL, "first");
+                    tableMeta = tableMetaTSDB.find(schema, table);
+                } catch (IOException e) {
+                    throw new CanalParseException("fetch failed by table meta:" + fullName, e);
+                }
+            }
+            return tableMeta;
+        } else {
+            if (!useCache) {
+                tableMetaDB.invalidate(getFullName(schema, table));
+            }
+
+            return tableMetaDB.getUnchecked(getFullName(schema, table));
+        }
+    }
+
+    public void clearTableMeta(String schema, String table) {
+        if (tableMetaTSDB != null) {
+            // tsdb不需要做,会基于ddl sql自动清理
+        } else {
+            tableMetaDB.invalidate(getFullName(schema, table));
+        }
+    }
+
+    public void clearTableMetaWithSchemaName(String schema) {
+        if (tableMetaTSDB != null) {
+            // tsdb不需要做,会基于ddl sql自动清理
+        } else {
+            for (String name : tableMetaDB.asMap().keySet()) {
+                if (StringUtils.startsWithIgnoreCase(name, schema + ".")) {
+                    // removeNames.add(name);
+                    tableMetaDB.invalidate(name);
+                }
+            }
+        }
+    }
+
+    public void clearTableMeta() {
+        if (tableMetaTSDB != null) {
+            // tsdb不需要做,会基于ddl sql自动清理
+        } else {
+            tableMetaDB.invalidateAll();
+        }
+    }
+
+    /**
+     * 更新一下本地的表结构内存
+     *
+     * @param position
+     * @param schema
+     * @param ddl
+     * @return
+     */
+    public boolean apply(EntryPosition position, String schema, String ddl, String extra) {
+        if (tableMetaTSDB != null) {
+            return tableMetaTSDB.apply(position, schema, ddl, extra);
+        } else {
+            // ignore
+            return true;
+        }
+    }
+
     private String getFullName(String schema, String table) {
     private String getFullName(String schema, String table) {
         StringBuilder builder = new StringBuilder();
         StringBuilder builder = new StringBuilder();
         return builder.append('`')
         return builder.append('`')

+ 115 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/ddl/DdlResult.java

@@ -0,0 +1,115 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.ddl;
+
+import com.alibaba.otter.canal.protocol.CanalEntry.EventType;
+
+/**
+ * @author agapple 2017年8月1日 下午7:30:42
+ * @since 3.2.5
+ */
+public class DdlResult {
+
+    private String    schemaName;
+    private String    tableName;
+    private String    oriSchemaName;    // rename ddl中的源表
+    private String    oriTableName;     // rename ddl中的目标表
+    private EventType type;
+    private DdlResult renameTableResult; // 多个rename table的存储
+
+    /*
+     * RENAME TABLE tbl_name TO new_tbl_name [, tbl_name2 TO new_tbl_name2] ...
+     */
+
+    public DdlResult(){
+    }
+
+    public DdlResult(String schemaName){
+        this.schemaName = schemaName;
+    }
+
+    public DdlResult(String schemaName, String tableName){
+        this.schemaName = schemaName;
+        this.tableName = tableName;
+    }
+
+    public DdlResult(String schemaName, String tableName, String oriSchemaName, String oriTableName){
+        this.schemaName = schemaName;
+        this.tableName = tableName;
+        this.oriSchemaName = oriSchemaName;
+        this.oriTableName = oriTableName;
+    }
+
+    public String getSchemaName() {
+        return schemaName;
+    }
+
+    public void setSchemaName(String schemaName) {
+        this.schemaName = schemaName;
+    }
+
+    public String getTableName() {
+        return tableName;
+    }
+
+    public void setTableName(String tableName) {
+        this.tableName = tableName;
+    }
+
+    public EventType getType() {
+        return type;
+    }
+
+    public void setType(EventType type) {
+        this.type = type;
+    }
+
+    public String getOriSchemaName() {
+        return oriSchemaName;
+    }
+
+    public void setOriSchemaName(String oriSchemaName) {
+        this.oriSchemaName = oriSchemaName;
+    }
+
+    public String getOriTableName() {
+        return oriTableName;
+    }
+
+    public void setOriTableName(String oriTableName) {
+        this.oriTableName = oriTableName;
+    }
+
+    public DdlResult getRenameTableResult() {
+        return renameTableResult;
+    }
+
+    public void setRenameTableResult(DdlResult renameTableResult) {
+        this.renameTableResult = renameTableResult;
+    }
+
+    @Override
+    public DdlResult clone() {
+        DdlResult result = new DdlResult();
+        result.setOriSchemaName(oriSchemaName);
+        result.setOriTableName(oriTableName);
+        result.setSchemaName(schemaName);
+        result.setTableName(tableName);
+        // result.setType(type);
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        DdlResult ddlResult = this;
+        StringBuffer sb = new StringBuffer();
+        do {
+            sb.append(String.format("DdlResult [schemaName=%s , tableName=%s , oriSchemaName=%s , oriTableName=%s , type=%s ];",
+                ddlResult.schemaName,
+                ddlResult.tableName,
+                ddlResult.oriSchemaName,
+                ddlResult.oriTableName,
+                ddlResult.type));
+            ddlResult = ddlResult.renameTableResult;
+        } while (ddlResult != null);
+        return sb.toString();
+    }
+}

+ 213 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/ddl/DruidDdlParser.java

@@ -0,0 +1,213 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.ddl;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import com.alibaba.druid.sql.SQLUtils;
+import com.alibaba.druid.sql.ast.SQLExpr;
+import com.alibaba.druid.sql.ast.SQLStatement;
+import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr;
+import com.alibaba.druid.sql.ast.expr.SQLPropertyExpr;
+import com.alibaba.druid.sql.ast.statement.SQLAlterTableAddConstraint;
+import com.alibaba.druid.sql.ast.statement.SQLAlterTableAddIndex;
+import com.alibaba.druid.sql.ast.statement.SQLAlterTableDropConstraint;
+import com.alibaba.druid.sql.ast.statement.SQLAlterTableDropIndex;
+import com.alibaba.druid.sql.ast.statement.SQLAlterTableDropKey;
+import com.alibaba.druid.sql.ast.statement.SQLAlterTableItem;
+import com.alibaba.druid.sql.ast.statement.SQLAlterTableRename;
+import com.alibaba.druid.sql.ast.statement.SQLAlterTableStatement;
+import com.alibaba.druid.sql.ast.statement.SQLConstraint;
+import com.alibaba.druid.sql.ast.statement.SQLCreateIndexStatement;
+import com.alibaba.druid.sql.ast.statement.SQLCreateTableStatement;
+import com.alibaba.druid.sql.ast.statement.SQLDeleteStatement;
+import com.alibaba.druid.sql.ast.statement.SQLDropIndexStatement;
+import com.alibaba.druid.sql.ast.statement.SQLDropTableStatement;
+import com.alibaba.druid.sql.ast.statement.SQLExprTableSource;
+import com.alibaba.druid.sql.ast.statement.SQLInsertStatement;
+import com.alibaba.druid.sql.ast.statement.SQLTableSource;
+import com.alibaba.druid.sql.ast.statement.SQLTruncateStatement;
+import com.alibaba.druid.sql.ast.statement.SQLUnique;
+import com.alibaba.druid.sql.ast.statement.SQLUpdateStatement;
+import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlRenameTableStatement;
+import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlRenameTableStatement.Item;
+import com.alibaba.druid.sql.parser.ParserException;
+import com.alibaba.druid.util.JdbcConstants;
+import com.alibaba.otter.canal.protocol.CanalEntry.EventType;
+
+/**
+ * @author agapple 2017年7月27日 下午4:05:34
+ * @since 1.0.25
+ */
+public class DruidDdlParser {
+
+    public static List<DdlResult> parse(String queryString, String schmeaName) {
+        List<SQLStatement> stmtList = null;
+        try {
+            stmtList = SQLUtils.parseStatements(queryString, JdbcConstants.MYSQL, false);
+        } catch (ParserException e) {
+            // 可能存在一些SQL是不支持的,比如存储过程
+            DdlResult ddlResult = new DdlResult();
+            ddlResult.setType(EventType.QUERY);
+            return Arrays.asList(ddlResult);
+        }
+
+        List<DdlResult> ddlResults = new ArrayList<DdlResult>();
+        for (SQLStatement statement : stmtList) {
+            if (statement instanceof SQLCreateTableStatement) {
+                DdlResult ddlResult = new DdlResult();
+                SQLCreateTableStatement createTable = (SQLCreateTableStatement) statement;
+                processName(ddlResult, schmeaName, createTable.getName(), false);
+                ddlResult.setType(EventType.CREATE);
+                ddlResults.add(ddlResult);
+            } else if (statement instanceof SQLAlterTableStatement) {
+                SQLAlterTableStatement alterTable = (SQLAlterTableStatement) statement;
+                for (SQLAlterTableItem item : alterTable.getItems()) {
+                    if (item instanceof SQLAlterTableRename) {
+                        DdlResult ddlResult = new DdlResult();
+                        processName(ddlResult, schmeaName, alterTable.getName(), true);
+                        processName(ddlResult, schmeaName, ((SQLAlterTableRename) item).getToName(), false);
+                        ddlResults.add(ddlResult);
+                    } else if (item instanceof SQLAlterTableAddIndex) {
+                        DdlResult ddlResult = new DdlResult();
+                        processName(ddlResult, schmeaName, alterTable.getName(), false);
+                        ddlResult.setType(EventType.CINDEX);
+                        ddlResults.add(ddlResult);
+                    } else if (item instanceof SQLAlterTableDropIndex || item instanceof SQLAlterTableDropKey) {
+                        DdlResult ddlResult = new DdlResult();
+                        processName(ddlResult, schmeaName, alterTable.getName(), false);
+                        ddlResult.setType(EventType.DINDEX);
+                        ddlResults.add(ddlResult);
+                    } else if (item instanceof SQLAlterTableAddConstraint) {
+                        DdlResult ddlResult = new DdlResult();
+                        processName(ddlResult, schmeaName, alterTable.getName(), false);
+                        SQLConstraint constraint = ((SQLAlterTableAddConstraint) item).getConstraint();
+                        if (constraint instanceof SQLUnique) {
+                            ddlResult.setType(EventType.CINDEX);
+                            ddlResults.add(ddlResult);
+                        }
+                    } else if (item instanceof SQLAlterTableDropConstraint) {
+                        DdlResult ddlResult = new DdlResult();
+                        processName(ddlResult, schmeaName, alterTable.getName(), false);
+                        ddlResult.setType(EventType.DINDEX);
+                        ddlResults.add(ddlResult);
+                    } else {
+                        DdlResult ddlResult = new DdlResult();
+                        processName(ddlResult, schmeaName, alterTable.getName(), false);
+                        ddlResult.setType(EventType.ALTER);
+                        ddlResults.add(ddlResult);
+                    }
+                }
+            } else if (statement instanceof SQLDropTableStatement) {
+                SQLDropTableStatement dropTable = (SQLDropTableStatement) statement;
+                for (SQLExprTableSource tableSource : dropTable.getTableSources()) {
+                    DdlResult ddlResult = new DdlResult();
+                    processName(ddlResult, schmeaName, tableSource.getExpr(), false);
+                    ddlResult.setType(EventType.ERASE);
+                    ddlResults.add(ddlResult);
+                }
+            } else if (statement instanceof SQLCreateIndexStatement) {
+                SQLCreateIndexStatement createIndex = (SQLCreateIndexStatement) statement;
+                SQLTableSource tableSource = createIndex.getTable();
+                DdlResult ddlResult = new DdlResult();
+                processName(ddlResult, schmeaName, ((SQLExprTableSource) tableSource).getExpr(), false);
+                ddlResult.setType(EventType.CINDEX);
+                ddlResults.add(ddlResult);
+            } else if (statement instanceof SQLDropIndexStatement) {
+                SQLDropIndexStatement dropIndex = (SQLDropIndexStatement) statement;
+                SQLExprTableSource tableSource = dropIndex.getTableName();
+                DdlResult ddlResult = new DdlResult();
+                processName(ddlResult, schmeaName, tableSource.getExpr(), false);
+                ddlResult.setType(EventType.DINDEX);
+                ddlResults.add(ddlResult);
+            } else if (statement instanceof SQLTruncateStatement) {
+                SQLTruncateStatement truncate = (SQLTruncateStatement) statement;
+                for (SQLExprTableSource tableSource : truncate.getTableSources()) {
+                    DdlResult ddlResult = new DdlResult();
+                    processName(ddlResult, schmeaName, tableSource.getExpr(), false);
+                    ddlResult.setType(EventType.TRUNCATE);
+                    ddlResults.add(ddlResult);
+                }
+            } else if (statement instanceof MySqlRenameTableStatement) {
+                MySqlRenameTableStatement rename = (MySqlRenameTableStatement) statement;
+                for (Item item : rename.getItems()) {
+                    DdlResult ddlResult = new DdlResult();
+                    processName(ddlResult, schmeaName, item.getName(), true);
+                    processName(ddlResult, schmeaName, item.getTo(), false);
+                    ddlResult.setType(EventType.RENAME);
+                    ddlResults.add(ddlResult);
+                }
+            } else if (statement instanceof SQLInsertStatement) {
+                DdlResult ddlResult = new DdlResult();
+                SQLInsertStatement insert = (SQLInsertStatement) statement;
+                processName(ddlResult, schmeaName, insert.getTableName(), false);
+                ddlResult.setType(EventType.INSERT);
+                ddlResults.add(ddlResult);
+            } else if (statement instanceof SQLUpdateStatement) {
+                DdlResult ddlResult = new DdlResult();
+                SQLUpdateStatement update = (SQLUpdateStatement) statement;
+                // 拿到的表名可能为null,比如update a,b set a.id=x
+                processName(ddlResult, schmeaName, update.getTableName(), false);
+                ddlResult.setType(EventType.UPDATE);
+                ddlResults.add(ddlResult);
+            } else if (statement instanceof SQLDeleteStatement) {
+                DdlResult ddlResult = new DdlResult();
+                SQLDeleteStatement delete = (SQLDeleteStatement) statement;
+                // 拿到的表名可能为null,比如delete a,b from a where a.id = b.id
+                processName(ddlResult, schmeaName, delete.getTableName(), false);
+                ddlResult.setType(EventType.DELETE);
+                ddlResults.add(ddlResult);
+            }
+        }
+
+        return ddlResults;
+    }
+
+    private static void processName(DdlResult ddlResult, String schema, SQLExpr sqlName, boolean isOri) {
+        if (sqlName == null) {
+            return;
+        }
+
+        String table = null;
+        if (sqlName instanceof SQLPropertyExpr) {
+            SQLIdentifierExpr owner = (SQLIdentifierExpr) ((SQLPropertyExpr) sqlName).getOwner();
+            schema = unescapeName(owner.getName());
+            table = unescapeName(((SQLPropertyExpr) sqlName).getName());
+        } else if (sqlName instanceof SQLIdentifierExpr) {
+            table = unescapeName(((SQLIdentifierExpr) sqlName).getName());
+        }
+
+        if (isOri) {
+            ddlResult.setOriSchemaName(schema);
+            ddlResult.setOriTableName(table);
+        } else {
+            ddlResult.setSchemaName(schema);
+            ddlResult.setTableName(table);
+        }
+    }
+
+    public static String unescapeName(String name) {
+        if (name != null && name.length() > 2) {
+            char c0 = name.charAt(0);
+            char x0 = name.charAt(name.length() - 1);
+            if ((c0 == '"' && x0 == '"') || (c0 == '`' && x0 == '`')) {
+                return name.substring(1, name.length() - 1);
+            }
+        }
+
+        return name;
+    }
+
+    public static String unescapeQuotaName(String name) {
+        if (name != null && name.length() > 2) {
+            char c0 = name.charAt(0);
+            char x0 = name.charAt(name.length() - 1);
+            if (c0 == '\'' && x0 == '\'') {
+                return name.substring(1, name.length() - 1);
+            }
+        }
+
+        return name;
+    }
+
+}

+ 1 - 99
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/SimpleDdlParser.java → parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/ddl/SimpleDdlParser.java

@@ -1,4 +1,4 @@
-package com.alibaba.otter.canal.parse.inbound.mysql.dbsync;
+package com.alibaba.otter.canal.parse.inbound.mysql.ddl;
 
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.oro.text.regex.Perl5Matcher;
 import org.apache.oro.text.regex.Perl5Matcher;
@@ -218,102 +218,4 @@ public class SimpleDdlParser {
             sql = sb.toString();
             sql = sb.toString();
         }
         }
     }
     }
-
-    public static class DdlResult {
-
-        private String    schemaName;
-        private String    tableName;
-        private String    oriSchemaName;    // rename ddl中的源表
-        private String    oriTableName;     // rename ddl中的目标表
-        private EventType type;
-        private DdlResult renameTableResult; // 多个rename table的存储
-
-        /*
-         * RENAME TABLE tbl_name TO new_tbl_name [, tbl_name2 TO new_tbl_name2]
-         * ...
-         */
-
-        public DdlResult(){
-        }
-
-        public DdlResult(String schemaName){
-            this.schemaName = schemaName;
-        }
-
-        public DdlResult(String schemaName, String tableName){
-            this.schemaName = schemaName;
-            this.tableName = tableName;
-        }
-
-        public DdlResult(String schemaName, String tableName, String oriSchemaName, String oriTableName){
-            this.schemaName = schemaName;
-            this.tableName = tableName;
-            this.oriSchemaName = oriSchemaName;
-            this.oriTableName = oriTableName;
-        }
-
-        public String getSchemaName() {
-            return schemaName;
-        }
-
-        public void setSchemaName(String schemaName) {
-            this.schemaName = schemaName;
-        }
-
-        public String getTableName() {
-            return tableName;
-        }
-
-        public void setTableName(String tableName) {
-            this.tableName = tableName;
-        }
-
-        public EventType getType() {
-            return type;
-        }
-
-        public void setType(EventType type) {
-            this.type = type;
-        }
-
-        public String getOriSchemaName() {
-            return oriSchemaName;
-        }
-
-        public void setOriSchemaName(String oriSchemaName) {
-            this.oriSchemaName = oriSchemaName;
-        }
-
-        public String getOriTableName() {
-            return oriTableName;
-        }
-
-        public void setOriTableName(String oriTableName) {
-            this.oriTableName = oriTableName;
-        }
-
-        public DdlResult getRenameTableResult() {
-            return renameTableResult;
-        }
-
-        public void setRenameTableResult(DdlResult renameTableResult) {
-            this.renameTableResult = renameTableResult;
-        }
-
-        @Override
-        public String toString() {
-            DdlResult ddlResult = this;
-            StringBuffer sb = new StringBuffer();
-            do {
-                sb.append(String.format("DdlResult [schemaName=%s , tableName=%s , oriSchemaName=%s , oriTableName=%s , type=%s ];",
-                    ddlResult.schemaName,
-                    ddlResult.tableName,
-                    ddlResult.oriSchemaName,
-                    ddlResult.oriTableName,
-                    ddlResult.type));
-                ddlResult = ddlResult.renameTableResult;
-            } while (ddlResult != null);
-            return sb.toString();
-        }
-    }
 }
 }

+ 368 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/HttpHelper.java

@@ -0,0 +1,368 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.rds;
+
+import static org.apache.http.client.config.RequestConfig.custom;
+
+import java.io.IOException;
+import java.net.URI;
+import java.nio.charset.Charset;
+import java.security.cert.CertificateException;
+import java.security.cert.X509Certificate;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import javax.net.ssl.SSLContext;
+
+import org.apache.http.HttpStatus;
+import org.apache.http.NameValuePair;
+import org.apache.http.client.CookieStore;
+import org.apache.http.client.config.RequestConfig;
+import org.apache.http.client.entity.UrlEncodedFormEntity;
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.protocol.HttpClientContext;
+import org.apache.http.client.utils.URIBuilder;
+import org.apache.http.config.RegistryBuilder;
+import org.apache.http.conn.socket.ConnectionSocketFactory;
+import org.apache.http.conn.socket.PlainConnectionSocketFactory;
+import org.apache.http.conn.ssl.NoopHostnameVerifier;
+import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
+import org.apache.http.cookie.Cookie;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
+import org.apache.http.message.BasicNameValuePair;
+import org.apache.http.ssl.SSLContextBuilder;
+import org.apache.http.ssl.TrustStrategy;
+import org.apache.http.util.EntityUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Thin HTTP helper around Apache HttpClient used by the RDS openapi code.
+ * Supports plain GET/POST plus https variants that deliberately trust any
+ * certificate (RDS download endpoints may use self-signed certs).
+ */
+public class HttpHelper {
+
+    protected static final Logger logger = LoggerFactory.getLogger(HttpHelper.class);
+
+    /** Connection-pool sizing shared by every client this helper builds. */
+    private static final int      MAX_CONN_PER_ROUTE = 50;
+    private static final int      MAX_CONN_TOTAL     = 100;
+
+    /**
+     * Executes an HTTP GET and returns the raw response body.
+     *
+     * @param url target url
+     * @param timeout connect/connection-request/socket timeout in millis
+     * @return response body bytes when the server answers 200
+     * @throws Exception on transport failure; RuntimeException on non-200
+     */
+    public static byte[] getBytes(String url, int timeout) throws Exception {
+        long start = System.currentTimeMillis();
+        CloseableHttpClient httpclient = buildClient();
+        URI uri = new URIBuilder(url).build();
+        HttpGet httpGet = new HttpGet(uri);
+        HttpClientContext context = HttpClientContext.create();
+        context.setRequestConfig(buildConfig(timeout));
+        CloseableHttpResponse response = httpclient.execute(httpGet, context);
+        try {
+            int statusCode = response.getStatusLine().getStatusCode();
+            long cost = System.currentTimeMillis() - start;
+            if (logger.isWarnEnabled()) {
+                // fixed: used to log "post" although this is a GET request
+                logger.warn("get " + url + ", cost : " + cost);
+            }
+            if (statusCode == HttpStatus.SC_OK) {
+                return EntityUtils.toByteArray(response.getEntity());
+            } else {
+                String errorMsg = EntityUtils.toString(response.getEntity());
+                throw new RuntimeException("requestGet remote error, url=" + uri.toString() + ", code=" + statusCode
+                                           + ", error msg=" + errorMsg);
+            }
+        } finally {
+            response.close();
+            httpGet.releaseConnection();
+            httpclient.close(); // fixed: the client (and its pool) was previously leaked
+        }
+    }
+
+    /**
+     * Executes an HTTP GET and returns the body as a string. An https url is
+     * transparently routed through the certificate-ignoring client.
+     */
+    public static String get(String url, int timeout) {
+        url = url.trim();
+        if (url.startsWith("https")) {
+            return getIgnoreCerf(url, null, null, timeout);
+        }
+        long start = System.currentTimeMillis();
+        CloseableHttpClient httpclient = buildClient();
+        CloseableHttpResponse response = null;
+        HttpGet httpGet = null;
+        try {
+            URI uri = new URIBuilder(url).build();
+            httpGet = new HttpGet(uri);
+            HttpClientContext context = HttpClientContext.create();
+            context.setRequestConfig(buildConfig(timeout));
+            response = httpclient.execute(httpGet, context);
+            int statusCode = response.getStatusLine().getStatusCode();
+            if (statusCode == HttpStatus.SC_OK) {
+                return EntityUtils.toString(response.getEntity());
+            } else {
+                String errorMsg = EntityUtils.toString(response.getEntity());
+                throw new RuntimeException("requestGet remote error, url=" + uri.toString() + ", code=" + statusCode
+                                           + ", error msg=" + errorMsg);
+            }
+        } catch (Throwable t) {
+            long cost = System.currentTimeMillis() - start;
+            String curlRequest = getCurlRequest(url, null, null, cost);
+            throw new RuntimeException("requestGet remote error, request : " + curlRequest, t);
+        } finally {
+            printCurlRequest(url, null, null, System.currentTimeMillis() - start);
+            closeQuietly(response);
+            if (httpGet != null) { // fixed: NPE when URI parsing failed before httpGet was assigned
+                httpGet.releaseConnection();
+            }
+            closeQuietly(httpclient); // fixed: client leak
+        }
+    }
+
+    /** https GET that trusts any certificate. */
+    private static String getIgnoreCerf(String url, CookieStore cookieStore, Map<String, String> params, int timeout) {
+        long start = System.currentTimeMillis();
+        CloseableHttpClient httpClient = null;
+        CloseableHttpResponse response = null;
+        HttpGet httpGet = null;
+        try {
+            httpClient = buildIgnoreCertClient();
+            URI uri = new URIBuilder(url).build();
+            httpGet = new HttpGet(uri);
+            HttpClientContext context = HttpClientContext.create();
+            context.setRequestConfig(buildConfig(timeout));
+            response = httpClient.execute(httpGet, context);
+            int statusCode = response.getStatusLine().getStatusCode();
+            if (statusCode == HttpStatus.SC_OK) {
+                return EntityUtils.toString(response.getEntity());
+            } else {
+                String errorMsg = EntityUtils.toString(response.getEntity());
+                throw new RuntimeException("requestGet remote error, url=" + uri.toString() + ", code=" + statusCode
+                                           + ", error msg=" + errorMsg);
+            }
+        } catch (Throwable t) {
+            long cost = System.currentTimeMillis() - start;
+            String curlRequest = getCurlRequest(url, cookieStore, params, cost);
+            // fixed: error message claimed "requestPost" inside a GET helper
+            throw new RuntimeException("requestGet(Https) remote error, request : " + curlRequest, t);
+        } finally {
+            printCurlRequest(url, null, null, System.currentTimeMillis() - start);
+            closeQuietly(response);
+            if (httpGet != null) {
+                httpGet.releaseConnection();
+            }
+            closeQuietly(httpClient); // fixed: client leak
+        }
+    }
+
+    /** https form POST that trusts any certificate. */
+    private static String postIgnoreCerf(String url, CookieStore cookieStore, Map<String, String> params, int timeout) {
+        long start = System.currentTimeMillis();
+        CloseableHttpClient httpClient = null;
+        CloseableHttpResponse response = null;
+        HttpPost httpPost = null;
+        try {
+            httpClient = buildIgnoreCertClient();
+            URI uri = new URIBuilder(url).build();
+            httpPost = new HttpPost(uri);
+            httpPost.setEntity(new UrlEncodedFormEntity(buildParams(params), Charset.forName("UTF-8")));
+            HttpClientContext context = HttpClientContext.create();
+            context.setRequestConfig(buildConfig(timeout));
+            context.setCookieStore(cookieStore);
+            response = httpClient.execute(httpPost, context);
+            int statusCode = response.getStatusLine().getStatusCode();
+            long cost = System.currentTimeMillis() - start;
+            if (statusCode == HttpStatus.SC_OK) {
+                printCurlRequest(url, cookieStore, params, cost);
+                return EntityUtils.toString(response.getEntity());
+            } else {
+                String curlRequest = getCurlRequest(url, cookieStore, params, cost);
+                throw new RuntimeException("requestPost(Https) remote error, request : " + curlRequest
+                                           + ", statusCode=" + statusCode + "");
+            }
+        } catch (Throwable t) {
+            long cost = System.currentTimeMillis() - start;
+            String curlRequest = getCurlRequest(url, cookieStore, params, cost);
+            throw new RuntimeException("requestPost(Https) remote error, request : " + curlRequest, t);
+        } finally {
+            closeQuietly(response);
+            if (httpPost != null) {
+                httpPost.releaseConnection();
+            }
+            closeQuietly(httpClient); // fixed: client leak
+        }
+    }
+
+    /**
+     * Executes a form-encoded HTTP POST. https urls go through the
+     * certificate-ignoring client.
+     */
+    public static String post(String url, CookieStore cookieStore, Map<String, String> params, int timeout) {
+        url = url.trim();
+        if (url.startsWith("https")) {
+            return postIgnoreCerf(url, cookieStore, params, timeout);
+        }
+        long start = System.currentTimeMillis();
+        CloseableHttpClient httpclient = buildClient();
+        CloseableHttpResponse response = null;
+        HttpPost httpPost = null;
+        try {
+            URI uri = new URIBuilder(url).build();
+            httpPost = new HttpPost(uri);
+            httpPost.setEntity(new UrlEncodedFormEntity(buildParams(params), Charset.forName("UTF-8")));
+            HttpClientContext context = HttpClientContext.create();
+            context.setRequestConfig(buildConfig(timeout));
+            context.setCookieStore(cookieStore);
+            response = httpclient.execute(httpPost, context);
+            int statusCode = response.getStatusLine().getStatusCode();
+            long cost = System.currentTimeMillis() - start;
+            if (statusCode == HttpStatus.SC_OK) {
+                printCurlRequest(url, cookieStore, params, cost);
+                return EntityUtils.toString(response.getEntity());
+            } else {
+                String curlRequest = getCurlRequest(url, cookieStore, params, cost);
+                throw new RuntimeException("requestPost remote error, request : " + curlRequest + ", statusCode="
+                                           + statusCode + ";" + EntityUtils.toString(response.getEntity()));
+            }
+        } catch (Throwable t) {
+            long cost = System.currentTimeMillis() - start;
+            String curlRequest = getCurlRequest(url, cookieStore, params, cost);
+            throw new RuntimeException("requestPost remote error, request : " + curlRequest, t);
+        } finally {
+            closeQuietly(response);
+            if (httpPost != null) {
+                httpPost.releaseConnection();
+            }
+            closeQuietly(httpclient); // fixed: client leak
+        }
+    }
+
+    /** Logs the request as a reproducible curl command line (warn level). */
+    public static void printCurlRequest(String url, CookieStore cookieStore, Map<String, String> params, long cost) {
+        logger.warn(getCurlRequest(url, cookieStore, params, cost));
+    }
+
+    /** Renders the request as a curl command plus its elapsed time. */
+    private static String getCurlRequest(String url, CookieStore cookieStore, Map<String, String> params, long cost) {
+        if (params == null) {
+            return "curl '" + url + "'\ncost : " + cost;
+        }
+        StringBuilder paramsStr = new StringBuilder();
+        Iterator<Map.Entry<String, String>> iterator = params.entrySet().iterator();
+        while (iterator.hasNext()) {
+            Map.Entry<String, String> entry = iterator.next();
+            paramsStr.append(entry.getKey() + "=" + entry.getValue());
+            if (iterator.hasNext()) {
+                paramsStr.append("&");
+            }
+        }
+        if (cookieStore == null) {
+            return "curl '" + url + "' -d '" + paramsStr.toString() + "'\ncost : " + cost;
+        }
+        StringBuilder cookieStr = new StringBuilder();
+        Iterator<Cookie> iteratorCookie = cookieStore.getCookies().iterator();
+        while (iteratorCookie.hasNext()) {
+            Cookie cookie = iteratorCookie.next();
+            cookieStr.append(cookie.getName() + "=" + cookie.getValue());
+            if (iteratorCookie.hasNext()) {
+                cookieStr.append(";");
+            }
+        }
+        return "curl '" + url + "' -b '" + cookieStr + "' -d '" + paramsStr.toString() + "'\ncost : " + cost;
+    }
+
+    /** Creates a pooled plain-http client; callers must close it. */
+    private static CloseableHttpClient buildClient() {
+        HttpClientBuilder builder = HttpClientBuilder.create();
+        builder.setMaxConnPerRoute(MAX_CONN_PER_ROUTE);
+        builder.setMaxConnTotal(MAX_CONN_TOTAL);
+        return builder.build();
+    }
+
+    /** Applies one timeout to connect, connection-request and socket. */
+    private static RequestConfig buildConfig(int timeout) {
+        return custom().setConnectTimeout(timeout)
+            .setConnectionRequestTimeout(timeout)
+            .setSocketTimeout(timeout)
+            .build();
+    }
+
+    /**
+     * Builds a client that accepts any TLS certificate (intentional, see
+     * class javadoc); callers must close the returned client.
+     */
+    private static CloseableHttpClient buildIgnoreCertClient() throws Exception {
+        final SSLContext sslContext = new SSLContextBuilder().loadTrustMaterial(null, new TrustStrategy() {
+
+            @Override
+            public boolean isTrusted(X509Certificate[] x509Certificates, String s) throws CertificateException {
+                return true; // trust everything on purpose
+            }
+        }).build();
+        return HttpClientBuilder.create()
+            .setSSLContext(sslContext)
+            .setConnectionManager(new PoolingHttpClientConnectionManager(RegistryBuilder.<ConnectionSocketFactory> create()
+                .register("http", PlainConnectionSocketFactory.INSTANCE)
+                .register("https", new SSLConnectionSocketFactory(sslContext, NoopHostnameVerifier.INSTANCE))
+                .build()))
+            .build();
+    }
+
+    /** Converts a param map into form-encoded name/value pairs. */
+    private static List<NameValuePair> buildParams(Map<String, String> params) {
+        List<NameValuePair> parameters = Lists.newArrayList();
+        for (Map.Entry<String, String> entry : params.entrySet()) {
+            parameters.add(new BasicNameValuePair(entry.getKey(), entry.getValue()));
+        }
+        return parameters;
+    }
+
+    private static void closeQuietly(CloseableHttpResponse response) {
+        if (response != null) {
+            try {
+                response.close();
+            } catch (IOException e) {
+                // best-effort close; nothing useful to do here
+            }
+        }
+    }
+
+    private static void closeQuietly(CloseableHttpClient client) {
+        if (client != null) {
+            try {
+                client.close();
+            } catch (IOException e) {
+                // best-effort close; nothing useful to do here
+            }
+        }
+    }
+}

+ 334 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/RdsBinlogOpenApi.java

@@ -0,0 +1,334 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.rds;
+
+import io.netty.handler.codec.http.HttpResponseStatus;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.UnsupportedEncodingException;
+import java.net.URLEncoder;
+import java.text.SimpleDateFormat;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TimeZone;
+import java.util.TreeMap;
+import java.util.UUID;
+
+import javax.crypto.Mac;
+import javax.crypto.SecretKey;
+import javax.crypto.spec.SecretKeySpec;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
+import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.config.RequestConfig;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.alibaba.fastjson.JSON;
+import com.alibaba.fastjson.JSONArray;
+import com.alibaba.fastjson.JSONObject;
+
+/**
+ * @author agapple 2017年10月14日 下午1:53:52
+ * @since 1.0.25
+ */
+public class RdsBinlogOpenApi {
+
+    protected static final Logger logger              = LoggerFactory.getLogger(RdsBinlogOpenApi.class);
+    private static final String   ISO8601_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss'Z'";
+    private static final int      TIMEOUT             = 10000;
+    private static final String   ENCODING            = "UTF-8";
+    private static final String   MAC_NAME            = "HmacSHA1";
+    private static final String   API_VERSION         = "2014-08-15";
+    private static final String   SIGNATURE_VERSION   = "1.0";
+
+    /**
+     * Pages through DescribeBinlogFiles and downloads every binlog archive
+     * that belongs to one host instance into destDir. The chosen host id is
+     * persisted in destDir/hostId so later runs stick to the same host.
+     *
+     * @param url openapi endpoint
+     * @param ak aliyun access key id
+     * @param sk aliyun access key secret
+     * @param dbInstanceId rds instance id
+     * @param startTime inclusive lower bound of the binlog window
+     * @param endTime inclusive upper bound of the binlog window
+     * @param destDir local directory receiving the files (created if absent)
+     */
+    public static void downloadBinlogFiles(String url, String ak, String sk, String dbInstanceId, Date startTime,
+                                           Date endTime, File destDir) throws Throwable {
+        int pageSize = 100;
+        int pageNumber = 0;
+        int pageRecordCount = 1;
+        String hostInstanceID = null;
+        while (pageRecordCount > 0 && pageRecordCount <= pageSize) {
+            pageNumber += 1;
+            String result = describeBinlogFiles(url, ak, sk, dbInstanceId, startTime, endTime, pageSize, pageNumber);
+            JSONObject jsobObj = JSON.parseObject(result);
+            Integer count = jsobObj.getInteger("PageRecordCount");
+            pageRecordCount = count == null ? 0 : count; // fixed: NPE when the field is absent
+
+            if (pageRecordCount > 0) {
+                FileUtils.forceMkdir(destDir);
+                File hostIdFile = new File(destDir, "hostId");
+                if (hostIdFile.exists()) {
+                    hostInstanceID = readHostId(hostIdFile);
+                }
+
+                String itemStr = jsobObj.getString("Items");
+                JSONObject binLogFileObj = JSONObject.parseObject(itemStr);
+                JSONArray items = binLogFileObj.getJSONArray("BinLogFile");
+                if (items == null || items.isEmpty()) {
+                    continue;
+                }
+                for (int i = 0; i < items.size(); i++) {
+                    JSONObject item = (JSONObject) items.get(i);
+                    String oneHostInstanceID = item.getString("HostInstanceID");
+                    if (hostInstanceID == null) {
+                        // first file seen decides which host we follow
+                        hostInstanceID = oneHostInstanceID;
+                        writeHostId(hostIdFile, oneHostInstanceID);
+                    }
+
+                    if (hostInstanceID.equals(oneHostInstanceID)) { // download from a single host only
+                        String downloadLink = item.getString("DownloadLink");
+                        String fileName = StringUtils.substringBetween(downloadLink, "mysql-bin.", ".tar");
+                        if (StringUtils.isNotEmpty(fileName)) {
+                            File currentFile = new File(destDir, "mysql-bin." + fileName);
+                            if (currentFile.isFile() && currentFile.exists()) {
+                                // already downloaded, skip
+                                continue;
+                            }
+                        }
+                        download(destDir, downloadLink);
+                    }
+                }
+            }
+        }
+    }
+
+    /** Reads the pinned host instance id, joining multi-line content. */
+    private static String readHostId(File hostIdFile) throws IOException {
+        FileInputStream in = new FileInputStream(hostIdFile);
+        try {
+            List<String> lines = IOUtils.readLines(in);
+            return StringUtils.join(lines, "\n");
+        } finally {
+            IOUtils.closeQuietly(in); // fixed: stream was previously leaked
+        }
+    }
+
+    /** Persists the chosen host instance id next to the binlog files. */
+    private static void writeHostId(File hostIdFile, String hostInstanceID) throws IOException {
+        FileOutputStream hostIdFileOut = null;
+        try {
+            hostIdFileOut = new FileOutputStream(hostIdFile);
+            hostIdFileOut.write(hostInstanceID.getBytes());
+            hostIdFileOut.flush();
+        } finally {
+            IOUtils.closeQuietly(hostIdFileOut);
+        }
+    }
+
+    /** Fetches one binlog archive and stores/unpacks it under destDir. */
+    private static void download(File destDir, String downloadLink) throws IOException {
+        HttpGet httpGet = new HttpGet(downloadLink);
+        CloseableHttpClient httpClient = HttpClientBuilder.create()
+            .setMaxConnPerRoute(50)
+            .setMaxConnTotal(100)
+            .build();
+        try {
+            RequestConfig requestConfig = RequestConfig.custom()
+                .setConnectTimeout(TIMEOUT)
+                .setConnectionRequestTimeout(TIMEOUT)
+                .setSocketTimeout(TIMEOUT)
+                .build();
+            httpGet.setConfig(requestConfig);
+            HttpResponse response = httpClient.execute(httpGet);
+            int statusCode = response.getStatusLine().getStatusCode();
+            if (statusCode != HttpResponseStatus.OK.code()) {
+                throw new RuntimeException("download failed , url:" + downloadLink + " , statusCode:" + statusCode);
+            }
+            saveFile(destDir, response);
+        } finally {
+            httpGet.releaseConnection();
+            IOUtils.closeQuietly(httpClient); // fixed: client was previously leaked
+        }
+    }
+
+    /**
+     * Writes the response body below parentFile; .tar archives (detected via
+     * Content-Disposition) are unpacked entry by entry, anything else is
+     * copied with progress logging.
+     */
+    private static void saveFile(File parentFile, HttpResponse response) throws IOException {
+        InputStream is = response.getEntity().getContent();
+        long totalSize = Long.parseLong(response.getFirstHeader("Content-Length").getValue());
+        String fileName = response.getFirstHeader("Content-Disposition").getValue();
+        fileName = StringUtils.substringAfter(fileName, "filename=");
+        boolean isTar = StringUtils.endsWith(fileName, ".tar");
+        FileUtils.forceMkdir(parentFile);
+        try {
+            if (isTar) {
+                extractTar(parentFile, is);
+            } else {
+                copyWithProgress(new File(parentFile, fileName), is, totalSize);
+            }
+        } finally {
+            IOUtils.closeQuietly(is); // fixed: entity stream could leak on failure
+        }
+    }
+
+    /** Unpacks every entry of a tar stream into parentFile. */
+    private static void extractTar(File parentFile, InputStream is) throws IOException {
+        TarArchiveInputStream tais = new TarArchiveInputStream(is);
+        try {
+            TarArchiveEntry tarArchiveEntry;
+            while ((tarArchiveEntry = tais.getNextTarEntry()) != null) {
+                File tarFile = new File(parentFile, tarArchiveEntry.getName());
+                logger.info("start to download file " + tarFile.getName());
+                BufferedOutputStream bos = null;
+                try {
+                    bos = new BufferedOutputStream(new FileOutputStream(tarFile));
+                    byte[] buffer = new byte[1024];
+                    int read;
+                    while ((read = tais.read(buffer)) != -1) {
+                        bos.write(buffer, 0, read);
+                    }
+                    logger.info("download file " + tarFile.getName() + " end!");
+                } finally {
+                    IOUtils.closeQuietly(bos);
+                }
+            }
+        } finally {
+            IOUtils.closeQuietly(tais); // fixed: tar stream leaked when an entry copy threw
+        }
+    }
+
+    /** Copies a plain file, logging progress roughly every 10%. */
+    private static void copyWithProgress(File file, InputStream is, long totalSize) throws IOException {
+        if (!file.isFile()) {
+            file.createNewFile();
+        }
+        FileOutputStream fos = null;
+        try {
+            fos = new FileOutputStream(file);
+            byte[] buffer = new byte[1024];
+            int len;
+            long copySize = 0;
+            long nextPrintProgress = 0;
+            logger.info("start to download file " + file.getName());
+            while ((len = is.read(buffer)) != -1) {
+                fos.write(buffer, 0, len);
+                copySize += len;
+                long progress = copySize * 100 / totalSize;
+                if (progress >= nextPrintProgress) {
+                    logger.info("download " + file.getName() + " progress : " + progress + "% , download size : "
+                                + copySize + ", total size : " + totalSize);
+                    nextPrintProgress += 10;
+                }
+            }
+            logger.info("download file " + file.getName() + " end!");
+            fos.flush();
+        } finally {
+            IOUtils.closeQuietly(fos);
+        }
+    }
+
+    /** Calls DescribeBinlogFiles for one result page. */
+    public static String describeBinlogFiles(String url, String ak, String sk, String dbInstanceId, Date startTime,
+                                             Date endTime, int pageSize, int pageNumber) throws Exception {
+        Map<String, String> paramMap = new HashMap<String, String>();
+        paramMap.put("Action", "DescribeBinlogFiles");
+        paramMap.put("DBInstanceId", dbInstanceId); // rds instance id
+        paramMap.put("StartTime", formatIso8601Date(startTime));
+        paramMap.put("EndTime", formatIso8601Date(endTime));
+        paramMap.put("PageSize", String.valueOf(pageSize));
+        paramMap.put("PageNumber", String.valueOf(pageNumber));
+        return doRequest(url, paramMap, ak, sk);
+    }
+
+    /** Signs the parameters and performs the openapi POST call. */
+    private static String doRequest(String domain, Map<String, String> param, String ak, String sk) throws Exception {
+        param.put("AccessKeyId", ak);
+        param.put("SignatureMethod", "HMAC-SHA1");
+        param.put("SignatureVersion", SIGNATURE_VERSION);
+        param.put("Version", API_VERSION);
+        param.put("SignatureNonce", UUID.randomUUID().toString());
+        param.put("Format", "JSON");
+        param.put("Timestamp", formatIso8601Date(new Date()));
+        String signStr = generate("POST", param, sk);
+        param.put("Signature", signStr);
+        String url = domain + "?" + concatQueryString(param);
+        // fixed: typed empty map instead of the raw Collections.EMPTY_MAP
+        return HttpHelper.post(url, null, Collections.<String, String> emptyMap(), TIMEOUT);
+    }
+
+    /**
+     * Joins parameters into a url-encoded query string (no leading '?').
+     *
+     * @return null when parameters is null, otherwise k1=v1&k2=v2...
+     */
+    public static String concatQueryString(Map<String, String> parameters) throws UnsupportedEncodingException {
+        if (null == parameters) {
+            return null;
+        }
+        StringBuilder urlBuilder = new StringBuilder();
+        for (Map.Entry<String, String> entry : parameters.entrySet()) {
+            String key = entry.getKey();
+            String val = entry.getValue();
+            urlBuilder.append(encode(key));
+            if (val != null) {
+                urlBuilder.append("=").append(encode(val));
+            }
+            urlBuilder.append("&");
+        }
+        if (parameters.size() > 0) {
+            urlBuilder.deleteCharAt(urlBuilder.length() - 1); // drop trailing '&'
+        }
+        return urlBuilder.toString();
+    }
+
+    /** URL-encodes a single value with UTF-8. */
+    public static String encode(String value) throws UnsupportedEncodingException {
+        return URLEncoder.encode(value, "UTF-8");
+    }
+
+    /** Formats a date as the ISO8601/GMT timestamp the openapi expects. */
+    private static String formatIso8601Date(Date date) {
+        // SimpleDateFormat is not thread-safe, hence a fresh instance per call
+        SimpleDateFormat df = new SimpleDateFormat(ISO8601_DATE_FORMAT);
+        df.setTimeZone(TimeZone.getTimeZone("GMT"));
+        return df.format(date);
+    }
+
+    /**
+     * Signs encryptText with HMAC-SHA1.
+     *
+     * @param encryptText the string to sign
+     * @param encryptKey the secret key
+     * @return raw signature bytes
+     */
+    public static byte[] HmacSHA1Encrypt(String encryptText, String encryptKey) throws Exception {
+        byte[] data = encryptKey.getBytes(ENCODING);
+        // build a key from the raw bytes, naming the mac algorithm
+        SecretKey secretKey = new SecretKeySpec(data, MAC_NAME);
+        Mac mac = Mac.getInstance(MAC_NAME);
+        mac.init(secretKey);
+        byte[] text = encryptText.getBytes(ENCODING);
+        return mac.doFinal(text);
+    }
+
+    private static String base64(byte input[]) throws UnsupportedEncodingException {
+        return new String(Base64.encodeBase64(input), ENCODING);
+    }
+
+    /**
+     * Builds the aliyun request signature. POST signatures are returned
+     * as-is; for other methods the signature is additionally URL-encoded.
+     */
+    public static String generate(String method, Map<String, String> parameter, String accessKeySecret)
+                                                                                                       throws Exception {
+        String signString = generateSignString(method, parameter);
+        // per the aliyun signing spec the key is the secret plus a trailing '&'
+        byte[] signBytes = HmacSHA1Encrypt(signString, accessKeySecret + "&");
+        String signature = base64(signBytes);
+        if ("POST".equals(method)) {
+            return signature;
+        }
+        return URLEncoder.encode(signature, "UTF-8");
+    }
+
+    /** Builds the canonical, sorted, percent-encoded query string. */
+    private static String generateQueryString(TreeMap<String, String> treeMap) {
+        StringBuilder canonicalizedQueryString = new StringBuilder();
+        boolean first = true;
+        for (String key : treeMap.navigableKeySet()) {
+            String value = treeMap.get(key);
+            if (!first) {
+                canonicalizedQueryString.append("&");
+            }
+            first = false;
+            canonicalizedQueryString.append(percentEncode(key)).append("=").append(percentEncode(value));
+        }
+        return canonicalizedQueryString.toString();
+    }
+
+    /** Builds the string-to-sign: METHOD&#38;%2F&#38;encoded-canonical-query. */
+    public static String generateSignString(String httpMethod, Map<String, String> parameter) throws IOException {
+        if (null == httpMethod) {
+            throw new RuntimeException("httpMethod can not be empty");
+        }
+        TreeMap<String, String> sortParameter = new TreeMap<String, String>();
+        sortParameter.putAll(parameter);
+        String canonicalizedQueryString = generateQueryString(sortParameter);
+        StringBuilder stringToSign = new StringBuilder();
+        stringToSign.append(httpMethod).append("&");
+        stringToSign.append(percentEncode("/")).append("&");
+        stringToSign.append(percentEncode(canonicalizedQueryString));
+        return stringToSign.toString();
+    }
+
+    /**
+     * RFC3986-style percent encoding ('+' -> %20, '*' -> %2A, %7E -> '~').
+     * Returns null for null input.
+     */
+    public static String percentEncode(String value) {
+        try {
+            return value == null ? null : URLEncoder.encode(value, ENCODING)
+                .replaceAll("\\+", "%20")
+                .replaceAll("\\*", "%2A")
+                .replaceAll("%7E", "~");
+        } catch (Exception e) {
+            // unreachable in practice (UTF-8 is always supported); keep the
+            // original best-effort contract of returning "" on failure
+        }
+        return "";
+    }
+}

+ 111 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/rds/RdsLocalBinlogEventParser.java

@@ -0,0 +1,111 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.rds;
+
+import java.io.File;
+import java.util.Date;
+
+import org.apache.commons.lang.StringUtils;
+import org.springframework.util.Assert;
+
+import com.alibaba.otter.canal.parse.CanalEventParser;
+import com.alibaba.otter.canal.parse.exception.CanalParseException;
+import com.alibaba.otter.canal.parse.inbound.mysql.LocalBinlogEventParser;
+import com.alibaba.otter.canal.protocol.position.EntryPosition;
+
+/**
+ * Event parser that replays RDS binlog backup files: the files covering the
+ * configured time window are downloaded from the Aliyun OpenAPI into a local
+ * directory, then parsing is delegated to {@link LocalBinlogEventParser}.
+ *
+ * @author agapple 2017年10月15日 下午1:27:36
+ * @since 1.0.25
+ */
+public class RdsLocalBinlogEventParser extends LocalBinlogEventParser implements CanalEventParser {
+
+    private String url = "https://rds.aliyuncs.com/"; // OpenAPI endpoint
+    private String accesskey;                        // Aliyun account access key
+    private String secretkey;                        // Aliyun account secret key
+    private String instanceId;                       // RDS instance id
+    private Long   startTime;                        // download window start (epoch millis), required
+    private Long   endTime;                          // download window end (epoch millis), defaults to "now"
+
+    public RdsLocalBinlogEventParser(){
+    }
+
+    /**
+     * Validates the configuration, downloads the binlog files for
+     * [startTime, endTime] into {@code directory}, seeds the master position
+     * from startTime, then starts the underlying local-file parser.
+     *
+     * @throws CanalParseException if validation or the download fails
+     */
+    public void start() throws CanalParseException {
+        try {
+            Assert.notNull(startTime);
+            Assert.notNull(accesskey);
+            Assert.notNull(secretkey);
+            Assert.notNull(instanceId);
+            Assert.notNull(url);
+            if (endTime == null) {
+                endTime = System.currentTimeMillis();
+            }
+
+            RdsBinlogOpenApi.downloadBinlogFiles(url,
+                accesskey,
+                secretkey,
+                instanceId,
+                new Date(startTime),
+                new Date(endTime),
+                new File(directory));
+
+            // refresh the master position timestamp to the window start
+            masterPosition = new EntryPosition(startTime);
+        } catch (Throwable e) {
+            logger.error("download binlog failed", e);
+            throw new CanalParseException(e);
+        }
+
+        super.start();
+    }
+
+    public String getUrl() {
+        return url;
+    }
+
+    public void setUrl(String url) {
+        // keep the default endpoint when an empty/null url is supplied
+        if (StringUtils.isNotEmpty(url)) {
+            this.url = url;
+        }
+    }
+
+    public String getAccesskey() {
+        return accesskey;
+    }
+
+    public void setAccesskey(String accesskey) {
+        this.accesskey = accesskey;
+    }
+
+    public String getSecretkey() {
+        return secretkey;
+    }
+
+    public void setSecretkey(String secretkey) {
+        this.secretkey = secretkey;
+    }
+
+    public String getInstanceId() {
+        return instanceId;
+    }
+
+    public void setInstanceId(String instanceId) {
+        this.instanceId = instanceId;
+    }
+
+    public Long getStartTime() {
+        return startTime;
+    }
+
+    public void setStartTime(Long startTime) {
+        this.startTime = startTime;
+    }
+
+    public Long getEndTime() {
+        return endTime;
+    }
+
+    public void setEndTime(Long endTime) {
+        this.endTime = endTime;
+    }
+
+}

+ 493 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/DatabaseTableMeta.java

@@ -0,0 +1,493 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Pattern;
+
+import org.apache.commons.beanutils.BeanUtils;
+import org.apache.commons.lang.ObjectUtils;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
+
+import com.alibaba.druid.sql.repository.Schema;
+import com.alibaba.fastjson.JSON;
+import com.alibaba.fastjson.JSONObject;
+import com.alibaba.otter.canal.filter.CanalEventFilter;
+import com.alibaba.otter.canal.parse.driver.mysql.packets.server.ResultSetPacket;
+import com.alibaba.otter.canal.parse.exception.CanalParseException;
+import com.alibaba.otter.canal.parse.inbound.TableMeta;
+import com.alibaba.otter.canal.parse.inbound.TableMeta.FieldMeta;
+import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection;
+import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.TableMetaCache;
+import com.alibaba.otter.canal.parse.inbound.mysql.ddl.DdlResult;
+import com.alibaba.otter.canal.parse.inbound.mysql.ddl.DruidDdlParser;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaHistoryDAO;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaHistoryDO;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaSnapshotDAO;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaSnapshotDO;
+import com.alibaba.otter.canal.protocol.position.EntryPosition;
+
+/**
+ * Table-meta TSDB backed by a remote database: every DDL is recorded as
+ * history and a full snapshot is persisted periodically, so the in-memory
+ * meta can be rebuilt at any binlog position.
+ * see internal class: CanalTableMeta , ConsoleTableMetaTSDB
+ *
+ * @author agapple 2017年7月27日 下午10:47:55
+ * @since 3.2.5
+ */
+public class DatabaseTableMeta implements TableMetaTSDB {
+
+    private static Logger              logger        = LoggerFactory.getLogger(DatabaseTableMeta.class);
+    // NOTE(review): in "for key '*'" the '*' quantifies the preceding quote
+    // rather than matching arbitrary text; "'.*'" was presumably intended --
+    // confirm against real MySQL duplicate-key messages before changing.
+    private static Pattern             pattern       = Pattern.compile("Duplicate entry '.*' for key '*'");
+    private static Pattern             h2Pattern     = Pattern.compile("Unique index or primary key violation");
+    private static final EntryPosition INIT_POSITION = new EntryPosition("0", 0L, -2L, -1L);
+    private String                     destination;
+    private MemoryTableMeta            memoryTableMeta;
+    private MysqlConnection            connection;                                                              // connection used to query table meta
+    private CanalEventFilter           filter;
+    private CanalEventFilter           blackFilter;
+    private EntryPosition              lastPosition;
+    private ScheduledExecutorService   scheduler;
+    private MetaHistoryDAO             metaHistoryDAO;
+    private MetaSnapshotDAO            metaSnapshotDAO;
+
+    public DatabaseTableMeta(){
+
+    }
+
+    /**
+     * Initializes the in-memory meta and schedules the periodic snapshot task.
+     */
+    @Override
+    public boolean init(final String destination) {
+        this.destination = destination;
+        this.memoryTableMeta = new MemoryTableMeta();
+        this.scheduler = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
+
+            @Override
+            public Thread newThread(Runnable r) {
+                return new Thread(r, "[scheduler-table-meta-snapshot]");
+            }
+        });
+
+        // persist a full snapshot every 24 hours
+        scheduler.scheduleWithFixedDelay(new Runnable() {
+
+            @Override
+            public void run() {
+                try {
+                    MDC.put("destination", destination);
+                    applySnapshotToDB(lastPosition, false);
+                } catch (Throwable e) {
+                    logger.error("scheudle applySnapshotToDB faield", e);
+                }
+            }
+        }, 24, 24, TimeUnit.HOURS);
+        return true;
+    }
+
+    @Override
+    public TableMeta find(String schema, String table) {
+        synchronized (memoryTableMeta) {
+            return memoryTableMeta.find(schema, table);
+        }
+    }
+
+    /**
+     * Applies a DDL to the in-memory meta first, then mirrors it to the
+     * remote history table.
+     */
+    @Override
+    public boolean apply(EntryPosition position, String schema, String ddl, String extra) {
+        // record into the in-memory structure first
+        synchronized (memoryTableMeta) {
+            if (memoryTableMeta.apply(position, schema, ddl, extra)) {
+                this.lastPosition = position;
+                // mirror every change to the remote store as history
+                return applyHistoryToDB(position, schema, ddl, extra);
+            } else {
+                throw new RuntimeException("apply to memory is failed");
+            }
+        }
+    }
+
+    /**
+     * Rebuilds the in-memory meta at the given position: restore the nearest
+     * snapshot and replay history on top, or fall back to a full live dump.
+     */
+    @Override
+    public boolean rollback(EntryPosition position) {
+        // every rollback rebuilds the memory data from scratch
+        this.memoryTableMeta = new MemoryTableMeta();
+        boolean flag = false;
+        EntryPosition snapshotPosition = buildMemFromSnapshot(position);
+        if (snapshotPosition != null) {
+            applyHistoryOnMemory(snapshotPosition, position);
+            flag = true;
+        }
+
+        if (!flag) {
+            // no persisted data yet (initial state): dump all tables of interest
+            if (dumpTableMeta(connection, filter)) {
+                // record a snapshot so future recoveries are fast
+                flag = applySnapshotToDB(INIT_POSITION, true);
+            }
+        }
+
+        return flag;
+    }
+
+    @Override
+    public Map<String, String> snapshot() {
+        return memoryTableMeta.snapshot();
+    }
+
+    /**
+     * Dumps the current table structures from the live connection at
+     * initialization time (show databases / show tables / show create table).
+     */
+    private boolean dumpTableMeta(MysqlConnection connection, final CanalEventFilter filter) {
+        try {
+            ResultSetPacket packet = connection.query("show databases");
+            List<String> schemas = new ArrayList<String>();
+            for (String schema : packet.getFieldValues()) {
+                schemas.add(schema);
+            }
+
+            for (String schema : schemas) {
+                packet = connection.query("show tables from `" + schema + "`");
+                List<String> tables = new ArrayList<String>();
+                for (String table : packet.getFieldValues()) {
+                    String fullName = schema + "." + table;
+                    // black filter wins over the white filter
+                    if (blackFilter == null || !blackFilter.filter(fullName)) {
+                        if (filter == null || filter.filter(fullName)) {
+                            tables.add(table);
+                        }
+                    }
+                }
+
+                if (tables.isEmpty()) {
+                    continue;
+                }
+
+                StringBuilder sql = new StringBuilder();
+                for (String table : tables) {
+                    sql.append("show create table `" + schema + "`.`" + table + "`;");
+                }
+
+                // one multi-statement round trip per schema
+                List<ResultSetPacket> packets = connection.queryMulti(sql.toString());
+                for (ResultSetPacket onePacket : packets) {
+                    if (onePacket.getFieldValues().size() > 1) {
+                        String oneTableCreateSql = onePacket.getFieldValues().get(1);
+                        memoryTableMeta.apply(INIT_POSITION, schema, oneTableCreateSql, null);
+                    }
+                }
+            }
+
+            return true;
+        } catch (IOException e) {
+            throw new CanalParseException(e);
+        }
+    }
+
+    /**
+     * Persists one DDL change (plus its binlog position) as a history row;
+     * duplicate positions are silently ignored.
+     */
+    private boolean applyHistoryToDB(EntryPosition position, String schema, String ddl, String extra) {
+        Map<String, String> content = new HashMap<String, String>();
+        content.put("destination", destination);
+        content.put("binlogFile", position.getJournalName());
+        content.put("binlogOffest", String.valueOf(position.getPosition()));
+        content.put("binlogMasterId", String.valueOf(position.getServerId()));
+        content.put("binlogTimestamp", String.valueOf(position.getTimestamp()));
+        content.put("useSchema", schema);
+        if (content.isEmpty()) {
+            throw new RuntimeException("apply failed caused by content is empty in applyHistoryToDB");
+        }
+        // to be refined
+        List<DdlResult> ddlResults = DruidDdlParser.parse(ddl, schema);
+        if (ddlResults.size() > 0) {
+            DdlResult ddlResult = ddlResults.get(0);
+            content.put("sqlSchema", ddlResult.getSchemaName());
+            content.put("sqlTable", ddlResult.getTableName());
+            content.put("sqlType", ddlResult.getType().name());
+            content.put("sqlText", ddl);
+            content.put("extra", extra);
+        }
+
+        MetaHistoryDO metaDO = new MetaHistoryDO();
+        try {
+            BeanUtils.populate(metaDO, content);
+            // a unique constraint is in place to reject:
+            // 1. duplicate binlog file+offset
+            // 2. duplicate masterId+timestamp
+            metaHistoryDAO.insert(metaDO);
+        } catch (Throwable e) {
+            if (isUkDuplicateException(e)) {
+                // ignore a duplicate position
+                logger.warn("dup apply for sql : " + ddl);
+            } else {
+                throw new CanalParseException("apply history to db failed caused by : " + e.getMessage(), e);
+            }
+
+        }
+        return true;
+    }
+
+    /**
+     * Verifies the in-memory meta against the live database and, only if all
+     * tables match, persists a full snapshot row.
+     */
+    private boolean applySnapshotToDB(EntryPosition position, boolean init) {
+        // take a snapshot of the current in-memory state
+        MemoryTableMeta tmpMemoryTableMeta = new MemoryTableMeta();
+        Map<String, String> schemaDdls = null;
+        synchronized (memoryTableMeta) {
+            if (!init && position == null) {
+                // periodic build: no DDL has been applied yet, nothing to do
+                return false;
+            }
+            schemaDdls = memoryTableMeta.snapshot();
+            for (Map.Entry<String, String> entry : schemaDdls.entrySet()) {
+                tmpMemoryTableMeta.apply(position, entry.getKey(), entry.getValue(), null);
+            }
+        }
+
+        // compare against the DB using the temporary in-memory copy
+        boolean compareAll = true;
+        for (Schema schema : tmpMemoryTableMeta.getRepository().getSchemas()) {
+            for (String table : schema.showTables()) {
+                if (!compareTableMetaDbAndMemory(connection, tmpMemoryTableMeta, schema.getName(), table)) {
+                    compareAll = false;
+                }
+            }
+        }
+        if (compareAll) {
+            Map<String, String> content = new HashMap<String, String>();
+            content.put("destination", destination);
+            content.put("binlogFile", position.getJournalName());
+            content.put("binlogOffest", String.valueOf(position.getPosition()));
+            content.put("binlogMasterId", String.valueOf(position.getServerId()));
+            content.put("binlogTimestamp", String.valueOf(position.getTimestamp()));
+            content.put("data", JSON.toJSONString(schemaDdls));
+            if (content.isEmpty()) {
+                throw new RuntimeException("apply failed caused by content is empty in applySnapshotToDB");
+            }
+
+            MetaSnapshotDO snapshotDO = new MetaSnapshotDO();
+            try {
+                BeanUtils.populate(snapshotDO, content);
+                metaSnapshotDAO.insert(snapshotDO);
+            } catch (Throwable e) {
+                if (isUkDuplicateException(e)) {
+                    // ignore a duplicate position
+                    logger.info("dup apply snapshot use position : " + position + " , just ignore");
+                } else {
+                    throw new CanalParseException("apply failed caused by : " + e.getMessage(), e);
+                }
+            }
+            return true;
+        } else {
+            logger.error("compare failed , check log");
+        }
+        return false;
+    }
+
+    /**
+     * Compares one table's meta between memory and the live DB ("desc");
+     * logs a diagnostic (including the live DDL) on mismatch.
+     */
+    private boolean compareTableMetaDbAndMemory(MysqlConnection connection, MemoryTableMeta memoryTableMeta,
+                                                final String schema, final String table) {
+        TableMeta tableMetaFromMem = memoryTableMeta.find(schema, table);
+
+        TableMeta tableMetaFromDB = new TableMeta();
+        tableMetaFromDB.setSchema(schema);
+        tableMetaFromDB.setTable(table);
+        try {
+            ResultSetPacket packet = connection.query("desc " + getFullName(schema, table));
+            tableMetaFromDB.setFields(TableMetaCache.parserTableMeta(packet));
+        } catch (IOException e) {
+            // 1146 = ER_NO_SUCH_TABLE: present in memory but dropped in the DB
+            if (e.getMessage().contains("errorNumber=1146")) {
+                logger.error("table not exist in db , pls check :" + getFullName(schema, table) + " , mem : "
+                             + tableMetaFromMem);
+                return false;
+            }
+            throw new CanalParseException(e);
+        }
+
+        boolean result = compareTableMeta(tableMetaFromMem, tableMetaFromDB);
+        if (!result) {
+            String createDDL = null;
+            try {
+                ResultSetPacket packet = connection.query("show create table " + getFullName(schema, table));
+                if (packet.getFieldValues().size() > 1) {
+                    createDDL = packet.getFieldValues().get(1);
+                }
+            } catch (IOException e) {
+                // ignore: the DDL is only fetched for the diagnostic below
+            }
+
+            logger.error("pls submit github issue, show create table ddl:" + createDDL + " , compare failed . \n db : "
+                         + tableMetaFromDB + " \n mem : " + tableMetaFromMem);
+        }
+        return result;
+    }
+
+    /**
+     * Restores the in-memory meta from the newest snapshot at or before the
+     * given position.
+     *
+     * @return the snapshot's position, or null when no snapshot applies
+     */
+    private EntryPosition buildMemFromSnapshot(EntryPosition position) {
+        try {
+            MetaSnapshotDO snapshotDO = metaSnapshotDAO.findByTimestamp(destination, position.getTimestamp());
+            if (snapshotDO == null) {
+                return null;
+            }
+            String binlogFile = snapshotDO.getBinlogFile();
+            Long binlogOffest = snapshotDO.getBinlogOffest();
+            String binlogMasterId = snapshotDO.getBinlogMasterId();
+            Long binlogTimestamp = snapshotDO.getBinlogTimestamp();
+
+            EntryPosition snapshotPosition = new EntryPosition(binlogFile,
+                binlogOffest == null ? 0l : binlogOffest,
+                binlogTimestamp == null ? 0l : binlogTimestamp,
+                Long.valueOf(binlogMasterId == null ? "-2" : binlogMasterId));
+            // data is stored as Map<String,String>: one full DDL script per schema
+            String sqlData = snapshotDO.getData();
+            JSONObject jsonObj = JSON.parseObject(sqlData);
+            for (Map.Entry entry : jsonObj.entrySet()) {
+                // apply into memory
+                if (!memoryTableMeta.apply(snapshotPosition,
+                    ObjectUtils.toString(entry.getKey()),
+                    ObjectUtils.toString(entry.getValue()),
+                    null)) {
+                    return null;
+                }
+            }
+
+            return snapshotPosition;
+        } catch (Throwable e) {
+            throw new CanalParseException("apply failed caused by : " + e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Replays persisted DDL history between the snapshot position and the
+     * rollback target onto the in-memory meta.
+     */
+    private boolean applyHistoryOnMemory(EntryPosition position, EntryPosition rollbackPosition) {
+        try {
+            List<MetaHistoryDO> metaHistoryDOList = metaHistoryDAO.findByTimestamp(destination,
+                position.getTimestamp(),
+                rollbackPosition.getTimestamp());
+            if (metaHistoryDOList == null) {
+                return true;
+            }
+
+            for (MetaHistoryDO metaHistoryDO : metaHistoryDOList) {
+                String binlogFile = metaHistoryDO.getBinlogFile();
+                Long binlogOffest = metaHistoryDO.getBinlogOffest();
+                String binlogMasterId = metaHistoryDO.getBinlogMasterId();
+                Long binlogTimestamp = metaHistoryDO.getBinlogTimestamp();
+                String useSchema = metaHistoryDO.getUseSchema();
+                String sqlData = metaHistoryDO.getSqlText();
+                EntryPosition snapshotPosition = new EntryPosition(binlogFile,
+                    binlogOffest == null ? 0L : binlogOffest,
+                    binlogTimestamp == null ? 0L : binlogTimestamp,
+                    Long.valueOf(binlogMasterId == null ? "-2" : binlogMasterId));
+
+                // within the same second, compare the history position against
+                // the rollback target and skip any entry that lies beyond it
+                if (snapshotPosition.getTimestamp() > rollbackPosition.getTimestamp()) {
+                    continue;
+                } else if (rollbackPosition.getServerId() == snapshotPosition.getServerId()
+                           && snapshotPosition.compareTo(rollbackPosition) > 0) {
+                    continue;
+                }
+
+                // apply into memory
+                if (!memoryTableMeta.apply(snapshotPosition, useSchema, sqlData, null)) {
+                    return false;
+                }
+
+            }
+
+            return metaHistoryDOList.size() > 0;
+        } catch (Throwable e) {
+            throw new CanalParseException("apply failed", e);
+        }
+    }
+
+    /** Returns the backquoted `schema`.`table` name. */
+    private String getFullName(String schema, String table) {
+        StringBuilder builder = new StringBuilder();
+        return builder.append('`')
+            .append(schema)
+            .append('`')
+            .append('.')
+            .append('`')
+            .append(table)
+            .append('`')
+            .toString();
+    }
+
+    /**
+     * Field-by-field, case-insensitive comparison of two table metas
+     * (schema, table, and every column's name/type/default/nullable/key).
+     */
+    private boolean compareTableMeta(TableMeta source, TableMeta target) {
+        if (!StringUtils.equalsIgnoreCase(source.getSchema(), target.getSchema())) {
+            return false;
+        }
+
+        if (!StringUtils.equalsIgnoreCase(source.getTable(), target.getTable())) {
+            return false;
+        }
+
+        List<FieldMeta> sourceFields = source.getFields();
+        List<FieldMeta> targetFields = target.getFields();
+        if (sourceFields.size() != targetFields.size()) {
+            return false;
+        }
+
+        for (int i = 0; i < sourceFields.size(); i++) {
+            FieldMeta sourceField = sourceFields.get(i);
+            FieldMeta targetField = targetFields.get(i);
+            if (!StringUtils.equalsIgnoreCase(sourceField.getColumnName(), targetField.getColumnName())) {
+                return false;
+            }
+
+            if (!StringUtils.equalsIgnoreCase(sourceField.getColumnType(), targetField.getColumnType())) {
+                return false;
+            }
+
+            if (!StringUtils.equalsIgnoreCase(sourceField.getDefaultValue(), targetField.getDefaultValue())) {
+                return false;
+            }
+
+            if (sourceField.isNullable() != targetField.isNullable()) {
+                return false;
+            }
+
+            if (sourceField.isKey() != targetField.isKey()) {
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    public void setConnection(MysqlConnection connection) {
+        this.connection = connection;
+    }
+
+    public void setFilter(CanalEventFilter filter) {
+        this.filter = filter;
+    }
+
+    public MetaHistoryDAO getMetaHistoryDAO() {
+        return metaHistoryDAO;
+    }
+
+    public void setMetaHistoryDAO(MetaHistoryDAO metaHistoryDAO) {
+        this.metaHistoryDAO = metaHistoryDAO;
+    }
+
+    public MetaSnapshotDAO getMetaSnapshotDAO() {
+        return metaSnapshotDAO;
+    }
+
+    public void setMetaSnapshotDAO(MetaSnapshotDAO metaSnapshotDAO) {
+        this.metaSnapshotDAO = metaSnapshotDAO;
+    }
+
+    public void setBlackFilter(CanalEventFilter blackFilter) {
+        this.blackFilter = blackFilter;
+    }
+
+    public MysqlConnection getConnection() {
+        return connection;
+    }
+
+    /**
+     * True when the throwable is a unique-key violation (MySQL or H2).
+     * NOTE(review): t.getMessage() may be null (then matcher(...) throws NPE)
+     * -- confirm all callers pass exceptions with messages.
+     */
+    public boolean isUkDuplicateException(Throwable t) {
+        if (pattern.matcher(t.getMessage()).find() || h2Pattern.matcher(t.getMessage()).find()) {
+            // the same exception type is thrown for foreign-key violations,
+            // so we additionally match the "Duplicate entry" message text
+            return true;
+        }
+        return false;
+    }
+}

+ 244 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/MemoryTableMeta.java

@@ -0,0 +1,244 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.alibaba.druid.sql.ast.SQLDataType;
+import com.alibaba.druid.sql.ast.SQLDataTypeImpl;
+import com.alibaba.druid.sql.ast.SQLExpr;
+import com.alibaba.druid.sql.ast.SQLStatement;
+import com.alibaba.druid.sql.ast.expr.SQLCharExpr;
+import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr;
+import com.alibaba.druid.sql.ast.expr.SQLNullExpr;
+import com.alibaba.druid.sql.ast.expr.SQLPropertyExpr;
+import com.alibaba.druid.sql.ast.statement.SQLColumnConstraint;
+import com.alibaba.druid.sql.ast.statement.SQLColumnDefinition;
+import com.alibaba.druid.sql.ast.statement.SQLColumnPrimaryKey;
+import com.alibaba.druid.sql.ast.statement.SQLCreateTableStatement;
+import com.alibaba.druid.sql.ast.statement.SQLNotNullConstraint;
+import com.alibaba.druid.sql.ast.statement.SQLNullConstraint;
+import com.alibaba.druid.sql.ast.statement.SQLSelectOrderByItem;
+import com.alibaba.druid.sql.ast.statement.SQLTableElement;
+import com.alibaba.druid.sql.dialect.mysql.ast.MySqlPrimaryKey;
+import com.alibaba.druid.sql.repository.Schema;
+import com.alibaba.druid.sql.repository.SchemaObject;
+import com.alibaba.druid.sql.repository.SchemaRepository;
+import com.alibaba.druid.util.JdbcConstants;
+import com.alibaba.otter.canal.parse.inbound.TableMeta;
+import com.alibaba.otter.canal.parse.inbound.TableMeta.FieldMeta;
+import com.alibaba.otter.canal.parse.inbound.mysql.ddl.DruidDdlParser;
+import com.alibaba.otter.canal.protocol.position.EntryPosition;
+
+/**
+ * In-memory table structures maintained by replaying DDL through Druid's
+ * SchemaRepository; parsed TableMeta objects are cached per (schema, table).
+ *
+ * @author agapple 2017年7月27日 下午4:19:40
+ * @since 3.2.5
+ */
+public class MemoryTableMeta implements TableMetaTSDB {
+
+    private Logger                       logger     = LoggerFactory.getLogger(MemoryTableMeta.class);
+    // cache of parsed metas, keyed by [schema, table]; invalidated on every apply()
+    private Map<List<String>, TableMeta> tableMetas = new ConcurrentHashMap<List<String>, TableMeta>();
+    private SchemaRepository             repository = new SchemaRepository(JdbcConstants.MYSQL);
+
+    public MemoryTableMeta(){
+    }
+
+    @Override
+    public boolean init(String destination) {
+        return true;
+    }
+
+    /**
+     * Replays one DDL into the repository and drops the TableMeta cache.
+     * NOTE(review): the cache is cleared before the lock is taken; a racing
+     * find() could repopulate from the pre-apply repository state -- confirm
+     * whether callers already serialize apply/find externally.
+     */
+    public boolean apply(EntryPosition position, String schema, String ddl, String extra) {
+        tableMetas.clear();
+        synchronized (this) {
+            if (StringUtils.isNotEmpty(schema)) {
+                repository.setDefaultSchema(schema);
+            }
+
+            try {
+                // druid currently has trouble parsing "flush privileges",
+                // so flush statements are skipped entirely
+                if (!StringUtils.startsWithIgnoreCase(StringUtils.trim(ddl), "flush")) {
+                    repository.console(ddl);
+                }
+            } catch (Throwable e) {
+                logger.warn("parse faield : " + ddl, e);
+            }
+        }
+
+        // TableMeta meta = find("tddl5_00", "ab");
+        // if (meta != null) {
+        // repository.setDefaultSchema("tddl5_00");
+        // System.out.println(repository.console("show create table tddl5_00.ab"));
+        // System.out.println(repository.console("show columns from tddl5_00.ab"));
+        // }
+        return true;
+    }
+
+    /**
+     * Looks up (and lazily parses + caches) the meta for schema.table;
+     * returns null when the table is unknown.
+     */
+    @Override
+    public TableMeta find(String schema, String table) {
+        List<String> keys = Arrays.asList(schema, table);
+        TableMeta tableMeta = tableMetas.get(keys);
+        if (tableMeta == null) {
+            // double-checked population of the cache under the instance lock
+            synchronized (this) {
+                tableMeta = tableMetas.get(keys);
+                if (tableMeta == null) {
+                    Schema schemaRep = repository.findSchema(schema);
+                    // NOTE(review): this tests 'schema' but presumably means
+                    // 'schemaRep'; an unknown schema would NPE on findTable
+                    // below -- confirm and fix at source.
+                    if (schema == null) {
+                        return null;
+                    }
+                    SchemaObject data = schemaRep.findTable(table);
+                    if (data == null) {
+                        return null;
+                    }
+                    SQLStatement statement = data.getStatement();
+                    if (statement == null) {
+                        return null;
+                    }
+                    if (statement instanceof SQLCreateTableStatement) {
+                        tableMeta = parse((SQLCreateTableStatement) statement);
+                    }
+                    if (tableMeta != null) {
+                        if (table != null) {
+                            tableMeta.setTable(table);
+                        }
+                        if (schema != null) {
+                            tableMeta.setSchema(schema);
+                        }
+
+                        tableMetas.put(keys, tableMeta);
+                    }
+                }
+            }
+        }
+
+        return tableMeta;
+    }
+
+    @Override
+    public boolean rollback(EntryPosition position) {
+        throw new RuntimeException("not support for memory");
+    }
+
+    /**
+     * Dumps every schema to a single script of "create table ...;" statements,
+     * keyed by schema name.
+     */
+    public Map<String, String> snapshot() {
+        Map<String, String> schemaDdls = new HashMap<String, String>();
+        for (Schema schema : repository.getSchemas()) {
+            StringBuffer data = new StringBuffer(4 * 1024);
+            for (String table : schema.showTables()) {
+                SchemaObject schemaObject = schema.findTable(table);
+                schemaObject.getStatement().output(data);
+                data.append("; \n");
+            }
+            schemaDdls.put(schema.getName(), data.toString());
+        }
+
+        return schemaDdls;
+    }
+
+    /** Converts a parsed CREATE TABLE statement into a TableMeta (null if empty). */
+    private TableMeta parse(SQLCreateTableStatement statement) {
+        int size = statement.getTableElementList().size();
+        if (size > 0) {
+            TableMeta tableMeta = new TableMeta();
+            for (int i = 0; i < size; ++i) {
+                SQLTableElement element = statement.getTableElementList().get(i);
+                processTableElement(element, tableMeta);
+            }
+            return tableMeta;
+        }
+
+        return null;
+    }
+
+    /**
+     * Translates one CREATE TABLE element: column definitions become
+     * FieldMetas (type text rebuilt with args/unsigned/zerofill); a primary
+     * key element marks the referenced columns as keys.
+     */
+    private void processTableElement(SQLTableElement element, TableMeta tableMeta) {
+        if (element instanceof SQLColumnDefinition) {
+            FieldMeta fieldMeta = new FieldMeta();
+            SQLColumnDefinition column = (SQLColumnDefinition) element;
+            String name = getSqlName(column.getName());
+            // String charset = getSqlName(column.getCharsetExpr());
+            SQLDataType dataType = column.getDataType();
+            String dataTypStr = dataType.getName();
+            // re-append type arguments, e.g. varchar(32) or decimal(10,2)
+            if (dataType.getArguments().size() > 0) {
+                dataTypStr += "(";
+                for (int i = 0; i < column.getDataType().getArguments().size(); i++) {
+                    if (i != 0) {
+                        dataTypStr += ",";
+                    }
+                    SQLExpr arg = column.getDataType().getArguments().get(i);
+                    dataTypStr += arg.toString();
+                }
+                dataTypStr += ")";
+            }
+
+            if (dataType instanceof SQLDataTypeImpl) {
+                SQLDataTypeImpl dataTypeImpl = (SQLDataTypeImpl) dataType;
+                if (dataTypeImpl.isUnsigned()) {
+                    dataTypStr += " unsigned";
+                }
+
+                if (dataTypeImpl.isZerofill()) {
+                    dataTypStr += " zerofill";
+                }
+            }
+
+            // DEFAULT NULL (or no default) maps to a null default value
+            if (column.getDefaultExpr() == null || column.getDefaultExpr() instanceof SQLNullExpr) {
+                fieldMeta.setDefaultValue(null);
+            } else {
+                fieldMeta.setDefaultValue(DruidDdlParser.unescapeQuotaName(getSqlName(column.getDefaultExpr())));
+            }
+
+            fieldMeta.setColumnName(name);
+            fieldMeta.setColumnType(dataTypStr);
+            fieldMeta.setNullable(true);
+            List<SQLColumnConstraint> constraints = column.getConstraints();
+            for (SQLColumnConstraint constraint : constraints) {
+                if (constraint instanceof SQLNotNullConstraint) {
+                    fieldMeta.setNullable(false);
+                } else if (constraint instanceof SQLNullConstraint) {
+                    fieldMeta.setNullable(true);
+                } else if (constraint instanceof SQLColumnPrimaryKey) {
+                    fieldMeta.setKey(true);
+                }
+            }
+            tableMeta.addFieldMeta(fieldMeta);
+        } else if (element instanceof MySqlPrimaryKey) {
+            MySqlPrimaryKey column = (MySqlPrimaryKey) element;
+            List<SQLSelectOrderByItem> pks = column.getColumns();
+            for (SQLSelectOrderByItem pk : pks) {
+                String name = getSqlName(pk.getExpr());
+                FieldMeta field = tableMeta.getFieldMetaByName(name);
+                field.setKey(true);
+            }
+        }
+    }
+
+    /**
+     * Extracts a plain (unescaped) name from a Druid SQL expression; handles
+     * owner.property, bare identifiers, and quoted char literals.
+     */
+    private String getSqlName(SQLExpr sqlName) {
+        if (sqlName == null) {
+            return null;
+        }
+
+        if (sqlName instanceof SQLPropertyExpr) {
+            SQLIdentifierExpr owner = (SQLIdentifierExpr) ((SQLPropertyExpr) sqlName).getOwner();
+            return DruidDdlParser.unescapeName(owner.getName()) + "."
+                   + DruidDdlParser.unescapeName(((SQLPropertyExpr) sqlName).getName());
+        } else if (sqlName instanceof SQLIdentifierExpr) {
+            return DruidDdlParser.unescapeName(((SQLIdentifierExpr) sqlName).getName());
+        } else if (sqlName instanceof SQLCharExpr) {
+            return ((SQLCharExpr) sqlName).getText();
+        } else {
+            return sqlName.toString();
+        }
+    }
+    
+
+    public SchemaRepository getRepository() {
+        return repository;
+    }
+
+}

+ 41 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/TableMetaTSDB.java

@@ -0,0 +1,41 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb;
+
+import java.util.Map;
+
+import com.alibaba.otter.canal.parse.inbound.TableMeta;
+import com.alibaba.otter.canal.protocol.position.EntryPosition;
+
/**
 * Time-series store (TSDB) for table structures: records DDL changes over time
 * so the table meta can be reconstructed for any binlog position.
 *
 * @author agapple 2017-07-27
 * @since 1.0.25
 */
public interface TableMetaTSDB {

    /**
     * Initialize the store for the given canal destination (instance name).
     */
    public boolean init(String destination);

    /**
     * Return the current table structure for the given schema/table.
     */
    public TableMeta find(String schema, String table);

    /**
     * Record a DDL statement into the time-series store at the given binlog position.
     */
    public boolean apply(EntryPosition position, String schema, String ddl, String extra);

    /**
     * Roll the stored table structures back to the given binlog position.
     */
    public boolean rollback(EntryPosition position);

    /**
     * Produce a snapshot of the current structures, keyed by schema.
     */
    public Map<String/* schema */, String> snapshot();

}

+ 51 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/TableMetaTSDBBuilder.java

@@ -0,0 +1,51 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb;
+
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.context.support.ClassPathXmlApplicationContext;
+
+import com.google.common.collect.Maps;
+
+/**
+ * @author agapple 2017年10月11日 下午8:45:40
+ * @since 1.0.25
+ */
+public class TableMetaTSDBBuilder {
+
+    protected final static Logger                                        logger   = LoggerFactory.getLogger(TableMetaTSDBBuilder.class);
+    private static ConcurrentMap<String, ClassPathXmlApplicationContext> contexts = Maps.newConcurrentMap();
+
+    /**
+     * 代理一下tableMetaTSDB的获取,使用隔离的spring定义
+     */
+    public static TableMetaTSDB build(String destination, String springXml) {
+        if (StringUtils.isNotEmpty(springXml)) {
+            ClassPathXmlApplicationContext applicationContext = contexts.get(destination);
+            if (applicationContext == null) {
+                synchronized (contexts) {
+                    if (applicationContext == null) {
+                        applicationContext = new ClassPathXmlApplicationContext(springXml);
+                        contexts.put(destination, applicationContext);
+                    }
+                }
+            }
+            TableMetaTSDB tableMetaTSDB = (TableMetaTSDB) applicationContext.getBean("tableMetaTSDB");
+            tableMetaTSDB.init(destination);
+            logger.info("{} init TableMetaTSDB with {}", destination, springXml);
+            return tableMetaTSDB;
+        } else {
+            return null;
+        }
+    }
+
+    public static void destory(String destination) {
+        ClassPathXmlApplicationContext context = contexts.remove(destination);
+        if (context != null) {
+            logger.info("{} destory TableMetaTSDB", destination);
+            context.close();
+        }
+    }
+}

+ 59 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/dao/MetaBaseDAO.java

@@ -0,0 +1,59 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+import javax.sql.DataSource;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang.StringUtils;
+import org.springframework.orm.ibatis.support.SqlMapClientDaoSupport;
+
+/**
+ * @author agapple 2017年10月14日 上午1:05:22
+ * @since 1.0.25
+ */
+@SuppressWarnings("deprecation")
+public class MetaBaseDAO extends SqlMapClientDaoSupport {
+
+    protected boolean isH2 = false;
+
+    protected void initTable(String tableName) throws Exception {
+        Connection conn = null;
+        InputStream input = null;
+        try {
+            DataSource dataSource = getDataSource();
+            conn = dataSource.getConnection();
+            String name = "mysql";
+            isH2 = isH2(conn);
+            if (isH2) {
+                name = "h2";
+            }
+            input = Thread.currentThread()
+                .getContextClassLoader()
+                .getResourceAsStream("ddl/" + name + "/" + tableName + ".sql");
+            if (input == null) {
+                return;
+            }
+
+            String sql = StringUtils.join(IOUtils.readLines(input), "\n");
+            Statement stmt = conn.createStatement();
+            stmt.execute(sql);
+            stmt.close();
+        } catch (Throwable e) {
+            logger.warn("init " + tableName + " failed", e);
+        } finally {
+            IOUtils.closeQuietly(input);
+            if (conn != null) {
+                conn.close();
+            }
+        }
+    }
+
+    private boolean isH2(Connection conn) throws SQLException {
+        String product = conn.getMetaData().getDatabaseProductName();
+        return StringUtils.containsIgnoreCase(product, "H2");
+    }
+}

+ 52 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/dao/MetaHistoryDAO.java

@@ -0,0 +1,52 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao;
+
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+
+import com.google.common.collect.Maps;
+
+/**
+ * canal数据的存储
+ *
+ * @author wanshao 2017年7月27日 下午10:51:55
+ * @since 3.2.5
+ */
+@SuppressWarnings("deprecation")
+public class MetaHistoryDAO extends MetaBaseDAO {
+
+    public Long insert(MetaHistoryDO metaDO) {
+        return (Long) getSqlMapClientTemplate().insert("meta_history.insert", metaDO);
+    }
+
+    public List<MetaHistoryDO> findByTimestamp(String destination, Long snapshotTimestamp, Long timestamp) {
+        HashMap params = Maps.newHashMapWithExpectedSize(2);
+        params.put("destination", destination);
+        params.put("snapshotTimestamp", snapshotTimestamp == null ? 0L : snapshotTimestamp);
+        params.put("timestamp", timestamp == null ? 0L : timestamp);
+        return (List<MetaHistoryDO>) getSqlMapClientTemplate().queryForList("meta_history.findByTimestamp", params);
+    }
+
+    public Integer deleteByName(String destination) {
+        HashMap params = Maps.newHashMapWithExpectedSize(2);
+        params.put("destination", destination);
+        return getSqlMapClientTemplate().delete("meta_history.deleteByName", params);
+    }
+
+    /**
+     * 删除interval秒之前的数据
+     */
+    public Integer deleteByGmtModified(int interval) {
+        HashMap params = Maps.newHashMapWithExpectedSize(2);
+        long timestamp = System.currentTimeMillis() - interval * 1000;
+        Date date = new Date(timestamp);
+        SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+        params.put("timestamp", format.format(date));
+        return getSqlMapClientTemplate().delete("meta_history.deleteByGmtModified", params);
+    }
+
+    protected void initDao() throws Exception {
+        initTable("meta_history");
+    }
+}

+ 148 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/dao/MetaHistoryDO.java

@@ -0,0 +1,148 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao;
+
+import java.util.Date;
+
/**
 * Value object mapping one row of the meta_history table: a single DDL change
 * observed at a binlog position, with the SQL text and its context.
 *
 * @author agapple 2017-07-27
 * @since 1.0.25
 */
public class MetaHistoryDO {

    // surrogate key and audit timestamps
    private Long   id;
    private Date   gmtCreate;
    private Date   gmtModified;
    // canal destination (instance) the record belongs to
    private String destination;
    // binlog position the DDL was observed at
    private String binlogFile;
    private Long   binlogOffest;   // original spelling kept to match the column name
    private String binlogMasterId;
    private Long   binlogTimestamp;
    // DDL details
    private String useSchema;
    private String sqlSchema;
    private String sqlTable;
    private String sqlText;
    private String sqlType;
    private String extra;

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public Date getGmtCreate() {
        return gmtCreate;
    }

    public void setGmtCreate(Date gmtCreate) {
        this.gmtCreate = gmtCreate;
    }

    public Date getGmtModified() {
        return gmtModified;
    }

    public void setGmtModified(Date gmtModified) {
        this.gmtModified = gmtModified;
    }

    public String getDestination() {
        return destination;
    }

    public void setDestination(String destination) {
        this.destination = destination;
    }

    public String getBinlogFile() {
        return binlogFile;
    }

    public void setBinlogFile(String binlogFile) {
        this.binlogFile = binlogFile;
    }

    public Long getBinlogOffest() {
        return binlogOffest;
    }

    public void setBinlogOffest(Long binlogOffest) {
        this.binlogOffest = binlogOffest;
    }

    public String getBinlogMasterId() {
        return binlogMasterId;
    }

    public void setBinlogMasterId(String binlogMasterId) {
        this.binlogMasterId = binlogMasterId;
    }

    public Long getBinlogTimestamp() {
        return binlogTimestamp;
    }

    public void setBinlogTimestamp(Long binlogTimestamp) {
        this.binlogTimestamp = binlogTimestamp;
    }

    public String getUseSchema() {
        return useSchema;
    }

    public void setUseSchema(String useSchema) {
        this.useSchema = useSchema;
    }

    public String getSqlSchema() {
        return sqlSchema;
    }

    public void setSqlSchema(String sqlSchema) {
        this.sqlSchema = sqlSchema;
    }

    public String getSqlTable() {
        return sqlTable;
    }

    public void setSqlTable(String sqlTable) {
        this.sqlTable = sqlTable;
    }

    public String getSqlText() {
        return sqlText;
    }

    public void setSqlText(String sqlText) {
        this.sqlText = sqlText;
    }

    public String getSqlType() {
        return sqlType;
    }

    public void setSqlType(String sqlType) {
        this.sqlType = sqlType;
    }

    public String getExtra() {
        return extra;
    }

    public void setExtra(String extra) {
        this.extra = extra;
    }

    @Override
    public String toString() {
        // same text as before, assembled with a builder
        StringBuilder sb = new StringBuilder("MetaHistoryDO [id=");
        sb.append(id)
            .append(", gmtCreate=").append(gmtCreate)
            .append(", gmtModified=").append(gmtModified)
            .append(", destination=").append(destination)
            .append(", binlogFile=").append(binlogFile)
            .append(", binlogOffest=").append(binlogOffest)
            .append(", binlogMasterId=").append(binlogMasterId)
            .append(", binlogTimestamp=").append(binlogTimestamp)
            .append(", useSchema=").append(useSchema)
            .append(", sqlSchema=").append(sqlSchema)
            .append(", sqlTable=").append(sqlTable)
            .append(", sqlText=").append(sqlText)
            .append(", sqlType=").append(sqlType)
            .append(", extra=").append(extra)
            .append(']');
        return sb.toString();
    }

}

+ 56 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/dao/MetaSnapshotDAO.java

@@ -0,0 +1,56 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao;
+
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.HashMap;
+
+import com.google.common.collect.Maps;
+
+/**
+ * canal数据的存储
+ * 
+ * @author wanshao 2017年7月27日 下午10:51:55
+ * @since 3.2.5
+ */
+@SuppressWarnings("deprecation")
+public class MetaSnapshotDAO extends MetaBaseDAO {
+
+    public Long insert(MetaSnapshotDO snapshotDO) {
+        return (Long) getSqlMapClientTemplate().insert("meta_snapshot.insert", snapshotDO);
+    }
+
+    public Long update(MetaSnapshotDO snapshotDO) {
+        return (Long) getSqlMapClientTemplate().insert("meta_snapshot.update", snapshotDO);
+    }
+
+    public MetaSnapshotDO findByTimestamp(String destination, Long timestamp) {
+        HashMap params = Maps.newHashMapWithExpectedSize(2);
+        params.put("timestamp", timestamp == null ? 0L : timestamp);
+        params.put("destination", destination);
+
+        return (MetaSnapshotDO) getSqlMapClientTemplate().queryForObject("meta_snapshot.findByTimestamp", params);
+    }
+
+    public Integer deleteByName(String destination) {
+        HashMap params = Maps.newHashMapWithExpectedSize(2);
+        params.put("destination", destination);
+        return getSqlMapClientTemplate().delete("meta_snapshot.deleteByName", params);
+    }
+
+    /**
+     * 删除interval秒之前的数据
+     */
+    public Integer deleteByGmtModified(int interval) {
+        HashMap params = Maps.newHashMapWithExpectedSize(2);
+        long timestamp = System.currentTimeMillis() - interval * 1000;
+        Date date = new Date(timestamp);
+        SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+        params.put("timestamp", format.format(date));
+        return getSqlMapClientTemplate().delete("meta_snapshot.deleteByGmtModified", params);
+    }
+
+    protected void initDao() throws Exception {
+        initTable("meta_snapshot");
+    }
+
+}

+ 110 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/dao/MetaSnapshotDO.java

@@ -0,0 +1,110 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao;
+
+import java.util.Date;
+
/**
 * Value object mapping one row of the meta_snapshot table: a full table-structure
 * snapshot for a destination at a binlog position.
 *
 * @author agapple 2017-07-27
 * @since 1.0.25
 */
public class MetaSnapshotDO {

    // surrogate key and audit timestamps
    private Long   id;
    private Date   gmtCreate;
    private Date   gmtModified;
    // canal destination (instance) the record belongs to
    private String destination;
    // binlog position the snapshot corresponds to
    private String binlogFile;
    private Long   binlogOffest;   // original spelling kept to match the column name
    private String binlogMasterId;
    private Long   binlogTimestamp;
    // snapshot payload and extension data
    private String data;
    private String extra;

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public Date getGmtCreate() {
        return gmtCreate;
    }

    public void setGmtCreate(Date gmtCreate) {
        this.gmtCreate = gmtCreate;
    }

    public Date getGmtModified() {
        return gmtModified;
    }

    public void setGmtModified(Date gmtModified) {
        this.gmtModified = gmtModified;
    }

    public String getDestination() {
        return destination;
    }

    public void setDestination(String destination) {
        this.destination = destination;
    }

    public String getBinlogFile() {
        return binlogFile;
    }

    public void setBinlogFile(String binlogFile) {
        this.binlogFile = binlogFile;
    }

    public Long getBinlogOffest() {
        return binlogOffest;
    }

    public void setBinlogOffest(Long binlogOffest) {
        this.binlogOffest = binlogOffest;
    }

    public String getBinlogMasterId() {
        return binlogMasterId;
    }

    public void setBinlogMasterId(String binlogMasterId) {
        this.binlogMasterId = binlogMasterId;
    }

    public Long getBinlogTimestamp() {
        return binlogTimestamp;
    }

    public void setBinlogTimestamp(Long binlogTimestamp) {
        this.binlogTimestamp = binlogTimestamp;
    }

    public String getData() {
        return data;
    }

    public void setData(String data) {
        this.data = data;
    }

    public String getExtra() {
        return extra;
    }

    public void setExtra(String extra) {
        this.extra = extra;
    }

    @Override
    public String toString() {
        // same text as before, assembled with a builder
        StringBuilder sb = new StringBuilder("MetaSnapshotDO [id=");
        sb.append(id)
            .append(", gmtCreate=").append(gmtCreate)
            .append(", gmtModified=").append(gmtModified)
            .append(", destination=").append(destination)
            .append(", binlogFile=").append(binlogFile)
            .append(", binlogOffest=").append(binlogOffest)
            .append(", binlogMasterId=").append(binlogMasterId)
            .append(", binlogTimestamp=").append(binlogTimestamp)
            .append(", data=").append(data)
            .append(", extra=").append(extra)
            .append(']');
        return sb.toString();
    }

}

+ 1 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/index/FileMixedLogPositionManager.java

@@ -45,6 +45,7 @@ public class FileMixedLogPositionManager extends AbstractLogPositionManager {
 
 
     private ScheduledExecutorService executorService;
     private ScheduledExecutorService executorService;
 
 
+    @SuppressWarnings("serial")
     private final LogPosition        nullPosition = new LogPosition() {
     private final LogPosition        nullPosition = new LogPosition() {
                                                   };
                                                   };
 
 

+ 1 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/index/PeriodMixedLogPositionManager.java

@@ -29,6 +29,7 @@ public class PeriodMixedLogPositionManager extends AbstractLogPositionManager {
     private long                        period;
     private long                        period;
     private Set<String>                 persistTasks;
     private Set<String>                 persistTasks;
 
 
+    @SuppressWarnings("serial")
     private final LogPosition           nullPosition = new LogPosition() {
     private final LogPosition           nullPosition = new LogPosition() {
                                                      };
                                                      };
 
 

+ 22 - 0
parse/src/main/resources/ddl/derby/meta_history.sql

@@ -0,0 +1,22 @@
-- meta_history (Derby flavor): one row per captured DDL statement.
-- The unique constraint prevents re-recording the same binlog position
-- for a destination.
CREATE TABLE meta_history (
  id bigint GENERATED ALWAYS AS IDENTITY NOT NULL,
  gmt_create timestamp NOT NULL,
  gmt_modified timestamp NOT NULL,
  destination varchar(128) DEFAULT NULL,
  binlog_file varchar(64) DEFAULT NULL,
  binlog_offest bigint DEFAULT NULL,
  binlog_master_id varchar(64) DEFAULT NULL,
  binlog_timestamp bigint DEFAULT NULL,
  use_schema varchar(1024) DEFAULT NULL,
  sql_schema varchar(1024) DEFAULT NULL,
  sql_table varchar(1024) DEFAULT NULL,
  sql_text clob(16 M) DEFAULT NULL,
  sql_type varchar(1024) DEFAULT NULL,
  extra varchar(512) DEFAULT NULL,
  PRIMARY KEY (id),
  CONSTRAINT meta_history_binlog_file_offest UNIQUE (destination,binlog_master_id,binlog_file,binlog_offest)
);

-- lookup indexes: by destination, by destination+binlog timestamp,
-- and by gmt_modified (used for time-based cleanup)
create index meta_history_destination on meta_history(destination);
create index meta_history_destination_timestamp on meta_history(destination,binlog_timestamp);
create index meta_history_gmt_modified on meta_history(gmt_modified);
+ 18 - 0
parse/src/main/resources/ddl/derby/meta_snapshot.sql

@@ -0,0 +1,18 @@
-- meta_snapshot (Derby flavor): periodic full table-structure snapshots,
-- one row per destination+binlog position.
CREATE TABLE meta_snapshot (
  id bigint GENERATED ALWAYS AS IDENTITY NOT NULL,
  gmt_create timestamp NOT NULL,
  gmt_modified timestamp NOT NULL,
  destination varchar(128) DEFAULT NULL,
  binlog_file varchar(64) DEFAULT NULL,
  binlog_offest bigint DEFAULT NULL,
  binlog_master_id varchar(64) DEFAULT NULL,
  binlog_timestamp bigint DEFAULT NULL,
  data clob(16 M) DEFAULT NULL,
  extra varchar(512) DEFAULT NULL,
  PRIMARY KEY (id),
  CONSTRAINT meta_snapshot_binlog_file_offest UNIQUE (destination,binlog_master_id,binlog_file,binlog_offest)
);

-- lookup indexes: by destination, by destination+binlog timestamp,
-- and by gmt_modified (used for time-based cleanup)
create index meta_snapshot_destination on meta_snapshot(destination);
create index meta_snapshot_destination_timestamp on meta_snapshot(destination,binlog_timestamp);
create index meta_snapshot_gmt_modified on meta_snapshot(gmt_modified);

+ 21 - 0
parse/src/main/resources/ddl/h2/meta_history.sql

@@ -0,0 +1,21 @@
-- meta_history (H2, MySQL-compat syntax): one row per captured DDL statement.
-- Unique key prevents re-recording the same binlog position per destination.
CREATE TABLE IF NOT EXISTS `meta_history` (
  `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键',
  `gmt_create` datetime NOT NULL COMMENT '创建时间',
  `gmt_modified` datetime NOT NULL COMMENT '修改时间',
  `destination` varchar(128) DEFAULT NULL COMMENT '通道名称',
  `binlog_file` varchar(64) DEFAULT NULL COMMENT 'binlog文件名',
  `binlog_offest` bigint(20) DEFAULT NULL COMMENT 'binlog偏移量',
  `binlog_master_id` varchar(64) DEFAULT NULL COMMENT 'binlog节点id',
  `binlog_timestamp` bigint(20) DEFAULT NULL COMMENT 'binlog应用的时间戳',
  `use_schema` varchar(1024) DEFAULT NULL COMMENT '执行sql时对应的schema',
  `sql_schema` varchar(1024) DEFAULT NULL COMMENT '对应的schema',
  `sql_table` varchar(1024) DEFAULT NULL COMMENT '对应的table',
  `sql_text` longtext DEFAULT NULL COMMENT '执行的sql',
  `sql_type` varchar(256) DEFAULT NULL COMMENT 'sql类型',
  `extra` text DEFAULT NULL COMMENT '额外的扩展信息',
  PRIMARY KEY (`id`),
  UNIQUE KEY meta_history_binlog_file_offest(`destination`,`binlog_master_id`,`binlog_file`,`binlog_offest`),
  KEY `meta_history_destination` (`destination`),
  KEY `meta_history_destination_timestamp` (`destination`,`binlog_timestamp`),
  KEY `meta_history_gmt_modified` (`gmt_modified`)
);

+ 17 - 0
parse/src/main/resources/ddl/h2/meta_snapshot.sql

@@ -0,0 +1,17 @@
-- meta_snapshot (H2, MySQL-compat syntax): periodic full table-structure
-- snapshots, one row per destination+binlog position.
CREATE TABLE IF NOT EXISTS `meta_snapshot` (
  `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键',
  `gmt_create` datetime NOT NULL COMMENT '创建时间',
  `gmt_modified` datetime NOT NULL COMMENT '修改时间',
  `destination` varchar(128) DEFAULT NULL COMMENT '通道名称',
  `binlog_file` varchar(64) DEFAULT NULL COMMENT 'binlog文件名',
  `binlog_offest` bigint(20) DEFAULT NULL COMMENT 'binlog偏移量',
  `binlog_master_id` varchar(64) DEFAULT NULL COMMENT 'binlog节点id',
  `binlog_timestamp` bigint(20) DEFAULT NULL COMMENT 'binlog应用的时间戳',
  `data` longtext DEFAULT NULL COMMENT '表结构数据',
  `extra` text DEFAULT NULL COMMENT '额外的扩展信息',
  PRIMARY KEY (`id`),
  UNIQUE KEY meta_snapshot_binlog_file_offest(`destination`,`binlog_master_id`,`binlog_file`,`binlog_offest`),
  KEY `meta_snapshot_destination` (`destination`),
  KEY `meta_snapshot_destination_timestamp` (`destination`,`binlog_timestamp`),
  KEY `meta_snapshot_gmt_modified` (`gmt_modified`)
);

+ 21 - 0
parse/src/main/resources/ddl/mysql/meta_history.sql

@@ -0,0 +1,21 @@
-- meta_history (MySQL): one row per captured DDL statement.
-- Unique key prevents re-recording the same binlog position per destination.
CREATE TABLE IF NOT EXISTS `meta_history` (
  `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键',
  `gmt_create` datetime NOT NULL COMMENT '创建时间',
  `gmt_modified` datetime NOT NULL COMMENT '修改时间',
  `destination` varchar(128) DEFAULT NULL COMMENT '通道名称',
  `binlog_file` varchar(64) DEFAULT NULL COMMENT 'binlog文件名',
  `binlog_offest` bigint(20) DEFAULT NULL COMMENT 'binlog偏移量',
  `binlog_master_id` varchar(64) DEFAULT NULL COMMENT 'binlog节点id',
  `binlog_timestamp` bigint(20) DEFAULT NULL COMMENT 'binlog应用的时间戳',
  `use_schema` varchar(1024) DEFAULT NULL COMMENT '执行sql时对应的schema',
  `sql_schema` varchar(1024) DEFAULT NULL COMMENT '对应的schema',
  `sql_table` varchar(1024) DEFAULT NULL COMMENT '对应的table',
  `sql_text` longtext DEFAULT NULL COMMENT '执行的sql',
  `sql_type` varchar(256) DEFAULT NULL COMMENT 'sql类型',
  `extra` text DEFAULT NULL COMMENT '额外的扩展信息',
  PRIMARY KEY (`id`),
  UNIQUE KEY binlog_file_offest(`destination`,`binlog_master_id`,`binlog_file`,`binlog_offest`),
  KEY `destination` (`destination`),
  KEY `destination_timestamp` (`destination`,`binlog_timestamp`),
  KEY `gmt_modified` (`gmt_modified`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 COMMENT='表结构变化明细表';

+ 17 - 0
parse/src/main/resources/ddl/mysql/meta_snapshot.sql

@@ -0,0 +1,17 @@
-- meta_snapshot (MySQL): periodic full table-structure snapshots,
-- one row per destination+binlog position.
CREATE TABLE IF NOT EXISTS `meta_snapshot` (
  `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键',
  `gmt_create` datetime NOT NULL COMMENT '创建时间',
  `gmt_modified` datetime NOT NULL COMMENT '修改时间',
  `destination` varchar(128) DEFAULT NULL COMMENT '通道名称',
  `binlog_file` varchar(64) DEFAULT NULL COMMENT 'binlog文件名',
  `binlog_offest` bigint(20) DEFAULT NULL COMMENT 'binlog偏移量',
  `binlog_master_id` varchar(64) DEFAULT NULL COMMENT 'binlog节点id',
  `binlog_timestamp` bigint(20) DEFAULT NULL COMMENT 'binlog应用的时间戳',
  `data` longtext DEFAULT NULL COMMENT '表结构数据',
  `extra` text DEFAULT NULL COMMENT '额外的扩展信息',
  PRIMARY KEY (`id`),
  UNIQUE KEY binlog_file_offest(`destination`,`binlog_master_id`,`binlog_file`,`binlog_offest`),
  KEY `destination` (`destination`),
  KEY `destination_timestamp` (`destination`,`binlog_timestamp`),
  KEY `gmt_modified` (`gmt_modified`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 COMMENT='表结构记录表快照表';

+ 100 - 2
parse/src/test/java/com/alibaba/otter/canal/parse/DirectLogFetcherTest.java

@@ -3,27 +3,38 @@ package com.alibaba.otter.canal.parse;
 import java.io.IOException;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.InetSocketAddress;
 
 
+import org.apache.commons.lang.StringUtils;
 import org.junit.Assert;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 import com.alibaba.otter.canal.parse.driver.mysql.MysqlConnector;
 import com.alibaba.otter.canal.parse.driver.mysql.MysqlConnector;
+import com.alibaba.otter.canal.parse.driver.mysql.MysqlUpdateExecutor;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.HeaderPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.HeaderPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.client.BinlogDumpCommandPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.packets.client.BinlogDumpCommandPacket;
+import com.alibaba.otter.canal.parse.driver.mysql.packets.client.RegisterSlaveCommandPacket;
+import com.alibaba.otter.canal.parse.driver.mysql.packets.server.ErrorPacket;
 import com.alibaba.otter.canal.parse.driver.mysql.utils.PacketManager;
 import com.alibaba.otter.canal.parse.driver.mysql.utils.PacketManager;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.DirectLogFetcher;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.DirectLogFetcher;
 import com.taobao.tddl.dbsync.binlog.LogContext;
 import com.taobao.tddl.dbsync.binlog.LogContext;
 import com.taobao.tddl.dbsync.binlog.LogDecoder;
 import com.taobao.tddl.dbsync.binlog.LogDecoder;
 import com.taobao.tddl.dbsync.binlog.LogEvent;
 import com.taobao.tddl.dbsync.binlog.LogEvent;
+import com.taobao.tddl.dbsync.binlog.event.RotateLogEvent;
 
 
 public class DirectLogFetcherTest {
 public class DirectLogFetcherTest {
 
 
+    protected final Logger logger = LoggerFactory.getLogger(this.getClass());
+
     @Test
     @Test
     public void testSimple() {
     public void testSimple() {
         DirectLogFetcher fetcher = new DirectLogFetcher();
         DirectLogFetcher fetcher = new DirectLogFetcher();
         try {
         try {
-            MysqlConnector connector = new MysqlConnector(new InetSocketAddress("127.0.0.1", 3306), "xxxxx", "xxxxx");
+            MysqlConnector connector = new MysqlConnector(new InetSocketAddress("127.0.0.1", 3306), "xxxx", "xxxx");
             connector.connect();
             connector.connect();
-            sendBinlogDump(connector, "mysql-bin.001016", 4L, 3);
+            updateSettings(connector);
+            sendRegisterSlave(connector, 3);
+            sendBinlogDump(connector, "mysql-bin.000001", 4L, 3);
 
 
             fetcher.start(connector.getChannel());
             fetcher.start(connector.getChannel());
 
 
@@ -42,6 +53,7 @@ public class DirectLogFetcherTest {
                     case LogEvent.ROTATE_EVENT:
                     case LogEvent.ROTATE_EVENT:
                         // binlogFileName = ((RotateLogEvent)
                         // binlogFileName = ((RotateLogEvent)
                         // event).getFilename();
                         // event).getFilename();
+                        System.out.println(((RotateLogEvent) event).getFilename());
                         break;
                         break;
                     case LogEvent.WRITE_ROWS_EVENT_V1:
                     case LogEvent.WRITE_ROWS_EVENT_V1:
                     case LogEvent.WRITE_ROWS_EVENT:
                     case LogEvent.WRITE_ROWS_EVENT:
@@ -82,6 +94,33 @@ public class DirectLogFetcherTest {
 
 
     }
     }
 
 
+    private void sendRegisterSlave(MysqlConnector connector, int slaveId) throws IOException {
+        RegisterSlaveCommandPacket cmd = new RegisterSlaveCommandPacket();
+        cmd.reportHost = connector.getAddress().getAddress().getHostAddress();
+        cmd.reportPasswd = connector.getPassword();
+        cmd.reportUser = connector.getUsername();
+        cmd.serverId = slaveId;
+        byte[] cmdBody = cmd.toBytes();
+
+        HeaderPacket header = new HeaderPacket();
+        header.setPacketBodyLength(cmdBody.length);
+        header.setPacketSequenceNumber((byte) 0x00);
+        PacketManager.writePkg(connector.getChannel(), header.toBytes(), cmdBody);
+
+        header = PacketManager.readHeader(connector.getChannel(), 4);
+        byte[] body = PacketManager.readBytes(connector.getChannel(), header.getPacketBodyLength());
+        assert body != null;
+        if (body[0] < 0) {
+            if (body[0] == -1) {
+                ErrorPacket err = new ErrorPacket();
+                err.fromBytes(body);
+                throw new IOException("Error When doing Register slave:" + err.toString());
+            } else {
+                throw new IOException("unpexpected packet with field_count=" + body[0]);
+            }
+        }
+    }
+
     private void sendBinlogDump(MysqlConnector connector, String binlogfilename, Long binlogPosition, int slaveId)
     private void sendBinlogDump(MysqlConnector connector, String binlogfilename, Long binlogPosition, int slaveId)
                                                                                                                   throws IOException {
                                                                                                                   throws IOException {
         BinlogDumpCommandPacket binlogDumpCmd = new BinlogDumpCommandPacket();
         BinlogDumpCommandPacket binlogDumpCmd = new BinlogDumpCommandPacket();
@@ -95,4 +134,63 @@ public class DirectLogFetcherTest {
         binlogDumpHeader.setPacketSequenceNumber((byte) 0x00);
         binlogDumpHeader.setPacketSequenceNumber((byte) 0x00);
         PacketManager.writePkg(connector.getChannel(), binlogDumpHeader.toBytes(), cmdBody);
         PacketManager.writePkg(connector.getChannel(), binlogDumpHeader.toBytes(), cmdBody);
     }
     }
+
    /**
     * Apply the session settings a binlog client needs before requesting a dump:
     * generous timeouts, binary charset, and checksum/uuid compatibility
     * variables. Each setting is best-effort — failures are logged and ignored.
     */
    private void updateSettings(MysqlConnector connector) throws IOException {
        try {
            update("set wait_timeout=9999999", connector);
        } catch (Exception e) {
            logger.warn("update wait_timeout failed", e);
        }
        try {
            update("set net_write_timeout=1800", connector);
        } catch (Exception e) {
            logger.warn("update net_write_timeout failed", e);
        }

        try {
            update("set net_read_timeout=1800", connector);
        } catch (Exception e) {
            logger.warn("update net_read_timeout failed", e);
        }

        try {
            // ask the server to send result bytes untranslated, in the database's
            // raw binary encoding; the client converts charsets itself as needed
            update("set names 'binary'", connector);
        } catch (Exception e) {
            logger.warn("update names failed", e);
        }

        try {
            // MySQL 5.6+ requires this session variable for checksum support,
            // otherwise: "Slave can not handle replication events with the
            // checksum that master is configured to log". It must also match the
            // server's binlog_checksum setting or RotateLogEvent comes back garbled.
            // NOTE(review): the value is quoted, so the variable is set to the
            // literal string '@@global.binlog_checksum' rather than its value —
            // confirm this is intended.
            update("set @master_binlog_checksum= '@@global.binlog_checksum'", connector);
        } catch (Exception e) {
            logger.warn("update master_binlog_checksum failed", e);
        }

        try {
            // see https://github.com/alibaba/canal/issues/284:
            // MySQL 5.6 needs @slave_uuid set to avoid the server killing the link
            update("set @slave_uuid=uuid()", connector);
        } catch (Exception e) {
            if (!StringUtils.contains(e.getMessage(), "Unknown system variable")) {
                logger.warn("update slave_uuid failed", e);
            }
        }

        try {
            // MariaDB requires this session capability variable for some event types
            update("SET @mariadb_slave_capability='" + LogEvent.MARIA_SLAVE_CAPABILITY_MINE + "'", connector);
        } catch (Exception e) {
            logger.warn("update mariadb_slave_capability failed", e);
        }
    }
+
+    public void update(String cmd, MysqlConnector connector) throws IOException {
+        MysqlUpdateExecutor exector = new MysqlUpdateExecutor(connector);
+        exector.update(cmd);
+    }
+
 }
 }

+ 19 - 11
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/TableMetaCacheTest.java

@@ -2,32 +2,40 @@ package com.alibaba.otter.canal.parse.inbound;
 
 
 import java.io.IOException;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.InetSocketAddress;
+import java.util.List;
 
 
 import org.junit.Assert;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.Test;
 
 
-import com.alibaba.otter.canal.parse.inbound.TableMeta.FieldMeta;
+import com.alibaba.otter.canal.parse.driver.mysql.packets.server.ResultSetPacket;
 import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection;
 import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection;
-import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.TableMetaCache;
 
 
 public class TableMetaCacheTest {
 public class TableMetaCacheTest {
 
 
     @Test
     @Test
-    public void testSimple() {
-
-        MysqlConnection connection = new MysqlConnection(new InetSocketAddress("127.0.0.1", 3306), "xxxxx", "xxxxx");
+    public void testSimple() throws IOException {
+        MysqlConnection connection = new MysqlConnection(new InetSocketAddress("127.0.0.1", 3306), "root", "hello");
         try {
         try {
             connection.connect();
             connection.connect();
         } catch (IOException e) {
         } catch (IOException e) {
             Assert.fail(e.getMessage());
             Assert.fail(e.getMessage());
         }
         }
 
 
-        TableMetaCache cache = new TableMetaCache(connection);
-        TableMeta meta = cache.getTableMeta("otter1", "otter_stability1");
-        Assert.assertNotNull(meta);
-        for (FieldMeta field : meta.getFileds()) {
-            System.out.println("filed :" + field.getColumnName() + " , isKey : " + field.isKey() + " , isNull : "
-                               + field.isNullable());
+        List<ResultSetPacket> packets = connection.queryMulti("show create table test.ljh_test");
+        String createDDL = null;
+        if (packets.get(0).getFieldValues().size() > 0) {
+            createDDL = packets.get(0).getFieldValues().get(1);
         }
         }
+
+        System.out.println(createDDL);
+
+        // TableMetaCache cache = new TableMetaCache(connection);
+        // TableMeta meta = cache.getTableMeta("otter1", "otter_stability1");
+        // Assert.assertNotNull(meta);
+        // for (FieldMeta field : meta.getFields()) {
+        // System.out.println("filed :" + field.getColumnName() + " , isKey : "
+        // + field.isKey() + " , isNull : "
+        // + field.isNullable());
+        // }
     }
     }
 }
 }

+ 2 - 2
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/group/GroupEventPaserTest.java

@@ -88,8 +88,8 @@ public class GroupEventPaserTest {
     private BinlogParser buildParser(AuthenticationInfo info) {
     private BinlogParser buildParser(AuthenticationInfo info) {
         return new AbstractBinlogParser<LogEvent>() {
         return new AbstractBinlogParser<LogEvent>() {
 
 
-            public Entry parse(LogEvent event) throws CanalParseException {
-                // return _parser.parse(event);
+            @Override
+            public Entry parse(LogEvent event, boolean isSeek) throws CanalParseException {
                 return null;
                 return null;
             }
             }
         };
         };

+ 3 - 3
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/LocalBinlogDumpTest.java

@@ -25,11 +25,11 @@ public class LocalBinlogDumpTest {
 
 
     @Test
     @Test
     public void testSimple() {
     public void testSimple() {
-        String directory = "/home/jianghang/tmp/binlog";
+        String directory = "/Users/wanshao/projects/canal/parse/src/test/resources/binlog/tsdb";
         final LocalBinlogEventParser controller = new LocalBinlogEventParser();
         final LocalBinlogEventParser controller = new LocalBinlogEventParser();
-        final EntryPosition startPosition = new EntryPosition("mysql-bin.000006", 4L);
+        final EntryPosition startPosition = new EntryPosition("mysql-bin.000003", 123L);
 
 
-        controller.setMasterInfo(new AuthenticationInfo(new InetSocketAddress("127.0.0.1", 3306), "xxxxx", "xxxxx"));
+        controller.setMasterInfo(new AuthenticationInfo(new InetSocketAddress("127.0.0.1", 3306), "canal", "canal"));
         controller.setConnectionCharset(Charset.forName("UTF-8"));
         controller.setConnectionCharset(Charset.forName("UTF-8"));
         controller.setDirectory(directory);
         controller.setDirectory(directory);
         controller.setMasterPosition(startPosition);
         controller.setMasterPosition(startPosition);

+ 8 - 8
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/LocalBinlogEventParserTest.java

@@ -23,15 +23,15 @@ import com.alibaba.otter.canal.sink.exception.CanalSinkException;
 public class LocalBinlogEventParserTest {
 public class LocalBinlogEventParserTest {
 
 
     private static final String MYSQL_ADDRESS = "127.0.0.1";
     private static final String MYSQL_ADDRESS = "127.0.0.1";
-    private static final String USERNAME      = "xxxxx";
-    private static final String PASSWORD      = "xxxxx";
+    private static final String USERNAME      = "canal";
+    private static final String PASSWORD      = "canal";
     private String              directory;
     private String              directory;
 
 
     @Before
     @Before
     public void setUp() {
     public void setUp() {
         URL url = Thread.currentThread().getContextClassLoader().getResource("dummy.txt");
         URL url = Thread.currentThread().getContextClassLoader().getResource("dummy.txt");
         File dummyFile = new File(url.getFile());
         File dummyFile = new File(url.getFile());
-        directory = new File(dummyFile.getParent() + "/binlog").getPath();
+        directory = new File(dummyFile + "/binlog/tsdb").getPath();
     }
     }
 
 
     @Test
     @Test
@@ -40,7 +40,7 @@ public class LocalBinlogEventParserTest {
         final AtomicLong entryCount = new AtomicLong(0);
         final AtomicLong entryCount = new AtomicLong(0);
         final EntryPosition entryPosition = new EntryPosition();
         final EntryPosition entryPosition = new EntryPosition();
 
 
-        final EntryPosition defaultPosition = buildPosition("mysql-bin.000001", 6163L, 1322803601000L);
+        final EntryPosition defaultPosition = buildPosition("mysql-bin.000003", 219L, 1505467103000L);
         final LocalBinlogEventParser controller = new LocalBinlogEventParser();
         final LocalBinlogEventParser controller = new LocalBinlogEventParser();
         controller.setMasterPosition(defaultPosition);
         controller.setMasterPosition(defaultPosition);
         controller.setMasterInfo(buildAuthentication());
         controller.setMasterInfo(buildAuthentication());
@@ -103,7 +103,7 @@ public class LocalBinlogEventParserTest {
         final AtomicLong entryCount = new AtomicLong(0);
         final AtomicLong entryCount = new AtomicLong(0);
         final EntryPosition entryPosition = new EntryPosition();
         final EntryPosition entryPosition = new EntryPosition();
 
 
-        final EntryPosition defaultPosition = buildPosition("mysql-bin.000001", null, 1322803601000L);
+        final EntryPosition defaultPosition = buildPosition("mysql-bin.000003", null, 1505467103000L);
         final LocalBinlogEventParser controller = new LocalBinlogEventParser();
         final LocalBinlogEventParser controller = new LocalBinlogEventParser();
         controller.setMasterPosition(defaultPosition);
         controller.setMasterPosition(defaultPosition);
         controller.setMasterInfo(buildAuthentication());
         controller.setMasterInfo(buildAuthentication());
@@ -156,15 +156,15 @@ public class LocalBinlogEventParserTest {
         Assert.assertTrue(entryCount.get() > 0);
         Assert.assertTrue(entryCount.get() > 0);
 
 
         // 对比第一条数据和起始的position相同
         // 对比第一条数据和起始的position相同
-        Assert.assertEquals(entryPosition.getJournalName(), "mysql-bin.000001");
-        Assert.assertTrue(entryPosition.getPosition() <= 6163L);
+        Assert.assertEquals(entryPosition.getJournalName(), "mysql-bin.000003");
+        Assert.assertTrue(entryPosition.getPosition() <= 300L);
         Assert.assertTrue(entryPosition.getTimestamp() <= defaultPosition.getTimestamp());
         Assert.assertTrue(entryPosition.getTimestamp() <= defaultPosition.getTimestamp());
     }
     }
 
 
     @Test
     @Test
     public void test_no_position() throws InterruptedException {
     public void test_no_position() throws InterruptedException {
         final TimeoutChecker timeoutChecker = new TimeoutChecker(3 * 1000);
         final TimeoutChecker timeoutChecker = new TimeoutChecker(3 * 1000);
-        final EntryPosition defaultPosition = buildPosition("mysql-bin.000002",
+        final EntryPosition defaultPosition = buildPosition("mysql-bin.000003",
             null,
             null,
             new Date().getTime() + 1000 * 1000L);
             new Date().getTime() + 1000 * 1000L);
         final AtomicLong entryCount = new AtomicLong(0);
         final AtomicLong entryCount = new AtomicLong(0);

+ 9 - 3
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlDumpTest.java

@@ -7,6 +7,7 @@ import java.util.List;
 import org.junit.Assert;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.Test;
 
 
+import com.alibaba.otter.canal.filter.aviater.AviaterRegexFilter;
 import com.alibaba.otter.canal.parse.exception.CanalParseException;
 import com.alibaba.otter.canal.parse.exception.CanalParseException;
 import com.alibaba.otter.canal.parse.index.AbstractLogPositionManager;
 import com.alibaba.otter.canal.parse.index.AbstractLogPositionManager;
 import com.alibaba.otter.canal.parse.stub.AbstractCanalEventSinkTest;
 import com.alibaba.otter.canal.parse.stub.AbstractCanalEventSinkTest;
@@ -26,13 +27,18 @@ public class MysqlDumpTest {
     @Test
     @Test
     public void testSimple() {
     public void testSimple() {
         final MysqlEventParser controller = new MysqlEventParser();
         final MysqlEventParser controller = new MysqlEventParser();
-        final EntryPosition startPosition = new EntryPosition("mysql-bin.000003", 4L);
+        final EntryPosition startPosition = new EntryPosition("mysql-bin.000001", 104606L);
 
 
         controller.setConnectionCharset(Charset.forName("UTF-8"));
         controller.setConnectionCharset(Charset.forName("UTF-8"));
         controller.setSlaveId(3344L);
         controller.setSlaveId(3344L);
         controller.setDetectingEnable(false);
         controller.setDetectingEnable(false);
-        controller.setMasterInfo(new AuthenticationInfo(new InetSocketAddress("127.0.0.1", 3306), "xxxxx", "xxxxx"));
+        controller.setMasterInfo(new AuthenticationInfo(new InetSocketAddress("127.0.0.1", 3306), "canal", "canal"));
         controller.setMasterPosition(startPosition);
         controller.setMasterPosition(startPosition);
+        controller.setEnableTsdb(true);
+        controller.setDestination("example");
+        controller.setTsdbSpringXml("classpath:tsdb/h2-tsdb.xml");
+        controller.setEventFilter(new AviaterRegexFilter("test\\..*"));
+        controller.setEventBlackFilter(new AviaterRegexFilter("canal_tsdb\\..*"));
         controller.setEventSink(new AbstractCanalEventSinkTest<List<Entry>>() {
         controller.setEventSink(new AbstractCanalEventSinkTest<List<Entry>>() {
 
 
             public boolean sink(List<Entry> entrys, InetSocketAddress remoteAddress, String destination)
             public boolean sink(List<Entry> entrys, InetSocketAddress remoteAddress, String destination)
@@ -100,7 +106,7 @@ public class MysqlDumpTest {
         controller.start();
         controller.start();
 
 
         try {
         try {
-            Thread.sleep(100 * 1000L);
+            Thread.sleep(100 * 1000 * 1000L);
         } catch (InterruptedException e) {
         } catch (InterruptedException e) {
             Assert.fail(e.getMessage());
             Assert.fail(e.getMessage());
         }
         }

+ 3 - 3
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlEventParserTest.java

@@ -24,8 +24,8 @@ public class MysqlEventParserTest {
 
 
     private static final String DETECTING_SQL = "insert into retl.xdual values(1,now()) on duplicate key update x=now()";
     private static final String DETECTING_SQL = "insert into retl.xdual values(1,now()) on duplicate key update x=now()";
     private static final String MYSQL_ADDRESS = "127.0.0.1";
     private static final String MYSQL_ADDRESS = "127.0.0.1";
-    private static final String USERNAME      = "root";
-    private static final String PASSWORD      = "xxxxxx";
+    private static final String USERNAME      = "canal";
+    private static final String PASSWORD      = "canal";
 
 
     @Test
     @Test
     public void test_position() throws InterruptedException {
     public void test_position() throws InterruptedException {
@@ -34,7 +34,7 @@ public class MysqlEventParserTest {
         final EntryPosition entryPosition = new EntryPosition();
         final EntryPosition entryPosition = new EntryPosition();
 
 
         final MysqlEventParser controller = new MysqlEventParser();
         final MysqlEventParser controller = new MysqlEventParser();
-        final EntryPosition defaultPosition = buildPosition("mysql-bin.000001", 6163L, 1322803601000L);
+        final EntryPosition defaultPosition = buildPosition("mysql-bin.000003", 4690L, 1505481064000L);
 
 
         controller.setSlaveId(3344L);
         controller.setSlaveId(3344L);
         controller.setDetectingEnable(true);
         controller.setDetectingEnable(true);

+ 27 - 0
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/RdsBinlogOpenApiTest.java

@@ -0,0 +1,27 @@
+package com.alibaba.otter.canal.parse.inbound.mysql;
+
+import java.io.File;
+import java.util.Date;
+
+import org.apache.commons.lang.time.DateUtils;
+
+import com.alibaba.otter.canal.parse.inbound.mysql.rds.RdsBinlogOpenApi;
+
+/**
+ * @author agapple 2017年10月15日 下午2:14:34
+ * @since 1.0.25
+ */
+public class RdsBinlogOpenApiTest {
+
+    public void testSimple() throws Throwable {
+        Date startTime = DateUtils.parseDate("2017-10-13 20:56:58", new String[] { "yyyy-MM-dd HH:mm:ss" });
+        Date endTime = DateUtils.parseDate("2017-10-14 02:57:59", new String[] { "yyyy-MM-dd HH:mm:ss" });
+        RdsBinlogOpenApi.downloadBinlogFiles("https://rds.aliyuncs.com/",
+            "",
+            "",
+            "rm-bp180v4mfjnm157es",
+            startTime,
+            endTime,
+            new File("/tmp/binlog/"));
+    }
+}

+ 117 - 0
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/RdsLocalBinlogDumpTest.java

@@ -0,0 +1,117 @@
+package com.alibaba.otter.canal.parse.inbound.mysql;
+
+import java.net.InetSocketAddress;
+import java.nio.charset.Charset;
+import java.util.List;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.alibaba.otter.canal.parse.exception.CanalParseException;
+import com.alibaba.otter.canal.parse.inbound.mysql.rds.RdsLocalBinlogEventParser;
+import com.alibaba.otter.canal.parse.index.AbstractLogPositionManager;
+import com.alibaba.otter.canal.parse.stub.AbstractCanalEventSinkTest;
+import com.alibaba.otter.canal.parse.support.AuthenticationInfo;
+import com.alibaba.otter.canal.protocol.CanalEntry.Column;
+import com.alibaba.otter.canal.protocol.CanalEntry.Entry;
+import com.alibaba.otter.canal.protocol.CanalEntry.EntryType;
+import com.alibaba.otter.canal.protocol.CanalEntry.EventType;
+import com.alibaba.otter.canal.protocol.CanalEntry.RowChange;
+import com.alibaba.otter.canal.protocol.CanalEntry.RowData;
+import com.alibaba.otter.canal.protocol.position.LogPosition;
+import com.alibaba.otter.canal.sink.exception.CanalSinkException;
+
+/**
+ * @author agapple 2017年10月15日 下午2:16:58
+ * @since 1.0.25
+ */
+public class RdsLocalBinlogDumpTest {
+
+    @Test
+    public void testSimple() {
+        String directory = "/tmp/rds";
+        final RdsLocalBinlogEventParser controller = new RdsLocalBinlogEventParser();
+        controller.setMasterInfo(new AuthenticationInfo(new InetSocketAddress("127.0.0.1", 3306), "root", "hello"));
+        controller.setConnectionCharset(Charset.forName("UTF-8"));
+        controller.setDirectory(directory);
+        controller.setAccesskey("");
+        controller.setSecretkey("");
+        controller.setInstanceId("");
+        controller.setStartTime(1507860498350L);
+        controller.setEventSink(new AbstractCanalEventSinkTest<List<Entry>>() {
+
+            public boolean sink(List<Entry> entrys, InetSocketAddress remoteAddress, String destination)
+                                                                                                        throws CanalSinkException,
+                                                                                                        InterruptedException {
+
+                for (Entry entry : entrys) {
+                    if (entry.getEntryType() == EntryType.TRANSACTIONBEGIN
+                        || entry.getEntryType() == EntryType.TRANSACTIONEND) {
+                        continue;
+                    }
+
+                    if (entry.getEntryType() == EntryType.ROWDATA) {
+                        RowChange rowChage = null;
+                        try {
+                            rowChage = RowChange.parseFrom(entry.getStoreValue());
+                        } catch (Exception e) {
+                            throw new RuntimeException("ERROR ## parser of eromanga-event has an error , data:"
+                                                       + entry.toString(), e);
+                        }
+
+                        EventType eventType = rowChage.getEventType();
+                        System.out.println(String.format("================> binlog[%s:%s] , name[%s,%s] , eventType : %s",
+                            entry.getHeader().getLogfileName(),
+                            entry.getHeader().getLogfileOffset(),
+                            entry.getHeader().getSchemaName(),
+                            entry.getHeader().getTableName(),
+                            eventType));
+
+                        for (RowData rowData : rowChage.getRowDatasList()) {
+                            if (eventType == EventType.DELETE) {
+                                print(rowData.getBeforeColumnsList());
+                            } else if (eventType == EventType.INSERT) {
+                                print(rowData.getAfterColumnsList());
+                            } else {
+                                System.out.println("-------> before");
+                                print(rowData.getBeforeColumnsList());
+                                System.out.println("-------> after");
+                                print(rowData.getAfterColumnsList());
+                            }
+                        }
+                    }
+                }
+
+                return true;
+            }
+
+        });
+        controller.setLogPositionManager(new AbstractLogPositionManager() {
+
+            @Override
+            public LogPosition getLatestIndexBy(String destination) {
+                return null;
+            }
+
+            @Override
+            public void persistLogPosition(String destination, LogPosition logPosition) throws CanalParseException {
+                System.out.println(logPosition);
+            }
+        });
+
+        controller.start();
+
+        try {
+            Thread.sleep(100 * 1000 * 1000L);
+        } catch (InterruptedException e) {
+            Assert.fail(e.getMessage());
+        }
+        controller.stop();
+    }
+
+    private void print(List<Column> columns) {
+        for (Column column : columns) {
+            System.out.println(column.getName() + " : " + column.getValue() + "    update=" + column.getUpdated());
+        }
+    }
+}

+ 2 - 2
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/SimpleDdlParserTest.java

@@ -3,8 +3,8 @@ package com.alibaba.otter.canal.parse.inbound.mysql;
 import org.junit.Assert;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.Test;
 
 
-import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.SimpleDdlParser;
-import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.SimpleDdlParser.DdlResult;
+import com.alibaba.otter.canal.parse.inbound.mysql.ddl.DdlResult;
+import com.alibaba.otter.canal.parse.inbound.mysql.ddl.SimpleDdlParser;
 import com.alibaba.otter.canal.protocol.CanalEntry.EventType;
 import com.alibaba.otter.canal.protocol.CanalEntry.EventType;
 
 
 public class SimpleDdlParserTest {
 public class SimpleDdlParserTest {

+ 37 - 0
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/MemoryTableMetaTest.java

@@ -0,0 +1,37 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.net.URL;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang.StringUtils;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
+
+import com.alibaba.otter.canal.parse.inbound.TableMeta;
+
+/**
+ * @author agapple 2017年8月1日 下午7:15:54
+ */
+@RunWith(SpringJUnit4ClassRunner.class)
+@ContextConfiguration(locations = { "/tsdb/mysql-tsdb.xml" })
+public class MemoryTableMetaTest {
+
+    @Test
+    public void testSimple() throws Throwable {
+        MemoryTableMeta memoryTableMeta = new MemoryTableMeta();
+        URL url = Thread.currentThread().getContextClassLoader().getResource("dummy.txt");
+        File dummyFile = new File(url.getFile());
+        File create = new File(dummyFile.getParent() + "/ddl", "create.sql");
+        String sql = StringUtils.join(IOUtils.readLines(new FileInputStream(create)), "\n");
+        memoryTableMeta.apply(null, "test", sql, null);
+
+        TableMeta meta = memoryTableMeta.find("test", "test");
+        System.out.println(meta);
+        Assert.assertTrue(meta.getFieldMetaByName("ID").isKey());
+    }
+}

+ 33 - 0
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/MetaHistoryDAOTest.java

@@ -0,0 +1,33 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb;
+
+import java.util.List;
+
+import javax.annotation.Resource;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
+
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaHistoryDAO;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaHistoryDO;
+
+/**
+ * Created by wanshao Date: 2017/9/20 Time: 下午5:00
+ **/
+@RunWith(SpringJUnit4ClassRunner.class)
+@ContextConfiguration(locations = { "/tsdb/mysql-tsdb.xml" })
+public class MetaHistoryDAOTest {
+
+    @Resource
+    MetaHistoryDAO metaHistoryDAO;
+
+    @Test
+    public void testSimple() {
+        List<MetaHistoryDO> metaHistoryDOList = metaHistoryDAO.findByTimestamp("test", 0L, 0L);
+        for (MetaHistoryDO metaHistoryDO : metaHistoryDOList) {
+            System.out.println(metaHistoryDO.getId());
+        }
+    }
+
+}

+ 18 - 0
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/TableMetaManagerBuilderTest.java

@@ -0,0 +1,18 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb;
+
+import org.junit.Test;
+import org.springframework.util.Assert;
+
+/**
+ * @author agapple 2017年10月12日 上午10:50:00
+ * @since 1.0.25
+ */
+public class TableMetaManagerBuilderTest {
+
+    @Test
+    public void testSimple() {
+        TableMetaTSDB tableMetaTSDB = TableMetaTSDBBuilder.build("test", "classpath:tsdb/mysql-tsdb.xml");
+        Assert.notNull(tableMetaTSDB);
+        TableMetaTSDBBuilder.destory("test");
+    }
+}

+ 46 - 0
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/TableMetaManagerTest.java

@@ -0,0 +1,46 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URL;
+
+import javax.annotation.Resource;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang.StringUtils;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
+
+import com.alibaba.otter.canal.protocol.position.EntryPosition;
+
+/**
+ * @author wanshao 2017年8月2日 下午4:11:45
+ * @since 3.2.5
+ */
+@RunWith(SpringJUnit4ClassRunner.class)
+@ContextConfiguration(locations = { "/tsdb/h2-tsdb.xml" })
+public class TableMetaManagerTest {
+
+    @Resource
+    DatabaseTableMeta tableMetaManager;
+
+    @Test
+    public void testSimple() throws FileNotFoundException, IOException {
+        tableMetaManager.init("test");
+
+        URL url = Thread.currentThread().getContextClassLoader().getResource("dummy.txt");
+        File dummyFile = new File(url.getFile());
+        File create = new File(dummyFile.getParent() + "/ddl", "create.sql");
+        EntryPosition position = new EntryPosition("mysql-bin.001115", 139177334L, 3065927853L, 1501660815000L);
+        String createSql = StringUtils.join(IOUtils.readLines(new FileInputStream(create)), "\n");
+        tableMetaManager.apply(position, "tddl5_00", createSql, null);
+
+        String alterSql = "alter table `test` add column name varchar(32) after c_varchar";
+        position = new EntryPosition("mysql-bin.001115", 139177334L, 3065927854L, 1501660815000L);
+        tableMetaManager.apply(position, "tddl5_00", alterSql, null);
+    }
+}

BIN
parse/src/test/resources/binlog/tsdb/mysql-bin.000001


BIN
parse/src/test/resources/binlog/tsdb/mysql-bin.000002


BIN
parse/src/test/resources/binlog/tsdb/mysql-bin.000003


+ 64 - 0
parse/src/test/resources/ddl/create.sql

@@ -0,0 +1,64 @@
+CREATE TABLE `test_all` (
+  `id` bigint(20) NOT NULL AUTO_INCREMENT,
+  `c_bit_1` bit(1) DEFAULT NULL,
+  `c_bit_8` bit(8) DEFAULT NULL,
+  `c_bit_16` bit(16) DEFAULT NULL,
+  `c_bit_32` bit(32) DEFAULT NULL,
+  `c_bit_64` bit(64) DEFAULT NULL,
+  `c_bool` boolean DEFAULT NULL,
+  `c_tinyint_1` tinyint(1) DEFAULT NULL,
+  `c_tinyint_4` tinyint(4) DEFAULT NULL,
+  `c_tinyint_8` tinyint(8) DEFAULT NULL,
+  `c_tinyint_8_un` tinyint(8) unsigned DEFAULT NULL,
+  `c_smallint_1` smallint(1) DEFAULT NULL,
+  `c_smallint_16` smallint(16) DEFAULT NULL,
+  `c_smallint_16_un` smallint(16) unsigned DEFAULT NULL,
+  `c_mediumint_1` mediumint(1) DEFAULT NULL,
+  `c_mediumint_24` mediumint(24) DEFAULT NULL,
+  `c_mediumint_24_un` mediumint(24) unsigned DEFAULT NULL,
+  `c_int_1` int(1) DEFAULT NULL,
+  `c_int_32` int(32) DEFAULT NULL,
+  `c_int_32_un` int(32) unsigned DEFAULT NULL,
+  `c_bigint_1` bigint(1) DEFAULT NULL,
+  `c_bigint_64` bigint(64) DEFAULT NULL,
+  `c_bigint_64_un` bigint(64) unsigned DEFAULT NULL,
+  `c_decimal` decimal(10,3) DEFAULT NULL,
+  `c_decimal_pr` decimal(10,3) DEFAULT NULL,
+  `c_float` float DEFAULT NULL,
+  `c_float_pr` float(10,3) DEFAULT NULL,
+  `c_float_un` float(10,3) unsigned DEFAULT NULL,
+  `c_double` double DEFAULT NULL,
+  `c_double_pr` double(10,3) DEFAULT NULL,
+  `c_double_un` double(10,3) unsigned DEFAULT NULL,
+  `c_date` date DEFAULT NULL COMMENT 'date',
+  `c_datetime` datetime DEFAULT NULL,
+  `c_datetime_1` datetime(1) DEFAULT NULL,
+  `c_datetime_3` datetime(3) DEFAULT NULL,
+  `c_datetime_6` datetime(6) DEFAULT NULL,
+  `c_timestamp` timestamp DEFAULT CURRENT_TIMESTAMP,
+  `c_timestamp_1` timestamp(1) DEFAULT 0,
+  `c_timestamp_3` timestamp(3) DEFAULT 0,
+  `c_timestamp_6` timestamp(6) DEFAULT 0,
+  `c_time` time DEFAULT NULL,
+  `c_time_1` time(1) DEFAULT NULL,
+  `c_time_3` time(3) DEFAULT NULL,
+  `c_time_6` time(6) DEFAULT NULL,
+  `c_year` year DEFAULT NULL,
+  `c_year_4` year(4) DEFAULT NULL,
+  `c_char` char(10) DEFAULT NULL,
+  `c_varchar` varchar(10) DEFAULT NULL,
+  `c_binary` binary(10) DEFAULT NULL,
+  `c_varbinary` varbinary(10) DEFAULT NULL,
+  `c_blob_tiny` tinyblob DEFAULT NULL,
+  `c_blob` blob DEFAULT NULL,
+  `c_blob_medium` mediumblob DEFAULT NULL,
+  `c_blob_long` longblob DEFAULT NULL,
+  `c_text_tiny` tinytext DEFAULT NULL,
+  `c_text` text DEFAULT NULL,
+  `c_text_medium` mediumtext DEFAULT NULL,
+  `c_text_long` longtext DEFAULT NULL,
+  `c_enum` enum('a','b','c') DEFAULT NULL,
+  `c_set` set('a','b','c') DEFAULT NULL,
+  `c_json` json DEFAULT NULL,
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COMMENT='10000000' 

Some files were not shown because too many files changed in this diff