瀏覽代碼

table meta support

kaimingwan 7 年之前
父節點
當前提交
e8da1c884b
共有 55 個文件被更改,包括 2925 次插入228 次删除
  1. 1 1
      client/src/test/java/logback.xml
  2. 146 0
      dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/BinlogPosition.java
  3. 26 17
      deployer/src/main/resources/example/instance.properties
  4. 1 1
      deployer/src/main/resources/logback.xml
  5. 42 0
      deployer/src/main/resources/spring/dal-dao.xml
  6. 43 0
      deployer/src/main/resources/spring/file-instance.xml
  7. 20 0
      deployer/src/main/resources/spring/mybatis-config.xml
  8. 57 0
      deployer/src/main/resources/spring/sql-map/sqlmap_history.xml
  9. 66 0
      deployer/src/main/resources/spring/sql-map/sqlmap_snapshot.xml
  10. 9 0
      deployer/src/main/resources/spring/sqlmap-config.xml
  11. 9 0
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/MysqlConnector.java
  12. 19 0
      parse/pom.xml
  13. 3 3
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/AbstractEventParser.java
  14. 1 1
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/BinlogParser.java
  15. 0 2
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/ErosaConnection.java
  16. 100 45
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/TableMeta.java
  17. 40 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/AbstractMysqlEventParser.java
  18. 1 1
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/LocalBinlogEventParser.java
  19. 99 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlConnection.java
  20. 10 4
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlEventParser.java
  21. 77 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/DBMSAction.java
  22. 42 30
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/LogEventConvert.java
  23. 144 80
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/TableMetaCache.java
  24. 116 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/ddl/DdlResult.java
  25. 201 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/ddl/DruidDdlParser.java
  26. 43 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/DataSourceFactoryTSDB.java
  27. 223 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/MemoryTableMeta.java
  28. 565 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/TableMetaManager.java
  29. 37 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/TableMetaTSDB.java
  30. 44 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/dao/MetaHistoryDAO.java
  31. 48 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/dao/MetaSnapshotDAO.java
  32. 141 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/model/MetaHistoryDO.java
  33. 105 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/model/MetaSnapshotDO.java
  34. 42 0
      parse/src/main/resources/dal-dao.xml
  35. 20 0
      parse/src/main/resources/mybatis-config.xml
  36. 57 0
      parse/src/main/resources/sql-map/sqlmap_history.xml
  37. 66 0
      parse/src/main/resources/sql-map/sqlmap_snapshot.xml
  38. 9 0
      parse/src/main/resources/sqlmap-config.xml
  39. 20 18
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/TableMetaCacheTest.java
  40. 3 2
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/group/GroupEventPaserTest.java
  41. 3 3
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/LocalBinlogDumpTest.java
  42. 8 8
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/LocalBinlogEventParserTest.java
  43. 2 2
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlDumpTest.java
  44. 3 3
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlEventParserTest.java
  45. 38 0
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/MemoryTableMetaTest.java
  46. 36 0
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/MetaHistoryDAOTest.java
  47. 47 0
      parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/TableMetaManagerTest.java
  48. 二進制
      parse/src/test/resources/binlog/tsdb/mysql-bin.000001
  49. 二進制
      parse/src/test/resources/binlog/tsdb/mysql-bin.000002
  50. 二進制
      parse/src/test/resources/binlog/tsdb/mysql-bin.000003
  51. 46 0
      parse/src/test/resources/dal-dao.xml
  52. 23 0
      parse/src/test/resources/ddl/create.sql
  53. 17 1
      pom.xml
  54. 4 4
      server/src/test/java/com/alibaba/otter/canal/server/BaseCanalServerWithEmbededTest.java
  55. 2 2
      server/src/test/java/com/alibaba/otter/canal/server/CanalServerWithEmbedded_StandaloneTest.java

+ 1 - 1
client/src/test/java/logback.xml

@@ -8,7 +8,7 @@
 		</encoder>
 		</encoder>
 	</appender>
 	</appender>
 	
 	
-	<root level="WARN">
+	<root level="INFO">
 		<appender-ref ref="STDOUT"/>
 		<appender-ref ref="STDOUT"/>
 	</root>
 	</root>
 </configuration>
 </configuration>

+ 146 - 0
dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/BinlogPosition.java

@@ -0,0 +1,146 @@
+package com.taobao.tddl.dbsync.binlog;
+
+import com.taobao.tddl.dbsync.binlog.LogPosition;
+
+/**
+ * Position inside binlog file
+ *
+ * @author <a href="mailto:seppo.jaakola@continuent.com">Seppo Jaakola</a>
+ * @version 1.0
+ */
+public class BinlogPosition extends LogPosition {
+
+    /* The source server_id of position, 0 invalid */
+    protected final long masterId;
+
+    /* The timestamp, in seconds, 0 invalid */
+    protected final long timestamp;
+
+    public BinlogPosition(String fileName, long position, long masterId, long timestamp){
+        super(fileName, position);
+        this.masterId = masterId;
+        this.timestamp = timestamp;
+    }
+
+    public BinlogPosition(LogPosition logPosition, long masterId, long timestamp){
+        super(logPosition.getFileName(), logPosition.getPosition());
+        this.masterId = masterId;
+        this.timestamp = timestamp;
+    }
+
+    public BinlogPosition(BinlogPosition binlogPosition){
+        super(binlogPosition.getFileName(), binlogPosition.getPosition());
+        this.masterId = binlogPosition.masterId;
+        this.timestamp = binlogPosition.timestamp;
+    }
+
+    private final static long[] pow10 = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000,
+        10000000000L, 100000000000L, 1000000000000L, 10000000000000L,
+        100000000000000L, 1000000000000000L, 10000000000000000L, 100000000000000000L,
+        1000000000000000000L };
+
+    public static String placeHolder(int bit, long number) {
+        if (bit > 18) {
+            throw new IllegalArgumentException("Bit must less than 18, but given " + bit);
+        }
+
+        final long max = pow10[bit];
+        if (number >= max) {
+            // 当 width < 数值的最大位数时,应该直接返回数值
+            return String.valueOf(number);
+        }
+
+        return String.valueOf(max + number).substring(1);
+    }
+
+    /**
+     * Return BinlogPosition in String representation. This serves as EventId for DBMSEvent.
+     */
+    public String format2String(final int positionMaxLen) {
+        String binlogSuffix = fileName;
+        String binlogOffset = placeHolder((int) positionMaxLen, position);
+        // 输出 '000001:0000000004@12+12314130'
+        StringBuffer buf = new StringBuffer(40);
+        buf.append(binlogSuffix);
+        buf.append(':');
+        buf.append(binlogOffset);
+        if (masterId != 0) {
+            buf.append('#');
+            buf.append(masterId);
+        }
+        if (timestamp != 0) {
+            buf.append('.');
+            buf.append(timestamp);
+        }
+        return buf.toString();
+    }
+
+    public static BinlogPosition parseFromString(String source) {
+        int colonIndex = source.indexOf(':');
+        int miscIndex = colonIndex + 1;
+        int sharpIndex = source.indexOf('#', miscIndex);
+        int semicolonIndex = source.indexOf(';', miscIndex); // NOTE: 向后兼容
+        int dotIndex = source.lastIndexOf('.');
+        if (colonIndex == -1) {
+            return null; // NOTE: 错误的位点
+        }
+
+        String binlogSuffix = source.substring(0, colonIndex);
+        long binlogPosition;
+        if (sharpIndex != -1) {
+            binlogPosition = Long.parseLong(source.substring(miscIndex, sharpIndex));
+        } else if (semicolonIndex != -1) {
+            binlogPosition = Long.parseLong(source.substring(miscIndex, semicolonIndex)); // NOTE: 向后兼容
+        } else if (dotIndex != -1) {
+            binlogPosition = Long.parseLong(source.substring(miscIndex, dotIndex));
+        } else {
+            binlogPosition = Long.parseLong(source.substring(miscIndex));
+        }
+
+        long masterId = 0; // NOTE: 默认值为 0
+        if (sharpIndex != -1) {
+            if (dotIndex != -1) {
+                masterId = Long.parseLong(source.substring(sharpIndex + 1, dotIndex));
+            } else {
+                masterId = Long.parseLong(source.substring(sharpIndex + 1));
+            }
+        }
+
+        long timestamp = 0; // NOTE: 默认值为 0
+        if (dotIndex != -1 && dotIndex > colonIndex) {
+            timestamp = Long.parseLong(source.substring(dotIndex + 1));
+        }
+
+        return new BinlogPosition(binlogSuffix, binlogPosition, // NL
+            masterId,
+            timestamp);
+    }
+
+    public String getFilePattern() {
+        final int index = fileName.indexOf('.');
+        if (index != -1) {
+            return fileName.substring(0, index);
+        }
+        return null;
+    }
+
+    public void setFilePattern(String filePattern) {
+        // We tolerate the event ID with or without the binlog prefix.
+        if (fileName.indexOf('.') < 0) {
+            fileName = filePattern + '.' + fileName;
+        }
+    }
+
+    public long getMasterId() {
+        return masterId;
+    }
+
+    public long getTimestamp() {
+        return timestamp;
+    }
+
+    @Override
+    public String toString() {
+        return format2String(10);
+    }
+}

+ 26 - 17
deployer/src/main/resources/example/instance.properties

@@ -1,27 +1,36 @@
 #################################################
 #################################################
 ## mysql serverId
 ## mysql serverId
-canal.instance.mysql.slaveId = 1234
-
+canal.instance.mysql.slaveId=1234
 # position info
 # position info
-canal.instance.master.address = 127.0.0.1:3306
-canal.instance.master.journal.name = 
-canal.instance.master.position = 
-canal.instance.master.timestamp = 
+canal.instance.master.address=127.0.0.1:3306
+canal.instance.master.journal.name=mysql-bin.000004
+canal.instance.master.position=25678
+canal.instance.master.timestamp=1506088042
+
+
+# tsdb info
+canal.instance.tsdb.address=127.0.0.1:3306
+canal.instance.tsdb.enable=true
+canal.instance.tsdb.dbUsername=canal
+canal.instance.tsdb.dbPassword=canal
+#  tsdb关联的目标db,和canal.instance.defaultDatabaseName意义相同
+canal.instance.tsdb.defaultDatabaseName=test
+canal.instance.tsdb.connectionCharset=UTF-8
+canal.instance.tsdb.driverClassName=com.mysql.jdbc.Driver
+canal.instance.tsdb.url=jdbc:mysql://127.0.0.1:3306/tsdb
+
 
 
-#canal.instance.standby.address = 
+#canal.instance.standby.address =
 #canal.instance.standby.journal.name =
 #canal.instance.standby.journal.name =
 #canal.instance.standby.position = 
 #canal.instance.standby.position = 
 #canal.instance.standby.timestamp = 
 #canal.instance.standby.timestamp = 
-
 # username/password
 # username/password
-canal.instance.dbUsername = canal
-canal.instance.dbPassword = canal
-canal.instance.defaultDatabaseName =
-canal.instance.connectionCharset = UTF-8
-
+canal.instance.dbUsername=canal
+canal.instance.dbPassword=canal
+canal.instance.defaultDatabaseName=test
+canal.instance.connectionCharset=UTF-8
 # table regex
 # table regex
-canal.instance.filter.regex = .*\\..*
-# table black regex
-canal.instance.filter.black.regex =  
-
+canal.instance.filter.regex=.*\\..*
+# table black regex , 默认过滤表结构管理的数据
+canal.instance.filter.black.regex=tsdb\\..*
 #################################################
 #################################################

+ 1 - 1
deployer/src/main/resources/logback.xml

@@ -75,7 +75,7 @@
     </logger>
     </logger>
     
     
 	<root level="WARN">
 	<root level="WARN">
-		<!--<appender-ref ref="STDOUT"/>-->
+		<appender-ref ref="STDOUT"/>
 		<appender-ref ref="CANAL-ROOT" />
 		<appender-ref ref="CANAL-ROOT" />
 	</root>
 	</root>
 </configuration>
 </configuration>

+ 42 - 0
deployer/src/main/resources/spring/dal-dao.xml

@@ -0,0 +1,42 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<beans xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xmlns="http://www.springframework.org/schema/beans" xmlns:tx="http://www.springframework.org/schema/tx"
+       xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd
+	http://www.springframework.org/schema/tx
+    http://www.springframework.org/schema/tx/spring-tx-2.0.xsd"
+       default-autowire="byName">
+    <tx:annotation-driven/>
+
+    <bean id="dataSource" class="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.DataSourceFactoryTSDB"
+          factory-method="getDataSource">
+        <constructor-arg index="0" value="${canal.instance.tsdb.address}"/>
+        <constructor-arg index="1" value="${canal.instance.tsdb.dbUsername}"/>
+        <constructor-arg index="2" value="${canal.instance.tsdb.dbPassword}"/>
+        <constructor-arg index="3" value="${canal.instance.tsdb.enable:true}"/>
+        <constructor-arg index="4" value="${canal.instance.tsdb.defaultDatabaseName}"/>
+        <constructor-arg index="5" value="${canal.instance.tsdb.url}"/>
+        <constructor-arg index="6" value="${canal.instance.tsdb.driverClassName}"/>
+    </bean>
+
+    <bean id="transactionManager" class="org.springframework.jdbc.datasource.DataSourceTransactionManager">
+        <property name="dataSource" ref="dataSource"/>
+    </bean>
+
+    <bean id="txTemplate" class="org.springframework.transaction.support.TransactionTemplate">
+        <property name="transactionManager" ref="transactionManager"></property>
+        <property name="propagationBehaviorName" value="PROPAGATION_REQUIRED"></property>
+    </bean>
+
+    <bean id="sqlMapClient" class="org.springframework.orm.ibatis.SqlMapClientFactoryBean">
+        <property name="dataSource" ref="dataSource"/>
+        <property name="configLocation" value="classpath:sqlmap-config.xml"/>
+    </bean>
+
+    <bean id="metaHistoryDAO" class="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaHistoryDAO">
+        <property name="sqlMapClient" ref="sqlMapClient"/>
+    </bean>
+
+    <bean id="metaSnapshotDAO" class="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaSnapshotDAO">
+        <property name="sqlMapClient" ref="sqlMapClient"/>
+    </bean>
+</beans>

+ 43 - 0
deployer/src/main/resources/spring/file-instance.xml

@@ -167,5 +167,48 @@
 		<property name="filterTableError" value="${canal.instance.filter.table.error:false}" />
 		<property name="filterTableError" value="${canal.instance.filter.table.error:false}" />
 		<property name="supportBinlogFormats" value="${canal.instance.binlog.format}" />
 		<property name="supportBinlogFormats" value="${canal.instance.binlog.format}" />
 		<property name="supportBinlogImages" value="${canal.instance.binlog.image}" />
 		<property name="supportBinlogImages" value="${canal.instance.binlog.image}" />
+
+		<!--表结构相关-->
+		<property name="tableMetaManager">
+			<bean class="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.TableMetaManager">
+				<property name="metaHistoryDAO" ref="metaHistoryDAO"/>
+				<property name="metaSnapshotDAO" ref="metaSnapshotDAO"/>
+			</bean>
+		</property>
+	</bean>
+
+	<!--tsdb related-->
+	<bean id="dataSource" class="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.DataSourceFactoryTSDB"
+		  factory-method="getDataSource">
+		<constructor-arg index="0" value="${canal.instance.tsdb.address}"/>
+		<constructor-arg index="1" value="${canal.instance.tsdb.dbUsername}"/>
+		<constructor-arg index="2" value="${canal.instance.tsdb.dbPassword}"/>
+		<constructor-arg index="3" value="${canal.instance.tsdb.enable:true}"/>
+		<constructor-arg index="4" value="${canal.instance.tsdb.defaultDatabaseName}"/>
+		<constructor-arg index="5" value="${canal.instance.tsdb.url}"/>
+		<constructor-arg index="6" value="${canal.instance.tsdb.driverClassName}"/>
+	</bean>
+
+	<bean id="transactionManager" class="org.springframework.jdbc.datasource.DataSourceTransactionManager">
+		<property name="dataSource" ref="dataSource"/>
+	</bean>
+
+	<bean id="txTemplate" class="org.springframework.transaction.support.TransactionTemplate">
+		<property name="transactionManager" ref="transactionManager"></property>
+		<property name="propagationBehaviorName" value="PROPAGATION_REQUIRED"></property>
+	</bean>
+
+	<bean id="sqlMapClient" class="org.springframework.orm.ibatis.SqlMapClientFactoryBean">
+		<property name="dataSource" ref="dataSource"/>
+		<property name="configLocation" value="classpath:sqlmap-config.xml"/>
+	</bean>
+
+	<bean id="metaHistoryDAO" class="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaHistoryDAO">
+		<property name="sqlMapClient" ref="sqlMapClient"/>
+	</bean>
+
+	<bean id="metaSnapshotDAO" class="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaSnapshotDAO">
+		<property name="sqlMapClient" ref="sqlMapClient"/>
 	</bean>
 	</bean>
+
 </beans>
 </beans>

+ 20 - 0
deployer/src/main/resources/spring/mybatis-config.xml

@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!DOCTYPE configuration
+        PUBLIC "-//mybatis.org//DTD Config 3.0//EN"
+        "http://mybatis.org/dtd/mybatis-3-config.dtd">
+<configuration>
+    <environments default="development">
+        <environment id="development">
+            <transactionManager type="JDBC"/>
+            <dataSource type="POOLED">
+                <property name="driver" value="com.mysql.jdbc.Driver"/>
+                <property name="url" value="jdbc:mysql://127.0.0.1:3306/tsdb"/>
+                <property name="username" value="canal"/>
+                <property name="password" value="canal"/>
+            </dataSource>
+        </environment>
+    </environments>
+    <mappers>
+        <package name="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.mapper"/>
+    </mappers>
+</configuration>

+ 57 - 0
deployer/src/main/resources/spring/sql-map/sqlmap_history.xml

@@ -0,0 +1,57 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!DOCTYPE sqlMap PUBLIC "-//ibatis.apache.org//DTD SQL Map 2.0//EN" "http://ibatis.apache.org/dtd/sql-map-2.dtd" >
+<sqlMap namespace="table_meta_history">
+
+    <typeAlias alias="metaHistoryDO" type="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.model.MetaHistoryDO"/>
+
+    <sql id="allColumns">
+        <![CDATA[
+
+		gmt_create,gmt_modified,binlog_file,binlog_offest,binlog_master_id,binlog_timestamp,use_schema,`schema`,`table`,`sql`,`type`,`extra`
+
+        ]]>
+    </sql>
+    <sql id="allVOColumns">
+        <![CDATA[
+
+		a.id as id,a.gmt_create as gmtCreate,a.gmt_modified as gmtModified,
+		a.binlog_file as binlogFile,a.binlog_offest as binlogOffest,a.binlog_master_id as binlogMasterId,a.binlog_timestamp as binlogTimestamp,
+		a.use_schema as useSchema,a.`schema` as `schema`,a.`table` as `table`,a.`sql` as `sql`,a.`type` as `type`,a.`extra` as `extra`
+
+        ]]>
+    </sql>
+
+    <select id="findByTimestamp" parameterClass="java.util.Map" resultClass="metaHistoryDO">
+        select
+        <include refid="allVOColumns"/>
+        from `canal_table_meta_history$env$` a
+        <![CDATA[
+        where binlog_timestamp >= #snapshotTimestamp# and binlog_timestamp <= #timestamp#
+        order by binlog_timestamp asc,id asc
+        ]]>
+    </select>
+
+    <insert id="insert" parameterClass="metaHistoryDO">
+        insert into `canal_table_meta_history` (<include refid="allColumns"/>)
+        values(now(),now(),#binlogFile#,#binlogOffest#,#binlogMasterId#,#binlogTimestamp#,#useSchema#,#schema#,#table#,#sql#,#type#,#extra#);
+        <selectKey resultClass="java.lang.Long" keyProperty="id">
+            SELECT last_insert_id()
+        </selectKey>
+    </insert>
+
+
+    <delete id="deleteByGmtModified" parameterClass="java.util.Map">
+        <![CDATA[
+
+		delete from `canal_table_meta_history`
+		where gmt_modified < date_sub(now(),interval #interval# second)
+
+        ]]>
+    </delete>
+
+
+    <select id="getAll" resultClass="metaHistoryDO">
+        select * from canal_table_meta_history
+    </select>
+
+</sqlMap>

+ 66 - 0
deployer/src/main/resources/spring/sql-map/sqlmap_snapshot.xml

@@ -0,0 +1,66 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!DOCTYPE sqlMap PUBLIC "-//ibatis.apache.org//DTD SQL Map 2.0//EN" "http://ibatis.apache.org/dtd/sql-map-2.dtd" >
+<sqlMap namespace="table_meta_snapshot">
+
+
+    <typeAlias alias="metaSnapshotDO" type="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.model.MetaSnapshotDO"/>
+
+
+    <typeAlias alias="tableMetaSnapshotDO"
+               type="com.alibaba.middleware.jingwei.biz.dataobject.CanalTableMetaSnapshotDO"/>
+    <sql id="allColumns">
+        <![CDATA[
+
+		gmt_create,gmt_modified,binlog_file,binlog_offest,binlog_master_id,binlog_timestamp,data,extra
+
+        ]]>
+    </sql>
+    <sql id="allVOColumns">
+        <![CDATA[
+
+		a.id as id,a.gmt_create as gmtCreate,a.gmt_modified as gmtModified,
+		a.binlog_file as binlogFile,a.binlog_offest as binlogOffest,a.binlog_master_id as binlogMaster_id,a.binlog_timestamp as binlogTimestamp,a.data as data,a.extra as extra
+
+        ]]>
+    </sql>
+
+    <select id="findByTimestamp" parameterClass="java.util.Map" resultClass="metaSnapshotDO">
+        select
+        <include refid="allVOColumns"/>
+        from `canal_table_meta_snapshot$env$` a
+        <![CDATA[
+        where  binlog_timestamp < #timestamp#
+        order by binlog_timestamp desc,id desc
+        limit 1
+        ]]>
+    </select>
+
+    <insert id="insert" parameterClass="metaSnapshotDO">
+        insert into `canal_table_meta_snapshot` (<include refid="allColumns"/>)
+        values(now(),now(),#binlogFile#,#binlogOffest#,#binlogMasterId#,#binlogTimestamp#,#data#,#extra#);
+        <selectKey resultClass="java.lang.Long" keyProperty="id">
+            SELECT last_insert_id()
+        </selectKey>
+    </insert>
+
+    <update id="update" parameterClass="metaSnapshotDO">
+        update `canal_table_meta_snapshot` set gmt_modified=now(),
+        binlog_file=#binlogFile#,binlog_offest=#binlogOffest#,binlog_master_id=#binlogMasterId#,binlog_timestamp=#binlogTimestamp#,data=#data#,extra=#extra#
+        where binlog_timestamp=0
+    </update>
+
+
+    <delete id="deleteByGmtModified" parameterClass="java.util.Map">
+        <![CDATA[
+
+		delete from `canal_table_meta_snapshot`
+		where gmt_modified < date_sub(now(),interval #interval# second)
+
+        ]]>
+    </delete>
+
+    <select id="getAll" resultClass="metaSnapshotDO">
+        select * from canal_table_meta_snapshot
+    </select>
+
+</sqlMap>

+ 9 - 0
deployer/src/main/resources/spring/sqlmap-config.xml

@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!DOCTYPE sqlMapConfig PUBLIC "-//iBATIS.com//DTD SQL Map Config 2.0//EN"
+        "http://www.ibatis.com/dtd/sql-map-config-2.dtd">
+<sqlMapConfig>
+    <settings useStatementNamespaces="true"/>
+
+    <sqlMap resource="sql-map/sqlmap_history.xml"/>
+    <sqlMap resource="sql-map/sqlmap_snapshot.xml"/>
+</sqlMapConfig>

+ 9 - 0
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/MysqlConnector.java

@@ -35,6 +35,7 @@ public class MysqlConnector {
     private byte                charsetNumber     = 33;
     private byte                charsetNumber     = 33;
     private String              defaultSchema     = "retl";
     private String              defaultSchema     = "retl";
     private int                 soTimeout         = 30 * 1000;
     private int                 soTimeout         = 30 * 1000;
+    private int connTimeout = 5 * 1000;
     private int                 receiveBufferSize = 16 * 1024;
     private int                 receiveBufferSize = 16 * 1024;
     private int                 sendBufferSize    = 16 * 1024;
     private int                 sendBufferSize    = 16 * 1024;
 
 
@@ -129,6 +130,7 @@ public class MysqlConnector {
         connector.setReceiveBufferSize(getReceiveBufferSize());
         connector.setReceiveBufferSize(getReceiveBufferSize());
         connector.setSendBufferSize(getSendBufferSize());
         connector.setSendBufferSize(getSendBufferSize());
         connector.setSoTimeout(getSoTimeout());
         connector.setSoTimeout(getSoTimeout());
+        connector.setConnTimeout(connTimeout);
         return connector;
         return connector;
     }
     }
 
 
@@ -325,4 +327,11 @@ public class MysqlConnector {
         this.dumping = dumping;
         this.dumping = dumping;
     }
     }
 
 
+    public int getConnTimeout() {
+        return connTimeout;
+    }
+
+    public void setConnTimeout(int connTimeout) {
+        this.connTimeout = connTimeout;
+    }
 }
 }

+ 19 - 0
parse/pom.xml

@@ -45,11 +45,30 @@
 			<artifactId>canal.parse.driver</artifactId>
 			<artifactId>canal.parse.driver</artifactId>
 			<version>${project.version}</version>
 			<version>${project.version}</version>
 		</dependency>
 		</dependency>
+		<dependency>
+			<groupId>com.alibaba</groupId>
+			<artifactId>druid</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>mysql</groupId>
+			<artifactId>mysql-connector-java</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.ibatis</groupId>
+			<artifactId>ibatis-sqlmap</artifactId>
+			<version>2.3.4.726</version>
+		</dependency>
 		<!-- test dependency -->
 		<!-- test dependency -->
 		<dependency>
 		<dependency>
 			<groupId>junit</groupId>
 			<groupId>junit</groupId>
 			<artifactId>junit</artifactId>
 			<artifactId>junit</artifactId>
 			<scope>test</scope>
 			<scope>test</scope>
 		</dependency>
 		</dependency>
+		<dependency>
+			<groupId>org.springframework</groupId>
+			<artifactId>spring-test</artifactId>
+			<scope>test</scope>
+		</dependency>
+
 	</dependencies>
 	</dependencies>
 </project>
 </project>

+ 3 - 3
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/AbstractEventParser.java

@@ -173,7 +173,7 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
 
 
                             public boolean sink(EVENT event) {
                             public boolean sink(EVENT event) {
                                 try {
                                 try {
-                                    CanalEntry.Entry entry = parseAndProfilingIfNecessary(event);
+                                    CanalEntry.Entry entry = parseAndProfilingIfNecessary(event,false);
 
 
                                     if (!running) {
                                     if (!running) {
                                         return false;
                                         return false;
@@ -320,13 +320,13 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
         return result;
         return result;
     }
     }
 
 
-    protected CanalEntry.Entry parseAndProfilingIfNecessary(EVENT bod) throws Exception {
+    protected CanalEntry.Entry parseAndProfilingIfNecessary(EVENT bod,boolean isSeek) throws Exception {
         long startTs = -1;
         long startTs = -1;
         boolean enabled = getProfilingEnabled();
         boolean enabled = getProfilingEnabled();
         if (enabled) {
         if (enabled) {
             startTs = System.currentTimeMillis();
             startTs = System.currentTimeMillis();
         }
         }
-        CanalEntry.Entry event = binlogParser.parse(bod);
+        CanalEntry.Entry event = binlogParser.parse(bod,isSeek);
         if (enabled) {
         if (enabled) {
             this.parsingInterval = System.currentTimeMillis() - startTs;
             this.parsingInterval = System.currentTimeMillis() - startTs;
         }
         }

+ 1 - 1
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/BinlogParser.java

@@ -11,7 +11,7 @@ import com.alibaba.otter.canal.protocol.CanalEntry;
  */
  */
 public interface BinlogParser<T> extends CanalLifeCycle {
 public interface BinlogParser<T> extends CanalLifeCycle {
 
 
-    CanalEntry.Entry parse(T event) throws CanalParseException;
+    CanalEntry.Entry parse(T event,boolean isSeek) throws CanalParseException;
 
 
     void reset();
     void reset();
 }
 }

+ 0 - 2
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/ErosaConnection.java

@@ -15,8 +15,6 @@ public interface ErosaConnection {
 
 
     public void disconnect() throws IOException;
     public void disconnect() throws IOException;
 
 
-    public boolean isConnected();
-
     /**
     /**
      * 用于快速数据查找,和dump的区别在于,seek会只给出部分的数据
      * 用于快速数据查找,和dump的区别在于,seek会只给出部分的数据
      */
      */

+ 100 - 45
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/TableMeta.java

@@ -3,51 +3,79 @@ package com.alibaba.otter.canal.parse.inbound;
 import java.util.ArrayList;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.List;
 
 
-import org.apache.commons.lang.StringUtils;
-
 import com.taobao.tddl.dbsync.binlog.event.TableMapLogEvent;
 import com.taobao.tddl.dbsync.binlog.event.TableMapLogEvent;
+import org.apache.commons.lang.StringUtils;
 
 
 /**
 /**
  * 描述数据meta对象,mysql binlog中对应的{@linkplain TableMapLogEvent}包含的信息不全
  * 描述数据meta对象,mysql binlog中对应的{@linkplain TableMapLogEvent}包含的信息不全
- * 
+ *
  * <pre>
  * <pre>
  * 1. 主键信息
  * 1. 主键信息
  * 2. column name
  * 2. column name
  * 3. unsigned字段
  * 3. unsigned字段
  * </pre>
  * </pre>
- * 
+ *
  * @author jianghang 2013-1-18 下午12:24:59
  * @author jianghang 2013-1-18 下午12:24:59
  * @version 1.0.0
  * @version 1.0.0
  */
  */
 public class TableMeta {
 public class TableMeta {
 
 
-    private String          fullName; // schema.table
-    private List<FieldMeta> fileds;
+    private String schema;
+    private String table;
+    private List<FieldMeta> fields = new ArrayList<TableMeta.FieldMeta>();
+    private String ddl; // 表结构的DDL语句
+
+    public TableMeta() {
 
 
-    public TableMeta(String fullName, List<FieldMeta> fileds){
-        this.fullName = fullName;
-        this.fileds = fileds;
+    }
+
+    public TableMeta(String schema, String table, List<FieldMeta> fields) {
+        this.schema = schema;
+        this.table = table;
+        this.fields = fields;
     }
     }
 
 
     public String getFullName() {
     public String getFullName() {
-        return fullName;
+        return schema + "." + table;
+    }
+
+    public String getSchema() {
+        return schema;
     }
     }
 
 
-    public void setFullName(String fullName) {
-        this.fullName = fullName;
+    public void setSchema(String schema) {
+        this.schema = schema;
     }
     }
 
 
-    public List<FieldMeta> getFileds() {
-        return fileds;
+    public String getTable() {
+        return table;
     }
     }
 
 
-    public void setFileds(List<FieldMeta> fileds) {
-        this.fileds = fileds;
+    public void setTable(String table) {
+        this.table = table;
+    }
+
+    public List<FieldMeta> getFields() {
+        return fields;
+    }
+
+    public void setFields(List<FieldMeta> fileds) {
+        this.fields = fileds;
+    }
+
+    public FieldMeta getFieldMetaByName(String name) {
+        for (FieldMeta meta : fields) {
+            if (meta.getColumnName().equalsIgnoreCase(name)) {
+                return meta;
+            }
+        }
+
+        throw new RuntimeException("unknow column : " + name);
     }
     }
 
 
     public List<FieldMeta> getPrimaryFields() {
     public List<FieldMeta> getPrimaryFields() {
         List<FieldMeta> primarys = new ArrayList<TableMeta.FieldMeta>();
         List<FieldMeta> primarys = new ArrayList<TableMeta.FieldMeta>();
-        for (FieldMeta meta : fileds) {
+        for (FieldMeta meta : fields) {
             if (meta.isKey()) {
             if (meta.isKey()) {
                 primarys.add(meta);
                 primarys.add(meta);
             }
             }
@@ -56,12 +84,47 @@ public class TableMeta {
         return primarys;
         return primarys;
     }
     }
 
 
+    public String getDdl() {
+        return ddl;
+    }
+
+    public void setDdl(String ddl) {
+        this.ddl = ddl;
+    }
+
+    public void addFieldMeta(FieldMeta fieldMeta) {
+        this.fields.add(fieldMeta);
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder data = new StringBuilder();
+        data.append("TableMeta [schema=" + schema + ", table=" + table + ", fileds=");
+        for (FieldMeta field : fields) {
+            data.append("\n\t").append(field.toString());
+        }
+        data.append("\n]");
+        return data.toString();
+    }
+
     public static class FieldMeta {
     public static class FieldMeta {
 
 
+        public FieldMeta() {
+
+        }
+
+        public FieldMeta(String columnName, String columnType, boolean nullable, boolean key, String defaultValue) {
+            this.columnName = columnName;
+            this.columnType = columnType;
+            this.nullable = nullable;
+            this.key = key;
+            this.defaultValue = defaultValue;
+        }
+
         private String columnName;
         private String columnName;
         private String columnType;
         private String columnType;
-        private String isNullable;
-        private String iskey;
+        private boolean nullable;
+        private boolean key;
         private String defaultValue;
         private String defaultValue;
         private String extra;
         private String extra;
 
 
@@ -81,20 +144,8 @@ public class TableMeta {
             this.columnType = columnType;
             this.columnType = columnType;
         }
         }
 
 
-        public String getIsNullable() {
-            return isNullable;
-        }
-
-        public void setIsNullable(String isNullable) {
-            this.isNullable = isNullable;
-        }
-
-        public String getIskey() {
-            return iskey;
-        }
-
-        public void setIskey(String iskey) {
-            this.iskey = iskey;
+        public void setNullable(boolean nullable) {
+            this.nullable = nullable;
         }
         }
 
 
         public String getDefaultValue() {
         public String getDefaultValue() {
@@ -105,30 +156,34 @@ public class TableMeta {
             this.defaultValue = defaultValue;
             this.defaultValue = defaultValue;
         }
         }
 
 
-        public String getExtra() {
-            return extra;
+        public boolean isUnsigned() {
+            return StringUtils.containsIgnoreCase(columnType, "unsigned");
         }
         }
 
 
-        public void setExtra(String extra) {
-            this.extra = extra;
+        public boolean isNullable() {
+            return nullable;
         }
         }
 
 
-        public boolean isUnsigned() {
-            return StringUtils.containsIgnoreCase(columnType, "unsigned");
+        public boolean isKey() {
+            return key;
         }
         }
 
 
-        public boolean isKey() {
-            return StringUtils.equalsIgnoreCase(iskey, "PRI");
+        public void setKey(boolean key) {
+            this.key = key;
         }
         }
 
 
-        public boolean isNullable() {
-            return StringUtils.equalsIgnoreCase(isNullable, "YES");
+        public String getExtra() {
+            return extra;
+        }
+
+        public void setExtra(String extra) {
+            this.extra = extra;
         }
         }
 
 
         public String toString() {
         public String toString() {
             return "FieldMeta [columnName=" + columnName + ", columnType=" + columnType + ", defaultValue="
             return "FieldMeta [columnName=" + columnName + ", columnType=" + columnType + ", defaultValue="
-                   + defaultValue + ", extra=" + extra + ", isNullable=" + isNullable + ", iskey=" + iskey + "]";
+                + defaultValue + ", nullable=" + nullable + ", key=" + key + "]";
         }
         }
-
     }
     }
+
 }
 }

+ 40 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/AbstractMysqlEventParser.java

@@ -2,6 +2,9 @@ package com.alibaba.otter.canal.parse.inbound.mysql;
 
 
 import java.nio.charset.Charset;
 import java.nio.charset.Charset;
 
 
+import javax.annotation.Resource;
+
+import com.taobao.tddl.dbsync.binlog.BinlogPosition;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
 
 
@@ -10,6 +13,7 @@ import com.alibaba.otter.canal.filter.aviater.AviaterRegexFilter;
 import com.alibaba.otter.canal.parse.inbound.AbstractEventParser;
 import com.alibaba.otter.canal.parse.inbound.AbstractEventParser;
 import com.alibaba.otter.canal.parse.inbound.BinlogParser;
 import com.alibaba.otter.canal.parse.inbound.BinlogParser;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.LogEventConvert;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.LogEventConvert;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.TableMetaManager;
 
 
 public abstract class AbstractMysqlEventParser extends AbstractEventParser {
 public abstract class AbstractMysqlEventParser extends AbstractEventParser {
 
 
@@ -25,6 +29,10 @@ public abstract class AbstractMysqlEventParser extends AbstractEventParser {
     protected boolean           filterRows              = false;
     protected boolean           filterRows              = false;
     protected boolean           filterTableError        = false;
     protected boolean           filterTableError        = false;
 
 
+    @Resource
+    protected TableMetaManager tableMetaManager;
+    protected boolean useDruidDdlFilter = true;
+
     protected BinlogParser buildParser() {
     protected BinlogParser buildParser() {
         LogEventConvert convert = new LogEventConvert();
         LogEventConvert convert = new LogEventConvert();
         if (eventFilter != null && eventFilter instanceof AviaterRegexFilter) {
         if (eventFilter != null && eventFilter instanceof AviaterRegexFilter) {
@@ -41,6 +49,9 @@ public abstract class AbstractMysqlEventParser extends AbstractEventParser {
         convert.setFilterQueryDdl(filterQueryDdl);
         convert.setFilterQueryDdl(filterQueryDdl);
         convert.setFilterRows(filterRows);
         convert.setFilterRows(filterRows);
         convert.setFilterTableError(filterTableError);
         convert.setFilterTableError(filterTableError);
+
+        //初始化parser的时候也初始化管理mysql 表结构的管理器
+        tableMetaManager.init();
         return convert;
         return convert;
     }
     }
 
 
@@ -53,6 +64,20 @@ public abstract class AbstractMysqlEventParser extends AbstractEventParser {
         }
         }
     }
     }
 
 
+    /**
+     * 回滚到指定位点
+     * @param position
+     * @return
+     */
+    protected boolean processTableMeta(BinlogPosition position) {
+        if (tableMetaManager != null) {
+            return tableMetaManager.rollback(position);
+        }
+
+        return true;
+    }
+
+
     public void setEventBlackFilter(CanalEventFilter eventBlackFilter) {
     public void setEventBlackFilter(CanalEventFilter eventBlackFilter) {
         super.setEventBlackFilter(eventBlackFilter);
         super.setEventBlackFilter(eventBlackFilter);
 
 
@@ -97,4 +122,19 @@ public abstract class AbstractMysqlEventParser extends AbstractEventParser {
         this.filterTableError = filterTableError;
         this.filterTableError = filterTableError;
     }
     }
 
 
+    public TableMetaManager getTableMetaManager() {
+        return tableMetaManager;
+    }
+
+    public void setTableMetaManager(TableMetaManager tableMetaManager) {
+        this.tableMetaManager = tableMetaManager;
+    }
+
+    public boolean isUseDruidDdlFilter() {
+        return useDruidDdlFilter;
+    }
+
+    public void setUseDruidDdlFilter(boolean useDruidDdlFilter) {
+        this.useDruidDdlFilter = useDruidDdlFilter;
+    }
 }
 }

+ 1 - 1
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/LocalBinlogEventParser.java

@@ -50,7 +50,7 @@ public class LocalBinlogEventParser extends AbstractMysqlEventParser implements
             throw new CanalParseException(e);
             throw new CanalParseException(e);
         }
         }
 
 
-        tableMetaCache = new TableMetaCache(metaConnection);
+        tableMetaCache = new TableMetaCache(metaConnection,tableMetaManager);
         ((LogEventConvert) binlogParser).setTableMetaCache(tableMetaCache);
         ((LogEventConvert) binlogParser).setTableMetaCache(tableMetaCache);
     }
     }
 
 

+ 99 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlConnection.java

@@ -3,8 +3,14 @@ package com.alibaba.otter.canal.parse.inbound.mysql;
 import java.io.IOException;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.InetSocketAddress;
 import java.nio.charset.Charset;
 import java.nio.charset.Charset;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.List;
 import java.util.List;
+import java.util.Properties;
 
 
+import com.mysql.jdbc.Driver;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.exception.ExceptionUtils;
 import org.apache.commons.lang.exception.ExceptionUtils;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
@@ -21,6 +27,8 @@ import com.alibaba.otter.canal.parse.exception.CanalParseException;
 import com.alibaba.otter.canal.parse.inbound.ErosaConnection;
 import com.alibaba.otter.canal.parse.inbound.ErosaConnection;
 import com.alibaba.otter.canal.parse.inbound.SinkFunction;
 import com.alibaba.otter.canal.parse.inbound.SinkFunction;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.DirectLogFetcher;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.DirectLogFetcher;
+import com.alibaba.otter.canal.parse.support.AuthenticationInfo;
+
 import com.taobao.tddl.dbsync.binlog.LogContext;
 import com.taobao.tddl.dbsync.binlog.LogContext;
 import com.taobao.tddl.dbsync.binlog.LogDecoder;
 import com.taobao.tddl.dbsync.binlog.LogDecoder;
 import com.taobao.tddl.dbsync.binlog.LogEvent;
 import com.taobao.tddl.dbsync.binlog.LogEvent;
@@ -35,20 +43,59 @@ public class MysqlConnection implements ErosaConnection {
     private BinlogFormat        binlogFormat;
     private BinlogFormat        binlogFormat;
     private BinlogImage         binlogImage;
     private BinlogImage         binlogImage;
 
 
+    // tsdb releated
+    private AuthenticationInfo authInfo;
+    private Connection conn;
+    protected int connTimeout = 5 * 1000; // 5秒
+    protected int soTimeout = 60 * 60 * 1000; // 1小时
+    protected int bufferSize = 16 * 1024;
+
+
     public MysqlConnection(){
     public MysqlConnection(){
     }
     }
 
 
     public MysqlConnection(InetSocketAddress address, String username, String password){
     public MysqlConnection(InetSocketAddress address, String username, String password){
+        authInfo = new AuthenticationInfo();
+        authInfo.setAddress(address);
+        authInfo.setUsername(username);
+        authInfo.setPassword(password);
         connector = new MysqlConnector(address, username, password);
         connector = new MysqlConnector(address, username, password);
+        //将connection里面的参数透传下
+        connector.setSoTimeout(soTimeout);
+        connector.setConnTimeout(connTimeout);
     }
     }
 
 
     public MysqlConnection(InetSocketAddress address, String username, String password, byte charsetNumber,
     public MysqlConnection(InetSocketAddress address, String username, String password, byte charsetNumber,
                            String defaultSchema){
                            String defaultSchema){
+        authInfo = new AuthenticationInfo();
+        authInfo.setAddress(address);
+        authInfo.setUsername(username);
+        authInfo.setPassword(password);
+        authInfo.setDefaultDatabaseName(defaultSchema);
         connector = new MysqlConnector(address, username, password, charsetNumber, defaultSchema);
         connector = new MysqlConnector(address, username, password, charsetNumber, defaultSchema);
+        //将connection里面的参数透传下
+        connector.setSoTimeout(soTimeout);
+        connector.setConnTimeout(connTimeout);
     }
     }
 
 
     public void connect() throws IOException {
     public void connect() throws IOException {
         connector.connect();
         connector.connect();
+        //准备一个connection连接给tsdb查询用
+        Properties info = new Properties();
+        info.put("user", authInfo.getUsername());
+        info.put("password", authInfo.getPassword());
+        info.put("connectTimeout", String.valueOf(connTimeout));
+        info.put("socketTimeout", String.valueOf(soTimeout));
+        String url = "jdbc:mysql://" + authInfo.getAddress().getHostName() + ":"
+            + String.valueOf(authInfo.getAddress().getPort()) + "?allowMultiQueries=true";
+        try {
+            Driver driver = new com.mysql.jdbc.Driver();
+            conn = driver.connect(url, info);
+            // conn = DriverManager.getConnection(url, info);
+        } catch (SQLException e) {
+            throw new CanalParseException(e);
+        }
+
     }
     }
 
 
     public void reconnect() throws IOException {
     public void reconnect() throws IOException {
@@ -68,6 +115,25 @@ public class MysqlConnection implements ErosaConnection {
         return exector.query(cmd);
         return exector.query(cmd);
     }
     }
 
 
+    public <T> T query(String sql, ProcessJdbcResult<T> processor) {
+        Statement stmt = null;
+        try {
+            stmt = conn.createStatement();
+            ResultSet rs = stmt.executeQuery(sql);
+            return processor.process(rs);
+        } catch (SQLException e) {
+            throw new CanalParseException(e);
+        } finally {
+            if (stmt != null) {
+                try {
+                    stmt.close();
+                } catch (SQLException e) {
+                    // ignore
+                }
+            }
+        }
+    }
+
     public void update(String cmd) throws IOException {
     public void update(String cmd) throws IOException {
         MysqlUpdateExecutor exector = new MysqlUpdateExecutor(connector);
         MysqlUpdateExecutor exector = new MysqlUpdateExecutor(connector);
         exector.update(cmd);
         exector.update(cmd);
@@ -147,6 +213,8 @@ public class MysqlConnection implements ErosaConnection {
         connection.setCharset(getCharset());
         connection.setCharset(getCharset());
         connection.setSlaveId(getSlaveId());
         connection.setSlaveId(getSlaveId());
         connection.setConnector(connector.fork());
         connection.setConnector(connector.fork());
+        //set authInfo
+        connection.setAuthInfo(authInfo);
         return connection;
         return connection;
     }
     }
 
 
@@ -382,4 +450,35 @@ public class MysqlConnection implements ErosaConnection {
         return binlogImage;
         return binlogImage;
     }
     }
 
 
+    public Connection getConn() {
+        return conn;
+    }
+    public InetSocketAddress getAddress() {
+        return authInfo.getAddress();
+    }
+
+    public void setConnTimeout(int connTimeout) {
+        this.connTimeout = connTimeout;
+    }
+
+    public void setSoTimeout(int soTimeout) {
+        this.soTimeout = soTimeout;
+    }
+
+    public void setBufferSize(int bufferSize) {
+        this.bufferSize = bufferSize;
+    }
+
+    public AuthenticationInfo getAuthInfo() {
+        return authInfo;
+    }
+
+    public void setAuthInfo(AuthenticationInfo authInfo) {
+        this.authInfo = authInfo;
+    }
+
+    public static interface ProcessJdbcResult<T> {
+
+        public T process(ResultSet rs) throws SQLException;
+    }
 }
 }

+ 10 - 4
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlEventParser.java

@@ -69,6 +69,7 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
 
 
     // update by yishun.chen,特殊异常处理参数
     // update by yishun.chen,特殊异常处理参数
     private int                dumpErrorCount                    = 0;        // binlogDump失败异常计数
     private int                dumpErrorCount                    = 0;        // binlogDump失败异常计数
+    private int dumpTimeoutCount = 0;// socketTimeout异常
     private int                dumpErrorCountThreshold           = 2;        // binlogDump失败异常计数阀值
     private int                dumpErrorCountThreshold           = 2;        // binlogDump失败异常计数阀值
 
 
     protected ErosaConnection buildErosaConnection() {
     protected ErosaConnection buildErosaConnection() {
@@ -118,7 +119,12 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
                 }
                 }
             }
             }
 
 
-            tableMetaCache = new TableMetaCache(metaConnection);
+            if(tableMetaManager != null){
+                tableMetaManager.setConnection(metaConnection);
+                tableMetaManager.setFilter(eventFilter);
+            }
+
+            tableMetaCache = new TableMetaCache(metaConnection,tableMetaManager);
             ((LogEventConvert) binlogParser).setTableMetaCache(tableMetaCache);
             ((LogEventConvert) binlogParser).setTableMetaCache(tableMetaCache);
         }
         }
     }
     }
@@ -438,7 +444,7 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
 
 
             public boolean sink(LogEvent event) {
             public boolean sink(LogEvent event) {
                 try {
                 try {
-                    CanalEntry.Entry entry = parseAndProfilingIfNecessary(event);
+                    CanalEntry.Entry entry = parseAndProfilingIfNecessary(event,true);
                     if (entry == null) {
                     if (entry == null) {
                         return true;
                         return true;
                     }
                     }
@@ -471,7 +477,7 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
 
 
                 public boolean sink(LogEvent event) {
                 public boolean sink(LogEvent event) {
                     try {
                     try {
-                        CanalEntry.Entry entry = parseAndProfilingIfNecessary(event);
+                        CanalEntry.Entry entry = parseAndProfilingIfNecessary(event,true);
                         if (entry == null) {
                         if (entry == null) {
                             return true;
                             return true;
                         }
                         }
@@ -683,7 +689,7 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
                 public boolean sink(LogEvent event) {
                 public boolean sink(LogEvent event) {
                     EntryPosition entryPosition = null;
                     EntryPosition entryPosition = null;
                     try {
                     try {
-                        CanalEntry.Entry entry = parseAndProfilingIfNecessary(event);
+                        CanalEntry.Entry entry = parseAndProfilingIfNecessary(event,true);
                         if (entry == null) {
                         if (entry == null) {
                             return true;
                             return true;
                         }
                         }

+ 77 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/DBMSAction.java

@@ -0,0 +1,77 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.dbsync;
+
+/**
+ * Defines database change action types: INSERT, UPDATE, DELETE, OTHER.
+ *
+ * @author <a href="mailto:changyuan.lh@taobao.com">Changyuan.lh</a>
+ * @version 1.0
+ */
+public enum DBMSAction {
+
+    INSERT('I'), UPDATE('U'), DELETE('D'), REPLACE('R'), OTHER('O'), CREATE('C'), ALTER('A'), ERASE('E'), QUERY('Q'),
+    ROWQUERY('W'), TRUNCATE('T'), CINDEX('X'), DINDEX('Y'), RENAME('Z');
+
+    protected final byte bValue;
+
+    DBMSAction(char ch){
+        this.bValue = (byte) ch;
+    }
+
+    /**
+     * Return action type from byte value.
+     */
+    public static DBMSAction fromValue(int iValue) {
+        switch ((char) iValue) {
+            case 'I':
+            case 'M': // MERGE (Oracle only)
+                return INSERT;
+            case 'U':
+                return UPDATE;
+            case 'D': // DELETE
+                return DELETE;
+            case 'R': // REPLACE
+                return REPLACE;
+            case 'C':
+                return CREATE;
+            case 'A':
+                return ALTER;
+            case 'E':
+                return ERASE;
+            case 'Q':
+                return QUERY;
+            case 'W':
+                return ROWQUERY;
+            case 'T':
+                return TRUNCATE;
+            case 'X':
+                return CINDEX;
+            case 'Y':
+                return DINDEX;
+            case 'Z':
+                return RENAME;
+        }
+        return OTHER;
+    }
+
+    /**
+     * Return action type from query.
+     */
+    public static DBMSAction fromQuery(String query) {
+        int length = query.length();
+        for (int index = 0; index < length; index++) {
+            char ch = query.charAt(index);
+            if (!Character.isWhitespace(ch)) {
+                return DBMSAction.fromValue(ch);
+            }
+        }
+
+        return OTHER;
+    }
+
+    /**
+     * Return byte value of action type.
+     */
+    public byte value() {
+        return bValue;
+    }
+}

+ 42 - 30
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/LogEventConvert.java

@@ -9,6 +9,7 @@ import java.sql.Types;
 import java.util.BitSet;
 import java.util.BitSet;
 import java.util.List;
 import java.util.List;
 
 
+import com.taobao.tddl.dbsync.binlog.BinlogPosition;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.exception.ExceptionUtils;
 import org.apache.commons.lang.exception.ExceptionUtils;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
@@ -87,7 +88,8 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
     // 新增rows过滤,用于仅订阅除rows以外的数据
     // 新增rows过滤,用于仅订阅除rows以外的数据
     private boolean                     filterRows          = false;
     private boolean                     filterRows          = false;
 
 
-    public Entry parse(LogEvent logEvent) throws CanalParseException {
+    @Override
+    public Entry parse(LogEvent logEvent, boolean isSeek) throws CanalParseException {
         if (logEvent == null || logEvent instanceof UnknownLogEvent) {
         if (logEvent == null || logEvent instanceof UnknownLogEvent) {
             return null;
             return null;
         }
         }
@@ -98,7 +100,7 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
                 binlogFileName = ((RotateLogEvent) logEvent).getFilename();
                 binlogFileName = ((RotateLogEvent) logEvent).getFilename();
                 break;
                 break;
             case LogEvent.QUERY_EVENT:
             case LogEvent.QUERY_EVENT:
-                return parseQueryEvent((QueryLogEvent) logEvent);
+                return parseQueryEvent((QueryLogEvent) logEvent,isSeek);
             case LogEvent.XID_EVENT:
             case LogEvent.XID_EVENT:
                 return parseXidEvent((XidLogEvent) logEvent);
                 return parseXidEvent((XidLogEvent) logEvent);
             case LogEvent.TABLE_MAP_EVENT:
             case LogEvent.TABLE_MAP_EVENT:
@@ -137,7 +139,7 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
         }
         }
     }
     }
 
 
-    private Entry parseQueryEvent(QueryLogEvent event) {
+    private Entry parseQueryEvent(QueryLogEvent event, boolean isSeek) {
         String queryString = event.getQuery();
         String queryString = event.getQuery();
         if (StringUtils.endsWithIgnoreCase(queryString, BEGIN)) {
         if (StringUtils.endsWithIgnoreCase(queryString, BEGIN)) {
             TransactionBegin transactionBegin = createTransactionBegin(event.getSessionId());
             TransactionBegin transactionBegin = createTransactionBegin(event.getSessionId());
@@ -215,24 +217,28 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
             }
             }
 
 
             // 更新下table meta cache
             // 更新下table meta cache
-            if (tableMetaCache != null
-                && (result.getType() == EventType.ALTER || result.getType() == EventType.ERASE || result.getType() == EventType.RENAME)) {
-                for (DdlResult renameResult = result; renameResult != null; renameResult = renameResult.getRenameTableResult()) {
-                    String schemaName0 = event.getDbName(); // 防止rename语句后产生schema变更带来影响
-                    if (StringUtils.isNotEmpty(renameResult.getSchemaName())) {
-                        schemaName0 = renameResult.getSchemaName();
-                    }
-
-                    tableName = renameResult.getTableName();
-                    if (StringUtils.isNotEmpty(tableName)) {
-                        // 如果解析到了正确的表信息,则根据全名进行清除
-                        tableMetaCache.clearTableMeta(schemaName0, tableName);
-                    } else {
-                        // 如果无法解析正确的表信息,则根据schema进行清除
-                        tableMetaCache.clearTableMetaWithSchemaName(schemaName0);
-                    }
-                }
-            }
+            //if (tableMetaCache != null
+            //    && (result.getType() == EventType.ALTER || result.getType() == EventType.ERASE || result.getType() == EventType.RENAME)) {
+            //    for (DdlResult renameResult = result; renameResult != null; renameResult = renameResult.getRenameTableResult()) {
+            //        String schemaName0 = event.getDbName(); // 防止rename语句后产生schema变更带来影响
+            //        if (StringUtils.isNotEmpty(renameResult.getSchemaName())) {
+            //            schemaName0 = renameResult.getSchemaName();
+            //        }
+            //
+            //        tableName = renameResult.getTableName();
+            //        if (StringUtils.isNotEmpty(tableName)) {
+            //            // 如果解析到了正确的表信息,则根据全名进行清除
+            //            tableMetaCache.clearTableMeta(schemaName0, tableName);
+            //        } else {
+            //            // 如果无法解析正确的表信息,则根据schema进行清除
+            //            tableMetaCache.clearTableMetaWithSchemaName(schemaName0);
+            //        }
+            //    }
+            //}
+
+            //使用新的表结构元数据管理方式
+            BinlogPosition position = createPosition(event.getHeader());
+            tableMetaCache.apply(position, event.getDbName(), queryString);
 
 
             Header header = createHeader(binlogFileName, event.getHeader(), schemaName, tableName, type);
             Header header = createHeader(binlogFileName, event.getHeader(), schemaName, tableName, type);
             RowChange.Builder rowChangeBuider = RowChange.newBuilder();
             RowChange.Builder rowChangeBuider = RowChange.newBuilder();
@@ -348,6 +354,7 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
                 table.getDbName(),
                 table.getDbName(),
                 table.getTableName(),
                 table.getTableName(),
                 eventType);
                 eventType);
+            BinlogPosition position = createPosition(event.getHeader());
             RowChange.Builder rowChangeBuider = RowChange.newBuilder();
             RowChange.Builder rowChangeBuider = RowChange.newBuilder();
             rowChangeBuider.setTableId(event.getTableId());
             rowChangeBuider.setTableId(event.getTableId());
             rowChangeBuider.setIsDdl(false);
             rowChangeBuider.setIsDdl(false);
@@ -359,7 +366,7 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
             boolean tableError = false;
             boolean tableError = false;
             TableMeta tableMeta = null;
             TableMeta tableMeta = null;
             if (tableMetaCache != null) {// 入错存在table meta cache
             if (tableMetaCache != null) {// 入错存在table meta cache
-                tableMeta = getTableMeta(table.getDbName(), table.getTableName(), true);
+                tableMeta = getTableMeta(table.getDbName(), table.getTableName(), true,position);
                 if (tableMeta == null) {
                 if (tableMeta == null) {
                     tableError = true;
                     tableError = true;
                     if (!filterTableError) {
                     if (!filterTableError) {
@@ -405,6 +412,10 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
         }
         }
     }
     }
 
 
+    private BinlogPosition createPosition(LogHeader logHeader) {
+        return new BinlogPosition(binlogFileName, logHeader.getLogPos(), logHeader.getServerId(), logHeader.getWhen()); // 记录到秒
+    }
+
     private boolean parseOneRow(RowData.Builder rowDataBuilder, RowsLogEvent event, RowsLogBuffer buffer, BitSet cols,
     private boolean parseOneRow(RowData.Builder rowDataBuilder, RowsLogEvent event, RowsLogBuffer buffer, BitSet cols,
                                 boolean isAfter, TableMeta tableMeta) throws UnsupportedEncodingException {
                                 boolean isAfter, TableMeta tableMeta) throws UnsupportedEncodingException {
         int columnCnt = event.getTable().getColumnCnt();
         int columnCnt = event.getTable().getColumnCnt();
@@ -413,18 +424,19 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
         boolean tableError = false;
         boolean tableError = false;
         // check table fileds count,只能处理加字段
         // check table fileds count,只能处理加字段
         boolean existRDSNoPrimaryKey = false;
         boolean existRDSNoPrimaryKey = false;
-        if (tableMeta != null && columnInfo.length > tableMeta.getFileds().size()) {
+        if (tableMeta != null && columnInfo.length > tableMeta.getFields().size()) {
             if (tableMetaCache.isOnRDS()) {
             if (tableMetaCache.isOnRDS()) {
                 // 特殊处理下RDS的场景
                 // 特殊处理下RDS的场景
                 List<FieldMeta> primaryKeys = tableMeta.getPrimaryFields();
                 List<FieldMeta> primaryKeys = tableMeta.getPrimaryFields();
                 if (primaryKeys == null || primaryKeys.isEmpty()) {
                 if (primaryKeys == null || primaryKeys.isEmpty()) {
-                    if (columnInfo.length == tableMeta.getFileds().size() + 1
+                    if (columnInfo.length == tableMeta.getFields().size() + 1
                         && columnInfo[columnInfo.length - 1].type == LogEvent.MYSQL_TYPE_LONGLONG) {
                         && columnInfo[columnInfo.length - 1].type == LogEvent.MYSQL_TYPE_LONGLONG) {
                         existRDSNoPrimaryKey = true;
                         existRDSNoPrimaryKey = true;
                     }
                     }
                 }
                 }
             }
             }
 
 
+            BinlogPosition position = createPosition(event.getHeader());
             if (!existRDSNoPrimaryKey) {
             if (!existRDSNoPrimaryKey) {
                 // online ddl增加字段操作步骤:
                 // online ddl增加字段操作步骤:
                 // 1. 新增一张临时表,将需要做ddl表的数据全量导入
                 // 1. 新增一张临时表,将需要做ddl表的数据全量导入
@@ -432,7 +444,7 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
                 // 3. 锁住应用请求,将临时表rename为老表的名字,完成增加字段的操作
                 // 3. 锁住应用请求,将临时表rename为老表的名字,完成增加字段的操作
                 // 尝试做一次reload,可能因为ddl没有正确解析,或者使用了类似online ddl的操作
                 // 尝试做一次reload,可能因为ddl没有正确解析,或者使用了类似online ddl的操作
                 // 因为online ddl没有对应表名的alter语法,所以不会有clear cache的操作
                 // 因为online ddl没有对应表名的alter语法,所以不会有clear cache的操作
-                tableMeta = getTableMeta(event.getTable().getDbName(), event.getTable().getTableName(), false);// 强制重新获取一次
+                tableMeta = getTableMeta(event.getTable().getDbName(), event.getTable().getTableName(), false,position);// 强制重新获取一次
                 if (tableMeta == null) {
                 if (tableMeta == null) {
                     tableError = true;
                     tableError = true;
                     if (!filterTableError) {
                     if (!filterTableError) {
@@ -442,11 +454,11 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
                 }
                 }
 
 
                 // 再做一次判断
                 // 再做一次判断
-                if (tableMeta != null && columnInfo.length > tableMeta.getFileds().size()) {
+                if (tableMeta != null && columnInfo.length > tableMeta.getFields().size()) {
                     tableError = true;
                     tableError = true;
                     if (!filterTableError) {
                     if (!filterTableError) {
                         throw new CanalParseException("column size is not match for table:" + tableMeta.getFullName()
                         throw new CanalParseException("column size is not match for table:" + tableMeta.getFullName()
-                                                      + "," + columnInfo.length + " vs " + tableMeta.getFileds().size());
+                                                      + "," + columnInfo.length + " vs " + tableMeta.getFields().size());
                     }
                     }
                 }
                 }
             } else {
             } else {
@@ -473,7 +485,7 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
             FieldMeta fieldMeta = null;
             FieldMeta fieldMeta = null;
             if (tableMeta != null && !tableError) {
             if (tableMeta != null && !tableError) {
                 // 处理field meta
                 // 处理field meta
-                fieldMeta = tableMeta.getFileds().get(i);
+                fieldMeta = tableMeta.getFields().get(i);
                 columnBuilder.setName(fieldMeta.getColumnName());
                 columnBuilder.setName(fieldMeta.getColumnName());
                 columnBuilder.setIsKey(fieldMeta.isKey());
                 columnBuilder.setIsKey(fieldMeta.isKey());
                 // 增加mysql type类型,issue 73
                 // 增加mysql type类型,issue 73
@@ -672,9 +684,9 @@ public class LogEventConvert extends AbstractCanalLifeCycle implements BinlogPar
         return true;
         return true;
     }
     }
 
 
-    private TableMeta getTableMeta(String dbName, String tbName, boolean useCache) {
+    private TableMeta getTableMeta(String dbName, String tbName, boolean useCache, BinlogPosition position) {
         try {
         try {
-            return tableMetaCache.getTableMeta(dbName, tbName, useCache);
+            return tableMetaCache.getTableMeta(dbName, tbName, useCache,position);
         } catch (Exception e) {
         } catch (Exception e) {
             String message = ExceptionUtils.getRootCauseMessage(e);
             String message = ExceptionUtils.getRootCauseMessage(e);
             if (filterTableError) {
             if (filterTableError) {

+ 144 - 80
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/TableMetaCache.java

@@ -1,137 +1,201 @@
 package com.alibaba.otter.canal.parse.inbound.mysql.dbsync;
 package com.alibaba.otter.canal.parse.inbound.mysql.dbsync;
 
 
 import java.io.IOException;
 import java.io.IOException;
+import java.sql.ResultSet;
+import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
 import java.util.List;
 import java.util.Map;
 import java.util.Map;
 
 
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.taobao.tddl.dbsync.binlog.BinlogPosition;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.StringUtils;
 
 
-import com.alibaba.otter.canal.parse.driver.mysql.packets.server.FieldPacket;
-import com.alibaba.otter.canal.parse.driver.mysql.packets.server.ResultSetPacket;
 import com.alibaba.otter.canal.parse.exception.CanalParseException;
 import com.alibaba.otter.canal.parse.exception.CanalParseException;
 import com.alibaba.otter.canal.parse.inbound.TableMeta;
 import com.alibaba.otter.canal.parse.inbound.TableMeta;
 import com.alibaba.otter.canal.parse.inbound.TableMeta.FieldMeta;
 import com.alibaba.otter.canal.parse.inbound.TableMeta.FieldMeta;
 import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection;
 import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection;
-import com.google.common.base.Function;
-import com.google.common.collect.MigrateMap;
+import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection.ProcessJdbcResult;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.TableMetaManager;
 
 
 /**
 /**
  * 处理table meta解析和缓存
  * 处理table meta解析和缓存
- * 
+ *
  * @author jianghang 2013-1-17 下午10:15:16
  * @author jianghang 2013-1-17 下午10:15:16
  * @version 1.0.0
  * @version 1.0.0
  */
  */
 public class TableMetaCache {
 public class TableMetaCache {
 
 
-    public static final String     COLUMN_NAME    = "COLUMN_NAME";
-    public static final String     COLUMN_TYPE    = "COLUMN_TYPE";
-    public static final String     IS_NULLABLE    = "IS_NULLABLE";
-    public static final String     COLUMN_KEY     = "COLUMN_KEY";
-    public static final String     COLUMN_DEFAULT = "COLUMN_DEFAULT";
-    public static final String     EXTRA          = "EXTRA";
-    private MysqlConnection        connection;
-    private boolean                isOnRDS        = false;
+    public static final String COLUMN_NAME = "COLUMN_NAME";
+    public static final String COLUMN_TYPE = "COLUMN_TYPE";
+    public static final String IS_NULLABLE = "IS_NULLABLE";
+    public static final String COLUMN_KEY = "COLUMN_KEY";
+    public static final String COLUMN_DEFAULT = "COLUMN_DEFAULT";
+    public static final String EXTRA = "EXTRA";
+    private MysqlConnection connection;
+    private boolean isOnRDS = false;
+
+    private TableMetaManager tableMetaManager;
+    // 本地表结构缓存,key为schema.table全名(getFullName)
+    private LoadingCache<String, TableMeta> tableMetaDB;
+
 
 
     // 第一层tableId,第二层schema.table,解决tableId重复,对应多张表
     // 第一层tableId,第二层schema.table,解决tableId重复,对应多张表
     private Map<String, TableMeta> tableMetaCache;
     private Map<String, TableMeta> tableMetaCache;
 
 
-    public TableMetaCache(MysqlConnection con){
+    /**
+     * 从db获取表结构
+     * @param fullname
+     * @return
+     */
+    private TableMeta getTableMetaByDB(final String fullname) {
+        return connection.query("desc " + fullname, new ProcessJdbcResult<TableMeta>() {
+
+            @Override
+            public TableMeta process(ResultSet rs) throws SQLException {
+                List<FieldMeta> metas = new ArrayList<FieldMeta>();
+                while (rs.next()) {
+                    FieldMeta meta = new FieldMeta();
+                    // 做一个优化,使用String.intern(),共享String对象,减少内存使用
+                    meta.setColumnName(rs.getString("Field"));
+                    meta.setColumnType(rs.getString("Type"));
+                    meta.setNullable(StringUtils.equalsIgnoreCase(rs.getString("Null"), "YES"));
+                    meta.setKey("PRI".equalsIgnoreCase(rs.getString("Key")));
+                    meta.setDefaultValue(rs.getString("Default"));
+                    metas.add(meta);
+                }
+
+                String[] names = StringUtils.split(fullname, "`.`");
+                String schema = names[0];
+                String table = names[1].substring(0, names[1].length());
+                return new TableMeta(schema, table, metas);
+            }
+        });
+    }
+
+    public TableMetaCache(MysqlConnection con,TableMetaManager tableMetaManager) {
         this.connection = con;
         this.connection = con;
-        tableMetaCache = MigrateMap.makeComputingMap(new Function<String, TableMeta>() {
+        this.tableMetaManager = tableMetaManager;
+        // 如果未配置tableMetaManager(持久化表结构存储),降级为基于db查询的本地LoadingCache
+        if (tableMetaManager == null) {
+            this.tableMetaDB = CacheBuilder.newBuilder().build(new CacheLoader<String, TableMeta>() {
 
 
-            public TableMeta apply(String name) {
-                try {
-                    return getTableMeta0(name);
-                } catch (IOException e) {
-                    // 尝试做一次retry操作
+                @Override
+                public TableMeta load(String name) throws Exception {
                     try {
                     try {
-                        connection.reconnect();
-                        return getTableMeta0(name);
-                    } catch (IOException e1) {
-                        throw new CanalParseException("fetch failed by table meta:" + name, e1);
+                        return getTableMetaByDB(name);
+                    } catch (CanalParseException e) {
+                        // 尝试做一次retry操作
+                        try {
+                            connection.reconnect();
+                            return getTableMetaByDB(name);
+                        } catch (IOException e1) {
+                            throw new CanalParseException("fetch failed by table meta:" + name, e1);
+                        }
                     }
                     }
                 }
                 }
-            }
 
 
-        });
+            });
+        }
 
 
-        try {
-            ResultSetPacket packet = connection.query("show global variables  like 'rds\\_%'");
-            if (packet.getFieldValues().size() > 0) {
-                isOnRDS = true;
+        isOnRDS = connection.query("show global variables  like 'rds\\_%'", new ProcessJdbcResult<Boolean>() {
+
+            @Override
+            public Boolean process(ResultSet rs) throws SQLException {
+                if (rs.next()) {
+                    return true;
+                }
+                return false;
             }
             }
-        } catch (IOException e) {
-        }
+        });
     }
     }
 
 
-    public TableMeta getTableMeta(String schema, String table) {
-        return getTableMeta(schema, table, true);
+    public TableMeta getTableMeta(String schema, String table, BinlogPosition position) {
+        return getTableMeta(schema, table, true, position);
     }
     }
 
 
-    public TableMeta getTableMeta(String schema, String table, boolean useCache) {
-        if (!useCache) {
-            tableMetaCache.remove(getFullName(schema, table));
-        }
+    public TableMeta getTableMeta(String schema, String table, boolean useCache, BinlogPosition position) {
+        TableMeta tableMeta = null;
+        if (tableMetaManager != null) {
+            tableMeta = tableMetaManager.find(schema, table);
+            if (tableMeta == null) {
+                // 因为条件变化,可能第一次的tableMeta没取到,需要从db获取一次,并记录到snapshot中
+                String createDDL = connection.query("show create table " + getFullName(schema, table),
+                    new ProcessJdbcResult<String>() {
+
+                        @Override
+                        public String process(ResultSet rs) throws SQLException {
+                            while (rs.next()) {
+                                return rs.getString(2);
+                            }
+                            return null;
+                        }
+                    });
+                // 强制覆盖掉内存值
+                tableMetaManager.apply(position, schema, createDDL);
+                tableMeta = tableMetaManager.find(schema, table);
+            }
+            return tableMeta;
+        } else {
+            if (!useCache) {
+                tableMetaDB.invalidate(getFullName(schema, table));
+            }
 
 
-        return tableMetaCache.get(getFullName(schema, table));
+            return tableMetaDB.getUnchecked(getFullName(schema, table));
+        }
     }
     }
 
 
+
+
     public void clearTableMeta(String schema, String table) {
     public void clearTableMeta(String schema, String table) {
-        tableMetaCache.remove(getFullName(schema, table));
+        if (tableMetaManager != null) {
+            // tsdb不需要做,会基于ddl sql自动清理
+        } else {
+            tableMetaDB.invalidate(getFullName(schema, table));
+        }
     }
     }
 
 
     public void clearTableMetaWithSchemaName(String schema) {
     public void clearTableMetaWithSchemaName(String schema) {
-        // Set<String> removeNames = new HashSet<String>(); //
-        // 存一份临时变量,避免在遍历的时候进行删除
-        for (String name : tableMetaCache.keySet()) {
-            if (StringUtils.startsWithIgnoreCase(name, schema + ".")) {
-                // removeNames.add(name);
-                tableMetaCache.remove(name);
+        if (tableMetaManager != null) {
+            // tsdb不需要做,会基于ddl sql自动清理
+        } else {
+            for (String name : tableMetaDB.asMap().keySet()) {
+                if (StringUtils.startsWithIgnoreCase(name, schema + ".")) {
+                    // removeNames.add(name);
+                    tableMetaDB.invalidate(name);
+                }
             }
             }
         }
         }
-
-        // for (String name : removeNames) {
-        // tables.remove(name);
-        // }
     }
     }
 
 
     public void clearTableMeta() {
     public void clearTableMeta() {
-        tableMetaCache.clear();
-    }
-
-    private TableMeta getTableMeta0(String fullname) throws IOException {
-        ResultSetPacket packet = connection.query("desc " + fullname);
-        return new TableMeta(fullname, parserTableMeta(packet));
+        if (tableMetaManager != null) {
+            // tsdb不需要做,会基于ddl sql自动清理
+        } else {
+            tableMetaDB.invalidateAll();
+        }
     }
     }
 
 
-    private List<FieldMeta> parserTableMeta(ResultSetPacket packet) {
-        Map<String, Integer> nameMaps = new HashMap<String, Integer>(6, 1f);
-
-        int index = 0;
-        for (FieldPacket fieldPacket : packet.getFieldDescriptors()) {
-            nameMaps.put(fieldPacket.getOriginalName(), index++);
+    /**
+     * 更新一下本地的表结构内存
+     *
+     * @param position
+     * @param schema
+     * @param ddl
+     * @return
+     */
+    public boolean apply(BinlogPosition position, String schema, String ddl) {
+        if (tableMetaManager != null) {
+            return tableMetaManager.apply(position, schema, ddl);
+        } else {
+            // ignore
+            return true;
         }
         }
+    }
 
 
-        int size = packet.getFieldDescriptors().size();
-        int count = packet.getFieldValues().size() / packet.getFieldDescriptors().size();
-        List<FieldMeta> result = new ArrayList<FieldMeta>();
-        for (int i = 0; i < count; i++) {
-            FieldMeta meta = new FieldMeta();
-            // 做一个优化,使用String.intern(),共享String对象,减少内存使用
-            meta.setColumnName(packet.getFieldValues().get(nameMaps.get(COLUMN_NAME) + i * size).intern());
-            meta.setColumnType(packet.getFieldValues().get(nameMaps.get(COLUMN_TYPE) + i * size));
-            meta.setIsNullable(packet.getFieldValues().get(nameMaps.get(IS_NULLABLE) + i * size));
-            meta.setIskey(packet.getFieldValues().get(nameMaps.get(COLUMN_KEY) + i * size));
-            meta.setDefaultValue(packet.getFieldValues().get(nameMaps.get(COLUMN_DEFAULT) + i * size));
-            meta.setExtra(packet.getFieldValues().get(nameMaps.get(EXTRA) + i * size));
-
-            result.add(meta);
-        }
 
 
-        return result;
-    }
 
 
     private String getFullName(String schema, String table) {
     private String getFullName(String schema, String table) {
         StringBuilder builder = new StringBuilder();
         StringBuilder builder = new StringBuilder();

+ 116 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/ddl/DdlResult.java

@@ -0,0 +1,116 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.ddl;
+
+import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.DBMSAction;
+
+/**
+ * @author agapple 2017年8月1日 下午7:30:42
+ * @since 3.2.5
+ */
+public class DdlResult {
+
+    private String schemaName;
+    private String tableName;
+    private String oriSchemaName; // rename ddl中的源表
+    private String oriTableName; // rename ddl中的目标表
+    private DBMSAction type;
+    private DdlResult renameTableResult; // 多个rename table的存储
+
+    /*
+     * RENAME TABLE tbl_name TO new_tbl_name [, tbl_name2 TO new_tbl_name2] ...
+     */
+
+    public DdlResult() {
+    }
+
+    public DdlResult(String schemaName) {
+        this.schemaName = schemaName;
+    }
+
+    public DdlResult(String schemaName, String tableName) {
+        this.schemaName = schemaName;
+        this.tableName = tableName;
+    }
+
+    public DdlResult(String schemaName, String tableName, String oriSchemaName, String oriTableName) {
+        this.schemaName = schemaName;
+        this.tableName = tableName;
+        this.oriSchemaName = oriSchemaName;
+        this.oriTableName = oriTableName;
+    }
+
+    public String getSchemaName() {
+        return schemaName;
+    }
+
+    public void setSchemaName(String schemaName) {
+        this.schemaName = schemaName;
+    }
+
+    public String getTableName() {
+        return tableName;
+    }
+
+    public void setTableName(String tableName) {
+        this.tableName = tableName;
+    }
+
+    public DBMSAction getType() {
+        return type;
+    }
+
+    public void setType(DBMSAction type) {
+        this.type = type;
+    }
+
+    public String getOriSchemaName() {
+        return oriSchemaName;
+    }
+
+    public void setOriSchemaName(String oriSchemaName) {
+        this.oriSchemaName = oriSchemaName;
+    }
+
+    public String getOriTableName() {
+        return oriTableName;
+    }
+
+    public void setOriTableName(String oriTableName) {
+        this.oriTableName = oriTableName;
+    }
+
+    public DdlResult getRenameTableResult() {
+        return renameTableResult;
+    }
+
+    public void setRenameTableResult(DdlResult renameTableResult) {
+        this.renameTableResult = renameTableResult;
+    }
+
+    @Override
+    public DdlResult clone() {
+        DdlResult result = new DdlResult();
+        result.setOriSchemaName(oriSchemaName);
+        result.setOriTableName(oriTableName);
+        result.setSchemaName(schemaName);
+        result.setTableName(tableName);
+        //result.setType(type);
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        DdlResult ddlResult = this;
+        StringBuffer sb = new StringBuffer();
+        do {
+            sb.append(String
+                .format("DdlResult [schemaName=%s , tableName=%s , oriSchemaName=%s , oriTableName=%s , type=%s ];",
+                    ddlResult.schemaName,
+                    ddlResult.tableName,
+                    ddlResult.oriSchemaName,
+                    ddlResult.oriTableName,
+                    ddlResult.type));
+            ddlResult = ddlResult.renameTableResult;
+        } while (ddlResult != null);
+        return sb.toString();
+    }
+}

+ 201 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/ddl/DruidDdlParser.java

@@ -0,0 +1,201 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.ddl;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import com.alibaba.druid.sql.SQLUtils;
+import com.alibaba.druid.sql.ast.SQLExpr;
+import com.alibaba.druid.sql.ast.SQLStatement;
+import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr;
+import com.alibaba.druid.sql.ast.expr.SQLPropertyExpr;
+import com.alibaba.druid.sql.ast.statement.SQLAlterTableAddConstraint;
+import com.alibaba.druid.sql.ast.statement.SQLAlterTableAddIndex;
+import com.alibaba.druid.sql.ast.statement.SQLAlterTableDropConstraint;
+import com.alibaba.druid.sql.ast.statement.SQLAlterTableDropIndex;
+import com.alibaba.druid.sql.ast.statement.SQLAlterTableDropKey;
+import com.alibaba.druid.sql.ast.statement.SQLAlterTableItem;
+import com.alibaba.druid.sql.ast.statement.SQLAlterTableRename;
+import com.alibaba.druid.sql.ast.statement.SQLAlterTableStatement;
+import com.alibaba.druid.sql.ast.statement.SQLConstraint;
+import com.alibaba.druid.sql.ast.statement.SQLCreateIndexStatement;
+import com.alibaba.druid.sql.ast.statement.SQLCreateTableStatement;
+import com.alibaba.druid.sql.ast.statement.SQLDeleteStatement;
+import com.alibaba.druid.sql.ast.statement.SQLDropIndexStatement;
+import com.alibaba.druid.sql.ast.statement.SQLDropTableStatement;
+import com.alibaba.druid.sql.ast.statement.SQLExprTableSource;
+import com.alibaba.druid.sql.ast.statement.SQLInsertStatement;
+import com.alibaba.druid.sql.ast.statement.SQLTableSource;
+import com.alibaba.druid.sql.ast.statement.SQLTruncateStatement;
+import com.alibaba.druid.sql.ast.statement.SQLUnique;
+import com.alibaba.druid.sql.ast.statement.SQLUpdateStatement;
+import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlRenameTableStatement;
+import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlRenameTableStatement.Item;
+import com.alibaba.druid.sql.parser.ParserException;
+import com.alibaba.druid.util.JdbcConstants;
+import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.DBMSAction;
+
+/**
+ * @author agapple 2017年7月27日 下午4:05:34
+ * @since 3.2.5
+ */
+public class DruidDdlParser {
+
+    public static List<DdlResult> parse(String queryString, String schmeaName) {
+        List<SQLStatement> stmtList = null;
+        try {
+            stmtList = SQLUtils.parseStatements(queryString, JdbcConstants.MYSQL, false);
+        } catch (ParserException e) {
+            // 可能存在一些SQL是不支持的,比如存储过程
+            DdlResult ddlResult = new DdlResult();
+            ddlResult.setType(DBMSAction.QUERY);
+            return Arrays.asList(ddlResult);
+        }
+
+        List<DdlResult> ddlResults = new ArrayList<DdlResult>();
+        for (SQLStatement statement : stmtList) {
+            if (statement instanceof SQLCreateTableStatement) {
+                DdlResult ddlResult = new DdlResult();
+                SQLCreateTableStatement createTable = (SQLCreateTableStatement) statement;
+                processName(ddlResult, schmeaName, createTable.getName(), false);
+                ddlResult.setType(DBMSAction.CREATE);
+                ddlResults.add(ddlResult);
+            } else if (statement instanceof SQLAlterTableStatement) {
+                SQLAlterTableStatement alterTable = (SQLAlterTableStatement) statement;
+                for (SQLAlterTableItem item : alterTable.getItems()) {
+                    if (item instanceof SQLAlterTableRename) {
+                        DdlResult ddlResult = new DdlResult();
+                        processName(ddlResult, schmeaName, alterTable.getName(), true);
+                        processName(ddlResult, schmeaName, ((SQLAlterTableRename) item).getToName(), false);
+                        ddlResults.add(ddlResult);
+                    } else if (item instanceof SQLAlterTableAddIndex) {
+                        DdlResult ddlResult = new DdlResult();
+                        processName(ddlResult, schmeaName, alterTable.getName(), false);
+                        ddlResult.setType(DBMSAction.CINDEX);
+                        ddlResults.add(ddlResult);
+                    } else if (item instanceof SQLAlterTableDropIndex || item instanceof SQLAlterTableDropKey) {
+                        DdlResult ddlResult = new DdlResult();
+                        processName(ddlResult, schmeaName, alterTable.getName(), false);
+                        ddlResult.setType(DBMSAction.DINDEX);
+                        ddlResults.add(ddlResult);
+                    } else if (item instanceof SQLAlterTableAddConstraint) {
+                        DdlResult ddlResult = new DdlResult();
+                        processName(ddlResult, schmeaName, alterTable.getName(), false);
+                        SQLConstraint constraint = ((SQLAlterTableAddConstraint) item).getConstraint();
+                        if (constraint instanceof SQLUnique) {
+                            ddlResult.setType(DBMSAction.CINDEX);
+                            ddlResults.add(ddlResult);
+                        }
+                    } else if (item instanceof SQLAlterTableDropConstraint) {
+                        DdlResult ddlResult = new DdlResult();
+                        processName(ddlResult, schmeaName, alterTable.getName(), false);
+                        ddlResult.setType(DBMSAction.DINDEX);
+                        ddlResults.add(ddlResult);
+                    } else {
+                        DdlResult ddlResult = new DdlResult();
+                        processName(ddlResult, schmeaName, alterTable.getName(), false);
+                        ddlResult.setType(DBMSAction.ALTER);
+                        ddlResults.add(ddlResult);
+                    }
+                }
+            } else if (statement instanceof SQLDropTableStatement) {
+                SQLDropTableStatement dropTable = (SQLDropTableStatement) statement;
+                for (SQLExprTableSource tableSource : dropTable.getTableSources()) {
+                    DdlResult ddlResult = new DdlResult();
+                    processName(ddlResult, schmeaName, tableSource.getExpr(), false);
+                    ddlResult.setType(DBMSAction.ERASE);
+                    ddlResults.add(ddlResult);
+                }
+            } else if (statement instanceof SQLCreateIndexStatement) {
+                SQLCreateIndexStatement createIndex = (SQLCreateIndexStatement) statement;
+                SQLTableSource tableSource = createIndex.getTable();
+                DdlResult ddlResult = new DdlResult();
+                processName(ddlResult, schmeaName, ((SQLExprTableSource) tableSource).getExpr(), false);
+                ddlResult.setType(DBMSAction.CINDEX);
+                ddlResults.add(ddlResult);
+            } else if (statement instanceof SQLDropIndexStatement) {
+                SQLDropIndexStatement dropIndex = (SQLDropIndexStatement) statement;
+                SQLExprTableSource tableSource = dropIndex.getTableName();
+                DdlResult ddlResult = new DdlResult();
+                processName(ddlResult, schmeaName, tableSource.getExpr(), false);
+                ddlResult.setType(DBMSAction.DINDEX);
+                ddlResults.add(ddlResult);
+            } else if (statement instanceof SQLTruncateStatement) {
+                SQLTruncateStatement truncate = (SQLTruncateStatement) statement;
+                for (SQLExprTableSource tableSource : truncate.getTableSources()) {
+                    DdlResult ddlResult = new DdlResult();
+                    processName(ddlResult, schmeaName, tableSource.getExpr(), false);
+                    ddlResult.setType(DBMSAction.TRUNCATE);
+                    ddlResults.add(ddlResult);
+                }
+            } else if (statement instanceof MySqlRenameTableStatement) {
+                MySqlRenameTableStatement rename = (MySqlRenameTableStatement) statement;
+                for (Item item : rename.getItems()) {
+                    DdlResult ddlResult = new DdlResult();
+                    processName(ddlResult, schmeaName, item.getName(), true);
+                    processName(ddlResult, schmeaName, item.getTo(), false);
+                    ddlResult.setType(DBMSAction.RENAME);
+                    ddlResults.add(ddlResult);
+                }
+            } else if (statement instanceof SQLInsertStatement) {
+                DdlResult ddlResult = new DdlResult();
+                SQLInsertStatement insert = (SQLInsertStatement) statement;
+                processName(ddlResult, schmeaName, insert.getTableName(), true);
+                ddlResult.setType(DBMSAction.INSERT);
+                ddlResults.add(ddlResult);
+            } else if (statement instanceof SQLUpdateStatement) {
+                DdlResult ddlResult = new DdlResult();
+                SQLUpdateStatement update = (SQLUpdateStatement) statement;
+                // 拿到的表名可能为null,比如update a,b set a.id=x
+                processName(ddlResult, schmeaName, update.getTableName(), true);
+                ddlResult.setType(DBMSAction.UPDATE);
+                ddlResults.add(ddlResult);
+            } else if (statement instanceof SQLDeleteStatement) {
+                DdlResult ddlResult = new DdlResult();
+                SQLDeleteStatement delete = (SQLDeleteStatement) statement;
+                // 拿到的表名可能为null,比如delete a,b from a where a.id = b.id
+                processName(ddlResult, schmeaName, delete.getTableName(), true);
+                ddlResult.setType(DBMSAction.DELETE);
+                ddlResults.add(ddlResult);
+            }
+        }
+
+        return ddlResults;
+    }
+
+    private static void processName(DdlResult ddlResult, String schema, SQLExpr sqlName, boolean isOri) {
+        if (sqlName == null) {
+            return;
+        }
+
+        String table = null;
+        if (sqlName instanceof SQLPropertyExpr) {
+            SQLIdentifierExpr owner = (SQLIdentifierExpr) ((SQLPropertyExpr) sqlName).getOwner();
+            schema = unescapeName(owner.getName());
+            table = unescapeName(((SQLPropertyExpr) sqlName).getName());
+        } else if (sqlName instanceof SQLIdentifierExpr) {
+            table = unescapeName(((SQLIdentifierExpr) sqlName).getName());
+        }
+
+        if (isOri) {
+            ddlResult.setOriSchemaName(schema);
+            ddlResult.setOriTableName(table);
+        } else {
+            ddlResult.setSchemaName(schema);
+            ddlResult.setTableName(table);
+        }
+    }
+
+    public static String unescapeName(String name) {
+        if (name.length() > 2) {
+            char c0 = name.charAt(0);
+            char x0 = name.charAt(name.length() - 1);
+            if ((c0 == '"' && x0 == '"') || (c0 == '`' && x0 == '`')) {
+                return name.substring(1, name.length() - 1);
+            }
+        }
+
+        return name;
+    }
+
+}

+ 43 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/DataSourceFactoryTSDB.java

@@ -0,0 +1,43 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb;
+
+import java.sql.SQLException;
+
+import javax.sql.DataSource;
+
+import com.alibaba.druid.pool.DruidDataSource;
+import com.alibaba.otter.canal.parse.exception.CanalParseException;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.math.NumberUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Created by wanshao
+ * Date: 2017/9/22
+ * Time: 下午2:46
+ **/
+public class DataSourceFactoryTSDB {
+
+    private static Logger logger = LoggerFactory.getLogger(DataSourceFactoryTSDB.class);
+
+    public static DataSource getDataSource(String address, String userName, String password,
+                                           boolean enable,
+                                           String defaultDatabaseName, String url, String driverClassName) {
+
+        DruidDataSource druidDataSource = new DruidDataSource();
+        druidDataSource.setUrl(url);
+        druidDataSource.setUsername(userName);
+        druidDataSource.setPassword(password);
+
+        try {
+            druidDataSource.init();
+        } catch (SQLException e) {
+            logger.error("druidDataSource.init", e);
+            throw new CanalParseException("初始化druid dataSource出错");
+        }
+        return druidDataSource;
+    }
+
+
+}

+ 223 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/MemoryTableMeta.java

@@ -0,0 +1,223 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import com.taobao.tddl.dbsync.binlog.BinlogPosition;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.alibaba.druid.sql.ast.SQLDataType;
+import com.alibaba.druid.sql.ast.SQLExpr;
+import com.alibaba.druid.sql.ast.SQLStatement;
+import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr;
+import com.alibaba.druid.sql.ast.expr.SQLNullExpr;
+import com.alibaba.druid.sql.ast.expr.SQLPropertyExpr;
+import com.alibaba.druid.sql.ast.statement.SQLColumnConstraint;
+import com.alibaba.druid.sql.ast.statement.SQLColumnDefinition;
+import com.alibaba.druid.sql.ast.statement.SQLColumnPrimaryKey;
+import com.alibaba.druid.sql.ast.statement.SQLCreateTableStatement;
+import com.alibaba.druid.sql.ast.statement.SQLNotNullConstraint;
+import com.alibaba.druid.sql.ast.statement.SQLNullConstraint;
+import com.alibaba.druid.sql.ast.statement.SQLSelectOrderByItem;
+import com.alibaba.druid.sql.ast.statement.SQLTableElement;
+import com.alibaba.druid.sql.dialect.mysql.ast.MySqlPrimaryKey;
+import com.alibaba.druid.sql.repository.Schema;
+import com.alibaba.druid.sql.repository.SchemaObject;
+import com.alibaba.druid.sql.repository.SchemaRepository;
+import com.alibaba.druid.util.JdbcConstants;
+
+import com.alibaba.otter.canal.parse.inbound.TableMeta;
+import com.alibaba.otter.canal.parse.inbound.TableMeta.FieldMeta;
+import com.alibaba.otter.canal.parse.inbound.mysql.ddl.DruidDdlParser;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.TableMetaTSDB;
+
+/**
+ * In-memory table structures maintained by replaying DDL statements through a
+ * druid {@link SchemaRepository}. Resolved {@link TableMeta} objects are cached
+ * per (schema, table) pair and the cache is invalidated whenever a DDL is
+ * applied.
+ *
+ * @author agapple 2017年7月27日 下午4:19:40
+ * @since 3.2.5
+ */
+public class MemoryTableMeta implements TableMetaTSDB {
+
+    private Map<List<String>, TableMeta> tableMetas = new ConcurrentHashMap<List<String>, TableMeta>();
+    private SchemaRepository repository = new SchemaRepository(JdbcConstants.MYSQL);
+    private Logger logger = LoggerFactory.getLogger(MemoryTableMeta.class);
+
+    public MemoryTableMeta(Logger logger){
+        this.logger = logger;
+    }
+
+    /**
+     * Replays one DDL statement into the repository.
+     *
+     * @param position binlog position of the DDL (unused by the memory impl)
+     * @param schema default schema the DDL executed under, may be empty
+     * @param ddl DDL text; parse failures are logged and swallowed so that an
+     *            unparsable statement does not stop the pipeline
+     * @return always true
+     */
+    @Override
+    public boolean apply(BinlogPosition position, String schema, String ddl) {
+        synchronized (this) {
+            // invalidate the resolved cache under the same lock as the
+            // repository update, so a concurrent find() cannot re-populate it
+            // from the not-yet-updated repository (the original cleared the
+            // cache outside the lock, leaving a stale-cache window)
+            tableMetas.clear();
+            if (StringUtils.isNotEmpty(schema)) {
+                repository.setDefaultSchema(schema);
+            }
+
+            try {
+                repository.console(ddl);
+            } catch (Throwable e) {
+                logger.warn("parse faield : " + ddl, e);
+            }
+        }
+
+        return true;
+    }
+
+    /**
+     * Resolves the current {@link TableMeta} for (schema, table), caching the
+     * result. Returns null when the schema/table is unknown or the stored
+     * statement is not a CREATE TABLE.
+     */
+    @Override
+    public TableMeta find(String schema, String table) {
+        List<String> keys = Arrays.asList(schema, table);
+        TableMeta tableMeta = tableMetas.get(keys);
+        if (tableMeta == null) {
+            // double-checked lookup: parse under the lock only on a cache miss
+            synchronized (this) {
+                tableMeta = tableMetas.get(keys);
+                if (tableMeta == null) {
+                    Schema schemaRep = repository.findSchema(schema);
+                    // fixed: the original tested the parameter (schema) instead
+                    // of the lookup result, so an unknown schema caused an NPE
+                    // on the findTable call below
+                    if (schemaRep == null) {
+                        return null;
+                    }
+                    SchemaObject data = schemaRep.findTable(table);
+                    if (data == null) {
+                        return null;
+                    }
+                    SQLStatement statement = data.getStatement();
+                    if (statement == null) {
+                        return null;
+                    }
+                    if (statement instanceof SQLCreateTableStatement) {
+                        tableMeta = parse((SQLCreateTableStatement) statement);
+                    }
+                    if (tableMeta != null) {
+                        if (table != null) {
+                            tableMeta.setTable(table);
+                        }
+                        if (schema != null) {
+                            tableMeta.setSchema(schema);
+                        }
+
+                        tableMetas.put(keys, tableMeta);
+                    }
+                }
+            }
+        }
+
+        return tableMeta;
+    }
+
+    @Override
+    public boolean rollback(BinlogPosition position) {
+        throw new RuntimeException("not support for memory");
+    }
+
+    /**
+     * Dumps every schema as a single string of "CREATE TABLE ...;" statements,
+     * keyed by schema name.
+     */
+    public Map<String, String> snapshot() {
+        Map<String, String> schemaDdls = new HashMap<String, String>();
+        for (Schema schema : repository.getSchemas()) {
+            // SQLStatement.output requires a StringBuffer, hence not StringBuilder
+            StringBuffer data = new StringBuffer(4 * 1024);
+            for (String table : schema.showTables()) {
+                SchemaObject schemaObject = schema.findTable(table);
+                schemaObject.getStatement().output(data);
+                data.append("; \n");
+            }
+            schemaDdls.put(schema.getName(), data.toString());
+        }
+
+        return schemaDdls;
+    }
+
+    /** Converts a parsed CREATE TABLE statement into a {@link TableMeta}. */
+    private TableMeta parse(SQLCreateTableStatement statement) {
+        int size = statement.getTableElementList().size();
+        if (size > 0) {
+            TableMeta tableMeta = new TableMeta();
+            for (int i = 0; i < size; ++i) {
+                SQLTableElement element = statement.getTableElementList().get(i);
+                processTableElement(element, tableMeta);
+            }
+            return tableMeta;
+        }
+
+        return null;
+    }
+
+    /**
+     * Maps one table element (column definition or primary-key clause) onto
+     * the given {@link TableMeta}.
+     */
+    private void processTableElement(SQLTableElement element, TableMeta tableMeta) {
+        if (element instanceof SQLColumnDefinition) {
+            FieldMeta fieldMeta = new FieldMeta();
+            SQLColumnDefinition column = (SQLColumnDefinition) element;
+            String name = getSqlName(column.getName());
+            SQLDataType dataType = column.getDataType();
+            // rebuild the full type string, e.g. "decimal(10,2)"
+            String dataTypStr = dataType.getName();
+            if (dataType.getArguments().size() > 0) {
+                dataTypStr += "(";
+                for (int i = 0; i < column.getDataType().getArguments().size(); i++) {
+                    if (i != 0) {
+                        dataTypStr += ",";
+                    }
+                    SQLExpr arg = column.getDataType().getArguments().get(i);
+                    dataTypStr += arg.toString();
+                }
+                dataTypStr += ")";
+            }
+
+            // an explicit DEFAULT NULL is normalized to a null default value
+            if (column.getDefaultExpr() == null || column.getDefaultExpr() instanceof SQLNullExpr) {
+                fieldMeta.setDefaultValue(null);
+            } else {
+                fieldMeta.setDefaultValue(getSqlName(column.getDefaultExpr()));
+            }
+
+            fieldMeta.setColumnName(name);
+            fieldMeta.setColumnType(dataTypStr);
+            fieldMeta.setNullable(true);
+            List<SQLColumnConstraint> constraints = column.getConstraints();
+            for (SQLColumnConstraint constraint : constraints) {
+                if (constraint instanceof SQLNotNullConstraint) {
+                    fieldMeta.setNullable(false);
+                } else if (constraint instanceof SQLNullConstraint) {
+                    fieldMeta.setNullable(true);
+                } else if (constraint instanceof SQLColumnPrimaryKey) {
+                    fieldMeta.setKey(true);
+                }
+            }
+            tableMeta.addFieldMeta(fieldMeta);
+        } else if (element instanceof MySqlPrimaryKey) {
+            MySqlPrimaryKey column = (MySqlPrimaryKey) element;
+            List<SQLSelectOrderByItem> pks = column.getColumns();
+            for (SQLSelectOrderByItem pk : pks) {
+                String name = getSqlName(pk.getExpr());
+                FieldMeta field = tableMeta.getFieldMetaByName(name);
+                // guard against a PRIMARY KEY clause naming an unknown column
+                // (the original NPE'd here on malformed/unsupported DDL)
+                if (field != null) {
+                    field.setKey(true);
+                }
+            }
+        }
+    }
+
+    /**
+     * Renders an SQL name expression as an unquoted string, handling both
+     * qualified (owner.name) and simple identifiers.
+     */
+    private String getSqlName(SQLExpr sqlName) {
+        if (sqlName == null) {
+            return null;
+        }
+
+        if (sqlName instanceof SQLPropertyExpr) {
+            SQLIdentifierExpr owner = (SQLIdentifierExpr) ((SQLPropertyExpr) sqlName).getOwner();
+            return DruidDdlParser.unescapeName(owner.getName()) + "."
+                + DruidDdlParser.unescapeName(((SQLPropertyExpr) sqlName).getName());
+        } else if (sqlName instanceof SQLIdentifierExpr) {
+            return DruidDdlParser.unescapeName(((SQLIdentifierExpr) sqlName).getName());
+        } else {
+            return sqlName.toString();
+        }
+    }
+
+    public SchemaRepository getRepository() {
+        return repository;
+    }
+
+}

+ 565 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/TableMetaManager.java

@@ -0,0 +1,565 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Pattern;
+
+import javax.annotation.Resource;
+
+import com.alibaba.druid.sql.repository.Schema;
+import com.alibaba.fastjson.JSON;
+import com.alibaba.fastjson.JSONArray;
+import com.alibaba.fastjson.JSONObject;
+import com.alibaba.otter.canal.filter.CanalEventFilter;
+import com.alibaba.otter.canal.parse.exception.CanalParseException;
+import com.alibaba.otter.canal.parse.inbound.TableMeta;
+import com.alibaba.otter.canal.parse.inbound.TableMeta.FieldMeta;
+import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection;
+import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection.ProcessJdbcResult;
+import com.alibaba.otter.canal.parse.inbound.mysql.ddl.DdlResult;
+import com.alibaba.otter.canal.parse.inbound.mysql.ddl.DruidDdlParser;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaHistoryDAO;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaSnapshotDAO;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.model.MetaHistoryDO;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.model.MetaSnapshotDO;
+
+import com.taobao.tddl.dbsync.binlog.BinlogPosition;
+import org.apache.commons.beanutils.BeanUtils;
+import org.apache.commons.lang.ObjectUtils;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * 基于console远程管理
+ *
+ * see internal class: CanalTableMeta , ConsoleTableMetaTSDB
+ *
+ * @author agapple 2017年7月27日 下午10:47:55
+ * @since 3.2.5
+ */
+public class TableMetaManager implements TableMetaTSDB {
+    private static Pattern pattern = Pattern.compile("Duplicate entry '.*' for key '*'");
+
+    private static final BinlogPosition INIT_POSITION = BinlogPosition.parseFromString("0:0#-2.-1");
+    private Logger logger = LoggerFactory.getLogger(TableMetaManager.class);
+    private String consoleDomain = null;
+    private int retry = 3;
+    private MemoryTableMeta memoryTableMeta;
+    private MysqlConnection connection; // 查询meta信息的链接
+    private CanalEventFilter filter;
+    private BinlogPosition lastPosition;
+    private ScheduledExecutorService scheduler;
+
+    @Resource
+    private MetaHistoryDAO metaHistoryDAO;
+
+    @Resource
+    private MetaSnapshotDAO metaSnapshotDAO;
+
+    /**
+     * Initializes the in-memory repository and starts the periodic snapshot
+     * job. Must be called before the manager is used.
+     */
+    public void init(){
+        this.memoryTableMeta = new MemoryTableMeta(logger);
+        this.scheduler = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
+
+            @Override
+            public Thread newThread(Runnable r) {
+                return new Thread(r, "[scheduler-table-meta-snapshot]");
+            }
+        });
+
+        // produce a snapshot every 24 hours
+        scheduler.scheduleWithFixedDelay(new Runnable() {
+
+            @Override
+            public void run() {
+                try {
+                    logger.info("-------- begin to produce snapshot for table meta");
+                    applySnapshotToDB(lastPosition, false);
+                } catch (Throwable e) {
+                    logger.error("scheudle faield", e);
+                }
+            }
+        }, 24, 24, TimeUnit.HOURS); // fixed: was TimeUnit.SECONDS, which would
+                                    // insert a snapshot row every 24 seconds
+                                    // while the comment clearly intends hours
+        // NOTE(review): the scheduler is never shut down; add a destroy hook
+    }
+
+    /** Default constructor; {@link #init()} must be called before use. */
+    public TableMetaManager() {
+
+    }
+
+    /**
+     * Looks up the current table structure from the in-memory repository.
+     */
+    @Override
+    public TableMeta find(String schema, String table) {
+        // same lock object as apply(), so lookups never observe a half-applied DDL
+        synchronized (memoryTableMeta) {
+            return memoryTableMeta.find(schema, table);
+        }
+    }
+
+    /**
+     * Applies a DDL: records it in the in-memory structure first, then mirrors
+     * it to the remote history store.
+     *
+     * NOTE(review): rollback() replaces the memoryTableMeta reference while
+     * this method synchronizes on it — verify callers never race the two.
+     */
+    @Override
+    public boolean apply(BinlogPosition position, String schema, String ddl) {
+        // record into the in-memory structure first
+        synchronized (memoryTableMeta) {
+            if (memoryTableMeta.apply(position, schema, ddl)) {
+                this.lastPosition = position;
+                // then push every change to the remote store as history
+                return applyHistoryToDB(position, schema, ddl);
+            } else {
+                throw new RuntimeException("apply to memory is failed");
+            }
+        }
+    }
+
+    /**
+     * Rolls the table structures back to the given binlog position: rebuilds
+     * memory from the nearest snapshot plus history, or, on first run, dumps
+     * the live schemas and stores an initial snapshot.
+     */
+    @Override
+    public boolean rollback(BinlogPosition position) {
+        // every rollback rebuilds the in-memory data from scratch
+        this.memoryTableMeta = new MemoryTableMeta(logger);
+        boolean flag = false;
+        BinlogPosition snapshotPosition = buildMemFromSnapshot(position);
+        if (snapshotPosition != null) {
+            applyHistoryOnMemory(snapshotPosition, position);
+            flag = true;
+        }
+
+        if (!flag) {
+            // no persisted data at all: treat as initialization and dump the
+            // full structure of every table of interest
+            if (dumpTableMeta(connection, filter)) {
+                // persist the result as a snapshot for fast future recovery
+                flag = applySnapshotToDB(INIT_POSITION, true);
+            }
+        }
+
+        return flag;
+    }
+
+    /**
+     * Dumps every schema's DDL, keyed by schema name.
+     * NOTE(review): unlike find()/apply(), this does not lock memoryTableMeta;
+     * confirm whether a concurrent DDL apply can be observed mid-snapshot.
+     */
+    @Override
+    public Map<String, String> snapshot() {
+        return memoryTableMeta.snapshot();
+    }
+
+    /**
+     * Dumps the live table structures on initialization: enumerates databases
+     * and tables not excluded by the filter, then replays each table's
+     * "show create table" output into the in-memory repository.
+     *
+     * NOTE(review): assumes the JDBC connection allows multi-statement
+     * execution ("sql1;sql2;...") — confirm the connection settings.
+     */
+    private boolean dumpTableMeta(MysqlConnection connection, final CanalEventFilter filter) {
+        List<String> schemas = connection.query("show databases", new ProcessJdbcResult<List>() {
+
+            @Override
+            public List process(ResultSet rs) throws SQLException {
+                List<String> schemas = new ArrayList<String>();
+                while (rs.next()) {
+                    String schema = rs.getString(1);
+                    // keep only schemas that pass the event filter
+                    if (!filter.filter(schema)) {
+                        schemas.add(schema);
+                    }
+                }
+                return schemas;
+            }
+        });
+
+        for (String schema : schemas) {
+            List<String> tables = connection.query("show tables from `" + schema + "`", new ProcessJdbcResult<List>() {
+
+                @Override
+                public List process(ResultSet rs) throws SQLException {
+                    List<String> tables = new ArrayList<String>();
+                    while (rs.next()) {
+                        String table = rs.getString(1);
+                        if (!filter.filter(table)) {
+                            tables.add(table);
+                        }
+                    }
+                    return tables;
+                }
+            });
+
+            StringBuilder sql = new StringBuilder();
+            for (String table : tables) {
+                sql.append("show create table `" + schema + "`.`" + table + "`;");
+            }
+
+            // read all "show create table" outputs via one multi-statement call
+            Statement stmt = null;
+            try {
+                stmt = connection.getConn().createStatement();
+                ResultSet rs = stmt.executeQuery(sql.toString());
+                boolean existMoreResult = false;
+                do {
+                    if (existMoreResult) {
+                        rs = stmt.getResultSet();
+                    }
+
+                    while (rs.next()) {
+                        // column 2 of "show create table" is the CREATE statement
+                        String oneTableCreateSql = rs.getString(2);
+                        memoryTableMeta.apply(INIT_POSITION, schema, oneTableCreateSql);
+                    }
+
+                    existMoreResult = stmt.getMoreResults();
+                } while (existMoreResult);
+            } catch (SQLException e) {
+                throw new CanalParseException(e);
+            } finally {
+                // closing the Statement also closes its ResultSets
+                if (stmt != null) {
+                    try {
+                        stmt.close();
+                    } catch (SQLException e) {
+                        // ignore
+                    }
+                }
+            }
+        }
+
+        return true;
+    }
+
+    /**
+     * Persists one DDL change as a history record in the remote store.
+     * Duplicate binlog positions (unique-key violations) are ignored.
+     *
+     * NOTE(review): the retry loop returns true at the end of its first
+     * iteration regardless, so it never actually retries — confirm intent.
+     */
+    private boolean applyHistoryToDB(BinlogPosition position, String schema, String ddl) {
+
+        Map<String, String> content = new HashMap<String, String>();
+
+        content.put("binlogFile", position.getFileName());
+        content.put("binlogOffest", String.valueOf(position.getPosition()));
+        content.put("binlogMasterId", String.valueOf(position.getMasterId()));
+        content.put("binlogTimestamp", String.valueOf(position.getTimestamp()));
+        content.put("useSchema", schema);
+        if (content.isEmpty()) {
+            throw new RuntimeException("apply failed caused by content is empty in applyHistoryToDB");
+        }
+        // to be completed
+        List<DdlResult> ddlResults = DruidDdlParser.parse(schema, ddl);
+        if (ddlResults.size() > 0) {
+            DdlResult ddlResult = ddlResults.get(0);
+            content.put("schema", ddlResult.getSchemaName());
+            content.put("table", ddlResult.getTableName());
+            content.put("type", ddlResult.getType().name());
+            content.put("sql", ddl);
+            // content.put("extra", "");
+        }
+
+        for (int i = 0; i < retry; i++) {
+            MetaHistoryDO metaDO = new MetaHistoryDO();
+            try {
+                BeanUtils.populate(metaDO, content);
+                // unique constraints on the table resolve:
+                // 1. duplicate binlog file+offset
+                // 2. duplicate masterId+timestamp
+                metaHistoryDAO.insert(metaDO);
+            } catch (Throwable e) {
+                if (isUkDuplicateException(e)) {
+                    // ignore duplicate positions
+                    logger.warn("dup apply for sql : " + ddl);
+                } else {
+                    throw new RuntimeException("apply history to db failed caused by : " + e.getMessage());
+                }
+
+            }
+            return true;
+        }
+        return false;
+    }
+
+    /**
+     * Publishes a full snapshot of the in-memory table structures to the
+     * remote store, after validating each rebuilt table against the live DB.
+     *
+     * @param position binlog position the snapshot corresponds to
+     * @param init true on first-time initialization (position is INIT_POSITION)
+     */
+    private boolean applySnapshotToDB(BinlogPosition position, boolean init) {
+        // take a copy of the current structures
+        MemoryTableMeta tmpMemoryTableMeta = new MemoryTableMeta(logger);
+        Map<String, String> schemaDdls = null;
+        synchronized (memoryTableMeta) {
+            if (!init && position == null) {
+                // periodic build with no applied DDL yet: nothing to snapshot.
+                // NOTE(review): the original intent ("skip when no DDL changed")
+                // is only partially implemented — only the null case is detected
+                return false;
+            }
+            schemaDdls = memoryTableMeta.snapshot();
+            for (Map.Entry<String, String> entry : schemaDdls.entrySet()) {
+                tmpMemoryTableMeta.apply(position, entry.getKey(), entry.getValue());
+            }
+        }
+
+        // compare against the live DB using the temporary in-memory copy
+        boolean compareAll = true;
+        for (Schema schema : tmpMemoryTableMeta.getRepository().getSchemas()) {
+            for (String table : schema.showTables()) {
+                if (!compareTableMetaDbAndMemory(connection, schema.getName(), table)) {
+                    compareAll = false;
+                }
+            }
+        }
+        if (compareAll) {
+
+            Map<String, String> content = new HashMap<String, String>();
+
+            content.put("binlogFile", position.getFileName());
+            content.put("binlogOffest", String.valueOf(position.getPosition()));
+            content.put("binlogMasterId", String.valueOf(position.getMasterId()));
+            content.put("binlogTimestamp", String.valueOf(position.getTimestamp()));
+            content.put("data", JSON.toJSONString(schemaDdls));
+            if (content.isEmpty()) {
+                throw new RuntimeException("apply failed caused by content is empty in applySnapshotToDB");
+            }
+
+            for (int i = 0; i < retry; i++) {
+                MetaSnapshotDO snapshotDO = new MetaSnapshotDO();
+                try {
+                    BeanUtils.populate(snapshotDO, content);
+                    metaSnapshotDAO.insert(snapshotDO);
+                } catch (Throwable e) {
+                    if (isUkDuplicateException(e)) {
+                        // ignore duplicate positions
+                        logger.warn("dup apply snapshot for data : " + snapshotDO.getData());
+                    } else {
+                        throw new RuntimeException("apply failed caused by : " + e.getMessage());
+                    }
+                }
+                return true;
+
+            }
+            return false;
+
+        } else {
+            logger.error("compare failed , check log");
+        }
+        return false;
+    }
+
+    /**
+     * Compares one table's structure as rebuilt in memory against the live
+     * database ("desc" output). Logs both sides on mismatch.
+     */
+    private boolean compareTableMetaDbAndMemory(MysqlConnection connection, final String schema, final String table) {
+        TableMeta tableMetaFromDB = connection.query("desc " + getFullName(schema, table),
+            new ProcessJdbcResult<TableMeta>() {
+
+                @Override
+                public TableMeta process(ResultSet rs) throws SQLException {
+                    List<FieldMeta> metas = new ArrayList<FieldMeta>();
+                    while (rs.next()) {
+                        FieldMeta meta = new FieldMeta();
+                        // NOTE(review): the original comment claimed String.intern()
+                        // was used to share strings and save memory, but the code
+                        // does not intern anything
+                        meta.setColumnName(rs.getString("Field"));
+                        meta.setColumnType(rs.getString("Type"));
+                        meta.setNullable(StringUtils.equalsIgnoreCase(rs.getString("Null"), "YES"));
+                        meta.setKey("PRI".equalsIgnoreCase(rs.getString("Key")));
+                        meta.setDefaultValue(rs.getString("Default"));
+                        metas.add(meta);
+                    }
+
+                    return new TableMeta(schema, table, metas);
+                }
+            });
+
+        TableMeta tableMetaFromMem = memoryTableMeta.find(schema, table);
+        boolean result = compareTableMeta(tableMetaFromMem, tableMetaFromDB);
+        if (!result) {
+            logger.error("compare failed . \n db : " + tableMetaFromDB + " \n mem : " + tableMetaFromMem);
+        }
+
+        return result;
+    }
+
+    /**
+     * Rebuilds the in-memory table structures from the newest persisted
+     * snapshot at or before the given position.
+     *
+     * Fixed two defects in the original: (1) the null check guarded a freshly
+     * constructed JSONObject (always non-null) instead of the DAO result, and
+     * (2) the binlog fields were read from a JSON object that only contained a
+     * "content" key, so they were always null — the snapshot DO itself is now
+     * serialized and read back.
+     *
+     * @return the position the snapshot was taken at, or null when no snapshot
+     *         exists or the snapshot could not be applied
+     */
+    private BinlogPosition buildMemFromSnapshot(BinlogPosition position) {
+        for (int i = 0; i < retry; i++) {
+            try {
+                MetaSnapshotDO snapshotDO = metaSnapshotDAO.findByTimestamp(position.getTimestamp());
+                if (snapshotDO == null) {
+                    // no snapshot data at all yet
+                    return null;
+                }
+
+                // round-trip the DO through JSON so its properties can be read
+                // back uniformly as strings
+                JSONObject jsonData = JSON.parseObject(JSON.toJSONString(snapshotDO));
+                String binlogFile = jsonData.getString("binlogFile");
+                String binlogOffest = jsonData.getString("binlogOffest");
+                String binlogMasterId = jsonData.getString("binlogMasterId");
+                String binlogTimestamp = jsonData.getString("binlogTimestamp");
+
+                BinlogPosition snapshotPosition = new BinlogPosition(binlogFile,
+                    Long.valueOf(binlogOffest == null ? "0" : binlogOffest),
+                    Long.valueOf(binlogMasterId == null ? "-2" : binlogMasterId),
+                    Long.valueOf(binlogTimestamp == null ? "0" : binlogTimestamp));
+                // "data" stores Map<String,String>: one bundle of CREATE TABLE
+                // statements per schema
+                String sqlData = jsonData.getString("data");
+                JSONObject jsonObj = JSON.parseObject(sqlData);
+                for (Map.Entry entry : jsonObj.entrySet()) {
+                    // replay into memory
+                    if (!memoryTableMeta.apply(snapshotPosition,
+                        ObjectUtils.toString(entry.getKey()),
+                        ObjectUtils.toString(entry.getValue()))) {
+                        return null;
+                    }
+                }
+
+                return snapshotPosition;
+
+            } catch (Throwable e) {
+                // keep the cause; the original threw with only e.getMessage()
+                throw new RuntimeException("apply failed caused by : " + e.getMessage(), e);
+            }
+
+        }
+
+        return null;
+
+    }
+
+    /**
+     * Replays the persisted DDL history between the snapshot position and the
+     * rollback target onto the in-memory structures.
+     *
+     * @param position snapshot position the memory was rebuilt from
+     * @param rollbackPosition target position to stop at
+     * @return true when at least one history entry was replayed successfully
+     */
+    private boolean applyHistoryOnMemory(BinlogPosition position, BinlogPosition rollbackPosition) {
+        Map<String, String> content = new HashMap<String, String>();
+        content.put("binlogSnapshotTimestamp", String.valueOf(position.getTimestamp()));
+        content.put("binlogFile", rollbackPosition.getFileName());
+        content.put("binlogOffest", String.valueOf(rollbackPosition.getPosition()));
+        content.put("binlogMasterId", String.valueOf(rollbackPosition.getMasterId()));
+        content.put("binlogTimestamp", String.valueOf(rollbackPosition.getTimestamp()));
+        String timestamp = content.get("binlogTimestamp");
+        String binlogSnapshotTimestamp = content.get("binlogSnapshotTimestamp");
+
+        for (int i = 0; i < retry; i++) {
+            try {
+                List<MetaHistoryDO> metaHistoryDOList = metaHistoryDAO.findByTimestamp(
+                    Long.valueOf(binlogSnapshotTimestamp),
+                    Long.valueOf(timestamp));
+                // round-trip the DO list through JSON to read fields generically
+                JSONObject json = new JSONObject();
+                json.put("content", JSON.toJSONString(metaHistoryDOList));
+                String data = ObjectUtils.toString(json.get("content"));
+                JSONArray jsonArray = JSON.parseArray(data);
+                for (Object jsonObj : jsonArray) {
+                    JSONObject jsonData = (JSONObject)jsonObj;
+                    String binlogFile = jsonData.getString("binlogFile");
+                    String binlogOffest = jsonData.getString("binlogOffest");
+                    String binlogMasterId = jsonData.getString("binlogMasterId");
+                    String binlogTimestamp = jsonData.getString("binlogTimestamp");
+                    String useSchema = jsonData.getString("useSchema");
+                    String sqlData = jsonData.getString("sql");
+                    BinlogPosition snapshotPosition = new BinlogPosition(binlogFile,
+                        Long.valueOf(binlogOffest == null ? "0" : binlogOffest),
+                        Long.valueOf(binlogMasterId == null ? "-2" : binlogMasterId),
+                        Long.valueOf(binlogTimestamp == null ? "0" : binlogTimestamp));
+
+                    // within the same second, compare the history position with
+                    // the expected one and skip entries past the rollback target
+                    if (snapshotPosition.getTimestamp() > rollbackPosition.getTimestamp()) {
+                        continue;
+                    } else if (rollbackPosition.getMasterId() == snapshotPosition.getMasterId()
+                        && snapshotPosition.compareTo(rollbackPosition) > 0) {
+                        continue;
+                    }
+
+                    // replay into memory
+                    if (!memoryTableMeta.apply(snapshotPosition, useSchema, sqlData)) {
+                        return false;
+                    }
+
+                }
+
+                return jsonArray.size() > 0;
+            } catch (Throwable e) {
+
+                throw new RuntimeException("apply failed", e);
+
+            }
+
+        }
+
+        return false;
+    }
+
+    /** Returns the backquoted fully-qualified name: `schema`.`table`. */
+    private String getFullName(String schema, String table) {
+        StringBuilder builder = new StringBuilder();
+        return builder.append('`')
+            .append(schema)
+            .append('`')
+            .append('.')
+            .append('`')
+            .append(table)
+            .append('`')
+            .toString();
+    }
+
+    /**
+     * Field-by-field, order-sensitive, case-insensitive comparison of two
+     * table structures.
+     *
+     * Fixed: memoryTableMeta.find() can legitimately return null (table not in
+     * the memory repo), which made the original NPE here; null inputs are now
+     * treated as equal only when both sides are null.
+     */
+    private boolean compareTableMeta(TableMeta source, TableMeta target) {
+        if (source == null || target == null) {
+            return source == target;
+        }
+
+        if (!StringUtils.equalsIgnoreCase(source.getSchema(), target.getSchema())) {
+            return false;
+        }
+
+        if (!StringUtils.equalsIgnoreCase(source.getTable(), target.getTable())) {
+            return false;
+        }
+
+        List<FieldMeta> sourceFields = source.getFields();
+        List<FieldMeta> targetFields = target.getFields();
+        if (sourceFields.size() != targetFields.size()) {
+            return false;
+        }
+
+        // fields must match pairwise, in declaration order
+        for (int i = 0; i < sourceFields.size(); i++) {
+            FieldMeta sourceField = sourceFields.get(i);
+            FieldMeta targetField = targetFields.get(i);
+            if (!StringUtils.equalsIgnoreCase(sourceField.getColumnName(), targetField.getColumnName())) {
+                return false;
+            }
+
+            if (!StringUtils.equalsIgnoreCase(sourceField.getColumnType(), targetField.getColumnType())) {
+                return false;
+            }
+
+            if (!StringUtils.equalsIgnoreCase(sourceField.getDefaultValue(), targetField.getDefaultValue())) {
+                return false;
+            }
+
+            if (sourceField.isNullable() != targetField.isNullable()) {
+                return false;
+            }
+
+            if (sourceField.isKey() != targetField.isKey()) {
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    /** Connection used to query live meta information from the source MySQL. */
+    public void setConnection(MysqlConnection connection) {
+        this.connection = connection;
+    }
+
+    /** Filter deciding which schemas/tables are dumped on initialization. */
+    public void setFilter(CanalEventFilter filter) {
+        this.filter = filter;
+    }
+
+    public MetaHistoryDAO getMetaHistoryDAO() {
+        return metaHistoryDAO;
+    }
+
+    public void setMetaHistoryDAO(MetaHistoryDAO metaHistoryDAO) {
+        this.metaHistoryDAO = metaHistoryDAO;
+    }
+
+    public MetaSnapshotDAO getMetaSnapshotDAO() {
+        return metaSnapshotDAO;
+    }
+
+    public void setMetaSnapshotDAO(MetaSnapshotDAO metaSnapshotDAO) {
+        this.metaSnapshotDAO = metaSnapshotDAO;
+    }
+
+    public MysqlConnection getConnection() {
+        return connection;
+    }
+
+    public boolean isUkDuplicateException(Throwable t) {
+        if (pattern.matcher(t.getMessage()).find()) {
+            // 违反外键约束时也抛出这种异常,所以这里还要判断包含字符串Duplicate entry
+            return true;
+        }
+        return false;
+    }
+}

+ 37 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/TableMetaTSDB.java

@@ -0,0 +1,37 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb;
+
+import java.util.Map;
+
+import com.alibaba.otter.canal.parse.inbound.TableMeta;
+
+import com.taobao.tddl.dbsync.binlog.BinlogPosition;
+
+/**
+ * Time-series store for table structures.
+ *
+ * @author agapple 2017年7月27日 下午4:06:30
+ * @since 3.2.5
+ */
+public interface TableMetaTSDB {
+
+    /**
+     * Get the current structure of the given table.
+     */
+    public TableMeta find(String schema, String table);
+
+    /**
+     * Record a DDL into the time-series store at the given binlog position.
+     */
+    public boolean apply(BinlogPosition position, String schema, String ddl);
+
+    /**
+     * Roll the table structures back to the given binlog position.
+     */
+    public boolean rollback(BinlogPosition position);
+
+    /**
+     * Produce the snapshot content, keyed by schema name.
+     */
+    public Map<String/* schema */, String> snapshot();
+
+}

+ 44 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/dao/MetaHistoryDAO.java

@@ -0,0 +1,44 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao;
+
+import java.util.HashMap;
+import java.util.List;
+
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.model.MetaHistoryDO;
+
+import com.google.common.collect.Maps;
+import org.springframework.orm.ibatis.support.SqlMapClientDaoSupport;
+
+/**
+ * canal数据的存储
+ *
+ * @author wanshao 2017年7月27日 下午10:51:55
+ * @since 3.2.5
+ */
+@SuppressWarnings("deprecation")
+public class MetaHistoryDAO extends SqlMapClientDaoSupport {
+
+    public List<MetaHistoryDO> getAll() {
+        return getSqlMapClientTemplate().queryForList("table_meta_history.getAll");
+    }
+
+    public Long insert(MetaHistoryDO metaDO) {
+        return (Long)getSqlMapClientTemplate().insert("table_meta_history.insert", metaDO);
+    }
+
+    public List<MetaHistoryDO> findByTimestamp(long snapshotTimestamp, long timestamp) {
+        HashMap params = Maps.newHashMapWithExpectedSize(2);
+        params.put("snapshotTimestamp", snapshotTimestamp);
+        params.put("timestamp", timestamp);
+        return (List<MetaHistoryDO>)getSqlMapClientTemplate().queryForList("table_meta_history.findByTimestamp",
+            params);
+    }
+
+    /**
+     * 删除interval秒之前的数据
+     */
+    public Integer deleteByGmtModified(int interval) {
+        HashMap params = Maps.newHashMapWithExpectedSize(2);
+        params.put("interval", interval);
+        return getSqlMapClientTemplate().delete("table_meta_history.deleteByGmtModified", params);
+    }
+}

+ 48 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/dao/MetaSnapshotDAO.java

@@ -0,0 +1,48 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao;
+
+import java.util.HashMap;
+
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.model.MetaSnapshotDO;
+
+import com.google.common.collect.Maps;
+import org.springframework.orm.ibatis.support.SqlMapClientDaoSupport;
+
+/**
+ * canal数据的存储
+ * 
+ * @author wanshao 2017年7月27日 下午10:51:55
+ * @since 3.2.5
+ */
+
+public class MetaSnapshotDAO extends SqlMapClientDaoSupport {
+    public Long insert(MetaSnapshotDO snapshotDO) {
+        return (Long) getSqlMapClientTemplate().insert("table_meta_snapshot.insert", snapshotDO);
+    }
+
+    public Long update(MetaSnapshotDO snapshotDO) {
+        return (Long) getSqlMapClientTemplate().insert("table_meta_snapshot.update", snapshotDO);
+    }
+
+    public MetaSnapshotDO findByTimestamp(long timestamp) {
+        HashMap params = Maps.newHashMapWithExpectedSize(2);
+        params.put("timestamp", timestamp);
+        return (MetaSnapshotDO) getSqlMapClientTemplate().queryForObject("table_meta_snapshot.findByTimestamp",
+            params);
+    }
+
+    public Integer deleteByTask(String taskName) {
+        HashMap params = Maps.newHashMapWithExpectedSize(2);
+        params.put("taskName", taskName);
+        return getSqlMapClientTemplate().delete("table_meta_snapshot.deleteByTaskName", params);
+    }
+
+    /**
+     * 删除interval秒之前的数据
+     */
+    public Integer deleteByGmtModified(int interval) {
+        HashMap params = Maps.newHashMapWithExpectedSize(2);
+        params.put("interval", interval);
+        return getSqlMapClientTemplate().delete("table_meta_snapshot.deleteByGmtModified", params);
+    }
+
+}

+ 141 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/model/MetaHistoryDO.java

@@ -0,0 +1,141 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb.model;
+
+import java.util.Date;
+
+/**
+ * @author agapple 2017年7月27日 下午11:09:41
+ * @since 3.2.5
+ */
+public class MetaHistoryDO {
+
+    /**
+     * 主键
+     */
+    private Long id;
+
+    /**
+     * 创建时间
+     */
+    private Date gmtCreate;
+
+    /**
+     * 修改时间
+     */
+    private Date gmtModified;
+
+    private String binlogFile;
+    private Long binlogOffest;
+    private String binlogMasterId;
+    private Long binlogTimestamp;
+    private String useSchema;
+    private String schema;
+    private String table;
+    private String sql;
+    private String type;
+    private String extra;
+
+    public Long getId() {
+        return id;
+    }
+
+    public void setId(Long id) {
+        this.id = id;
+    }
+
+    public Date getGmtCreate() {
+        return gmtCreate;
+    }
+
+    public void setGmtCreate(Date gmtCreate) {
+        this.gmtCreate = gmtCreate;
+    }
+
+    public Date getGmtModified() {
+        return gmtModified;
+    }
+
+    public void setGmtModified(Date gmtModified) {
+        this.gmtModified = gmtModified;
+    }
+
+    public String getBinlogFile() {
+        return binlogFile;
+    }
+
+    public void setBinlogFile(String binlogFile) {
+        this.binlogFile = binlogFile;
+    }
+
+    public Long getBinlogOffest() {
+        return binlogOffest;
+    }
+
+    public void setBinlogOffest(Long binlogOffest) {
+        this.binlogOffest = binlogOffest;
+    }
+
+    public String getBinlogMasterId() {
+        return binlogMasterId;
+    }
+
+    public void setBinlogMasterId(String binlogMasterId) {
+        this.binlogMasterId = binlogMasterId;
+    }
+
+    public Long getBinlogTimestamp() {
+        return binlogTimestamp;
+    }
+
+    public void setBinlogTimestamp(Long binlogTimestamp) {
+        this.binlogTimestamp = binlogTimestamp;
+    }
+
+    public String getUseSchema() {
+        return useSchema;
+    }
+
+    public void setUseSchema(String useSchema) {
+        this.useSchema = useSchema;
+    }
+
+    public String getSchema() {
+        return schema;
+    }
+
+    public void setSchema(String schema) {
+        this.schema = schema;
+    }
+
+    public String getTable() {
+        return table;
+    }
+
+    public void setTable(String table) {
+        this.table = table;
+    }
+
+    public String getSql() {
+        return sql;
+    }
+
+    public void setSql(String sql) {
+        this.sql = sql;
+    }
+
+    public String getType() {
+        return type;
+    }
+
+    public void setType(String type) {
+        this.type = type;
+    }
+
+    public String getExtra() {
+        return extra;
+    }
+
+    public void setExtra(String extra) {
+        this.extra = extra;
+    }
+
+}

+ 105 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/model/MetaSnapshotDO.java

@@ -0,0 +1,105 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb.model;
+
+import java.util.Date;
+
+/**
+ * @author agapple 2017年7月27日 下午11:09:41
+ * @since 3.2.5
+ */
+public class MetaSnapshotDO {
+
+    /**
+     * 主键
+     */
+    private Long id;
+
+    /**
+     * 创建时间
+     */
+    private Date gmtCreate;
+
+    /**
+     * 修改时间
+     */
+    private Date gmtModified;
+
+    private String binlogFile;
+    private Long binlogOffest;
+    private String binlogMasterId;
+    private Long binlogTimestamp;
+    private String data;
+    private String extra;
+
+    public Long getId() {
+        return id;
+    }
+
+    public void setId(Long id) {
+        this.id = id;
+    }
+
+    public Date getGmtCreate() {
+        return gmtCreate;
+    }
+
+    public void setGmtCreate(Date gmtCreate) {
+        this.gmtCreate = gmtCreate;
+    }
+
+    public Date getGmtModified() {
+        return gmtModified;
+    }
+
+    public void setGmtModified(Date gmtModified) {
+        this.gmtModified = gmtModified;
+    }
+
+    public String getBinlogFile() {
+        return binlogFile;
+    }
+
+    public void setBinlogFile(String binlogFile) {
+        this.binlogFile = binlogFile;
+    }
+
+    public Long getBinlogOffest() {
+        return binlogOffest;
+    }
+
+    public void setBinlogOffest(Long binlogOffest) {
+        this.binlogOffest = binlogOffest;
+    }
+
+    public String getBinlogMasterId() {
+        return binlogMasterId;
+    }
+
+    public void setBinlogMasterId(String binlogMasterId) {
+        this.binlogMasterId = binlogMasterId;
+    }
+
+    public Long getBinlogTimestamp() {
+        return binlogTimestamp;
+    }
+
+    public void setBinlogTimestamp(Long binlogTimestamp) {
+        this.binlogTimestamp = binlogTimestamp;
+    }
+
+    public String getData() {
+        return data;
+    }
+
+    public void setData(String data) {
+        this.data = data;
+    }
+
+    public String getExtra() {
+        return extra;
+    }
+
+    public void setExtra(String extra) {
+        this.extra = extra;
+    }
+
+}

+ 42 - 0
parse/src/main/resources/dal-dao.xml

@@ -0,0 +1,42 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<beans xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xmlns="http://www.springframework.org/schema/beans" xmlns:tx="http://www.springframework.org/schema/tx"
+       xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd
+	http://www.springframework.org/schema/tx
+    http://www.springframework.org/schema/tx/spring-tx-2.0.xsd"
+       default-autowire="byName">
+    <tx:annotation-driven/>
+
+    <bean id="dataSource" class="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.DataSourceFactoryTSDB"
+          factory-method="getDataSource">
+        <constructor-arg index="0" value="${canal.instance.tsdb.address}"/>
+        <constructor-arg index="1" value="${canal.instance.tsdb.dbUsername}"/>
+        <constructor-arg index="2" value="${canal.instance.tsdb.dbPassword}"/>
+        <constructor-arg index="3" value="${canal.instance.tsdb.enable:true}"/>
+        <constructor-arg index="4" value="${canal.instance.tsdb.defaultDatabaseName}"/>
+        <constructor-arg index="5" value="${canal.instance.tsdb.url}"/>
+        <constructor-arg index="6" value="${canal.instance.tsdb.driverClassName}"/>
+    </bean>
+
+    <bean id="transactionManager" class="org.springframework.jdbc.datasource.DataSourceTransactionManager">
+        <property name="dataSource" ref="dataSource"/>
+    </bean>
+
+    <bean id="txTemplate" class="org.springframework.transaction.support.TransactionTemplate">
+        <property name="transactionManager" ref="transactionManager"></property>
+        <property name="propagationBehaviorName" value="PROPAGATION_REQUIRED"></property>
+    </bean>
+
+    <bean id="sqlMapClient" class="org.springframework.orm.ibatis.SqlMapClientFactoryBean">
+        <property name="dataSource" ref="dataSource"/>
+        <property name="configLocation" value="classpath:sqlmap-config.xml"/>
+    </bean>
+
+    <bean id="MetaHistoryDAO" class="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaHistoryDAO">
+        <property name="sqlMapClient" ref="sqlMapClient"/>
+    </bean>
+
+    <bean id="MetaSnapshotDAO" class="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaSnapshotDAO">
+        <property name="sqlMapClient" ref="sqlMapClient"/>
+    </bean>
+</beans>

+ 20 - 0
parse/src/main/resources/mybatis-config.xml

@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!DOCTYPE configuration
+        PUBLIC "-//mybatis.org//DTD Config 3.0//EN"
+        "http://mybatis.org/dtd/mybatis-3-config.dtd">
+<configuration>
+    <environments default="development">
+        <environment id="development">
+            <transactionManager type="JDBC"/>
+            <dataSource type="POOLED">
+                <property name="driver" value="com.mysql.jdbc.Driver"/>
+                <property name="url" value="jdbc:mysql://127.0.0.1:3306/tsdb"/>
+                <property name="username" value="canal"/>
+                <property name="password" value="canal"/>
+            </dataSource>
+        </environment>
+    </environments>
+    <mappers>
+        <package name="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.mapper"/>
+    </mappers>
+</configuration>

+ 57 - 0
parse/src/main/resources/sql-map/sqlmap_history.xml

@@ -0,0 +1,57 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!DOCTYPE sqlMap PUBLIC "-//ibatis.apache.org//DTD SQL Map 2.0//EN" "http://ibatis.apache.org/dtd/sql-map-2.dtd" >
+<sqlMap namespace="table_meta_history">
+
+    <typeAlias alias="metaHistoryDO" type="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.model.MetaHistoryDO"/>
+
+    <sql id="allColumns">
+        <![CDATA[
+
+		gmt_create,gmt_modified,binlog_file,binlog_offest,binlog_master_id,binlog_timestamp,use_schema,`schema`,`table`,`sql`,`type`,`extra`
+
+        ]]>
+    </sql>
+    <sql id="allVOColumns">
+        <![CDATA[
+
+		a.id as id,a.gmt_create as gmtCreate,a.gmt_modified as gmtModified,
+		a.binlog_file as binlogFile,a.binlog_offest as binlogOffest,a.binlog_master_id as binlogMasterId,a.binlog_timestamp as binlogTimestamp,
+		a.use_schema as useSchema,a.`schema` as `schema`,a.`table` as `table`,a.`sql` as `sql`,a.`type` as `type`,a.`extra` as `extra`
+
+        ]]>
+    </sql>
+
+    <select id="findByTimestamp" parameterClass="java.util.Map" resultClass="metaHistoryDO">
+        select
+        <include refid="allVOColumns"/>
+        from `canal_table_meta_history$env$` a
+        <![CDATA[
+        where binlog_timestamp >= #snapshotTimestamp# and binlog_timestamp <= #timestamp#
+        order by binlog_timestamp asc,id asc
+        ]]>
+    </select>
+
+    <insert id="insert" parameterClass="metaHistoryDO">
+        insert into `canal_table_meta_history` (<include refid="allColumns"/>)
+        values(now(),now(),#binlogFile#,#binlogOffest#,#binlogMasterId#,#binlogTimestamp#,#useSchema#,#schema#,#table#,#sql#,#type#,#extra#);
+        <selectKey resultClass="java.lang.Long" keyProperty="id">
+            SELECT last_insert_id()
+        </selectKey>
+    </insert>
+
+
+    <delete id="deleteByGmtModified" parameterClass="java.util.Map">
+        <![CDATA[
+
+		delete from `canal_table_meta_history`
+		where gmt_modified < date_sub(now(),interval #interval# second)
+
+        ]]>
+    </delete>
+
+
+    <select id="getAll" resultClass="metaHistoryDO">
+        select * from canal_table_meta_history
+    </select>
+
+</sqlMap>

+ 66 - 0
parse/src/main/resources/sql-map/sqlmap_snapshot.xml

@@ -0,0 +1,66 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!DOCTYPE sqlMap PUBLIC "-//ibatis.apache.org//DTD SQL Map 2.0//EN" "http://ibatis.apache.org/dtd/sql-map-2.dtd" >
+<sqlMap namespace="table_meta_snapshot">
+
+
+    <typeAlias alias="metaSnapshotDO" type="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.model.MetaSnapshotDO"/>
+
+
+    <typeAlias alias="tableMetaSnapshotDO"
+               type="com.alibaba.middleware.jingwei.biz.dataobject.CanalTableMetaSnapshotDO"/>
+    <sql id="allColumns">
+        <![CDATA[
+
+		gmt_create,gmt_modified,binlog_file,binlog_offest,binlog_master_id,binlog_timestamp,data,extra
+
+        ]]>
+    </sql>
+    <sql id="allVOColumns">
+        <![CDATA[
+
+		a.id as id,a.gmt_create as gmtCreate,a.gmt_modified as gmtModified,
+		a.binlog_file as binlogFile,a.binlog_offest as binlogOffest,a.binlog_master_id as binlogMaster_id,a.binlog_timestamp as binlogTimestamp,a.data as data,a.extra as extra
+
+        ]]>
+    </sql>
+
+    <select id="findByTimestamp" parameterClass="java.util.Map" resultClass="metaSnapshotDO">
+        select
+        <include refid="allVOColumns"/>
+        from `canal_table_meta_snapshot$env$` a
+        <![CDATA[
+        where  binlog_timestamp < #timestamp#
+        order by binlog_timestamp desc,id desc
+        limit 1
+        ]]>
+    </select>
+
+    <insert id="insert" parameterClass="metaSnapshotDO">
+        insert into `canal_table_meta_snapshot` (<include refid="allColumns"/>)
+        values(now(),now(),#binlogFile#,#binlogOffest#,#binlogMasterId#,#binlogTimestamp#,#data#,#extra#);
+        <selectKey resultClass="java.lang.Long" keyProperty="id">
+            SELECT last_insert_id()
+        </selectKey>
+    </insert>
+
+    <update id="update" parameterClass="metaSnapshotDO">
+        update `canal_table_meta_snapshot` set gmt_modified=now(),
+        binlog_file=#binlogFile#,binlog_offest=#binlogOffest#,binlog_master_id=#binlogMasterId#,binlog_timestamp=#binlogTimestamp#,data=#data#,extra=#extra#
+        where binlog_timestamp=0
+    </update>
+
+
+    <delete id="deleteByGmtModified" parameterClass="java.util.Map">
+        <![CDATA[
+
+		delete from `canal_table_meta_snapshot`
+		where gmt_modified < date_sub(now(),interval #interval# second)
+
+        ]]>
+    </delete>
+
+    <select id="getAll" resultClass="metaSnapshotDO">
+        select * from canal_table_meta_snapshot
+    </select>
+
+</sqlMap>

+ 9 - 0
parse/src/main/resources/sqlmap-config.xml

@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!DOCTYPE sqlMapConfig PUBLIC "-//iBATIS.com//DTD SQL Map Config 2.0//EN"
+        "http://www.ibatis.com/dtd/sql-map-config-2.dtd">
+<sqlMapConfig>
+    <settings useStatementNamespaces="true"/>
+
+    <sqlMap resource="sql-map/sqlmap_history.xml"/>
+    <sqlMap resource="sql-map/sqlmap_snapshot.xml"/>
+</sqlMapConfig>

+ 20 - 18
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/TableMetaCacheTest.java

@@ -3,31 +3,33 @@ package com.alibaba.otter.canal.parse.inbound;
 import java.io.IOException;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.InetSocketAddress;
 
 
+import com.taobao.tddl.dbsync.binlog.BinlogPosition;
 import org.junit.Assert;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.Test;
 
 
+import com.alibaba.druid.sql.visitor.functions.Bin;
 import com.alibaba.otter.canal.parse.inbound.TableMeta.FieldMeta;
 import com.alibaba.otter.canal.parse.inbound.TableMeta.FieldMeta;
 import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection;
 import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.TableMetaCache;
 import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.TableMetaCache;
 
 
 public class TableMetaCacheTest {
 public class TableMetaCacheTest {
 
 
-    @Test
-    public void testSimple() {
-
-        MysqlConnection connection = new MysqlConnection(new InetSocketAddress("127.0.0.1", 3306), "xxxxx", "xxxxx");
-        try {
-            connection.connect();
-        } catch (IOException e) {
-            Assert.fail(e.getMessage());
-        }
-
-        TableMetaCache cache = new TableMetaCache(connection);
-        TableMeta meta = cache.getTableMeta("otter1", "otter_stability1");
-        Assert.assertNotNull(meta);
-        for (FieldMeta field : meta.getFileds()) {
-            System.out.println("filed :" + field.getColumnName() + " , isKey : " + field.isKey() + " , isNull : "
-                               + field.isNullable());
-        }
-    }
+    //@Test
+    //public void testSimple() {
+    //
+    //    MysqlConnection connection = new MysqlConnection(new InetSocketAddress("127.0.0.1", 3306), "xxxxx", "xxxxx");
+    //    try {
+    //        connection.connect();
+    //    } catch (IOException e) {
+    //        Assert.fail(e.getMessage());
+    //    }
+    //
+    //    TableMetaCache cache = new TableMetaCache(connection);
+    //    TableMeta meta = cache.getTableMeta("otter1", "otter_stability1");
+    //    Assert.assertNotNull(meta);
+    //    for (FieldMeta field : meta.getFields()) {
+    //        System.out.println("filed :" + field.getColumnName() + " , isKey : " + field.isKey() + " , isNull : "
+    //                           + field.isNullable());
+    //    }
+    //}
 }
 }

+ 3 - 2
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/group/GroupEventPaserTest.java

@@ -88,8 +88,9 @@ public class GroupEventPaserTest {
     private BinlogParser buildParser(AuthenticationInfo info) {
     private BinlogParser buildParser(AuthenticationInfo info) {
         return new AbstractBinlogParser<LogEvent>() {
         return new AbstractBinlogParser<LogEvent>() {
 
 
-            public Entry parse(LogEvent event) throws CanalParseException {
-                // return _parser.parse(event);
+
+            @Override
+            public Entry parse(LogEvent event, boolean isSeek) throws CanalParseException {
                 return null;
                 return null;
             }
             }
         };
         };

+ 3 - 3
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/LocalBinlogDumpTest.java

@@ -25,11 +25,11 @@ public class LocalBinlogDumpTest {
 
 
     @Test
     @Test
     public void testSimple() {
     public void testSimple() {
-        String directory = "/home/jianghang/tmp/binlog";
+        String directory = "/Users/wanshao/projects/canal/parse/src/test/resources/binlog/tsdb";
         final LocalBinlogEventParser controller = new LocalBinlogEventParser();
         final LocalBinlogEventParser controller = new LocalBinlogEventParser();
-        final EntryPosition startPosition = new EntryPosition("mysql-bin.000006", 4L);
+        final EntryPosition startPosition = new EntryPosition("mysql-bin.000003", 123L);
 
 
-        controller.setMasterInfo(new AuthenticationInfo(new InetSocketAddress("127.0.0.1", 3306), "xxxxx", "xxxxx"));
+        controller.setMasterInfo(new AuthenticationInfo(new InetSocketAddress("127.0.0.1", 3306), "canal", "canal"));
         controller.setConnectionCharset(Charset.forName("UTF-8"));
         controller.setConnectionCharset(Charset.forName("UTF-8"));
         controller.setDirectory(directory);
         controller.setDirectory(directory);
         controller.setMasterPosition(startPosition);
         controller.setMasterPosition(startPosition);

+ 8 - 8
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/LocalBinlogEventParserTest.java

@@ -23,15 +23,15 @@ import com.alibaba.otter.canal.sink.exception.CanalSinkException;
 public class LocalBinlogEventParserTest {
 public class LocalBinlogEventParserTest {
 
 
     private static final String MYSQL_ADDRESS = "127.0.0.1";
     private static final String MYSQL_ADDRESS = "127.0.0.1";
-    private static final String USERNAME      = "xxxxx";
-    private static final String PASSWORD      = "xxxxx";
+    private static final String USERNAME      = "canal";
+    private static final String PASSWORD      = "canal";
     private String              directory;
     private String              directory;
 
 
     @Before
     @Before
     public void setUp() {
     public void setUp() {
         URL url = Thread.currentThread().getContextClassLoader().getResource("dummy.txt");
         URL url = Thread.currentThread().getContextClassLoader().getResource("dummy.txt");
         File dummyFile = new File(url.getFile());
         File dummyFile = new File(url.getFile());
-        directory = new File(dummyFile.getParent() + "/binlog").getPath();
+        directory = new File("/Users/wanshao/projects/canal/parse/src/test/resources/binlog/tsdb").getPath();
     }
     }
 
 
     @Test
     @Test
@@ -40,7 +40,7 @@ public class LocalBinlogEventParserTest {
         final AtomicLong entryCount = new AtomicLong(0);
         final AtomicLong entryCount = new AtomicLong(0);
         final EntryPosition entryPosition = new EntryPosition();
         final EntryPosition entryPosition = new EntryPosition();
 
 
-        final EntryPosition defaultPosition = buildPosition("mysql-bin.000001", 6163L, 1322803601000L);
+        final EntryPosition defaultPosition = buildPosition("mysql-bin.000003", 219L, 1505467103000L);
         final LocalBinlogEventParser controller = new LocalBinlogEventParser();
         final LocalBinlogEventParser controller = new LocalBinlogEventParser();
         controller.setMasterPosition(defaultPosition);
         controller.setMasterPosition(defaultPosition);
         controller.setMasterInfo(buildAuthentication());
         controller.setMasterInfo(buildAuthentication());
@@ -103,7 +103,7 @@ public class LocalBinlogEventParserTest {
         final AtomicLong entryCount = new AtomicLong(0);
         final AtomicLong entryCount = new AtomicLong(0);
         final EntryPosition entryPosition = new EntryPosition();
         final EntryPosition entryPosition = new EntryPosition();
 
 
-        final EntryPosition defaultPosition = buildPosition("mysql-bin.000001", null, 1322803601000L);
+        final EntryPosition defaultPosition = buildPosition("mysql-bin.000003", null, 1505467103000L);
         final LocalBinlogEventParser controller = new LocalBinlogEventParser();
         final LocalBinlogEventParser controller = new LocalBinlogEventParser();
         controller.setMasterPosition(defaultPosition);
         controller.setMasterPosition(defaultPosition);
         controller.setMasterInfo(buildAuthentication());
         controller.setMasterInfo(buildAuthentication());
@@ -156,15 +156,15 @@ public class LocalBinlogEventParserTest {
         Assert.assertTrue(entryCount.get() > 0);
         Assert.assertTrue(entryCount.get() > 0);
 
 
         // 对比第一条数据和起始的position相同
         // 对比第一条数据和起始的position相同
-        Assert.assertEquals(entryPosition.getJournalName(), "mysql-bin.000001");
-        Assert.assertTrue(entryPosition.getPosition() <= 6163L);
+        Assert.assertEquals(entryPosition.getJournalName(), "mysql-bin.000003");
+        Assert.assertTrue(entryPosition.getPosition() <= 300L);
         Assert.assertTrue(entryPosition.getTimestamp() <= defaultPosition.getTimestamp());
         Assert.assertTrue(entryPosition.getTimestamp() <= defaultPosition.getTimestamp());
     }
     }
 
 
     @Test
     @Test
     public void test_no_position() throws InterruptedException {
     public void test_no_position() throws InterruptedException {
         final TimeoutChecker timeoutChecker = new TimeoutChecker(3 * 1000);
         final TimeoutChecker timeoutChecker = new TimeoutChecker(3 * 1000);
-        final EntryPosition defaultPosition = buildPosition("mysql-bin.000002",
+        final EntryPosition defaultPosition = buildPosition("mysql-bin.000003",
             null,
             null,
             new Date().getTime() + 1000 * 1000L);
             new Date().getTime() + 1000 * 1000L);
         final AtomicLong entryCount = new AtomicLong(0);
         final AtomicLong entryCount = new AtomicLong(0);

+ 2 - 2
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlDumpTest.java

@@ -26,12 +26,12 @@ public class MysqlDumpTest {
     @Test
     @Test
     public void testSimple() {
     public void testSimple() {
         final MysqlEventParser controller = new MysqlEventParser();
         final MysqlEventParser controller = new MysqlEventParser();
-        final EntryPosition startPosition = new EntryPosition("mysql-bin.000003", 4L);
+        final EntryPosition startPosition = new EntryPosition("mysql-bin.000003", 123L);
 
 
         controller.setConnectionCharset(Charset.forName("UTF-8"));
         controller.setConnectionCharset(Charset.forName("UTF-8"));
         controller.setSlaveId(3344L);
         controller.setSlaveId(3344L);
         controller.setDetectingEnable(false);
         controller.setDetectingEnable(false);
-        controller.setMasterInfo(new AuthenticationInfo(new InetSocketAddress("127.0.0.1", 3306), "xxxxx", "xxxxx"));
+        controller.setMasterInfo(new AuthenticationInfo(new InetSocketAddress("127.0.0.1", 3306), "canal", "canal"));
         controller.setMasterPosition(startPosition);
         controller.setMasterPosition(startPosition);
         controller.setEventSink(new AbstractCanalEventSinkTest<List<Entry>>() {
         controller.setEventSink(new AbstractCanalEventSinkTest<List<Entry>>() {
 
 

+ 3 - 3
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlEventParserTest.java

@@ -24,8 +24,8 @@ public class MysqlEventParserTest {
 
 
     private static final String DETECTING_SQL = "insert into retl.xdual values(1,now()) on duplicate key update x=now()";
     private static final String DETECTING_SQL = "insert into retl.xdual values(1,now()) on duplicate key update x=now()";
     private static final String MYSQL_ADDRESS = "127.0.0.1";
     private static final String MYSQL_ADDRESS = "127.0.0.1";
-    private static final String USERNAME      = "root";
-    private static final String PASSWORD      = "xxxxxx";
+    private static final String USERNAME      = "canal";
+    private static final String PASSWORD      = "canal";
 
 
     @Test
     @Test
     public void test_position() throws InterruptedException {
     public void test_position() throws InterruptedException {
@@ -34,7 +34,7 @@ public class MysqlEventParserTest {
         final EntryPosition entryPosition = new EntryPosition();
         final EntryPosition entryPosition = new EntryPosition();
 
 
         final MysqlEventParser controller = new MysqlEventParser();
         final MysqlEventParser controller = new MysqlEventParser();
-        final EntryPosition defaultPosition = buildPosition("mysql-bin.000001", 6163L, 1322803601000L);
+        final EntryPosition defaultPosition = buildPosition("mysql-bin.000003", 4690L, 1505481064000L);
 
 
         controller.setSlaveId(3344L);
         controller.setSlaveId(3344L);
         controller.setDetectingEnable(true);
         controller.setDetectingEnable(true);

+ 38 - 0
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/MemoryTableMetaTest.java

@@ -0,0 +1,38 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.net.URL;
+
+import com.alibaba.otter.canal.parse.inbound.TableMeta;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang.StringUtils;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
+
+/**
+ * @author agapple 2017年8月1日 下午7:15:54
+ */
+@RunWith(SpringJUnit4ClassRunner.class)
+@ContextConfiguration(locations = {"/dal-dao.xml"})
+public class MemoryTableMetaTest {
+
+    @Test
+    public void testSimple() throws Throwable {
+        MemoryTableMeta memoryTableMeta = new MemoryTableMeta(null);
+        URL url = Thread.currentThread().getContextClassLoader().getResource("dummy.txt");
+        File dummyFile = new File(url.getFile());
+        File create = new File(dummyFile.getParent() + "/ddl", "create.sql");
+        String sql = StringUtils.join(IOUtils.readLines(new FileInputStream(create)), "\n");
+        memoryTableMeta.apply(null, "test", sql);
+
+        TableMeta meta = memoryTableMeta.find("test", "test");
+        System.out.println(meta);
+        Assert.assertTrue( meta.getFieldMetaByName("ID").isKey());
+    }
+}

+ 36 - 0
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/MetaHistoryDAOTest.java

@@ -0,0 +1,36 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb;
+
+import java.util.List;
+
+import javax.annotation.Resource;
+
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaHistoryDAO;
+import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.model.MetaHistoryDO;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
+
+/**
+ * Created by wanshao
+ * Date: 2017/9/20
+ * Time: 下午5:00
+ **/
+@RunWith(SpringJUnit4ClassRunner.class)
+@ContextConfiguration(locations = {"/dal-dao.xml"})
+public class MetaHistoryDAOTest {
+
+    @Resource
+    MetaHistoryDAO metaHistoryDAO;
+
+    @Test
+    public void testGetAll() {
+        List<MetaHistoryDO>
+            metaHistoryDOList = metaHistoryDAO.getAll();
+        for (MetaHistoryDO metaHistoryDO : metaHistoryDOList) {
+            System.out.println(metaHistoryDO.getId());
+        }
+    }
+
+}

+ 47 - 0
parse/src/test/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/TableMetaManagerTest.java

@@ -0,0 +1,47 @@
+package com.alibaba.otter.canal.parse.inbound.mysql.tsdb;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URL;
+
+import javax.annotation.Resource;
+
+import com.taobao.tddl.dbsync.binlog.BinlogPosition;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang.StringUtils;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
+
+/**
+ * @author wanshao 2017年8月2日 下午4:11:45
+ * @since 3.2.5
+ */
+@RunWith(SpringJUnit4ClassRunner.class)
+@ContextConfiguration(locations = { "/dal-dao.xml" })
+public class TableMetaManagerTest {
+
+    @Resource
+    TableMetaManager tableMetaManager;
+
+    @Test
+    public void testSimple() throws FileNotFoundException, IOException {
+
+
+
+        URL url = Thread.currentThread().getContextClassLoader().getResource("dummy.txt");
+        File dummyFile = new File(url.getFile());
+        File create = new File(dummyFile.getParent() + "/ddl", "create.sql");
+        BinlogPosition position = BinlogPosition.parseFromString("001115:0139177334#3065927853.1501660815000");
+        String createSql = StringUtils.join(IOUtils.readLines(new FileInputStream(create)), "\n");
+        tableMetaManager.apply(position, "tddl5_00", createSql);
+
+        String alterSql = "alter table `test` add column name varchar(32) after c_varchar";
+        position = BinlogPosition.parseFromString("001115:0139177334#3065927853.1501660816000");
+        tableMetaManager.apply(position, "tddl5_00", alterSql);
+
+    }
+}

二進制
parse/src/test/resources/binlog/tsdb/mysql-bin.000001


二進制
parse/src/test/resources/binlog/tsdb/mysql-bin.000002


二進制
parse/src/test/resources/binlog/tsdb/mysql-bin.000003


+ 46 - 0
parse/src/test/resources/dal-dao.xml

@@ -0,0 +1,46 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<beans xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xmlns="http://www.springframework.org/schema/beans" xmlns:tx="http://www.springframework.org/schema/tx"
+       xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd
+	http://www.springframework.org/schema/tx
+    http://www.springframework.org/schema/tx/spring-tx-2.0.xsd"
+       default-autowire="byName">
+    <tx:annotation-driven/>
+
+    <bean id="dataSource" class="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.DataSourceFactoryTSDB"
+          factory-method="getDataSource">
+        <constructor-arg index="0" value="127.0.0.1"/>
+        <constructor-arg index="1" value="canal"/>
+        <constructor-arg index="2" value="canal"/>
+        <constructor-arg index="3" value="true"/>
+        <constructor-arg index="4" value="tsdb"/>
+        <constructor-arg index="5" value="jdbc:mysql://127.0.0.1:3306/tsdb"/>
+        <constructor-arg index="6" value="com.mysql.jdbc.Driver"/>
+    </bean>
+
+    <bean id="transactionManager" class="org.springframework.jdbc.datasource.DataSourceTransactionManager">
+        <property name="dataSource" ref="dataSource"/>
+    </bean>
+
+    <bean id="txTemplate" class="org.springframework.transaction.support.TransactionTemplate">
+        <property name="transactionManager" ref="transactionManager"></property>
+        <property name="propagationBehaviorName" value="PROPAGATION_REQUIRED"></property>
+    </bean>
+
+    <bean id="sqlMapClient" class="org.springframework.orm.ibatis.SqlMapClientFactoryBean">
+        <property name="dataSource" ref="dataSource"/>
+        <property name="configLocation" value="classpath:sqlmap-config.xml"/>
+    </bean>
+
+
+    <bean id="MetaHistoryDAO" class="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaHistoryDAO">
+        <property name="sqlMapClient" ref="sqlMapClient"/>
+    </bean>
+
+    <bean id="MetaSnapshotDAO" class="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaSnapshotDAO">
+        <property name="sqlMapClient" ref="sqlMapClient"/>
+    </bean>
+
+    <bean id="PersistTableMeta" class="com.alibaba.otter.canal.parse.inbound.mysql.tsdb.TableMetaManager"></bean>
+
+</beans>

+ 23 - 0
parse/src/test/resources/ddl/create.sql

@@ -0,0 +1,23 @@
+ CREATE TABLE `test` (
+  `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
+  `c_tinyint` tinyint(4) DEFAULT '1' COMMENT 'tinyint',
+  `c_smallint` smallint(6) DEFAULT 0 COMMENT 'smallint',
+  `c_mediumint` mediumint(9) DEFAULT NULL COMMENT 'mediumint',
+  `c_int` int(11) DEFAULT NULL COMMENT 'int',
+  `c_bigint` bigint(20) DEFAULT NULL COMMENT 'bigint',
+  `c_decimal` decimal(10,3) DEFAULT NULL COMMENT 'decimal',
+  `c_date` date DEFAULT '0000-00-00' COMMENT 'date',
+  `c_datetime` datetime DEFAULT '0000-00-00 00:00:00' COMMENT 'datetime',
+  `c_timestamp` timestamp NULL DEFAULT NULL COMMENT 'timestamp',
+  `c_time` time DEFAULT NULL COMMENT 'time',
+  `c_char` char(10) DEFAULT NULL COMMENT 'char',
+  `c_varchar` varchar(10) DEFAULT 'hello' COMMENT 'varchar',
+  `c_blob` blob COMMENT 'blob',
+  `c_text` text COMMENT 'text',
+  `c_mediumtext` mediumtext COMMENT 'mediumtext',
+  `c_longblob` longblob COMMENT 'longblob',
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `uk_a` (`c_tinyint`),
+  KEY `k_b` (`c_smallint`),
+  KEY `k_c` (`c_mediumint`,`c_int`)
+) ENGINE=InnoDB AUTO_INCREMENT=1769503 DEFAULT CHARSET=utf8mb4 COMMENT='10000000';

+ 17 - 1
pom.xml

@@ -135,6 +135,12 @@
                 <artifactId>spring</artifactId>
                 <artifactId>spring</artifactId>
                 <version>2.5.6</version>
                 <version>2.5.6</version>
             </dependency>
             </dependency>
+            <dependency>
+                <groupId>org.springframework</groupId>
+                <artifactId>spring-test</artifactId>
+                <version>2.5.6</version>
+                <scope>test</scope>
+            </dependency>
             <!-- external -->
             <!-- external -->
             <dependency>
             <dependency>
                 <groupId>commons-lang</groupId>
                 <groupId>commons-lang</groupId>
@@ -204,6 +210,16 @@
                 <artifactId>protobuf-java</artifactId>
                 <artifactId>protobuf-java</artifactId>
                 <version>2.6.1</version>
                 <version>2.6.1</version>
             </dependency>
             </dependency>
+            <dependency>
+                <groupId>org.apache.ibatis</groupId>
+                <artifactId>ibatis-sqlmap</artifactId>
+                <version>2.3.4.726</version>
+            </dependency>
+            <dependency>
+                <groupId>com.alibaba</groupId>
+                <artifactId>druid</artifactId>
+                <version>1.1.3</version>
+            </dependency>
             <!-- log -->
             <!-- log -->
             <dependency>
             <dependency>
                 <groupId>ch.qos.logback</groupId>
                 <groupId>ch.qos.logback</groupId>
@@ -236,7 +252,7 @@
                 <groupId>mysql</groupId>
                 <groupId>mysql</groupId>
                 <artifactId>mysql-connector-java</artifactId>
                 <artifactId>mysql-connector-java</artifactId>
                 <version>5.1.33</version>
                 <version>5.1.33</version>
-                <scope>test</scope>
+                <!--<scope>test</scope>-->
             </dependency>
             </dependency>
         </dependencies>
         </dependencies>
     </dependencyManagement>
     </dependencyManagement>

+ 4 - 4
server/src/test/java/com/alibaba/otter/canal/server/BaseCanalServerWithEmbededTest.java

@@ -19,12 +19,12 @@ import com.alibaba.otter.canal.server.embedded.CanalServerWithEmbedded;
 public abstract class BaseCanalServerWithEmbededTest {
 public abstract class BaseCanalServerWithEmbededTest {
 
 
     protected static final String   cluster1       = "127.0.0.1:2188";
     protected static final String   cluster1       = "127.0.0.1:2188";
-    protected static final String   DESTINATION    = "ljhtest1";
+    protected static final String   DESTINATION    = "example";
     protected static final String   DETECTING_SQL  = "insert into retl.xdual values(1,now()) on duplicate key update x=now()";
     protected static final String   DETECTING_SQL  = "insert into retl.xdual values(1,now()) on duplicate key update x=now()";
     protected static final String   MYSQL_ADDRESS  = "127.0.0.1";
     protected static final String   MYSQL_ADDRESS  = "127.0.0.1";
-    protected static final String   USERNAME       = "retl";
-    protected static final String   PASSWORD       = "retl";
-    protected static final String   FILTER         = "retl\\..*,erosa.zk_complaint_bizdata";
+    protected static final String   USERNAME       = "canal";
+    protected static final String   PASSWORD       = "canal";
+    protected static final String   FILTER         = ".*\\\\..*";
 
 
     private CanalServerWithEmbedded server;
     private CanalServerWithEmbedded server;
     private ClientIdentity          clientIdentity = new ClientIdentity(DESTINATION, (short) 1);                               ;
     private ClientIdentity          clientIdentity = new ClientIdentity(DESTINATION, (short) 1);                               ;

+ 2 - 2
server/src/test/java/com/alibaba/otter/canal/server/CanalServerWithEmbedded_StandaloneTest.java

@@ -34,8 +34,8 @@ public class CanalServerWithEmbedded_StandaloneTest extends BaseCanalServerWithE
             new InetSocketAddress(MYSQL_ADDRESS, 3306)));
             new InetSocketAddress(MYSQL_ADDRESS, 3306)));
         parameter.setDbUsername(USERNAME);
         parameter.setDbUsername(USERNAME);
         parameter.setDbPassword(PASSWORD);
         parameter.setDbPassword(PASSWORD);
-        parameter.setPositions(Arrays.asList("{\"journalName\":\"mysql-bin.000001\",\"position\":6163L,\"timestamp\":1322803601000L}",
-            "{\"journalName\":\"mysql-bin.000001\",\"position\":6163L,\"timestamp\":1322803601000L}"));
+        parameter.setPositions(Arrays.asList("{\"journalName\":\"mysql-bin.000003\",\"position\":14217L,\"timestamp\":\"1505998863000\"}",
+            "{\"journalName\":\"mysql-bin.000003\",\"position\":14377L,\"timestamp\":\"1505998863000\"}"));
 
 
         parameter.setSlaveId(1234L);
         parameter.setSlaveId(1234L);