
rdb: full-schema (mirror DB) sync

mcy · 6 years ago
commit 24d898d87a

+ 3 - 1
client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/AbstractCanalAdapterWorker.java

@@ -70,6 +70,7 @@ public abstract class AbstractCanalAdapterWorker {
                     });
                     return true;
                 } catch (Exception e) {
+                    logger.error(e.getMessage(), e);
                     return false;
                 }
             }));
@@ -108,6 +109,7 @@ public abstract class AbstractCanalAdapterWorker {
                     });
                     return true;
                 } catch (Exception e) {
+                    logger.error(e.getMessage(), e);
                     return false;
                 }
             }));
@@ -178,7 +180,7 @@ public abstract class AbstractCanalAdapterWorker {
 
     /**
      * Sync in batches
-     * 
+     *
      * @param dmls
      * @param adapter
      */

+ 10 - 12
client-adapter/launcher/src/main/resources/application.yml

@@ -15,7 +15,7 @@ spring:
 canal.conf:
   canalServerHost: 127.0.0.1:11111
 #  zookeeperHosts: slave1:2181
-#  mqServers: slave1:6667 #or rocketmq
+#  mqServers: 127.0.0.1:9092 #or rocketmq
 #  flatMessage: true
   batchSize: 500
   syncBatchSize: 1000
@@ -34,7 +34,7 @@ canal.conf:
     groups:
     - groupId: g1
       outerAdapters:
-      - name: logger
+#      - name: logger
 #      - name: rdb
 #        key: oracle1
 #        properties:
@@ -42,15 +42,13 @@ canal.conf:
 #          jdbc.url: jdbc:oracle:thin:@localhost:49161:XE
 #          jdbc.username: mytest
 #          jdbc.password: m121212
-#      - name: rdb
-#        key: postgres1
-#        properties:
-#          jdbc.driverClassName: org.postgresql.Driver
-#          jdbc.url: jdbc:postgresql://localhost:5432/postgres
-#          jdbc.username: postgres
-#          jdbc.password: 121212
-#          threads: 1
-#          commitSize: 3000
+      - name: rdb
+        key: mysql1
+        properties:
+          jdbc.driverClassName: com.mysql.jdbc.Driver
+          jdbc.url: jdbc:mysql://192.168.100.36/mytest?useUnicode=true
+          jdbc.username: root
+          jdbc.password: Ambari-123
 #      - name: hbase
 #        properties:
 #          hbase.zookeeper.quorum: 127.0.0.1
@@ -59,4 +57,4 @@ canal.conf:
 #      - name: es
 #        hosts: 127.0.0.1:9300
 #        properties:
-#          cluster.name: elasticsearch
+#          cluster.name: elasticsearch

+ 6 - 1
client-adapter/rdb/pom.xml

@@ -24,6 +24,11 @@
             <version>1.19</version>
             <scope>provided</scope>
         </dependency>
+        <dependency>
+            <groupId>com.alibaba.fastsql</groupId>
+            <artifactId>fastsql</artifactId>
+            <version>2.0.0_preview_644</version>
+        </dependency>
 
         <dependency>
             <groupId>mysql</groupId>
@@ -100,4 +105,4 @@
             </plugin>
         </plugins>
     </build>
-</project>
+</project>

+ 28 - 13
client-adapter/rdb/src/main/java/com/alibaba/otter/canal/client/adapter/rdb/RdbAdapter.java

@@ -2,10 +2,10 @@ package com.alibaba.otter.canal.client.adapter.rdb;
 
 import java.sql.Connection;
 import java.sql.SQLException;
-import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 
@@ -21,22 +21,26 @@ import com.alibaba.otter.canal.client.adapter.rdb.config.ConfigLoader;
 import com.alibaba.otter.canal.client.adapter.rdb.config.MappingConfig;
 import com.alibaba.otter.canal.client.adapter.rdb.monitor.RdbConfigMonitor;
 import com.alibaba.otter.canal.client.adapter.rdb.service.RdbEtlService;
+import com.alibaba.otter.canal.client.adapter.rdb.service.RdbMirrorDbSyncService;
 import com.alibaba.otter.canal.client.adapter.rdb.service.RdbSyncService;
+import com.alibaba.otter.canal.client.adapter.rdb.support.SyncUtil;
 import com.alibaba.otter.canal.client.adapter.support.*;
 
 @SPI("rdb")
 public class RdbAdapter implements OuterAdapter {
 
-    private static Logger                           logger             = LoggerFactory.getLogger(RdbAdapter.class);
+    private static Logger                           logger              = LoggerFactory.getLogger(RdbAdapter.class);
 
-    private Map<String, MappingConfig>              rdbMapping         = new HashMap<>();                          // config keyed by file name
-    private Map<String, Map<String, MappingConfig>> mappingConfigCache = new HashMap<>();                          // config keyed by database.table
+    private Map<String, MappingConfig>              rdbMapping          = new ConcurrentHashMap<>();                // config keyed by file name
+    private Map<String, Map<String, MappingConfig>> mappingConfigCache  = new ConcurrentHashMap<>();                // config keyed by database.table
+    private Map<String, MappingConfig>              mirrorDbConfigCache = new ConcurrentHashMap<>();                // mirror DB config
 
     private DruidDataSource                         dataSource;
 
     private RdbSyncService                          rdbSyncService;
+    private RdbMirrorDbSyncService                  rdbMirrorDbSyncService;
 
-    private ExecutorService                         executor           = Executors.newFixedThreadPool(1);
+    private ExecutorService                         executor            = Executors.newFixedThreadPool(1);
 
     private RdbConfigMonitor                        rdbConfigMonitor;
 
@@ -62,12 +66,18 @@ public class RdbAdapter implements OuterAdapter {
         for (Map.Entry<String, MappingConfig> entry : rdbMapping.entrySet()) {
             String configName = entry.getKey();
             MappingConfig mappingConfig = entry.getValue();
-            Map<String, MappingConfig> configMap = mappingConfigCache
-                .computeIfAbsent(StringUtils.trimToEmpty(mappingConfig.getDestination()) + "."
-                                 + mappingConfig.getDbMapping().getDatabase() + "."
-                                 + mappingConfig.getDbMapping().getTable(),
-                    k1 -> new HashMap<>());
-            configMap.put(configName, mappingConfig);
+            if (!mappingConfig.getDbMapping().isMirrorDb()) {
+                Map<String, MappingConfig> configMap = mappingConfigCache.computeIfAbsent(
+                    StringUtils.trimToEmpty(mappingConfig.getDestination()) + "." + mappingConfig.getDbMapping()
+                        .getDatabase() + "." + mappingConfig.getDbMapping().getTable(),
+                    k1 -> new ConcurrentHashMap<>());
+                configMap.put(configName, mappingConfig);
+            } else {
+                // mirrorDB
+                mirrorDbConfigCache.put(StringUtils.trimToEmpty(mappingConfig.getDestination()) + "."
+                                        + mappingConfig.getDbMapping().getDatabase(),
+                    mappingConfig);
+            }
         }
 
         Map<String, String> properties = configuration.getProperties();
@@ -96,6 +106,10 @@ public class RdbAdapter implements OuterAdapter {
             dataSource,
             threads != null ? Integer.valueOf(threads) : null);
 
+        rdbMirrorDbSyncService = new RdbMirrorDbSyncService(mirrorDbConfigCache,
+            dataSource,
+            threads != null ? Integer.valueOf(threads) : null);
+
         rdbConfigMonitor = new RdbConfigMonitor();
         rdbConfigMonitor.init(configuration.getKey(), this);
     }
@@ -103,6 +117,7 @@ public class RdbAdapter implements OuterAdapter {
     @Override
     public void sync(List<Dml> dmls) {
         rdbSyncService.sync(dmls);
+        rdbMirrorDbSyncService.sync(dmls);
     }
 
     @Override
@@ -157,7 +172,7 @@ public class RdbAdapter implements OuterAdapter {
     public Map<String, Object> count(String task) {
         MappingConfig config = rdbMapping.get(task);
         MappingConfig.DbMapping dbMapping = config.getDbMapping();
-        String sql = "SELECT COUNT(1) AS cnt FROM " + dbMapping.getTargetTable();
+        String sql = "SELECT COUNT(1) AS cnt FROM " + SyncUtil.dbTable(dbMapping);
         Connection conn = null;
         Map<String, Object> res = new LinkedHashMap<>();
         try {
@@ -183,7 +198,7 @@ public class RdbAdapter implements OuterAdapter {
                 }
             }
         }
-        res.put("targetTable", dbMapping.getTargetTable());
+        res.put("targetTable", SyncUtil.dbTable(dbMapping));
 
         return res;
     }
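
For orientation, a minimal sketch (not part of the patch, class name hypothetical) of the cache-key scheme used above: table-level mappings go into mappingConfigCache under "destination.database.table", while mirror-DB mappings go into mirrorDbConfigCache under "destination.database" — the same key RdbMirrorDbSyncService.sync() later rebuilds from each Dml.

    import org.apache.commons.lang.StringUtils;

    public class CacheKeySketch {

        public static void main(String[] args) {
            // Hypothetical sample values; in RdbAdapter they come from each MappingConfig.
            String destination = "example";
            String database = "mytest";
            String table = "user";

            // Table-level mapping -> mappingConfigCache
            String tableKey = StringUtils.trimToEmpty(destination) + "." + database + "." + table;
            // Mirror-DB mapping -> mirrorDbConfigCache (no table component); the same key is
            // rebuilt from each Dml in RdbMirrorDbSyncService.sync().
            String mirrorKey = StringUtils.trimToEmpty(destination) + "." + database;

            System.out.println(tableKey);  // example.mytest.user
            System.out.println(mirrorKey); // example.mytest
        }
    }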

+ 29 - 32
client-adapter/rdb/src/main/java/com/alibaba/otter/canal/client/adapter/rdb/config/MappingConfig.java

@@ -1,8 +1,6 @@
 package com.alibaba.otter.canal.client.adapter.rdb.config;
 
-import java.util.LinkedHashSet;
 import java.util.Map;
-import java.util.Set;
 
 /**
  * RDB table mapping configuration
@@ -66,30 +64,37 @@ public class MappingConfig {
         if (dbMapping.database == null || dbMapping.database.isEmpty()) {
             throw new NullPointerException("dbMapping.database");
         }
-        if (dbMapping.table == null || dbMapping.table.isEmpty()) {
+        if (!dbMapping.isMirrorDb() && (dbMapping.table == null || dbMapping.table.isEmpty())) {
             throw new NullPointerException("dbMapping.table");
         }
-        if (dbMapping.targetTable == null || dbMapping.targetTable.isEmpty()) {
+        if (!dbMapping.isMirrorDb() && (dbMapping.targetTable == null || dbMapping.targetTable.isEmpty())) {
             throw new NullPointerException("dbMapping.targetTable");
         }
     }
 
     public static class DbMapping {
 
-        private String              database;                            // database or schema name
-        private String              table;                               // table name
-        private Map<String, String> targetPk;                            // target table primary key columns
-        private boolean             mapAll      = false;                 // map all columns
-        private String              targetTable;                         // target table name
-        private Map<String, String> targetColumns;                       // target table column mapping
+        private Boolean             mirrorDb    = false; // whether this is a mirror DB
+        private String              database;            // database or schema name
+        private String              table;               // table name
+        private Map<String, String> targetPk;            // target table primary key columns
+        private Boolean             mapAll      = false; // map all columns
+        private String              targetDb;            // target database name
+        private String              targetTable;         // target table name
+        private Map<String, String> targetColumns;       // target table column mapping

-        private String              etlCondition;                        // etl condition SQL
+        private String              etlCondition;        // etl condition SQL

-        private Set<String>         families    = new LinkedHashSet<>(); // column family list
         private int                 readBatch   = 5000;
-        private int                 commitBatch = 5000;                  // batch commit size for etl, etc.
+        private int                 commitBatch = 5000;  // batch commit size for etl, etc.

-        // private volatile Map<String, String> allColumns; // set automatically when mapAll is true
+        public boolean isMirrorDb() {
+            return mirrorDb == null ? false : mirrorDb;
+        }
+
+        public void setMirrorDb(Boolean mirrorDb) {
+            this.mirrorDb = mirrorDb;
+        }
 
         public String getDatabase() {
             return database;
@@ -116,13 +121,21 @@ public class MappingConfig {
         }
 
         public boolean isMapAll() {
-            return mapAll;
+            return mapAll == null ? false : mapAll;
         }
 
-        public void setMapAll(boolean mapAll) {
+        public void setMapAll(Boolean mapAll) {
             this.mapAll = mapAll;
         }
 
+        public String getTargetDb() {
+            return targetDb;
+        }
+
+        public void setTargetDb(String targetDb) {
+            this.targetDb = targetDb;
+        }
+
         public String getTargetTable() {
             return targetTable;
         }
@@ -147,14 +160,6 @@ public class MappingConfig {
             this.etlCondition = etlCondition;
         }
 
-        public Set<String> getFamilies() {
-            return families;
-        }
-
-        public void setFamilies(Set<String> families) {
-            this.families = families;
-        }
-
         public int getReadBatch() {
             return readBatch;
         }
@@ -170,13 +175,5 @@ public class MappingConfig {
         public void setCommitBatch(int commitBatch) {
             this.commitBatch = commitBatch;
         }
-
-        // public Map<String, String> getAllColumns() {
-        // return allColumns;
-        // }
-        //
-        // public void setAllColumns(Map<String, String> allColumns) {
-        // this.allColumns = allColumns;
-        // }
     }
 }

+ 3 - 3
client-adapter/rdb/src/main/java/com/alibaba/otter/canal/client/adapter/rdb/service/RdbEtlService.java

@@ -109,7 +109,7 @@ public class RdbEtlService {
             logger.info(
                 dbMapping.getTable() + " etl completed in: " + (System.currentTimeMillis() - start) / 1000 + "s!");
 
-            etlResult.setResultMessage("导入目标表 " + dbMapping.getTargetTable() + " 数据:" + successCount.get() + " 条");
+            etlResult.setResultMessage("导入目标表 " + SyncUtil.dbTable(dbMapping) + " 数据:" + successCount.get() + " 条");
         } catch (Exception e) {
             logger.error(e.getMessage(), e);
             errMsg.add(hbaseTable + " etl failed! ==>" + e.getMessage());
@@ -187,7 +187,7 @@ public class RdbEtlService {
                     // }
 
                     StringBuilder insertSql = new StringBuilder();
-                    insertSql.append("INSERT INTO ").append(dbMapping.getTargetTable()).append(" (");
+                    insertSql.append("INSERT INTO ").append(SyncUtil.dbTable(dbMapping)).append(" (");
                     columnsMap
                         .forEach((targetColumnName, srcColumnName) -> insertSql.append(targetColumnName).append(","));
 
@@ -209,7 +209,7 @@ public class RdbEtlService {
                             // delete existing data
                             Map<String, Object> values = new LinkedHashMap<>();
                             StringBuilder deleteSql = new StringBuilder(
-                                "DELETE FROM " + dbMapping.getTargetTable() + " WHERE ");
+                                "DELETE FROM " + SyncUtil.dbTable(dbMapping) + " WHERE ");
                             appendCondition(dbMapping, deleteSql, values, rs);
                             try (PreparedStatement pstmt2 = connTarget.prepareStatement(deleteSql.toString())) {
                                 int k = 1;

+ 66 - 0
client-adapter/rdb/src/main/java/com/alibaba/otter/canal/client/adapter/rdb/service/RdbMirrorDbSyncService.java

@@ -0,0 +1,66 @@
+package com.alibaba.otter.canal.client.adapter.rdb.service;
+
+import java.io.StringWriter;
+import java.sql.Connection;
+import java.sql.Statement;
+import java.util.List;
+import java.util.Map;
+
+import javax.sql.DataSource;
+
+import com.alibaba.fastsql.sql.ast.SQLName;
+import com.alibaba.fastsql.sql.ast.SQLStatement;
+import com.alibaba.fastsql.sql.ast.statement.SQLExprTableSource;
+import com.alibaba.fastsql.sql.dialect.mysql.parser.MySqlStatementParser;
+import com.alibaba.fastsql.sql.dialect.mysql.visitor.MySqlOutputVisitor;
+import com.alibaba.fastsql.sql.dialect.mysql.visitor.MySqlSchemaStatVisitor;
+import com.alibaba.fastsql.sql.parser.SQLStatementParser;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.alibaba.otter.canal.client.adapter.rdb.config.MappingConfig;
+import com.alibaba.otter.canal.client.adapter.support.Dml;
+
+public class RdbMirrorDbSyncService {
+
+    private static final Logger        logger = LoggerFactory.getLogger(RdbMirrorDbSyncService.class);
+
+    private Map<String, MappingConfig> mirrorDbConfigCache;                                           // mirror DB config
+    private DataSource                 dataSource;
+
+    public RdbMirrorDbSyncService(Map<String, MappingConfig> mirrorDbConfigCache, DataSource dataSource,
+                                  Integer threads){
+        this.mirrorDbConfigCache = mirrorDbConfigCache;
+        this.dataSource = dataSource;
+    }
+
+    public void sync(List<Dml> dmls) {
+        for (Dml dml : dmls) {
+            String destination = StringUtils.trimToEmpty(dml.getDestination());
+            String database = dml.getDatabase();
+            MappingConfig configMap = mirrorDbConfigCache.get(destination + "." + database);
+            if (configMap == null) {
+                continue;
+            }
+            if (dml.getSql() != null) {
+                // DDL
+                executeDdl(database, dml.getSql());
+            } else {
+                // DML
+                // TODO
+            }
+        }
+    }
+
+    private void executeDdl(String database, String sql) {
+        try (Connection conn = dataSource.getConnection(); Statement statement = conn.createStatement()) {
+            statement.execute(sql);
+            if (logger.isTraceEnabled()) {
+                logger.trace("Execute DDL sql: {} for database: {}", sql, database);
+            }
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+        }
+    }
+}
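
The new service only handles the DDL path in this commit (the DML branch is still a TODO). A rough usage sketch under stated assumptions — that Dml exposes the usual POJO setters matching the getters used above, and with placeholder connection settings:

    import java.util.Collections;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    import com.alibaba.druid.pool.DruidDataSource;
    import com.alibaba.otter.canal.client.adapter.rdb.config.MappingConfig;
    import com.alibaba.otter.canal.client.adapter.rdb.service.RdbMirrorDbSyncService;
    import com.alibaba.otter.canal.client.adapter.support.Dml;

    public class MirrorDbSyncSketch {

        public static void main(String[] args) {
            // Placeholder target datasource (URL and credentials are illustrative only).
            DruidDataSource ds = new DruidDataSource();
            ds.setDriverClassName("com.mysql.jdbc.Driver");
            ds.setUrl("jdbc:mysql://127.0.0.1:3306/mytest2?useUnicode=true");
            ds.setUsername("root");
            ds.setPassword("***");

            // Mirror-DB configs are keyed by "destination.database", as built in RdbAdapter.init().
            Map<String, MappingConfig> mirrorDbConfigCache = new ConcurrentHashMap<>();
            mirrorDbConfigCache.put("example.mytest", new MappingConfig()); // stands in for a loaded yml with mirrorDb: true

            RdbMirrorDbSyncService service = new RdbMirrorDbSyncService(mirrorDbConfigCache, ds, 1);

            // A DDL event: sync() sees dml.getSql() != null and replays the statement on the
            // target datasource via executeDdl(). (Assumes Dml has the usual setters matching
            // the getters used in the service.)
            Dml ddl = new Dml();
            ddl.setDestination("example");
            ddl.setDatabase("mytest");
            ddl.setSql("ALTER TABLE `user` ADD COLUMN `test3` varchar(30) DEFAULT NULL");
            service.sync(Collections.singletonList(ddl));

            ds.close();
        }
    }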

+ 7 - 5
client-adapter/rdb/src/main/java/com/alibaba/otter/canal/client/adapter/rdb/service/RdbSyncService.java

@@ -91,7 +91,9 @@ public class RdbSyncService {
                             dmlsPartition[hash].add(syncItem);
                         });
                     } else {
-                        int hash = Math.abs(Math.abs(config.getDbMapping().getTargetTable().hashCode()) % threads);
+                        int hash = 0;
+                        // Math.abs(Math.abs(config.getDbMapping().getTargetTable().hashCode()) %
+                        // threads);
                         List<SingleDml> singleDmls = SingleDml.dml2SingleDmls(dml);
                         singleDmls.forEach(singleDml -> {
                             SyncItem syncItem = new SyncItem(config, singleDml);
@@ -164,7 +166,7 @@ public class RdbSyncService {
             Map<String, String> columnsMap = SyncUtil.getColumnsMap(dbMapping, data);
 
             StringBuilder insertSql = new StringBuilder();
-            insertSql.append("INSERT INTO ").append(dbMapping.getTargetTable()).append(" (");
+            insertSql.append("INSERT INTO ").append(SyncUtil.dbTable(dbMapping)).append(" (");
 
             columnsMap.forEach((targetColumnName, srcColumnName) -> insertSql.append(targetColumnName).append(","));
             int len = insertSql.length();
@@ -228,7 +230,7 @@ public class RdbSyncService {
             Map<String, Integer> ctype = getTargetColumnType(batchExecutor.getConn(), config);
 
             StringBuilder updateSql = new StringBuilder();
-            updateSql.append("UPDATE ").append(dbMapping.getTargetTable()).append(" SET ");
+            updateSql.append("UPDATE ").append(SyncUtil.dbTable(dbMapping)).append(" SET ");
             List<Map<String, ?>> values = new ArrayList<>();
             for (String srcColumnName : old.keySet()) {
                 List<String> targetColumnNames = new ArrayList<>();
@@ -280,7 +282,7 @@ public class RdbSyncService {
             Map<String, Integer> ctype = getTargetColumnType(batchExecutor.getConn(), config);
 
             StringBuilder sql = new StringBuilder();
-            sql.append("DELETE FROM ").append(dbMapping.getTargetTable()).append(" WHERE ");
+            sql.append("DELETE FROM ").append(SyncUtil.dbTable(dbMapping)).append(" WHERE ");
 
             List<Map<String, ?>> values = new ArrayList<>();
             // build the primary key condition
@@ -313,7 +315,7 @@ public class RdbSyncService {
                 if (columnType == null) {
                     columnType = new LinkedHashMap<>();
                     final Map<String, Integer> columnTypeTmp = columnType;
-                    String sql = "SELECT * FROM " + dbMapping.getTargetTable() + " WHERE 1=2";
+                    String sql = "SELECT * FROM " + SyncUtil.dbTable(dbMapping) + " WHERE 1=2";
                     Util.sqlRS(conn, sql, rs -> {
                         try {
                             ResultSetMetaData rsd = rs.getMetaData();

+ 10 - 0
client-adapter/rdb/src/main/java/com/alibaba/otter/canal/client/adapter/rdb/support/SyncUtil.java

@@ -9,6 +9,7 @@ import java.util.Collection;
 import java.util.LinkedHashMap;
 import java.util.Map;
 
+import org.apache.commons.lang.StringUtils;
 import org.joda.time.DateTime;
 
 import com.alibaba.otter.canal.client.adapter.rdb.config.MappingConfig;
@@ -253,4 +254,13 @@ public class SyncUtil {
                 pstmt.setObject(i, value, type);
         }
     }
+
+    public static String dbTable(MappingConfig.DbMapping dbMapping) {
+        String result = "";
+        if (StringUtils.isNotEmpty(dbMapping.getTargetDb())) {
+            result += dbMapping.getTargetDb() + ".";
+        }
+        result += dbMapping.getTargetTable();
+        return result;
+    }
 }
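
A quick usage sketch for the new helper (class name hypothetical; setTargetTable() is assumed to exist alongside the getTargetTable() accessor shown earlier in this diff):

    import com.alibaba.otter.canal.client.adapter.rdb.config.MappingConfig;
    import com.alibaba.otter.canal.client.adapter.rdb.support.SyncUtil;

    public class DbTableSketch {

        public static void main(String[] args) {
            MappingConfig.DbMapping mapping = new MappingConfig.DbMapping();
            mapping.setTargetTable("tb_user"); // setter assumed to mirror getTargetTable()

            // Without targetDb the helper returns just the table name.
            System.out.println(SyncUtil.dbTable(mapping)); // tb_user

            // With targetDb set (new in this commit) the table name is schema-qualified.
            mapping.setTargetDb("mytest2");
            System.out.println(SyncUtil.dbTable(mapping)); // mytest2.tb_user
        }
    }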

+ 9 - 7
client-adapter/rdb/src/main/resources/rdb/mytest_user.yml

@@ -1,17 +1,19 @@
 dataSourceKey: defaultDS
 destination: example
-outerAdapterKey: oracle1
+outerAdapterKey: mysql1
 concurrent: true
 dbMapping:
+  mirrorDb: true
   database: mytest
-  table: user
-  targetTable: mytest.tb_user
-  targetPk:
-    id: id
-  mapAll: true
+#  table: user
+  targetDb: mytest2
+#  targetTable: mytest.tb_user
+#  targetPk:
+#    id: id
+#  mapAll: true
 #  targetColumns:
 #    id:
 #    name:
 #    role_id:
 #    c_time:
-#    test1:
+#    test1:

+ 1 - 1
client-adapter/rdb/src/test/java/com/alibaba/otter/canal/client/adapter/rdb/test/DBTest.java

@@ -42,7 +42,7 @@ public class DBTest {
             .prepareStatement("insert into user (id,name,role_id,c_time,test1,test2) values (?,?,?,?,?,?)");
 
         java.util.Date now = new java.util.Date();
-        for (int i = 1; i <= 100000; i++) {
+        for (int i = 1; i <= 10000; i++) {
             pstmt.clearParameters();
             pstmt.setLong(1, (long) i);
             pstmt.setString(2, "test_" + i);

+ 86 - 0
client-adapter/rdb/src/test/java/com/alibaba/otter/canal/client/adapter/rdb/test/SqlParserTest.java

@@ -0,0 +1,86 @@
+package com.alibaba.otter.canal.client.adapter.rdb.test;
+
+import com.alibaba.fastsql.sql.ast.SQLName;
+import com.alibaba.fastsql.sql.ast.SQLStatement;
+import com.alibaba.fastsql.sql.ast.statement.SQLCreateTableStatement;
+import com.alibaba.fastsql.sql.ast.statement.SQLExprTableSource;
+import com.alibaba.fastsql.sql.dialect.mysql.parser.MySqlCreateTableParser;
+import com.alibaba.fastsql.sql.dialect.mysql.parser.MySqlStatementParser;
+import com.alibaba.fastsql.sql.dialect.mysql.visitor.MySqlOutputVisitor;
+import com.alibaba.fastsql.sql.dialect.mysql.visitor.MySqlSchemaStatVisitor;
+import com.alibaba.fastsql.sql.parser.SQLStatementParser;
+
+import java.io.StringWriter;
+
+public class SqlParserTest {
+
+    public static class TableNameVisitor extends MySqlOutputVisitor {
+
+        public TableNameVisitor(Appendable appender){
+            super(appender);
+        }
+
+        @Override
+        public boolean visit(SQLExprTableSource x) {
+            SQLName table = (SQLName) x.getExpr();
+            String tableName = table.getSimpleName();
+
+            // rewrite tableName
+            print0("new_" + tableName.toUpperCase());
+
+            return true;
+        }
+
+    }
+
+    public static void main(String[] args) {
+        // String sql = "select * from `mytest`.`t` where id=1 and name=ming group by
+        // uid limit 1,200 order by ctime";
+
+        String sql = "CREATE TABLE `mytest`.`user` (\n" + "  `id` bigint(20) NOT NULL AUTO_INCREMENT,\n"
+                     + "  `name` varchar(30) NOT NULL,\n" + "  `c_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,\n"
+                     + "  `role_id` bigint(20) DEFAULT NULL,\n" + "  `test1` text,\n" + "  `test2` blob,\n"
+                     + "  `key` varchar(30) DEFAULT NULL,\n" + "  PRIMARY KEY (`id`)\n"
+                     + ") ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8;";
+
+        // // Create a new MySQL parser
+        // SQLStatementParser parser = new MySqlStatementParser(sql);
+        //
+        // // Parse with the parser to build the AST; here SQLStatement is the AST
+        // SQLStatement sqlStatement = parser.parseStatement();
+        //
+        // MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
+        // sqlStatement.accept(visitor);
+        //
+        // System.out.println("getTables:" + visitor.getTables());
+        // System.out.println("getParameters:" + visitor.getParameters());
+        // System.out.println("getOrderByColumns:" + visitor.getOrderByColumns());
+        // System.out.println("getGroupByColumns:" + visitor.getGroupByColumns());
+        // System.out.println("---------------------------------------------------------------------------");
+        //
+        // // Use a select visitor to print the select's key information
+        // // SelectPrintVisitor selectPrintVisitor = new SelectPrintVisitor();
+        // // sqlStatement.accept(selectPrintVisitor);
+        //
+        // System.out.println("---------------------------------------------------------------------------");
+        // // Final SQL output
+        // StringWriter out = new StringWriter();
+        // TableNameVisitor outputVisitor = new TableNameVisitor(out);
+        // sqlStatement.accept(outputVisitor);
+        // System.out.println(out.toString());
+
+        MySqlCreateTableParser parser1 = new MySqlCreateTableParser(sql);
+        SQLCreateTableStatement createTableStatement = parser1.parseCreateTable();
+//        MySqlSchemaStatVisitor visitor1 = new MySqlSchemaStatVisitor();
+//        createTableStatement.accept(visitor1);
+        // visitor1.getTables().forEach((k, v) -> {
+        // System.out.println(k.);
+        // System.out.println(v);
+        // });
+        // Final SQL output
+        StringWriter out = new StringWriter();
+        TableNameVisitor outputVisitor = new TableNameVisitor(out);
+        createTableStatement.accept(outputVisitor);
+        System.out.println(out.toString());
+    }
+}