Browse source code

Merge remote-tracking branch 'upstream/master'

winger 6 years ago
Parent
Commit
82a78546ab
16 changed files with 613 additions and 233 deletions
  1. + 10 - 12  client-adapter/elasticsearch/src/main/java/com/alibaba/otter/canal/client/adapter/es/ESAdapter.java
  2. + 3 - 1  client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/HbaseAdapter.java
  3. + 49 - 51  client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/AbstractCanalAdapterWorker.java
  4. + 14 - 1  client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/CanalAdapterKafkaWorker.java
  5. + 17 - 2  client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/CanalAdapterRocketMQWorker.java
  6. + 27 - 31  client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/CanalAdapterWorker.java
  7. + 6 - 3  client-adapter/rdb/src/main/java/com/alibaba/otter/canal/client/adapter/rdb/RdbAdapter.java
  8. + 48 - 52  client-adapter/rdb/src/main/java/com/alibaba/otter/canal/client/adapter/rdb/service/RdbMirrorDbSyncService.java
  9. + 34 - 39  client-adapter/rdb/src/main/java/com/alibaba/otter/canal/client/adapter/rdb/service/RdbSyncService.java
  10. + 10 - 26  client-adapter/rdb/src/main/java/com/alibaba/otter/canal/client/adapter/rdb/support/SyncUtil.java
  11. + 47 - 15  client/src/main/java/com/alibaba/otter/canal/client/kafka/KafkaCanalConnector.java
  12. + 112 - 0  client/src/main/java/com/alibaba/otter/canal/client/kafka/KafkaOffsetCanalConnector.java
  13. + 32 - 0  client/src/main/java/com/alibaba/otter/canal/client/kafka/protocol/KafkaFlatMessage.java
  14. + 33 - 0  client/src/main/java/com/alibaba/otter/canal/client/kafka/protocol/KafkaMessage.java
  15. + 168 - 0  client/src/test/java/com/alibaba/otter/canal/client/running/kafka/CanalKafkaOffsetClientExample.java
  16. + 3 - 0  parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/MemoryTableMeta.java

+ 10 - 12
client-adapter/elasticsearch/src/main/java/com/alibaba/otter/canal/client/adapter/es/ESAdapter.java

@@ -75,8 +75,8 @@ public class ESAdapter implements OuterAdapter {
             // filter out configs whose key does not match
             esSyncConfigTmp.forEach((key, config) -> {
                 if ((config.getOuterAdapterKey() == null && configuration.getKey() == null)
-                    || (config.getOuterAdapterKey() != null && config.getOuterAdapterKey()
-                        .equalsIgnoreCase(configuration.getKey()))) {
+                    || (config.getOuterAdapterKey() != null
+                        && config.getOuterAdapterKey().equalsIgnoreCase(configuration.getKey()))) {
                     esSyncConfig.put(key, config);
                 }
             });
@@ -98,15 +98,11 @@ public class ESAdapter implements OuterAdapter {
                 }
                 String schema = matcher.group(2);
 
-                schemaItem.getAliasTableItems()
-                    .values()
-                    .forEach(tableItem -> {
-                        Map<String, ESSyncConfig> esSyncConfigMap = dbTableEsSyncConfig.computeIfAbsent(schema
-                                                                                                        + "-"
-                                                                                                        + tableItem.getTableName(),
-                            k -> new HashMap<>());
-                        esSyncConfigMap.put(configName, config);
-                    });
+                schemaItem.getAliasTableItems().values().forEach(tableItem -> {
+                    Map<String, ESSyncConfig> esSyncConfigMap = dbTableEsSyncConfig
+                        .computeIfAbsent(schema + "-" + tableItem.getTableName(), k -> new HashMap<>());
+                    esSyncConfigMap.put(configName, config);
+                });
             }
 
             Map<String, String> properties = configuration.getProperties();
@@ -140,7 +136,9 @@ public class ESAdapter implements OuterAdapter {
         String database = dml.getDatabase();
         String table = dml.getTable();
         Map<String, ESSyncConfig> configMap = dbTableEsSyncConfig.get(database + "-" + table);
-        esSyncService.sync(configMap.values(), dml);
+        if (configMap != null) {
+            esSyncService.sync(configMap.values(), dml);
+        }
     }
 
     @Override

+ 3 - 1
client-adapter/hbase/src/main/java/com/alibaba/otter/canal/client/adapter/hbase/HbaseAdapter.java

@@ -115,7 +115,9 @@ public class HbaseAdapter implements OuterAdapter {
         String database = dml.getDatabase();
         String table = dml.getTable();
         Map<String, MappingConfig> configMap = mappingConfigCache.get(destination + "." + database + "." + table);
-        configMap.values().forEach(config -> hbaseSyncService.sync(config, dml));
+        if (configMap != null) {
+            configMap.values().forEach(config -> hbaseSyncService.sync(config, dml));
+        }
     }
 
     @Override

+ 49 - 51
client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/AbstractCanalAdapterWorker.java

@@ -2,11 +2,7 @@ package com.alibaba.otter.canal.adapter.launcher.loader;
 
 import java.util.ArrayList;
 import java.util.List;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
+import java.util.concurrent.*;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -80,16 +76,17 @@ public abstract class AbstractCanalAdapterWorker {
             futures.forEach(future -> {
                 try {
                     if (!future.get()) {
-                        logger.error("Outer adapter write failed");
+                        throw new RuntimeException("Outer adapter sync failed! ");
                     }
-                } catch (InterruptedException | ExecutionException e) {
-                    // ignore
+                } catch (Exception e) {
+                    future.cancel(true);
+                    throw new RuntimeException(e);
                 }
             });
         });
     }
 
-    private void writeOut(final List<FlatMessage> flatMessages) {
+    protected void writeOut(final List<FlatMessage> flatMessages) {
         List<Future<Boolean>> futures = new ArrayList<>();
         // adapters across groups run in parallel
         canalOuterAdapters.forEach(outerAdapters -> {
@@ -119,63 +116,64 @@ public abstract class AbstractCanalAdapterWorker {
             futures.forEach(future -> {
                 try {
                     if (!future.get()) {
-                        logger.error("Outer adapter write failed");
+                        throw new RuntimeException("Outer adapter sync failed! ");
                     }
-                } catch (InterruptedException | ExecutionException e) {
-                    // ignore
+                } catch (Exception e) {
+                    future.cancel(true);
+                    throw new RuntimeException(e);
                 }
             });
         });
     }
 
     @SuppressWarnings("unchecked")
-    protected void mqWriteOutData(int retry, long timeout, final boolean flatMessage, CanalMQConnector connector,
+    protected boolean mqWriteOutData(int retry, long timeout, int i, final boolean flatMessage, CanalMQConnector connector,
                                   ExecutorService workerExecutor) {
-        for (int i = 0; i < retry; i++) {
-            try {
-                List<?> messages;
-                if (!flatMessage) {
-                    messages = connector.getListWithoutAck(100L, TimeUnit.MILLISECONDS);
-                } else {
-                    messages = connector.getFlatListWithoutAck(100L, TimeUnit.MILLISECONDS);
-                }
-                if (messages != null) {
-                    Future<Boolean> future = workerExecutor.submit(() -> {
-                        if (flatMessage) {
-                            // batch write
-                            writeOut((List<FlatMessage>) messages);
-                        } else {
-                            for (final Object message : messages) {
-                                writeOut((Message) message);
-                            }
+        try {
+            List<?> messages;
+            if (!flatMessage) {
+                messages = connector.getListWithoutAck(100L, TimeUnit.MILLISECONDS);
+            } else {
+                messages = connector.getFlatListWithoutAck(100L, TimeUnit.MILLISECONDS);
+            }
+            if (messages != null && !messages.isEmpty()) {
+                Future<Boolean> future = workerExecutor.submit(() -> {
+                    if (flatMessage) {
+                        // batch write
+                        writeOut((List<FlatMessage>) messages);
+                    } else {
+                        for (final Object message : messages) {
+                            writeOut((Message) message);
                         }
-                        return true;
-                    });
-
-                    try {
-                        future.get(timeout, TimeUnit.MILLISECONDS);
-                    } catch (Exception e) {
-                        future.cancel(true);
-                        throw e;
                     }
-                }
-                connector.ack();
-                break;
-            } catch (Throwable e) {
-                if (i == retry - 1) {
-                    connector.ack();
-                } else {
-                    connector.rollback();
-                }
+                    return true;
+                });
 
-                logger.error(e.getMessage(), e);
                 try {
-                    TimeUnit.SECONDS.sleep(1L);
-                } catch (InterruptedException e1) {
-                    // ignore
+                    future.get(timeout, TimeUnit.MILLISECONDS);
+                } catch (Exception e) {
+                    future.cancel(true);
+                    throw e;
                 }
+                connector.ack();
+            }
+            return true;
+        } catch (Throwable e) {
+            if (i == retry - 1) {
+                connector.ack();
+                logger.error(e.getMessage() + " Error sync but ACK!");
+                return true;
+            } else {
+                connector.rollback();
+                logger.error(e.getMessage() + " Error sync and rollback, execute times: " + (i + 1));
+            }
+            try {
+                Thread.sleep(500);
+            } catch (InterruptedException e1) {
+                // ignore
             }
         }
+        return false;
     }
 
     /**

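The refactor above changes mqWriteOutData from looping over all retries internally to performing a single attempt (the new parameter i) and reporting the outcome through its boolean return value, so each MQ worker now drives the retry loop itself, as the next two diffs show. A condensed sketch of the resulting contract, reusing the names from this diff (the surrounding worker fields are assumed):

    // retry == -1 is treated as "retry indefinitely"
    int attempts = (retry == -1) ? Integer.MAX_VALUE : retry;
    for (int i = 0; i < attempts; i++) {
        if (!running) {
            break; // stop promptly on shutdown
        }
        // One attempt per call: returns true on success, and also on the final
        // attempt, where the batch is ACKed anyway so a bad batch cannot wedge the worker.
        if (mqWriteOutData(attempts, timeout, i, flatMessage, connector, workerExecutor)) {
            break;
        }
    }
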
+ 14 - 1
client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/CanalAdapterKafkaWorker.java

@@ -3,8 +3,11 @@ package com.alibaba.otter.canal.adapter.launcher.loader;
 import java.util.List;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 
+import com.alibaba.otter.canal.protocol.FlatMessage;
+import com.alibaba.otter.canal.protocol.Message;
 import org.apache.kafka.common.errors.WakeupException;
 
 import com.alibaba.otter.canal.client.adapter.OuterAdapter;
@@ -61,7 +64,17 @@ public class CanalAdapterKafkaWorker extends AbstractCanalAdapterWorker {
                         connector.disconnect();
                         break;
                     }
-                    mqWriteOutData(retry, timeout, flatMessage, connector, workerExecutor);
+                    if (retry == -1) {
+                        retry = Integer.MAX_VALUE;
+                    }
+                    for (int i = 0; i < retry; i++) {
+                        if (!running) {
+                            break;
+                        }
+                        if (mqWriteOutData(retry, timeout, i, flatMessage, connector, workerExecutor)) {
+                            break;
+                        }
+                    }
                 }
             } catch (Exception e) {
                 logger.error(e.getMessage(), e);

+ 17 - 2
client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/CanalAdapterRocketMQWorker.java

@@ -3,7 +3,11 @@ package com.alibaba.otter.canal.adapter.launcher.loader;
 import java.util.List;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
 
+import com.alibaba.otter.canal.protocol.FlatMessage;
+import com.alibaba.otter.canal.protocol.Message;
 import org.apache.kafka.common.errors.WakeupException;
 
 import com.alibaba.otter.canal.client.adapter.OuterAdapter;
@@ -39,7 +43,8 @@ public class CanalAdapterRocketMQWorker extends AbstractCanalAdapterWorker {
             ;
 
         ExecutorService workerExecutor = Executors.newSingleThreadExecutor();
-        int retry = canalClientConfig.getRetries() == null || canalClientConfig.getRetries() == 0 ? 1 : canalClientConfig.getRetries();
+        int retry = canalClientConfig.getRetries() == null
+                    || canalClientConfig.getRetries() == 0 ? 1 : canalClientConfig.getRetries();
         long timeout = canalClientConfig.getTimeout() == null ? 30000 : canalClientConfig.getTimeout(); // 默认超时30秒
 
         while (running) {
@@ -56,7 +61,17 @@ public class CanalAdapterRocketMQWorker extends AbstractCanalAdapterWorker {
                         connector.disconnect();
                         break;
                     }
-                    mqWriteOutData(retry, timeout, flatMessage, connector, workerExecutor);
+                    if (retry == -1) {
+                        retry = Integer.MAX_VALUE;
+                    }
+                    for (int i = 0; i < retry; i++) {
+                        if (!running) {
+                            break;
+                        }
+                        if (mqWriteOutData(retry, timeout, i, flatMessage, connector, workerExecutor)) {
+                            break;
+                        }
+                    }
                 }
             } catch (Exception e) {
                 logger.error(e.getMessage(), e);

+ 27 - 31
client-adapter/launcher/src/main/java/com/alibaba/otter/canal/adapter/launcher/loader/CanalAdapterWorker.java

@@ -20,10 +20,10 @@ import com.alibaba.otter.canal.protocol.Message;
  */
 public class CanalAdapterWorker extends AbstractCanalAdapterWorker {
 
-    private static final int  BATCH_SIZE = 50;
-    private static final int  SO_TIMEOUT = 0;
+    private static final int BATCH_SIZE = 50;
+    private static final int SO_TIMEOUT = 0;
 
-    private CanalConnector    connector;
+    private CanalConnector   connector;
 
     /**
      * 单台client适配器worker的构造方法
@@ -61,10 +61,14 @@ public class CanalAdapterWorker extends AbstractCanalAdapterWorker {
         while (!running)
             ; // waiting until running == true
 
-        ExecutorService workerExecutor = Executors.newSingleThreadExecutor();
         int retry = canalClientConfig.getRetries() == null
                     || canalClientConfig.getRetries() == 0 ? 1 : canalClientConfig.getRetries();
-        long timeout = canalClientConfig.getTimeout() == null ? 300000 : canalClientConfig.getTimeout(); // default timeout: 5 minutes
+        if (retry == -1) {
+            // retry == -1 means keep blocking and retrying on errors
+            retry = Integer.MAX_VALUE;
+        }
+        // long timeout = canalClientConfig.getTimeout() == null ? 300000 :
+        // canalClientConfig.getTimeout(); // default timeout: 5 minutes
         Integer batchSize = canalClientConfig.getBatchSize();
         if (batchSize == null) {
             batchSize = BATCH_SIZE;
@@ -90,6 +94,9 @@ public class CanalAdapterWorker extends AbstractCanalAdapterWorker {
                     }
 
                     for (int i = 0; i < retry; i++) {
+                        if (!running) {
+                            break;
+                        }
                        Message message = connector.getWithoutAck(batchSize); // fetch the specified number of entries
                         long batchId = message.getId();
                         try {
@@ -97,29 +104,19 @@ public class CanalAdapterWorker extends AbstractCanalAdapterWorker {
                             if (batchId == -1 || size == 0) {
                                 Thread.sleep(500);
                             } else {
-                                Future<Boolean> future = workerExecutor.submit(() -> {
-                                    if (logger.isDebugEnabled()) {
-                                        logger.debug("destination: {} batchId: {} batchSize: {} ",
-                                            canalDestination,
-                                            batchId,
-                                            size);
-                                    }
-                                    long begin = System.currentTimeMillis();
-                                    writeOut(message);
-                                    if (logger.isDebugEnabled()) {
-                                        logger.debug("destination: {} batchId: {} elapsed time: {} ms",
-                                            canalDestination,
-                                            batchId,
-                                            System.currentTimeMillis() - begin);
-                                    }
-                                    return true;
-                                });
-
-                                try {
-                                    future.get(timeout, TimeUnit.MILLISECONDS);
-                                } catch (Exception e) {
-                                    future.cancel(true);
-                                    throw e;
+                                if (logger.isDebugEnabled()) {
+                                    logger.debug("destination: {} batchId: {} batchSize: {} ",
+                                        canalDestination,
+                                        batchId,
+                                        size);
+                                }
+                                long begin = System.currentTimeMillis();
+                                writeOut(message);
+                                if (logger.isDebugEnabled()) {
+                                    logger.debug("destination: {} batchId: {} elapsed time: {} ms",
+                                        canalDestination,
+                                        batchId,
+                                        System.currentTimeMillis() - begin);
                                 }
                             }
                            connector.ack(batchId); // commit ack
@@ -127,10 +124,11 @@ public class CanalAdapterWorker extends AbstractCanalAdapterWorker {
                         } catch (Exception e) {
                             if (i != retry - 1) {
                                connector.rollback(batchId); // processing failed, roll back the data
+                                logger.error(e.getMessage() + " Error sync and rollback, execute times: " + (i + 1));
                             } else {
                                 connector.ack(batchId);
+                                logger.error(e.getMessage() + " Error sync but ACK!");
                             }
-                            logger.error("sync error!", e);
                             Thread.sleep(500);
                         }
                     }
@@ -151,8 +149,6 @@ public class CanalAdapterWorker extends AbstractCanalAdapterWorker {
                 }
             }
         }
-
-        workerExecutor.shutdown();
     }
 
     @Override

+ 6 - 3
client-adapter/rdb/src/main/java/com/alibaba/otter/canal/client/adapter/rdb/RdbAdapter.java

@@ -5,7 +5,10 @@ import java.sql.SQLException;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.*;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 
 import javax.sql.DataSource;
 
@@ -152,8 +155,8 @@ public class RdbAdapter implements OuterAdapter {
         try {
             future1.get();
             future2.get();
-        } catch (ExecutionException | InterruptedException e) {
-            // ignore
+        } catch (Exception e) {
+            throw new RuntimeException(e);
         }
     }
 

+ 48 - 52
client-adapter/rdb/src/main/java/com/alibaba/otter/canal/client/adapter/rdb/service/RdbMirrorDbSyncService.java

@@ -47,62 +47,58 @@ public class RdbMirrorDbSyncService {
     * @param dmls batch of DMLs
      */
     public void sync(List<Dml> dmls) {
-        try {
-            List<Dml> dmlList = new ArrayList<>();
-            for (Dml dml : dmls) {
-                String destination = StringUtils.trimToEmpty(dml.getDestination());
-                String database = dml.getDatabase();
-                MirrorDbConfig mirrorDbConfig = mirrorDbConfigCache.get(destination + "." + database);
-                if (mirrorDbConfig == null) {
-                    continue;
-                }
-                if (dml.getIsDdl() != null && dml.getIsDdl() && StringUtils.isNotEmpty(dml.getSql())) {
-                    // DDL
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("DDL: {}", JSON.toJSONString(dml, SerializerFeature.WriteMapNullValue));
-                    }
-                    executeDdl(mirrorDbConfig, dml);
-                    rdbSyncService.getColumnsTypeCache().remove(destination + "." + database + "." + dml.getTable());
-                    mirrorDbConfig.getTableConfig().remove(dml.getTable()); // drop the config for that table
-                } else {
-                    // DML
-                    initMappingConfig(dml.getTable(), mirrorDbConfig.getMappingConfig(), mirrorDbConfig, dml);
-                    dmlList.add(dml);
+        List<Dml> dmlList = new ArrayList<>();
+        for (Dml dml : dmls) {
+            String destination = StringUtils.trimToEmpty(dml.getDestination());
+            String database = dml.getDatabase();
+            MirrorDbConfig mirrorDbConfig = mirrorDbConfigCache.get(destination + "." + database);
+            if (mirrorDbConfig == null) {
+                continue;
+            }
+            if (dml.getIsDdl() != null && dml.getIsDdl() && StringUtils.isNotEmpty(dml.getSql())) {
+                // DDL
+                if (logger.isDebugEnabled()) {
+                    logger.debug("DDL: {}", JSON.toJSONString(dml, SerializerFeature.WriteMapNullValue));
                 }
+                executeDdl(mirrorDbConfig, dml);
+                rdbSyncService.getColumnsTypeCache().remove(destination + "." + database + "." + dml.getTable());
+                mirrorDbConfig.getTableConfig().remove(dml.getTable()); // drop the config for that table
+            } else {
+                // DML
+                initMappingConfig(dml.getTable(), mirrorDbConfig.getMappingConfig(), mirrorDbConfig, dml);
+                dmlList.add(dml);
             }
-            if (!dmlList.isEmpty()) {
-                rdbSyncService.sync(dmlList, dml -> {
-                    MirrorDbConfig mirrorDbConfig = mirrorDbConfigCache
-                        .get(dml.getDestination() + "." + dml.getDatabase());
-                    String destination = StringUtils.trimToEmpty(dml.getDestination());
-                    String database = dml.getDatabase();
-                    String table = dml.getTable();
-                    MappingConfig config = mirrorDbConfig.getTableConfig().get(table);
+        }
+        if (!dmlList.isEmpty()) {
+            rdbSyncService.sync(dmlList, dml -> {
+                MirrorDbConfig mirrorDbConfig = mirrorDbConfigCache.get(dml.getDestination() + "." + dml.getDatabase());
+                if (mirrorDbConfig == null) {
+                    return false;
+                }
+                String table = dml.getTable();
+                MappingConfig config = mirrorDbConfig.getTableConfig().get(table);
 
-                    if (config == null) {
-                        return false;
-                    }
+                if (config == null) {
+                    return false;
+                }
 
-                    if (config.getConcurrent()) {
-                        List<SingleDml> singleDmls = SingleDml.dml2SingleDmls(dml);
-                        singleDmls.forEach(singleDml -> {
-                            int hash = rdbSyncService.pkHash(config.getDbMapping(), singleDml.getData());
-                            RdbSyncService.SyncItem syncItem = new RdbSyncService.SyncItem(config, singleDml);
-                            rdbSyncService.getDmlsPartition()[hash].add(syncItem);
-                        });
-                    } else {
-                        int hash = 0;
-                        List<SingleDml> singleDmls = SingleDml.dml2SingleDmls(dml);
-                        singleDmls.forEach(singleDml -> {
-                            RdbSyncService.SyncItem syncItem = new RdbSyncService.SyncItem(config, singleDml);
-                            rdbSyncService.getDmlsPartition()[hash].add(syncItem);
-                        });
-                    }
-                    return true;
-                });
-            }
-        } catch (Exception e) {
-            logger.error(e.getMessage(), e);
+                if (config.getConcurrent()) {
+                    List<SingleDml> singleDmls = SingleDml.dml2SingleDmls(dml);
+                    singleDmls.forEach(singleDml -> {
+                        int hash = rdbSyncService.pkHash(config.getDbMapping(), singleDml.getData());
+                        RdbSyncService.SyncItem syncItem = new RdbSyncService.SyncItem(config, singleDml);
+                        rdbSyncService.getDmlsPartition()[hash].add(syncItem);
+                    });
+                } else {
+                    int hash = 0;
+                    List<SingleDml> singleDmls = SingleDml.dml2SingleDmls(dml);
+                    singleDmls.forEach(singleDml -> {
+                        RdbSyncService.SyncItem syncItem = new RdbSyncService.SyncItem(config, singleDml);
+                        rdbSyncService.getDmlsPartition()[hash].add(syncItem);
+                    });
+                }
+                return true;
+            });
         }
     }
 

+ 34 - 39
client-adapter/rdb/src/main/java/com/alibaba/otter/canal/client/adapter/rdb/service/RdbSyncService.java

@@ -133,49 +133,44 @@ public class RdbSyncService {
     * @param dmls batch of DMLs
      */
     public void sync(Map<String, Map<String, MappingConfig>> mappingConfig, List<Dml> dmls) {
-        try {
-            sync(dmls, dml -> {
-                if (dml.getIsDdl() != null && dml.getIsDdl() && StringUtils.isNotEmpty(dml.getSql())) {
-                    // DDL
-                    columnsTypeCache.remove(dml.getDestination() + "." + dml.getDatabase() + "." + dml.getTable());
+        sync(dmls, dml -> {
+            if (dml.getIsDdl() != null && dml.getIsDdl() && StringUtils.isNotEmpty(dml.getSql())) {
+                // DDL
+                columnsTypeCache.remove(dml.getDestination() + "." + dml.getDatabase() + "." + dml.getTable());
+                return false;
+            } else {
+                // DML
+                String destination = StringUtils.trimToEmpty(dml.getDestination());
+                String database = dml.getDatabase();
+                String table = dml.getTable();
+                Map<String, MappingConfig> configMap = mappingConfig.get(destination + "." + database + "." + table);
+
+                if (configMap == null) {
                     return false;
-                } else {
-                    // DML
-                    String destination = StringUtils.trimToEmpty(dml.getDestination());
-                    String database = dml.getDatabase();
-                    String table = dml.getTable();
-                    Map<String, MappingConfig> configMap = mappingConfig
-                        .get(destination + "." + database + "." + table);
-
-                    if (configMap == null) {
-                        return false;
-                    }
+                }
 
-                    boolean executed = false;
-                    for (MappingConfig config : configMap.values()) {
-                        if (config.getConcurrent()) {
-                            List<SingleDml> singleDmls = SingleDml.dml2SingleDmls(dml);
-                            singleDmls.forEach(singleDml -> {
-                                int hash = pkHash(config.getDbMapping(), singleDml.getData());
-                                SyncItem syncItem = new SyncItem(config, singleDml);
-                                dmlsPartition[hash].add(syncItem);
-                            });
-                        } else {
-                            int hash = 0;
-                            List<SingleDml> singleDmls = SingleDml.dml2SingleDmls(dml);
-                            singleDmls.forEach(singleDml -> {
-                                SyncItem syncItem = new SyncItem(config, singleDml);
-                                dmlsPartition[hash].add(syncItem);
-                            });
-                        }
-                        executed = true;
+                boolean executed = false;
+                for (MappingConfig config : configMap.values()) {
+                    if (config.getConcurrent()) {
+                        List<SingleDml> singleDmls = SingleDml.dml2SingleDmls(dml);
+                        singleDmls.forEach(singleDml -> {
+                            int hash = pkHash(config.getDbMapping(), singleDml.getData());
+                            SyncItem syncItem = new SyncItem(config, singleDml);
+                            dmlsPartition[hash].add(syncItem);
+                        });
+                    } else {
+                        int hash = 0;
+                        List<SingleDml> singleDmls = SingleDml.dml2SingleDmls(dml);
+                        singleDmls.forEach(singleDml -> {
+                            SyncItem syncItem = new SyncItem(config, singleDml);
+                            dmlsPartition[hash].add(syncItem);
+                        });
                     }
-                    return executed;
+                    executed = true;
                 }
-            });
-        } catch (Exception e) {
-            logger.error(e.getMessage(), e);
-        }
+                return executed;
+            }
+        });
     }
 
     /**

+ 10 - 26
client-adapter/rdb/src/main/java/com/alibaba/otter/canal/client/adapter/rdb/support/SyncUtil.java

@@ -86,9 +86,7 @@ public class SyncUtil {
                 }
                 break;
             case Types.TINYINT:
-                if (value instanceof Byte || value instanceof Short || value instanceof Integer) {
-                    pstmt.setByte(i, (byte) value);
-                } else if (value instanceof Number) {
+                if (value instanceof Number) {
                     pstmt.setByte(i, ((Number) value).byteValue());
                 } else if (value instanceof String) {
                     pstmt.setByte(i, Byte.parseByte((String) value));
@@ -97,9 +95,7 @@ public class SyncUtil {
                 }
                 break;
             case Types.SMALLINT:
-                if (value instanceof Byte || value instanceof Short || value instanceof Integer) {
-                    pstmt.setShort(i, (short) value);
-                } else if (value instanceof Number) {
+                if (value instanceof Number) {
                     pstmt.setShort(i, ((Number) value).shortValue());
                 } else if (value instanceof String) {
                     pstmt.setShort(i, Short.parseShort((String) value));
@@ -108,10 +104,7 @@ public class SyncUtil {
                 }
                 break;
             case Types.INTEGER:
-                if (value instanceof Byte || value instanceof Short || value instanceof Integer
-                    || value instanceof Long) {
-                    pstmt.setInt(i, (int) value);
-                } else if (value instanceof Number) {
+                if (value instanceof Number) {
                     pstmt.setInt(i, ((Number) value).intValue());
                 } else if (value instanceof String) {
                     pstmt.setInt(i, Integer.parseInt((String) value));
@@ -120,10 +113,7 @@ public class SyncUtil {
                 }
                 break;
             case Types.BIGINT:
-                if (value instanceof Byte || value instanceof Short || value instanceof Integer
-                    || value instanceof Long) {
-                    pstmt.setLong(i, (long) value);
-                } else if (value instanceof Number) {
+                if (value instanceof Number) {
                     pstmt.setLong(i, ((Number) value).longValue());
                 } else if (value instanceof String) {
                     pstmt.setLong(i, Long.parseLong((String) value));
@@ -136,13 +126,13 @@ public class SyncUtil {
                 if (value instanceof BigDecimal) {
                     pstmt.setBigDecimal(i, (BigDecimal) value);
                 } else if (value instanceof Byte) {
-                    pstmt.setInt(i, (int) value);
+                    pstmt.setInt(i, ((Byte) value).intValue());
                 } else if (value instanceof Short) {
-                    pstmt.setInt(i, (int) value);
+                    pstmt.setInt(i, ((Short) value).intValue());
                 } else if (value instanceof Integer) {
-                    pstmt.setInt(i, (int) value);
+                    pstmt.setInt(i, (Integer) value);
                 } else if (value instanceof Long) {
-                    pstmt.setLong(i, (long) value);
+                    pstmt.setLong(i, (Long) value);
                 } else if (value instanceof Float) {
                     pstmt.setBigDecimal(i, new BigDecimal((float) value));
                 } else if (value instanceof Double) {
@@ -154,10 +144,7 @@ public class SyncUtil {
                 }
                 break;
             case Types.REAL:
-                if (value instanceof Byte || value instanceof Short || value instanceof Integer || value instanceof Long
-                    || value instanceof Float || value instanceof Double) {
-                    pstmt.setFloat(i, (float) value);
-                } else if (value instanceof Number) {
+                if (value instanceof Number) {
                     pstmt.setFloat(i, ((Number) value).floatValue());
                 } else if (value instanceof String) {
                     pstmt.setFloat(i, Float.parseFloat((String) value));
@@ -167,10 +154,7 @@ public class SyncUtil {
                 break;
             case Types.FLOAT:
             case Types.DOUBLE:
-                if (value instanceof Byte || value instanceof Short || value instanceof Integer || value instanceof Long
-                    || value instanceof Float || value instanceof Double) {
-                    pstmt.setDouble(i, (double) value);
-                } else if (value instanceof Number) {
+                if (value instanceof Number) {
                     pstmt.setDouble(i, ((Number) value).doubleValue());
                 } else if (value instanceof String) {
                     pstmt.setDouble(i, Double.parseDouble((String) value));

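The SyncUtil change collapses chains of exact-type checks into a single Number branch. The old unboxing casts were the real hazard: casting an Object to a primitive only succeeds when the runtime wrapper type matches exactly. A minimal repro sketch (hypothetical value, plain Java):

    Object value = Integer.valueOf(7);
    byte bad = (byte) value;                  // throws ClassCastException: Integer cannot be cast to Byte
    byte ok  = ((Number) value).byteValue();  // safe for any Number subtype
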
+ 47 - 15
client/src/main/java/com/alibaba/otter/canal/client/kafka/KafkaCanalConnector.java

@@ -1,9 +1,6 @@
 package com.alibaba.otter.canal.client.kafka;
 
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Properties;
+import java.util.*;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.kafka.clients.consumer.ConsumerRecord;
@@ -22,7 +19,7 @@ import com.google.common.collect.Lists;
 
 /**
  * canal Kafka data access client
- * 
+ *
  * <pre>
 * Notes:
 * 1. Unlike canal's {@linkplain SimpleCanalConnector}, get and ack must not run concurrently here: a single thread must execute get, finish processing and ack in memory, and only then issue the next get
@@ -33,14 +30,16 @@ import com.google.common.collect.Lists;
  */
 public class KafkaCanalConnector implements CanalMQConnector {
 
-    private KafkaConsumer<String, Message> kafkaConsumer;
-    private KafkaConsumer<String, String>  kafkaConsumer2;   // consumes flat messages
-    private String                         topic;
-    private Integer                        partition;
-    private Properties                     properties;
-    private volatile boolean               connected = false;
-    private volatile boolean               running   = false;
-    private boolean                        flatMessage;
+    protected KafkaConsumer<String, Message> kafkaConsumer;
+    protected KafkaConsumer<String, String>  kafkaConsumer2;                  // consumes flat messages
+    protected String                         topic;
+    protected Integer                        partition;
+    protected Properties                     properties;
+    protected volatile boolean               connected      = false;
+    protected volatile boolean               running        = false;
+    protected boolean                        flatMessage;
+
+    private Map<Integer, Long>               currentOffsets = new HashMap<>();
 
     public KafkaCanalConnector(String servers, String topic, Integer partition, String groupId, Integer batchSize,
                                boolean flatMessage){
@@ -71,6 +70,7 @@ public class KafkaCanalConnector implements CanalMQConnector {
     /**
     * Open the connection
      */
+    @Override
     public void connect() {
         if (connected) {
             return;
@@ -79,6 +79,7 @@ public class KafkaCanalConnector implements CanalMQConnector {
         connected = true;
         if (kafkaConsumer == null && !flatMessage) {
             kafkaConsumer = new KafkaConsumer<String, Message>(properties);
+
         }
         if (kafkaConsumer2 == null && flatMessage) {
             kafkaConsumer2 = new KafkaConsumer<String, String>(properties);
@@ -88,6 +89,7 @@ public class KafkaCanalConnector implements CanalMQConnector {
     /**
     * Close the connection
      */
+    @Override
     public void disconnect() {
         if (kafkaConsumer != null) {
             kafkaConsumer.close();
@@ -101,10 +103,11 @@ public class KafkaCanalConnector implements CanalMQConnector {
         connected = false;
     }
 
-    private void waitClientRunning() {
+    protected void waitClientRunning() {
         running = true;
     }
 
+    @Override
     public boolean checkValid() {
         return true;// 默认都放过
     }
@@ -112,6 +115,7 @@ public class KafkaCanalConnector implements CanalMQConnector {
     /**
     * Subscribe to the topic
      */
+    @Override
     public void subscribe() {
         waitClientRunning();
         if (!running) {
@@ -139,6 +143,7 @@ public class KafkaCanalConnector implements CanalMQConnector {
     /**
     * Unsubscribe
      */
+    @Override
     public void unsubscribe() {
         waitClientRunning();
         if (!running) {
@@ -176,6 +181,11 @@ public class KafkaCanalConnector implements CanalMQConnector {
 
         ConsumerRecords<String, Message> records = kafkaConsumer.poll(unit.toMillis(timeout));
 
+        currentOffsets.clear();
+        for (TopicPartition topicPartition : records.partitions()) {
+            currentOffsets.put(topicPartition.partition(), kafkaConsumer.position(topicPartition));
+        }
+
         if (!records.isEmpty()) {
             List<Message> messages = new ArrayList<>();
             for (ConsumerRecord<String, Message> record : records) {
@@ -208,6 +218,12 @@ public class KafkaCanalConnector implements CanalMQConnector {
         }
 
         ConsumerRecords<String, String> records = kafkaConsumer2.poll(unit.toMillis(timeout));
+
+        currentOffsets.clear();
+        for (TopicPartition topicPartition : records.partitions()) {
+            currentOffsets.put(topicPartition.partition(), kafkaConsumer2.position(topicPartition));
+        }
+
         if (!records.isEmpty()) {
             List<FlatMessage> flatMessages = new ArrayList<>();
             for (ConsumerRecord<String, String> record : records) {
@@ -222,12 +238,28 @@ public class KafkaCanalConnector implements CanalMQConnector {
     }
 
     @Override
-    public void rollback() throws CanalClientException {
+    public void rollback() {
+        waitClientRunning();
+        if (!running) {
+            return;
+        }
+        // roll back all partitions
+        if (kafkaConsumer != null) {
+            for (Map.Entry<Integer, Long> entry : currentOffsets.entrySet()) {
+                kafkaConsumer.seek(new TopicPartition(topic, entry.getKey()), entry.getValue() - 1);
+            }
+        }
+        if (kafkaConsumer2 != null) {
+            for (Map.Entry<Integer, Long> entry : currentOffsets.entrySet()) {
+                kafkaConsumer2.seek(new TopicPartition(topic, entry.getKey()), entry.getValue() - 1);
+            }
+        }
     }
 
     /**
     * Commit the offset; if no ack occurs within session.timeout.ms, an exception is thrown and the ack fails
      */
+    @Override
     public void ack() {
         waitClientRunning();
         if (!running) {

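With the per-partition positions now recorded on every poll, rollback() can seek the consumer back instead of being a no-op. A minimal consume-loop sketch, assuming a hypothetical process() handler:

    KafkaCanalConnector connector = new KafkaCanalConnector("127.0.0.1:9092", "example", 0, "test-group", 100, true);
    connector.connect();
    connector.subscribe();
    while (running) {
        List<FlatMessage> messages = connector.getFlatListWithoutAck(100L, TimeUnit.MILLISECONDS);
        try {
            process(messages);    // hypothetical business handler
            connector.ack();      // commit the offsets recorded by the last poll
        } catch (Exception e) {
            connector.rollback(); // seeks each polled partition back to its recorded position - 1
        }
    }
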
+ 112 - 0
client/src/main/java/com/alibaba/otter/canal/client/kafka/KafkaOffsetCanalConnector.java

@@ -0,0 +1,112 @@
+package com.alibaba.otter.canal.client.kafka;
+
+import com.alibaba.fastjson.JSON;
+import com.alibaba.otter.canal.client.kafka.protocol.KafkaFlatMessage;
+import com.alibaba.otter.canal.client.kafka.protocol.KafkaMessage;
+import com.alibaba.otter.canal.protocol.FlatMessage;
+import com.alibaba.otter.canal.protocol.Message;
+import com.alibaba.otter.canal.protocol.exception.CanalClientException;
+import com.google.common.collect.Lists;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.common.TopicPartition;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Kafka connector that carries per-message offsets
+ *
+ * @Author panjianping
+ * @Email ipanjianping@qq.com
+ * @Date 2018/12/17
+ */
+public class KafkaOffsetCanalConnector extends KafkaCanalConnector {
+
+    public KafkaOffsetCanalConnector(String servers, String topic, Integer partition, String groupId, boolean flatMessage) {
+        super(servers, topic, partition, groupId, 100, flatMessage);
+        // start from the earliest unconsumed position on startup
+        properties.put("auto.offset.reset", "earliest");
+    }
+
+    /**
+     * Fetch Kafka messages without acking
+     *
+     * @param timeout
+     * @param unit
+     * @param offset  message offset to seek to (-1: no seek)
+     * @return
+     * @throws CanalClientException
+     */
+    public List<KafkaMessage> getListWithoutAck(Long timeout, TimeUnit unit, long offset) throws CanalClientException {
+        waitClientRunning();
+        if (!running) {
+            return Lists.newArrayList();
+        }
+
+        if (offset > -1) {
+            TopicPartition tp = new TopicPartition(topic, partition == null ? 0 : partition);
+            kafkaConsumer.seek(tp, offset);
+        }
+
+        ConsumerRecords<String, Message> records = kafkaConsumer.poll(unit.toMillis(timeout));
+
+        if (!records.isEmpty()) {
+            List<KafkaMessage> messages = new ArrayList<>();
+            for (ConsumerRecord<String, Message> record : records) {
+                KafkaMessage message = new KafkaMessage(record.value(), record.offset());
+                messages.add(message);
+            }
+            return messages;
+        }
+        return Lists.newArrayList();
+    }
+
+    /**
+     * Fetch Kafka messages without acking
+     *
+     * @param timeout
+     * @param unit
+     * @param offset  message offset to seek to (-1: no seek)
+     * @return
+     * @throws CanalClientException
+     */
+    public List<KafkaFlatMessage> getFlatListWithoutAck(Long timeout, TimeUnit unit, long offset) throws CanalClientException {
+        waitClientRunning();
+        if (!running) {
+            return Lists.newArrayList();
+        }
+
+        if (offset > -1) {
+            TopicPartition tp = new TopicPartition(topic, partition == null ? 0 : partition);
+            kafkaConsumer2.seek(tp, offset);
+        }
+
+        ConsumerRecords<String, String> records = kafkaConsumer2.poll(unit.toMillis(timeout));
+        if (!records.isEmpty()) {
+            List<KafkaFlatMessage> flatMessages = new ArrayList<>();
+            for (ConsumerRecord<String, String> record : records) {
+                String flatMessageJson = record.value();
+                FlatMessage flatMessage = JSON.parseObject(flatMessageJson, FlatMessage.class);
+                KafkaFlatMessage message = new KafkaFlatMessage(flatMessage, record.offset());
+                flatMessages.add(message);
+            }
+
+            return flatMessages;
+        }
+        return Lists.newArrayList();
+    }
+
+    /**
+     * Override auto.offset.reset (default: earliest)
+     *
+     * @param value
+     */
+    public void setAutoOffsetReset(String value) {
+        if (StringUtils.isNotBlank(value)) {
+            properties.put("auto.offset.reset", value);
+        }
+    }
+}

+ 32 - 0
client/src/main/java/com/alibaba/otter/canal/client/kafka/protocol/KafkaFlatMessage.java

@@ -0,0 +1,32 @@
+package com.alibaba.otter.canal.client.kafka.protocol;
+
+import com.alibaba.otter.canal.protocol.FlatMessage;
+import org.springframework.beans.BeanUtils;
+
+/**
+ * Message object (Kafka)
+ *
+ * @Author panjianping
+ * @Email ipanjianping@qq.com
+ * @Date 2018/12/17
+ */
+public class KafkaFlatMessage extends FlatMessage {
+    /**
+     * Kafka message offset
+     */
+    private long offset;
+
+    public KafkaFlatMessage(FlatMessage message, long offset) {
+        super(message.getId());
+        BeanUtils.copyProperties(message, this);
+        this.offset = offset;
+    }
+
+    public long getOffset() {
+        return offset;
+    }
+
+    public void setOffset(long offset) {
+        this.offset = offset;
+    }
+}

+ 33 - 0
client/src/main/java/com/alibaba/otter/canal/client/kafka/protocol/KafkaMessage.java

@@ -0,0 +1,33 @@
+package com.alibaba.otter.canal.client.kafka.protocol;
+
+import com.alibaba.otter.canal.protocol.Message;
+import org.springframework.beans.BeanUtils;
+
+/**
+ * Message object (Kafka)
+ *
+ * @Author panjianping
+ * @Email ipanjianping@qq.com
+ * @Date 2018/12/17
+ */
+public class KafkaMessage extends Message {
+    /**
+     * Kafka 消息 offset
+     */
+    private long offset;
+
+    public KafkaMessage(Message message, long offset) {
+        super(message.getId());
+        BeanUtils.copyProperties(message, this);
+        this.offset = offset;
+    }
+
+    public long getOffset() {
+        return offset;
+    }
+
+    public void setOffset(long offset) {
+        this.offset = offset;
+    }
+}

+ 168 - 0
client/src/test/java/com/alibaba/otter/canal/client/running/kafka/CanalKafkaOffsetClientExample.java

@@ -0,0 +1,168 @@
+package com.alibaba.otter.canal.client.running.kafka;
+
+import com.alibaba.otter.canal.client.kafka.KafkaOffsetCanalConnector;
+import com.alibaba.otter.canal.client.kafka.protocol.KafkaMessage;
+import org.apache.kafka.common.errors.WakeupException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.util.Assert;
+
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Usage example for KafkaOffsetCanalConnector
+ * <p>Another difference between KafkaOffsetCanalConnector and KafkaCanalConnector is the default value of auto.offset.reset;</p>
+ * <p>KafkaOffsetCanalConnector defaults to earliest: after canal-kafka-client restarts, it pulls messages starting from the first unconsumed record, and it also provides setAutoOffsetReset to change auto.offset.reset</p>
+ *
+ * @author panjianping @ 2018-12-18
+ * @version 1.1.3
+ */
+public class CanalKafkaOffsetClientExample {
+
+    protected final static Logger logger = LoggerFactory.getLogger(CanalKafkaOffsetClientExample.class);
+
+    private KafkaOffsetCanalConnector connector;
+
+    private static volatile boolean running = false;
+
+    private Thread thread = null;
+
+    private Thread.UncaughtExceptionHandler handler = new Thread.UncaughtExceptionHandler() {
+
+        public void uncaughtException(Thread t, Throwable e) {
+            logger.error("parse events has an error", e);
+        }
+    };
+
+    public CanalKafkaOffsetClientExample(String servers, String topic, Integer partition, String groupId) {
+        connector = new KafkaOffsetCanalConnector(servers, topic, partition, groupId, false);
+    }
+
+    public static void main(String[] args) {
+        try {
+            final CanalKafkaOffsetClientExample kafkaCanalClientExample = new CanalKafkaOffsetClientExample(
+                    AbstractKafkaTest.servers,
+                    AbstractKafkaTest.topic,
+                    AbstractKafkaTest.partition,
+                    AbstractKafkaTest.groupId);
+            logger.info("## start the kafka consumer: {}-{}", AbstractKafkaTest.topic, AbstractKafkaTest.groupId);
+            kafkaCanalClientExample.start();
+            logger.info("## the canal kafka consumer is running now ......");
+            Runtime.getRuntime().addShutdownHook(new Thread() {
+
+                public void run() {
+                    try {
+                        logger.info("## stop the kafka consumer");
+                        kafkaCanalClientExample.stop();
+                    } catch (Throwable e) {
+                        logger.warn("##something goes wrong when stopping kafka consumer:", e);
+                    } finally {
+                        logger.info("## kafka consumer is down.");
+                    }
+                }
+
+            });
+            while (running)
+                ;
+        } catch (Throwable e) {
+            logger.error("## Something goes wrong when starting up the kafka consumer:", e);
+            System.exit(0);
+        }
+    }
+
+    public void start() {
+        Assert.notNull(connector, "connector is null");
+        thread = new Thread(new Runnable() {
+
+            public void run() {
+                process();
+            }
+        });
+        thread.setUncaughtExceptionHandler(handler);
+        thread.start();
+        running = true;
+    }
+
+    public void stop() {
+        if (!running) {
+            return;
+        }
+        running = false;
+        if (thread != null) {
+            try {
+                thread.join();
+            } catch (InterruptedException e) {
+                // ignore
+            }
+        }
+    }
+
+    private void process() {
+        while (!running)
+            ;
+        while (running) {
+            try {
+                // override auto.offset.reset (default: earliest)
+                //connector.setAutoOffsetReset(null);
+                connector.connect();
+                connector.subscribe();
+                // starting message offset
+                long offset = -1;
+                // error count
+                int errorCount = 0;
+                while (running) {
+                    try {
+                        // after more than 3 errors, back off in increasing 30-second steps before retrying
+                        if (errorCount > 2) {
+                            Thread.sleep((errorCount - 2) * 1000 * 30);
+                        }
+
+                        List<KafkaMessage> messages = connector.getListWithoutAck(100L, TimeUnit.MILLISECONDS, offset); // fetch messages
+                        if (messages == null) {
+                            continue;
+                        }
+                        for (KafkaMessage message : messages) {
+                            long batchId = message.getId();
+                            int size = message.getEntries().size();
+
+                            if (batchId == -1 || size == 0) {
+                                continue;
+                            }
+
+                            // record the offset of the first message, so fetching can restart from here after a data error
+                            if (offset < 0) {
+                                offset = message.getOffset();
+                            }
+
+                            // printSummary(message, batchId, size);
+                            // printEntry(message.getEntries());
+                            logger.info(message.toString());
+                        }
+
+                        connector.ack(); // commit ack
+                        // reset the offset
+                        offset = -1;
+                        errorCount = 0;
+                    } catch (Exception e) {
+                        errorCount++;
+                        logger.error(e.getMessage(), e);
+                        if (errorCount == 3) {
+                            // after 3 retries, send an alert email about the failure
+                            // mailService.sendMail("Data sync error, please handle it promptly", "error message");
+                        }
+                    }
+                }
+            } catch (Exception e) {
+                logger.error(e.getMessage(), e);
+            }
+        }
+
+        try {
+            connector.unsubscribe();
+        } catch (WakeupException e) {
+            // No-op. Continue process
+        }
+        connector.disconnect();
+    }
+}

+ 3 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/tsdb/MemoryTableMeta.java

@@ -30,6 +30,7 @@ import com.alibaba.fastsql.sql.ast.statement.SQLSelectOrderByItem;
 import com.alibaba.fastsql.sql.ast.statement.SQLTableElement;
 import com.alibaba.fastsql.sql.dialect.mysql.ast.MySqlPrimaryKey;
 import com.alibaba.fastsql.sql.dialect.mysql.ast.MySqlUnique;
+import com.alibaba.fastsql.sql.dialect.mysql.ast.expr.MySqlOrderingExpr;
 import com.alibaba.fastsql.sql.repository.Schema;
 import com.alibaba.fastsql.sql.repository.SchemaObject;
 import com.alibaba.fastsql.sql.repository.SchemaRepository;
@@ -258,6 +259,8 @@ public class MemoryTableMeta implements TableMetaTSDB {
             return ((SQLCharExpr) sqlName).getText();
         } else if (sqlName instanceof SQLMethodInvokeExpr) {
             return DruidDdlParser.unescapeName(((SQLMethodInvokeExpr) sqlName).getMethodName());
+        } else if (sqlName instanceof MySqlOrderingExpr) {
+            return getSqlName(((MySqlOrderingExpr) sqlName).getExpr());
         } else {
             return sqlName.toString();
         }