@@ -1,8 +1,6 @@
 package com.alibaba.otter.canal.parse.inbound.mysql.tsdb;
 
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -15,58 +13,54 @@ import java.util.regex.Pattern;
 
 import javax.annotation.Resource;
 
+import org.apache.commons.beanutils.BeanUtils;
+import org.apache.commons.lang.ObjectUtils;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import com.alibaba.druid.sql.repository.Schema;
 import com.alibaba.fastjson.JSON;
-import com.alibaba.fastjson.JSONArray;
 import com.alibaba.fastjson.JSONObject;
 import com.alibaba.otter.canal.filter.CanalEventFilter;
+import com.alibaba.otter.canal.parse.driver.mysql.packets.server.ResultSetPacket;
 import com.alibaba.otter.canal.parse.exception.CanalParseException;
 import com.alibaba.otter.canal.parse.inbound.TableMeta;
 import com.alibaba.otter.canal.parse.inbound.TableMeta.FieldMeta;
 import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection;
-import com.alibaba.otter.canal.parse.inbound.mysql.MysqlConnection.ProcessJdbcResult;
+import com.alibaba.otter.canal.parse.inbound.mysql.dbsync.TableMetaCache;
 import com.alibaba.otter.canal.parse.inbound.mysql.ddl.DdlResult;
 import com.alibaba.otter.canal.parse.inbound.mysql.ddl.DruidDdlParser;
 import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaHistoryDAO;
 import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaSnapshotDAO;
 import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.model.MetaHistoryDO;
 import com.alibaba.otter.canal.parse.inbound.mysql.tsdb.model.MetaSnapshotDO;
-
 import com.taobao.tddl.dbsync.binlog.BinlogPosition;
-import org.apache.commons.beanutils.BeanUtils;
-import org.apache.commons.lang.ObjectUtils;
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
- * 基于console远程管理
- *
- * see internal class: CanalTableMeta , ConsoleTableMetaTSDB
+ * 基于console远程管理 see internal class: CanalTableMeta , ConsoleTableMetaTSDB
  *
  * @author agapple 2017年7月27日 下午10:47:55
  * @since 3.2.5
  */
 public class TableMetaManager implements TableMetaTSDB {
-    private static Pattern pattern = Pattern.compile("Duplicate entry '.*' for key '*'");
 
+    private static Logger logger = LoggerFactory.getLogger(TableMetaManager.class);
+    private static Pattern pattern = Pattern.compile("Duplicate entry '.*' for key '*'");
     private static final BinlogPosition INIT_POSITION = BinlogPosition.parseFromString("0:0#-2.-1");
-    private Logger logger = LoggerFactory.getLogger(TableMetaManager.class);
-    private String consoleDomain = null;
-    private int retry = 3;
-    private MemoryTableMeta memoryTableMeta;
-    private MysqlConnection connection; // 查询meta信息的链接
-    private CanalEventFilter filter;
-    private BinlogPosition lastPosition;
-    private ScheduledExecutorService scheduler;
+    private MemoryTableMeta memoryTableMeta;
+    private MysqlConnection connection; // 查询meta信息的链接
+    private CanalEventFilter filter;
+    private BinlogPosition lastPosition;
+    private ScheduledExecutorService scheduler;
 
     @Resource
-    private MetaHistoryDAO metaHistoryDAO;
+    private MetaHistoryDAO metaHistoryDAO;
 
     @Resource
-    private MetaSnapshotDAO metaSnapshotDAO;
+    private MetaSnapshotDAO metaSnapshotDAO;
 
-    public void init(){
+    public void init() {
         this.memoryTableMeta = new MemoryTableMeta(logger);
         this.scheduler = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
 
@@ -91,7 +85,7 @@ public class TableMetaManager implements TableMetaTSDB {
         }, 24, 24, TimeUnit.SECONDS);
     }
 
-    public TableMetaManager() {
+    public TableMetaManager(){
 
     }
 
@@ -147,80 +141,46 @@ public class TableMetaManager implements TableMetaTSDB {
      * 初始化的时候dump一下表结构
      */
     private boolean dumpTableMeta(MysqlConnection connection, final CanalEventFilter filter) {
-        List<String> schemas = connection.query("show databases", new ProcessJdbcResult<List>() {
-
-            @Override
-            public List process(ResultSet rs) throws SQLException {
-                List<String> schemas = new ArrayList<String>();
-                while (rs.next()) {
-                    String schema = rs.getString(1);
-                    if (!filter.filter(schema)) {
-                        schemas.add(schema);
-                    }
+        try {
+            ResultSetPacket packet = connection.query("show databases");
+            List<String> schemas = new ArrayList<String>();
+            for (String schema : packet.getFieldValues()) {
+                if (!filter.filter(schema)) {
+                    schemas.add(schema);
                 }
-                return schemas;
             }
-        });
 
-        for (String schema : schemas) {
-            List<String> tables = connection.query("show tables from `" + schema + "`", new ProcessJdbcResult<List>() {
-
-                @Override
-                public List process(ResultSet rs) throws SQLException {
-                    List<String> tables = new ArrayList<String>();
-                    while (rs.next()) {
-                        String table = rs.getString(1);
-                        if (!filter.filter(table)) {
-                            tables.add(table);
-                        }
+            for (String schema : schemas) {
+                packet = connection.query("show tables from `" + schema + "`");
+                List<String> tables = new ArrayList<String>();
+                for (String table : packet.getFieldValues()) {
+                    if (!filter.filter(table)) {
+                        tables.add(table);
                     }
-                    return tables;
                 }
-            });
-
-            StringBuilder sql = new StringBuilder();
-            for (String table : tables) {
-                sql.append("show create table `" + schema + "`.`" + table + "`;");
-            }
 
-            // 使用多语句方式读取
-            Statement stmt = null;
-            try {
-                stmt = connection.getConn().createStatement();
-                ResultSet rs = stmt.executeQuery(sql.toString());
-                boolean existMoreResult = false;
-                do {
-                    if (existMoreResult) {
-                        rs = stmt.getResultSet();
-                    }
+                StringBuilder sql = new StringBuilder();
+                for (String table : tables) {
+                    sql.append("show create table `" + schema + "`.`" + table + "`;");
+                }
 
-                    while (rs.next()) {
-                        String oneTableCreateSql = rs.getString(2);
+                List<ResultSetPacket> packets = connection.queryMulti(sql.toString());
+                for (ResultSetPacket onePacket : packets) {
+                    if (onePacket.getFieldValues().size() > 1) {
+                        String oneTableCreateSql = onePacket.getFieldValues().get(1);
                         memoryTableMeta.apply(INIT_POSITION, schema, oneTableCreateSql);
                     }
-
-                    existMoreResult = stmt.getMoreResults();
-                } while (existMoreResult);
-            } catch (SQLException e) {
-                throw new CanalParseException(e);
-            } finally {
-                if (stmt != null) {
-                    try {
-                        stmt.close();
-                    } catch (SQLException e) {
-                        // ignore
-                    }
                 }
             }
-        }
 
-        return true;
+            return true;
+        } catch (IOException e) {
+            throw new CanalParseException(e);
+        }
     }
 
     private boolean applyHistoryToDB(BinlogPosition position, String schema, String ddl) {
-
         Map<String, String> content = new HashMap<String, String>();
-
         content.put("binlogFile", position.getFileName());
         content.put("binlogOffest", String.valueOf(position.getPosition()));
         content.put("binlogMasterId", String.valueOf(position.getMasterId()));
@@ -240,26 +200,23 @@ public class TableMetaManager implements TableMetaTSDB {
             // content.put("extra", "");
         }
 
-        for (int i = 0; i < retry; i++) {
-            MetaHistoryDO metaDO = new MetaHistoryDO();
-            try {
-                BeanUtils.populate(metaDO, content);
-                // 会建立唯一约束,解决:
-                // 1. 重复的binlog file+offest
-                // 2. 重复的masterId+timestamp
-                metaHistoryDAO.insert(metaDO);
-            } catch (Throwable e) {
-                if (isUkDuplicateException(e)) {
-                    // 忽略掉重复的位点
-                    logger.warn("dup apply for sql : " + ddl);
-                } else {
-                    throw new RuntimeException("apply history to db failed caused by : " + e.getMessage());
-                }
-
+        MetaHistoryDO metaDO = new MetaHistoryDO();
+        try {
+            BeanUtils.populate(metaDO, content);
+            // 会建立唯一约束,解决:
+            // 1. 重复的binlog file+offest
+            // 2. 重复的masterId+timestamp
+            metaHistoryDAO.insert(metaDO);
+        } catch (Throwable e) {
+            if (isUkDuplicateException(e)) {
+                // 忽略掉重复的位点
+                logger.warn("dup apply for sql : " + ddl);
+            } else {
+                throw new CanalParseException("apply history to db failed caused by : " + e.getMessage());
             }
-            return true;
+
         }
-        return false;
+        return true;
     }
 
     /**
@@ -290,9 +247,7 @@ public class TableMetaManager implements TableMetaTSDB {
             }
         }
         if (compareAll) {
-
             Map<String, String> content = new HashMap<String, String>();
-
             content.put("binlogFile", position.getFileName());
             content.put("binlogOffest", String.valueOf(position.getPosition()));
             content.put("binlogMasterId", String.valueOf(position.getMasterId()));
@@ -302,24 +257,19 @@ public class TableMetaManager implements TableMetaTSDB {
                 throw new RuntimeException("apply failed caused by content is empty in applySnapshotToDB");
             }
 
-            for (int i = 0; i < retry; i++) {
-                MetaSnapshotDO snapshotDO = new MetaSnapshotDO();
-                try {
-                    BeanUtils.populate(snapshotDO, content);
-                    metaSnapshotDAO.insert(snapshotDO);
-                } catch (Throwable e) {
-                    if (isUkDuplicateException(e)) {
-                        // 忽略掉重复的位点
-                        logger.warn("dup apply snapshot for data : " + snapshotDO.getData());
-                    } else {
-                        throw new RuntimeException("apply failed caused by : " + e.getMessage());
-                    }
+            MetaSnapshotDO snapshotDO = new MetaSnapshotDO();
+            try {
+                BeanUtils.populate(snapshotDO, content);
+                metaSnapshotDAO.insert(snapshotDO);
+            } catch (Throwable e) {
+                if (isUkDuplicateException(e)) {
+                    // 忽略掉重复的位点
+                    logger.warn("dup apply snapshot for data : " + snapshotDO.getData());
+                } else {
+                    throw new CanalParseException("apply failed caused by : " + e.getMessage());
                 }
-                return true;
-
             }
-            return false;
-
+            return true;
         } else {
             logger.error("compare failed , check log");
         }
@@ -327,26 +277,15 @@ public class TableMetaManager implements TableMetaTSDB {
     }
 
     private boolean compareTableMetaDbAndMemory(MysqlConnection connection, final String schema, final String table) {
-        TableMeta tableMetaFromDB = connection.query("desc " + getFullName(schema, table),
-            new ProcessJdbcResult<TableMeta>() {
-
-                @Override
-                public TableMeta process(ResultSet rs) throws SQLException {
-                    List<FieldMeta> metas = new ArrayList<FieldMeta>();
-                    while (rs.next()) {
-                        FieldMeta meta = new FieldMeta();
-                        // 做一个优化,使用String.intern(),共享String对象,减少内存使用
-                        meta.setColumnName(rs.getString("Field"));
-                        meta.setColumnType(rs.getString("Type"));
-                        meta.setNullable(StringUtils.equalsIgnoreCase(rs.getString("Null"), "YES"));
-                        meta.setKey("PRI".equalsIgnoreCase(rs.getString("Key")));
-                        meta.setDefaultValue(rs.getString("Default"));
-                        metas.add(meta);
-                    }
-
-                    return new TableMeta(schema, table, metas);
-                }
-            });
+        TableMeta tableMetaFromDB = new TableMeta();
+        tableMetaFromDB.setSchema(schema);
+        tableMetaFromDB.setTable(table);
+        try {
+            ResultSetPacket packet = connection.query("desc " + getFullName(schema, table));
+            tableMetaFromDB.setFields(TableMetaCache.parserTableMeta(packet));
+        } catch (IOException e) {
+            throw new CanalParseException(e);
+        }
 
         TableMeta tableMetaFromMem = memoryTableMeta.find(schema, table);
         boolean result = compareTableMeta(tableMetaFromMem, tableMetaFromDB);
@@ -358,119 +297,70 @@ public class TableMetaManager implements TableMetaTSDB {
     }
 
     private BinlogPosition buildMemFromSnapshot(BinlogPosition position) {
-
-        Map<String, String> content = new HashMap<String, String>();
-
-        content.put("binlogFile", position.getFileName());
-        content.put("binlogOffest", String.valueOf(position.getPosition()));
-        content.put("binlogMasterId", String.valueOf(position.getMasterId()));
-        content.put("binlogTimestamp", String.valueOf(position.getTimestamp()));
-        if (content.isEmpty()) {
-            throw new RuntimeException("apply failed caused by content is empty in buildMemFromSnapshot");
-        }
-        for (int i = 0; i < retry; i++) {
-
-            try {
-
-                String timestamp = content.get("binlogTimestamp");
-                MetaSnapshotDO snapshotDO = metaSnapshotDAO.findByTimestamp(Long.valueOf(timestamp));
-                JSONObject jsonData = new JSONObject();
-                jsonData.put("content", JSON.toJSONString(snapshotDO));
-                if (jsonData == null) {
-                    // 可能没有任何snapshot数据
+        try {
+            MetaSnapshotDO snapshotDO = metaSnapshotDAO.findByTimestamp(position.getTimestamp());
+            String binlogFile = snapshotDO.getBinlogFile();
+            Long binlogOffest = snapshotDO.getBinlogOffest();
+            String binlogMasterId = snapshotDO.getBinlogMasterId();
+            Long binlogTimestamp = snapshotDO.getBinlogTimestamp();
+
+            BinlogPosition snapshotPosition = new BinlogPosition(binlogFile,
+                binlogOffest == null ? 0l : binlogOffest,
+                Long.valueOf(binlogMasterId == null ? "-2" : binlogMasterId),
+                binlogTimestamp == null ? 0l : binlogTimestamp);
+            // data存储为Map<String,String>,每个分库一套建表
+            String sqlData = snapshotDO.getData();
+            JSONObject jsonObj = JSON.parseObject(sqlData);
+            for (Map.Entry entry : jsonObj.entrySet()) {
+                // 记录到内存
+                if (!memoryTableMeta.apply(snapshotPosition,
+                    ObjectUtils.toString(entry.getKey()),
+                    ObjectUtils.toString(entry.getValue()))) {
                     return null;
                 }
-
-                String binlogFile = jsonData.getString("binlogFile");
-                String binlogOffest = jsonData.getString("binlogOffest");
-                String binlogMasterId = jsonData.getString("binlogMasterId");
-                String binlogTimestamp = jsonData.getString("binlogTimestamp");
-
-                BinlogPosition snapshotPosition = new BinlogPosition(binlogFile,
-                    Long.valueOf(binlogOffest == null ? "0" : binlogOffest),
-                    Long.valueOf(binlogMasterId == null ? "-2" : binlogMasterId),
-                    Long.valueOf(binlogTimestamp == null ? "0" : binlogTimestamp));
-                // data存储为Map<String,String>,每个分库一套建表
-                String sqlData = jsonData.getString("data");
-                JSONObject jsonObj = JSON.parseObject(sqlData);
-                for (Map.Entry entry : jsonObj.entrySet()) {
-                    // 记录到内存
-                    if (!memoryTableMeta.apply(snapshotPosition,
-                        ObjectUtils.toString(entry.getKey()),
-                        ObjectUtils.toString(entry.getValue()))) {
-                        return null;
-                    }
-                }
-
-                return snapshotPosition;
-
-            } catch (Throwable e) {
-                throw new RuntimeException("apply failed caused by : " + e.getMessage());
             }
 
+            return snapshotPosition;
+        } catch (Throwable e) {
+            throw new CanalParseException("apply failed caused by : " + e.getMessage());
         }
-
-        return null;
-
     }
 
     private boolean applyHistoryOnMemory(BinlogPosition position, BinlogPosition rollbackPosition) {
-        Map<String, String> content = new HashMap<String, String>();
-        content.put("binlogSnapshotTimestamp", String.valueOf(position.getTimestamp()));
-        content.put("binlogFile", rollbackPosition.getFileName());
-        content.put("binlogOffest", String.valueOf(rollbackPosition.getPosition()));
-        content.put("binlogMasterId", String.valueOf(rollbackPosition.getMasterId()));
-        content.put("binlogTimestamp", String.valueOf(rollbackPosition.getTimestamp()));
-        String timestamp = content.get("binlogTimestamp");
-        String binlogSnapshotTimestamp = content.get("binlogSnapshotTimestamp");
-
-        for (int i = 0; i < retry; i++) {
-            try {
-                List<MetaHistoryDO> metaHistoryDOList = metaHistoryDAO.findByTimestamp(
-                    Long.valueOf(binlogSnapshotTimestamp),
-                    Long.valueOf(timestamp));
-                JSONObject json = new JSONObject();
-                json.put("content", JSON.toJSONString(metaHistoryDOList));
-                String data = ObjectUtils.toString(json.get("content"));
-                JSONArray jsonArray = JSON.parseArray(data);
-                for (Object jsonObj : jsonArray) {
-                    JSONObject jsonData = (JSONObject)jsonObj;
-                    String binlogFile = jsonData.getString("binlogFile");
-                    String binlogOffest = jsonData.getString("binlogOffest");
-                    String binlogMasterId = jsonData.getString("binlogMasterId");
-                    String binlogTimestamp = jsonData.getString("binlogTimestamp");
-                    String useSchema = jsonData.getString("useSchema");
-                    String sqlData = jsonData.getString("sql");
-                    BinlogPosition snapshotPosition = new BinlogPosition(binlogFile,
-                        Long.valueOf(binlogOffest == null ? "0" : binlogOffest),
-                        Long.valueOf(binlogMasterId == null ? "-2" : binlogMasterId),
-                        Long.valueOf(binlogTimestamp == null ? "0" : binlogTimestamp));
-
-                    // 如果是同一秒内,对比一下history的位点,如果比期望的位点要大,忽略之
-                    if (snapshotPosition.getTimestamp() > rollbackPosition.getTimestamp()) {
-                        continue;
-                    } else if (rollbackPosition.getMasterId() == snapshotPosition.getMasterId()
-                        && snapshotPosition.compareTo(rollbackPosition) > 0) {
-                        continue;
-                    }
-
-                    // 记录到内存
-                    if (!memoryTableMeta.apply(snapshotPosition, useSchema, sqlData)) {
-                        return false;
-                    }
-
+        try {
+            List<MetaHistoryDO> metaHistoryDOList = metaHistoryDAO.findByTimestamp(position.getTimestamp(),
+                rollbackPosition.getTimestamp());
+            for (MetaHistoryDO metaHistoryDO : metaHistoryDOList) {
+                String binlogFile = metaHistoryDO.getBinlogFile();
+                Long binlogOffest = metaHistoryDO.getBinlogOffest();
+                String binlogMasterId = metaHistoryDO.getBinlogMasterId();
+                Long binlogTimestamp = metaHistoryDO.getBinlogTimestamp();
+                String useSchema = metaHistoryDO.getUseSchema();
+                String sqlData = metaHistoryDO.getSql();
+                BinlogPosition snapshotPosition = new BinlogPosition(binlogFile,
+                    binlogOffest == null ? 0L : binlogOffest,
+                    Long.valueOf(binlogMasterId == null ? "-2" : binlogMasterId),
+                    binlogTimestamp == null ? 0L : binlogTimestamp);
+
+                // 如果是同一秒内,对比一下history的位点,如果比期望的位点要大,忽略之
+                if (snapshotPosition.getTimestamp() > rollbackPosition.getTimestamp()) {
+                    continue;
+                } else if (rollbackPosition.getMasterId() == snapshotPosition.getMasterId()
+                    && snapshotPosition.compareTo(rollbackPosition) > 0) {
+                    continue;
                 }
 
-                return jsonArray.size() > 0;
-            } catch (Throwable e) {
-
-                throw new RuntimeException("apply failed", e);
+                // 记录到内存
+                if (!memoryTableMeta.apply(snapshotPosition, useSchema, sqlData)) {
+                    return false;
+                }
 
             }
 
+            return metaHistoryDOList.size() > 0;
+        } catch (Throwable e) {
+            throw new CanalParseException("apply failed", e);
         }
-
-        return false;
     }
 
     private String getFullName(String schema, String table) {