@@ -1,9 +1,6 @@
 package com.alibaba.otter.canal.client.kafka;
 
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Properties;
+import java.util.*;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.kafka.clients.consumer.ConsumerRecord;
@@ -22,7 +19,7 @@ import com.google.common.collect.Lists;
 
 /**
  * canal kafka data operation client
- * 
+ *
  * <pre>
  * Notes:
  * 1. Compared with canal {@linkplain SimpleCanalConnector}, get and ack must not run concurrently here: a single thread performs a get, finishes processing and acks in memory, and only then fetches the next get
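
The note in the Javadoc above is the key usage contract: one thread owns the whole get, process, ack cycle. A minimal client sketch under that contract, assuming connect()/subscribe()/getListWithoutAck()/ack() behave as in canal's Kafka client examples (server address, topic, and group id are placeholders); the field changes below then add the per-partition offset map that the new rollback() relies on:

import java.util.List;
import java.util.concurrent.TimeUnit;

import com.alibaba.otter.canal.client.kafka.KafkaCanalConnector;
import com.alibaba.otter.canal.protocol.Message;

public class SingleThreadClientSketch {

    public static void main(String[] args) throws Exception {
        // constructor arguments follow the signature shown in this diff; the values are placeholders
        KafkaCanalConnector connector = new KafkaCanalConnector("127.0.0.1:9092", "example", null,
            "example-group", 100, false);
        connector.connect();
        connector.subscribe();
        while (true) {
            // one thread drives the whole cycle: get a batch, finish handling it, ack,
            // and only then issue the next get
            List<Message> messages = connector.getListWithoutAck(100L, TimeUnit.MILLISECONDS);
            for (Message message : messages) {
                handle(message);
            }
            connector.ack();
        }
    }

    // hypothetical handler; a real client would iterate message.getEntries() here
    private static void handle(Message message) {
    }
}
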
@@ -33,14 +30,16 @@ import com.google.common.collect.Lists;
  */
 public class KafkaCanalConnector implements CanalMQConnector {
 
-    protected KafkaConsumer<String, Message> kafkaConsumer;
-    protected KafkaConsumer<String, String>  kafkaConsumer2; // consumer for flat-message data
-    protected String                         topic;
-    protected Integer                        partition;
-    protected Properties                     properties;
-    private volatile boolean                 connected = false;
-    protected volatile boolean               running   = false;
-    private boolean                          flatMessage;
+    private KafkaConsumer<String, Message> kafkaConsumer;
+    private KafkaConsumer<String, String>  kafkaConsumer2; // consumer for flat-message data
+    private String                         topic;
+    private Integer                        partition;
+    private Properties                     properties;
+    private volatile boolean               connected = false;
+    private volatile boolean               running   = false;
+    private boolean                        flatMessage;
+
+    private Map<Integer, Long>             currentOffsets = new HashMap<>();
 
     public KafkaCanalConnector(String servers, String topic, Integer partition, String groupId, Integer batchSize,
                                boolean flatMessage){
@@ -80,6 +79,7 @@ public class KafkaCanalConnector implements CanalMQConnector {
         connected = true;
         if (kafkaConsumer == null && !flatMessage) {
             kafkaConsumer = new KafkaConsumer<String, Message>(properties);
+
         }
         if (kafkaConsumer2 == null && flatMessage) {
             kafkaConsumer2 = new KafkaConsumer<String, String>(properties);
@@ -181,6 +181,11 @@ public class KafkaCanalConnector implements CanalMQConnector {
 
         ConsumerRecords<String, Message> records = kafkaConsumer.poll(unit.toMillis(timeout));
 
+        currentOffsets.clear();
+        for (TopicPartition topicPartition : records.partitions()) {
+            currentOffsets.put(topicPartition.partition(), kafkaConsumer.position(topicPartition));
+        }
+
         if (!records.isEmpty()) {
             List<Message> messages = new ArrayList<>();
             for (ConsumerRecord<String, Message> record : records) {
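
The flat-message path below gets the same bookkeeping. For context, the KafkaConsumer mechanism the new currentOffsets map relies on: after a poll(), position() returns the offset of the next record that will be fetched for a partition, and seek() moves that position so a later poll() re-reads from the rewound offset. A standalone sketch against a plain KafkaConsumer (broker address, group id, and topic are placeholders):

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class PositionSeekSketch {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "127.0.0.1:9092"); // placeholder broker
        props.put("group.id", "example-group");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singletonList("example")); // placeholder topic

        ConsumerRecords<String, String> records = consumer.poll(1000L);
        for (TopicPartition tp : records.partitions()) {
            long nextOffset = consumer.position(tp); // offset of the next record to fetch
            // seeking back by one makes the last record returned for this partition come back
            // on the next poll; this is the same position/seek pair the diff records and replays
            consumer.seek(tp, nextOffset - 1);
        }
        consumer.close();
    }
}
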
@@ -213,6 +218,12 @@ public class KafkaCanalConnector implements CanalMQConnector {
         }
 
         ConsumerRecords<String, String> records = kafkaConsumer2.poll(unit.toMillis(timeout));
+
+        currentOffsets.clear();
+        for (TopicPartition topicPartition : records.partitions()) {
+            currentOffsets.put(topicPartition.partition(), kafkaConsumer2.position(topicPartition));
+        }
+
         if (!records.isEmpty()) {
             List<FlatMessage> flatMessages = new ArrayList<>();
             for (ConsumerRecord<String, String> record : records) {
@@ -227,7 +238,22 @@ public class KafkaCanalConnector implements CanalMQConnector {
     }
 
     @Override
-    public void rollback() throws CanalClientException {
+    public void rollback() {
+        waitClientRunning();
+        if (!running) {
+            return;
+        }
+        // roll back all partitions
+        if (kafkaConsumer != null) {
+            for (Map.Entry<Integer, Long> entry : currentOffsets.entrySet()) {
+                kafkaConsumer.seek(new TopicPartition(topic, entry.getKey()), entry.getValue() - 1);
+            }
+        }
+        if (kafkaConsumer2 != null) {
+            for (Map.Entry<Integer, Long> entry : currentOffsets.entrySet()) {
+                kafkaConsumer2.seek(new TopicPartition(topic, entry.getKey()), entry.getValue() - 1);
+            }
+        }
     }
 
     /**
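
With the offsets captured at poll time, rollback() rewinds every partition of the last poll to the recorded position minus one. A hedged sketch of the error path a client could build on top of that, reusing the connector and handle(...) from the earlier sketch (getListWithoutAck/ack are again assumed to follow canal's MQ client examples):

// consumption step with retry, building on the single-threaded loop shown earlier
private static void pollOnce(KafkaCanalConnector connector) {
    List<Message> messages = connector.getListWithoutAck(100L, TimeUnit.MILLISECONDS);
    try {
        for (Message message : messages) {
            handle(message); // hypothetical handler, may throw
        }
        connector.ack(); // ack only after the whole batch was handled
    } catch (Exception e) {
        // seeks each partition of the last poll back to the recorded position - 1,
        // so the next getListWithoutAck resumes from the rewound offset
        connector.rollback();
    }
}

As with get and ack, rollback is expected to run on the same single consumer thread, since it seeks the underlying KafkaConsumer directly.
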