Browse Source

Merge pull request #819 from lcybo/metrics_support

canal 性能监控改动版,merge到开发分支(metrics_support)
lcybo 6 years ago
parent
commit
725d36aa98
68 changed files with 2117 additions and 707 deletions
  1. 2 0
      .gitignore
  2. 6 6
      deployer/pom.xml
  3. 0 15
      deployer/src/main/bin/metrics_env.sh
  4. 1 6
      deployer/src/main/bin/startup.sh
  5. 12 1
      deployer/src/main/java/com/alibaba/otter/canal/deployer/CanalController.java
  6. 9 1
      deployer/src/main/resources/canal.properties
  7. 9 10
      deployer/src/main/resources/example/instance.properties
  8. 3 2
      deployer/src/main/resources/spring/default-instance.xml
  9. 3 2
      deployer/src/main/resources/spring/file-instance.xml
  10. 5 4
      deployer/src/main/resources/spring/group-instance.xml
  11. 2 1
      deployer/src/main/resources/spring/local-instance.xml
  12. 3 2
      deployer/src/main/resources/spring/memory-instance.xml
  13. 67 0
      docker/Dockerfile
  14. 30 0
      docker/build.sh
  15. 117 0
      docker/image/admin/app.sh
  16. 2 0
      docker/image/admin/bin/clean_log
  17. 45 0
      docker/image/admin/bin/clean_log.sh
  18. 13 0
      docker/image/admin/health.sh
  19. 11 0
      docker/image/alidata/bin/exec_rc_local.sh
  20. 6 0
      docker/image/alidata/bin/lark-wait
  21. 27 0
      docker/image/alidata/bin/main.sh
  22. 19 0
      docker/image/alidata/init/02init-sshd.sh
  23. 66 0
      docker/image/alidata/init/fix-hosts.py
  24. 40 0
      docker/image/alidata/lib/proc.sh
  25. 92 0
      docker/run.sh
  26. 1 1
      driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/MysqlConnector.java
  27. 8 13
      instance/manager/src/main/java/com/alibaba/otter/canal/instance/manager/CanalInstanceWithManager.java
  28. 25 1
      instance/manager/src/main/java/com/alibaba/otter/canal/instance/manager/model/CanalParameter.java
  29. 3 1
      kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/MessageDeserializer.java
  30. 2 2
      kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/running/ClientRunningData.java
  31. 44 38
      kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/running/ClientRunningMonitor.java
  32. 3 3
      kafka/src/main/java/com/alibaba/otter/canal/kafka/CanalServerStarter.java
  33. 25 26
      kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/CanalKafkaProducer.java
  34. 5 2
      kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/CanalKafkaStarter.java
  35. 17 8
      kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/KafkaProperties.java
  36. 49 10
      kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/MessageSerializer.java
  37. 1 0
      kafka/src/main/resources/kafka.yml
  38. 2 0
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/AbstractEventParser.java
  39. 26 18
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/AbstractMysqlEventParser.java
  40. 29 10
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlConnection.java
  41. 16 3
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlEventParser.java
  42. 34 1
      parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlMultiStageCoprocessor.java
  43. 0 10
      pom.xml
  44. 53 66
      prometheus/src/main/java/com/alibaba/otter/canal/prometheus/CanalInstanceExports.java
  45. 0 21
      prometheus/src/main/java/com/alibaba/otter/canal/prometheus/CanalServerExports.java
  46. 14 0
      prometheus/src/main/java/com/alibaba/otter/canal/prometheus/InstanceRegistry.java
  47. 31 26
      prometheus/src/main/java/com/alibaba/otter/canal/prometheus/PrometheusService.java
  48. 134 0
      prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/EntryCollector.java
  49. 0 79
      prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/InboundThroughputAspect.java
  50. 0 64
      prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/InstanceMetaCollector.java
  51. 0 75
      prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/MemoryStoreCollector.java
  52. 91 0
      prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/MetaCollector.java
  53. 0 80
      prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/OutboundThroughputAspect.java
  54. 116 0
      prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/ParserCollector.java
  55. 69 49
      prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/PrometheusCanalEventDownStreamHandler.java
  56. 138 0
      prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/PrometheusClientInstanceProfiler.java
  57. 84 0
      prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/SinkCollector.java
  58. 132 0
      prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/StoreCollector.java
  59. 0 11
      prometheus/src/main/resources/META-INF/aop.xml
  60. 39 0
      server/src/main/java/com/alibaba/otter/canal/server/netty/CanalServerWithNettyProfiler.java
  61. 13 0
      server/src/main/java/com/alibaba/otter/canal/server/netty/ClientInstanceProfiler.java
  62. 16 0
      server/src/main/java/com/alibaba/otter/canal/server/netty/NettyUtils.java
  63. 1 1
      server/src/main/java/com/alibaba/otter/canal/server/netty/handler/ClientAuthenticationHandler.java
  64. 38 37
      server/src/main/java/com/alibaba/otter/canal/server/netty/handler/SessionHandler.java
  65. 179 0
      server/src/main/java/com/alibaba/otter/canal/server/netty/listener/ChannelFutureAggregator.java
  66. 53 0
      server/src/test/java/com/alibaba/otter/canal/server/CanalServerWithEmbedded_FileModeTest.java
  67. 17 1
      sink/src/main/java/com/alibaba/otter/canal/sink/entry/EntryEventSink.java
  68. 19 0
      store/src/main/java/com/alibaba/otter/canal/store/memory/MemoryEventStoreWithBuffer.java

+ 2 - 0
.gitignore

@@ -14,3 +14,5 @@ jtester.properties
 .idea/
 *.iml
 .DS_Store
+*.tar.gz
+*.rpm

+ 6 - 6
deployer/pom.xml

@@ -18,12 +18,12 @@
 		</dependency>
 
 		<!-- 这里指定runtime的metrics provider-->
-		<!--<dependency>-->
-			<!--<groupId>com.alibaba.otter</groupId>-->
-			<!--<artifactId>canal.prometheus</artifactId>-->
-			<!--<version>${project.version}</version>-->
-			<!--<scope>runtime</scope>-->
-		<!--</dependency>-->
+		<dependency>
+			<groupId>com.alibaba.otter</groupId>
+			<artifactId>canal.prometheus</artifactId>
+			<version>${project.version}</version>
+			<scope>runtime</scope>
+		</dependency>
 	</dependencies>
 	
 	<build>

+ 0 - 15
deployer/src/main/bin/metrics_env.sh

@@ -1,15 +0,0 @@
-#!/bin/bash
-# Additional line arg for current prometheus solution
-case "`uname`" in
-Linux)
-    bin_abs_path=$(readlink -f $(dirname $0))
-	;;
-*)
-	bin_abs_path=`cd $(dirname $0); pwd`
-	;;
-esac
-base=${bin_abs_path}/..
-if [ $(ls $base/lib/aspectjweaver*.jar | wc -l) -eq 1 ]; then
-    WEAVER=$(ls $base/lib/aspectjweaver*.jar)
-    METRICS_OPTS=" -javaagent:"${WEAVER}" "
-fi

+ 1 - 6
deployer/src/main/bin/startup.sh

@@ -94,12 +94,7 @@ then
 	echo LOG CONFIGURATION : $logback_configurationFile
 	echo canal conf : $canal_conf 
 	echo CLASSPATH :$CLASSPATH
-#   metrics support options
-#	if [ -x $base/bin/metrics_env.sh ]; then
-#	    . $base/bin/metrics_env.sh
-#	    echo METRICS_OPTS $METRICS_OPTS
-#	fi
-	$JAVA $JAVA_OPTS $METRICS_OPTS $JAVA_DEBUG_OPT $CANAL_OPTS -classpath .:$CLASSPATH com.alibaba.otter.canal.deployer.CanalLauncher 1>>$base/logs/canal/canal.log 2>&1 &
+	$JAVA $JAVA_OPTS $JAVA_DEBUG_OPT $CANAL_OPTS -classpath .:$CLASSPATH com.alibaba.otter.canal.deployer.CanalLauncher 1>>$base/logs/canal/canal.log 2>&1 &
 	echo $! > $base/bin/canal.pid 
 	
 	echo "cd to $current_path for continue"

+ 12 - 1
deployer/src/main/java/com/alibaba/otter/canal/deployer/CanalController.java

@@ -379,7 +379,18 @@ public class CanalController {
     }
 
     private String getProperty(Properties properties, String key) {
-        return StringUtils.trim(properties.getProperty(StringUtils.trim(key)));
+        key = StringUtils.trim(key);
+        String value = System.getProperty(key);
+
+        if (value == null) {
+            value = System.getenv(key);
+        }
+
+        if (value == null) {
+            value = properties.getProperty(key);
+        }
+
+        return StringUtils.trim(value);
     }
 
     public void start() throws Throwable {

+ 9 - 1
deployer/src/main/resources/canal.properties

@@ -3,7 +3,7 @@
 #################################################
 canal.id= 1
 canal.ip=
-canal.port= 11111
+canal.port=11111
 canal.zkServers=
 # flush data to zk
 canal.zookeeper.flush.period = 1000
@@ -43,6 +43,7 @@ canal.instance.filter.query.dml = false
 canal.instance.filter.query.ddl = false
 canal.instance.filter.table.error = false
 canal.instance.filter.rows = false
+canal.instance.filter.transaction.entry = false
 
 # binlog format/image check
 canal.instance.binlog.format = ROW,STATEMENT,MIXED 
@@ -58,6 +59,13 @@ canal.instance.parser.parallel = true
 ## disruptor ringbuffer size, must be power of 2
 canal.instance.parser.parallelBufferSize = 256
 
+# table meta tsdb info
+canal.instance.tsdb.enable=true
+canal.instance.tsdb.dir=${canal.file.data.dir:../conf}/${canal.instance.destination:}
+canal.instance.tsdb.url=jdbc:h2:${canal.instance.tsdb.dir}/h2;CACHE_SIZE=1000;MODE=MYSQL;
+canal.instance.tsdb.dbUsername=canal
+canal.instance.tsdb.dbPassword=canal
+
 #################################################
 ######### 		destinations		############# 
 #################################################

+ 9 - 10
deployer/src/main/resources/example/instance.properties

@@ -1,11 +1,12 @@
 #################################################
-## mysql serverId
-canal.instance.mysql.slaveId=0
+## mysql serverId , v1.0.26+ will autoGen 
+# canal.instance.mysql.slaveId=0
 
-# position info
-canal.instance.master.address=127.0.0.1:3306
 # enable gtid use true/false
 canal.instance.gtidon=false
+
+# position info
+canal.instance.master.address=127.0.0.1:3306
 canal.instance.master.journal.name=
 canal.instance.master.position=
 canal.instance.master.timestamp=
@@ -13,23 +14,21 @@ canal.instance.master.gtid=
 
 # table meta tsdb info
 canal.instance.tsdb.enable=true
-canal.instance.tsdb.dir=${canal.file.data.dir:../conf}/${canal.instance.destination:}
-canal.instance.tsdb.url=jdbc:h2:${canal.instance.tsdb.dir}/h2;CACHE_SIZE=1000;MODE=MYSQL;
 #canal.instance.tsdb.url=jdbc:mysql://127.0.0.1:3306/canal_tsdb
-canal.instance.tsdb.dbUsername=canal
-canal.instance.tsdb.dbPassword=canal
-
+#canal.instance.tsdb.dbUsername=canal
+#canal.instance.tsdb.dbPassword=canal
 
 #canal.instance.standby.address =
 #canal.instance.standby.journal.name =
 #canal.instance.standby.position = 
 #canal.instance.standby.timestamp =
 #canal.instance.standby.gtid=
+
 # username/password
 canal.instance.dbUsername=canal
 canal.instance.dbPassword=canal
-canal.instance.defaultDatabaseName=test
 canal.instance.connectionCharset=UTF-8
+
 # table regex
 canal.instance.filter.regex=.*\\..*
 # table black regex

+ 3 - 2
deployer/src/main/resources/spring/default-instance.xml

@@ -81,6 +81,7 @@
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
 		<property name="eventStore" ref="eventStore" />
+		<property name="filterTransactionEntry" value="${canal.instance.filter.transaction.entry:false}"/>
 	</bean>
 
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
@@ -148,7 +149,7 @@
 				<property name="address" value="${canal.instance.master.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
@@ -156,7 +157,7 @@
 				<property name="address" value="${canal.instance.standby.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		

+ 3 - 2
deployer/src/main/resources/spring/file-instance.xml

@@ -67,6 +67,7 @@
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
 		<property name="eventStore" ref="eventStore" />
+		<property name="filterTransactionEntry" value="${canal.instance.filter.transaction.entry:false}"/>
 	</bean>
 
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
@@ -133,7 +134,7 @@
 				<property name="address" value="${canal.instance.master.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
@@ -141,7 +142,7 @@
 				<property name="address" value="${canal.instance.standby.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		

+ 5 - 4
deployer/src/main/resources/spring/group-instance.xml

@@ -64,6 +64,7 @@
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
 		<property name="eventStore" ref="eventStore" />
+		<property name="filterTransactionEntry" value="${canal.instance.filter.transaction.entry:false}"/>
 	</bean>
 	
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.group.GroupEventParser">
@@ -130,7 +131,7 @@
 				<property name="address" value="${canal.instance.master1.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
@@ -138,7 +139,7 @@
 				<property name="address" value="${canal.instance.standby1.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		
@@ -228,7 +229,7 @@
 				<property name="address" value="${canal.instance.master2.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
@@ -236,7 +237,7 @@
 				<property name="address" value="${canal.instance.standby2.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		

+ 2 - 1
deployer/src/main/resources/spring/local-instance.xml

@@ -67,6 +67,7 @@
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
 		<property name="eventStore" ref="eventStore" />
+		<property name="filterTransactionEntry" value="${canal.instance.filter.transaction.entry:false}"/>
 	</bean>
 
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.rds.RdsLocalBinlogEventParser">
@@ -113,7 +114,7 @@
 				<property name="address" value="${canal.instance.master.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		

+ 3 - 2
deployer/src/main/resources/spring/memory-instance.xml

@@ -64,6 +64,7 @@
 	
 	<bean id="eventSink" class="com.alibaba.otter.canal.sink.entry.EntryEventSink">
 		<property name="eventStore" ref="eventStore" />
+		<property name="filterTransactionEntry" value="${canal.instance.filter.transaction.entry:false}"/>
 	</bean>
 
 	<bean id="eventParser" class="com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser">
@@ -121,7 +122,7 @@
 				<property name="address" value="${canal.instance.master.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		<property name="standbyInfo">
@@ -129,7 +130,7 @@
 				<property name="address" value="${canal.instance.standby.address}" />
 				<property name="username" value="${canal.instance.dbUsername:retl}" />
 				<property name="password" value="${canal.instance.dbPassword:retl}" />
-				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:retl}" />
+				<property name="defaultDatabaseName" value="${canal.instance.defaultDatabaseName:test}" />
 			</bean>
 		</property>
 		

+ 67 - 0
docker/Dockerfile

@@ -0,0 +1,67 @@
+FROM centos:centos6.7
+
+MAINTAINER agapple (jianghang115@gmail.com)
+
+# install system
+RUN \
+    /bin/cp -f /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
+    echo 'root:Hello1234' | chpasswd && \
+    groupadd -r admin && useradd -g admin admin && \
+    yum install -y man && \
+    yum install -y dstat && \
+    yum install -y unzip && \
+    yum install -y nc && \
+    yum install -y openssh-server && \
+    yum install -y tar && \
+    yum install -y which && \
+    yum install -y wget && \
+    yum install -y perl && \
+    yum install -y file && \
+    ssh-keygen -q -N "" -t dsa -f /etc/ssh/ssh_host_dsa_key && \
+    ssh-keygen -q -N "" -t rsa -f /etc/ssh/ssh_host_rsa_key && \
+    sed -ri 's/session    required     pam_loginuid.so/#session    required     pam_loginuid.so/g' /etc/pam.d/sshd && \
+    sed -i -e 's/^#Port 22$/Port 2222/' /etc/ssh/sshd_config && \
+    mkdir -p /root/.ssh && chown root.root /root && chmod 700 /root/.ssh && \
+    yum install -y cronie && \
+    sed -i '/session required pam_loginuid.so/d' /etc/pam.d/crond && \
+    yum clean all && \
+    true
+
+# install canal
+COPY image/ /tmp/docker/
+COPY canal.deployer-*.tar.gz /home/admin/
+COPY jdk-8-linux-x64.rpm /tmp/
+
+RUN \
+    cp -R /tmp/docker/alidata /alidata && \
+    chmod +x /alidata/bin/* && \
+    mkdir -p /home/admin && \
+    cp -R /tmp/docker/admin/* /home/admin/  && \
+    /bin/cp -f alidata/bin/lark-wait /usr/bin/lark-wait && \
+
+    touch /var/lib/rpm/* && \ 
+    yum -y install /tmp/jdk-8-linux-x64.rpm && \
+    /bin/rm -f /tmp/jdk-8-linux-x64.rpm && \
+
+    echo "export JAVA_HOME=/usr/java/latest" >> /etc/profile && \
+    echo "export PATH=\$JAVA_HOME/bin:\$PATH" >> /etc/profile && \
+    /bin/mv /home/admin/bin/clean_log /etc/cron.d && \
+
+    mkdir -p /home/admin/canal-server && \
+    tar -xzvf /home/admin/canal.deployer-*.tar.gz -C /home/admin/canal-server && \
+    /bin/rm -f /home/admin/canal.deployer-*.tar.gz && \
+
+    mkdir -p home/admin/canal-server/logs  && \
+    chmod +x /home/admin/*.sh  && \
+    chmod +x /home/admin/bin/*.sh  && \
+    chown admin: -R /home/admin && \
+    yum clean all && \
+    true
+
+# 2222 sys , 8080 web , 8000 debug , 11111 canal
+EXPOSE 2222 11111 8000 8080
+
+WORKDIR /home/admin
+
+ENTRYPOINT [ "/alidata/bin/main.sh" ]
+CMD [ "/home/admin/app.sh" ]

+ 30 - 0
docker/build.sh

@@ -0,0 +1,30 @@
+#!/bin/bash
+
+current_path=`pwd`
+case "`uname`" in
+    Darwin)
+        bin_abs_path=`cd $(dirname $0); pwd`
+        ;;
+    Linux)
+        bin_abs_path=$(readlink -f $(dirname $0))
+        ;;
+    *)
+        bin_abs_path=`cd $(dirname $0); pwd`
+        ;;
+esac
+BASE=${bin_abs_path}
+
+if [ ! -f $BASE/jdk*.rpm ] ; then
+    DOWNLOAD_LINK="http://download.oracle.com/otn-pub/java/jdk/8u181-b13/96a7b8442fe848ef90c96a2fad6ed6d1/jdk-8u181-linux-x64.tar.gz"
+    wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=xxx; oraclelicense=accept-securebackup-cookie" "$DOWNLOAD_LINK" -O $BASE/jdk-8-linux-x64.rpm
+fi
+
+cd $BASE/../ && mvn clean package -Dmaven.test.skip -Denv=release && cd $current_path ;
+
+if [ "$1" == "kafka" ] ; then
+	cp $BASE/../target/canal-kafka-*.tar.gz $BASE/
+	docker build --no-cache -t canal/canal-server $BASE/
+else 
+	cp $BASE/../target/canal.deployer-*.tar.gz $BASE/
+	docker build --no-cache -t canal/canal-server $BASE/
+fi

+ 117 - 0
docker/image/admin/app.sh

@@ -0,0 +1,117 @@
+#!/bin/bash
+set -e
+
+source /etc/profile
+export JAVA_HOME=/usr/java/latest
+export PATH=$JAVA_HOME/bin:$PATH
+touch /tmp/start.log
+chown admin: /tmp/start.log
+chown -R admin: /home/admin/canal-server
+host=`hostname -i`
+
+# waitterm
+#   wait TERM/INT signal.
+#   see: http://veithen.github.io/2014/11/16/sigterm-propagation.html
+waitterm() {
+        local PID
+        # any process to block
+        tail -f /dev/null &
+        PID="$!"
+        # setup trap, could do nothing, or just kill the blocker
+        trap "kill -TERM ${PID}" TERM INT
+        # wait for signal, ignore wait exit code
+        wait "${PID}" || true
+        # clear trap
+        trap - TERM INT
+        # wait blocker, ignore blocker exit code
+        wait "${PID}" 2>/dev/null || true
+}
+
+# waittermpid "${PIDFILE}".
+#   monitor process by pidfile && wait TERM/INT signal.
+#   if the process disappeared, return 1, means exit with ERROR.
+#   if TERM or INT signal received, return 0, means OK to exit.
+waittermpid() {
+        local PIDFILE PID do_run error
+        PIDFILE="${1?}"
+        do_run=true
+        error=0
+        trap "do_run=false" TERM INT
+        while "${do_run}" ; do
+                PID="$(cat "${PIDFILE}")"
+                if ! ps -p "${PID}" >/dev/null 2>&1 ; then
+                        do_run=false
+                        error=1
+                else
+                        sleep 1
+                fi
+        done
+        trap - TERM INT
+        return "${error}"
+}
+
+
+function checkStart() {
+    local name=$1
+    local cmd=$2
+    local timeout=$3
+    cost=5
+    while [ $timeout -gt 0 ]; do
+        ST=`eval $cmd`
+        if [ "$ST" == "0" ]; then
+            sleep 1
+            let timeout=timeout-1
+            let cost=cost+1
+        elif [ "$ST" == "" ]; then
+            sleep 1
+            let timeout=timeout-1
+            let cost=cost+1
+        else
+            break
+        fi
+    done
+    echo "start $name successful"
+}
+
+
+function start_canal() {
+    echo "start canal ..."
+    serverPort=`perl -le 'print $ENV{"canal.port"}'`
+    if [ -z "$serverPort" ] ; then
+        serverPort=11111
+    fi
+
+    destination=`perl -le 'print $ENV{"canal.destinations"}'`
+    if [[ "$destination" =~ ',' ]]; then
+        echo "multi destination:$destination is not support"
+        exit 1;
+    else
+        mv /home/admin/canal-server/conf/example /home/admin/canal-server/conf/$destination
+    fi
+    su admin -c 'cd /home/admin/canal-server/bin/ && sh restart.sh 1>>/tmp/start.log 2>&1'
+    sleep 5
+    #check start
+    checkStart "canal" "nc 127.0.0.1 $serverPort -w 1 -z | wc -l" 30
+}
+
+function stop_canal() {
+    echo "stop canal"
+    su admin -c 'cd /home/admin/canal-server/bin/ && sh stop.sh 1>>/tmp/start.log 2>&1'
+    echo "stop canal successful ..."
+}
+
+echo "==> START ..."
+
+start_canal
+
+echo "==> START SUCCESSFUL ..."
+
+tail -f /dev/null &
+# wait TERM signal
+waitterm
+
+echo "==> STOP"
+
+stop_canal
+
+echo "==> STOP SUCCESSFUL ..."

+ 2 - 0
docker/image/admin/bin/clean_log

@@ -0,0 +1,2 @@
+# cron: clean logs once every 2 minutes
+*/2 * * * * admin /home/admin/bin/clean_log.sh >>/tmp/clean_log.log 2>&1

+ 45 - 0
docker/image/admin/bin/clean_log.sh

@@ -0,0 +1,45 @@
+#!/bin/bash
+
+# Global Settings
+PATH="$HOME/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/usr/X11R6/bin:/root/bin"
+export PATH
+
+CUTOFF="85"
+#获取磁盘使用率最高的分区
+USAGE=$(df -h|awk 'NR>1 {gsub(/%$/,"",$5);print $5 }'|sort -nr|head -1)
+before=$USAGE
+
+baseClean(){
+    #删除tmp目录15天前的文件。
+    #更新文档时间戳
+    if [ -d /tmp/hsperfdata_admin ]
+    then
+        touch /tmp/hsperfdata_admin
+        touch /tmp/hsperfdata_admin/*
+    fi
+
+    find /tmp/ -type f -mtime +15 | xargs -t rm -rf >/dev/null 2>&1
+
+
+    now=$(df -h|awk 'NR>1 {gsub(/%$/,"",$5);print $5 }'|sort -nr|head -1)
+    echo "before:$before; now:$now"
+}
+
+CANAL_DIR="/home/admin/canal-server/logs"
+if [[ -d $CANAL_DIR ]]; then
+  USAGE=$(df -h|awk 'NR>1 {gsub(/%$/,"",$5);print $5 }'|sort -nr|head -1)
+  if [[ $USAGE -ge 90 ]]; then
+        find $CANAL_DIR -type f -mtime +7 | xargs rm -rf {}
+  fi
+  USAGE=$(df -h|awk 'NR>1 {gsub(/%$/,"",$5);print $5 }'|sort -nr|head -1)
+  if [[ $USAGE -ge 80 ]]; then
+        find $CANAL_DIR -type f -mtime +3 | xargs rm -rf {}
+  fi
+  USAGE=$(df -h|awk 'NR>1 {gsub(/%$/,"",$5);print $5 }'|sort -nr|head -1)
+  if [[ $USAGE -ge 80 ]]; then
+        find $CANAL_DIR -type d -empty -mtime +3 | grep -v canal | xargs rm -rf {}
+        find $CANAL_DIR -type f -iname '*.tmp' | xargs rm -rf {}
+  fi
+  baseClean
+  exit 0
+fi

+ 13 - 0
docker/image/admin/health.sh

@@ -0,0 +1,13 @@
+#!/bin/sh
+CHECK_URL="http://127.0.0.1:8080/metrics"
+CHECK_POINT="success"
+CHECK_COUNT=`curl -s --connect-timeout 7 --max-time 7 $CHECK_URL | grep -c $CHECK_POINT`
+if [ $CHECK_COUNT -eq 0 ]; then
+    echo "[FAILED]"
+    status=0
+	error=1
+else
+    echo "[  OK  ]"
+    status=1
+	error=0
+fi

+ 11 - 0
docker/image/alidata/bin/exec_rc_local.sh

@@ -0,0 +1,11 @@
+#!/bin/bash
+
+if [ "${SKIP_EXEC_RC_LOCAL}" = "YES" ] ; then
+	echo "skip /etc/rc.local: SKIP_EXEC_RC_LOCAL=${SKIP_EXEC_RC_LOCAL}"
+	exit
+fi
+
+if [ "${DOCKER_DEPLOY_TYPE}" = "HOST" ] ; then
+	echo "skip /etc/rc.local: DOCKER_DEPLOY_TYPE=${DOCKER_DEPLOY_TYPE}"
+	exit
+fi

+ 6 - 0
docker/image/alidata/bin/lark-wait

@@ -0,0 +1,6 @@
+#!/bin/bash
+set -e
+
+chown admin: -R /home/admin/
+source /alidata/lib/proc.sh
+waitterm

+ 27 - 0
docker/image/alidata/bin/main.sh

@@ -0,0 +1,27 @@
+#!/bin/bash
+
+[ -n "${DOCKER_DEPLOY_TYPE}" ] || DOCKER_DEPLOY_TYPE="VM"
+echo "DOCKER_DEPLOY_TYPE=${DOCKER_DEPLOY_TYPE}"
+
+# run init scripts
+for e in $(ls /alidata/init/*) ; do
+	[ -x "${e}" ] || continue
+	echo "==> INIT $e"
+	$e
+	echo "==> EXIT CODE: $?"
+done
+
+echo "==> INIT DEFAULT"
+service sshd start
+service crond start
+
+#echo "check hostname -i: `hostname -i`"
+#hti_num=`hostname -i|awk '{print NF}'`
+#if [ $hti_num -gt 1 ];then
+#    echo "hostname -i result error:`hostname -i`"
+#    exit 120
+#fi
+
+echo "==> INIT DONE"
+echo "==> RUN ${*}"
+exec "${@}"

+ 19 - 0
docker/image/alidata/init/02init-sshd.sh

@@ -0,0 +1,19 @@
+#!/bin/bash
+
+# set port
+if [ -z "${SSHD_PORT}" ] ; then
+	SSHD_PORT=22
+	[ "${DOCKER_DEPLOY_TYPE}" = "HOST" ] && SSHD_PORT=2222
+fi
+
+sed -r -i '/^OPTIONS=/ d' /etc/sysconfig/sshd
+echo 'OPTIONS="-p '"${SSHD_PORT}"'"' >> /etc/sysconfig/sshd
+
+# set admin ssh pulic key
+if [ "${USE_ADMIN_PASSAGE}" = "YES" ] ; then
+    echo "set admin passage"
+    mkdir -p /home/admin/.ssh
+    chown admin:admin /home/admin/.ssh
+    chown admin:admin /home/admin/.ssh/authorized_keys
+    chmod 644 /home/admin/.ssh/authorized_keys
+fi

+ 66 - 0
docker/image/alidata/init/fix-hosts.py

@@ -0,0 +1,66 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#****************************************************************#
+# Create Date: 2017-01-06 17:58
+#***************************************************************#
+
+import socket
+import shutil
+from time import gmtime, strftime
+
+# get host_name
+host_name = socket.gethostname()
+tmp_file = "/tmp/.lark-fix-host.hosts"
+host_file = "/etc/hosts"
+bak_file_name = "/tmp/hosts-fix-bak.%s" % ( strftime("%Y-%m-%d_%H-%M-%S", gmtime()) )
+
+# load /etc/hosts file context
+FH = open(host_file,"r")
+file_lines = [ i.rstrip() for i in FH.readlines()]
+FH.close()
+file_lines_reverse = file_lines[::-1]
+new_lines = []
+bad_lines = []
+last_match_line = ""
+
+for line in file_lines_reverse:
+    if line.find(host_name) < 0:  # 不匹配的行直接跳过
+        new_lines.append(line + "\n")
+        continue
+
+    cols = line.split()
+    new_cols = []
+    if cols[0].startswith("#"): # 跳过已经注释掉的行
+        new_lines.append(line + "\n")
+        continue
+    for col in cols:
+        if not col == host_name: # 跳过不匹配的列
+            new_cols.append(col)
+            continue
+
+        if cols[0] == "127.0.0.1": # 如果第一列是 127.0.0.1 就跳过匹配的列, 防止 hostname -i 返回 127.0.0.1
+            continue
+
+        # 如果已经发现过匹配的列, 就丢掉重复的列
+        if not len(last_match_line) == 0:
+            continue
+
+        new_cols.append(col)
+        last_match_line = line
+
+    # 跳过 xx.xx.xx.xx hostname 这样的重复列
+    if len(new_cols) == 1:
+        continue
+
+    new_l = "%s\n" % " ".join(new_cols)
+    new_lines.append(new_l)
+
+# save tmp hosts
+
+FH2=file(tmp_file,"w+")
+FH2.writelines( new_lines[::-1])
+FH2.close()
+
+# mv to /etc/hosts
+shutil.copy(host_file, bak_file_name)
+shutil.move(tmp_file, host_file)

+ 40 - 0
docker/image/alidata/lib/proc.sh

@@ -0,0 +1,40 @@
+# waitterm
+#   wait TERM/INT signal.
+#   see: http://veithen.github.io/2014/11/16/sigterm-propagation.html
+waitterm() {
+	local PID
+	# any process to block
+	tail -f /dev/null &
+	PID="$!"
+	# setup trap, could do nothing, or just kill the blocker
+	trap "kill -TERM ${PID}" TERM INT
+	# wait for signal, ignore wait exit code
+	wait "${PID}" || true
+	# clear trap
+	trap - TERM INT
+	# wait blocker, ignore blocker exit code
+	wait "${PID}" 2>/dev/null || true
+}
+
+# waittermpid "${PIDFILE}".
+#   monitor process by pidfile && wait TERM/INT signal.
+#   if the process disappeared, return 1, means exit with ERROR.
+#   if TERM or INT signal received, return 0, means OK to exit.
+waittermpid() {
+	local PIDFILE PID do_run error
+	PIDFILE="${1?}"
+	do_run=true
+	error=0
+	trap "do_run=false" TERM INT
+	while "${do_run}" ; do
+		PID="$(cat "${PIDFILE}")"
+		if ! ps -p "${PID}" >/dev/null 2>&1 ; then
+			do_run=false
+			error=1
+		else
+			sleep 1
+		fi
+	done
+	trap - TERM INT
+	return "${error}"
+}

+ 92 - 0
docker/run.sh

@@ -0,0 +1,92 @@
+#!/bin/bash
+
+function usage() {
+    echo "Usage:"
+    echo "  run.sh [CONFIG]"
+    echo "example:"
+    echo "  run.sh -e canal.instance.master.address=127.0.0.1:3306 \\"
+    echo "         -e canal.instance.dbUsername=canal \\"
+    echo "         -e canal.instance.dbPassword=canal \\"
+    echo "         -e canal.instance.connectionCharset=UTF-8 \\"
+    echo "         -e canal.instance.tsdb.enable=true \\"
+    echo "         -e canal.instance.gtidon=false \\"
+    echo "         -e canal.instance.filter.regex=.*\\..* "
+    exit
+}
+
+function check_port() {
+    local port=$1
+    local TL=$(which telnet)
+    if [ -f $TL ]; then
+        data=`echo quit | telnet 127.0.0.1 $port| grep -ic connected`
+        echo $data
+        return
+    fi
+
+    local NC=$(which nc)
+    if [ -f $NC ]; then
+        data=`nc -z -w 1 127.0.0.1 $port | grep -ic succeeded`
+        echo $data
+        return
+    fi
+    echo "0"
+    return
+}
+
+function getMyIp() {
+    case "`uname`" in
+        Darwin)
+         myip=`echo "show State:/Network/Global/IPv4" | scutil | grep PrimaryInterface | awk '{print $3}' | xargs ifconfig | grep inet | grep -v inet6 | awk '{print $2}'`
+         ;;
+        *)
+         myip=`ip route get 1 | awk '{print $NF;exit}'`
+         ;;
+  esac
+  echo $myip
+}
+
+NET_MODE=""
+case "`uname`" in
+    Darwin)
+        bin_abs_path=`cd $(dirname $0); pwd`
+        ;;
+    Linux)
+        bin_abs_path=$(readlink -f $(dirname $0))
+        NET_MODE="--net=host"
+        ;;
+    *)
+        NET_MODE="--net=host"
+        bin_abs_path=`cd $(dirname $0); pwd`
+        ;;
+esac
+BASE=${bin_abs_path}
+if [ $# -eq 0 ]; then
+    usage
+elif [ "$1" == "-h" ] ; then
+    usage
+elif [ "$1" == "help" ] ; then
+    usage
+fi
+
+DATA="$BASE/data"
+mkdir -p $DATA
+CONFIG=${@:1}
+#VOLUMNS="-v $DATA:/home/admin/canal-server/logs"
+PORTLIST="8000 8080 2222 11111"
+PORTS=""
+for PORT in $PORTLIST ; do
+    #exist=`check_port $PORT`
+    exist="0"
+    if [ "$exist" == "0" ]; then
+        PORTS="$PORTS -p $PORT:$PORT"
+    else
+        echo "port $PORT is used , pls check"
+        exit 1
+    fi
+done
+
+MEMORY="-m 4096m"
+LOCALHOST=`getMyIp`
+cmd="docker run -it -h $LOCALHOST $CONFIG --name=canal-server $VOLUMNS $NET_MODE $PORTS $MEMORY canal/canal-server"
+echo $cmd
+eval $cmd

+ 1 - 1
driver/src/main/java/com/alibaba/otter/canal/parse/driver/mysql/MysqlConnector.java

@@ -32,7 +32,7 @@ public class MysqlConnector {
     private String              password;
 
     private byte                charsetNumber     = 33;
-    private String              defaultSchema     = "retl";
+    private String              defaultSchema     = "test";
     private int                 soTimeout         = 30 * 1000;
     private int                 connTimeout       = 5 * 1000;
     private int                 receiveBufferSize = 16 * 1024;

+ 8 - 13
instance/manager/src/main/java/com/alibaba/otter/canal/instance/manager/CanalInstanceWithManager.java

@@ -6,6 +6,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
+import com.alibaba.otter.canal.meta.FileMixedMetaManager;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -20,13 +21,7 @@ import com.alibaba.otter.canal.filter.aviater.AviaterRegexFilter;
 import com.alibaba.otter.canal.instance.core.AbstractCanalInstance;
 import com.alibaba.otter.canal.instance.manager.model.Canal;
 import com.alibaba.otter.canal.instance.manager.model.CanalParameter;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.DataSourcing;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.HAMode;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.IndexMode;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.MetaMode;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.SourcingType;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.StorageMode;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.StorageScavengeMode;
+import com.alibaba.otter.canal.instance.manager.model.CanalParameter.*;
 import com.alibaba.otter.canal.meta.MemoryMetaManager;
 import com.alibaba.otter.canal.meta.PeriodMixedMetaManager;
 import com.alibaba.otter.canal.meta.ZooKeeperMetaManager;
@@ -37,12 +32,7 @@ import com.alibaba.otter.canal.parse.inbound.AbstractEventParser;
 import com.alibaba.otter.canal.parse.inbound.group.GroupEventParser;
 import com.alibaba.otter.canal.parse.inbound.mysql.LocalBinlogEventParser;
 import com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser;
-import com.alibaba.otter.canal.parse.index.CanalLogPositionManager;
-import com.alibaba.otter.canal.parse.index.FailbackLogPositionManager;
-import com.alibaba.otter.canal.parse.index.MemoryLogPositionManager;
-import com.alibaba.otter.canal.parse.index.MetaLogPositionManager;
-import com.alibaba.otter.canal.parse.index.PeriodMixedLogPositionManager;
-import com.alibaba.otter.canal.parse.index.ZooKeeperLogPositionManager;
+import com.alibaba.otter.canal.parse.index.*;
 import com.alibaba.otter.canal.parse.support.AuthenticationInfo;
 import com.alibaba.otter.canal.protocol.position.EntryPosition;
 import com.alibaba.otter.canal.sink.entry.EntryEventSink;
@@ -120,6 +110,11 @@ public class CanalInstanceWithManager extends AbstractCanalInstance {
             ZooKeeperMetaManager zooKeeperMetaManager = new ZooKeeperMetaManager();
             zooKeeperMetaManager.setZkClientx(getZkclientx());
             ((PeriodMixedMetaManager) metaManager).setZooKeeperMetaManager(zooKeeperMetaManager);
+        } else if (mode.isLocalFile()){
+            FileMixedMetaManager fileMixedMetaManager = new FileMixedMetaManager();
+            fileMixedMetaManager.setDataDir(parameters.getDataDir());
+            fileMixedMetaManager.setPeriod(parameters.getMetaFileFlushPeriod());
+            metaManager = fileMixedMetaManager;
         } else {
             throw new CanalException("unsupport MetaMode for " + mode);
         }

+ 25 - 1
instance/manager/src/main/java/com/alibaba/otter/canal/instance/manager/model/CanalParameter.java

@@ -28,8 +28,10 @@ public class CanalParameter implements Serializable {
     private Long                     zkClusterId;                                                    // zk集群id,为管理方便
     private List<String>             zkClusters;                                                     // zk集群地址
 
+    private String                   dataDir                            = "../conf";                 // 默认本地文件数据的目录默认是conf
     // meta相关参数
     private MetaMode                 metaMode                           = MetaMode.MEMORY;           // meta机制
+    private Integer                  metaFileFlushPeriod                = 1000;                      // meta刷新间隔
 
     // storage存储
     private Integer                  transactionSize                    = 1024;                      // 支持处理的transaction事务大小
@@ -243,7 +245,9 @@ public class CanalParameter implements Serializable {
         /** 文件存储模式 */
         ZOOKEEPER,
         /** 混合模式,内存+文件 */
-        MIXED;
+        MIXED,
+        /** 本地文件存储模式*/
+        LOCAL_FILE;
 
         public boolean isMemory() {
             return this.equals(MetaMode.MEMORY);
@@ -256,6 +260,10 @@ public class CanalParameter implements Serializable {
         public boolean isMixed() {
             return this.equals(MetaMode.MIXED);
         }
+
+        public boolean isLocalFile(){
+            return this.equals(MetaMode.LOCAL_FILE);
+        }
     }
 
     public static enum IndexMode {
@@ -390,6 +398,22 @@ public class CanalParameter implements Serializable {
         return storageMode;
     }
 
+    public String getDataDir() {
+        return dataDir;
+    }
+
+    public void setDataDir(String dataDir) {
+        this.dataDir = dataDir;
+    }
+
+    public Integer getMetaFileFlushPeriod() {
+        return metaFileFlushPeriod;
+    }
+
+    public void setMetaFileFlushPeriod(Integer metaFileFlushPeriod) {
+        this.metaFileFlushPeriod = metaFileFlushPeriod;
+    }
+
     public void setStorageMode(StorageMode storageMode) {
         this.storageMode = storageMode;
     }

+ 3 - 1
kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/MessageDeserializer.java

@@ -29,7 +29,9 @@ public class MessageDeserializer implements Deserializer<Message> {
     @Override
     public Message deserialize(String topic, byte[] data) {
         try {
-            if (data == null) return null;
+            if (data == null) {
+                return null;
+            }
             else {
                 CanalPacket.Packet p = CanalPacket.Packet.parseFrom(data);
                 switch (p.getType()) {

+ 2 - 2
kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/running/ClientRunningData.java

@@ -8,8 +8,8 @@ package com.alibaba.otter.canal.kafka.client.running;
  */
 public class ClientRunningData {
 
-    private String groupId;
-    private String address;
+    private String  groupId;
+    private String  address;
     private boolean active = true;
 
     public String getGroupId() {

+ 44 - 38
kafka-client/src/main/java/com/alibaba/otter/canal/kafka/client/running/ClientRunningMonitor.java

@@ -1,12 +1,11 @@
 package com.alibaba.otter.canal.kafka.client.running;
 
-import com.alibaba.otter.canal.common.AbstractCanalLifeCycle;
-import com.alibaba.otter.canal.common.utils.AddressUtils;
-import com.alibaba.otter.canal.common.utils.BooleanMutex;
-import com.alibaba.otter.canal.common.utils.JsonUtils;
-import com.alibaba.otter.canal.common.zookeeper.ZkClientx;
-import com.alibaba.otter.canal.common.zookeeper.ZookeeperPathUtils;
-import com.alibaba.otter.canal.protocol.exception.CanalClientException;
+import java.text.MessageFormat;
+import java.util.Random;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
 import org.I0Itec.zkclient.IZkDataListener;
 import org.I0Itec.zkclient.exception.ZkException;
 import org.I0Itec.zkclient.exception.ZkInterruptedException;
@@ -17,11 +16,14 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.MDC;
 
-import java.text.MessageFormat;
-import java.util.Random;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
+import com.alibaba.otter.canal.common.AbstractCanalLifeCycle;
+import com.alibaba.otter.canal.common.utils.AddressUtils;
+import com.alibaba.otter.canal.common.utils.BooleanMutex;
+import com.alibaba.otter.canal.common.utils.JsonUtils;
+import com.alibaba.otter.canal.common.zookeeper.ZkClientx;
+import com.alibaba.otter.canal.common.zookeeper.ZookeeperPathUtils;
+import com.alibaba.otter.canal.protocol.exception.CanalClientException;
+
 
 /**
  * kafka client running状态信息
@@ -31,13 +33,18 @@ import java.util.concurrent.TimeUnit;
  */
 public class ClientRunningMonitor extends AbstractCanalLifeCycle {
 
-    private static final String TOPIC_ROOT_NODE = ZookeeperPathUtils.CANAL_ROOT_NODE + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR + "topics";
+    private static final String TOPIC_ROOT_NODE             = ZookeeperPathUtils.CANAL_ROOT_NODE
+                                                              + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR + "topics";
 
-    private static final String TOPIC_NODE = TOPIC_ROOT_NODE + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR + "{0}";
+    private static final String TOPIC_NODE                  = TOPIC_ROOT_NODE + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR
+                                                              + "{0}";
 
-    private static final String TOPIC_CLIENTID_NODE = TOPIC_NODE + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR + "{1}";
+    private static final String TOPIC_CLIENTID_NODE         = TOPIC_NODE + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR
+                                                              + "{1}";
 
-    private static final String TOPIC_CLIENTID_RUNNING_NODE = TOPIC_CLIENTID_NODE + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR + ZookeeperPathUtils.RUNNING_NODE;
+    private static final String TOPIC_CLIENTID_RUNNING_NODE = TOPIC_CLIENTID_NODE
+                                                              + ZookeeperPathUtils.ZOOKEEPER_SEPARATOR
+                                                              + ZookeeperPathUtils.RUNNING_NODE;
 
     private static String getTopicClientRunning(String topic, String groupId) {
         return MessageFormat.format(TOPIC_CLIENTID_RUNNING_NODE, topic, groupId);
@@ -47,21 +54,21 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
         return MessageFormat.format(TOPIC_CLIENTID_NODE, topic, groupId);
     }
 
-    private static final Logger logger = LoggerFactory.getLogger(ClientRunningMonitor.class);
-    private ZkClientx zkClient;
-    private String topic;
-    private ClientRunningData clientData;
-    private IZkDataListener dataListener;
-    private BooleanMutex mutex = new BooleanMutex(false);
-    private volatile boolean release = false;
+    private static final Logger        logger       = LoggerFactory.getLogger(ClientRunningMonitor.class);
+    private ZkClientx                  zkClient;
+    private String                     topic;
+    private ClientRunningData          clientData;
+    private IZkDataListener            dataListener;
+    private BooleanMutex               mutex        = new BooleanMutex(false);
+    private volatile boolean           release      = false;
     private volatile ClientRunningData activeData;
-    private ScheduledExecutorService delayExector = Executors.newScheduledThreadPool(1);
-    private ClientRunningListener listener;
-    private int delayTime = 5;
+    private ScheduledExecutorService   delayExector = Executors.newScheduledThreadPool(1);
+    private ClientRunningListener      listener;
+    private int                        delayTime    = 5;
 
-    private static Integer virtualPort;
+    private static Integer             virtualPort;
 
-    public ClientRunningMonitor() {
+    public ClientRunningMonitor(){
         if (virtualPort == null) {
             Random rand = new Random();
             virtualPort = rand.nextInt(9000) + 1000;
@@ -108,7 +115,6 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
 
     }
 
-
     public void start() {
         super.start();
 
@@ -123,7 +129,7 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
         String path = getTopicClientRunning(this.topic, clientData.getGroupId());
         zkClient.unsubscribeDataChanges(path, dataListener);
         releaseRunning(); // 尝试一下release
-        //Fix issue #697
+        // Fix issue #697
         if (delayExector != null) {
             delayExector.shutdown();
         }
@@ -159,13 +165,12 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
                 }
             }
         } catch (ZkNoNodeException e) {
-            zkClient.createPersistent(getClientIdNodePath(this.topic, clientData.getGroupId()),
-                    true); // 尝试创建父节点
+            zkClient.createPersistent(getClientIdNodePath(this.topic, clientData.getGroupId()), true); // 尝试创建父节点
             initRunning();
         } catch (Throwable t) {
             logger.error(MessageFormat.format("There is an error when execute initRunning method, with destination [{0}].",
-                    topic),
-                    t);
+                topic),
+                t);
             // 出现任何异常尝试release
             releaseRunning();
             throw new CanalClientException("something goes wrong in initRunning method. ", t);
@@ -187,7 +192,8 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
      */
     public boolean check() {
         String path = getTopicClientRunning(this.topic, clientData.getGroupId());
-        //ZookeeperPathUtils.getDestinationClientRunning(this.destination, clientData.getClientId());
+        // ZookeeperPathUtils.getDestinationClientRunning(this.destination,
+        // clientData.getClientId());
         try {
             byte[] bytes = zkClient.readData(path);
             ClientRunningData eventData = JsonUtils.unmarshalFromByte(bytes, ClientRunningData.class);
@@ -196,8 +202,8 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
             boolean result = isMine(activeData.getAddress());
             if (!result) {
                 logger.warn("canal is running in [{}] , but not in [{}]",
-                        activeData.getAddress(),
-                        clientData.getAddress());
+                    activeData.getAddress(),
+                    clientData.getAddress());
             }
             return result;
         } catch (ZkNoNodeException e) {
@@ -235,7 +241,7 @@ public class ClientRunningMonitor extends AbstractCanalLifeCycle {
         if (listener != null) {
             // 触发回调
             listener.processActiveEnter();
-            this.clientData.setAddress(/*address*/AddressUtils.getHostIp() + ":" + virtualPort);
+            this.clientData.setAddress(/* address */AddressUtils.getHostIp() + ":" + virtualPort);
 
             String path = getTopicClientRunning(this.topic, clientData.getGroupId());
             // 序列化

+ 3 - 3
kafka/src/main/java/com/alibaba/otter/canal/kafka/CanalServerStarter.java

@@ -17,9 +17,9 @@ import com.alibaba.otter.canal.deployer.CanalController;
  */
 public class CanalServerStarter {
 
-    private static final String CLASSPATH_URL_PREFIX = "classpath:";
-    private static final Logger logger               = LoggerFactory.getLogger(CanalServerStarter.class);
-    private volatile static boolean running          = false;
+    private static final String     CLASSPATH_URL_PREFIX = "classpath:";
+    private static final Logger     logger               = LoggerFactory.getLogger(CanalServerStarter.class);
+    private volatile static boolean running              = false;
 
     public static void init() {
         try {

+ 25 - 26
kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/CanalKafkaProducer.java

@@ -3,7 +3,6 @@ package com.alibaba.otter.canal.kafka.producer;
 import java.io.IOException;
 import java.util.Properties;
 
-import com.google.protobuf.ByteString;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.Producer;
 import org.apache.kafka.clients.producer.ProducerRecord;
@@ -12,7 +11,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.alibaba.otter.canal.kafka.producer.KafkaProperties.Topic;
-import com.alibaba.otter.canal.protocol.CanalEntry;
 import com.alibaba.otter.canal.protocol.Message;
 
 /**
@@ -52,31 +50,32 @@ public class CanalKafkaProducer {
     }
 
     public void send(Topic topic, Message message) throws IOException {
-        boolean valid = false;
-        if (message != null) {
-            if (message.isRaw() && !message.getRawEntries().isEmpty()) {
-                for (ByteString byteString : message.getRawEntries()) {
-                    CanalEntry.Entry entry = CanalEntry.Entry.parseFrom(byteString);
-                    if (entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONBEGIN
-                            && entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONEND) {
-                        valid = true;
-                        break;
-                    }
-                }
-            } else if (!message.getEntries().isEmpty()){
-                for (CanalEntry.Entry entry : message.getEntries()) {
-                    if (entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONBEGIN
-                            && entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONEND) {
-                        valid = true;
-                        break;
-                    }
-                }
-            }
-        }
-        if (!valid) {
-            return;
-        }
+        // set canal.instance.filter.transaction.entry = true
 
+        // boolean valid = false;
+        // if (message != null) {
+        // if (message.isRaw() && !message.getRawEntries().isEmpty()) {
+        // for (ByteString byteString : message.getRawEntries()) {
+        // CanalEntry.Entry entry = CanalEntry.Entry.parseFrom(byteString);
+        // if (entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONBEGIN
+        // && entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONEND) {
+        // valid = true;
+        // break;
+        // }
+        // }
+        // } else if (!message.getEntries().isEmpty()){
+        // for (CanalEntry.Entry entry : message.getEntries()) {
+        // if (entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONBEGIN
+        // && entry.getEntryType() != CanalEntry.EntryType.TRANSACTIONEND) {
+        // valid = true;
+        // break;
+        // }
+        // }
+        // }
+        // }
+        // if (!valid) {
+        // return;
+        // }
         ProducerRecord<String, Message> record;
         if (topic.getPartition() != null) {
             record = new ProducerRecord<String, Message>(topic.getTopic(), topic.getPartition(), null, message);

+ 5 - 2
kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/CanalKafkaStarter.java

@@ -52,7 +52,10 @@ public class CanalKafkaStarter {
             // 初始化 kafka producer
             canalKafkaProducer = new CanalKafkaProducer();
             canalKafkaProducer.init(kafkaProperties);
-
+            // set filterTransactionEntry
+            if (kafkaProperties.isFilterTransactionEntry()) {
+                System.setProperty("canal.instance.filter.transaction.entry", "true");
+            }
             // 对应每个instance启动一个worker线程
             List<CanalDestination> destinations = kafkaProperties.getCanalDestinations();
 
@@ -118,7 +121,7 @@ public class CanalKafkaStarter {
                     Message message = server.getWithoutAck(clientIdentity, kafkaProperties.getCanalBatchSize()); // 获取指定数量的数据
                     long batchId = message.getId();
                     try {
-                        int size = message.getEntries().size();
+                        int size = message.isRaw() ?  message.getRawEntries().size() : message.getEntries().size();
                         if (batchId != -1 && size != 0) {
                             if (!StringUtils.isEmpty(destination.getTopic())) {
                                 Topic topic = new Topic();

+ 17 - 8
kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/KafkaProperties.java

@@ -13,15 +13,15 @@ import java.util.Set;
  */
 public class KafkaProperties {
 
-    private String                 servers           = "localhost:6667";
-    private int                    retries           = 0;
-    private int                    batchSize         = 16384;
-    private int                    lingerMs          = 1;
-    private long                   bufferMemory      = 33554432L;
+    private String                 servers                = "localhost:6667";
+    private int                    retries                = 0;
+    private int                    batchSize              = 16384;
+    private int                    lingerMs               = 1;
+    private long                   bufferMemory           = 33554432L;
+    private boolean                filterTransactionEntry = true;
+    private int                    canalBatchSize         = 5;
 
-    private int                    canalBatchSize    = 5;
-
-    private List<CanalDestination> canalDestinations = new ArrayList<CanalDestination>();
+    private List<CanalDestination> canalDestinations      = new ArrayList<CanalDestination>();
 
     public static class CanalDestination {
 
@@ -158,4 +158,13 @@ public class KafkaProperties {
     public void setCanalDestinations(List<CanalDestination> canalDestinations) {
         this.canalDestinations = canalDestinations;
     }
+
+    public boolean isFilterTransactionEntry() {
+        return filterTransactionEntry;
+    }
+
+    public void setFilterTransactionEntry(boolean filterTransactionEntry) {
+        this.filterTransactionEntry = filterTransactionEntry;
+    }
+
 }

+ 49 - 10
kafka/src/main/java/com/alibaba/otter/canal/kafka/producer/MessageSerializer.java

@@ -1,13 +1,19 @@
 package com.alibaba.otter.canal.kafka.producer;
 
-import com.alibaba.otter.canal.protocol.CanalEntry;
-import com.alibaba.otter.canal.protocol.CanalPacket;
-import com.alibaba.otter.canal.protocol.Message;
+import java.util.List;
+import java.util.Map;
+
 import org.apache.kafka.common.errors.SerializationException;
 import org.apache.kafka.common.serialization.Serializer;
 import org.springframework.util.CollectionUtils;
 
-import java.util.Map;
+import com.alibaba.otter.canal.protocol.CanalEntry;
+import com.alibaba.otter.canal.protocol.CanalPacket;
+import com.alibaba.otter.canal.protocol.CanalPacket.PacketType;
+import com.alibaba.otter.canal.protocol.Message;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.CodedOutputStream;
+import com.google.protobuf.WireFormat;
 
 /**
  * Kafka Message类的序列化
@@ -25,20 +31,53 @@ public class MessageSerializer implements Serializer<Message> {
     public byte[] serialize(String topic, Message data) {
         try {
             if (data != null) {
-                CanalPacket.Messages.Builder messageBuilder = CanalPacket.Messages.newBuilder();
                 if (data.getId() != -1) {
                     if (data.isRaw() && !CollectionUtils.isEmpty(data.getRawEntries())) {
-                        messageBuilder.addAllMessages(data.getRawEntries());
+                        // for performance
+                        List<ByteString> rowEntries = data.getRawEntries();
+                        // message size
+                        int messageSize = 0;
+                        messageSize += com.google.protobuf.CodedOutputStream.computeInt64Size(1, data.getId());
+
+                        int dataSize = 0;
+                        for (int i = 0; i < rowEntries.size(); i++) {
+                            dataSize += com.google.protobuf.CodedOutputStream.computeBytesSizeNoTag(rowEntries.get(i));
+                        }
+                        messageSize += dataSize;
+                        messageSize += 1 * rowEntries.size();
+                        // packet size
+                        int size = 0;
+                        size += com.google.protobuf.CodedOutputStream.computeEnumSize(3,
+                            PacketType.MESSAGES.getNumber());
+                        size += com.google.protobuf.CodedOutputStream.computeTagSize(5)
+                                + com.google.protobuf.CodedOutputStream.computeRawVarint32Size(messageSize)
+                                + messageSize;
+                        // build data
+                        byte[] body = new byte[size];
+                        CodedOutputStream output = CodedOutputStream.newInstance(body);
+                        output.writeEnum(3, PacketType.MESSAGES.getNumber());
+
+                        output.writeTag(5, WireFormat.WIRETYPE_LENGTH_DELIMITED);
+                        output.writeRawVarint32(messageSize);
+                        // message
+                        output.writeInt64(1, data.getId());
+                        for (int i = 0; i < rowEntries.size(); i++) {
+                            output.writeBytes(2, rowEntries.get(i));
+                        }
+                        output.checkNoSpaceLeft();
+                        return body;
                     } else if (!CollectionUtils.isEmpty(data.getEntries())) {
+                        CanalPacket.Messages.Builder messageBuilder = CanalPacket.Messages.newBuilder();
                         for (CanalEntry.Entry entry : data.getEntries()) {
                             messageBuilder.addMessages(entry.toByteString());
                         }
+
+                        CanalPacket.Packet.Builder packetBuilder = CanalPacket.Packet.newBuilder();
+                        packetBuilder.setType(CanalPacket.PacketType.MESSAGES);
+                        packetBuilder.setBody(messageBuilder.build().toByteString());
+                        return packetBuilder.build().toByteArray();
                     }
                 }
-                CanalPacket.Packet.Builder packetBuilder = CanalPacket.Packet.newBuilder();
-                packetBuilder.setType(CanalPacket.PacketType.MESSAGES);
-                packetBuilder.setBody(messageBuilder.build().toByteString());
-                return packetBuilder.build().toByteArray();
             }
         } catch (Exception e) {
             throw new SerializationException("Error when serializing message to byte[] ");

+ 1 - 0
kafka/src/main/resources/kafka.yml

@@ -5,6 +5,7 @@ lingerMs: 1
 bufferMemory: 33554432
 # canal的批次大小,单位 k
 canalBatchSize: 50
+filterTransactionEntry: true
 
 canalDestinations:
   - canalDestination: example

+ 2 - 0
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/AbstractEventParser.java

@@ -95,6 +95,7 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
     protected int                                    parallelBufferSize         = 256;                                     // 必须为2的幂
     protected MultiStageCoprocessor                  multiStageCoprocessor;
 
+
     protected abstract BinlogParser buildParser();
 
     protected abstract ErosaConnection buildErosaConnection();
@@ -615,4 +616,5 @@ public abstract class AbstractEventParser<EVENT> extends AbstractCanalLifeCycle
         this.parallelBufferSize = parallelBufferSize;
     }
 
+
 }

+ 26 - 18
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/AbstractMysqlEventParser.java

@@ -1,6 +1,7 @@
 package com.alibaba.otter.canal.parse.inbound.mysql;
 
 import java.nio.charset.Charset;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -19,21 +20,22 @@ import com.alibaba.otter.canal.protocol.position.EntryPosition;
 
 public abstract class AbstractMysqlEventParser extends AbstractEventParser {
 
-    protected final Logger      logger                  = LoggerFactory.getLogger(this.getClass());
-    protected static final long BINLOG_START_OFFEST     = 4L;
+    protected final Logger            logger                    = LoggerFactory.getLogger(this.getClass());
+    protected static final long       BINLOG_START_OFFEST       = 4L;
 
-    protected boolean           enableTsdb              = false;
-    protected String            tsdbSpringXml;
-    protected TableMetaTSDB     tableMetaTSDB;
+    protected boolean                 enableTsdb                = false;
+    protected String                  tsdbSpringXml;
+    protected TableMetaTSDB           tableMetaTSDB;
     // 编码信息
-    protected byte              connectionCharsetNumber = (byte) 33;
-    protected Charset           connectionCharset       = Charset.forName("UTF-8");
-    protected boolean           filterQueryDcl          = false;
-    protected boolean           filterQueryDml          = false;
-    protected boolean           filterQueryDdl          = false;
-    protected boolean           filterRows              = false;
-    protected boolean           filterTableError        = false;
-    protected boolean           useDruidDdlFilter       = true;
+    protected byte                    connectionCharsetNumber   = (byte) 33;
+    protected Charset                 connectionCharset         = Charset.forName("UTF-8");
+    protected boolean                 filterQueryDcl            = false;
+    protected boolean                 filterQueryDml            = false;
+    protected boolean                 filterQueryDdl            = false;
+    protected boolean                 filterRows                = false;
+    protected boolean                 filterTableError          = false;
+    protected boolean                 useDruidDdlFilter         = true;
+    private final AtomicLong          eventsPublishBlockingTime = new AtomicLong(0L);
 
     protected BinlogParser buildParser() {
         LogEventConvert convert = new LogEventConvert();
@@ -119,11 +121,13 @@ public abstract class AbstractMysqlEventParser extends AbstractEventParser {
     }
 
     protected MultiStageCoprocessor buildMultiStageCoprocessor() {
-        return new MysqlMultiStageCoprocessor(parallelBufferSize,
-            parallelThreadSize,
-            (LogEventConvert) binlogParser,
-            transactionBuffer,
-            destination);
+        MysqlMultiStageCoprocessor mysqlMultiStageCoprocessor = new MysqlMultiStageCoprocessor(parallelBufferSize,
+                parallelThreadSize,
+                (LogEventConvert) binlogParser,
+                transactionBuffer,
+                destination);
+        mysqlMultiStageCoprocessor.setEventsPublishBlockingTime(eventsPublishBlockingTime);
+        return mysqlMultiStageCoprocessor;
     }
 
     // ============================ setter / getter =========================
@@ -188,4 +192,8 @@ public abstract class AbstractMysqlEventParser extends AbstractEventParser {
         }
     }
 
+    public AtomicLong getEventsPublishBlockingTime() {
+        return this.eventsPublishBlockingTime;
+    }
+
 }

+ 29 - 10
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlConnection.java

@@ -7,6 +7,7 @@ import java.net.InetSocketAddress;
 import java.nio.charset.Charset;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
@@ -37,18 +38,20 @@ import com.taobao.tddl.dbsync.binlog.LogEvent;
 
 public class MysqlConnection implements ErosaConnection {
 
-    private static final Logger logger      = LoggerFactory.getLogger(MysqlConnection.class);
+    private static final Logger       logger                    = LoggerFactory.getLogger(MysqlConnection.class);
 
-    private MysqlConnector      connector;
-    private long                slaveId;
-    private Charset             charset     = Charset.forName("UTF-8");
-    private BinlogFormat        binlogFormat;
-    private BinlogImage         binlogImage;
+    private MysqlConnector            connector;
+    private long                      slaveId;
+    private Charset                   charset                   = Charset.forName("UTF-8");
+    private BinlogFormat              binlogFormat;
+    private BinlogImage               binlogImage;
 
     // tsdb releated
-    private AuthenticationInfo  authInfo;
-    protected int               connTimeout = 5 * 1000;                                      // 5秒
-    protected int               soTimeout   = 60 * 60 * 1000;                                // 1小时
+    private AuthenticationInfo        authInfo;
+    protected     int                 connTimeout               = 5 * 1000;                                      // 5秒
+    protected     int                 soTimeout                 = 60 * 60 * 1000;                                // 1小时
+    // dump binlog bytes, 暂不包括meta与TSDB
+    private AtomicLong                receivedBinlogBytes;
 
     public MysqlConnection(){
     }
@@ -124,6 +127,7 @@ public class MysqlConnection implements ErosaConnection {
         decoder.handle(LogEvent.XID_EVENT);
         LogContext context = new LogContext();
         while (fetcher.fetch()) {
+            accumulateReceivedBytes(fetcher.limit());
             LogEvent event = null;
             event = decoder.decode(fetcher, context);
 
@@ -146,6 +150,7 @@ public class MysqlConnection implements ErosaConnection {
         LogDecoder decoder = new LogDecoder(LogEvent.UNKNOWN_EVENT, LogEvent.ENUM_END_EVENT);
         LogContext context = new LogContext();
         while (fetcher.fetch()) {
+            accumulateReceivedBytes(fetcher.limit());
             LogEvent event = null;
             event = decoder.decode(fetcher, context);
 
@@ -174,6 +179,7 @@ public class MysqlConnection implements ErosaConnection {
             LogDecoder decoder = new LogDecoder(LogEvent.UNKNOWN_EVENT, LogEvent.ENUM_END_EVENT);
             LogContext context = new LogContext();
             while (fetcher.fetch()) {
+                accumulateReceivedBytes(fetcher.limit());
                 LogEvent event = null;
                 event = decoder.decode(fetcher, context);
 
@@ -204,6 +210,7 @@ public class MysqlConnection implements ErosaConnection {
         try {
             fetcher.start(connector.getChannel());
             while (fetcher.fetch()) {
+                accumulateReceivedBytes(fetcher.limit());
                 LogBuffer buffer = fetcher.duplicate();
                 fetcher.consume(fetcher.limit());
                 if (!coprocessor.publish(buffer)) {
@@ -230,6 +237,7 @@ public class MysqlConnection implements ErosaConnection {
         try {
             fetcher.start(connector.getChannel());
             while (fetcher.fetch()) {
+                accumulateReceivedBytes(fetcher.limit());
                 LogBuffer buffer = fetcher.duplicate();
                 fetcher.consume(fetcher.limit());
                 if (!coprocessor.publish(buffer)) {
@@ -334,7 +342,6 @@ public class MysqlConnection implements ErosaConnection {
      * <li>net_read_timeout</li>
      * </ol>
      * 
-     * @param channel
      * @throws IOException
      */
     private void updateSettings() throws IOException {
@@ -453,6 +460,14 @@ public class MysqlConnection implements ErosaConnection {
         }
     }
 
+    private void accumulateReceivedBytes(long x) {
+        if (receivedBinlogBytes != null) {
+            receivedBinlogBytes.addAndGet(x);
+        }
+    }
+
+
+
     public static enum BinlogFormat {
 
         STATEMENT("STATEMENT"), ROW("ROW"), MIXED("MIXED");
@@ -592,4 +607,8 @@ public class MysqlConnection implements ErosaConnection {
         this.authInfo = authInfo;
     }
 
+    public void setReceivedBinlogBytes(AtomicLong receivedBinlogBytes) {
+        this.receivedBinlogBytes = receivedBinlogBytes;
+    }
+
 }

+ 16 - 3
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlEventParser.java

@@ -69,8 +69,11 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
     private BinlogImage[]      supportBinlogImages;                          // 支持的binlogImage,如果设置会执行强校验
 
     // update by yishun.chen,特殊异常处理参数
-    private int                dumpErrorCount                    = 0;        // binlogDump失败异常计数
-    private int                dumpErrorCountThreshold           = 2;        // binlogDump失败异常计数阀值
+    private       int                 dumpErrorCount            = 0;        // binlogDump失败异常计数
+    private       int                 dumpErrorCountThreshold   = 2;        // binlogDump失败异常计数阀值
+
+    // instance received binlog bytes
+    private final AtomicLong          receivedBinlogBytes       = new AtomicLong(0L);
 
     protected ErosaConnection buildErosaConnection() {
         return buildMysqlConnection(this.runningInfo);
@@ -314,6 +317,7 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
         connection.getConnector().setSendBufferSize(sendBufferSize);
         connection.getConnector().setSoTimeout(defaultConnectionTimeoutInSeconds * 1000);
         connection.setCharset(connectionCharset);
+        connection.setReceivedBinlogBytes(receivedBinlogBytes);
         // 随机生成slaveId
         if (this.slaveId <= 0) {
             this.slaveId = generateUniqueServerId();
@@ -512,7 +516,7 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
     private Long findTransactionBeginPosition(ErosaConnection mysqlConnection, final EntryPosition entryPosition)
                                                                                                                  throws IOException {
         // 针对开始的第一条为非Begin记录,需要从该binlog扫描
-        final AtomicLong preTransactionStartPosition = new AtomicLong(0L);
+        final java.util.concurrent.atomic.AtomicLong preTransactionStartPosition = new java.util.concurrent.atomic.AtomicLong(0L);
         mysqlConnection.reconnect();
         mysqlConnection.seek(entryPosition.getJournalName(), 4L, new SinkFunction<LogEvent>() {
 
@@ -643,6 +647,9 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
                 throw new CanalParseException("command : 'show master status' has an error! pls check. you need (at least one of) the SUPER,REPLICATION CLIENT privilege(s) for this operation");
             }
             EntryPosition endPosition = new EntryPosition(fields.get(0), Long.valueOf(fields.get(1)));
+            if (isGTIDMode && fields.size() > 4) {
+                endPosition.setGtid(fields.get(4));
+            }
             return endPosition;
         } catch (IOException e) {
             throw new CanalParseException("command : 'show master status' has an error!", e);
@@ -908,4 +915,10 @@ public class MysqlEventParser extends AbstractMysqlEventParser implements CanalE
         this.dumpErrorCountThreshold = dumpErrorCountThreshold;
     }
 
+
+
+    public AtomicLong getReceivedBinlogBytes() {
+        return this.receivedBinlogBytes;
+    }
+
 }

+ 34 - 1
parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/MysqlMultiStageCoprocessor.java

@@ -2,6 +2,7 @@ package com.alibaba.otter.canal.parse.inbound.mysql;
 
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.LockSupport;
 
 import org.apache.commons.lang.StringUtils;
@@ -52,6 +53,7 @@ import com.taobao.tddl.dbsync.binlog.event.WriteRowsLogEvent;
  */
 public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implements MultiStageCoprocessor {
 
+    private static final int             maxFullTimes               = 10;
     private LogEventConvert              logEventConvert;
     private EventTransactionBuffer       transactionBuffer;
     private ErosaConnection              connection;
@@ -63,6 +65,7 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
     private ExecutorService              stageExecutor;
     private String                       destination;
     private volatile CanalParseException exception;
+    private AtomicLong                   eventsPublishBlockingTime;
 
     public MysqlMultiStageCoprocessor(int ringBufferSize, int parserThreadCount, LogEventConvert logEventConvert,
                                       EventTransactionBuffer transactionBuffer, String destination){
@@ -161,6 +164,8 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
             throw exception;
         }
         boolean interupted = false;
+        long blockingStart = 0L;
+        int fullTimes = 0;
         do {
             try {
                 long next = disruptorMsgBuffer.tryNext();
@@ -170,16 +175,39 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
                     event.setBinlogFileName(binlogFileName);
                 }
                 disruptorMsgBuffer.publish(next);
+                if (fullTimes > 0) {
+                    eventsPublishBlockingTime.addAndGet(System.nanoTime() - blockingStart);
+                }
                 break;
             } catch (InsufficientCapacityException e) {
+                if (fullTimes == 0) {
+                    blockingStart = System.nanoTime();
+                }
                 // park
-                LockSupport.parkNanos(1L);
+                // fixed 1ns park replaced by progressive backoff (yield, then park up to 1ms)
+                applyWait(++fullTimes);
                 interupted = Thread.interrupted();
+                if (fullTimes % 1000 == 0) {
+                    long nextStart = System.nanoTime();
+                    eventsPublishBlockingTime.addAndGet(nextStart - blockingStart);
+                    blockingStart = nextStart;
+                }
             }
         } while (!interupted && isStart());
         return isStart();
     }
 
+    // 环形缓冲区满时的退避策略: 前3次yield, 之后parkNanos递增至最多1ms, 避免busy-spin空耗CPU
+    private void applyWait(int fullTimes) {
+        int newFullTimes = fullTimes > maxFullTimes ? maxFullTimes : fullTimes;
+        if (fullTimes <= 3) { // 3次以内
+            Thread.yield();
+        } else { // 超过3次,最多只sleep 1ms
+            LockSupport.parkNanos(100 * 1000L * newFullTimes);
+        }
+
+    }
+
     @Override
     public void reset() {
         if (isStart()) {
@@ -189,6 +217,7 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
         start();
     }
 
+
     private class SimpleParserStage implements EventHandler<MessageEvent>, LifecycleAware {
 
         private LogDecoder decoder;
@@ -427,4 +456,8 @@ public class MysqlMultiStageCoprocessor extends AbstractCanalLifeCycle implement
         this.connection = connection;
     }
 
+    public void setEventsPublishBlockingTime(AtomicLong eventsPublishBlockingTime) {
+        this.eventsPublishBlockingTime = eventsPublishBlockingTime;
+    }
+
 }

+ 0 - 10
pom.xml

@@ -66,16 +66,6 @@
                 <enabled>false</enabled>
             </snapshots>
         </repository>
-        <repository>
-            <id>alibaba</id>
-            <url>http://code.alibabatech.com/mvn/releases/</url>
-            <releases>
-                <enabled>true</enabled>
-            </releases>
-            <snapshots>
-                <enabled>false</enabled>
-            </snapshots>
-        </repository>
         <repository>
             <id>sonatype</id>
             <name>sonatype</name>

+ 53 - 66
prometheus/src/main/java/com/alibaba/otter/canal/prometheus/CanalInstanceExports.java

@@ -1,12 +1,7 @@
 package com.alibaba.otter.canal.prometheus;
 
 import com.alibaba.otter.canal.instance.core.CanalInstance;
-import com.alibaba.otter.canal.prometheus.impl.InstanceMetaCollector;
-import com.alibaba.otter.canal.prometheus.impl.MemoryStoreCollector;
-import com.alibaba.otter.canal.prometheus.impl.PrometheusCanalEventDownStreamHandler;
-import com.alibaba.otter.canal.sink.CanalEventSink;
-import com.alibaba.otter.canal.sink.entry.EntryEventSink;
-import com.alibaba.otter.canal.store.CanalStoreException;
+import com.alibaba.otter.canal.prometheus.impl.*;
 import io.prometheus.client.Collector;
 import io.prometheus.client.CollectorRegistry;
 import org.slf4j.Logger;
@@ -20,79 +15,71 @@ import java.util.List;
  */
 public class CanalInstanceExports {
 
-    private static final Logger      logger         = LoggerFactory.getLogger(CanalInstanceExports.class);
-
-    public static final String[]     labels         = {"destination"};
-
-    public static final List<String> labelList      = Collections.singletonList(labels[0]);
-
-    private final String             destination;
-
-    private Collector                storeCollector;
-
-    private Collector                delayCollector;
-
-    private Collector                metaCollector;
-
-    private CanalInstanceExports(CanalInstance instance) {
-        this.destination = instance.getDestination();
-        initDelayGauge(instance);
-        initStoreCollector(instance);
-        initMetaCollector(instance);
+    private static final Logger      logger           = LoggerFactory.getLogger(CanalInstanceExports.class);
+    public static final String       DEST             = "destination";
+    public static final String[]     DEST_LABELS      = {DEST};
+    public static final List<String> DEST_LABELS_LIST = Collections.singletonList(DEST);
+    private final Collector          storeCollector;
+    private final Collector          entryCollector;
+    private final Collector          metaCollector;
+    private final Collector          sinkCollector;
+    private final Collector          parserCollector;
+
+    private CanalInstanceExports() {
+        this.storeCollector = StoreCollector.instance();
+        this.entryCollector = EntryCollector.instance();
+        this.metaCollector = MetaCollector.instance();
+        this.sinkCollector = SinkCollector.instance();
+        this.parserCollector = ParserCollector.instance();
     }
 
+    private static class SingletonHolder {
+        private static final CanalInstanceExports SINGLETON = new CanalInstanceExports();
+    }
 
+    public static CanalInstanceExports instance() {
+        return SingletonHolder.SINGLETON;
+    }
 
-    static CanalInstanceExports forInstance(CanalInstance instance) {
-        return new CanalInstanceExports(instance);
+    public void initialize() {
+        storeCollector.register();
+        entryCollector.register();
+        metaCollector.register();
+        sinkCollector.register();
+        parserCollector.register();
     }
 
-    void register() {
-        if (delayCollector != null) {
-            delayCollector.register();
-        }
-        if (storeCollector != null) {
-            storeCollector.register();
-        }
-        if (metaCollector != null) {
-            metaCollector.register();
-        }
+    public void terminate() {
+        CollectorRegistry.defaultRegistry.unregister(storeCollector);
+        CollectorRegistry.defaultRegistry.unregister(entryCollector);
+        CollectorRegistry.defaultRegistry.unregister(metaCollector);
+        CollectorRegistry.defaultRegistry.unregister(sinkCollector);
+        CollectorRegistry.defaultRegistry.unregister(parserCollector);
     }
 
-    void unregister() {
-        if (delayCollector != null) {
-            CollectorRegistry.defaultRegistry.unregister(delayCollector);
-        }
-        if (storeCollector != null) {
-            CollectorRegistry.defaultRegistry.unregister(storeCollector);
-        }
-        if (metaCollector != null) {
-            CollectorRegistry.defaultRegistry.unregister(metaCollector);
-        }
+    void register(CanalInstance instance) {
+        requiredInstanceRegistry(storeCollector).register(instance);
+        requiredInstanceRegistry(entryCollector).register(instance);
+        requiredInstanceRegistry(metaCollector).register(instance);
+        requiredInstanceRegistry(sinkCollector).register(instance);
+        requiredInstanceRegistry(parserCollector).register(instance);
+        logger.info("Successfully register metrics for instance {}.", instance.getDestination());
     }
 
-    private void initDelayGauge(CanalInstance instance) {
-        CanalEventSink sink = instance.getEventSink();
-        if (sink instanceof EntryEventSink) {
-            EntryEventSink entryEventSink = (EntryEventSink) sink;
-            // TODO ensure not to add handler again
-            PrometheusCanalEventDownStreamHandler handler = new PrometheusCanalEventDownStreamHandler(destination);
-            entryEventSink.addHandler(handler);
-            delayCollector = handler.getCollector();
-        } else {
-            logger.warn("This impl register metrics for only EntryEventSink, skip.");
-        }
+    void unregister(CanalInstance instance) {
+        requiredInstanceRegistry(storeCollector).unregister(instance);
+        requiredInstanceRegistry(entryCollector).unregister(instance);
+        requiredInstanceRegistry(metaCollector).unregister(instance);
+        requiredInstanceRegistry(sinkCollector).unregister(instance);
+        requiredInstanceRegistry(parserCollector).unregister(instance);
+        logger.info("Successfully unregister metrics for instance {}.", instance.getDestination());
     }
 
-    private void initStoreCollector(CanalInstance instance) {
-        try {
-            storeCollector = new MemoryStoreCollector(instance.getEventStore(), destination);
-        } catch (CanalStoreException cse) {
-            logger.warn("Failed to register metrics for destination {}.", destination, cse);
+    private InstanceRegistry requiredInstanceRegistry(Collector collector) {
+        if (!(collector instanceof InstanceRegistry)) {
+            throw new IllegalArgumentException("Canal prometheus collector need to implement InstanceRegistry.");
         }
+        return (InstanceRegistry) collector;
     }
 
-    private void initMetaCollector(CanalInstance instance) {
-        metaCollector = new InstanceMetaCollector(instance);
-    }
 }

+ 0 - 21
prometheus/src/main/java/com/alibaba/otter/canal/prometheus/CanalServerExports.java

@@ -1,21 +0,0 @@
-package com.alibaba.otter.canal.prometheus;
-
-import com.alibaba.otter.canal.prometheus.impl.InboundThroughputAspect;
-import com.alibaba.otter.canal.prometheus.impl.OutboundThroughputAspect;
-
-/**
- * @author Chuanyi Li
- */
-public class CanalServerExports {
-
-    private static boolean initialized = false;
-
-    public static synchronized void initialize() {
-        if (!initialized) {
-            InboundThroughputAspect.getCollector().register();
-            OutboundThroughputAspect.getCollector().register();
-            initialized = true;
-        }
-    }
-
-}

+ 14 - 0
prometheus/src/main/java/com/alibaba/otter/canal/prometheus/InstanceRegistry.java

@@ -0,0 +1,14 @@
+package com.alibaba.otter.canal.prometheus;
+
+import com.alibaba.otter.canal.instance.core.CanalInstance;
+
+/**
+ * @author Chuanyi Li
+ */
+public interface InstanceRegistry {
+
+    void register(CanalInstance instance);
+
+    void unregister(CanalInstance instance);
+
+}

+ 31 - 26
prometheus/src/main/java/com/alibaba/otter/canal/prometheus/PrometheusService.java

@@ -1,6 +1,8 @@
 package com.alibaba.otter.canal.prometheus;
 
 import com.alibaba.otter.canal.instance.core.CanalInstance;
+import com.alibaba.otter.canal.prometheus.impl.PrometheusClientInstanceProfiler;
+import com.alibaba.otter.canal.server.netty.ClientInstanceProfiler;
 import com.alibaba.otter.canal.spi.CanalMetricsService;
 import io.prometheus.client.exporter.HTTPServer;
 import io.prometheus.client.hotspot.DefaultExports;
@@ -8,23 +10,24 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
+
+import static com.alibaba.otter.canal.server.netty.CanalServerWithNettyProfiler.NOP;
+import static com.alibaba.otter.canal.server.netty.CanalServerWithNettyProfiler.profiler;
 
 /**
  * @author Chuanyi Li
  */
 public class PrometheusService implements CanalMetricsService {
 
-    private static final Logger                           logger  = LoggerFactory.getLogger(PrometheusService.class);
-
-    private final Map<String, CanalInstanceExports>       exports = new ConcurrentHashMap<String, CanalInstanceExports>();
-
-    private volatile boolean                              running = false;
-
-    private HTTPServer                                    server;
+    private static final Logger          logger          = LoggerFactory.getLogger(PrometheusService.class);
+    private final CanalInstanceExports   instanceExports;
+    private volatile boolean             running         = false;
+    private HTTPServer                   server;
+    private final ClientInstanceProfiler clientProfiler;
 
     private PrometheusService() {
+        this.instanceExports = CanalInstanceExports.instance();
+        this.clientProfiler = PrometheusClientInstanceProfiler.instance();
     }
 
     private static class SingletonHolder {
@@ -48,8 +51,11 @@ public class PrometheusService implements CanalMetricsService {
         try {
             // JVM exports
             DefaultExports.initialize();
-            // Canal server level exports
-            CanalServerExports.initialize();
+            instanceExports.initialize();
+            if (!clientProfiler.isStart()) {
+                clientProfiler.start();
+            }
+            profiler().setInstanceProfiler(clientProfiler);
         } catch (Throwable t) {
             logger.warn("Unable to initialize server exports.", t);
         }
@@ -60,14 +66,17 @@ public class PrometheusService implements CanalMetricsService {
     @Override
     public void terminate() {
         running = false;
-        // Normally, service should be terminated at canal shutdown.
-        // No need to unregister instance exports explicitly.
-        // But for the sake of safety, unregister them.
-        for (CanalInstanceExports ie : exports.values()) {
-            ie.unregister();
-        }
-        if (server != null) {
-            server.stop();
+        try {
+            instanceExports.terminate();
+            if (clientProfiler.isStart()) {
+                clientProfiler.stop();
+            }
+            profiler().setInstanceProfiler(NOP);
+            if (server != null) {
+                server.stop();
+            }
+        } catch (Throwable t) {
+            logger.warn("Something happened while terminating.", t);
         }
     }
 
@@ -83,9 +92,7 @@ public class PrometheusService implements CanalMetricsService {
             return;
         }
         try {
-            CanalInstanceExports export = CanalInstanceExports.forInstance(instance);
-            export.register();
-            exports.put(instance.getDestination(), export);
+            instanceExports.register(instance);
         } catch (Throwable t) {
             logger.warn("Unable to register instance exports for {}.", instance.getDestination(), t);
         }
@@ -98,13 +105,11 @@ public class PrometheusService implements CanalMetricsService {
             logger.warn("Try unregister metrics after destination {} is stopped.", instance.getDestination());
         }
         try {
-            CanalInstanceExports export = exports.remove(instance.getDestination());
-            if (export != null) {
-                export.unregister();
-            }
+            instanceExports.unregister(instance);
         } catch (Throwable t) {
             logger.warn("Unable to unregister instance exports for {}.", instance.getDestination(), t);
         }
         logger.info("Unregister metrics for destination {}.", instance.getDestination());
     }
+
 }

+ 134 - 0
prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/EntryCollector.java

@@ -0,0 +1,134 @@
+package com.alibaba.otter.canal.prometheus.impl;
+
+import com.alibaba.otter.canal.instance.core.CanalInstance;
+import com.alibaba.otter.canal.prometheus.InstanceRegistry;
+import com.alibaba.otter.canal.sink.CanalEventDownStreamHandler;
+import com.alibaba.otter.canal.sink.CanalEventSink;
+import com.alibaba.otter.canal.sink.entry.EntryEventSink;
+import com.google.common.base.Preconditions;
+import io.prometheus.client.Collector;
+import io.prometheus.client.CounterMetricFamily;
+import io.prometheus.client.GaugeMetricFamily;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static com.alibaba.otter.canal.prometheus.CanalInstanceExports.DEST_LABELS_LIST;
+
+/**
+ * @author Chuanyi Li
+ */
+public class EntryCollector extends Collector implements InstanceRegistry {
+
+    private static final Logger                             logger           = LoggerFactory.getLogger(SinkCollector.class);
+    private static final String                             DELAY            = "canal_instance_traffic_delay";
+    private static final String                             TRANSACTION      = "canal_instance_transactions";
+    private static final String                             DELAY_HELP       = "Traffic delay of canal instance in milliseconds";
+    private static final String                             TRANSACTION_HELP = "Transactions counter of canal instance";
+    private final ConcurrentMap<String, EntryMetricsHolder> instances        = new ConcurrentHashMap<String, EntryMetricsHolder>();
+
+    private EntryCollector() {}
+
+    private static class SingletonHolder {
+        private static final EntryCollector SINGLETON = new EntryCollector();
+    }
+
+    public static EntryCollector instance() {
+        return SingletonHolder.SINGLETON;
+    }
+
+    @Override
+    public List<MetricFamilySamples> collect() {
+        List<MetricFamilySamples> mfs = new ArrayList<MetricFamilySamples>();
+        GaugeMetricFamily delay = new GaugeMetricFamily(DELAY,
+                DELAY_HELP, DEST_LABELS_LIST);
+        CounterMetricFamily transactions = new CounterMetricFamily(TRANSACTION,
+                TRANSACTION_HELP, DEST_LABELS_LIST);
+        for (EntryMetricsHolder emh : instances.values()) {
+            long now = System.currentTimeMillis();
+            long latest = emh.latestExecTime.get();
+            if (now > latest) {
+                delay.addMetric(emh.destLabelValues, (now - latest));
+            }
+            transactions.addMetric(emh.destLabelValues, emh.transactionCounter.doubleValue());
+        }
+        mfs.add(delay);
+        mfs.add(transactions);
+        return mfs;
+    }
+
+    @Override
+    public void register(CanalInstance instance) {
+        final String destination = instance.getDestination();
+        EntryMetricsHolder holder = new EntryMetricsHolder();
+        holder.destLabelValues = Collections.singletonList(destination);
+        CanalEventSink sink = instance.getEventSink();
+        if (!(sink instanceof EntryEventSink)) {
+            throw new IllegalArgumentException("CanalEventSink must be EntryEventSink");
+        }
+        EntryEventSink entrySink = (EntryEventSink) sink;
+        PrometheusCanalEventDownStreamHandler handler = assembleHandler(entrySink);
+        holder.latestExecTime = handler.getLatestExecuteTime();
+        holder.transactionCounter = handler.getTransactionCounter();
+        Preconditions.checkNotNull(holder.latestExecTime);
+        Preconditions.checkNotNull(holder.transactionCounter);
+        EntryMetricsHolder old = instances.put(destination, holder);
+        if (old != null) {
+            logger.warn("Remove stale EntryCollector for instance {}.", destination);
+        }
+    }
+
+    @Override
+    public void unregister(CanalInstance instance) {
+        final String destination = instance.getDestination();
+        CanalEventSink sink = instance.getEventSink();
+        if (!(sink instanceof EntryEventSink)) {
+            throw new IllegalArgumentException("CanalEventSink must be EntryEventSink");
+        }
+        unloadHandler((EntryEventSink) sink);
+        instances.remove(destination);
+    }
+
+    private PrometheusCanalEventDownStreamHandler assembleHandler(EntryEventSink entrySink) {
+        PrometheusCanalEventDownStreamHandler ph = new PrometheusCanalEventDownStreamHandler();
+        List<CanalEventDownStreamHandler> handlers = entrySink.getHandlers();
+        for (CanalEventDownStreamHandler handler : handlers) {
+            if (handler instanceof PrometheusCanalEventDownStreamHandler) {
+                throw new IllegalStateException("PrometheusCanalEventDownStreamHandler already exists in handlers.");
+            }
+        }
+        entrySink.addHandler(ph, 0);
+        return ph;
+    }
+
+    private void unloadHandler(EntryEventSink entrySink) {
+        List<CanalEventDownStreamHandler> handlers = entrySink.getHandlers();
+        int i = 0;
+        for (; i < handlers.size(); i++) {
+            if (handlers.get(i) instanceof PrometheusCanalEventDownStreamHandler) {
+                break;
+            }
+        }
+        entrySink.removeHandler(i);
+        // Ensure no PrometheusCanalEventDownStreamHandler
+        handlers = entrySink.getHandlers();
+        for (CanalEventDownStreamHandler handler : handlers) {
+            if (handler instanceof PrometheusCanalEventDownStreamHandler) {
+                throw new IllegalStateException("Multiple prometheusCanalEventDownStreamHandler exists in handlers.");
+            }
+        }
+    }
+
+    private class EntryMetricsHolder {
+        private AtomicLong   latestExecTime;
+        private AtomicLong   transactionCounter;
+        private List<String> destLabelValues;
+    }
+
+}

+ 0 - 79
prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/InboundThroughputAspect.java

@@ -1,79 +0,0 @@
-package com.alibaba.otter.canal.prometheus.impl;
-
-import io.prometheus.client.Collector;
-import io.prometheus.client.CounterMetricFamily;
-import org.aspectj.lang.annotation.After;
-import org.aspectj.lang.annotation.Aspect;
-import org.aspectj.lang.annotation.Pointcut;
-import org.jctools.maps.ConcurrentAutoTable;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * @author Chuanyi Li
- */
-@Aspect
-public class InboundThroughputAspect {
-
-    private static final Logger              logger    = LoggerFactory.getLogger(InboundThroughputAspect.class);
-
-    /**
-     *  Support highly scalable counters
-     *  @see ConcurrentAutoTable
-     */
-    private static final ConcurrentAutoTable total     = new ConcurrentAutoTable();
-
-    private static final Collector           collector = new InboundThroughputCollector();
-
-    public static Collector getCollector() {
-        return collector;
-    }
-
-    @Pointcut("call(byte[] com.alibaba.otter.canal.parse.driver.mysql.socket.SocketChannel.read(..))")
-    public void read() {}
-
-    @Pointcut("call(void com.alibaba.otter.canal.parse.driver.mysql.socket.SocketChannel.read(..)) ")
-    public void readBytes() {}
-
-    //nested read, just eliminate them.
-    @Pointcut("withincode(* com.alibaba.otter.canal.parse.driver.mysql.socket.SocketChannel.read(..))")
-    public void nestedCall() {}
-
-    @After("read() && !nestedCall()  && args(len, ..)")
-    public void recordRead(int len) {
-        accumulateBytes(len);
-    }
-
-    @After("readBytes() && !nestedCall() && args(.., len, timeout)")
-    public void recordReadBytes(int len, int timeout) {
-        accumulateBytes(len);
-    }
-
-    private void accumulateBytes(int count) {
-        try {
-            total.add(count);
-        } catch (Throwable t) {
-            //Catch every Throwable, rather than break the business logic.
-            logger.warn("Error while accumulate inbound bytes.", t);
-        }
-    }
-
-    public static class InboundThroughputCollector extends Collector {
-
-        private InboundThroughputCollector() {}
-
-        @Override
-        public List<MetricFamilySamples> collect() {
-            List<MetricFamilySamples> mfs = new ArrayList<MetricFamilySamples>();
-            CounterMetricFamily bytes = new CounterMetricFamily("canal_net_inbound_bytes",
-                    "Total socket inbound bytes of canal server.",
-                    total.get());
-            mfs.add(bytes);
-            return mfs;
-        }
-    }
-
-}

+ 0 - 64
prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/InstanceMetaCollector.java

@@ -1,64 +0,0 @@
-package com.alibaba.otter.canal.prometheus.impl;
-
-import com.alibaba.otter.canal.instance.core.CanalInstance;
-import com.alibaba.otter.canal.instance.spring.CanalInstanceWithSpring;
-import com.alibaba.otter.canal.meta.CanalMetaManager;
-import com.alibaba.otter.canal.prometheus.CanalInstanceExports;
-import com.alibaba.otter.canal.protocol.ClientIdentity;
-import io.prometheus.client.Collector;
-import io.prometheus.client.GaugeMetricFamily;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-/**
- * @author Chuanyi Li
- */
-public class InstanceMetaCollector extends Collector {
-
-    private static final List<String> InfoLabel    = Arrays.asList("destination", "mode");
-
-    private CanalMetaManager          metaManager;
-
-    private final String              destination;
-
-    private final String              mode;
-
-    private final String              subsHelp;
-
-    public InstanceMetaCollector(CanalInstance instance) {
-        if (instance == null) {
-            throw new IllegalArgumentException("CanalInstance must not be null.");
-        }
-        if (instance instanceof CanalInstanceWithSpring) {
-            mode = "spring";
-        } else {
-            mode = "manager";
-        }
-        this.metaManager = instance.getMetaManager();
-        this.destination = instance.getDestination();
-        this.subsHelp = "Subscriptions of canal instance " + destination;
-    }
-
-    @Override
-    public List<MetricFamilySamples> collect() {
-        List<MetricFamilySamples> mfs = new ArrayList<MetricFamilySamples>();
-        GaugeMetricFamily instanceInfo = new GaugeMetricFamily(
-                "canal_instance",
-                "Canal instance",
-                InfoLabel);
-        instanceInfo.addMetric(Arrays.asList(destination, mode), 1);
-        mfs.add(instanceInfo);
-        if (metaManager.isStart()) {
-            // client id = hardcode 1001, 目前没有意义
-            List<ClientIdentity> subs = metaManager.listAllSubscribeInfo(destination);
-            GaugeMetricFamily subscriptions = new GaugeMetricFamily(
-                    "canal_instance_subscription",
-                    subsHelp, CanalInstanceExports.labelList);
-            subscriptions.addMetric(Arrays.asList(destination), subs.size());
-            mfs.add(subscriptions);
-        }
-        return mfs;
-    }
-}

+ 0 - 75
prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/MemoryStoreCollector.java

@@ -1,75 +0,0 @@
-package com.alibaba.otter.canal.prometheus.impl;
-
-import com.alibaba.otter.canal.prometheus.CanalInstanceExports;
-import com.alibaba.otter.canal.store.CanalEventStore;
-import com.alibaba.otter.canal.store.CanalStoreException;
-import com.alibaba.otter.canal.store.memory.MemoryEventStoreWithBuffer;
-import io.prometheus.client.Collector;
-import io.prometheus.client.CounterMetricFamily;
-
-import java.lang.reflect.Field;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * @author Chuanyi Li
- */
-public class MemoryStoreCollector extends Collector {
-
-    private static final Class<MemoryEventStoreWithBuffer> clazz  = MemoryEventStoreWithBuffer.class;
-
-    private final String                                   destination;
-
-    private final AtomicLong                               putSequence;
-
-    private final AtomicLong                               ackSequence;
-
-    private final String                                   putHelp;
-
-    private final String                                   ackHelp;
-
-    public MemoryStoreCollector(CanalEventStore store, String destination) {
-        this.destination = destination;
-        if (!(store instanceof MemoryEventStoreWithBuffer)) {
-            throw new IllegalArgumentException("EventStore must be MemoryEventStoreWithBuffer");
-        }
-        MemoryEventStoreWithBuffer ms = (MemoryEventStoreWithBuffer) store;
-        putSequence = getDeclaredValue(ms, "putSequence");
-        ackSequence = getDeclaredValue(ms, "ackSequence");
-        putHelp = "Produced sequence of canal instance " + destination;
-        ackHelp = "Consumed sequence of canal instance " + destination;
-    }
-
-    @Override
-    public List<MetricFamilySamples> collect() {
-        List<MetricFamilySamples> mfs = new ArrayList<MetricFamilySamples>();
-        CounterMetricFamily put = new CounterMetricFamily("canal_instance_store_produce_seq",
-                putHelp, Arrays.asList(CanalInstanceExports.labels));
-        put.addMetric(Collections.singletonList(destination), putSequence.doubleValue());
-        mfs.add(put);
-        CounterMetricFamily ack = new CounterMetricFamily("canal_instance_store_consume_seq",
-                ackHelp, Arrays.asList(CanalInstanceExports.labels));
-        ack.addMetric(Collections.singletonList(destination), ackSequence.doubleValue());
-        mfs.add(ack);
-        return mfs;
-    }
-
-    @SuppressWarnings("unchecked")
-    private static <T> T getDeclaredValue(MemoryEventStoreWithBuffer store, String name) {
-        T value;
-        try {
-            Field putField = clazz.getDeclaredField(name);
-            putField.setAccessible(true);
-            value = (T) putField.get(store);
-        } catch (NoSuchFieldException e) {
-            throw new CanalStoreException(e);
-        } catch (IllegalAccessException e) {
-            throw new CanalStoreException(e);
-        }
-        return value;
-    }
-
-}

+ 91 - 0
prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/MetaCollector.java

@@ -0,0 +1,91 @@
+package com.alibaba.otter.canal.prometheus.impl;
+
+import com.alibaba.otter.canal.instance.core.CanalInstance;
+import com.alibaba.otter.canal.instance.spring.CanalInstanceWithSpring;
+import com.alibaba.otter.canal.meta.CanalMetaManager;
+import com.alibaba.otter.canal.prometheus.InstanceRegistry;
+import com.alibaba.otter.canal.protocol.ClientIdentity;
+import com.google.common.base.Preconditions;
+import io.prometheus.client.Collector;
+import io.prometheus.client.GaugeMetricFamily;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import static com.alibaba.otter.canal.prometheus.CanalInstanceExports.DEST_LABELS_LIST;
+
+/**
+ * @author Chuanyi Li
+ */
+public class MetaCollector extends Collector implements InstanceRegistry {
+
+    private static final List<String>                      INFO_LABELS_LIST  = Arrays.asList("destination", "mode");
+    private static final Logger                            logger            = LoggerFactory.getLogger(MetaCollector.class);
+    private static final String                            INSTANCE          = "canal_instance";
+    private static final String                            INSTANCE_HELP     = "Canal instance";
+    private static final String                            SUBSCRIPTION      = "canal_instance_subscriptions";
+    private static final String                            SUBSCRIPTION_HELP = "Canal instance subscriptions";
+    private final ConcurrentMap<String, MetaMetricsHolder> instances         = new ConcurrentHashMap<String, MetaMetricsHolder>();
+
+    private MetaCollector() {}
+
+    private static class SingletonHolder {
+        private static final MetaCollector SINGLETON = new MetaCollector();
+    }
+
+    public static MetaCollector instance() {
+        return SingletonHolder.SINGLETON;
+    }
+
+    @Override
+    public List<MetricFamilySamples> collect() {
+        List<MetricFamilySamples> mfs = new ArrayList<MetricFamilySamples>();
+        GaugeMetricFamily instanceInfo = new GaugeMetricFamily(INSTANCE,
+                INSTANCE_HELP, INFO_LABELS_LIST);
+        GaugeMetricFamily subsInfo = new GaugeMetricFamily(SUBSCRIPTION,
+                SUBSCRIPTION_HELP, DEST_LABELS_LIST);
+        for (Map.Entry<String, MetaMetricsHolder> nme : instances.entrySet()) {
+            final String destination = nme.getKey();
+            final MetaMetricsHolder nmh = nme.getValue();
+            instanceInfo.addMetric(nmh.infoLabelValues, 1);
+            List<ClientIdentity> subs = nmh.metaManager.listAllSubscribeInfo(destination);
+            int count = subs == null ? 0 : subs.size();
+            subsInfo.addMetric(nmh.destLabelValues, count);
+        }
+        mfs.add(instanceInfo);
+        mfs.add(subsInfo);
+        return mfs;
+    }
+
+    @Override
+    public void register(CanalInstance instance) {
+        final String destination = instance.getDestination();
+        MetaMetricsHolder holder = new MetaMetricsHolder();
+        String mode = (instance instanceof CanalInstanceWithSpring) ? "spring" : "manager";
+        holder.infoLabelValues = Arrays.asList(destination, mode);
+        holder.destLabelValues = Collections.singletonList(destination);
+        holder.metaManager = instance.getMetaManager();
+        Preconditions.checkNotNull(holder.metaManager);
+        MetaMetricsHolder old = instances.put(destination, holder);
+        if (old != null) {
+            logger.warn("Remove stale MetaCollector for instance {}.", destination);
+        }
+    }
+
+    @Override
+    public void unregister(CanalInstance instance) {
+        final String destination = instance.getDestination();
+        instances.remove(destination);
+    }
+
+    private class MetaMetricsHolder {
+        private List<String>     infoLabelValues;
+        private List<String>     destLabelValues;
+        private CanalMetaManager metaManager;
+    }
+
+
+}

+ 0 - 80
prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/OutboundThroughputAspect.java

@@ -1,80 +0,0 @@
-package com.alibaba.otter.canal.prometheus.impl;
-
-import io.prometheus.client.Collector;
-import io.prometheus.client.CounterMetricFamily;
-import org.aspectj.lang.annotation.After;
-import org.aspectj.lang.annotation.Aspect;
-import org.aspectj.lang.annotation.Pointcut;
-import org.jboss.netty.channel.Channel;
-import org.jctools.maps.ConcurrentAutoTable;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-
-import static com.alibaba.otter.canal.server.netty.NettyUtils.HEADER_LENGTH;
-
-/**
- * @author Chuanyi Li
- */
-@Aspect
-public class OutboundThroughputAspect {
-    private static final Logger              logger    = LoggerFactory.getLogger(OutboundThroughputAspect.class);
-
-    /**
-     *  Support highly scalable counters
-     *  @see ConcurrentAutoTable
-     */
-    private static final ConcurrentAutoTable total     = new ConcurrentAutoTable();
-
-    private static final Collector           collector = new OutboundThroughputCollector();
-
-    public static Collector getCollector() {
-        return collector;
-    }
-
-    @Pointcut("call(* com.alibaba.otter.canal.server.netty.NettyUtils.write(..))")
-    public void write() {}
-
-    //nested read, just eliminate them.
-    @Pointcut("withincode(* com.alibaba.otter.canal.server.netty.NettyUtils.write(..))")
-    public void nestedCall() {}
-
-    @After("write() && !nestedCall() && args(ch, bytes, ..)")
-    public void recordWriteBytes(Channel ch, byte[] bytes) {
-        if (bytes != null) {
-            accumulateBytes(HEADER_LENGTH + bytes.length);
-        }
-    }
-
-    @After("write() && !nestedCall() && args(ch, buf, ..)")
-    public void recordWriteBuffer(Channel ch, ByteBuffer buf) {
-        if (buf != null) {
-            total.add(HEADER_LENGTH + buf.limit());
-        }
-    }
-    private void accumulateBytes(int count) {
-        try {
-            total.add(count);
-        } catch (Throwable t) {
-            //Catch every Throwable, rather than break the business logic.
-            logger.warn("Error while accumulate inbound bytes.", t);
-        }
-    }
-
-    public static class OutboundThroughputCollector extends Collector {
-
-        private OutboundThroughputCollector() {}
-
-        @Override public List<MetricFamilySamples> collect() {
-            List<MetricFamilySamples> mfs = new ArrayList<MetricFamilySamples>();
-            CounterMetricFamily bytes = new CounterMetricFamily("canal_net_outbound_bytes",
-                    "Total socket outbound bytes of canal server.",
-                    total.get());
-            mfs.add(bytes);
-            return mfs;
-        }
-    }
-}

+ 116 - 0
prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/ParserCollector.java

@@ -0,0 +1,116 @@
+package com.alibaba.otter.canal.prometheus.impl;
+
+import com.alibaba.otter.canal.instance.core.CanalInstance;
+import com.alibaba.otter.canal.parse.CanalEventParser;
+import com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser;
+import com.alibaba.otter.canal.prometheus.InstanceRegistry;
+import com.google.common.base.Preconditions;
+import io.prometheus.client.Collector;
+import io.prometheus.client.CounterMetricFamily;
+import io.prometheus.client.GaugeMetricFamily;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static com.alibaba.otter.canal.prometheus.CanalInstanceExports.DEST;
+import static com.alibaba.otter.canal.prometheus.CanalInstanceExports.DEST_LABELS_LIST;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+
+/**
+ * @author Chuanyi Li
+ */
+public class ParserCollector extends Collector implements InstanceRegistry {
+
+    private static final Logger                              logger                = LoggerFactory.getLogger(ParserCollector.class);
+    private static final long                                NANO_PER_MILLI        = 1000 * 1000L;
+    private static final String                              PUBLISH_BLOCKING      = "canal_instance_publish_blocking_time";
+    private static final String                              RECEIVED_BINLOG       = "canal_instance_received_binlog_bytes";
+    private static final String                              PARSER_MODE           = "canal_instance_parser_mode";
+    private static final String                              MODE_LABEL            = "parallel";
+    private static final String                              PUBLISH_BLOCKING_HELP = "Publish blocking time of dump thread in milliseconds";
+    private static final String                              RECEIVED_BINLOG_HELP  = "Received binlog bytes";
+    private static final String                              MODE_HELP             = "Parser mode(parallel/serial) of instance";
+    private final List<String>                               modeLabels            = Arrays.asList(DEST, MODE_LABEL);
+    private final ConcurrentMap<String, ParserMetricsHolder> instances             = new ConcurrentHashMap<String, ParserMetricsHolder>();
+
+    private ParserCollector() {}
+
+    private static class SingletonHolder {
+        private static final ParserCollector SINGLETON = new ParserCollector();
+    }
+
+    public static ParserCollector instance() {
+        return SingletonHolder.SINGLETON;
+    }
+
+    @Override
+    public List<MetricFamilySamples> collect() {
+        List<MetricFamilySamples> mfs = new ArrayList<MetricFamilySamples>();
+        boolean hasParallel = false;
+        CounterMetricFamily bytesCounter = new CounterMetricFamily(RECEIVED_BINLOG,
+                RECEIVED_BINLOG_HELP, DEST_LABELS_LIST);
+        GaugeMetricFamily modeGauge = new GaugeMetricFamily(PARSER_MODE,
+                MODE_HELP, modeLabels);
+        CounterMetricFamily blockingCounter = new CounterMetricFamily(PUBLISH_BLOCKING,
+                PUBLISH_BLOCKING_HELP, DEST_LABELS_LIST);
+        for (ParserMetricsHolder emh : instances.values()) {
+            if (emh.isParallel) {
+                blockingCounter.addMetric(emh.destLabelValues, (emh.eventsPublishBlockingTime.doubleValue() / NANO_PER_MILLI));
+                hasParallel = true;
+            }
+            modeGauge.addMetric(emh.modeLabelValues, 1);
+            bytesCounter.addMetric(emh.destLabelValues, emh.receivedBinlogBytes.doubleValue());
+
+        }
+        mfs.add(bytesCounter);
+        mfs.add(modeGauge);
+        if (hasParallel) {
+            mfs.add(blockingCounter);
+        }
+        return mfs;
+    }
+
+    @Override
+    public void register(CanalInstance instance) {
+        final String destination = instance.getDestination();
+        ParserMetricsHolder holder = new ParserMetricsHolder();
+        CanalEventParser parser = instance.getEventParser();
+        if (!(parser instanceof MysqlEventParser)) {
+            throw new IllegalArgumentException("CanalEventParser must be MysqlEventParser");
+        }
+        MysqlEventParser mysqlParser = (MysqlEventParser) parser;
+        holder.destLabelValues = Collections.singletonList(destination);
+        holder.modeLabelValues = Arrays.asList(destination, Boolean.toString(mysqlParser.isParallel()));
+        holder.eventsPublishBlockingTime = mysqlParser.getEventsPublishBlockingTime();
+        holder.receivedBinlogBytes = mysqlParser.getReceivedBinlogBytes();
+        holder.isParallel = mysqlParser.isParallel();
+        Preconditions.checkNotNull(holder.eventsPublishBlockingTime);
+        Preconditions.checkNotNull(holder.receivedBinlogBytes);
+        ParserMetricsHolder old = instances.put(destination, holder);
+        if (old != null) {
+            logger.warn("Remove stale ParserCollector for instance {}.", destination);
+        }
+    }
+
+    @Override
+    public void unregister(CanalInstance instance) {
+        final String destination = instance.getDestination();
+        instances.remove(destination);
+    }
+
+    private class ParserMetricsHolder {
+        private List<String> destLabelValues;
+        private List<String> modeLabelValues;
+        private AtomicLong   receivedBinlogBytes;
+        private AtomicLong   eventsPublishBlockingTime;
+        private boolean      isParallel;
+    }
+
+}

+ 69 - 49
prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/PrometheusCanalEventDownStreamHandler.java

@@ -1,73 +1,93 @@
 package com.alibaba.otter.canal.prometheus.impl;
 
-import com.alibaba.otter.canal.prometheus.CanalInstanceExports;
+import com.alibaba.otter.canal.protocol.CanalEntry;
+import com.alibaba.otter.canal.protocol.CanalEntry.EntryType;
 import com.alibaba.otter.canal.sink.AbstractCanalEventDownStreamHandler;
 import com.alibaba.otter.canal.store.model.Event;
-import io.prometheus.client.Collector;
-import io.prometheus.client.GaugeMetricFamily;
 
-import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
 
 /**
  * @author Chuanyi Li
  */
 public class PrometheusCanalEventDownStreamHandler extends AbstractCanalEventDownStreamHandler<List<Event>> {
 
-    private final Collector     collector;
-
-    private long                latestExecuteTime = 0L;
-
-    private static final String DELAY_NAME        = "canal_instance_traffic_delay";
-
-    private final String        delayHelpName;
-
-    private final List<String>  labelValues;
-
-    public PrometheusCanalEventDownStreamHandler(final String destination) {
-        this.delayHelpName = "Traffic delay of canal instance " + destination + " in seconds.";
-        this.labelValues = Collections.singletonList(destination);
-        collector = new Collector() {
-            @Override
-            public List<MetricFamilySamples> collect() {
-                List<MetricFamilySamples> mfs = new ArrayList<MetricFamilySamples>();
-                long now = System.currentTimeMillis();
-                GaugeMetricFamily delay = new GaugeMetricFamily(
-                        DELAY_NAME,
-                        delayHelpName,
-                        CanalInstanceExports.labelList);
-                double d = 0.0;
-                if (latestExecuteTime > 0) {
-                    d = now - latestExecuteTime;
-                }
-                d = d > 0.0 ? (d / 1000) : 0.0;
-                delay.addMetric(labelValues, d);
-                mfs.add(delay);
-                return mfs;
-            }
-        };
-    }
+    private final AtomicLong latestExecuteTime  = new AtomicLong(0L);
+    private final AtomicLong transactionCounter = new AtomicLong(0L);
+    private final AtomicLong rowEventCounter    = new AtomicLong(0L);
+    private final AtomicLong rowsCounter        = new AtomicLong(0L);
 
     @Override
     public List<Event> before(List<Event> events) {
-        // TODO utilize MySQL master heartbeat packet to refresh delay if always no more events coming
-        // see: https://dev.mysql.com/worklog/task/?id=342
-        // heartbeats are sent by the master only if there is no
-        // more unsent events in the actual binlog file for a period longer that
-        // master_heartbeat_period.
+        long localExecTime = 0L;
         if (events != null && !events.isEmpty()) {
-            Event last = events.get(events.size() - 1);
-            long ts = last.getExecuteTime();
-            if (ts > latestExecuteTime) {
-                latestExecuteTime = ts;
+            for (Event e : events) {
+                EntryType type = e.getEntryType();
+                if (type == null) continue;
+                switch (type) {
+                    case TRANSACTIONBEGIN: {
+                        long exec = e.getExecuteTime();
+                        if (exec > 0) localExecTime = exec;
+                        break;
+                    }
+                    case ROWDATA: {
+                        long exec = e.getExecuteTime();
+                        if (exec > 0) localExecTime = exec;
+                        // TODO 当前proto无法直接获得RowChange的变更行数(需要parse),可考虑放到header里面
+                        break;
+                    }
+                    case TRANSACTIONEND: {
+                        long exec = e.getExecuteTime();
+                        if (exec > 0) localExecTime = exec;
+                        transactionCounter.incrementAndGet();
+                        break;
+                    }
+                    case HEARTBEAT:
+                        // 发现canal自己的heartbeat是带有execTime的
+                        // TODO 确认一下不是canal自己产生的
+                        CanalEntry.EventType eventType = e.getEventType();
+                        // TODO utilize MySQL master heartbeat packet to refresh delay if always no more events coming
+                        // see: https://dev.mysql.com/worklog/task/?id=342
+                        // heartbeats are sent by the master only if there is no
+                        // more unsent events in the actual binlog file for a period longer that
+                        // master_heartbeat_period.
+                        break;
+                    default:
+                        break;
+                }
+            }
+            if (localExecTime > 0) {
+                latestExecuteTime.lazySet(localExecTime);
             }
         }
         return events;
     }
 
-    public Collector getCollector() {
-        return this.collector;
+    @Override
+    public void start() {
+
+        super.start();
+    }
+
+    @Override
+    public void stop() {
+        super.stop();
+    }
+
+    public AtomicLong getLatestExecuteTime() {
+        return latestExecuteTime;
     }
 
+    public AtomicLong getTransactionCounter() {
+        return transactionCounter;
+    }
+
+    public AtomicLong getRowsCounter() {
+        return rowsCounter;
+    }
+
+    public AtomicLong getRowEventCounter() {
+        return rowEventCounter;
+    }
 }

+ 138 - 0
prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/PrometheusClientInstanceProfiler.java

@@ -0,0 +1,138 @@
+package com.alibaba.otter.canal.prometheus.impl;
+
+import com.alibaba.otter.canal.protocol.CanalPacket.PacketType;
+import com.alibaba.otter.canal.server.netty.ClientInstanceProfiler;
+import com.alibaba.otter.canal.server.netty.listener.ChannelFutureAggregator.ClientRequestResult;
+import io.prometheus.client.CollectorRegistry;
+import io.prometheus.client.Counter;
+import io.prometheus.client.Histogram;
+
+import static com.alibaba.otter.canal.prometheus.CanalInstanceExports.DEST;
+import static com.alibaba.otter.canal.prometheus.CanalInstanceExports.DEST_LABELS;
+
+/**
+ * @author Chuanyi Li
+ */
+public class PrometheusClientInstanceProfiler implements ClientInstanceProfiler {
+
+    private static final long   NANO_PER_MILLI = 1000 * 1000L;
+    private static final String PACKET_TYPE    = "canal_instance_client_packets";
+    private static final String OUTBOUND_BYTES = "canal_instance_client_bytes";
+    private static final String EMPTY_BATCHES  = "canal_instance_client_empty_batches";
+    private static final String ERRORS         = "canal_instance_client_request_error";
+    private static final String LATENCY        = "canal_instance_client_request_latency";
+    private final Counter       outboundCounter;
+    private final Counter       packetsCounter;
+    private final Counter       emptyBatchesCounter;
+    private final Counter       errorsCounter;
+    private final Histogram     responseLatency;
+    private volatile boolean    running        = false;
+
+    private static class SingletonHolder {
+        private static final PrometheusClientInstanceProfiler SINGLETON = new PrometheusClientInstanceProfiler();
+    }
+
+    public static PrometheusClientInstanceProfiler instance() {
+        return SingletonHolder.SINGLETON;
+    }
+
+    private PrometheusClientInstanceProfiler() {
+        this.outboundCounter = Counter.build()
+                .labelNames(DEST_LABELS)
+                .name(OUTBOUND_BYTES)
+                .help("Total bytes sent to client.")
+                .create();
+        this.packetsCounter = Counter.build()
+                .labelNames(new String[]{DEST, "packetType"})
+                .name(PACKET_TYPE)
+                .help("Total packets sent to client.")
+                .create();
+        this.emptyBatchesCounter = Counter.build()
+                .labelNames(DEST_LABELS)
+                .name(EMPTY_BATCHES)
+                .help("Total empty batches sent to client.")
+                .create();
+        this.errorsCounter = Counter.build()
+                .labelNames(new String[]{DEST, "errorCode"})
+                .name(ERRORS)
+                .help("Total client request errors.")
+                .create();
+        this.responseLatency = Histogram.build()
+                .labelNames(DEST_LABELS)
+                .name(LATENCY)
+                .help("Client request latency.")
+                // buckets in milliseconds
+                .buckets(2.5, 10.0, 25.0, 100.0)
+                .create();
+    }
+
+    @Override
+    public void profiling(ClientRequestResult result) {
+        String destination = result.getDestination();
+        PacketType type = result.getType();
+        outboundCounter.labels(destination).inc(result.getAmount());
+        packetsCounter.labels(destination, type.name()).inc();
+        short errorCode = result.getErrorCode();
+        if (errorCode > 0) {
+            errorsCounter.labels(destination, Short.toString(errorCode)).inc();
+        }
+        long latency = result.getLatency();
+        responseLatency.labels(destination).observe(((double) latency) / NANO_PER_MILLI);
+        switch (type) {
+            case GET:
+                boolean empty = result.getEmpty();
+                if (empty) {
+                    emptyBatchesCounter.labels(destination).inc();
+                }
+                break;
+            // reserve for others
+            default:
+                break;
+        }
+    }
+
+    @Override
+    public void start() {
+        if (outboundCounter != null) {
+            outboundCounter.register();
+        }
+        if (packetsCounter != null) {
+            packetsCounter.register();
+        }
+        if (emptyBatchesCounter != null) {
+            emptyBatchesCounter.register();
+        }
+        if (errorsCounter != null) {
+            errorsCounter.register();
+        }
+        if (responseLatency != null) {
+            responseLatency.register();
+        }
+        running = true;
+    }
+
+    @Override
+    public void stop() {
+        running = false;
+        if (outboundCounter != null) {
+            CollectorRegistry.defaultRegistry.unregister(outboundCounter);
+        }
+        if (packetsCounter != null) {
+            CollectorRegistry.defaultRegistry.unregister(packetsCounter);
+        }
+        if (emptyBatchesCounter != null) {
+            CollectorRegistry.defaultRegistry.unregister(emptyBatchesCounter);
+        }
+        if (errorsCounter != null) {
+            CollectorRegistry.defaultRegistry.unregister(errorsCounter);
+        }
+        if (responseLatency != null) {
+            CollectorRegistry.defaultRegistry.unregister(responseLatency);
+        }
+    }
+
+    @Override
+    public boolean isStart() {
+        return running;
+    }
+}

+ 84 - 0
prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/SinkCollector.java

@@ -0,0 +1,84 @@
+package com.alibaba.otter.canal.prometheus.impl;
+
+import com.alibaba.otter.canal.instance.core.CanalInstance;
+import com.alibaba.otter.canal.prometheus.InstanceRegistry;
+import com.alibaba.otter.canal.sink.CanalEventSink;
+import com.alibaba.otter.canal.sink.entry.EntryEventSink;
+import com.google.common.base.Preconditions;
+import io.prometheus.client.Collector;
+import io.prometheus.client.CounterMetricFamily;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static com.alibaba.otter.canal.prometheus.CanalInstanceExports.DEST_LABELS_LIST;
+
+/**
+ * Prometheus collector exposing the accumulated sink blocking time
+ * ({@code canal_instance_sink_blocking_time}) of every registered
+ * canal instance.
+ *
+ * @author Chuanyi Li
+ */
+public class SinkCollector extends Collector implements InstanceRegistry {
+
+    private static final Logger                            logger               = LoggerFactory.getLogger(SinkCollector.class);
+    private static final long                              NANO_PER_MILLI       = 1000 * 1000L;
+    private static final String                            SINK_BLOCKING_TIME   = "canal_instance_sink_blocking_time";
+    private static final String                            SINK_BLOCK_TIME_HELP = "Total sink blocking time in milliseconds";
+    private final ConcurrentMap<String, SinkMetricsHolder> instances            = new ConcurrentHashMap<String, SinkMetricsHolder>();
+
+    private SinkCollector() {}
+
+    // Initialization-on-demand holder idiom: thread-safe lazy singleton.
+    private static class SingletonHolder {
+        private static final SinkCollector SINGLETON = new SinkCollector();
+    }
+
+    public static SinkCollector instance() {
+        return SingletonHolder.SINGLETON;
+    }
+
+    @Override
+    public List<MetricFamilySamples> collect() {
+        List<MetricFamilySamples> mfs = new ArrayList<MetricFamilySamples>();
+        CounterMetricFamily blockingCounter = new CounterMetricFamily(SINK_BLOCKING_TIME,
+                SINK_BLOCK_TIME_HELP, DEST_LABELS_LIST);
+        for (SinkMetricsHolder smh : instances.values()) {
+            // Blocking time is accumulated in nanoseconds; export as milliseconds.
+            blockingCounter.addMetric(smh.destLabelValues, (smh.eventsSinkBlockingTime.doubleValue() / NANO_PER_MILLI));
+        }
+        mfs.add(blockingCounter);
+        return mfs;
+    }
+
+    /**
+     * Track the sink of the given instance.
+     *
+     * @throws IllegalArgumentException if the sink is not an {@link EntryEventSink}
+     */
+    @Override
+    public void register(CanalInstance instance) {
+        final String destination = instance.getDestination();
+        SinkMetricsHolder holder = new SinkMetricsHolder();
+        holder.destLabelValues = Collections.singletonList(destination);
+        CanalEventSink sink = instance.getEventSink();
+        if (!(sink instanceof EntryEventSink)) {
+            throw new IllegalArgumentException("CanalEventSink must be EntryEventSink");
+        }
+        EntryEventSink entrySink = (EntryEventSink) sink;
+        holder.eventsSinkBlockingTime = entrySink.getEventsSinkBlockingTime();
+        Preconditions.checkNotNull(holder.eventsSinkBlockingTime);
+        // Replace any stale holder so a restarted instance is tracked anew.
+        SinkMetricsHolder old = instances.put(destination, holder);
+        if (old != null) {
+            logger.warn("Remove stale SinkCollector for instance {}.", destination);
+        }
+    }
+
+    @Override
+    public void unregister(CanalInstance instance) {
+        final String destination = instance.getDestination();
+        instances.remove(destination);
+    }
+
+    // static nested: avoids a hidden reference to the enclosing collector
+    private static class SinkMetricsHolder {
+        // counter owned by EntryEventSink; read-only here
+        private AtomicLong   eventsSinkBlockingTime;
+        private List<String> destLabelValues;
+    }
+}

+ 132 - 0
prometheus/src/main/java/com/alibaba/otter/canal/prometheus/impl/StoreCollector.java

@@ -0,0 +1,132 @@
+package com.alibaba.otter.canal.prometheus.impl;
+
+import com.alibaba.otter.canal.instance.core.CanalInstance;
+import com.alibaba.otter.canal.prometheus.InstanceRegistry;
+import com.alibaba.otter.canal.store.CanalEventStore;
+import com.alibaba.otter.canal.store.memory.MemoryEventStoreWithBuffer;
+import com.alibaba.otter.canal.store.model.BatchMode;
+import com.google.common.base.Preconditions;
+import io.prometheus.client.Collector;
+import io.prometheus.client.CounterMetricFamily;
+import io.prometheus.client.GaugeMetricFamily;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static com.alibaba.otter.canal.prometheus.CanalInstanceExports.DEST;
+import static com.alibaba.otter.canal.prometheus.CanalInstanceExports.DEST_LABELS_LIST;
+
+/**
+ * Prometheus collector exposing event-store metrics of registered instances:
+ * put/ack sequences, store info, and (in MEMSIZE batch mode only) put/ack
+ * memory usage.
+ *
+ * @author Chuanyi Li
+ */
+public class StoreCollector extends Collector implements InstanceRegistry {
+
+    // fixed: previously referenced SinkCollector.class (copy-paste error)
+    private static final Logger                             logger           = LoggerFactory.getLogger(StoreCollector.class);
+    private static final String                             PRODUCE          = "canal_instance_store_produce_seq";
+    private static final String                             CONSUME          = "canal_instance_store_consume_seq";
+    private static final String                             STORE            = "canal_instance_store";
+    private static final String                             PRODUCE_MEM      = "canal_instance_store_produce_mem";
+    private static final String                             CONSUME_MEM      = "canal_instance_store_consume_mem";
+    private static final String                             PRODUCE_HELP     = "Produced events counter of canal instance";
+    private static final String                             CONSUME_HELP     = "Consumed events counter of canal instance";
+    private static final String                             STORE_HELP       = "Canal instance info";
+    private static final String                             PRODUCE_MEM_HELP = "Produced mem bytes of canal instance";
+    private static final String                             CONSUME_MEM_HELP = "Consumed mem bytes of canal instance";
+    private final ConcurrentMap<String, StoreMetricsHolder> instances        = new ConcurrentHashMap<String, StoreMetricsHolder>();
+    private final List<String>                              storeLabelsList  = Arrays.asList(DEST, "batchMode");
+
+    private StoreCollector() {}
+
+    // Initialization-on-demand holder idiom: thread-safe lazy singleton.
+    private static class SingletonHolder {
+        private static final StoreCollector SINGLETON = new StoreCollector();
+    }
+
+    public static StoreCollector instance() {
+        return SingletonHolder.SINGLETON;
+    }
+
+    @Override
+    public List<MetricFamilySamples> collect() {
+        List<MetricFamilySamples> mfs = new ArrayList<MetricFamilySamples>();
+        CounterMetricFamily put = new CounterMetricFamily(PRODUCE,
+                PRODUCE_HELP, DEST_LABELS_LIST);
+        CounterMetricFamily ack = new CounterMetricFamily(CONSUME,
+                CONSUME_HELP, DEST_LABELS_LIST);
+        GaugeMetricFamily store = new GaugeMetricFamily(STORE,
+                STORE_HELP, storeLabelsList);
+        CounterMetricFamily putMem = new CounterMetricFamily(PRODUCE_MEM,
+                PRODUCE_MEM_HELP, DEST_LABELS_LIST);
+        CounterMetricFamily ackMem = new CounterMetricFamily(CONSUME_MEM,
+                CONSUME_MEM_HELP, DEST_LABELS_LIST);
+        // Memory metrics are only meaningful for MEMSIZE batch mode; emit the
+        // families only if at least one registered store runs in that mode.
+        boolean hasMem = false;
+        for (StoreMetricsHolder smh : instances.values()) {
+            final boolean isMem = smh.batchMode.isMemSize();
+            put.addMetric(smh.destLabelValues, smh.putSeq.doubleValue());
+            ack.addMetric(smh.destLabelValues, smh.ackSeq.doubleValue());
+            store.addMetric(smh.storeLabelValues, 1);
+            if (isMem) {
+                hasMem = true;
+                putMem.addMetric(smh.destLabelValues, smh.putMemSize.doubleValue());
+                ackMem.addMetric(smh.destLabelValues, smh.ackMemSize.doubleValue());
+            }
+        }
+        mfs.add(put);
+        mfs.add(ack);
+        mfs.add(store);
+        if (hasMem) {
+            mfs.add(putMem);
+            mfs.add(ackMem);
+        }
+        return mfs;
+    }
+
+    /**
+     * Track the event store of the given instance.
+     *
+     * @throws IllegalArgumentException if the store is not a {@link MemoryEventStoreWithBuffer}
+     */
+    @Override
+    public void register(CanalInstance instance) {
+        final String destination = instance.getDestination();
+        StoreMetricsHolder holder = new StoreMetricsHolder();
+        CanalEventStore store = instance.getEventStore();
+        if (!(store instanceof MemoryEventStoreWithBuffer)) {
+            throw new IllegalArgumentException("EventStore must be MemoryEventStoreWithBuffer");
+        }
+        MemoryEventStoreWithBuffer memStore = (MemoryEventStoreWithBuffer) store;
+        holder.batchMode = memStore.getBatchMode();
+        holder.putSeq = memStore.getPutSequence();
+        holder.ackSeq = memStore.getAckSequence();
+        holder.destLabelValues = Collections.singletonList(destination);
+        holder.storeLabelValues = Arrays.asList(destination, memStore.getBatchMode().name());
+        Preconditions.checkNotNull(holder.batchMode);
+        Preconditions.checkNotNull(holder.putSeq);
+        Preconditions.checkNotNull(holder.ackSeq);
+        if (holder.batchMode.isMemSize()) {
+            holder.putMemSize = memStore.getPutMemSize();
+            holder.ackMemSize = memStore.getAckMemSize();
+            Preconditions.checkNotNull(holder.putMemSize);
+            Preconditions.checkNotNull(holder.ackMemSize);
+        }
+        // put (not putIfAbsent): a stale holder would keep exporting the old
+        // store's counters after an instance restart; consistent with SinkCollector.
+        StoreMetricsHolder old = instances.put(destination, holder);
+        if (old != null) {
+            logger.warn("Remove stale StoreCollector for instance {}.", destination);
+        }
+    }
+
+    @Override
+    public void unregister(CanalInstance instance) {
+        final String destination = instance.getDestination();
+        instances.remove(destination);
+    }
+
+    // static nested: avoids a hidden reference to the enclosing collector
+    private static class StoreMetricsHolder {
+        private AtomicLong   putSeq;
+        private AtomicLong   ackSeq;
+        private BatchMode    batchMode;
+        // only populated when batchMode.isMemSize()
+        private AtomicLong   putMemSize;
+        private AtomicLong   ackMemSize;
+        private List<String> destLabelValues;
+        private List<String> storeLabelValues;
+    }
+}

+ 0 - 11
prometheus/src/main/resources/META-INF/aop.xml

@@ -1,11 +0,0 @@
-<aspectj>
-
-    <aspects>
-        <aspect name="com.alibaba.otter.canal.prometheus.impl.InboundThroughputAspect"/>
-        <aspect name="com.alibaba.otter.canal.prometheus.impl.OutboundThroughputAspect"/>
-    </aspects>
-    <weaver options="-verbose -showWeaveInfo">
-        <include within="com.alibaba.otter.canal..*"/>
-    </weaver>
-
-</aspectj>

+ 39 - 0
server/src/main/java/com/alibaba/otter/canal/server/netty/CanalServerWithNettyProfiler.java

@@ -0,0 +1,39 @@
+package com.alibaba.otter.canal.server.netty;
+
+import com.alibaba.otter.canal.common.AbstractCanalLifeCycle;
+import com.alibaba.otter.canal.server.netty.listener.ChannelFutureAggregator.ClientRequestResult;
+
+/**
+ * Singleton entry point through which the netty server reports per-request
+ * profiling results. Defaults to a no-op profiler until a real
+ * {@link ClientInstanceProfiler} is installed (e.g. by the metrics module).
+ *
+ * @author Chuanyi Li
+ */
+public class CanalServerWithNettyProfiler {
+
+    public static final ClientInstanceProfiler NOP = new DefaultClientInstanceProfiler();
+
+    // volatile: installed once by the bootstrap thread, read by netty I/O threads
+    private volatile ClientInstanceProfiler    instanceProfiler;
+
+    private static class SingletonHolder {
+        // final: the singleton must never be reassigned
+        private static final CanalServerWithNettyProfiler SINGLETON = new CanalServerWithNettyProfiler();
+    }
+
+    private CanalServerWithNettyProfiler() {
+        this.instanceProfiler = NOP;
+    }
+
+    public static CanalServerWithNettyProfiler profiler() {
+        return SingletonHolder.SINGLETON;
+    }
+
+    public void profiling(ClientRequestResult result) {
+        instanceProfiler.profiling(result);
+    }
+
+    /**
+     * Install the active profiler; pass {@link #NOP} to disable profiling.
+     *
+     * @throws IllegalArgumentException if {@code instanceProfiler} is null
+     */
+    public void setInstanceProfiler(ClientInstanceProfiler instanceProfiler) {
+        if (instanceProfiler == null) {
+            // a null profiler would NPE on every request; use NOP to disable
+            throw new IllegalArgumentException("instanceProfiler must not be null, use NOP to disable profiling");
+        }
+        this.instanceProfiler = instanceProfiler;
+    }
+
+    // No-op fallback so callers never need a null check.
+    private static class DefaultClientInstanceProfiler extends AbstractCanalLifeCycle implements ClientInstanceProfiler {
+        @Override
+        public void profiling(ClientRequestResult result) {}
+    }
+
+}

+ 13 - 0
server/src/main/java/com/alibaba/otter/canal/server/netty/ClientInstanceProfiler.java

@@ -0,0 +1,13 @@
+package com.alibaba.otter.canal.server.netty;
+
+import com.alibaba.otter.canal.common.CanalLifeCycle;
+import com.alibaba.otter.canal.server.netty.listener.ChannelFutureAggregator.ClientRequestResult;
+
+/**
+ * Lifecycle-aware profiler fed with the outcome of each client request
+ * handled by the netty server.
+ *
+ * @author Chuanyi Li
+ */
+public interface ClientInstanceProfiler extends CanalLifeCycle {
+
+    /**
+     * Record one finished client request (type, payload size, latency, error code).
+     */
+    void profiling(ClientRequestResult result);
+
+}

+ 16 - 0
server/src/main/java/com/alibaba/otter/canal/server/netty/NettyUtils.java

@@ -74,4 +74,20 @@ public class NettyUtils {
                 .toByteArray(),
             channelFutureListener);
     }
+
+    /**
+     * Build a serialized ACK packet with an empty body (success response).
+     */
+    public static byte[] ackPacket() {
+        return Packet.newBuilder()
+                .setType(CanalPacket.PacketType.ACK)
+                .setBody(Ack.newBuilder().build().toByteString())
+                .build()
+                .toByteArray();
+    }
+
+    /**
+     * Build a serialized error response carrying the given code and message.
+     * NOTE(review): errors reuse PacketType.ACK with a non-zero errorCode —
+     * this looks like the canal client protocol convention; confirm intentional.
+     */
+    public static byte[] errorPacket(int errorCode, String errorMessage) {
+        return Packet.newBuilder()
+                .setType(CanalPacket.PacketType.ACK)
+                .setBody(Ack.newBuilder().setErrorCode(errorCode).setErrorMessage(errorMessage).build().toByteString())
+                .build()
+                .toByteArray();
+    }
 }

+ 1 - 1
server/src/main/java/com/alibaba/otter/canal/server/netty/handler/ClientAuthenticationHandler.java

@@ -73,7 +73,7 @@ public class ClientAuthenticationHandler extends SimpleChannelHandler {
                         MDC.remove("destination");
                     }
                 }
-
+                // 鉴权一次性,暂不统计
                 NettyUtils.ack(ctx.getChannel(), new ChannelFutureListener() {
 
                     public void operationComplete(ChannelFuture future) throws Exception {

+ 38 - 37
server/src/main/java/com/alibaba/otter/canal/server/netty/handler/SessionHandler.java

@@ -3,6 +3,7 @@ package com.alibaba.otter.canal.server.netty.handler;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 
+import com.alibaba.otter.canal.server.netty.listener.ChannelFutureAggregator;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.exception.ExceptionUtils;
 import org.jboss.netty.buffer.ChannelBuffer;
@@ -51,6 +52,7 @@ public class SessionHandler extends SimpleChannelHandler {
 
     public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
         logger.info("message receives in session handler...");
+        long start = System.nanoTime();
         ChannelBuffer buffer = (ChannelBuffer) e.getMessage();
         Packet packet = Packet.parseFrom(buffer.readBytes(buffer.readableBytes()).array());
         ClientIdentity clientIdentity = null;
@@ -74,12 +76,13 @@ public class SessionHandler extends SimpleChannelHandler {
 
                         embeddedServer.subscribe(clientIdentity);
                         // ctx.setAttachment(clientIdentity);// 设置状态数据
-                        NettyUtils.ack(ctx.getChannel(), null);
+                        byte[] ackBytes = NettyUtils.ackPacket();
+                        NettyUtils.write(ctx.getChannel(), ackBytes, new ChannelFutureAggregator(sub.getDestination(),
+                                sub, packet.getType(), ackBytes.length, System.nanoTime() - start));
                     } else {
-                        NettyUtils.error(401,
-                            MessageFormatter.format("destination or clientId is null", sub.toString()).getMessage(),
-                            ctx.getChannel(),
-                            null);
+                        byte[] errorBytes = NettyUtils.errorPacket(401, MessageFormatter.format("destination or clientId is null", sub.toString()).getMessage());
+                        NettyUtils.write(ctx.getChannel(), errorBytes ,new ChannelFutureAggregator(sub.getDestination(),
+                                sub, packet.getType(), errorBytes.length, System.nanoTime() - start, (short) 401));
                     }
                     break;
                 case UNSUBSCRIPTION:
@@ -91,12 +94,13 @@ public class SessionHandler extends SimpleChannelHandler {
                         MDC.put("destination", clientIdentity.getDestination());
                         embeddedServer.unsubscribe(clientIdentity);
                         stopCanalInstanceIfNecessary(clientIdentity);// 尝试关闭
-                        NettyUtils.ack(ctx.getChannel(), null);
+                        byte[] ackBytes = NettyUtils.ackPacket();
+                        NettyUtils.write(ctx.getChannel(), ackBytes, new ChannelFutureAggregator(unsub.getDestination(),
+                                unsub, packet.getType(), ackBytes.length, System.nanoTime() - start));
                     } else {
-                        NettyUtils.error(401,
-                            MessageFormatter.format("destination or clientId is null", unsub.toString()).getMessage(),
-                            ctx.getChannel(),
-                            null);
+                        byte[] errorBytes = NettyUtils.errorPacket(401, MessageFormatter.format("destination or clientId is null", unsub.toString()).getMessage());
+                        NettyUtils.write(ctx.getChannel(), errorBytes, new ChannelFutureAggregator(unsub.getDestination(),
+                                unsub, packet.getType(), errorBytes.length, System.nanoTime() - start, (short) 401));
                     }
                     break;
                 case GET:
@@ -171,7 +175,8 @@ public class SessionHandler extends SimpleChannelHandler {
                                 output.writeBytes(2, rowEntries.get(i));
                             }
                             output.checkNoSpaceLeft();
-                            NettyUtils.write(ctx.getChannel(), body, null);
+                            NettyUtils.write(ctx.getChannel(), body, new ChannelFutureAggregator(get.getDestination(),
+                                    get, packet.getType(), body.length, System.nanoTime() - start, message.getId() == -1));
                             
                             // output.flush();
                             // byteBuffer.flip();
@@ -192,14 +197,14 @@ public class SessionHandler extends SimpleChannelHandler {
                                     }
                                 }
                             }
-                            packetBuilder.setBody(messageBuilder.build().toByteString());
-                            NettyUtils.write(ctx.getChannel(), packetBuilder.build().toByteArray(), null);// 输出数据
+                            byte[] body = packetBuilder.setBody(messageBuilder.build().toByteString()).build().toByteArray();
+                            NettyUtils.write(ctx.getChannel(), body, new ChannelFutureAggregator(get.getDestination(),
+                                    get, packet.getType(), body.length, System.nanoTime() - start, message.getId() == -1));// 输出数据
                         }
                     } else {
-                        NettyUtils.error(401,
-                            MessageFormatter.format("destination or clientId is null", get.toString()).getMessage(),
-                            ctx.getChannel(),
-                            null);
+                        byte[] errorBytes = NettyUtils.errorPacket(401, MessageFormatter.format("destination or clientId is null", get.toString()).getMessage());
+                        NettyUtils.write(ctx.getChannel(), errorBytes, new ChannelFutureAggregator(get.getDestination(),
+                                get, packet.getType(), errorBytes.length, System.nanoTime() - start, (short) 401));
                     }
                     break;
                 case CLIENTACK:
@@ -207,10 +212,9 @@ public class SessionHandler extends SimpleChannelHandler {
                     MDC.put("destination", ack.getDestination());
                     if (StringUtils.isNotEmpty(ack.getDestination()) && StringUtils.isNotEmpty(ack.getClientId())) {
                         if (ack.getBatchId() == 0L) {
-                            NettyUtils.error(402,
-                                MessageFormatter.format("batchId should assign value", ack.toString()).getMessage(),
-                                ctx.getChannel(),
-                                null);
+                            byte[] errorBytes = NettyUtils.errorPacket(402, MessageFormatter.format("batchId should assign value", ack.toString()).getMessage());
+                            NettyUtils.write(ctx.getChannel(), errorBytes, new ChannelFutureAggregator(ack.getDestination(),
+                                    ack, packet.getType(), errorBytes.length, System.nanoTime() - start, (short) 402));
                         } else if (ack.getBatchId() == -1L) { // -1代表上一次get没有数据,直接忽略之
                             // donothing
                         } else {
@@ -218,10 +222,9 @@ public class SessionHandler extends SimpleChannelHandler {
                             embeddedServer.ack(clientIdentity, ack.getBatchId());
                         }
                     } else {
-                        NettyUtils.error(401,
-                            MessageFormatter.format("destination or clientId is null", ack.toString()).getMessage(),
-                            ctx.getChannel(),
-                            null);
+                        byte[] errorBytes = NettyUtils.errorPacket(401, MessageFormatter.format("destination or clientId is null", ack.toString()).getMessage());
+                        NettyUtils.write(ctx.getChannel(), errorBytes, new ChannelFutureAggregator(ack.getDestination(),
+                                ack, packet.getType(), errorBytes.length, System.nanoTime() - start, (short) 401));
                     }
                     break;
                 case CLIENTROLLBACK:
@@ -237,25 +240,23 @@ public class SessionHandler extends SimpleChannelHandler {
                             embeddedServer.rollback(clientIdentity, rollback.getBatchId()); // 只回滚单个批次
                         }
                     } else {
-                        NettyUtils.error(401,
-                            MessageFormatter.format("destination or clientId is null", rollback.toString())
-                                .getMessage(),
-                            ctx.getChannel(),
-                            null);
+                        byte[] errorBytes = NettyUtils.errorPacket(401, MessageFormatter.format("destination or clientId is null", rollback.toString()).getMessage());
+                        NettyUtils.write(ctx.getChannel(), errorBytes, new ChannelFutureAggregator(rollback.getDestination(),
+                                rollback, packet.getType(), errorBytes.length, System.nanoTime() - start, (short) 401));
                     }
                     break;
                 default:
-                    NettyUtils.error(400, MessageFormatter.format("packet type={} is NOT supported!", packet.getType())
-                        .getMessage(), ctx.getChannel(), null);
+                    byte[] errorBytes = NettyUtils.errorPacket(400, MessageFormatter.format("packet type={} is NOT supported!", packet.getType()).getMessage());
+                    NettyUtils.write(ctx.getChannel(), errorBytes, new ChannelFutureAggregator(ctx.getChannel().getRemoteAddress().toString(),
+                            null, packet.getType(), errorBytes.length, System.nanoTime() - start, (short) 400));
                     break;
             }
         } catch (Throwable exception) {
-            NettyUtils.error(400,
-                MessageFormatter.format("something goes wrong with channel:{}, exception={}",
+            byte[] errorBytes = NettyUtils.errorPacket(400, MessageFormatter.format("something goes wrong with channel:{}, exception={}",
                     ctx.getChannel(),
-                    ExceptionUtils.getStackTrace(exception)).getMessage(),
-                ctx.getChannel(),
-                null);
+                    ExceptionUtils.getStackTrace(exception)).getMessage());
+            NettyUtils.write(ctx.getChannel(), errorBytes, new ChannelFutureAggregator(ctx.getChannel().getRemoteAddress().toString(),
+                    null, packet.getType(), errorBytes.length, System.nanoTime() - start, (short) 400));
         } finally {
             MDC.remove("destination");
         }

+ 179 - 0
server/src/main/java/com/alibaba/otter/canal/server/netty/listener/ChannelFutureAggregator.java

@@ -0,0 +1,179 @@
+package com.alibaba.otter.canal.server.netty.listener;
+
+import com.alibaba.otter.canal.protocol.CanalPacket;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.GeneratedMessage;
+import org.jboss.netty.channel.ChannelFuture;
+import org.jboss.netty.channel.ChannelFutureListener;
+
+import static com.alibaba.otter.canal.server.netty.CanalServerWithNettyProfiler.profiler;
+
+/**
+ * Netty write-completion listener that forwards the outcome of one client
+ * request (packet type, payload size, latency, error code, and any channel
+ * failure) to the server-wide profiler.
+ *
+ * @author Chuanyi Li
+ */
+public class ChannelFutureAggregator implements ChannelFutureListener {
+
+    private final ClientRequestResult result;
+
+    /** Successful GET-style response; {@code empty} marks an empty batch. */
+    public ChannelFutureAggregator(String destination, GeneratedMessage request, CanalPacket.PacketType type, int amount, long latency, boolean empty) {
+        this(destination, request, type, amount, latency, empty, (short) 0);
+    }
+
+    /** Successful response with a non-empty result. */
+    public ChannelFutureAggregator(String destination, GeneratedMessage request, CanalPacket.PacketType type, int amount, long latency) {
+        this(destination, request, type, amount, latency, false, (short) 0);
+    }
+
+    /** Error response identified by {@code errorCode}. */
+    public ChannelFutureAggregator(String destination, GeneratedMessage request, CanalPacket.PacketType type, int amount, long latency, short errorCode) {
+        this(destination, request, type, amount, latency, false, errorCode);
+    }
+
+    private ChannelFutureAggregator(String destination, GeneratedMessage request, CanalPacket.PacketType type, int amount, long latency, boolean empty, short errorCode) {
+        this.result = new ClientRequestResult.Builder()
+                .destination(destination)
+                .type(type)
+                .request(request)
+                .amount(amount)
+                .latency(latency)
+                .errorCode(errorCode)
+                .empty(empty)
+                .build();
+    }
+
+    @Override
+    public void operationComplete(ChannelFuture future) {
+        // profiling after I/O operation; capture a channel-level failure if any
+        if (future.getCause() != null) {
+            result.channelError = future.getCause();
+        }
+        profiler().profiling(result);
+    }
+
+    /**
+     * Immutable snapshot of one client request's outcome (only
+     * {@code channelError} is filled in later, once the write completes).
+     */
+    public static class ClientRequestResult {
+
+        private final String                 destination;
+        private final CanalPacket.PacketType type;
+        private final GeneratedMessage       request;
+        private final int                    amount;
+        private final long                   latency;
+        private final short                  errorCode;
+        private final boolean                empty;
+        // mutable: set by the listener after the I/O operation completes
+        private Throwable                    channelError;
+
+        private ClientRequestResult(Builder builder) {
+            this.destination = Preconditions.checkNotNull(builder.destination);
+            this.type = Preconditions.checkNotNull(builder.type);
+            this.request = builder.request;
+            this.amount = builder.amount;
+            this.latency = builder.latency;
+            this.errorCode = builder.errorCode;
+            this.empty = builder.empty;
+            this.channelError = builder.channelError;
+        }
+
+        // auto-generated
+        public static class Builder {
+
+            private String                 destination;
+            private CanalPacket.PacketType type;
+            private GeneratedMessage       request;
+            private int                    amount;
+            private long                   latency;
+            private short                  errorCode;
+            private boolean                empty;
+            private Throwable              channelError;
+
+            Builder destination(String destination) {
+                this.destination = destination;
+                return this;
+            }
+
+            Builder type(CanalPacket.PacketType type) {
+                this.type = type;
+                return this;
+            }
+
+            Builder request(GeneratedMessage request) {
+                this.request = request;
+                return this;
+            }
+
+            Builder amount(int amount) {
+                this.amount = amount;
+                return this;
+            }
+
+            Builder latency(long latency) {
+                this.latency = latency;
+                return this;
+            }
+
+            Builder errorCode(short errorCode) {
+                this.errorCode = errorCode;
+                return this;
+            }
+
+            Builder empty(boolean empty) {
+                this.empty = empty;
+                return this;
+            }
+
+            public Builder channelError(Throwable channelError) {
+                this.channelError = channelError;
+                return this;
+            }
+
+            public Builder fromPrototype(ClientRequestResult prototype) {
+                destination = prototype.destination;
+                type = prototype.type;
+                request = prototype.request;
+                amount = prototype.amount;
+                latency = prototype.latency;
+                errorCode = prototype.errorCode;
+                empty = prototype.empty;
+                channelError = prototype.channelError;
+                return this;
+            }
+
+            ClientRequestResult build() {
+                return new ClientRequestResult(this);
+            }
+        }
+        // getters
+        public String getDestination() {
+            return destination;
+        }
+
+        public CanalPacket.PacketType getType() {
+            return type;
+        }
+
+        public GeneratedMessage getRequest() {
+            return request;
+        }
+
+        public int getAmount() {
+            return amount;
+        }
+
+        public long getLatency() {
+            return latency;
+        }
+
+        public short getErrorCode() {
+            return errorCode;
+        }
+
+        public boolean getEmpty() {
+            return empty;
+        }
+
+        public Throwable getChannelError() {
+            return channelError;
+        }
+    }
+}

+ 53 - 0
server/src/test/java/com/alibaba/otter/canal/server/CanalServerWithEmbedded_FileModeTest.java

@@ -0,0 +1,53 @@
+package com.alibaba.otter.canal.server;
+
+import java.net.InetSocketAddress;
+import java.util.Arrays;
+
+import com.alibaba.otter.canal.instance.manager.model.Canal;
+import com.alibaba.otter.canal.instance.manager.model.CanalParameter;
+import com.alibaba.otter.canal.instance.manager.model.CanalParameter.*;
+
+/**
+ * Embedded-server test variant that persists meta data to a local file
+ * (MetaMode.LOCAL_FILE) instead of keeping it in memory/zookeeper.
+ */
+public class CanalServerWithEmbedded_FileModeTest extends BaseCanalServerWithEmbededTest {
+
+    // Builds the Canal configuration consumed by the base test class.
+    protected Canal buildCanal() {
+        Canal canal = new Canal();
+        canal.setId(1L);
+        canal.setName(DESTINATION);
+        canal.setDesc("my standalone server test ");
+
+        CanalParameter parameter = new CanalParameter();
+
+        // file-based meta storage, flushed once per second
+        parameter.setMetaMode(MetaMode.LOCAL_FILE);
+        parameter.setDataDir("./conf");
+        parameter.setMetaFileFlushPeriod(1000);
+        parameter.setHaMode(HAMode.HEARTBEAT);
+        parameter.setIndexMode(IndexMode.MEMORY_META_FAILBACK);
+
+        parameter.setStorageMode(StorageMode.MEMORY);
+        parameter.setMemoryStorageBufferSize(32 * 1024);
+
+        // primary + fallback addresses point at the same MySQL host here
+        parameter.setSourcingType(SourcingType.MYSQL);
+        parameter.setDbAddresses(Arrays.asList(new InetSocketAddress(MYSQL_ADDRESS, 3306),
+            new InetSocketAddress(MYSQL_ADDRESS, 3306)));
+        parameter.setDbUsername(USERNAME);
+        parameter.setDbPassword(PASSWORD);
+        // NOTE(review): the JSON contains "position":332L — the "L" suffix is not
+        // valid JSON; verify the position parser tolerates it.
+        parameter.setPositions(Arrays.asList("{\"journalName\":\"mysql-bin.000001\",\"position\":332L,\"timestamp\":\"1505998863000\"}",
+            "{\"journalName\":\"mysql-bin.000001\",\"position\":332L,\"timestamp\":\"1505998863000\"}"));
+
+        parameter.setSlaveId(1234L);
+
+        parameter.setDefaultConnectionTimeoutInSeconds(30);
+        parameter.setConnectionCharset("UTF-8");
+        parameter.setConnectionCharsetNumber((byte) 33);
+        parameter.setReceiveBufferSize(8 * 1024);
+        parameter.setSendBufferSize(8 * 1024);
+
+        // heartbeat-based source detection disabled for this test
+        parameter.setDetectingEnable(false);
+        parameter.setDetectingIntervalInSeconds(10);
+        parameter.setDetectingRetryTimes(3);
+        parameter.setDetectingSQL(DETECTING_SQL);
+
+        canal.setCanalParameter(parameter);
+        return canal;
+    }
+}

+ 17 - 1
sink/src/main/java/com/alibaba/otter/canal/sink/entry/EntryEventSink.java

@@ -39,6 +39,7 @@ public class EntryEventSink extends AbstractCanalEventSink<List<CanalEntry.Entry
     protected long                 emptyTransctionThresold       = 8192;                                         // once this many empty transaction headers accumulate, emit one aggregated entry
     protected volatile long        lastEmptyTransactionTimestamp = 0L;
     protected AtomicLong           lastEmptyTransactionCount     = new AtomicLong(0L);
+    private AtomicLong             eventsSinkBlockingTime        = new AtomicLong(0L);
 
     public EntryEventSink(){
         addHandler(new HeartBeatEntryEventHandler());
@@ -147,16 +148,27 @@ public class EntryEventSink extends AbstractCanalEventSink<List<CanalEntry.Entry
         for (CanalEventDownStreamHandler<List<Event>> handler : getHandlers()) {
             events = handler.before(events);
         }
-
+        long blockingStart = 0L;
         int fullTimes = 0;
         do {
             if (eventStore.tryPut(events)) {
+                if (fullTimes > 0) {
+                    eventsSinkBlockingTime.addAndGet(System.nanoTime() - blockingStart);
+                }
                 for (CanalEventDownStreamHandler<List<Event>> handler : getHandlers()) {
                     events = handler.after(events);
                 }
                 return true;
             } else {
+                if (fullTimes == 0) {
+                    blockingStart = System.nanoTime();
+                }
                 applyWait(++fullTimes);
+                if (fullTimes % 1000 == 0) {
+                    long nextStart = System.nanoTime();
+                    eventsSinkBlockingTime.addAndGet(nextStart - blockingStart);
+                    blockingStart = nextStart;
+                }
             }
 
             for (CanalEventDownStreamHandler<List<Event>> handler : getHandlers()) {
@@ -202,4 +214,8 @@ public class EntryEventSink extends AbstractCanalEventSink<List<CanalEntry.Entry
         this.emptyTransctionThresold = emptyTransctionThresold;
     }
 
    /**
     * Returns the accumulated time, in nanoseconds, that this sink has spent
     * blocked retrying {@code eventStore.tryPut} because the event store was
     * full (measured via {@code System.nanoTime()} deltas). Exposed for
     * metrics collection; the returned {@link AtomicLong} is the live counter,
     * not a snapshot.
     */
    public AtomicLong getEventsSinkBlockingTime() {
        return eventsSinkBlockingTime;
    }
+
 }

+ 19 - 0
store/src/main/java/com/alibaba/otter/canal/store/memory/MemoryEventStoreWithBuffer.java

@@ -562,4 +562,23 @@ public class MemoryEventStoreWithBuffer extends AbstractCanalStoreScavenge imple
         this.ddlIsolation = ddlIsolation;
     }
 
    /**
     * Exposes the store's internal put-sequence counter for metrics
     * collection. Returns the live {@link AtomicLong}, not a snapshot;
     * callers should only read it.
     */
    public AtomicLong getPutSequence() {
        return putSequence;
    }
+
    /**
     * Exposes the store's internal ack-sequence counter for metrics
     * collection. Returns the live {@link AtomicLong}, not a snapshot;
     * callers should only read it.
     */
    public AtomicLong getAckSequence() {
        return ackSequence;
    }
+
    /**
     * Exposes the store's put-side memory-size accumulator for metrics
     * collection (presumably bytes written into the buffer — confirm against
     * the field's usage). Returns the live {@link AtomicLong}, not a snapshot.
     */
    public AtomicLong getPutMemSize() {
        return putMemSize;
    }
+
    /**
     * Exposes the store's ack-side memory-size accumulator for metrics
     * collection (presumably bytes acknowledged/released — confirm against
     * the field's usage). Returns the live {@link AtomicLong}, not a snapshot.
     */
    public AtomicLong getAckMemSize() {
        return ackMemSize;
    }
+
    /**
     * Returns the batch mode this store was configured with, so metrics
     * exporters can interpret the size/sequence counters appropriately.
     */
    public BatchMode getBatchMode() {
        return batchMode;
    }
 }