Bladeren bron

first commit

songwenbin 4 jaren geleden
commit
76199e1828
100 gewijzigde bestanden met toevoegingen van 118363 en 0 verwijderingen
  1. BIN
      .gradle/5.4/executionHistory/executionHistory.bin
  2. BIN
      .gradle/5.4/executionHistory/executionHistory.lock
  3. BIN
      .gradle/5.4/fileChanges/last-build.bin
  4. BIN
      .gradle/5.4/fileContent/fileContent.lock
  5. BIN
      .gradle/5.4/fileHashes/fileHashes.bin
  6. BIN
      .gradle/5.4/fileHashes/fileHashes.lock
  7. BIN
      .gradle/5.4/fileHashes/resourceHashesCache.bin
  8. 0 0
      .gradle/5.4/gc.properties
  9. BIN
      .gradle/5.4/javaCompile/classAnalysis.bin
  10. BIN
      .gradle/5.4/javaCompile/jarAnalysis.bin
  11. BIN
      .gradle/5.4/javaCompile/javaCompile.lock
  12. BIN
      .gradle/5.4/javaCompile/taskHistory.bin
  13. BIN
      .gradle/buildOutputCleanup/buildOutputCleanup.lock
  14. 2 0
      .gradle/buildOutputCleanup/cache.properties
  15. BIN
      .gradle/buildOutputCleanup/outputFiles.bin
  16. 0 0
      .gradle/vcs-1/gc.properties
  17. 0 0
      .gradle/vcsWorkingDirs/gc.properties
  18. 23 0
      README.md
  19. 28 0
      build.gradle
  20. 16 0
      common/build.gradle
  21. 14 0
      common/src/main/java/com/gyee/ygys/exception/IncorrectParameterException.java
  22. 25 0
      common/src/main/java/com/gyee/ygys/exception/WisdomException.java
  23. 29 0
      common/src/main/java/com/gyee/ygys/exception/WisdomRuntimeException.java
  24. 50 0
      common/src/main/java/com/gyee/ygys/protocol/BitMapGroup.java
  25. 65 0
      common/src/main/java/com/gyee/ygys/protocol/BitMapMessage.java
  26. 85 0
      common/src/main/java/com/gyee/ygys/protocol/BitMapMessageParser.java
  27. 45 0
      common/src/main/java/com/gyee/ygys/utils/BitsetConvert.java
  28. 222 0
      common/src/main/java/com/gyee/ygys/utils/BytesUtil.java
  29. 604 0
      common/src/main/java/com/gyee/ygys/utils/DateUtil.java
  30. 129 0
      common/src/main/java/com/gyee/ygys/utils/EncryptUtil.java
  31. 22 0
      common/src/main/java/com/gyee/ygys/utils/RequestMapToJsonUtil.java
  32. 46 0
      common/src/main/java/com/gyee/ygys/utils/StringUtil.java
  33. 11 0
      golden-example/build.gradle
  34. 282 0
      golden-example/src/main/java/com/rtdb/test/ArchiveTest.java
  35. 1326 0
      golden-example/src/main/java/com/rtdb/test/BaseTest.java
  36. 71 0
      golden-example/src/main/java/com/rtdb/test/EquationTest.java
  37. 1147 0
      golden-example/src/main/java/com/rtdb/test/HistorianTest.java
  38. 431 0
      golden-example/src/main/java/com/rtdb/test/ServerTest.java
  39. 493 0
      golden-example/src/main/java/com/rtdb/test/SnapshotTest.java
  40. BIN
      golden-example/src/main/lib/commons-beanutils-1.8.3.jar
  41. BIN
      golden-example/src/main/lib/commons-logging-1.1.1.jar
  42. BIN
      golden-example/src/main/lib/golden-java-sdk-3.0.27.jar
  43. BIN
      golden-example/src/main/lib/protobuf-java-2.6.1.jar
  44. 14 0
      golden-history-opentsdb/README.md
  45. 29 0
      golden-history-opentsdb/build.gradle
  46. 22 0
      golden-history-opentsdb/src/main/java/com/gyee/wisdom/Bootstrap.java
  47. BIN
      golden-history-opentsdb/src/main/lib/commons-beanutils-1.8.3.jar
  48. BIN
      golden-history-opentsdb/src/main/lib/commons-logging-1.1.1.jar
  49. BIN
      golden-history-opentsdb/src/main/lib/golden-java-sdk-3.0.27.jar
  50. BIN
      golden-history-opentsdb/src/main/lib/protobuf-java-2.6.1.jar
  51. 6 0
      golden-history-opentsdb/src/main/resources/application.yaml
  52. 11 0
      golden-history-opentsdb/src/main/resources/banner.txt
  53. 64 0
      golden-history-opentsdb/src/main/resources/log4j2.xml
  54. 14 0
      golden-realtime-kafka/README.md
  55. 33 0
      golden-realtime-kafka/build.gradle
  56. 23 0
      golden-realtime-kafka/src/main/java/com/gyee/wisdom/ApplicationReadyEventListener.java
  57. 22 0
      golden-realtime-kafka/src/main/java/com/gyee/wisdom/Bootstrap.java
  58. 252 0
      golden-realtime-kafka/src/main/java/com/gyee/wisdom/CalculateServer.java
  59. 29 0
      golden-realtime-kafka/src/main/java/com/gyee/wisdom/config/ConfigProperties.java
  60. 62 0
      golden-realtime-kafka/src/main/java/com/gyee/wisdom/config/GoldenConfig.java
  61. 363 0
      golden-realtime-kafka/src/main/java/com/gyee/wisdom/config/GoldenConnectionPool.java
  62. 28 0
      golden-realtime-kafka/src/main/java/com/gyee/wisdom/model/TagPoint.java
  63. 28 0
      golden-realtime-kafka/src/main/java/com/gyee/wisdom/model/TagPointData.java
  64. 78 0
      golden-realtime-kafka/src/main/java/com/gyee/wisdom/service/CacheService.java
  65. BIN
      golden-realtime-kafka/src/main/lib/commons-beanutils-1.8.3.jar
  66. BIN
      golden-realtime-kafka/src/main/lib/commons-logging-1.1.1.jar
  67. BIN
      golden-realtime-kafka/src/main/lib/golden-java-sdk-3.0.27.jar
  68. BIN
      golden-realtime-kafka/src/main/lib/protobuf-java-2.6.1.jar
  69. 23 0
      golden-realtime-kafka/src/main/resources/application.yaml
  70. 8 0
      golden-realtime-kafka/src/main/resources/banner.txt
  71. 64 0
      golden-realtime-kafka/src/main/resources/log4j2.xml
  72. 109494 0
      golden-realtime-kafka/src/main/resources/tag-point.csv
  73. 35 0
      gradle.properties
  74. BIN
      gradle/wrapper/gradle-wrapper.jar
  75. 6 0
      gradle/wrapper/gradle-wrapper.properties
  76. 172 0
      gradlew
  77. 84 0
      gradlew.bat
  78. 34 0
      kafka-connectors/build.gradle
  79. 127 0
      kafka-connectors/src/main/java/com/gyee/ygys/connector/file/FileStreamSinkConnector.java
  80. 163 0
      kafka-connectors/src/main/java/com/gyee/ygys/connector/file/FileStreamSinkTask.java
  81. 82 0
      kafka-connectors/src/main/java/com/gyee/ygys/connector/file/FileStreamSourceConnector.java
  82. 223 0
      kafka-connectors/src/main/java/com/gyee/ygys/connector/file/FileStreamSourceTask.java
  83. 100 0
      kafka-connectors/src/main/java/com/gyee/ygys/connector/golden/GoldenConfig.java
  84. 384 0
      kafka-connectors/src/main/java/com/gyee/ygys/connector/golden/GoldenConnectionPool.java
  85. 54 0
      kafka-connectors/src/main/java/com/gyee/ygys/connector/golden/GoldenSourceConnector.java
  86. 41 0
      kafka-connectors/src/main/java/com/gyee/ygys/connector/golden/GoldenSourceTask.java
  87. 132 0
      kafka-connectors/src/main/java/com/gyee/ygys/connector/kudu/KuduSinkConfig.java
  88. 111 0
      kafka-connectors/src/main/java/com/gyee/ygys/connector/kudu/KuduSinkConnector.java
  89. 151 0
      kafka-connectors/src/main/java/com/gyee/ygys/connector/kudu/KuduSinkTask.java
  90. 298 0
      kafka-connectors/src/main/java/com/gyee/ygys/connector/kudu/KuduWriter.java
  91. 21 0
      kafka-connectors/src/main/java/com/gyee/ygys/connector/redis/Constants.java
  92. 71 0
      kafka-connectors/src/main/java/com/gyee/ygys/connector/redis/GreeterSink.java
  93. 68 0
      kafka-connectors/src/main/java/com/gyee/ygys/connector/redis/GreeterTask.java
  94. 78 0
      kafka-connectors/src/main/java/com/gyee/ygys/connector/redis/NameSource.java
  95. 91 0
      kafka-connectors/src/main/java/com/gyee/ygys/connector/redis/NameTask.java
  96. 4 0
      kafka-connectors/src/main/java/com/gyee/ygys/connector/redis/package-info.java
  97. BIN
      kafka-connectors/src/main/lib/commons-beanutils-1.8.3.jar
  98. BIN
      kafka-connectors/src/main/lib/commons-logging-1.1.1.jar
  99. BIN
      kafka-connectors/src/main/lib/golden-java-sdk-3.0.27.jar
  100. 0 0
      kafka-connectors/src/main/lib/protobuf-java-2.6.1.jar

BIN
.gradle/5.4/executionHistory/executionHistory.bin


BIN
.gradle/5.4/executionHistory/executionHistory.lock


BIN
.gradle/5.4/fileChanges/last-build.bin


BIN
.gradle/5.4/fileContent/fileContent.lock


BIN
.gradle/5.4/fileHashes/fileHashes.bin


BIN
.gradle/5.4/fileHashes/fileHashes.lock


BIN
.gradle/5.4/fileHashes/resourceHashesCache.bin


+ 0 - 0
.gradle/5.4/gc.properties


BIN
.gradle/5.4/javaCompile/classAnalysis.bin


BIN
.gradle/5.4/javaCompile/jarAnalysis.bin


BIN
.gradle/5.4/javaCompile/javaCompile.lock


BIN
.gradle/5.4/javaCompile/taskHistory.bin


BIN
.gradle/buildOutputCleanup/buildOutputCleanup.lock


+ 2 - 0
.gradle/buildOutputCleanup/cache.properties

@@ -0,0 +1,2 @@
+#Thu May 14 17:36:49 CST 2020
+gradle.version=5.4

BIN
.gradle/buildOutputCleanup/outputFiles.bin


+ 0 - 0
.gradle/vcs-1/gc.properties


+ 0 - 0
.gradle/vcsWorkingDirs/gc.properties


+ 23 - 0
README.md

@@ -0,0 +1,23 @@
+# ygys
+    the Foolish Old Man, who removed the mountains.
+    the determination to win victory and the courage to surmount every difficulty
+
+
+##模块:
+### common -- 公共类型和方法
+* data -- 公共数据类型
+* utils -- 公共方法
+
+
+### opentsdb-client -- OpenTsdb 客户端API封装
+
+### kafka-connectors -- kafka 连接器,包括:
+*
+
+
+
+
+
+
+
+

+ 28 - 0
build.gradle

@@ -0,0 +1,28 @@
+subprojects {
+    apply plugin: "java"
+    apply plugin: "idea"
+    apply plugin: "eclipse"
+    apply plugin: "maven"
+
+    [compileJava,compileTestJava,javadoc]*.options*.encoding = "UTF-8"
+
+    configurations {
+        all*.exclude module: "spring-boot-starter-tomcat"
+        all*.exclude module: "spring-boot-starter-logging"
+    }
+
+    dependencies{
+        //添加如下的依赖后,可以不安装lombok插件
+        compileOnly 'org.projectlombok:lombok:1.18.12'
+        annotationProcessor 'org.projectlombok:lombok:1.18.12'
+//        testAnnotationProcessor 'org.projectlombok:lombok:$lombokVersion'
+//        testCompileOnly 'org.projectlombok:lombok:$lombokVersion'
+    }
+
+    repositories {
+        mavenLocal()
+        maven { url "http://maven.aliyun.com/nexus/content/groups/public/" }
+        mavenCentral()
+    }
+}
+

+ 16 - 0
common/build.gradle

@@ -0,0 +1,16 @@
+buildscript {
+}
+
+dependencies {
+
+    compile("com.google.code.findbugs:jsr305:$jsr305Version")
+    compile("joda-time:joda-time:$jodaTimeVersion")
+    compile("commons-codec:commons-codec:$commonsCodecVersion")
+    compile("org.apache.commons:commons-lang3:$commonsLang3Version")
+    compile("com.google.guava:guava:$guavaVersion")
+    compile 'com.alibaba:fastjson:1.2.17'
+    //compile 'org.apache.commons:commons-lang3:3.4'
+    compile group: 'commons-beanutils', name: 'commons-beanutils', version: '1.9.3'
+    //compile 'com.google.guava:guava:19.0'
+
+}

+ 14 - 0
common/src/main/java/com/gyee/ygys/exception/IncorrectParameterException.java

@@ -0,0 +1,14 @@
+package com.gyee.ygys.exception;
+
+public class IncorrectParameterException extends RuntimeException {
+
+    private static final long serialVersionUID = 601995650578985289L;
+
+    public IncorrectParameterException(String message) {
+        super(message);
+    }
+
+    public IncorrectParameterException(String message, Throwable cause) {
+        super(message, cause);
+    }
+}

+ 25 - 0
common/src/main/java/com/gyee/ygys/exception/WisdomException.java

@@ -0,0 +1,25 @@
+package com.gyee.ygys.exception;
+
+/**
+ * @author songwb<songwenbin@gyee-china.com>
+ */
+public class WisdomException extends Exception {
+    public WisdomException() {
+    }
+
+    public WisdomException(String message) {
+        super(message);
+    }
+
+    public WisdomException(String message, Throwable cause) {
+        super(message, cause);
+    }
+
+    public WisdomException(Throwable cause) {
+        super(cause);
+    }
+
+    public WisdomException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) {
+        super(message, cause, enableSuppression, writableStackTrace);
+    }
+}

+ 29 - 0
common/src/main/java/com/gyee/ygys/exception/WisdomRuntimeException.java

@@ -0,0 +1,29 @@
+package com.gyee.ygys.exception;
+
+/**
+ * @author songwb<songwenbin@gyee-china.com>
+ */
+public class WisdomRuntimeException extends RuntimeException{
+    public WisdomRuntimeException() {
+        super();
+    }
+
+    public WisdomRuntimeException(String message) {
+        super(message);
+    }
+
+    public WisdomRuntimeException(String message, Throwable cause) {
+        super(message, cause);
+    }
+
+    public WisdomRuntimeException(Throwable cause) {
+        super(cause);
+    }
+
+    public WisdomRuntimeException(String message,
+                                 Throwable cause,
+                                 boolean enableSuppression,
+                                 boolean writableStackTrace) {
+        super(message, cause, enableSuppression, writableStackTrace);
+    }
+}

+ 50 - 0
common/src/main/java/com/gyee/ygys/protocol/BitMapGroup.java

@@ -0,0 +1,50 @@
+package com.gyee.ygys.protocol;
+
+import com.gyee.ygys.utils.BytesUtil;
+import lombok.Data;
+
+import java.io.ByteArrayOutputStream;
+import java.util.BitSet;
+
+/**
+ * 对应BitMapMessage中的分组数据区,
+ * 数据格式:
+ *  6.1   数据组1长度 -- 4字节
+ *  6.2   数据组1二级bitmap长度 -- 4字节
+ *  6.3   数据组1二级bitmap   --  N字节,由6.2确定
+ *  6.4   数据组1数据, -- 变长,X
+ */
+@Data
+public class BitMapGroup {
+    private short groupIndex;
+    private int groupLength;
+    private int bitMapLength;
+    private BitSet bitMapL2;
+    private byte[] data;
+
+    public byte[] toBytes() {
+
+        int len = bitMapLength + data.length +10;
+        byte[] result = new byte[len];
+        int offset = 0;
+
+//        byte[] giBytes = BytesUtil.short2Byte(groupId);
+//        System.arraycopy(giBytes,0,result,offset,2);
+//        offset += 2;
+
+        byte[] glBytes = BytesUtil.int2Byte(groupLength);
+        System.arraycopy(glBytes,0, result, offset, 4);
+        offset += 4;
+
+        byte[] bmlBytes = BytesUtil.int2Byte(bitMapLength);
+        System.arraycopy(bmlBytes,0,result,offset,4);
+        offset += 4;
+
+        System.arraycopy(bitMapL2.toByteArray(), 0, result, offset, bitMapLength);
+        offset += bitMapLength;
+
+        System.arraycopy(data, 0, result, offset, data.length);
+
+        return result;
+    }
+}

+ 65 - 0
common/src/main/java/com/gyee/ygys/protocol/BitMapMessage.java

@@ -0,0 +1,65 @@
+package com.gyee.ygys.protocol;
+
+import com.gyee.ygys.utils.BytesUtil;
+import lombok.Data;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.HashMap;
+
+/**
+ * 描述:实现基于两级BitMap的TCP消息格式, 适用于带宽资源紧缺的场景,例如,4M带宽的电力专线网络传输10万点秒级传感器数据
+ * 目标: 数据包尽可能小,减少带宽占用
+ * 设计思想:
+ * 1、仅传递变化的数据
+ * 2、基于bitmap去标识数据点是否变化,及数据的顺序
+ * 3、将数据点分组,区分高频变化的点和低频变化的点,区分数据采集来源
+ * 4、根据数据点分组定义一级bitmap,尽可能减少bitmap的长度,提高bitmap解析效率
+ * 数据格式:
+ * 1、 消息头标识 -- 2字节 0xAA, 0x55
+ * 2、 版本标识 -- 1字节
+ * 3、 数据包长度 -- 4字节
+ * 4、 一级BitMap -- 64字节, 最大支持512个分组
+ * 5、 预留字段 -- 1字节
+ * 6、 数据区 -- 变长
+ * 6.1   数据组标识 -- 2字节
+ * 6.2   数据组1长度 -- 4字节
+ * 6.3   数据组1二级bitmap长度 -- 4字节
+ * 6.4   数据组1二级bitmap   --  N字节,由6.2确定
+ * 6.5   数据组1数据, -- 变长,X
+ * 6.xx  重复6.1~6.5
+ * 7、 消息结束标识 -- 2字节 0x55, 0xAA
+ */
+@Data
+public class BitMapMessage {
+
+    private byte[] msgHead = new byte[] {(byte) 0xAA, (byte)0x55};
+    private byte version = '1';
+    private int msgLength;
+    private BitSet bitMapL1 = new BitSet(512);
+    private byte reserve = '0';
+    private ArrayList<BitMapGroup> groups;
+    private byte[] msgTail = new byte[] {(byte) 0x55, (byte)0xAA};
+
+    public byte[] toBytes() throws IOException {
+
+        ByteArrayOutputStream buf = new ByteArrayOutputStream();
+        DataOutputStream out = new DataOutputStream(buf);
+        out.write(msgHead);
+        out.writeByte(version);
+        out.write(msgLength);
+        out.write(bitMapL1.toByteArray());
+        out.writeByte(reserve);
+        for(BitMapGroup bmg : groups) {
+            out.write(bmg.toBytes());
+        }
+        out.write(msgTail);
+
+        return buf.toByteArray();
+    }
+
+
+}

+ 85 - 0
common/src/main/java/com/gyee/ygys/protocol/BitMapMessageParser.java

@@ -0,0 +1,85 @@
+package com.gyee.ygys.protocol;
+
+import com.gyee.ygys.utils.BytesUtil;
+
+import java.util.ArrayList;
+
+/**
+ * 将byte[] 转换为BitMapMessage类型
+ */
+public class BitMapMessageParser {
+
+    /**
+     * 将byte数组转换为BitMapGroup对象
+     * 数据格式:
+     *  6.1   数据组1长度 -- 4字节
+     *  6.2   数据组1二级bitmap长度 -- 4字节
+     *  6.3   数据组1二级bitmap   --  N字节,由6.2确定
+     *  6.4   数据组1数据, -- 变长,X
+     */
+    public static BitMapGroup parserBitMapGroup(byte[] array) {
+
+        BitMapGroup bmg = new BitMapGroup();
+        int offset = 0;
+//        bmg.setGroupId(BytesUtil.getShort(array, offset));
+//        offset += 2;
+
+        bmg.setGroupLength(BytesUtil.getInt32(array,offset));
+        offset += 4;
+
+        bmg.setBitMapLength(BytesUtil.getInt32(array, offset));
+        offset += 4;
+
+        bmg.setBitMapL2(BytesUtil.getBitSet(array,offset, bmg.getBitMapLength()));
+        offset += bmg.getBitMapLength();
+
+        bmg.setData(BytesUtil.getSubBytes(array, offset, array.length - bmg.getBitMapLength() -10));
+
+        return bmg;
+    }
+
+
+    /**
+     * 描述:将byte数组转换为BitMapMessage对象
+     * 数据格式:
+     * 1、 消息头标识 -- 2字节 0xAA, 0x55
+     * 2、 版本标识 -- 1字节
+     * 3、 数据包长度 -- 4字节
+     * 4、 一级BitMap -- 64字节, 最大支持512个分组
+     * 5、 预留字段 -- 1字节
+     * 6、 数据区 -- 变长
+     * 6.1   数据组1长度 -- 4字节
+     * 6.2   数据组1二级bitmap长度 -- 4字节
+     * 6.3   数据组1二级bitmap   --  N字节,由6.2确定
+     * 6.4   数据组1数据, -- 变长,X
+     * 6.xx  重复6.1~6.4
+     * 7、 消息结束标识 -- 2字节 0x55, 0xAA
+     */
+    public static BitMapMessage parserBitMapMessage(byte[] array) {
+
+        BitMapMessage bmm = new BitMapMessage();
+        int offset = 2;
+        bmm.setVersion(array[offset]);
+        offset += 1;
+
+        bmm.setMsgLength(BytesUtil.getInt32(array,offset));
+        offset += 4;
+
+        bmm.setBitMapL1(BytesUtil.getBitSet(array,offset, 64));
+        offset += 64;
+
+        offset += 1; //越过保留位
+        ArrayList<BitMapGroup> groups = new ArrayList<>();
+        while(offset < array.length-3) {
+            int groupLength = BytesUtil.getInt32(array,offset);
+            byte[] groupBytes = BytesUtil.getSubBytes(array, offset, groupLength);
+            BitMapGroup bmg = parserBitMapGroup(groupBytes);
+            groups.add(bmg);
+            offset += groupLength;
+        }
+        bmm.setGroups(groups);
+
+        return bmm;
+    }
+
+}

+ 45 - 0
common/src/main/java/com/gyee/ygys/utils/BitsetConvert.java

@@ -0,0 +1,45 @@
+package com.gyee.ygys.utils;
+
+import java.util.Arrays;
+import java.util.BitSet;
+
+public class BitsetConvert {
+    public static byte[] bitSet2ByteArray(BitSet bitSet) {
+        byte[] bytes = new byte[bitSet.size() / 8];
+        for (int i = 0; i < bitSet.size(); i++) {
+            int index = i / 8;
+            int offset = 7 - i % 8;
+            bytes[index] |= (bitSet.get(i) ? 1 : 0) << offset;
+        }
+        return bytes;
+    }
+
+    public static BitSet byteArray2BitSet(byte[] bytes) {
+        BitSet bitSet = new BitSet(bytes.length * 8);
+        int index = 0;
+        for (int i = 0; i < bytes.length; i++) {
+            for (int j = 7; j >= 0; j--) {
+                bitSet.set(index++, (bytes[i] & (1 << j)) >> j == 1 ? true
+                        : false);
+            }
+        }
+        return bitSet;
+    }
+
+    public static void main(String[] args) {
+        BitSet bitSet = new BitSet();
+        bitSet.set(0, true);
+        bitSet.set(1, true);
+        bitSet.set(63, true);
+
+        //将BitSet对象转成byte数组
+        byte[] bytes = bitSet2ByteArray(bitSet);
+        System.out.println(Arrays.toString(bytes));
+
+        //在将byte数组转回来
+        bitSet = byteArray2BitSet(bytes);
+        System.out.println(bitSet.get(0));
+        System.out.println(bitSet.get(10));
+    }
+
+}

+ 222 - 0
common/src/main/java/com/gyee/ygys/utils/BytesUtil.java

@@ -0,0 +1,222 @@
+package com.gyee.ygys.utils;
+
+import java.util.BitSet;
+
+public class BytesUtil {
+
+    public static short getShort(byte[] bytes, int offset)
+    {
+        byte high = bytes[offset];
+        byte low = bytes[offset + 1];
+        short z = (short)(((high & 0x00FF) << 8) | (0x00FF & low));
+        return z;
+    }
+
+    public static int getInt32(byte[] bytes, int offset)
+    {
+        int value=0;
+        for(int i = offset; i < offset + 4; i++)
+        {
+            int shift= (3-i) * 8;
+            value +=(bytes[i] & 0xFF) << shift;
+        }
+        return value;
+    }
+
+    public static long getLong(byte[] bytes, int offset)
+    {
+        long num = 0;
+        for (int ix = offset; ix < offset+8; ++ix) {
+            num <<= 8;
+            num |= (bytes[ix] & 0xff);
+        }
+
+        return num;
+    }
+
+    public static BitSet getBitSet(byte[] bytes, int offset, int length)
+    {
+        byte[] buf = new byte[length];
+        System.arraycopy(bytes, offset,buf,0,length);
+        return BitSet.valueOf(buf);
+    }
+
+    public static byte[] getSubBytes(byte[] bytes, int offset, int length)
+    {
+        byte[] buf = new byte[length];
+        System.arraycopy(bytes, offset,buf,0,length);
+        return buf;
+    }
+
+
+
+    public static int swap_16(int date)
+    {
+        int a,b;
+        a = (date>>8)&0x00ff;
+        b = (date<<8)&0xff00;
+        return (a+b);
+    }
+
+    public static byte[] short2Byte(short x)
+    {
+        byte high = (byte) (0x00FF & (x>>8));
+        byte low = (byte) (0x00FF & x);
+        byte[] bytes = new byte[2];
+        bytes[0] = high;
+        bytes[1] = low;
+        return bytes;
+    }
+
+    public static short byte2short(byte[] bytes)
+    {
+        byte high = bytes[0];
+        byte low = bytes[1];
+        short z = (short)(((high & 0x00FF) << 8) | (0x00FF & low));
+        return z;
+    }
+
+    public static byte[] int2Byte(int i)
+    {
+        byte[] result = new byte[4];
+        result[0] = (byte)((i >> 24) & 0xFF);
+        result[1] = (byte)((i >> 16) & 0xFF);
+        result[2] = (byte)((i >> 8) & 0xFF);
+        result[3] = (byte)(i & 0xFF);
+        return result;
+    }
+
+    public static int byte2Int(byte[] bytes)
+    {
+        int value=0;
+        for(int i = 0; i < 4; i++)
+        {
+            int shift= (3-i) * 8;
+            value +=(bytes[i] & 0xFF) << shift;
+        }
+        return value;
+    }
+
+    public static byte[] int2Bytes(int num) {
+        byte[] byteNum = new byte[4];
+        for (int ix = 0; ix < 4; ++ix) {
+            int offset = 32 - (ix + 1) * 8;
+            byteNum[ix] = (byte) ((num >> offset) & 0xff);
+        }
+        return byteNum;
+    }
+
+    public static int bytes2Int(byte[] byteNum) {
+        int num = 0;
+        for (int ix = 0; ix < 4; ++ix) {
+            num <<= 8;
+            num |= (byteNum[ix] & 0xff);
+        }
+        return num;
+    }
+
+    public static byte int2OneByte(int num) {
+        return (byte) (num & 0x000000ff);
+    }
+
+    public static int oneByte2Int(byte byteNum) {
+        return byteNum > 0 ? byteNum : (128 + (128 + byteNum));
+    }
+
+    public static byte[] long2Bytes(long num) {
+        byte[] byteNum = new byte[8];
+        for (int ix = 0; ix < 8; ++ix) {
+            int offset = 64 - (ix + 1) * 8;
+            byteNum[ix] = (byte) ((num >> offset) & 0xff);
+        }
+        return byteNum;
+    }
+
+    public static long bytes2Long(byte[] byteNum) {
+        long num = 0;
+        for (int ix = 0; ix < 8; ++ix) {
+            num <<= 8;
+            num |= (byteNum[ix] & 0xff);
+        }
+        return num;
+    }
+
+
+    public static byte[] float2byte(float f) {
+
+        // 把float转换为byte[]
+        int fbit = Float.floatToIntBits(f);
+
+        byte[] b = new byte[4];
+        for (int i = 0; i < 4; i++) {
+            b[i] = (byte) (fbit >> (24 - i * 8));
+        }
+
+        // 翻转数组
+        int len = b.length;
+        // 建立一个与源数组元素类型相同的数组
+        byte[] dest = new byte[len];
+        // 为了防止修改源数组,将源数组拷贝一份副本
+        System.arraycopy(b, 0, dest, 0, len);
+        byte temp;
+        // 将顺位第i个与倒数第i个交换
+        for (int i = 0; i < len / 2; ++i) {
+            temp = dest[i];
+            dest[i] = dest[len - i - 1];
+            dest[len - i - 1] = temp;
+        }
+
+        return dest;
+    }
+
+
+    public static float byte2float(byte[] b, int index) {
+        int l;
+        l = b[index + 0];
+        l &= 0xff;
+        l |= ((long) b[index + 1] << 8);
+        l &= 0xffff;
+        l |= ((long) b[index + 2] << 16);
+        l &= 0xffffff;
+        l |= ((long) b[index + 3] << 24);
+
+        return Float.intBitsToFloat(l);
+    }
+
+
+    //浮点到字节转换
+    public static byte[] double2Byte(double d){
+        byte[] b=new byte[8];
+        long l=Double.doubleToLongBits(d);
+        for(int i=0;i<b.length;i++){
+            b[i] = new Long(l).byteValue();
+            l=l>>8;
+        }
+        return b;
+    }
+
+    //字节到浮点转换
+    public static double byte2Double(byte[] b){
+        long l;
+
+        l=b[0];
+        l&=0xff;
+        l|=((long)b[1]<<8);
+        l&=0xffff;
+        l|=((long)b[2]<<16);
+        l&=0xffffff;
+        l|=((long)b[3]<<24);
+        l&=0xffffffffl;
+        l|=((long)b[4]<<32);
+        l&=0xffffffffffl;
+
+        l|=((long)b[5]<<40);
+        l&=0xffffffffffffl;
+        l|=((long)b[6]<<48);
+        l&=0xffffffffffffffl;
+        l|=((long)b[7]<<56);
+        return Double.longBitsToDouble(l);
+    }
+
+
+}

+ 604 - 0
common/src/main/java/com/gyee/ygys/utils/DateUtil.java

@@ -0,0 +1,604 @@
+package com.gyee.ygys.utils;
+
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.Calendar;
+import java.util.Date;
+
+public class DateUtil {
+
+    private static final String format = "yyyy-MM-dd";
+    private static final String format1 = "yyyy-MM-dd HH:mm:ss";
+    private static final String format2 = "MM/dd/yyyy HH:mm:ss";
+    private static final String formatGMT = "yyyy-MM-dd'T'HH:mm:ss.SSS"; //2017-06-28T13:49:15.000+0800
+
+    // 第一次调用get将返回null
+    private static ThreadLocal<SimpleDateFormat> threadLocal = new ThreadLocal<SimpleDateFormat>();
+
+    // 获取线程的变量副本,如果不覆盖initialValue,第一次get返回null,故需要初始化一个SimpleDateFormat,并set到threadLocal中
+    public static SimpleDateFormat getFormat() {
+
+        SimpleDateFormat df = (SimpleDateFormat) threadLocal.get();
+
+        if (df == null) {
+            df = new SimpleDateFormat(format);
+            threadLocal.set(df);
+        }
+
+        return df;
+
+    }
+
+    public static SimpleDateFormat getFormat1() {
+
+        SimpleDateFormat df1 = (SimpleDateFormat) threadLocal.get();
+
+        if (df1 == null) {
+            df1 = new SimpleDateFormat(format1);
+            threadLocal.set(df1);
+        }
+
+        return df1;
+
+    }
+
+    public static SimpleDateFormat getFormat2() {
+
+        SimpleDateFormat df2 = (SimpleDateFormat) threadLocal.get();
+
+        if (df2 == null) {
+            df2 = new SimpleDateFormat(format2);
+            threadLocal.set(df2);
+        }
+
+        return df2;
+
+    }
+    
+    public static SimpleDateFormat getFormatGMT() {
+
+        SimpleDateFormat df2 = (SimpleDateFormat) threadLocal.get();
+        if (df2 == null) {
+            df2 = new SimpleDateFormat(formatGMT);
+            threadLocal.set(df2);
+        }
+
+        return df2;
+    }
+
+    /**
+     * 获取系统日期(无时分秒毫秒)
+     * 
+     * @return
+     */
+    public static Date today() {
+        return truncate(now());
+    }
+
+    /**
+     * 获取系统时间
+     * 
+     * @return
+     */
+    public static Date now() {
+        return new Date();
+    }
+
+    /**
+     * 根据年月日生成日期对象
+     * 
+     * @param y
+     * @param m
+     * @param d
+     * @return
+     */
+    public static Date cons(int y, int m, int d) {
+        Calendar cal = Calendar.getInstance();
+        cal.set(y, m, d, 0, 0, 0);
+        return cal.getTime();
+    }
+
+    public static String toDate(Date date) {
+        return getFormat1().format(date);
+    }
+
+    public static String toDate2(Date date) {
+        return getFormat2().format(date);
+    }
+
+    public static String toDate1(Date date) {
+        return getFormat().format(date);
+    }
+    
+    public static String toDateGMT(Date date) {
+        return getFormatGMT().format(date);
+    }
+
+    /**
+     * 根据年月日时分秒生成日期对象
+     * 
+     * @param y
+     * @param m
+     * @param d
+     * @param h
+     * @param mi
+     * @param s
+     * @return
+     */
+    public static Date cons(int y, int m, int d, int h, int mi, int s) {
+        Calendar cal = Calendar.getInstance();
+        cal.set(y, m, d, h, mi, s);
+        return cal.getTime();
+    }
+
+    /**
+     * 将指定时间转化为 Calendar
+     * 
+     * @param date
+     * @return
+     */
+    public static Calendar getCal(Date date) {
+        Calendar cal = Calendar.getInstance();
+        cal.setTime(date);
+        return cal;
+    }
+
+    /**
+     * 将时间的时分秒毫秒字段去掉
+     * 
+     * @param date
+     * @return
+     */
+    public static Date truncate(Date date) {
+        Calendar cal = Calendar.getInstance();
+        cal.setTime(date);
+        cal.set(Calendar.HOUR_OF_DAY, 0);
+        cal.set(Calendar.MINUTE, 0);
+        cal.set(Calendar.SECOND, 0);
+        cal.set(Calendar.MILLISECOND, 0);
+        return cal.getTime();
+    }
+
+    /**
+     * 去掉日期中日及下级字段
+     * 
+     * @param date
+     * @return
+     */
+    public static Date truncDay(Date date) {
+        Calendar cal = Calendar.getInstance();
+        cal.setTime(date);
+        cal.set(Calendar.DAY_OF_MONTH, 1);
+        cal.set(Calendar.HOUR_OF_DAY, 0);
+        cal.set(Calendar.MINUTE, 0);
+        cal.set(Calendar.SECOND, 0);
+        cal.set(Calendar.MILLISECOND, 0);
+        return cal.getTime();
+    }
+
+    /**
+     * 去掉日期中的月及下级字段
+     * 
+     * @param date
+     * @return
+     */
+    public static Date truncMonth(Date date) {
+        Calendar cal = Calendar.getInstance();
+        cal.setTime(date);
+        cal.set(Calendar.MONTH, 0);
+        cal.set(Calendar.DAY_OF_MONTH, 1);
+        cal.set(Calendar.HOUR_OF_DAY, 0);
+        cal.set(Calendar.MINUTE, 0);
+        cal.set(Calendar.SECOND, 0);
+        cal.set(Calendar.MILLISECOND, 0);
+        return cal.getTime();
+    }
+
+    /**
+     * 在指定时间上加指定的天数
+     * 
+     * @param date
+     * @param day
+     * @return
+     */
+    public static Date addDays(Date date, int day) {
+        Calendar cal = Calendar.getInstance();
+        cal.setTime(date);
+        cal.add(Calendar.DAY_OF_MONTH, day);
+        return cal.getTime();
+    }
+
+    /**
+     * 在指定的时间上加指定的月数
+     * 
+     * @param date
+     * @param month
+     * @return
+     */
+    public static Date addMonths(Date date, int month) {
+        Calendar cal = Calendar.getInstance();
+        cal.setTime(date);
+        cal.add(Calendar.MONTH, month);
+        return cal.getTime();
+    }
+
+
+    public static Date addYears(Date date, int year) {
+        Calendar cal = Calendar.getInstance();
+        cal.setTime(date);
+        cal.add(Calendar.YEAR, year);
+        return cal.getTime();
+    }
+
+
+    public static Date addHours(Date date, int hour) {
+        return new Date(date.getTime() + hour * 3600 * 1000);
+    }
+
+
+    public static Date addMinutes(Date date, int m) {
+        return new Date(date.getTime() + m * 60 * 1000);
+    }
+
+
+    public static Date addSeconds(Date date, int s) {
+        return new Date(date.getTime() + s * 1000);
+    }
+
+    /**
+     * 计算两个时间之间差的天数(取整后)
+     * 
+     * @param d1
+     * @param d2
+     * @return
+     */
+    public static int daysDiff(Date d1, Date d2) {
+        return (int) Math.floor(Math.abs((d1.getTime() - d2.getTime())) / (60 * 60 * 24 * 1000));
+    }
+
+    /**
+     * 计算两个时间之间差的小时数(取整后)
+     * 
+     * @param d1
+     * @param d2
+     * @return
+     */
+    public static int hoursDiff(Date d1, Date d2) {
+        return (int) Math.floor(Math.abs((d1.getTime() - d2.getTime())) / (60 * 60 * 1000));
+    }
+
+    public static double hoursDiff1(Date d1, Date d2) {
+        return Math.floor(Math.abs((d1.getTime() - d2.getTime())) / (double) (60 * 60 * 1000));
+    }
+
+    public static double hoursDiff2(Date d1, Date d2) {
+        return Math.abs((d1.getTime() - d2.getTime())) / (double) (60 * 60 * 1000);
+    }
+
+    /**
+     * 计算两个时间之间差的分钟数(取整后)
+     * 
+     * @param d1
+     * @param d2
+     * @return
+     */
+    public static int minutesDiff(Date d1, Date d2) {
+        return (int) Math.floor(Math.abs((d1.getTime() - d2.getTime())) / (60 * 1000));
+    }
+
+    /**
+     * 计算两个时间之间差的分钟数(取整后)
+     * 
+     * @param d1
+     * @param d2
+     * @return
+     */
+    public static double minutesDiff2(Date d1, Date d2) {
+        return Math.floor(Math.abs((d1.getTime() - d2.getTime())) / (60 * 1000));
+    }
+
+    /**
+     * 计算两个时间之间差的毫秒数(取整后)
+     * 
+     * @param d1
+     * @param d2
+     * @return
+     */
+    public static long millisecondDiff(Date d1, Date d2) {
+        return Math.abs(d1.getTime() - d2.getTime());
+    }
+
+    /**
+     * 计算两个时间之间差的秒数(取整后)
+     * 
+     * @param d1
+     * @param d2
+     * @return
+     */
+    public static int secondsDiff(Date d1, Date d2) {
+        return (int) Math.floor(Math.abs((d1.getTime() - d2.getTime())) / (1000));
+    }
+
+    /**
+     * 计算两个时间之间的月差
+     * 
+     * @param d1
+     * @param d2
+     * @return
+     */
+    public static int monthsDiff(Date d1, Date d2) {
+        Calendar cal1 = Calendar.getInstance();
+        Calendar cal2 = Calendar.getInstance();
+        cal1.setTime(d1);
+        cal2.setTime(d2);
+
+        return (int) Math.abs((cal1.get(Calendar.YEAR) - cal2.get(Calendar.YEAR)) * 12 + cal1.get(Calendar.MONTH) - cal2.get(Calendar.MONTH));
+
+    }
+
+    /**
+     * 计算两个时间之间的月差
+     *
+     * @param d1
+     * @param d2
+     * @return
+     */
+    public static int monthsDiff2(Date d1, Date d2) {
+        Calendar cal1 = Calendar.getInstance();
+        Calendar cal2 = Calendar.getInstance();
+        cal1.setTime(d1);
+        cal2.setTime(d2);
+
+        return (cal1.get(Calendar.YEAR) - cal2.get(Calendar.YEAR)) * 12 + cal1.get(Calendar.MONTH) - cal2.get(Calendar.MONTH);
+
+    }
+
+    /**
+     * 获得指定时间的月数
+     * 
+     * @param date
+     * @return
+     */
+    public static int getMonth(Date date) {
+        Calendar cd = Calendar.getInstance();
+        cd.setTime(date);
+        return cd.get(Calendar.MONTH);
+    }
+
+    /**
+     * 获得指定时间的年数
+     * 
+     * @param date
+     * @return
+     */
+    public static int getYear(Date date) {
+        Calendar cd = Calendar.getInstance();
+        cd.setTime(date);
+        return cd.get(Calendar.YEAR);
+    }
+
+    /**
+     * 获取指定时间的天数
+     * 
+     * @param date
+     * @return
+     */
+    public static int getDay(Date date) {
+        Calendar cd = Calendar.getInstance();
+        cd.setTime(date);
+        return cd.get(Calendar.DAY_OF_MONTH);
+    }
+
+    public static int getCurrentMonthLastDay() {
+        Calendar a = Calendar.getInstance();
+        a.set(Calendar.DATE, 1);
+        a.roll(Calendar.DATE, -1);
+        int maxDate = a.get(Calendar.DATE);
+        return maxDate;
+    }
+
+    public static int getMonthDays(Date date) {
+        Calendar cal = Calendar.getInstance();
+        cal.set(DateUtil.getYear(date), DateUtil.getMonth(date), DateUtil.getDay(date));
+        int dayst = cal.getActualMaximum(Calendar.DAY_OF_MONTH);
+        return dayst;
+    }
+
+    /**
+     * 获取当前月的第一天
+     * 
+     * @return
+     */
+    public static String getCurrtenFirstDay() {
+
+        Calendar c = Calendar.getInstance();
+        // c.add(Calendar.MONTH, 0);
+        c.set(Calendar.DAY_OF_MONTH, 1);
+        return getFormat().format(c.getTime());
+    }
+
+    /**
+     * 获取当前月的最后一天
+     * 
+     * @return
+     */
+    public static String getCurrtenLastDay() {
+
+        Calendar ca = Calendar.getInstance();
+        ca.set(Calendar.DAY_OF_MONTH, ca.getActualMaximum(Calendar.DAY_OF_MONTH));
+        return getFormat().format(ca.getTime());
+    }
+
    /**
     * Returns the first day of the current month as a {@link Date}.
     *
     * NOTE(review): the seconds field is set to 1 (not 0) and the millisecond
     * field is left at "now", so the result is NOT an exact midnight boundary.
     * This may be deliberate (e.g. to sort strictly after midnight) — confirm
     * with callers before normalizing to 00:00:00.000.
     *
     * @return first day of the current month at 00:00:01 (+ current millis)
     */
    public static Date getCurrtenFirstDate() {

        Calendar c = Calendar.getInstance();
        c.set(Calendar.DAY_OF_MONTH, c.getActualMinimum(Calendar.DAY_OF_MONTH));
        c.set(Calendar.HOUR_OF_DAY, 0);
        c.set(Calendar.MINUTE, 0);
        c.set(Calendar.SECOND, 1);
        return c.getTime();
    }
+
+    /**
+     * 获取当前月的最后一天
+     * 
+     * @return
+     */
+    public static Date getCurrtenLastDate() {
+
+        Calendar c = Calendar.getInstance();
+        c.set(Calendar.DAY_OF_MONTH, c.getActualMaximum(Calendar.DAY_OF_MONTH));
+        c.set(Calendar.HOUR_OF_DAY, 23);
+        c.set(Calendar.MINUTE, 59);
+        c.set(Calendar.SECOND, 59);
+        return c.getTime();
+    }
+
+    public static Date parseDate(String date) {
+        try {
+            SimpleDateFormat sdf = new SimpleDateFormat(format);
+            return sdf.parse(date);
+        } catch (ParseException e) {
+            e.printStackTrace();
+        }
+        return null;
+    }
+
+    public static Date parseDate1(String date) {
+        try {
+            return getFormat1().parse(date);
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+        return null;
+    }
+
+    public static Date parseDate2(String date) {
+        try {
+            return getFormat2().parse(date);
+        } catch (ParseException e) {
+            e.printStackTrace();
+        }
+        return null;
+    }
+    
+    public static Date parseDateGMT(String date) {
+        try {
+            return getFormatGMT().parse(date);
+        } catch (ParseException e) {
+            e.printStackTrace();
+        }
+        return null;
+    }
+
+    public static Date parseLongToDate(long time) {
+        return new Date(time);
+    }
+
+    /**
+     * 转换Edna时间格式为标准格式
+     * 
+     * @param pointTime
+     * @return
+     */
+    public static String convertEdnaTime2(String pointTime, Boolean isNoSec) {
+        StringBuffer sb = new StringBuffer();
+        String[] dt = pointTime.split(" ");
+        String[] ymd = dt[0].split("-");
+        String[] hms = dt[1].split(":");
+        sb.append(ymd[0]).append("-");
+        if (ymd[1].length() == 1) {
+            sb.append("0").append(ymd[1]);
+        } else {
+            sb.append(ymd[1]);
+        }
+        if (ymd[2].length() == 1) {
+            sb.append("-").append("0").append(ymd[2]);
+        } else {
+            sb.append("-").append(ymd[2]);
+        }
+        if (hms[0].length() == 1) {
+            sb.append(" ").append("0").append(hms[0]);
+        } else {
+            sb.append(" ").append(hms[0]);
+        }
+        if (hms[1].length() == 1) {
+            sb.append(":").append("0").append(hms[1]);
+        } else {
+            sb.append(":").append(hms[1]);
+        }
+
+        if (isNoSec) {
+            sb.append(":").append("00");
+        } else {
+            if (hms[2].length() == 1) {
+                sb.append(":").append("0").append(hms[2]);
+            } else {
+                sb.append(":").append(hms[2]);
+            }
+        }
+
+        return sb.toString();
+    }
+
    /**
     * Converts an Edna timestamp of the form "M/d/yy H:m[:s]" into the
     * zero-padded standard form "yyyy-MM-dd HH:mm:ss". A two-digit year is
     * expanded with the century taken from today's formatted date.
     *
     * NOTE(review): when ymd[2] (the year field) is not exactly two
     * characters long, no year or separator is emitted at all and the result
     * starts with the month — looks like an unhandled case rather than a
     * feature; confirm the expected input format before relying on it.
     *
     * @param pointTime Edna timestamp "M/d/yy H:m[:s]"
     * @param isNoSec   when true, the seconds field is forced to "00"
     * @return normalized timestamp string
     */
    public static String convertEdnaTime(String pointTime, Boolean isNoSec) {
        // Today's date in the default format, used only for its century digits.
        String date = getFormat().format(new Date());
        StringBuffer sb = new StringBuffer();
        String[] dt = pointTime.split(" ");
        String[] ymd = dt[0].split("/");
        String[] hms = dt[1].split(":");
        // Expand a 2-digit year: century from "today" + the 2-digit year.
        if (ymd[2].length() == 2) {
            sb.append(date.substring(0, 2)).append(ymd[2]).append("-");
        }
        // Month, zero-padded to two digits.
        if (ymd[0].length() == 1) {
            sb.append("0").append(ymd[0]);
        } else {
            sb.append(ymd[0]);
        }
        // Day, zero-padded to two digits.
        if (ymd[1].length() == 1) {
            sb.append("-").append("0").append(ymd[1]);
        } else {
            sb.append("-").append(ymd[1]);
        }
        // Hours, zero-padded to two digits.
        if (hms[0].length() == 1) {
            sb.append(" ").append("0").append(hms[0]);
        } else {
            sb.append(" ").append(hms[0]);
        }
        // Minutes, zero-padded to two digits.
        if (hms[1].length() == 1) {
            sb.append(":").append("0").append(hms[1]);
        } else {
            sb.append(":").append(hms[1]);
        }

        // Seconds: forced to "00", or zero-padded from the input.
        if (isNoSec) {
            sb.append(":").append("00");
        } else {
            if (hms[2].length() == 1) {
                sb.append(":").append("0").append(hms[2]);
            } else {
                sb.append(":").append(hms[2]);
            }
        }

        return sb.toString();
    }
+
+    public static String convertEdnaTime(String pointTime) {
+        return convertEdnaTime2(pointTime, false);
+    }
+
+
+
+}

+ 129 - 0
common/src/main/java/com/gyee/ygys/utils/EncryptUtil.java

@@ -0,0 +1,129 @@
+package com.gyee.ygys.utils;
+
+import org.apache.commons.codec.binary.Base64;
+
+import javax.crypto.KeyGenerator;
+import javax.crypto.Mac;
+import javax.crypto.SecretKey;
+import javax.crypto.spec.SecretKeySpec;
+import java.security.InvalidKeyException;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+
+public class EncryptUtil {
+    public static final String KEY_SHA = "SHA";
+    public static final String KEY_MD5 = "MD5";
+    public static final String KEY_MAC = "HmacMD5";
+
+
+// sun不推荐使用它们自己的base64,用apache的挺好
+
+    /**
+     * BASE64解密
+     */
+    public static byte[] decryptBASE64(byte[] dest) {
+        if (dest == null) {
+            return null;
+        }
+        return Base64.decodeBase64(dest);
+    }
+
+    /**
+     * BASE64加密
+     */
+    public static byte[] encryptBASE64(byte[] origin) {
+        if (origin == null) {
+            return null;
+        }
+        return Base64.encodeBase64(origin);
+    }
+
+    /**
+     * MD5加密
+     *
+     * @throws NoSuchAlgorithmException
+     */
+    public static byte[] encryptMD5(byte[] data)
+            throws NoSuchAlgorithmException {
+        if (data == null) {
+            return null;
+        }
+        MessageDigest md5 = MessageDigest.getInstance(KEY_MD5);
+        md5.update(data);
+        return md5.digest();
+    }
+
+    /**
+     * SHA加密
+     *
+     * @throws NoSuchAlgorithmException
+     */
+    public static byte[] encryptSHA(byte[] data)
+            throws NoSuchAlgorithmException {
+        if (data == null) {
+            return null;
+        }
+        MessageDigest sha = MessageDigest.getInstance(KEY_SHA);
+        sha.update(data);
+        return sha.digest();
+    }
+
+    /**
+     * 初始化HMAC密钥
+     *
+     * @throws NoSuchAlgorithmException
+     */
+    public static String initMacKey() throws NoSuchAlgorithmException {
+        KeyGenerator keyGenerator = KeyGenerator.getInstance(KEY_MAC);
+        SecretKey secretKey = keyGenerator.generateKey();
+        return new String(encryptBASE64(secretKey.getEncoded()));
+    }
+
+    /**
+     * HMAC加密
+     *
+     * @throws NoSuchAlgorithmException
+     * @throws InvalidKeyException
+     */
+    public static byte[] encryptHMAC(byte[] data, String key)
+            throws NoSuchAlgorithmException, InvalidKeyException {
+        SecretKey secretKey = new SecretKeySpec(decryptBASE64(key.getBytes()),
+                KEY_MAC);
+        Mac mac = Mac.getInstance(secretKey.getAlgorithm());
+        mac.init(secretKey);
+        return mac.doFinal(data);
+
+    }
+
+    /***
+     * 系统加密算法
+     * @param plainPassword 原始密码
+     * @return
+     */
+    public static String encryptPwd(String plainPassword) {
+        try {
+            return encryptMD5(plainPassword.getBytes()).toString();
+        } catch (NoSuchAlgorithmException e) {
+            e.printStackTrace();
+        }
+        return null;
+    }
+
+    public static void main(String[] args) throws Exception {
+// TODO Auto-generated method stub
+        String data = "简单加密";
+//        System.out.println(new BigInteger(encryptBASE64(data.getBytes())).toString(16));
+//        System.out.println(new BigInteger(encryptBASE64(data.getBytes())).toString(32));
+//        System.out.println(new String(decryptBASE64(encryptBASE64(data.getBytes()))));
+//
+//        System.out.println(new BigInteger(encryptMD5(data.getBytes())).toString());
+//        System.out.println(encryptSHA(data.getBytes()).toString());
+        System.out.println(EncryptUtil.encryptPwd(encryptMD5("123456".getBytes()).toString()));
+        System.out.println(EncryptUtil.encryptPwd("123456"));
+        System.out.println(EncryptUtil.encryptPwd("e10adc3949ba59abbe56e057f20f883e"));
+        System.out.println(EncryptUtil.encryptPwd("e10adc3949ba59abbe56e057f20f883e"));
+//
+//        System.out.println(new BigInteger(encryptHMAC(data.getBytes(), initMacKey())).toString());
+    }
+
+}

+ 22 - 0
common/src/main/java/com/gyee/ygys/utils/RequestMapToJsonUtil.java

@@ -0,0 +1,22 @@
+package com.gyee.ygys.utils;
+
+import com.alibaba.fastjson.JSON;
+import org.apache.commons.collections.map.HashedMap;
+
+import java.util.Map;
+
+
+public class RequestMapToJsonUtil {
+
+    public static String toJson(Map<String, String[]> params) {
+        Map<String,String> dataMap=new HashedMap(params.size()-1);
+        for(String key : params.keySet()) {
+            if (key.equals("_csrf")) {
+                continue;
+            }
+            dataMap.put(key,params.get(key)[0]);
+        }
+        return JSON.toJSONString(dataMap);
+    }
+
+}

+ 46 - 0
common/src/main/java/com/gyee/ygys/utils/StringUtil.java

@@ -0,0 +1,46 @@
+package com.gyee.ygys.utils;
+
+import org.apache.commons.lang3.StringUtils;
+
+import java.util.UUID;
+
+/**
+ * String辅助类
+ * 
+ */
+public class StringUtil extends StringUtils {
+	
+	/**
+	 * 非空判断
+	 * 
+	 * @param obj
+	 * @return
+	 */
+	public static boolean isNotBlank(Object obj) {
+		return !isBlank(obj);
+	}
+
+	/**
+	 * 为空判断
+	 * 
+	 * @param obj
+	 * @return
+	 */
+	public static boolean isBlank(Object obj) {
+		if (obj == null || StringUtils.isBlank(obj.toString())) {
+			return true;
+		}
+		return false;
+	}
+
+	/**
+	 * 获得一个UUID
+	 * @return String UUID
+	 */
+	public static String getUUID(){
+		String uuid = UUID.randomUUID().toString();
+		//去掉“-”符号
+		return uuid.replaceAll("-", "");
+	}
+
+}

+ 11 - 0
golden-example/build.gradle

@@ -0,0 +1,11 @@
// Build configuration for the golden-example module.
buildscript {
    repositories {
        mavenLocal()
        // NOTE(review): plain-HTTP repository URL — newer Gradle versions reject
        // insecure protocols; consider switching to https. TODO confirm.
        maven { url "http://maven.aliyun.com/nexus/content/groups/public" }
        mavenCentral()
    }
}

// The Golden RTDB client is not published to a repository; its jars are
// vendored under src/main/lib and picked up as a flat file tree.
dependencies {
    compile fileTree(dir: 'src/main/lib', include: '*.jar')
}

+ 282 - 0
golden-example/src/main/java/com/rtdb/test/ArchiveTest.java

@@ -0,0 +1,282 @@
+package com.rtdb.test;
+
+import java.util.Date;
+import java.util.List;
+
+import com.rtdb.api.enums.GoldenError;
+import com.rtdb.api.enums.RtdbProcessName;
+import com.rtdb.api.exception.NoAuthorityException;
+import com.rtdb.api.model.GoldenArchiveData;
+import com.rtdb.api.model.GoldenArchivePerfData;
+import com.rtdb.api.model.GoldenHeaderPage;
+import com.rtdb.api.model.RtdbBigJob;
+import com.rtdb.api.model.RtdbBigJobName;
+import com.rtdb.api.util.DateUtil;
+import com.rtdb.enums.ArchiveState;
+import com.rtdb.model.ArchiveEntity;
+import com.rtdb.model.ErrorParse;
+import com.rtdb.model.HeaderPage;
+import com.rtdb.service.impl.ArchiveImpl;
+import com.rtdb.service.impl.ServerImplPool;
+import com.rtdb.service.inter.Archive;
/**
 * <p>
 * Description: manual test driver for the archive-file service interface.
 * Each scenario is a static helper; main() opens a connection and the calls
 * are left commented out so they can be enabled one at a time.
 * </p>
 * <p>
 * Copyright: Copyright (c) 2012 GoldenAPI_Java , 北京庚顿数据科技有限公司
 * </p>
 * <p>
 * Company: 北京庚顿数据科技有限公司(www.golden-data.com.cn)
 * </p>
 */
@SuppressWarnings("unused")
public class ArchiveTest {
	// Shared service handle used by every scenario below.
	private static Archive archive;

	public static void main(String[] args) {

		try {
			// NOTE(review): hard-coded host/credentials; pool of 2-4 connections.
			ServerImplPool pool = new ServerImplPool("192.168.0.88", 6327, "sa", "golden", 2, 4);
			archive = new ArchiveImpl(pool.getServerImpl());
			// Enable one scenario at a time:
			// appendArchive();
			// arrangeArchive();
			// backupArchive();
			// cancelBigJob();
			// convertIndex();
			// createArchive();
			// createRangedArchive();
			// getArchiveCount();
			// getArchiveInfo();
			// getArchives();
			// getArchivesInfo();
			// getArchivesPerfData();
			// getArchivesStatus();
			// getFirstArchive();
			// getNextArchive();
			// mergeArchive();
			// moveArchive();
			// queryBigJob();
			// reactiveArchive();
			// reindexArchive();
			// removeArchive();
			// shiftActived();
			// updateArchive();
			// archive.close();
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	// Moves an archive file from one directory to another.

	private static void moveArchive() throws NoAuthorityException, Exception {
		String file = "000001.rdf";
		String path = "c:\\golden\\data\\";
		String dest = "e:\\";
		boolean result = archive.moveArchive(file, dest, path);
		System.out.println(result);
	}

	// Prints performance counters for the given archive files.
	public static void getArchivesPerfData() throws NoAuthorityException, Exception {
		String[] paths = { "c:/golden/data/" };
		String[] files = { "000001.rdf" };
		List<GoldenArchivePerfData> archivesPerfData = archive.getArchivesPerfData(paths, files);
		for (GoldenArchivePerfData goldenArchivePerfData : archivesPerfData) {
			float total_lock_time = goldenArchivePerfData.getTotal_lock_time();
			float index_lock_time = goldenArchivePerfData.getIndex_lock_time();
			float write_real_size = goldenArchivePerfData.getWrite_real_size();
			int write_count = goldenArchivePerfData.getWrite_count();
			System.out.println(write_count + "  " + index_lock_time + "  " + write_real_size + " " + total_lock_time);
		}
	}

	// Reports the automatic archive-management status (non-zero means an error).
	public static void getArchivesStatus() throws NoAuthorityException, Exception {
		long archivesStatus = archive.getArchivesStatus();
		if (archivesStatus != 0) {
			GoldenError goldenError = ErrorParse.getGoldenError(archivesStatus);
			System.out.println(goldenError.getErrmsg());
		}

	}

	// Defragments an archive so each tag's data blocks are stored
	// contiguously, improving query efficiency.
	public static void arrangeArchive() throws NoAuthorityException, Exception {
		String path = "c:/golden/data/";
		String file = "000040.rdf";
		boolean b = archive.arrangeArchive(path, file);
		System.out.println(b);

	}

	// Rebuilds the index of an archive file.
	public static void reindexArchive() throws NoAuthorityException, Exception {
		String path = "c:/golden/data/";
		String file = "000001.rdf";
		boolean reindexArchive = archive.reindexArchive(path, file);
		System.out.println(reindexArchive);

	}

	// Converts an archive's index format.
	public static void convertIndex() throws NoAuthorityException, Exception {
		String path = "c:\\golden\\data ";// C:/golden/data/";
		String file = "000025.rdf";
		boolean reindexArchive = archive.convertIndex(path, file);
		System.out.println(reindexArchive);

	}

	// Lists every archive file with its path and state.
	public static void getArchives() throws NoAuthorityException, Exception {
		List<GoldenArchiveData> archives = archive.getArchives();
		int count = 0;
		for (GoldenArchiveData gad : archives) {
			String path = gad.getPath();
			count++;
			System.out.println("数量为:" + count + path + gad.getState());
		}

	}

	// Fetches detailed information for a batch of archive files.
	public static void getArchivesInfo() {
		try {
			String[] path = { "c:/golden/data/", "D:/project/GoldenRTDBMS/WorkBench/CodeBase/testspace_v3.0/data/" };
			String[] file = { "000452.rdf", "000008.rdf" };
			GoldenHeaderPage[] archivesInfo = archive.getArchivesInfo(path, file);
			for (GoldenHeaderPage hp : archivesInfo) {
				System.out.println(
						"文件名:" + hp.getFile_name() + " \r\n创建时间: " + DateUtil.TimeStampDate(hp.getCreate_time())
								+ "\r\n开始时间 :" + DateUtil.TimeStampDate(hp.getBegin()) + "\r\n结束时间:"
								+ DateUtil.TimeStampDate(hp.getEnd()) + "\r\n整理时间 :"
								+ (hp.getArranged() == 1 ? "已整理" : "没整理过") + "使用了 " + hp.getRated_capacity() / 1024
								+ "M\r\n状态:" + ErrorParse.getGoldenError(hp.getError()).getErrmsg());
			}
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	// Fetches detailed information for one archive file and its satellite files.
	public static void getArchiveInfo() {
		try {
			String path = "c:/golden/data/";
			String file = "000002.rdf";
			HeaderPage hp = archive.getArchiveInfo(path, file, 0);
			System.out.println("文件名:" + hp.getFile_name() + " \r\n创建时间: " + DateUtil.TimeStampDate(hp.getCreate_time())
					+ "\r\n开始时间 :" + DateUtil.TimeStampDate(hp.getBegin()) + "\r\n整理时间 :"
					+ (hp.getArranged() == 1 ? "已整理" : "没整理过"));
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	// Prints the total number of archive files.
	public static void getArchiveCount() throws Exception {
		int result = archive.getArchiveCount();
		System.out.println(result);
	}

	// Creates a new historical archive file and appends it to the database.
	public static void createArchive() throws Exception {
		String path = "c:\\golden\\data";
		String fiel = "000012.rdf";
		boolean result = archive.createArchive(path, fiel, 1024);
		System.out.println(result);
	}

	// Creates a historical archive covering an explicit time range and
	// inserts it into the database.
	public static void createRangedArchive() throws Exception {
		Date begin = DateUtil.stringToDate("2017-10-11 12:32:11");
		Date end = DateUtil.stringToDate("2017-10-12 12:32:11");
		boolean result = archive.createRangedArchive("c:/golden/data/", "000007.rdf", 1024, begin, end);
		System.out.println(result);
	}

	// Appends an on-disk archive file to the database (enlist operation).
	public static void appendArchive() throws Exception {
		String path = "c:\\golden\\data";
		String fiel = "000002.rdf";
		boolean result = archive.appendArchive(path, fiel, ArchiveState.NORMAL_ARCHIVE);
		System.out.println(result);
	}

	// Detaches (delists) an existing archive file from the database.
	public static void removeArchive() throws Exception {
		String path = "C:\\golden\\data";
		String fiel = "000001.rdf";
		boolean result = archive.removeArchive(path, fiel);
		System.out.println(result);
	}

	// Switches the active file. Note: triggered when the current active file
	// fills up; the current active file becomes a normal file and an unused
	// archive is inserted to its right and made active.
	public static void shiftActived() throws Exception {
		boolean result = archive.shiftActived();
		System.out.println(result);
	}

	// Activates the given archive file as the active archive.
	// Note: only allowed when the database currently has no active file.
	public static void reactiveArchive() throws Exception {
		boolean result = archive.reactiveArchive("c:/golden/data/", "000001.rdf");
		System.out.println(result);
	}

	// Fetches the path, name and state of the first archive file.
	public static void getFirstArchive() throws Exception {
		ArchiveEntity result = archive.getFirstArchive();
		System.out.println(result.getPath() + "文件名称 " + result.getFile() + " 状态 " + result.getState());
	}

	// Fetches the path, name and state of the next archive file
	// (note: a returned path of "END" means the iteration is finished).
	public static void getNextArchive() throws Exception {
		ArchiveEntity f = archive.getFirstArchive();
		ArchiveEntity result = archive.getNextArchive(f.getPath(), f.getFile());
		System.out.println(result.getPath() + "  " + result.getFile() + " " + result.getState());
	}

	// Updates the configurable options of an archive file.
	public static void updateArchive() throws Exception {
		String path = "c:\\golden\\data";
		String fiel = "000001.rdf";
		boolean result = archive.updateArchive(path, fiel, 1024, 0, false, false);
		System.out.println(result);
	}

	// Merges an archive's satellite files.
	public static void mergeArchive() throws Exception {
		boolean result = archive.mergeArchive("C:\\historian\\", "000002.g");
		System.out.println(result);
	}

	// Backs up an archive file and its satellite files to the given path.
	public static void backupArchive() throws Exception {
		String path = "c:\\golden\\data";
		String fiel = "000025.rdf";
		String backPath = "e:\\data";
		boolean result = archive.backupArchive(path, fiel, backPath);
		System.out.println(result);
	}

	// Queries the type, state and progress of the background job a process is
	// running (note: path and file arguments may be null; the corresponding
	// information is then omitted).
	public static void queryBigJob() throws Exception {
		RtdbBigJob bigJob = archive.queryBigJob(RtdbProcessName.PROCESS_HISTORIAN);
		if (bigJob == null) {
			System.out.println("没有后台任务正在巡行");
		} else {
			System.out.println(bigJob.getFile() + "  " + DateUtil.dateToString(bigJob.getEnd_time()) + "状态:"
					+ ErrorParse.getGoldenError(bigJob.getState()) + "  "
					+ RtdbBigJobName.parse(bigJob.getJob().getNum()));
		}
	}

	// Cancels the background job of the given process.
	public static void cancelBigJob() throws NoAuthorityException, Exception {
		RtdbProcessName parse = RtdbProcessName.PROCESS_HISTORIAN;
		boolean cancelBigJob = archive.cancelBigJob(parse);
		System.out.println(cancelBigJob);

	}

}

File diff suppressed because it is too large
+ 1326 - 0
golden-example/src/main/java/com/rtdb/test/BaseTest.java


+ 71 - 0
golden-example/src/main/java/com/rtdb/test/EquationTest.java

@@ -0,0 +1,71 @@
+package com.rtdb.test;
+
+import java.io.IOException;
+import java.text.ParseException;
+import java.util.Date;
+import java.util.List;
+
+import com.rtdb.api.enums.RtdbGraphFlag;
+import com.rtdb.api.exception.EncodePacketErrorException;
+import com.rtdb.api.exception.InvalidParameterException;
+import com.rtdb.api.exception.NoAuthorityException;
+import com.rtdb.api.model.RtdbGraphData;
+import com.rtdb.api.util.DateUtil;
+import com.rtdb.enums.ComputerFlag;
+import com.rtdb.model.ComputerEntity;
+import com.rtdb.model.Entity;
+import com.rtdb.service.impl.EquationImpl;
+import com.rtdb.service.impl.ServerImpl;
+import com.rtdb.service.inter.Equation;
+
/**
 * <p>
 * Description: manual test driver for the equation (computed-tag) service
 * interface. main() connects and runs one scenario at a time.
 * </p>
 * 
 * @version 1.0
 */
public class EquationTest {

	// Shared service handle used by every scenario below.
	private static Equation equation;

	public static void main(String[] args) {
		try {
			// NOTE(review): hard-coded localhost credentials.
			equation = new EquationImpl(new ServerImpl("127.0.0.1", 6327, "sa", "golden"));
			//computeHistory();
			 getEquationGraphCount();
			// getEquationGraphDatas();

		} catch (Exception e) {
			// TODO: handle exception
			e.printStackTrace();
		}
	}

	// Recomputes or back-fills historical data for a batch of computed tags.
	public static void computeHistory() throws InvalidParameterException, NoAuthorityException, ParseException,
			IOException, EncodePacketErrorException, Exception {
		Entity<ComputerEntity> entity;
		Date beginTime = DateUtil.stringToDate("2017-11-27 10:44:00");
		Date endTime = DateUtil.stringToDate("2017-11-27 10:45:00");
		entity = equation.computeHistory(new int[] { 10146 }, ComputerFlag.ADD_COMPUTE, beginTime, endTime);
		System.out.println(entity.getSucCount() + " 反馈信息: " + entity.getRetInfo());
	}

	// Gets the number of equation graph key-value pairs linked to a tag id.
	public static void getEquationGraphCount() throws InvalidParameterException, NoAuthorityException, Exception {
		int equationGraphCount = equation.getEquationGraphCount(10146, RtdbGraphFlag.GOLDEN_GRAPH_DIRECT);
		System.out.println(equationGraphCount);
	}

	// Gets the equation graph key-value pair data linked to a tag id.
	public static void getEquationGraphDatas() throws NoAuthorityException, Exception {
		int id = 10146;
		List<RtdbGraphData> equationGraphDatas = equation.getEquationGraphDatas(id, RtdbGraphFlag.GOLDEN_GRAPH_ALL);
		int equationGraphCount = equation.getEquationGraphCount(10146, RtdbGraphFlag.GOLDEN_GRAPH_ALL);
		for (RtdbGraphData RtdbGraphData2 : equationGraphDatas) {
			System.out.println("数量为:" + equationGraphCount + "  id是: " + RtdbGraphData2.getId() + " 父類:"
					+ RtdbGraphData2.getParent_id() + " 标签点名:" + RtdbGraphData2.getTag());
		}
	}
}

File diff suppressed because it is too large
+ 1147 - 0
golden-example/src/main/java/com/rtdb/test/HistorianTest.java


+ 431 - 0
golden-example/src/main/java/com/rtdb/test/ServerTest.java

@@ -0,0 +1,431 @@
+package com.rtdb.test;
+
+import java.io.IOException;
+import java.util.Date;
+import java.util.List;
+
+import com.rtdb.api.exception.EncodePacketErrorException;
+import com.rtdb.api.exception.InvalidParameterException;
+import com.rtdb.api.exception.NoAuthorityException;
+import com.rtdb.api.model.Login;
+import com.rtdb.api.model.Rtdb_Path;
+import com.rtdb.api.util.DateUtil;
+import com.rtdb.enums.PrivGroup;
+import com.rtdb.enums.RtdbDbParamIndex;
+import com.rtdb.model.AuthorizationInfo;
+import com.rtdb.model.BlackListInfo;
+import com.rtdb.model.Entity;
+import com.rtdb.model.ErrorParse;
+import com.rtdb.model.HostConnectInfo;
+import com.rtdb.model.JobMessage;
+import com.rtdb.model.UsersInfo;
+import com.rtdb.service.impl.ServerImpl;
+import com.rtdb.service.inter.Server;
+
+/**
+ * 
+ * <p>
+ * Description:网络服务类测试
+ * </p>
+ * <p>
+ * Copyright: Copyright (c) 2012 GoldenAPI_Java , 北京庚顿数据科技有限公司
+ * </p>
+ * <p>
+ * Company: 北京庚顿数据科技有限公司(www.golden-data.com.cn)
+ * </p>
+ */
+// Manual smoke-test driver for the Golden RTDB network/server service: each private
+// method exercises one Server API; uncomment the desired call in main() to run it.
+@SuppressWarnings("unused")
+public class ServerTest {
+	// Shared server connection; created once in main().
+	private static Server server;
+
+	public static void main(String[] args) {
+		try {
+			// NOTE(review): hard-coded demo host/credentials — adjust before running.
+			server = new ServerImpl("192.168.0.88", 6327, "sa", "golden");
+			// addAuthorization();
+			// addBlacklist();
+			// addUser();
+			// changeMyPassword();
+			// changePassword();
+			// changePriv();
+			// closePath();
+			// disConneciton();
+			// formatQuality();
+			// getApiVersion();
+			// getAuthorizations();
+			// getBlacklists();
+			// getConnectInfos();
+			// getConnectionCount();
+			// getConnectionInfo();
+			// getConnectionInfos();
+			// getConnections();
+			// getDbInfo1();
+			// getDbInfo2();
+			// getFileSize();
+			// getHostTime();
+			// getJobMessage();
+			// getLogicalDrivers();
+			// getLogin();
+			// getMaxBlobLen();
+			// getPriv();
+			// getTimeOut();
+			// getUsers();
+			// KillConnection();
+			// lockUser();
+			// mkDir();
+			// OpenPath();
+			// ReadFile();
+			// ReadPath();
+			// removeAuthorization();
+			// removeBlacklist();
+			// removeUser();
+			// setDbInfo1();
+			// setDbInfo2();
+			// setTimeOut();
+			// updateAuthorization();
+			// updateBlacklist();
+
+		} catch (Exception e) {
+			e.printStackTrace();
+		}
+	}
+
+	// Get the textual definitions corresponding to the given quality codes.
+	private static void formatQuality()
+			throws InvalidParameterException, NoAuthorityException, IOException, EncodePacketErrorException, Exception {
+
+		List<String> formatQuality = server.formatQuality(1, new int[] { 2 });
+		for (String str : formatQuality) {
+			System.out.println(str);
+		}
+	}
+
+	// Close the directory currently being traversed (pairs with openPath/readPath).
+	public static void closePath()
+			throws InvalidParameterException, NoAuthorityException, IOException, EncodePacketErrorException, Exception {
+
+		boolean b = server.closePath();
+		System.out.println(b);
+
+	}
+
+	// Disconnect from the server. (Method name typo "disConneciton" kept as-is.)
+	private static void disConneciton() throws NoAuthorityException, IOException, Exception {
+		server.disconnect();
+		System.out.println("已断开连接!");
+
+	}
+
+	// Print the API version string.
+	private static void getApiVersion() throws Exception {
+		String version = server.getApiVersion();
+
+		System.out.println(version);
+
+	}
+
+	// Get the OS type of the server this connection handle points at (call disabled).
+	private static void getLinkedOstype() {
+		// String type = server.getLinkedOstype();
+		// System.out.println(type);
+
+	}
+
+	// Print the socket timeout (ms) of up to 9 existing connections.
+	public static void getTimeOut()
+			throws InvalidParameterException, NoAuthorityException, IOException, EncodePacketErrorException, Exception {
+		int[] socket = server.getConnections(9);
+
+		for (int i = 0; i < socket.length; i++) {
+			System.out.print(socket[i] + "\t");
+			int out = server.getTimeOut(socket[i]);
+			System.out.println(out + " 毫秒");
+
+		}
+	}
+
+	// Enable or disable (lock) a user account.
+	private static void lockUser() throws NoAuthorityException, Exception {
+		boolean b = server.lockUser("ab", true);
+		System.out.println(b);
+
+	}
+
+	// Set an integer-valued database system parameter; prints the decoded error message.
+	private static void setDbInfo2() throws Exception {
+		long setDbInfo2 = server.setDbInfo2(RtdbDbParamIndex.GOLDEN_PARAM_EX_ARCHIVE_SIZE, 50);
+		System.out.println(ErrorParse.getGoldenError(setDbInfo2).getErrmsg());
+	}
+
+	// Set a string-valued database system parameter.
+	// NOTE(review): "loaclhost" is almost certainly a typo for "localhost" — confirm.
+	private static void setDbInfo1() throws Exception {
+
+		long setDbInfo1 = server.setDbInfo1(RtdbDbParamIndex.GOLDEN_PARAM_SERVER_SENDER_IP, "loaclhost");
+		System.out.println(ErrorParse.getGoldenError(setDbInfo1).getErrmsg());
+
+	}
+
+	// Set the socket timeout of a connection.
+	// NOTE(review): operates on hard-coded handle 736, not on the handles just listed.
+	public static void setTimeOut() throws Exception {
+		int timeOut = 100000;
+		int[] socket = server.getConnections(4);
+		for (int i = 0; i < socket.length; i++) {
+
+			System.out.println(socket[i]);
+		}
+		server.setTimeOut(736, timeOut);
+		int out = server.getTimeOut(736);
+		System.out.println(out);
+
+	}
+
+	// Print the maximum allowed blob length.
+	static void getMaxBlobLen() throws Exception {
+		int n = server.getMaxBlobLen();
+		System.out.println("maxBlobLen: " + n);
+	}
+
+	// Print user/client/port/times/IP for every current connection.
+	public static void getConnectionInfos() {
+		try {
+			List<HostConnectInfo> list = server.getConnectInfos();
+			for (HostConnectInfo hl : list) {
+				System.out.println(hl.getUser() + "---" + hl.getClient() + "---" + hl.getPort() + "---"
+						+ DateUtil.getGMTTime(hl.getJob_time()) + "-  -  -" + DateUtil.getGMTTime(hl.getConnect_time())
+						+ "---" + hl.getIpaddr());
+			}
+		} catch (Exception e) {
+			e.printStackTrace();
+		}
+	}
+
+	// Get the currently logged-in user's credentials.
+	public static void getLogin() throws Exception {
+		Login l = server.getLogin();
+		System.out.println(l.getUsername() + "---" + l.getPassword());
+	}
+
+	// Get the number of connections to the database.
+	public static void getConnectionCount() throws Exception {
+		int n = server.getConnectionCount();
+		System.out.println(n);
+	}
+
+	// Get the connection handles of the current user.
+	public static void getConnections() throws Exception {
+
+		int con = server.getConnectionCount();// total number of connections
+		int[] n = server.getConnections(con);
+		for (int i = 0; i < n.length; i++) {
+			System.out.println(n[i]);
+		}
+	}
+
+	// Return info for one specific server connection (hard-coded handle 736).
+	public static void getConnectionInfo() throws Exception {
+		int con = server.getConnectionCount();// total number of connections
+		int[] n = server.getConnections(con);
+
+		for (int i = 0; i < n.length; i++) {
+
+			if (n[i] == 736) {
+				HostConnectInfo info = server.getConnectionInfo(n[i]);
+
+				System.out.println(info.getClient() + "   " + info.getIpaddr() + "   " + info.getUser() + "  "
+						+ info.getJob() + "    \t " + DateUtil.getGMTTime(info.getConnect_time()) + "   "
+						+ DateUtil.getGMTTime(info.getJob_time()));
+			}
+		}
+	}
+
+	// Return the collection of all server connection infos.
+	public static void getConnectInfos() throws Exception {
+		List<HostConnectInfo> result = server.getConnectInfos();
+		for (int i = 0; i < result.size(); i++) {
+			HostConnectInfo info = result.get(i);
+			System.out.println(info.getClient() + "   " + info.getIpaddr() + "   " + info.getUser() + "  "
+					+ info.getJob() + "    \t " + DateUtil.getGMTTime(info.getConnect_time()) + "   "
+					+ DateUtil.getGMTTime(info.getJob_time()));
+		}
+
+	}
+
+	// Change another user's password.
+	public static void changePassword() throws Exception {
+		String user = "ab";
+		String pwd = "golden";
+		boolean result = server.changePassword(user, pwd);
+		System.out.println(result);
+	}
+
+	// Change the password of the currently logged-in user.
+	public static void changeMyPassword() throws Exception {
+		String oldPwd = "golden";
+		String newPwd = "golden123";
+		boolean result = server.changeMyPassword(oldPwd, newPwd);
+		System.out.println(result);
+	}
+
+	// Change a user's privilege group.
+	public static void changePriv() throws Exception {
+		boolean result = server.changePriv("ab", PrivGroup.SA);
+		System.out.println(result);
+	}
+
+	// Add a user.
+	public static void addUser() throws Exception {
+		boolean result = server.addUser("你好", "123", PrivGroup.TA);
+		System.out.println(result);
+	}
+
+	// Remove a user.
+	public static void removeUser() throws Exception {
+		boolean result = server.removeUser("你好");
+		System.out.println(result);
+	}
+
+	// Get the collection of database user records (up to 800).
+	public static void getUsers() throws Exception {
+		Entity<UsersInfo> result = server.getUsers(800);
+		System.out.println(result.getSucCount());
+		List<UsersInfo> list = result.getList();
+		for (UsersInfo us : list) {
+			System.out.println(us.getUser() + "---" + us.getPriv());
+		}
+	}
+
+	// Add a blacklist entry.
+	public static void addBlacklist() throws Exception {
+		BlackListInfo black = new BlackListInfo();
+		black.setAddr("192.168.1.78");
+		black.setMask("255.255.255.0");
+		black.setDesc("none");
+		boolean result = server.addBlacklist(black);
+		System.out.println(result);
+	}
+
+	// Update a blacklist entry (keyed by the old address/mask pair).
+	public static void updateBlacklist() throws Exception {
+		BlackListInfo black = new BlackListInfo();
+		black.setAddr("192.168.1.76");
+		black.setMask("255.255.255.0");
+		black.setDesc("中国");
+		boolean result = server.updateBlacklist("192.168.1.78", "255.255.255.0", black);
+		System.out.println(result);
+	}
+
+	// Remove a blacklist entry.
+	public static void removeBlacklist() throws Exception {
+		boolean result = server.removeBlacklist("192.168.0.77", "255.255.255.0");
+		System.out.println(result);
+	}
+
+	// List up to 10 blacklist entries.
+	public static void getBlacklists() throws Exception {
+		Entity<BlackListInfo> result = server.getBlacklists(10);
+		List<BlackListInfo> list = result.getList();
+		for (BlackListInfo bl : list) {
+
+			System.out.println(bl.getAddr() + "  " + bl.getDesc() + "   " + bl.getMask());
+		}
+	}
+
+	// Add a trusted connection (authorization entry).
+	public static void addAuthorization() throws Exception {
+		boolean result = server.addAuthorization("192.168.2.47", "255.255.255.255", PrivGroup.TA, "木有");
+		System.out.println(result);
+	}
+
+	// Update a trusted connection (keyed by the old address/mask pair).
+	public static void updateAuthorization() throws Exception {
+		AuthorizationInfo au = new AuthorizationInfo();
+		au.setIpAddr("192.168.2.47");
+		au.setMask("255.255.255.255");
+		au.setDesc("神马what");
+		au.setPriv(PrivGroup.SA);
+		boolean result = server.updateAuthorization("192.168.2.47", "255.255.255.255", au);
+		System.out.println(result);
+	}
+
+	// Remove a trusted connection.
+	public static void removeAuthorization() throws Exception {
+		boolean result = server.removeAuthorization("192.168.2.42", "255.255.255.255");
+		System.out.println(result);
+	}
+
+	// List up to 4 trusted-connection entries.
+	public static void getAuthorizations() throws Exception {
+		Entity<AuthorizationInfo> result = server.getAuthorizations(4);
+		System.out.println(result.getList());
+	}
+
+	// Get the server host time.
+	public static void getHostTime() throws Exception {
+		Date result = server.getHostTime();
+		System.out.println(DateUtil.getGMTTime(result));
+	}
+
+	// Get the brief description of a job by its id.
+	public static void getJobMessage() throws Exception {
+		int jobID = 20492;
+		JobMessage result = server.getJobMessage(jobID);
+		System.out.println(result.getJobName());
+	}
+
+	// Kill a known connection.
+	// NOTE(review): the fetched `socket` handle is never used — a hard-coded handle
+	// 4048 is killed instead; likely should pass `socket`.
+	public static void KillConnection() throws Exception {
+		int socket = server.getConnections(2)[0];
+		boolean result = server.KillConnection(4048);
+		System.out.println(result);
+	}
+
+	// Get a string-valued database system parameter (buffer length 1024).
+	public static void getDbInfo1() throws Exception {
+		String result = server.getDbInfo1(RtdbDbParamIndex.GOLDEN_PARAM_ARV_PAGES_FILE, 1024);
+		System.out.println(result);
+	}
+
+	// Get an integer-valued database system parameter.
+	public static void getDbInfo2() throws Exception {
+		int result = server.getDbInfo2(RtdbDbParamIndex.GOLDEN_PARAM_ARCHIVE_BATCH_SIZE);
+		System.out.println(result);
+	}
+
+	// Get logical drive letters (note: the drivers buffer is caller-maintained and
+	// should be at least 32 bytes long, per the vendor docs).
+	public static void getLogicalDrivers() throws Exception {
+		String result = server.getLogicalDrivers();
+		System.out.println(result);
+	}
+
+	// Open a directory so its files and subdirectories can be traversed.
+	public static void OpenPath() throws Exception {
+		boolean result = server.openPath("C:\\");
+		System.out.println(result);
+	}
+
+	// Read files/subdirectories from the opened directory.
+	// NOTE(review): loops forever — it only terminates when readPath() throws at
+	// end-of-listing; confirm this is the intended demo behavior.
+	public static void ReadPath() throws Exception {
+		server.openPath("E:\\ios");
+		while (true) {
+
+			Rtdb_Path result = server.readPath();
+			System.out.println(result.getPath());
+		}
+
+	}
+
+	// Create a directory on the server.
+	public static void mkDir() throws Exception {
+		boolean result = server.mkDir("C:\\golden\\cc");
+		System.out.println(result);
+	}
+
+	// Get the size of a server-side file.
+	public static void getFileSize() throws Exception {
+		long result = server.getFileSize("c:\\golden\\data\\000001.rdf");
+		System.out.println(result);
+	}
+
+	// Read up to 1024 bytes of a server-side file starting at offset 0.
+	public static void ReadFile() throws Exception {
+		String result = server.readFile("c:\\golden\\data\\000001.rdf", 0, 1024);
+		System.out.println(result);
+	}
+
+	// Get the privilege group of the current login.
+	public static void getPriv() {
+		PrivGroup privGroup = server.getPriv();
+		System.out.println(privGroup.toString());
+	}
+}

+ 493 - 0
golden-example/src/main/java/com/rtdb/test/SnapshotTest.java

@@ -0,0 +1,493 @@
+package com.rtdb.test;
+
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.Random;
+
+import com.rtdb.api.callbackInter.RSDataChange;
+import com.rtdb.api.callbackInter.RSDataChangeEx;
+import com.rtdb.api.model.MemoryStream;
+import com.rtdb.api.model.NamedDataTypeField;
+import com.rtdb.api.model.NamedType;
+import com.rtdb.api.model.NamedTypeNameToFieldsCount;
+import com.rtdb.api.model.RtdbData;
+import com.rtdb.api.util.BytesConvertUtil;
+import com.rtdb.api.util.DateUtil;
+import com.rtdb.enums.DataSort;
+import com.rtdb.enums.Quality;
+import com.rtdb.model.BlobData;
+import com.rtdb.model.CoorData;
+import com.rtdb.model.DatetimeData;
+import com.rtdb.model.DoubleData;
+import com.rtdb.model.Entity;
+import com.rtdb.model.ErrorParse;
+import com.rtdb.model.IntData;
+import com.rtdb.model.NamedTypeData;
+import com.rtdb.model.SearchCondition;
+import com.rtdb.service.impl.BaseImpl;
+import com.rtdb.service.impl.ServerImpl;
+import com.rtdb.service.impl.ServerImplPool;
+import com.rtdb.service.impl.SnapshotImpl;
+import com.rtdb.service.inter.Base;
+import com.rtdb.service.inter.Server;
+import com.rtdb.service.inter.Snapshot;
+
+/**
+ * <p>
+ * Description:快照服务类测试
+ * </p>
+ * <p>
+ * Copyright: Copyright (c) 2012 GoldenAPI_Java , 北京庚顿数据科技有限公司
+ * </p>
+ * <p>
+ * Company: 北京庚顿数据科技有限公司(www.golden-data.com.cn)
+ * </p>
+ */
+// Manual smoke-test driver for the Golden RTDB snapshot service: each method
+// exercises one Snapshot API; uncomment the desired call in main() to run it.
+@SuppressWarnings("unused")
+public class SnapshotTest {
+	// Snapshot service handle; created in main().
+	private static Snapshot snap = null;
+	// Base service handle, used for tag search and named-type metadata lookups.
+	private static Base base = null;
+	//private static ServerImpl server = null;
+
+	public static void main(String[] args) {
+		try {
+			//ServerImplPool pool = new ServerImplPool("192.168.1.144", 6327, "sa", "golden", 1, 8);
+			// NOTE(review): hard-coded demo host/credentials — adjust before running.
+			ServerImpl server = new ServerImpl("172.168.1.3", 6327, "sa", "golden");
+			//server = pool.getServerImpl();
+			snap = new SnapshotImpl(server);
+		 
+			base = new BaseImpl(server);
+			// cancelSubscribeSnapshots();
+			// fixCoorSnapshots();//3.0测试完成
+			// fixDoubleSnapshot();//3.0测试完成
+			// fixIntSnapshot();//3.0测试完成
+			// getBlobSnapshot();//3.0测试完成
+			// getBlobSnapshots();//3.0测试完成与GEM显示数据不同
+			// getCoorSnapshots();//3.0测试完成
+			// getDateTimeSnapshots();//3.0测试完成
+			// getDoubleSnapshots();// 3.0测试完成
+			// getIntSnapshots();//3.0测试完成
+			// getNamedTypeSnapshot();//此API暂缓测试
+			// getNamedTypeSnapshots();//此API暂缓测试
+			// pubDateTimeSnapshots();//3.0测试完成
+			//putBlobSnapshot();//3.0测试完成
+			// putBlobSnapshots();//3.0测试完成
+			 //putCoorSnapshots();//3.0测试完成
+			// putDoubleSnapshots();//3.0测试完成
+			// putIntSnapshots();//3.0测试完成。
+			//putNamedTypeSnapshot();// 此API暂缓测试
+			// putNamedTypeSnapshots();//此API暂缓测试
+			// subscribeSnapshots();//3.0测试完成
+			 subscribsnapshotsEx();//3.0测试完成
+			 //server.close();
+
+		} catch (Exception e) {
+			e.printStackTrace();
+		}
+	}
+
+	// Read the blob snapshot of a single tag and print it decoded as GBK text.
+	public static void getBlobSnapshot() throws Exception {
+		int id = 233;
+		BlobData b = snap.getBlobSnapshot(id);
+		byte[] blob = b.getBlob();
+		System.out.println(new String(blob, "gbk"));
+
+	}
+
+	// Batch-read real-time data of dateTime-typed tag points.
+	public static void getDateTimeSnapshots() throws Exception {
+		int[] ids = { 10115, 10116 };
+		Entity<DatetimeData> en = snap.getDateTimeSnapshots(ids);
+		List<DatetimeData> list = en.getList();
+		for (DatetimeData d : list) {
+			Date value = d.getValue();
+			System.out
+					.println(DateUtil.getGMTTime(value) + "   " + ErrorParse.getGoldenError(d.getError()).getErrmsg());
+		}
+
+	}
+
+	// Get a single snapshot of a named-type (user-defined struct) tag point.
+	// NOTE(review): `fName` below is null when dereferenced (fName[i] = ...) — this
+	// method throws NullPointerException as written; the @SuppressWarnings("null")
+	// hints it was known. Also the first three readFloat results are unused.
+	@SuppressWarnings("null")
+	private static void getNamedTypeSnapshot() throws Exception {
+		int id = 6022;
+		short length = 12;
+
+		NamedTypeData t = snap.getNamedTypeSnapshot(id, length);
+		// Raw snapshot payload bytes.
+		byte[] bz = t.getValue();
+		// Decode fields by column name / column data type.
+		MemoryStream m = new MemoryStream(bz);
+		float readFloat = m.readFloat();
+		float readFloat2 = m.readFloat();
+		float readFloat3 = m.readFloat();
+		int[] ids = { id };
+
+		// 1. Resolve the named-type name from the tag id.
+		List<NamedTypeNameToFieldsCount> p = base.getNamedTypeNamesProperty(ids);
+
+		String[] name = new String[p.size()];
+		int[] lens = new int[p.size()];
+		for (int i = 0; i < p.size(); i++) {
+			NamedTypeNameToFieldsCount nt = p.get(i);
+			// Named-type name.
+			name[i] = nt.getName().trim();
+			// Named-type byte length.
+			lens[i] = nt.getLen();
+		}
+		// Column properties of the named type (count, names, byte lengths, data types).
+		NamedType namedType = base.getNamedType(name[0]);
+		List<NamedDataTypeField> list = namedType.getTypeFields();
+		String[] fName = null;
+		int[] len = new int[list.size()];
+		for (int i = 0; i < list.size(); i++) {
+			fName[i] = list.get(i).getFieldName();
+			len[i] = list.get(i).getLength();
+		}
+		byte[] value = t.getValue();
+		MemoryStream stream = new MemoryStream(value);
+		boolean v1 = ((stream.readByte() > 0) ? true : false);
+		float v2 = stream.readFloat();
+
+		System.out.println(v1);
+		System.out.println(v2);
+
+	}
+
+	// Batch-get snapshots of named-type tag points.
+	// NOTE(review): the inner loop prints value[i] instead of value[j] — looks like
+	// an index bug; confirm intended output.
+	private static void getNamedTypeSnapshots() throws Exception {
+		int[] ids = { 116510 };
+		short[] lengths = {};
+
+		Entity<NamedTypeData> nd = snap.getNamedTypeSnapshots(ids, lengths);
+		List<NamedTypeData> list = nd.getList();
+		for (int i = 0; i < list.size(); i++) {
+			byte[] value = list.get(i).getValue();
+			for (int j = 0; j < value.length; j++) {
+
+				System.out.println(value[i]);
+			}
+		}
+
+	}
+
+	// Batch-write dateTime-typed snapshots and print per-point result codes.
+	private static void pubDateTimeSnapshots() throws Exception {
+		List<DatetimeData> list = new ArrayList<DatetimeData>();
+		DatetimeData d = new DatetimeData();
+		String date = "2018-10-16 11:45:00";
+		SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+
+		d.setDateTime(new Date());
+		d.setId(6);
+		d.setValue(format.parse(date));
+		d.setMs((short) 999);
+		d.setQuality(Quality.GOOD.getNum());
+		list.add(d);
+		Entity<DatetimeData> entity = snap.putDatetimeSnapshots(list);
+		List<DatetimeData> res = entity.getList();
+		for (DatetimeData data : res) {
+			System.out.println(data.getId() + "    " + ErrorParse.getGoldenError(data.getError()).getErrmsg());
+		}
+
+	}
+
+	// Write a single named-type tag snapshot: packs int(4) + float(4) + int(4)
+	// into a 12-byte payload matching the type's declared field layout.
+	private static void putNamedTypeSnapshot() throws Exception {
+		NamedTypeData named = new NamedTypeData();
+		named.setId(39440);
+		named.setLen(12);
+		byte[] by = new byte[12];
+		int c = 3;
+		float d = 38;
+		int e = 29;
+		byte[] es = BytesConvertUtil.intToBytes(e);
+		byte[] ds = BytesConvertUtil.floatToBytes(d,0);
+		byte[] cs = BytesConvertUtil.intToBytes(c);
+		System.arraycopy(cs, 0, by, 0, cs.length);
+		System.arraycopy(ds, 0, by, cs.length, ds.length);
+		System.arraycopy(es, 0, by, cs.length+ds.length, es.length);
+		
+		named.setValue(by);
+		named.setQuality((short) 0);
+		named.setDatetime(new Date());
+		snap.putNamedTypeSnapshot(named);
+
+	}
+
+	// Batch-write named-type tag snapshots.
+	private static void putNamedTypeSnapshots() throws Exception {
+		List<NamedTypeData> list = new ArrayList<NamedTypeData>();
+		NamedTypeData named = new NamedTypeData();
+		named.setId(116393);
+		named.setLen(20);
+
+		named.setValue("112".getBytes());
+		named.setQuality((short) 2);
+
+		named.setDatetime(new Date());
+		list.add(named);
+
+		snap.putNamedTypeSnapshots(list);
+	}
+
+	// Subscribe to snapshot-change notifications for a batch of tag points,
+	// passing a user parameter through to the callback (Ex variant).
+	public static void subscribsnapshotsEx() throws Exception {
+		/*
+		 * Base base = new BaseImpl(new ServerImpl("127.0.0.1", 6327, "sa",
+		 * "golden")); int[] ids = { 1, 2, 3, 4, 5 };
+		 */
+		int count = base.getTableSizeByName("QSFJ");
+		Object param = " abc ";
+		SearchCondition s = new SearchCondition();
+		s.setTablemask("QSFJ");
+		int[] ids = base.search(s, count, DataSort.SORT_BY_ID);
+		int[] errors = new int[ids.length];
+		snap.subscribeSnapshotsEx(param, ids, new RSDataChangeEx() {
+
+			@Override
+			public void run(Object param, RtdbData[] goldenDatas) {
+				System.out.print(param + ": \t" + goldenDatas.length);
+				for (int i = 0; i < goldenDatas.length; i++) {
+					System.out.println(",id: " + goldenDatas[i].getId() + " :: " + goldenDatas[i].getValue() + " :: "
+							+ DateUtil.dateToString(goldenDatas[i].getDate()) + " ");
+				}
+				System.out.println(" end");
+
+			}
+		}, errors);
+
+	}
+
+	// Read int-typed snapshot values.
+	public static void getIntSnapshots() throws Exception {
+		Entity<IntData> ents = snap.getIntSnapshots(new int[] { 10113 });
+		for (IntData db : ents.getList())
+			System.out.println(db.getId() + "---" + db.getValue() + "---" + DateUtil.getGMTTime(db.getDateTime()));
+	}
+
+	// Batch-insert int-typed snapshots and print per-point result codes.
+	public static void putIntSnapshots() throws Exception {
+		List<IntData> list = new ArrayList<IntData>();
+		IntData data = new IntData();
+		data.setDateTime(new Date());
+		data.setState(22);
+		data.setId(239);
+		data.setQuality((short) 0);
+		data.setError(0);
+		list.add(data);
+		IntData data1 = new IntData();
+		data1.setDateTime(new Date());
+		data1.setState(13);
+		data1.setId(241);
+		data1.setQuality((short) 0);
+		data1.setError(0);
+		list.add(data1);
+		IntData data2 = new IntData();
+		data2.setDateTime(new Date());
+		data2.setState(13);
+		data2.setId(242);
+		data2.setQuality((short) 0);
+		data2.setError(0);
+		list.add(data2);
+		
+		int count = snap.putIntSnapshots(list);
+		System.out.println("成功了    " + count + " 条     id: ");
+		for (IntData intData : list) {
+			System.out.println(intData.getId() + "    " + ErrorParse.getGoldenError(intData.getError()).getErrmsg());
+		}
+	}
+
+	// Read double-typed snapshot values.
+	public static void getDoubleSnapshots() throws Exception {
+		int[] ids = { 10108 };
+		Entity<DoubleData> d = snap.getDoubleSnapshots(ids);
+		List<DoubleData> list = d.getList();
+		for (DoubleData db : list) {
+			System.out.println(db.getId() + "---" + db.getValue() + "---" + DateUtil.getGMTTime(db.getDateTime()));
+		}
+
+	}
+
+	// Batch-write double (floating point) snapshots and print per-point results.
+	public static void putDoubleSnapshots() throws Exception {
+		List<DoubleData> doubleDataList = new ArrayList<DoubleData>();
+		DoubleData data = new DoubleData();
+		data.setId(248);
+		data.setValue(200.12);
+		data.setDateTime(new Date());
+		DoubleData data3 = new DoubleData();
+		data3.setId(250);
+		data3.setValue(200.12);
+		data3.setDateTime(new Date());
+		DoubleData data1= new DoubleData();
+		data1.setId(249);
+		data1.setValue(1000.30);
+		data1.setDateTime(new Date());
+		doubleDataList.add(data);
+		doubleDataList.add(data3);
+		doubleDataList.add(data1);
+		int count = snap.putDoubleSnapshots(doubleDataList);
+		System.out.println("成功   " + count + "  条    ");
+		for (DoubleData d : doubleDataList) {
+			System.out.println( "id:"+d.getId() + "   " + ErrorParse.getGoldenError(d.getError()).getErrmsg() + "\r\t");
+		}
+	}
+	// Batch-fix (amend) int-typed snapshots.
+	public static void fixIntSnapshot() throws Exception {
+		List<IntData> list = new ArrayList<IntData>();
+		IntData d = new IntData();
+		d.setId(10113);
+		d.setValue(666);
+		list.add(d);
+		int n = snap.fixIntSnapshot(list);
+		System.out.println(n);
+	}
+
+	// Batch-fix (amend) double-typed snapshots with random values.
+	public static void fixDoubleSnapshot() throws Exception {
+
+		List<DoubleData> list = new ArrayList<DoubleData>();
+		Random r = new Random();
+		DoubleData d = new DoubleData();
+		d.setId(1);
+		d.setValue((r.nextDouble()) * 100);
+		list.add(d);
+		DoubleData d2 = new DoubleData();
+		d2.setId(2);
+		d2.setValue((r.nextDouble()) * 100);
+		list.add(d2);
+		int n = snap.fixDoubleSnapshot(list);
+		System.out.println(n);
+
+	}
+
+	// Read coordinate-typed snapshots (x/y pairs).
+	public static void getCoorSnapshots() throws Exception {
+		int[] ids = new int[] { 10105, 10106 };
+		Entity<CoorData> ents = snap.getCoorSnapshots(ids);
+		List<CoorData> list = ents.getList();
+		for (int i = 0; i < list.size(); i++) {
+
+			System.out.println(ents.getRetInfo() + " " + list.get(i).getX() + " " + list.get(i).getY());
+		}
+	}
+
+	// Batch-write coordinate-typed snapshot values and print per-point results.
+	public static void putCoorSnapshots() throws Exception {
+		List<CoorData> list = new ArrayList<CoorData>();
+		CoorData data = new CoorData();
+		data.setId(251);
+		data.setX((float) 323.2);
+		data.setY((float) 333.3);
+		data.setDateTime(new Date());
+		list.add(data);
+		int count = snap.putCoorSnapshots(list);
+		System.out.print("成功    " + count + "条          id:   ");
+		for (CoorData coorData : list) {
+			System.out.println(coorData.getId() + "   " + ErrorParse.getGoldenError(coorData.getError()).getErrmsg());
+		}
+
+	}
+
+	// Batch-fix (amend) coordinate-typed snapshots.
+	public static void fixCoorSnapshots() throws Exception {
+		List<CoorData> list = new ArrayList<CoorData>();
+		CoorData d = new CoorData();
+		d.setId(10107);
+		d.setDateTime(new Date());
+		d.setX((float) 988.22);
+		d.setY((float) 466.23);
+		list.add(d);
+		CoorData d2 = new CoorData();
+		d2.setId(10108);
+		d2.setDateTime(new Date());
+		d2.setX((float) 989.22);
+		d2.setY((float) 277.23);
+		list.add(d2);
+		int n = snap.fixCoorSnapshots(list);
+		System.out.println(n);
+	}
+
+	// Batch-get blob snapshots and dump the raw bytes plus error code.
+	public static void getBlobSnapshots() throws Exception {
+		int[] ids = new int[] { 10111 };
+		Entity<BlobData> ents = snap.getBlobSnapshots(ids);
+		// for(int i = 0; i < ents.getList().size(); i++)
+		for (BlobData data : ents.getList()) {
+			byte[] blob = data.getBlob();
+			for (int j = 0; j < blob.length; j++) {
+
+				System.out.print(blob[j] + " ");
+			}
+			System.out.println(data.getError());
+			System.out.println();
+		}
+		// System.out.println(new String(blob,"gbk"));
+	}
+
+	// Write a single blob-typed snapshot.
+	public static void putBlobSnapshot() throws Exception {
+		BlobData data = new BlobData();
+		data.setId(252);
+		String s = "zhong'gu";
+		byte[] bytes = s.getBytes();
+		data.setBlob(bytes);
+		data.setDatetime(new Date());
+		data.setQuality(Quality.GOOD);
+		boolean b = snap.putBlobSnapshot(data);
+		System.out.println(b);
+
+	}
+
+	// Batch-write blob-typed snapshots.
+	public static void putBlobSnapshots() throws Exception {
+		List<BlobData> datas = new ArrayList<BlobData>();
+		BlobData data = new BlobData();
+
+		data.setId(242);
+		String s = "中";
+		byte[] bytes = s.getBytes();
+		data.setBlob(bytes);
+		data.setDatetime(new Date());
+		data.setQuality(Quality.GOOD);
+		datas.add(data);
+		int result = snap.putBlobSnapshots(datas);
+		System.out.println(result);
+
+	}
+
+	// Subscribe (by tag name) to snapshot-change notifications for a batch of tags.
+	public static void subscribeSnapshots() throws Exception {
+		int[] ids = { 1 };
+		String[] str ={"test.f"};
+		snap.subscribeSnapshots(str, new RSDataChange() {
+
+			@Override
+			public void run(RtdbData[] rtdbDatas) {
+				System.out.print(rtdbDatas.length + "条数据 ");
+				for (int i = 0; i < rtdbDatas.length; i++) {
+					System.out.print("  "+rtdbDatas[i].getValue() + " , ");
+				}
+				System.out.println(" end");
+			}
+
+		});
+	}
+
+	// Cancel snapshot-change subscriptions: subscribe, wait, cancel — twice,
+	// once for each subscription variant.
+	public static void cancelSubscribeSnapshots() throws Exception {
+
+		subscribeSnapshots();
+
+		Thread.sleep(5000);
+		snap.cancelSubscribeSnapshots();
+
+		subscribsnapshotsEx();
+		Thread.sleep(20000);
+		snap.cancelSubscribeSnapshots();
+
+	}
+
+}

BIN
golden-example/src/main/lib/commons-beanutils-1.8.3.jar


BIN
golden-example/src/main/lib/commons-logging-1.1.1.jar


BIN
golden-example/src/main/lib/golden-java-sdk-3.0.27.jar


BIN
golden-example/src/main/lib/protobuf-java-2.6.1.jar


+ 14 - 0
golden-history-opentsdb/README.md

@@ -0,0 +1,14 @@
+# golden-history-opentsdb
+    golden历史数据迁移至opentsdb
+    
+
+
+
+
+
+
+
+
+
+
+

+ 29 - 0
golden-history-opentsdb/build.gradle

@@ -0,0 +1,29 @@
+// Build script for the golden-history-opentsdb module (Golden -> OpenTSDB history migration).
+buildscript {
+    repositories {
+        mavenLocal()
+        // Use HTTPS for the Aliyun mirror: resolving dependencies over plain HTTP
+        // is open to MITM tampering and is rejected by modern Gradle by default.
+        maven { url "https://maven.aliyun.com/nexus/content/groups/public" }
+        mavenCentral()
+    }
+    dependencies {
+        classpath("$bootGroup:spring-boot-gradle-plugin:$springBootVersion")
+    }
+}
+
+apply plugin: "$bootGroup"
+apply plugin: 'io.spring.dependency-management'
+
+
+dependencies {
+
+    compile project(":common")
+    compile project(":opentsdb-client")
+    // Bundled Golden SDK jars not published to any Maven repository.
+    compile fileTree(dir: 'src/main/lib', include: '*.jar')
+
+    compile("$bootGroup:spring-boot-starter-web")
+    compile("$bootGroup:spring-boot-starter-undertow")
+    compile("$bootGroup:spring-boot-starter-log4j2")
+    compile 'com.alibaba:fastjson:1.2.17'
+    testCompile("$bootGroup:spring-boot-starter-test")
+
+}
+

+ 22 - 0
golden-history-opentsdb/src/main/java/com/gyee/wisdom/Bootstrap.java

@@ -0,0 +1,22 @@
+package com.gyee.wisdom;
+
+import org.springframework.boot.SpringApplication;
+import org.springframework.boot.autoconfigure.SpringBootApplication;
+import org.springframework.boot.web.servlet.ServletComponentScan;
+import org.springframework.scheduling.annotation.EnableScheduling;
+
+/**
+ * @author northriver
+ */
+// Spring Boot entry point for the golden-history-opentsdb service; component
+// scanning, servlet scanning and @Scheduled task support are enabled here.
+@SpringBootApplication
+@ServletComponentScan
+@EnableScheduling
+public class Bootstrap {
+
+    // Launches the embedded container and application context.
+    public static void main(String[] args) {
+        SpringApplication.run(Bootstrap.class, args);
+    }
+
+}
+
+

BIN
golden-history-opentsdb/src/main/lib/commons-beanutils-1.8.3.jar


BIN
golden-history-opentsdb/src/main/lib/commons-logging-1.1.1.jar


BIN
golden-history-opentsdb/src/main/lib/golden-java-sdk-3.0.27.jar


BIN
golden-history-opentsdb/src/main/lib/protobuf-java-2.6.1.jar


+ 6 - 0
golden-history-opentsdb/src/main/resources/application.yaml

@@ -0,0 +1,6 @@
+# HTTP port the history-migration service listens on.
+server:
+  port: 8066
+
+# Application name used for identification (e.g. in logs / registries).
+spring:
+  application:
+    name: ygys-history

+ 11 - 0
golden-history-opentsdb/src/main/resources/banner.txt

@@ -0,0 +1,11 @@
+                                   _     _     _
+                                  | |   (_)   | |
+  _   _  __ _ _   _ ___   ______  | |__  _ ___| |_ ___  _ __ _   _
+ | | | |/ _` | | | / __| |______| | '_ \| / __| __/ _ \| '__| | | |
+ | |_| | (_| | |_| \__ \          | | | | \__ \ || (_) | |  | |_| |
+  \__, |\__, |\__, |___/          |_| |_|_|___/\__\___/|_|   \__, |
+   __/ | __/ | __/ |                                          __/ |
+  |___/ |___/ |___/                                          |___/
+
+
+ :: ygys-history ::                    version 1.0.0

+ 64 - 0
golden-history-opentsdb/src/main/resources/log4j2.xml

@@ -0,0 +1,64 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Log4j2 config: console output plus per-level rolling files (info/warn/error),
+     rolled daily and deleted after 24 hours.
+     NOTE(review): ${baseDir} is never defined in <Properties>, so the Delete
+     actions' basePath will not resolve to the logs directory — confirm whether
+     "logs" was intended here. Also only RollingFileError is attached to the
+     root logger; the info/warn appenders are commented out. -->
+<Configuration status="WARN">
+    <Properties>
+        <Property name="Pattern">%d{yyyy-MM-dd HH:mm:ss.SSS} %5p %t %M(%F:%L) %m%n</Property>
+    </Properties>
+    <Filter type="ThresholdFilter" level="INFO"/>
+
+    <Appenders>
+        <Console name="Console" target="SYSTEM_OUT">
+            <PatternLayout pattern="${Pattern}"/>
+        </Console>
+        <RollingFile name="RollingFileInfo" fileName="logs/info.log"
+                     filePattern="logs/%d{yyyy-MM}/info-%d{yyyy-MM-dd}.%i.log">
+            <PatternLayout pattern="${Pattern}"/>
+            <ThresholdFilter level="INFO" onMatch="ACCEPT" onMismatch="DENY"/>
+            <Policies>
+                <TimeBasedTriggeringPolicy/>
+            </Policies>
+            <DefaultRolloverStrategy>
+                <Delete basePath="${baseDir}" maxDepth="2">
+                    <IfFileName glob="*/*.log" />
+                    <IfLastModified age="24H" />
+                </Delete>
+            </DefaultRolloverStrategy>
+        </RollingFile>
+        <RollingFile name="RollingFileWarn" fileName="logs/warn.log"
+                     filePattern="logs/%d{yyyy-MM}/warn-%d{yyyy-MM-dd}.%i.log">
+            <PatternLayout pattern="${Pattern}"/>
+            <ThresholdFilter level="WARN" onMatch="ACCEPT" onMismatch="DENY"/>
+            <Policies>
+                <TimeBasedTriggeringPolicy/>
+            </Policies>
+            <DefaultRolloverStrategy>
+                <Delete basePath="${baseDir}" maxDepth="2">
+                    <IfFileName glob="*/*.log" />
+                    <IfLastModified age="24H" />
+                </Delete>
+            </DefaultRolloverStrategy>
+        </RollingFile>
+        <RollingFile name="RollingFileError" fileName="logs/error.log"
+                     filePattern="logs/%d{yyyy-MM}/error-%d{yyyy-MM-dd}.%i.log">
+            <PatternLayout pattern="${Pattern}"/>
+            <ThresholdFilter level="ERROR" onMatch="ACCEPT" onMismatch="DENY"/>
+            <Policies>
+                <TimeBasedTriggeringPolicy/>
+            </Policies>
+            <DefaultRolloverStrategy>
+                <Delete basePath="${baseDir}" maxDepth="2">
+                    <IfFileName glob="*/*.log" />
+                    <IfLastModified age="24H" />
+                </Delete>
+            </DefaultRolloverStrategy>
+        </RollingFile>
+    </Appenders>
+
+    <Loggers>
+        <Root level="WARN">
+            <AppenderRef ref="Console"/>
+         <!-- <appender-ref ref="RollingFileInfo"/>
+            <appender-ref ref="RollingFileWarn"/>-->
+            <appender-ref ref="RollingFileError"/>
+        </Root>
+    </Loggers>
+</Configuration>

+ 14 - 0
golden-realtime-kafka/README.md

@@ -0,0 +1,14 @@
+# golden-realtime-kafka
+    golden实时数据转发到kafka
+    
+
+
+
+
+
+
+
+
+
+
+

+ 33 - 0
golden-realtime-kafka/build.gradle

@@ -0,0 +1,33 @@
+buildscript {
+    repositories {
+        mavenLocal()
+        // Use HTTPS — plain http is insecure and rejected by Gradle 7+ by default.
+        maven { url "https://maven.aliyun.com/nexus/content/groups/public" }
+        mavenCentral()
+    }
+    dependencies {
+        classpath("$bootGroup:spring-boot-gradle-plugin:$springBootVersion")
+    }
+}
+
+apply plugin: "$bootGroup"
+apply plugin: 'io.spring.dependency-management'
+
+
+dependencies {
+
+    compile project(":common")
+    // Local Golden SDK jars that are not published to any repository.
+    compile fileTree(dir: 'src/main/lib', include: '*.jar')
+
+    compile("$bootGroup:spring-boot-starter-web")
+    compile("$bootGroup:spring-boot-starter-undertow")
+    compile("$bootGroup:spring-boot-starter-log4j2")
+
+    compile 'com.alibaba:fastjson:1.2.17'
+    compile 'org.springframework.kafka:spring-kafka:2.2.7.RELEASE'
+
+    compile 'com.opencsv:opencsv:4.5'
+
+    testCompile("$bootGroup:spring-boot-starter-test")
+
+}
+

+ 23 - 0
golden-realtime-kafka/src/main/java/com/gyee/wisdom/ApplicationReadyEventListener.java

@@ -0,0 +1,23 @@
+package com.gyee.wisdom;
+
+
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.context.event.ApplicationReadyEvent;
+import org.springframework.context.ApplicationListener;
+import org.springframework.stereotype.Component;
+
+/**
+ * Starts the {@link CalculateServer} once the Spring context is fully
+ * initialized, i.e. when {@link ApplicationReadyEvent} is published.
+ */
+@Slf4j
+@Component
+public class ApplicationReadyEventListener implements
+        ApplicationListener<ApplicationReadyEvent> {
+
+    @Autowired
+    private CalculateServer calculateServer;
+
+    @Override
+    public void onApplicationEvent(ApplicationReadyEvent event) {
+        // Log through SLF4J (not System.out) so output honors the log4j2 config;
+        // also fixes the "rised" typo of the original println.
+        log.info("ApplicationReadyEvent raised: {}", event);
+        calculateServer.start();
+    }
+
+}

+ 22 - 0
golden-realtime-kafka/src/main/java/com/gyee/wisdom/Bootstrap.java

@@ -0,0 +1,22 @@
+package com.gyee.wisdom;
+
+import org.springframework.boot.SpringApplication;
+import org.springframework.boot.autoconfigure.SpringBootApplication;
+import org.springframework.boot.web.servlet.ServletComponentScan;
+import org.springframework.scheduling.annotation.EnableAsync;
+
+/**
+ * Application entry point for the golden-realtime-kafka service.
+ * Enables servlet component scanning and async method execution
+ * (used by {@code CalculateServer#publishRstdData}).
+ *
+ * @author northriver
+ */
+@SpringBootApplication
+@ServletComponentScan
+@EnableAsync
+public class Bootstrap {
+
+    public static void main(String[] args) {
+        SpringApplication.run(Bootstrap.class, args);
+    }
+
+}
+
+

+ 252 - 0
golden-realtime-kafka/src/main/java/com/gyee/wisdom/CalculateServer.java

@@ -0,0 +1,252 @@
+package com.gyee.wisdom;
+
+import com.gyee.wisdom.config.ConfigProperties;
+import com.gyee.wisdom.config.GoldenConfig;
+import com.gyee.wisdom.model.TagPoint;
+import com.gyee.wisdom.model.TagPointData;
+import com.gyee.wisdom.service.CacheService;
+import com.gyee.ygys.protocol.BitMapGroup;
+import com.gyee.ygys.protocol.BitMapMessage;
+import com.gyee.ygys.utils.BytesUtil;
+import com.rtdb.api.callbackInter.RSDataChangeEx;
+import com.rtdb.api.model.RtdbData;
+import com.rtdb.api.util.DateUtil;
+import com.rtdb.enums.DataSort;
+import com.rtdb.model.SearchCondition;
+import com.rtdb.service.impl.ServerImpl;
+import com.rtdb.service.impl.SnapshotImpl;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.kafka.core.KafkaTemplate;
+import org.springframework.scheduling.annotation.Async;
+import org.springframework.stereotype.Component;
+
+import java.util.*;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Subscribes to realtime snapshot data from the Golden RTDB, packs each
+ * batch into a {@link BitMapMessage} and publishes it to Kafka.
+ */
+@Slf4j
+@Component
+public class CalculateServer {
+
+    @Autowired
+    private ConfigProperties configProperties;
+
+    @Autowired
+    private CacheService cacheService;
+
+    @Autowired
+    private GoldenConfig goldenConfig;
+
+    private boolean serverStarted = false;
+
+    // Thread 1: subscribe to Golden data, pack each batch and send it to Kafka.
+    private boolean subscribeThreadFlag = false;
+    // Thread 2 (watchdog):
+    // 2.1 monitor the Golden server and reconnect on failure
+    // 2.2 monitor the Kafka broker and reconnect on failure
+    // 2.3 coordinate data production and consumption
+    private boolean watchThreadFlag = false;
+
+    // Golden realtime snapshot reader; kept so stop() can cancel the subscription.
+    private SnapshotImpl snap;
+
+    // tagId -> TagPoint lookup, loaded once at startup.
+    private HashMap<Integer, TagPoint> tagPointMap;
+
+    @Autowired
+    private KafkaTemplate<String, BitMapMessage> messageKafkaTemplate;
+
+    /**
+     * Starts the subscription and watchdog threads. Idempotent: returns true
+     * immediately if the server is already running.
+     *
+     * @return true when the server is (now) running, false if startup failed
+     */
+    public boolean start() {
+        if (serverStarted) {
+            return true;
+        }
+
+        try {
+            log.info("计算服务正在启动...... ");
+            tagPointMap = cacheService.getTagPointMap();
+
+            subscribeThreadFlag = true;
+            getSubscribeThread().start();
+
+            // Give the subscription a moment to establish before watching it.
+            sleep(5000);
+
+            watchThreadFlag = true;
+            getWatchThread().start();
+
+            serverStarted = true;
+        } catch (Exception ex) {
+            // The original swallowed this silently; log so startup failures are visible.
+            log.error("计算服务启动失败", ex);
+            serverStarted = false;
+            stop();
+            return false;
+        }
+
+        return true;
+    }
+
+    /**
+     * Cancels the Golden subscription (if any) and gives in-flight callbacks
+     * a short grace period to drain.
+     */
+    public void stop() {
+        try {
+            if (snap != null) {
+                snap.cancelSubscribeSnapshots();
+            }
+            Thread.sleep(3000);
+        } catch (Exception ex) {
+            // Best effort shutdown; record instead of swallowing silently.
+            log.warn("停止订阅时发生异常: {}", ex.getMessage());
+        }
+    }
+
+    /** Sleeps for the given number of milliseconds, preserving interrupt status. */
+    private void sleep(int milliseconds) {
+        try {
+            TimeUnit.MILLISECONDS.sleep(milliseconds);
+        } catch (InterruptedException ex) {
+            // Restore the interrupt flag so callers can observe the interruption.
+            Thread.currentThread().interrupt();
+        }
+    }
+
+    /** Builds the thread that opens the Golden snapshot subscription. */
+    Thread getSubscribeThread() {
+        return new Thread(new Runnable() {
+            public void run() {
+                log.info("golden数据订阅线程启动...");
+
+                try {
+                    int[] tagIds = cacheService.getTagIds();
+                    List<TagPoint> tagList = cacheService.getPointTags();
+
+                    // The connection stays checked out for the lifetime of the
+                    // subscription, so it is deliberately not returned to the pool.
+                    ServerImpl connection = goldenConfig.getGoldenConnectionPool().getConnection();
+                    snap = new SnapshotImpl(connection);
+                    int[] errors = new int[tagIds.length];
+                    Object param = " abc ";
+                    snap.subscribeSnapshotsEx(param, tagIds, new RSDataChangeEx() {
+
+                        @Override
+                        public void run(Object param, RtdbData[] goldenDatas) {
+                            // NSSF site: ~109k points, packets every ~74ms, avg 3168
+                            // records per packet (max 5000), ~41k records/second.
+                            publishRstdData(goldenDatas);
+                        }
+                    }, errors);
+                } catch (Exception ex) {
+                    // The original swallowed this silently; log so subscription
+                    // failures are visible to the watchdog/operators.
+                    log.error("golden数据订阅失败", ex);
+                }
+
+            }
+        });
+    }
+
+    /** Packs one snapshot batch and publishes it to the NSSFJ topic (async). */
+    @Async
+    void publishRstdData(RtdbData[] goldenDatas) {
+        // Guard: an empty batch would previously NPE in createBitMapMessage.
+        if (goldenDatas == null || goldenDatas.length == 0) {
+            return;
+        }
+        BitMapMessage msg = createBitMapMessage(goldenDatas);
+        messageKafkaTemplate.send("NSSFJ", msg);
+    }
+
+
+    /**
+     * Packs a batch of Golden snapshot records into a {@link BitMapMessage}.
+     * Records are sorted by (groupIndex, pointIndex); one {@link BitMapGroup}
+     * is emitted per groupIndex with an L2 bitmap marking the points present.
+     */
+    private BitMapMessage createBitMapMessage(RtdbData[] goldenDatas) {
+        BitMapMessage msg = new BitMapMessage();
+        msg.setGroups(new ArrayList<>());
+
+        SortedSet<TagPointData> sortedSet = new TreeSet<>();
+        for (int i = 0; i < goldenDatas.length; i++) {
+            int tagId = goldenDatas[i].getId();
+            TagPoint tp = tagPointMap.get(tagId);
+            if (tp == null) {
+                // Unknown tag: skip it instead of failing the whole batch with an NPE.
+                log.warn("未知的golden标签: {}", tagId);
+                continue;
+            }
+            sortedSet.add(new TagPointData(tp, goldenDatas[i]));
+        }
+
+        short groupIndex = -1;
+        int dataOffset = 0;
+        BitMapGroup bitMapGroup = null;
+        for (TagPointData tpd : sortedSet) {
+            if (tpd.getTagPoint().getGroupIndex() != groupIndex) {
+                // Finalize the previous group before opening a new one.
+                if (bitMapGroup != null) {
+                    trimGroup(bitMapGroup, dataOffset);
+                }
+
+                // Start a new BitMapGroup for this groupIndex.
+                bitMapGroup = new BitMapGroup();
+                dataOffset = 0;
+                int groupSize = tpd.getTagPoint().getGroupSize();
+                bitMapGroup.setBitMapL2(new BitSet(groupSize));
+                // L2 bitmap length rounded up to whole 64-bit words, in bytes.
+                bitMapGroup.setBitMapLength(((int) Math.ceil(groupSize * 1.0 / 64)) * 8);
+                bitMapGroup.setData(new byte[groupSize * 8]);
+                groupIndex = tpd.getTagPoint().getGroupIndex();
+                bitMapGroup.setGroupIndex(groupIndex);
+                msg.getGroups().add(bitMapGroup);
+                msg.getBitMapL1().set(groupIndex);
+            }
+            bitMapGroup.getBitMapL2().set(tpd.getTagPoint().getPointIndex());
+            dataOffset = writeValue(bitMapGroup.getData(), dataOffset, tpd);
+        }
+
+        // Finalize the last group (if any records survived the lookup above).
+        if (bitMapGroup != null) {
+            trimGroup(bitMapGroup, dataOffset);
+        }
+
+        return msg;
+    }
+
+    /**
+     * Serializes one record's value into the group's data buffer according to
+     * the tag's declared data type and returns the new write offset.
+     */
+    private int writeValue(byte[] groupData, int dataOffset, TagPointData tpd) {
+        String dataType = tpd.getTagPoint().getDataType();
+        switch (dataType) {
+            case "FLOAT32":
+                // Golden reports FLOAT32 tags as Double values, so fall through.
+            case "FLOAT64":
+                Double valued = (Double) tpd.getRtdbData().getValue();
+                System.arraycopy(BytesUtil.double2Byte(valued), 0, groupData, dataOffset, 8);
+                dataOffset += 8;
+                break;
+            case "BOOL":
+                // BOOL values arrive as either Double or Long; normalize to 0/1.
+                Object valueo = tpd.getRtdbData().getValue();
+                byte valueb;
+                if (valueo instanceof Double) {
+                    valueb = (byte) (((Double) valueo) != 0 ? 1 : 0);
+                } else {
+                    valueb = (byte) (((Long) valueo) != 0 ? 1 : 0);
+                }
+                groupData[dataOffset] = valueb;
+                dataOffset += 1;
+                break;
+            case "INT":
+            case "INT32":
+                // Golden only delivers Long or Double; 32-bit ints arrive as Long,
+                // so these fall through to the 8-byte long encoding.
+            default:
+                Long valuell = (Long) tpd.getRtdbData().getValue();
+                System.arraycopy(BytesUtil.long2Bytes(valuell), 0, groupData, dataOffset, 8);
+                dataOffset += 8;
+        }
+        return dataOffset;
+    }
+
+    /**
+     * Shrinks the group's data buffer to the bytes actually written and sets
+     * the group length (data + L2 bitmap + 5 header bytes).
+     * TODO(review): copying dataOffset + 1 bytes mirrors the original code but
+     * is one byte more than was written — confirm against the BitMapMessage
+     * wire format before changing it.
+     */
+    private void trimGroup(BitMapGroup bitMapGroup, int dataOffset) {
+        byte[] tmp = new byte[dataOffset + 1];
+        System.arraycopy(bitMapGroup.getData(), 0, tmp, 0, dataOffset + 1);
+        bitMapGroup.setData(tmp);
+        bitMapGroup.setGroupLength(dataOffset + bitMapGroup.getBitMapLength() + 5);
+    }
+
+    /** Builds the watchdog thread (health checks are still TODO). */
+    Thread getWatchThread() {
+        return new Thread(new Runnable() {
+            public void run() {
+                log.info("异常处理线程启动...");
+                // Honor the flag so the watcher can be stopped; the original
+                // looped unconditionally.
+                while (watchThreadFlag) {
+                    sleep(60000);
+                }
+
+            }
+        });
+    }
+}

+ 29 - 0
golden-realtime-kafka/src/main/java/com/gyee/wisdom/config/ConfigProperties.java

@@ -0,0 +1,29 @@
+package com.gyee.wisdom.config;
+
+import lombok.Data;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.stereotype.Component;
+
+
+/**
+ * Settings bound from the {@code calculate.config.*} property prefix.
+ * All fields have sensible defaults and may be overridden in application.yaml.
+ */
+@Data
+@Component
+@ConfigurationProperties("calculate.config")
+public class ConfigProperties {
+
+    // WebSocket service URL of the data adapter
+    private String serviceUrl;
+
+    // Offline-detection interval, in milliseconds
+    private long offlineInterval = 600000;
+
+    // Polling interval of the realtime-data scan thread, in milliseconds
+    private int readThreadInterval = 1000;
+
+    // Polling interval of the state-calculation thread, in milliseconds
+    private int calcThreadInterval = 1000;
+
+    // Total installed station capacity
+    private double stationCapacity = 100;
+
+
+}

+ 62 - 0
golden-realtime-kafka/src/main/java/com/gyee/wisdom/config/GoldenConfig.java

@@ -0,0 +1,62 @@
+package com.gyee.wisdom.config;
+
+
+import com.rtdb.service.impl.ServerImpl;
+import com.rtdb.service.impl.ServerImplPool;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.context.annotation.Configuration;
+
+/**
+ * Golden RTDB connection settings (bound from {@code golden.*} properties)
+ * plus lazily-created connection pools.
+ */
+@Configuration
+//@PropertySource("classpath:/golden.properties")
+public class GoldenConfig {
+
+    @Value("${golden.server_ip:127.0.0.1}")
+    private String serverIp;
+
+    @Value("${golden.server_port:6327}")
+    private int serverPort;
+
+    @Value("${golden.user_name:sa}")
+    private String userName;
+
+    @Value("${golden.password:golden}")
+    private String password;
+
+    @Value("${golden.pool_size:10}")
+    private int poolSize;
+
+    @Value("${golden.max_pool_size:100}")
+    private int maxPoolSize;
+
+    // Upper bound on records returned by a single history query
+    @Value("${golden.query_history_limit:100000}")
+    private int queryHistoryLimit;
+
+    private ServerImplPool pool;
+
+    private GoldenConnectionPool connectionPool;
+
+    /**
+     * Lazily creates the SDK-provided connection pool. Synchronized so two
+     * concurrent first callers cannot create two pools (the original
+     * unsynchronized check-then-act was racy).
+     */
+    public synchronized ServerImplPool getServerImplPool() {
+        if (pool == null) {
+            pool = new ServerImplPool(serverIp, serverPort, userName, password, poolSize, maxPoolSize);
+        }
+        return pool;
+    }
+
+    /**
+     * Lazily creates the hand-rolled {@link GoldenConnectionPool}.
+     * Synchronized for the same check-then-act race as above.
+     */
+    public synchronized GoldenConnectionPool getGoldenConnectionPool() {
+        if (connectionPool == null) {
+            connectionPool = new GoldenConnectionPool();
+            connectionPool.init(serverIp, (short) serverPort, userName, password, poolSize, 1, maxPoolSize);
+            System.out.println("golden server ip = " + serverIp);
+        }
+        return connectionPool;
+    }
+
+    /** Creates a new, un-pooled connection; the caller owns and must close it. */
+    public ServerImpl getServerImpl() throws Exception {
+        return new ServerImpl(serverIp, serverPort, userName, password);
+    }
+
+    public int getQueryHistoryLimit() {
+        return queryHistoryLimit;
+    }
+
+}

+ 363 - 0
golden-realtime-kafka/src/main/java/com/gyee/wisdom/config/GoldenConnectionPool.java

@@ -0,0 +1,363 @@
+package com.gyee.wisdom.config;
+
+
+import com.gyee.ygys.exception.WisdomException;
+import com.rtdb.service.impl.ServerImpl;
+import org.springframework.stereotype.Component;
+
+import java.util.Enumeration;
+import java.util.Vector;
+
+/**
+ * Hand-rolled pool of Golden RTDB connections ({@link ServerImpl}).
+ * The pool is created lazily on the first {@link #getConnection()} call and
+ * grows by {@code incrementalConnections} up to {@code maxConnections}.
+ * NOTE(review): {@link #returnConnection} and {@link #init} are not
+ * synchronized while the rest of the pool is — confirm all callers use the
+ * pool from a single thread or only through getConnection().
+ */
+@Component
+public class GoldenConnectionPool {
+
+    private String serverIp = "127.0.0.1"; // server IP
+    private short serverPort = 10010; // server port
+    private String userName = "sa";
+    private String password = "golden";
+    private int defaultConnections = 1; // initial pool size
+    private int incrementalConnections = 1;// how many connections to add when the pool is exhausted
+    private int maxConnections = 10; // maximum pool size
+    private Vector connections = null; // vector of pooled connections; null until the pool is created
+
+
+    // Overwrites the connection parameters; call before the first getConnection().
+    public void  init(String serverIp, short serverPort, String userName, String password, int defaultConnections, int incrementalConnections, int maxConnections) {
+        this.serverIp = serverIp;
+        this.serverPort = serverPort;
+        this.userName = userName;
+        this.password = password;
+        this.defaultConnections = defaultConnections;
+        this.incrementalConnections = incrementalConnections;
+        this.maxConnections = maxConnections;
+    }
+
+    /**
+     * Creates the pool; the number of initially available connections is
+     * taken from the {@code defaultConnections} field.
+     */
+    private synchronized void createPool() throws Exception {
+        // Make sure the pool has not been created yet:
+        // once created, the connections vector is non-null.
+        if (connections != null) {
+            return; // already created
+        }
+        // Create the vector holding the connections (initially empty).
+        connections = new Vector();
+        // Create the initial connections as configured by defaultConnections.
+        createConnections(this.defaultConnections);
+        System.out.println(" Golden连接池创建成功! ");
+    }
+
+    /**
+     * Creates {@code numConnections} Golden connections and adds them to the
+     * {@code connections} vector.
+     * @param numConnections  number of Golden connections to create
+     */
+    private void createConnections(int numConnections) throws Exception {
+        // Create the requested number of connections in a loop.
+        for (int x = 0; x < numConnections; x++) {
+            // Has the pool already reached maxConnections? A value of 0 or
+            // less means "no limit". Stop creating once the cap is reached.
+            if (this.maxConnections > 0  && this.connections.size() >= this.maxConnections) {
+                break;
+            }
+
+            // add a new PooledConnection object to connections vector
+            try {
+                connections.addElement(new PooledConnection(newConnection()));
+
+            } catch (WisdomException e) {
+                System.out.println(" 创建Golden连接失败! " + e.getMessage());
+                throw new WisdomException();
+            }
+            System.out.println(" Golden连接己创建 ......");
+        }
+    }
+
+    /**
+     * Creates and returns a brand-new Golden connection.
+     * @return the newly created Golden connection
+     */
+    private ServerImpl newConnection() throws Exception {
+        ServerImpl conn = new ServerImpl(serverIp, serverPort, userName, password);
+        return conn; // return the newly created Golden connection
+    }
+
+    /**
+     * Returns an available Golden connection via getFreeConnection(); if none
+     * is available and no more can be created (pool size limit), waits a bit
+     * and retries until one becomes free.
+     *
+     * @return an available Golden connection
+     */
+    public synchronized ServerImpl getConnection() throws Exception {
+        // Make sure the pool exists.
+        if (connections == null) {
+            createPool(); // pool not created yet — create it now
+        }
+
+        ServerImpl conn = getFreeConnection(); // try to obtain a free connection
+        // If every connection is currently in use...
+        while (conn == null) {
+            // ...wait a little and retry.
+            // NOTE(review): wait() below is Thread.sleep (see helper), so this
+            // busy-waits while holding the pool monitor — confirm acceptable.
+            wait(250);
+            conn = getFreeConnection(); // retry until a connection frees up;
+            // null here means even creating a new batch
+            // yielded no available connection
+        }
+        return conn;// return the acquired connection
+    }
+
+    /**
+     * Returns a free Golden connection from the {@code connections} vector.
+     * If none is free, creates {@code incrementalConnections} additional
+     * connections and retries; returns null when even the newly created
+     * connections are all in use.
+     * @return a free Golden connection, or null
+     */
+    private ServerImpl getFreeConnection() throws Exception {
+        // Try to take a free connection from the pool.
+        ServerImpl conn = findFreeConnection();
+        if (conn == null) {
+
+            // None free right now — grow the pool by the increment...
+            createConnections(incrementalConnections);
+            // ...and look again.
+            conn = findFreeConnection();
+            if (conn == null) {
+                // Still nothing after growing: give up and return null.
+                return null;
+            }
+        }
+        return conn;
+    }
+
+    /**
+     * Scans the pool for a connection that is not busy; marks it busy and
+     * returns it, or returns null when every connection is in use.
+     * @return a free Golden connection, or null
+     */
+    private ServerImpl findFreeConnection() throws Exception {
+        ServerImpl conn = null;
+        PooledConnection pConn = null;
+        // Enumerate every pooled connection...
+        Enumeration enumerate = connections.elements();
+
+        // ...and take the first one that is not busy.
+        while (enumerate.hasMoreElements()) {
+            pConn = (PooledConnection) enumerate.nextElement();
+            if (!pConn.isBusy()) {
+                // Found an idle one: take its connection and mark it busy.
+                conn = pConn.getConnection();
+                pConn.setBusy(true);
+                break; // done — one free connection found
+            }
+        }
+        return conn;// return the connection found (or null)
+    }
+
+
+    /**
+     * Returns a Golden connection to the pool, marking it idle again.
+     * Every connection obtained from the pool should be returned here
+     * when no longer needed.
+     */
+    public void returnConnection(ServerImpl conn) {
+        // If the pool does not exist there is nothing to return to.
+        if (connections == null) {
+            System.out.println(" 连接池不存在,无法返回此连接到连接池中 !");
+            return;
+        }
+
+        PooledConnection pConn = null;
+        Enumeration enumerate = connections.elements();
+        // Walk the pool looking for the wrapper of this connection...
+        while (enumerate.hasMoreElements()) {
+            pConn = (PooledConnection) enumerate.nextElement();
+            // ...identity-compare to find the matching entry...
+            if (conn == pConn.getConnection()) {
+                // ...found: flag it idle again.
+                pConn.setBusy(false);
+                break;
+            }
+        }
+    }
+
+    /**
+     * Replaces every connection in the pool with a freshly created one.
+     */
+    public synchronized void refreshConnections() throws Exception {
+        // If the pool does not exist there is nothing to refresh.
+        if (connections == null) {
+            System.out.println(" 连接池不存在,无法刷新 !");
+            return;
+        }
+
+        PooledConnection pConn = null;
+        Enumeration enumerate = connections.elements();
+        while (enumerate.hasMoreElements()) {
+
+            // Take the next pooled connection.
+            pConn = (PooledConnection) enumerate.nextElement();
+            // If it is busy, wait 5 seconds, then refresh it regardless.
+            if (pConn.isBusy()) {
+                wait(5000); // wait 5 seconds
+            }
+            // Close the old connection and swap in a new one.
+            closeConnection(pConn.getConnection());
+            pConn.setConnection(newConnection());
+            pConn.setBusy(false);
+        }
+    }
+
+    /**
+     * Closes every connection in the pool and empties it.
+     */
+    private synchronized void closeConnectionPool() throws WisdomException {
+        // If the pool does not exist there is nothing to close.
+        if (connections == null) {
+            System.out.println(" 连接池不存在,无法关闭 !");
+            return;
+        }
+
+        PooledConnection pConn = null;
+        Enumeration enumerate = connections.elements();
+        while (enumerate.hasMoreElements()) {
+            pConn = (PooledConnection) enumerate.nextElement();
+            // If busy, wait 5 seconds...
+            if (pConn.isBusy()) {
+                wait(5000); // wait 5 seconds
+            }
+            // ...then close it regardless...
+            closeConnection(pConn.getConnection());
+            // ...and remove it from the pool vector.
+            connections.removeElement(pConn);
+        }
+        // Mark the pool as gone.
+        connections = null;
+    }
+
+    /**
+     * Closes a single Golden connection, logging (not propagating) failures.
+     */
+    private void closeConnection(ServerImpl conn) {
+        try {
+            conn.close();
+        } catch (Exception e) {
+            System.out.println(" 关闭Golden连接出错: " + e.getMessage());
+        }
+    }
+
+    /**
+     * Closes the pool when the object is garbage-collected.
+     * NOTE(review): finalize() is deprecated and not guaranteed to run — an
+     * explicit shutdown hook or @PreDestroy would be more reliable.
+     */
+    @Override
+    public void finalize() {
+        try {
+            closeConnectionPool();
+        } catch (WisdomException e) {
+            System.out.println(" 关闭Golden连接池出错: " + e.getMessage());
+        }
+    }
+
+
+    /**
+     * Sleeps the current thread for the given number of milliseconds.
+     * NOTE(review): despite the name this is Thread.sleep, not Object.wait,
+     * so it does NOT release any monitor held by the caller.
+     */
+    private void wait(int mSeconds) {
+        try {
+            Thread.sleep(mSeconds);
+
+        } catch (InterruptedException e) {
+        }
+    }
+
+    /**
+     * Returns the initial pool size.
+     *
+     * @return number of connections the pool starts with
+     */
+    public int getDefaultConnections() {
+        return this.defaultConnections;
+    }
+
+    /**
+     * Sets the initial pool size.
+     */
+    public void setDefaultConnections(int defaultConnections) {
+        this.defaultConnections = defaultConnections;
+    }
+
+    /**
+     * Returns the pool's growth increment.
+     * @return how many connections are added when the pool is exhausted
+     */
+    public int getIncrementalConnections() {
+        return this.incrementalConnections;
+    }
+
+    /**
+     * Sets the pool's growth increment.
+     */
+    public void setIncrementalConnections(int incrementalConnections) {
+        this.incrementalConnections = incrementalConnections;
+    }
+
+    /**
+     * Returns the maximum number of connections the pool may hold.
+     * @return the maximum pool size
+     */
+    public int getMaxConnections() {
+        return this.maxConnections;
+    }
+
+    /**
+     * Sets the maximum number of connections the pool may hold.
+     */
+    public void setMaxConnections(int maxConnections) {
+        this.maxConnections = maxConnections;
+    }
+
+    /**
+     * Internal wrapper pairing a pooled Golden connection with a busy flag
+     * that marks whether the connection is currently checked out.
+     */
+    class PooledConnection {
+        ServerImpl connection = null;// the Golden connection
+        boolean busy = false; // whether this connection is in use; idle by default
+
+        // Wraps an existing connection into a PooledConnection.
+        public PooledConnection(ServerImpl connection) {
+            this.connection = connection;
+        }
+
+        // Returns the wrapped connection.
+        public ServerImpl getConnection() {
+            return connection;
+        }
+
+        // Replaces the wrapped connection.
+        public void setConnection(ServerImpl connection) {
+            this.connection = connection;
+        }
+
+        // Whether the connection is currently checked out.
+        public boolean isBusy() {
+            return busy;
+        }
+
+        // Marks the connection checked out / idle.
+        public void setBusy(boolean busy) {
+            this.busy = busy;
+        }
+
+    }
+
+}

+ 28 - 0
golden-realtime-kafka/src/main/java/com/gyee/wisdom/model/TagPoint.java

@@ -0,0 +1,28 @@
+package com.gyee.wisdom.model;
+
+import com.opencsv.bean.CsvBindByPosition;
+import lombok.Data;
+
+/**
+ * One row of tag-point.csv: a Golden tag plus its position in the bitmap
+ * protocol (group index / point index / group size).
+ * Sample row: NX_GD_NSSF_FJ_P1_L1_001_AI0001,1350048,FLOAT32,机舱X向振动,001,AI0001,001H
+ * NOTE(review): the sample above shows only 7 fields while positions 0-8
+ * bind 9 required columns — confirm the real CSV layout.
+ */
+@Data
+public class TagPoint {
+    // Full tag code in the Golden RTDB
+    @CsvBindByPosition(position = 0,required = true)
+    private String tagCode;
+    // Numeric Golden tag id (parsed to int by CacheService)
+    @CsvBindByPosition(position = 1,required = true)
+    private String tagId;
+    // Declared data type, e.g. FLOAT32 / FLOAT64 / BOOL / INT / INT32
+    @CsvBindByPosition(position = 2,required = true)
+    private String dataType;
+    // Human-readable description
+    @CsvBindByPosition(position = 3,required = true)
+    private String description;
+    // Device / thing identifier
+    @CsvBindByPosition(position = 4,required = true)
+    private String thingId;
+    // Uniform measurement code
+    @CsvBindByPosition(position = 5,required = true)
+    private String uniformCode;
+    // Index of the BitMapGroup this point belongs to
+    @CsvBindByPosition(position = 6,required = true)
+    private short groupIndex;
+    // Bit position of this point inside the group's L2 bitmap
+    @CsvBindByPosition(position = 7,required = true)
+    private int pointIndex;
+    // Total number of points in the group
+    @CsvBindByPosition(position = 8,required = true)
+    private int groupSize;
+
+}

+ 28 - 0
golden-realtime-kafka/src/main/java/com/gyee/wisdom/model/TagPointData.java

@@ -0,0 +1,28 @@
+package com.gyee.wisdom.model;
+
+import com.rtdb.api.model.RtdbData;
+import lombok.Data;
+
+/**
+ * Pairs a {@link TagPoint} definition with one realtime record, ordered by
+ * (groupIndex, pointIndex) so a TreeSet yields records in protocol order.
+ */
+@Data
+public class TagPointData implements Comparable {
+
+    private TagPoint tagPoint;
+    private RtdbData rtdbData;
+
+    /**
+     * Orders by group index, then point index. Uses Short/Integer.compare
+     * instead of subtraction, which could overflow for extreme indices.
+     */
+    @Override
+    public int compareTo(Object newData) {
+        TagPoint newPoint = ((TagPointData) newData).tagPoint;
+        int result = Short.compare(this.tagPoint.getGroupIndex(), newPoint.getGroupIndex());
+        if (result == 0) {
+            result = Integer.compare(this.tagPoint.getPointIndex(), newPoint.getPointIndex());
+        }
+        return result;
+    }
+
+    public TagPointData(TagPoint tp, RtdbData rd) {
+        tagPoint = tp;
+        rtdbData = rd;
+    }
+
+}

+ 78 - 0
golden-realtime-kafka/src/main/java/com/gyee/wisdom/service/CacheService.java

@@ -0,0 +1,78 @@
+package com.gyee.wisdom.service;
+
+import com.gyee.wisdom.model.TagPoint;
+import com.opencsv.bean.CsvToBeanBuilder;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.core.io.ClassPathResource;
+import org.springframework.core.io.Resource;
+import org.springframework.stereotype.Service;
+
+import java.io.BufferedReader;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
+@Slf4j
+@Service
+public class CacheService {
+
+    private int[] tagIds;
+
+    public int[] getTagIds() {
+        if (tagIds == null)
+        {
+            List<TagPoint> tps = getPointTags();
+            if (tps != null && tps.size() > 0) {
+                tagIds = tps.stream().mapToInt(t-> Integer.parseInt(t.getTagId())).toArray();
+            }
+        }
+
+        return tagIds;
+    }
+
+    private HashMap<Integer, TagPoint> tagPointMap;
+
+    public HashMap<Integer, TagPoint> getTagPointMap() {
+        if (tagPointMap == null) {
+            List<TagPoint> tps = getPointTags();
+            tagPointMap = new HashMap<>();
+            if (tps != null && tps.size() > 0) {
+                for(TagPoint tp : tps) {
+                    Integer id = Integer.parseInt(tp.getTagId());
+                    if (tagPointMap.containsKey(id) == false)
+                        tagPointMap.put(id, tp);
+                }
+            }
+        }
+
+        return  tagPointMap;
+    }
+
+
+    private List<TagPoint> pointTags;
+
+    public List<TagPoint> getPointTags() {
+        if (pointTags == null)
+            pointTags = createPointTags();
+
+        return pointTags;
+    }
+
+    private List<TagPoint> createPointTags() {
+        try {
+            Resource resource = new ClassPathResource("tag-point.csv");
+            InputStream ins = resource.getInputStream();
+            BufferedReader reader = new BufferedReader(new InputStreamReader(ins, "UTF-8"));
+            return new CsvToBeanBuilder(reader)
+                    .withType(TagPoint.class).withSeparator(',').build().parse();
+
+        } catch (Exception ex) {
+            log.error(ex.getMessage());
+        }
+
+        return  new ArrayList<>();
+    }
+
+}

BIN
golden-realtime-kafka/src/main/lib/commons-beanutils-1.8.3.jar


BIN
golden-realtime-kafka/src/main/lib/commons-logging-1.1.1.jar


BIN
golden-realtime-kafka/src/main/lib/golden-java-sdk-3.0.27.jar


BIN
golden-realtime-kafka/src/main/lib/protobuf-java-2.6.1.jar


+ 23 - 0
golden-realtime-kafka/src/main/resources/application.yaml

@@ -0,0 +1,23 @@
+server:
+  port: 8067
+
+spring:
+  application:
+    name: ygys-golden-latest
+
+golden:
+  #server_ip: 10.155.32.1
+  server_ip: 172.168.1.3
+  server_port: 6327
+  user_name: sa
+  password: golden
+  pool_size: 10
+  max_pool_size: 100
+  #单次查询历史原始数据的数量上限
+  query_history_limit: 100000
+
+kafka:
+  bootstrap-server: 172.168.5.61:9092
+  topic-name: myTopic
+  consumer.group.id: test-consumer-group
+

+ 8 - 0
golden-realtime-kafka/src/main/resources/banner.txt

@@ -0,0 +1,8 @@
+                                 _    _             _      _          _       _         __ _
+  _  _ __ _ _  _ ______ __ _ ___| |__| |___ _ _ ___| |__ _| |_ ___ __| |_ ___| |____ _ / _| |____ _
+ | || / _` | || (_-<___/ _` / _ \ / _` / -_) ' \___| / _` |  _/ -_|_-<  _|___| / / _` |  _| / / _` |
+  \_, \__, |\_, /__/   \__, \___/_\__,_\___|_||_|  |_\__,_|\__\___/__/\__|   |_\_\__,_|_| |_\_\__,_|
+  |__/|___/ |__/       |___/
+
+
+ :: ygys-golden-latest-kafka ::                    version 1.0.0

+ 64 - 0
golden-realtime-kafka/src/main/resources/log4j2.xml

@@ -0,0 +1,64 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<Configuration status="WARN">
+    <Properties>
+        <Property name="Pattern">%d{yyyy-MM-dd HH:mm:ss.SSS} %5p %t %M(%F:%L) %m%n</Property>
+        <!-- baseDir is referenced by the Delete actions below but was never
+             defined; point it at the log directory so rollover deletion works. -->
+        <Property name="baseDir">logs</Property>
+    </Properties>
+    <Filter type="ThresholdFilter" level="INFO"/>
+
+    <Appenders>
+        <Console name="Console" target="SYSTEM_OUT">
+            <PatternLayout pattern="${Pattern}"/>
+        </Console>
+        <RollingFile name="RollingFileInfo" fileName="logs/info.log"
+                     filePattern="logs/%d{yyyy-MM}/info-%d{yyyy-MM-dd}.%i.log">
+            <PatternLayout pattern="${Pattern}"/>
+            <ThresholdFilter level="INFO" onMatch="ACCEPT" onMismatch="DENY"/>
+            <Policies>
+                <TimeBasedTriggeringPolicy/>
+            </Policies>
+            <DefaultRolloverStrategy>
+                <Delete basePath="${baseDir}" maxDepth="2">
+                    <IfFileName glob="*/*.log" />
+                    <IfLastModified age="24H" />
+                </Delete>
+            </DefaultRolloverStrategy>
+        </RollingFile>
+        <RollingFile name="RollingFileWarn" fileName="logs/warn.log"
+                     filePattern="logs/%d{yyyy-MM}/warn-%d{yyyy-MM-dd}.%i.log">
+            <PatternLayout pattern="${Pattern}"/>
+            <ThresholdFilter level="WARN" onMatch="ACCEPT" onMismatch="DENY"/>
+            <Policies>
+                <TimeBasedTriggeringPolicy/>
+            </Policies>
+            <DefaultRolloverStrategy>
+                <Delete basePath="${baseDir}" maxDepth="2">
+                    <IfFileName glob="*/*.log" />
+                    <IfLastModified age="24H" />
+                </Delete>
+            </DefaultRolloverStrategy>
+        </RollingFile>
+        <RollingFile name="RollingFileError" fileName="logs/error.log"
+                     filePattern="logs/%d{yyyy-MM}/error-%d{yyyy-MM-dd}.%i.log">
+            <PatternLayout pattern="${Pattern}"/>
+            <ThresholdFilter level="ERROR" onMatch="ACCEPT" onMismatch="DENY"/>
+            <Policies>
+                <TimeBasedTriggeringPolicy/>
+            </Policies>
+            <DefaultRolloverStrategy>
+                <Delete basePath="${baseDir}" maxDepth="2">
+                    <IfFileName glob="*/*.log" />
+                    <IfLastModified age="24H" />
+                </Delete>
+            </DefaultRolloverStrategy>
+        </RollingFile>
+    </Appenders>
+
+    <Loggers>
+        <Root level="WARN">
+            <AppenderRef ref="Console"/>
+         <!-- <appender-ref ref="RollingFileInfo"/>
+            <appender-ref ref="RollingFileWarn"/>-->
+            <appender-ref ref="RollingFileError"/>
+        </Root>
+    </Loggers>
+</Configuration>

File diff suppressed because it is too large
+ 109494 - 0
golden-realtime-kafka/src/main/resources/tag-point.csv


+ 35 - 0
gradle.properties

@@ -0,0 +1,35 @@
+group=com.gyee.wisdom
+version=1
+description=Gyee wisdom platform
+
+sourceCompatibility=1.8
+targetCompatibility=1.8
+
+# Abbr
+bootGroup=org.springframework.boot
+cloudGroup=org.springframework.cloud
+kotlinGroup=org.jetbrains.kotlin
+# Lib
+kotlinVersion=1.1.3
+aspectjVersion=1.8.10
+junitVersion=4.12
+mockitoVersion=2.8.47
+objenesisVersion=2.6
+findbugsContribVersion=7.0.2
+findbugsSecVersion=1.6.0
+log4jVersion=2.8.2
+jsr305Version=3.0.1
+lombokVersion=1.18.12
+guavaVersion=19.0
+checkstyleVersion=8.0
+springBootVersion=2.0.3.RELEASE
+springCloudDependenciesVersion=Dalston.SR1
+commonsLang3Version=3.4
+commonsCodecVersion=1.10
+jodaTimeVersion=2.9.9
+nettyVersion=4.1.12.Final
+cassandraVersion=3.0.0
+hbaseVersion=1.2.0-cdh5.16.2
+hadoopVersion=2.6.0-cdh5.16.2
+kuduVersion = 1.2.0
+kafkaVersion = 1.0.1

BIN
gradle/wrapper/gradle-wrapper.jar


+ 6 - 0
gradle/wrapper/gradle-wrapper.properties

@@ -0,0 +1,6 @@
+#Tue Sep 17 14:57:12 CST 2019
+distributionBase=GRADLE_USER_HOME
+distributionPath=wrapper/dists
+zipStoreBase=GRADLE_USER_HOME
+zipStorePath=wrapper/dists
+distributionUrl=https\://services.gradle.org/distributions/gradle-5.4-all.zip

+ 172 - 0
gradlew

@@ -0,0 +1,172 @@
+#!/usr/bin/env sh
+
+##############################################################################
+##
+##  Gradle start up script for UN*X
+##
+##############################################################################
+
+# Attempt to set APP_HOME
+# Resolve links: $0 may be a link
+PRG="$0"
+# Need this for relative symlinks.
+while [ -h "$PRG" ] ; do
+    ls=`ls -ld "$PRG"`
+    link=`expr "$ls" : '.*-> \(.*\)$'`
+    if expr "$link" : '/.*' > /dev/null; then
+        PRG="$link"
+    else
+        PRG=`dirname "$PRG"`"/$link"
+    fi
+done
+SAVED="`pwd`"
+cd "`dirname \"$PRG\"`/" >/dev/null
+APP_HOME="`pwd -P`"
+cd "$SAVED" >/dev/null
+
+APP_NAME="Gradle"
+APP_BASE_NAME=`basename "$0"`
+
+# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+DEFAULT_JVM_OPTS=""
+
+# Use the maximum available, or set MAX_FD != -1 to use that value.
+MAX_FD="maximum"
+
+warn () {
+    echo "$*"
+}
+
+die () {
+    echo
+    echo "$*"
+    echo
+    exit 1
+}
+
+# OS specific support (must be 'true' or 'false').
+cygwin=false
+msys=false
+darwin=false
+nonstop=false
+case "`uname`" in
+  CYGWIN* )
+    cygwin=true
+    ;;
+  Darwin* )
+    darwin=true
+    ;;
+  MINGW* )
+    msys=true
+    ;;
+  NONSTOP* )
+    nonstop=true
+    ;;
+esac
+
+CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
+
+# Determine the Java command to use to start the JVM.
+if [ -n "$JAVA_HOME" ] ; then
+    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+        # IBM's JDK on AIX uses strange locations for the executables
+        JAVACMD="$JAVA_HOME/jre/sh/java"
+    else
+        JAVACMD="$JAVA_HOME/bin/java"
+    fi
+    if [ ! -x "$JAVACMD" ] ; then
+        die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+    fi
+else
+    JAVACMD="java"
+    which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+fi
+
+# Increase the maximum file descriptors if we can.
+if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
+    MAX_FD_LIMIT=`ulimit -H -n`
+    if [ $? -eq 0 ] ; then
+        if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
+            MAX_FD="$MAX_FD_LIMIT"
+        fi
+        ulimit -n $MAX_FD
+        if [ $? -ne 0 ] ; then
+            warn "Could not set maximum file descriptor limit: $MAX_FD"
+        fi
+    else
+        warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
+    fi
+fi
+
+# For Darwin, add options to specify how the application appears in the dock
+if $darwin; then
+    GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
+fi
+
+# For Cygwin, switch paths to Windows format before running java
+if $cygwin ; then
+    APP_HOME=`cygpath --path --mixed "$APP_HOME"`
+    CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
+    JAVACMD=`cygpath --unix "$JAVACMD"`
+
+    # We build the pattern for arguments to be converted via cygpath
+    ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
+    SEP=""
+    for dir in $ROOTDIRSRAW ; do
+        ROOTDIRS="$ROOTDIRS$SEP$dir"
+        SEP="|"
+    done
+    OURCYGPATTERN="(^($ROOTDIRS))"
+    # Add a user-defined pattern to the cygpath arguments
+    if [ "$GRADLE_CYGPATTERN" != "" ] ; then
+        OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
+    fi
+    # Now convert the arguments - kludge to limit ourselves to /bin/sh
+    i=0
+    for arg in "$@" ; do
+        CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
+        CHECK2=`echo "$arg"|egrep -c "^-"`                                 ### Determine if an option
+
+        if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then                    ### Added a condition
+            eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
+        else
+            eval `echo args$i`="\"$arg\""
+        fi
+        i=$((i+1))
+    done
+    case $i in
+        (0) set -- ;;
+        (1) set -- "$args0" ;;
+        (2) set -- "$args0" "$args1" ;;
+        (3) set -- "$args0" "$args1" "$args2" ;;
+        (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
+        (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
+        (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
+        (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
+        (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
+        (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
+    esac
+fi
+
+# Escape application args
+save () {
+    for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
+    echo " "
+}
+APP_ARGS=$(save "$@")
+
+# Collect all arguments for the java command, following the shell quoting and substitution rules
+eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
+
+# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
+if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then
+  cd "$(dirname "$0")"
+fi
+
+exec "$JAVACMD" "$@"

+ 84 - 0
gradlew.bat

@@ -0,0 +1,84 @@
+@if "%DEBUG%" == "" @echo off
+@rem ##########################################################################
+@rem
+@rem  Gradle startup script for Windows
+@rem
+@rem ##########################################################################
+
+@rem Set local scope for the variables with windows NT shell
+if "%OS%"=="Windows_NT" setlocal
+
+set DIRNAME=%~dp0
+if "%DIRNAME%" == "" set DIRNAME=.
+set APP_BASE_NAME=%~n0
+set APP_HOME=%DIRNAME%
+
+@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+set DEFAULT_JVM_OPTS=
+
+@rem Find java.exe
+if defined JAVA_HOME goto findJavaFromJavaHome
+
+set JAVA_EXE=java.exe
+%JAVA_EXE% -version >NUL 2>&1
+if "%ERRORLEVEL%" == "0" goto init
+
+echo.
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:findJavaFromJavaHome
+set JAVA_HOME=%JAVA_HOME:"=%
+set JAVA_EXE=%JAVA_HOME%/bin/java.exe
+
+if exist "%JAVA_EXE%" goto init
+
+echo.
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:init
+@rem Get command-line arguments, handling Windows variants
+
+if not "%OS%" == "Windows_NT" goto win9xME_args
+
+:win9xME_args
+@rem Slurp the command line arguments.
+set CMD_LINE_ARGS=
+set _SKIP=2
+
+:win9xME_args_slurp
+if "x%~1" == "x" goto execute
+
+set CMD_LINE_ARGS=%*
+
+:execute
+@rem Setup the command line
+
+set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
+
+@rem Execute Gradle
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
+
+:end
+@rem End local scope for the variables with windows NT shell
+if "%ERRORLEVEL%"=="0" goto mainEnd
+
+:fail
+rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
+rem the _cmd.exe /c_ return code!
+if  not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
+exit /b 1
+
+:mainEnd
+if "%OS%"=="Windows_NT" endlocal
+
+:omega

+ 34 - 0
kafka-connectors/build.gradle

@@ -0,0 +1,34 @@
+buildscript {
+    repositories {
+        mavenLocal()
+        maven { url "http://maven.aliyun.com/nexus/content/groups/public" }
+        mavenCentral()
+    }
+    dependencies {
+        classpath("$bootGroup:spring-boot-gradle-plugin:$springBootVersion")
+    }
+}
+
+apply plugin: "$bootGroup"
+apply plugin: 'io.spring.dependency-management'
+
+
+dependencies {
+
+    compile project(":common")
+    compile fileTree(dir: 'src/main/lib', include: '*.jar')
+
+    compile("org.apache.kafka:connect-api:$kafkaVersion")
+    compile("org.apache.kudu:kudu-client:$kuduVersion")
+
+    compile 'redis.clients:jedis:2.8.1'
+    compile 'org.apache.directory.studio:org.apache.commons.codec:1.8'
+    compile 'io.inbot:inbot-testfixtures:1.6'
+
+    compile("$bootGroup:spring-boot-starter-log4j2")
+    compile 'com.alibaba:fastjson:1.2.17'
+
+    testCompile("$bootGroup:spring-boot-starter-test")
+
+}
+

+ 127 - 0
kafka-connectors/src/main/java/com/gyee/ygys/connector/file/FileStreamSinkConnector.java

@@ -0,0 +1,127 @@
+package com.gyee.ygys.connector.file;
+
+import org.apache.kafka.common.config.AbstractConfig;
+
+import org.apache.kafka.common.config.ConfigDef;
+
+import org.apache.kafka.common.config.ConfigDef.Importance;
+
+import org.apache.kafka.common.config.ConfigDef.Type;
+
+import org.apache.kafka.common.utils.AppInfoParser;
+
+import org.apache.kafka.connect.connector.Task;
+
+import org.apache.kafka.connect.sink.SinkConnector;
+
+
+
+import java.util.ArrayList;
+
+import java.util.HashMap;
+
+import java.util.List;
+
+import java.util.Map;
+
+
+
+/**
+
+ * Very simple connector that works with the console. This connector supports both source and
+
+ * sink modes via its 'mode' setting.
+
+ */
+
+public class FileStreamSinkConnector extends SinkConnector {
+
+
+
+    public static final String FILE_CONFIG = "file";
+
+    private static final ConfigDef CONFIG_DEF = new ConfigDef()
+
+            .define(FILE_CONFIG, Type.STRING, null, Importance.HIGH, "Destination filename. If not specified, the standard output will be used");
+
+
+
+    private String filename;
+
+
+
+    @Override
+
+    public String version() {
+
+        return AppInfoParser.getVersion();
+
+    }
+
+
+
+    @Override
+
+    public void start(Map<String, String> props) {
+
+        AbstractConfig parsedConfig = new AbstractConfig(CONFIG_DEF, props);
+
+        filename = parsedConfig.getString(FILE_CONFIG);
+
+    }
+
+
+
+    @Override
+
+    public Class<? extends Task> taskClass() {
+
+        return FileStreamSinkTask.class;
+
+    }
+
+
+
+    @Override
+
+    public List<Map<String, String>> taskConfigs(int maxTasks) {
+
+        ArrayList<Map<String, String>> configs = new ArrayList<>();
+
+        for (int i = 0; i < maxTasks; i++) {
+
+            Map<String, String> config = new HashMap<>();
+
+            if (filename != null)
+
+                config.put(FILE_CONFIG, filename);
+
+            configs.add(config);
+
+        }
+
+        return configs;
+
+    }
+
+
+
+    @Override
+
+    public void stop() {
+
+        // Nothing to do since FileStreamSinkConnector has no background monitoring.
+
+    }
+
+
+
+    @Override
+
+    public ConfigDef config() {
+
+        return CONFIG_DEF;
+
+    }
+
+}

+ 163 - 0
kafka-connectors/src/main/java/com/gyee/ygys/connector/file/FileStreamSinkTask.java

@@ -0,0 +1,163 @@
+package com.gyee.ygys.connector.file;
+
+import org.apache.kafka.clients.consumer.OffsetAndMetadata;
+
+import org.apache.kafka.common.TopicPartition;
+
+import org.apache.kafka.connect.errors.ConnectException;
+
+import org.apache.kafka.connect.sink.SinkRecord;
+
+import org.apache.kafka.connect.sink.SinkTask;
+
+import org.slf4j.Logger;
+
+import org.slf4j.LoggerFactory;
+
+
+
+import java.io.IOException;
+
+import java.io.PrintStream;
+
+import java.nio.charset.StandardCharsets;
+
+import java.nio.file.Files;
+
+import java.nio.file.Paths;
+
+import java.nio.file.StandardOpenOption;
+
+import java.util.Collection;
+
+import java.util.Map;
+
+
+
+/**
+
+ * FileStreamSinkTask writes records to stdout or a file.
+
+ */
+
+public class FileStreamSinkTask extends SinkTask {
+
+    private static final Logger log = LoggerFactory.getLogger(FileStreamSinkTask.class);
+
+
+
+    private String filename;
+
+    private PrintStream outputStream;
+
+
+
+    public FileStreamSinkTask() {
+
+    }
+
+
+
+    // for testing
+
+    public FileStreamSinkTask(PrintStream outputStream) {
+
+        filename = null;
+
+        this.outputStream = outputStream;
+
+    }
+
+
+
+    @Override
+
+    public String version() {
+
+        return new FileStreamSinkConnector().version();
+
+    }
+
+
+
+    @Override
+
+    public void start(Map<String, String> props) {
+
+        filename = props.get(FileStreamSinkConnector.FILE_CONFIG);
+
+        if (filename == null) {
+
+            outputStream = System.out;
+
+        } else {
+
+            try {
+
+                outputStream = new PrintStream(
+
+                        Files.newOutputStream(Paths.get(filename), StandardOpenOption.CREATE, StandardOpenOption.APPEND),
+
+                        false,
+
+                        StandardCharsets.UTF_8.name());
+
+            } catch (IOException e) {
+
+                throw new ConnectException("Couldn't find or create file '" + filename + "' for FileStreamSinkTask", e);
+
+            }
+
+        }
+
+    }
+
+
+
+    @Override
+
+    public void put(Collection<SinkRecord> sinkRecords) {
+
+        for (SinkRecord record : sinkRecords) {
+
+            log.trace("Writing line to {}: {}", logFilename(), record.value());
+
+            outputStream.println(record.value());
+
+        }
+
+    }
+
+
+
+    @Override
+
+    public void flush(Map<TopicPartition, OffsetAndMetadata> offsets) {
+
+        log.trace("Flushing output stream for {}", logFilename());
+
+        outputStream.flush();
+
+    }
+
+
+
+    @Override
+
+    public void stop() {
+
+        if (outputStream != null && outputStream != System.out)
+
+            outputStream.close();
+
+    }
+
+
+
+    private String logFilename() {
+
+        return filename == null ? "stdout" : filename;
+
+    }
+
+}

+ 82 - 0
kafka-connectors/src/main/java/com/gyee/ygys/connector/file/FileStreamSourceConnector.java

@@ -0,0 +1,82 @@
+package com.gyee.ygys.connector.file;
+
+import org.apache.kafka.common.config.AbstractConfig;
+import org.apache.kafka.common.config.ConfigDef;
+import org.apache.kafka.common.config.ConfigDef.Importance;
+import org.apache.kafka.common.config.ConfigDef.Type;
+import org.apache.kafka.common.config.ConfigException;
+import org.apache.kafka.common.utils.AppInfoParser;
+import org.apache.kafka.connect.connector.Task;
+import org.apache.kafka.connect.source.SourceConnector;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Very simple connector that works with the console. This connector supports both source and
+ * sink modes via its 'mode' setting.
+ */
+public class FileStreamSourceConnector extends SourceConnector {
+    public static final String TOPIC_CONFIG = "topic";
+    public static final String FILE_CONFIG = "file";
+    public static final String TASK_BATCH_SIZE_CONFIG = "batch.size";
+
+    public static final int DEFAULT_TASK_BATCH_SIZE = 2000;
+
+    private static final ConfigDef CONFIG_DEF = new ConfigDef()
+            .define(FILE_CONFIG, Type.STRING, null, Importance.HIGH, "Source filename. If not specified, the standard input will be used")
+            .define(TOPIC_CONFIG, Type.LIST, Importance.HIGH, "The topic to publish data to")
+            .define(TASK_BATCH_SIZE_CONFIG, Type.INT, DEFAULT_TASK_BATCH_SIZE, Importance.LOW,
+                    "The maximum number of records the Source task can read from file one time");
+
+    private String filename;
+    private String topic;
+    private int batchSize;
+
+    @Override
+    public String version() {
+        return AppInfoParser.getVersion();
+    }
+
+    @Override
+    public void start(Map<String, String> props) {
+        AbstractConfig parsedConfig = new AbstractConfig(CONFIG_DEF, props);
+        filename = parsedConfig.getString(FILE_CONFIG);
+        List<String> topics = parsedConfig.getList(TOPIC_CONFIG);
+        if (topics.size() != 1) {
+            throw new ConfigException("'topic' in FileStreamSourceConnector configuration requires definition of a single topic");
+        }
+        topic = topics.get(0);
+        batchSize = parsedConfig.getInt(TASK_BATCH_SIZE_CONFIG);
+    }
+
+    @Override
+    public Class<? extends Task> taskClass() {
+        return FileStreamSourceTask.class;
+    }
+
+    @Override
+    public List<Map<String, String>> taskConfigs(int maxTasks) {
+        ArrayList<Map<String, String>> configs = new ArrayList<>();
+        // Only one input stream makes sense.
+        Map<String, String> config = new HashMap<>();
+        if (filename != null)
+            config.put(FILE_CONFIG, filename);
+        config.put(TOPIC_CONFIG, topic);
+        config.put(TASK_BATCH_SIZE_CONFIG, String.valueOf(batchSize));
+        configs.add(config);
+        return configs;
+    }
+
+    @Override
+    public void stop() {
+        // Nothing to do since FileStreamSourceConnector has no background monitoring.
+    }
+
+    @Override
+    public ConfigDef config() {
+        return CONFIG_DEF;
+    }
+}

+ 223 - 0
kafka-connectors/src/main/java/com/gyee/ygys/connector/file/FileStreamSourceTask.java

@@ -0,0 +1,223 @@
+package com.gyee.ygys.connector.file;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.connect.errors.ConnectException;
+import org.apache.kafka.connect.source.SourceRecord;
+import org.apache.kafka.connect.source.SourceTask;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * FileStreamSourceTask reads from stdin or a file.
+ */
public class FileStreamSourceTask extends SourceTask {

    private static final Logger log = LoggerFactory.getLogger(FileStreamSourceTask.class);
    // Keys of the source-offset map persisted by the Connect framework.
    public static final String FILENAME_FIELD = "filename";
    public static final String POSITION_FIELD = "position";
    private static final Schema VALUE_SCHEMA = Schema.STRING_SCHEMA;


    // Source file path; null or empty means read from stdin.
    private String filename;
    private InputStream stream;
    private BufferedReader reader = null;
    // Accumulates characters read but not yet emitted as complete lines;
    // doubled in place whenever it fills up.
    private char[] buffer = new char[1024];
    // Number of valid characters currently held in buffer.
    private int offset = 0;
    private String topic = null;
    private int batchSize = FileStreamSourceConnector.DEFAULT_TASK_BATCH_SIZE;

    // Resume position within the file; null when reading stdin.
    // NOTE(review): advanced by characters consumed (extractLine) but restored
    // via InputStream.skip, which counts bytes — inexact for multi-byte UTF-8
    // input. Confirm whether exact resume matters for this deployment.
    private Long streamOffset;

    @Override
    public String version() {
        return new FileStreamSourceConnector().version();
    }

    // Records the task configuration; for stdin the reader is opened here,
    // for a file it is opened lazily on the first poll() so offset recovery
    // can consult the offset storage.
    @Override
    public void start(Map<String, String> props) {
        filename = props.get(FileStreamSourceConnector.FILE_CONFIG);
        if (filename == null || filename.isEmpty()) {
            stream = System.in;
            // Tracking offset for stdin doesn't make sense
            streamOffset = null;
            reader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8));
        }
        // Missing topic or parsing error is not possible because we've parsed the config in the
        // Connector
        topic = props.get(FileStreamSourceConnector.TOPIC_CONFIG);
        batchSize = Integer.parseInt(props.get(FileStreamSourceConnector.TASK_BATCH_SIZE_CONFIG));
    }


    // Lazily opens the file (seeking to any previously committed position),
    // drains whatever data is ready, splits it into lines, and returns up to
    // batchSize records. Returns null when no complete line is available,
    // after sleeping up to 1s as a simple backoff.
    @Override
    public List<SourceRecord> poll() throws InterruptedException {
        if (stream == null) {
            try {
                stream = Files.newInputStream(Paths.get(filename));
                // Ask Connect's offset storage where we left off for this file.
                Map<String, Object> offset = context.offsetStorageReader().offset(Collections.singletonMap(FILENAME_FIELD, filename));
                if (offset != null) {
                    Object lastRecordedOffset = offset.get(POSITION_FIELD);
                    if (lastRecordedOffset != null && !(lastRecordedOffset instanceof Long))
                        throw new ConnectException("Offset position is the incorrect type");
                    if (lastRecordedOffset != null) {
                        log.debug("Found previous offset, trying to skip to file offset {}", lastRecordedOffset);
                        long skipLeft = (Long) lastRecordedOffset;
                        // skip() may skip fewer bytes than requested, so loop.
                        while (skipLeft > 0) {
                            try {
                                long skipped = stream.skip(skipLeft);
                                skipLeft -= skipped;
                            } catch (IOException e) {
                                log.error("Error while trying to seek to previous offset in file {}: ", filename, e);
                                throw new ConnectException(e);
                            }
                        }
                        log.debug("Skipped to offset {}", lastRecordedOffset);
                    }
                    streamOffset = (lastRecordedOffset != null) ? (Long) lastRecordedOffset : 0L;
                } else {
                    streamOffset = 0L;
                }
                reader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8));
                log.debug("Opened {} for reading", logFilename());
            } catch (NoSuchFileException e) {
                log.warn("Couldn't find file {} for FileStreamSourceTask, sleeping to wait for it to be created", logFilename());
                synchronized (this) {
                    this.wait(1000);
                }
                return null;
            } catch (IOException e) {
                log.error("Error while trying to open file {}: ", filename, e);
                throw new ConnectException(e);
            }
        }

        // Unfortunately we can't just use readLine() because it blocks in an uninterruptible way.
        // Instead we have to manage splitting lines ourselves, using simple backoff when no new data
        // is available.
        try {
            // Snapshot the reader under the lock: stop() may close it concurrently.
            final BufferedReader readerCopy;
            synchronized (this) {
                readerCopy = reader;
            }
            if (readerCopy == null)
                return null;

            ArrayList<SourceRecord> records = null;

            int nread = 0;
            // ready() keeps this loop non-blocking: only consume data that is
            // already available.
            while (readerCopy.ready()) {
                nread = readerCopy.read(buffer, offset, buffer.length - offset);
                log.trace("Read {} bytes from {}", nread, logFilename());

                if (nread > 0) {
                    offset += nread;
                    // Buffer full with no newline yet: grow it so a long line
                    // can still be assembled.
                    if (offset == buffer.length) {
                        char[] newbuf = new char[buffer.length * 2];
                        System.arraycopy(buffer, 0, newbuf, 0, buffer.length);
                        buffer = newbuf;
                    }

                    String line;
                    do {
                        line = extractLine();
                        if (line != null) {
                            log.trace("Read a line from {}", logFilename());
                            if (records == null)
                                records = new ArrayList<>();
                            records.add(new SourceRecord(offsetKey(filename), offsetValue(streamOffset), topic, null,
                                    null, null, VALUE_SCHEMA, line, System.currentTimeMillis()));

                            if (records.size() >= batchSize) {
                                // Cap the batch; remaining buffered data is
                                // picked up on the next poll.
                                return records;
                            }
                        }
                    } while (line != null);
                }
            }

            // Nothing was read: back off so we don't spin on an idle file.
            if (nread <= 0)
                synchronized (this) {
                    this.wait(1000);
                }

           return records;
        } catch (IOException e) {
            // Underlying stream was killed, probably as a result of calling stop. Allow to return
            // null, and driving thread will handle any shutdown if necessary.
        }
        return null;
    }

    // Scans buffer[0..offset) for a line terminator ('\n', '\r', or "\r\n").
    // On a hit: returns the line (terminator excluded), compacts the buffer,
    // and advances streamOffset by the characters consumed. Returns null when
    // no complete line is buffered yet.
    private String extractLine() {
        int until = -1, newStart = -1;
        for (int i = 0; i < offset; i++) {
            if (buffer[i] == '\n') {
                until = i;
                newStart = i + 1;
                break;
            } else if (buffer[i] == '\r') {
                // We need to check for \r\n, so we must skip this if we can't check the next char
                if (i + 1 >= offset)
                    return null;

               until = i;
                newStart = (buffer[i + 1] == '\n') ? i + 2 : i + 1;
                break;
            }
        }

        if (until != -1) {
            String result = new String(buffer, 0, until);
            System.arraycopy(buffer, newStart, buffer, 0, buffer.length - newStart);
            offset = offset - newStart;
            if (streamOffset != null)
                streamOffset += newStart;
            return result;
        } else {
            return null;
        }
    }

    // Closes the stream (unless it is stdin) and wakes any poll() sleeping in
    // wait(); the interrupted read then surfaces as the IOException handled above.
    @Override
    public void stop() {
        log.trace("Stopping");
        synchronized (this) {
            try {
                if (stream != null && stream != System.in) {
                    stream.close();
                    log.trace("Closed input stream");
                }
            } catch (IOException e) {
                log.error("Failed to close FileStreamSourceTask stream: ", e);
            }
            this.notify();
        }
    }

    // Source partition identifying which file these offsets belong to.
    private Map<String, String> offsetKey(String filename) {
        return Collections.singletonMap(FILENAME_FIELD, filename);
    }

    // Source offset payload: the resume position within the file.
    private Map<String, Long> offsetValue(Long pos) {
        return Collections.singletonMap(POSITION_FIELD, pos);
    }

    // Human-readable source name for log messages.
    private String logFilename() {
        return filename == null ? "stdin" : filename;
    }
}

+ 100 - 0
kafka-connectors/src/main/java/com/gyee/ygys/connector/golden/GoldenConfig.java

@@ -0,0 +1,100 @@
+package com.gyee.ygys.connector.golden;
+
+
+import org.apache.kafka.common.config.AbstractConfig;
+import org.apache.kafka.common.config.ConfigDef;
+
+import java.util.Map;
+
+/**
+ * Connection settings for the Golden (RTDB) server, parsed from connector
+ * properties. All settings live in the single "Connection" group.
+ */
+public class GoldenConfig extends AbstractConfig {
+
+    public static final String GOLDEN_SERVER_IP = "golden.server_ip";
+    private static final String GOLDEN_SERVER_IP_DOC = "庚顿服务器IP地址";
+    private static final String GOLDEN_SERVER_IP_DEFAULT = "127.0.0.1";
+
+    public static final String GOLDEN_SERVER_PORT = "golden.server_port";
+    private static final String GOLDEN_SERVER_PORT_DOC = "庚顿服务器IP端口";
+    private static final int GOLDEN_SERVER_PORT_DEFAULT = 6237;
+
+    public static final String GOLDEN_USER_NAME = "golden.user_name";
+    private static final String GOLDEN_USER_NAME_DOC = "庚顿服务器登录账号";
+    private static final String GOLDEN_USER_NAME_DEFAULT = "sa";
+
+    public static final String GOLDEN_PASSWORD = "golden.password";
+    private static final String GOLDEN_PASSWORD_DOC = "庚顿服务器登录密码";
+    private static final String GOLDEN_PASSWORD_DEFAULT = "golden";
+
+    public static final String GOLDEN_POOL_SIZE = "golden.pool_size";
+    private static final String GOLDEN_POOL_SIZE_DOC = "庚顿客户端连接池大小";
+    private static final int GOLDEN_POOL_SIZE_DEFAULT = 2;
+
+    public static final String GOLDEN_MAX_POOL_SIZE = "golden.max_pool_size";
+    private static final String GOLDEN_MAX_POOL_SIZE_DOC = "庚顿客户端最大连接池数";
+    private static final int GOLDEN_MAX_POOL_SIZE_DEFAULT = 10;
+
+    public static final String GOLDEN_QUERY_HISTORY_LIMIT = "golden.query_history_limit";
+    private static final String GOLDEN_QUERY_HISTORY_LIMIT_DOC = "庚顿读取历史数据一次返回数据量上限";
+    private static final int GOLDEN_QUERY_HISTORY_LIMIT_DEFAULT = 100000;
+
+    private static final String CONNECTION_GROUP = "Connection";
+
+    // Previously declared but never applied; now attached to every integer
+    // setting so negative ports / pool sizes are rejected at validation time.
+    private static final ConfigDef.Range NON_NEGATIVE_INT_VALIDATOR = ConfigDef.Range.atLeast(0);
+
+    // NOTE(review): orderInGroup used to be 2 for four different settings,
+    // making the rendered documentation order ambiguous; it is now 1..7.
+    public static final ConfigDef CONFIG_DEF = new ConfigDef()
+            // Connection
+            .define(
+                    GOLDEN_SERVER_IP, ConfigDef.Type.STRING, GOLDEN_SERVER_IP_DEFAULT,
+                    ConfigDef.Importance.HIGH, GOLDEN_SERVER_IP_DOC,
+                    CONNECTION_GROUP, 1, ConfigDef.Width.MEDIUM, GOLDEN_SERVER_IP)
+            .define(
+                    GOLDEN_SERVER_PORT, ConfigDef.Type.INT, GOLDEN_SERVER_PORT_DEFAULT, NON_NEGATIVE_INT_VALIDATOR,
+                    ConfigDef.Importance.MEDIUM, GOLDEN_SERVER_PORT_DOC,
+                    CONNECTION_GROUP, 2, ConfigDef.Width.MEDIUM, GOLDEN_SERVER_PORT)
+            .define(
+                    GOLDEN_USER_NAME, ConfigDef.Type.STRING, GOLDEN_USER_NAME_DEFAULT,
+                    ConfigDef.Importance.LOW, GOLDEN_USER_NAME_DOC,
+                    CONNECTION_GROUP, 3, ConfigDef.Width.MEDIUM, GOLDEN_USER_NAME)
+            .define(
+                    GOLDEN_PASSWORD, ConfigDef.Type.STRING, GOLDEN_PASSWORD_DEFAULT,
+                    ConfigDef.Importance.LOW, GOLDEN_PASSWORD_DOC,
+                    CONNECTION_GROUP, 4, ConfigDef.Width.MEDIUM, GOLDEN_PASSWORD)
+            .define(
+                    GOLDEN_POOL_SIZE, ConfigDef.Type.INT, GOLDEN_POOL_SIZE_DEFAULT, NON_NEGATIVE_INT_VALIDATOR,
+                    ConfigDef.Importance.MEDIUM, GOLDEN_POOL_SIZE_DOC,
+                    CONNECTION_GROUP, 5, ConfigDef.Width.MEDIUM, GOLDEN_POOL_SIZE)
+            .define(
+                    GOLDEN_MAX_POOL_SIZE, ConfigDef.Type.INT, GOLDEN_MAX_POOL_SIZE_DEFAULT, NON_NEGATIVE_INT_VALIDATOR,
+                    ConfigDef.Importance.MEDIUM, GOLDEN_MAX_POOL_SIZE_DOC,
+                    CONNECTION_GROUP, 6, ConfigDef.Width.MEDIUM, GOLDEN_MAX_POOL_SIZE)
+            .define(
+                    GOLDEN_QUERY_HISTORY_LIMIT, ConfigDef.Type.INT, GOLDEN_QUERY_HISTORY_LIMIT_DEFAULT, NON_NEGATIVE_INT_VALIDATOR,
+                    ConfigDef.Importance.MEDIUM, GOLDEN_QUERY_HISTORY_LIMIT_DOC,
+                    CONNECTION_GROUP, 7, ConfigDef.Width.MEDIUM, GOLDEN_QUERY_HISTORY_LIMIT);
+
+    // Parsed values, exposed as immutable public fields for convenient access.
+    public final String serverIp;
+    public final int serverPort;
+    public final String userName;
+    public final String password;
+    public final int poolSize;
+    public final int maxPoolSize;
+    public final int queryHistoryLimit;
+
+    /**
+     * Parses and validates the given properties against {@link #CONFIG_DEF}.
+     *
+     * @param props raw connector properties
+     */
+    public GoldenConfig(Map<?, ?> props) {
+        super(CONFIG_DEF, props);
+        serverIp = getString(GOLDEN_SERVER_IP);
+        serverPort = getInt(GOLDEN_SERVER_PORT);
+        userName = getString(GOLDEN_USER_NAME);
+        password = getString(GOLDEN_PASSWORD);
+        poolSize = getInt(GOLDEN_POOL_SIZE);
+        maxPoolSize = getInt(GOLDEN_MAX_POOL_SIZE);
+        queryHistoryLimit = getInt(GOLDEN_QUERY_HISTORY_LIMIT);
+    }
+
+    /** Prints the configuration reference in reStructuredText form. */
+    public static void main(String... args) {
+        System.out.println(CONFIG_DEF.toRst());
+    }
+}

+ 384 - 0
kafka-connectors/src/main/java/com/gyee/ygys/connector/golden/GoldenConnectionPool.java

@@ -0,0 +1,384 @@
+package com.gyee.ygys.connector.golden;
+
+
+import com.rtdb.service.impl.ServerImpl;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Enumeration;
+import java.util.Vector;
+
+/**
+ * Singleton pool of Golden (RTDB) {@link ServerImpl} connections.
+ *
+ * <p>The pool is created lazily on first {@link #getConnection()}: it starts
+ * with {@code defaultConnections} connections, grows by
+ * {@code incrementalConnections} when every connection is busy, and never
+ * exceeds {@code maxConnections} (a value of 0 or less means unlimited).
+ * Borrowed connections must be handed back via
+ * {@link #returnConnection(ServerImpl)}.
+ */
+public class GoldenConnectionPool {
+    // Fixed: the logger was previously registered under KuduSinkConnector.class
+    // (copy-paste error), which misattributed every pool log line.
+    private static final Logger log = LoggerFactory.getLogger(GoldenConnectionPool.class);
+
+    // Set once by init(); prevents a live pool from being silently re-pointed.
+    private boolean isReady = false;
+
+    private GoldenConnectionPool() {
+    }
+
+    /** Initialization-on-demand holder idiom: lazy and thread-safe without locks. */
+    private static class SingletonHolder {
+        private static final GoldenConnectionPool singleton = new GoldenConnectionPool();
+    }
+
+    public static GoldenConnectionPool getInstance() {
+        return SingletonHolder.singleton;
+    }
+
+    private String serverIp = "127.0.0.1"; // Golden server IP
+    private short serverPort = 10010;      // Golden server port
+    private String userName = "sa";
+    private String password = "golden";
+    private int defaultConnections = 1;     // initial pool size
+    private int incrementalConnections = 1; // growth step when the pool is exhausted
+    private int maxConnections = 10;        // hard cap; a value of 0 or less means unlimited
+    private Vector<PooledConnection> connections = null; // pooled connections; null until the pool is created
+
+    /**
+     * Configures the pool's connection parameters. Only the first call has any
+     * effect; later calls are ignored with a warning.
+     */
+    public void init(String serverIp, short serverPort, String userName, String password, int defaultConnections, int incrementalConnections, int maxConnections) {
+        if (isReady) {
+            log.warn("Golden连接池已配置,不允许重复配置");
+            return;
+        }
+
+        this.serverIp = serverIp;
+        this.serverPort = serverPort;
+        this.userName = userName;
+        this.password = password;
+        this.defaultConnections = defaultConnections;
+        this.incrementalConnections = incrementalConnections;
+        this.maxConnections = maxConnections;
+
+        isReady = true;
+        log.info("Golden连接池已配置");
+    }
+
+    /**
+     * Creates the pool (if it does not already exist) with
+     * {@code defaultConnections} initial connections.
+     */
+    private synchronized void createPool() throws Exception {
+        if (connections != null) {
+            return; // already created
+        }
+        connections = new Vector<>();
+        createConnections(this.defaultConnections);
+        log.info(" Golden连接池创建成功! ");
+    }
+
+    /**
+     * Adds up to {@code numConnections} new connections to the pool, stopping
+     * early once the {@code maxConnections} cap is reached.
+     *
+     * @param numConnections number of Golden connections to create
+     */
+    private void createConnections(int numConnections) throws Exception {
+        for (int x = 0; x < numConnections; x++) {
+            // Respect the cap; a cap of 0 or less means no limit.
+            if (this.maxConnections > 0 && this.connections.size() >= this.maxConnections) {
+                break;
+            }
+
+            try {
+                connections.addElement(new PooledConnection(newConnection()));
+            } catch (Exception e) {
+                // Log the full exception as the cause instead of only its message.
+                log.error(" 创建Golden连接失败! ", e);
+                throw e;
+            }
+            log.info(" Golden连接己创建 ......");
+        }
+    }
+
+    /**
+     * Opens a brand-new connection to the Golden server.
+     *
+     * @return a freshly created {@link ServerImpl}
+     */
+    private ServerImpl newConnection() throws Exception {
+        return new ServerImpl(serverIp, serverPort, userName, password);
+    }
+
+    /**
+     * Borrows a connection from the pool, creating the pool on first use.
+     * If every connection is busy and no more can be created, this method
+     * sleeps briefly and retries until one becomes available (it may wait
+     * indefinitely when the pool is capped and saturated).
+     *
+     * @return a free Golden connection, now marked busy
+     */
+    public synchronized ServerImpl getConnection() throws Exception {
+        if (connections == null) {
+            createPool();
+        }
+
+        ServerImpl conn = getFreeConnection();
+        while (conn == null) {
+            // All connections busy and the pool is at capacity: back off, retry.
+            wait(250);
+            conn = getFreeConnection();
+        }
+        return conn;
+    }
+
+    /**
+     * Returns a free connection, growing the pool by
+     * {@code incrementalConnections} if none is currently available.
+     *
+     * @return a free connection, or {@code null} if the pool is saturated
+     */
+    private ServerImpl getFreeConnection() throws Exception {
+        ServerImpl conn = findFreeConnection();
+        if (conn == null) {
+            createConnections(incrementalConnections);
+            conn = findFreeConnection();
+        }
+        return conn;
+    }
+
+    /**
+     * Scans the pool for an idle connection, marking the first one found busy.
+     *
+     * @return an idle connection, or {@code null} if all are busy
+     */
+    private ServerImpl findFreeConnection() throws Exception {
+        Enumeration<PooledConnection> enumerate = connections.elements();
+        while (enumerate.hasMoreElements()) {
+            PooledConnection pConn = enumerate.nextElement();
+            if (!pConn.isBusy()) {
+                pConn.setBusy(true);
+                return pConn.getConnection();
+            }
+        }
+        return null;
+    }
+
+    /**
+     * Hands a borrowed connection back to the pool, marking it idle again.
+     * Every connection obtained via {@link #getConnection()} should be
+     * returned here when no longer needed.
+     */
+    public void returnConnection(ServerImpl conn) {
+        if (connections == null) {
+            log.warn(" 连接池不存在,无法返回此连接到连接池中 !");
+            return;
+        }
+
+        Enumeration<PooledConnection> enumerate = connections.elements();
+        while (enumerate.hasMoreElements()) {
+            PooledConnection pConn = enumerate.nextElement();
+            if (conn == pConn.getConnection()) {
+                pConn.setBusy(false);
+                break;
+            }
+        }
+    }
+
+    /**
+     * Replaces every pooled connection with a freshly opened one. Busy
+     * connections get a 5-second grace period before being refreshed anyway.
+     */
+    public synchronized void refreshConnections() throws Exception {
+        if (connections == null) {
+            log.warn(" 连接池不存在,无法刷新 !");
+            return;
+        }
+
+        Enumeration<PooledConnection> enumerate = connections.elements();
+        while (enumerate.hasMoreElements()) {
+            PooledConnection pConn = enumerate.nextElement();
+            if (pConn.isBusy()) {
+                wait(5000); // give the borrower 5 seconds, then refresh regardless
+            }
+            closeConnection(pConn.getConnection());
+            pConn.setConnection(newConnection());
+            pConn.setBusy(false);
+        }
+    }
+
+    /**
+     * Closes every pooled connection and discards the pool. Busy connections
+     * get a 5-second grace period before being closed anyway.
+     */
+    private synchronized void closeConnectionPool() throws Exception {
+        if (connections == null) {
+            log.warn(" 连接池不存在,无法关闭 !");
+            return;
+        }
+
+        // Fixed: the old code removed elements while enumerating the Vector,
+        // which shifts indices and skips entries. We now close every entry and
+        // drop the whole vector afterwards.
+        Enumeration<PooledConnection> enumerate = connections.elements();
+        while (enumerate.hasMoreElements()) {
+            PooledConnection pConn = enumerate.nextElement();
+            if (pConn.isBusy()) {
+                wait(5000); // give the borrower 5 seconds, then close regardless
+            }
+            closeConnection(pConn.getConnection());
+        }
+        connections = null;
+    }
+
+    /** Closes a single Golden connection, logging (not propagating) failures. */
+    private void closeConnection(ServerImpl conn) {
+        try {
+            conn.close();
+        } catch (Exception e) {
+            log.error(" 关闭Golden连接出错: ", e);
+        }
+    }
+
+    /**
+     * Best-effort cleanup when the pool is garbage collected.
+     * NOTE(review): finalization is deprecated in modern Java; an explicit
+     * shutdown hook would be more reliable.
+     */
+    @Override
+    public void finalize() {
+        try {
+            closeConnectionPool();
+        } catch (Exception e) {
+            log.error(" 关闭Golden连接池出错: ", e);
+        }
+    }
+
+    /** Sleeps for the given number of milliseconds, preserving the interrupt flag. */
+    private void wait(int mSeconds) {
+        try {
+            Thread.sleep(mSeconds);
+        } catch (InterruptedException e) {
+            // Re-assert the interrupt instead of silently swallowing it.
+            Thread.currentThread().interrupt();
+        }
+    }
+
+    /** @return the pool's initial size */
+    public int getDefaultConnections() {
+        return this.defaultConnections;
+    }
+
+    /** Sets the pool's initial size. */
+    public void setDefaultConnections(int defaultConnections) {
+        this.defaultConnections = defaultConnections;
+    }
+
+    /** @return how many connections are added when the pool is exhausted */
+    public int getIncrementalConnections() {
+        return this.incrementalConnections;
+    }
+
+    /** Sets how many connections are added when the pool is exhausted. */
+    public void setIncrementalConnections(int incrementalConnections) {
+        this.incrementalConnections = incrementalConnections;
+    }
+
+    /** @return the maximum pool size (0 or less means unlimited) */
+    public int getMaxConnections() {
+        return this.maxConnections;
+    }
+
+    /** Sets the maximum pool size. */
+    public void setMaxConnections(int maxConnections) {
+        this.maxConnections = maxConnections;
+    }
+
+    /**
+     * Pool entry: a Golden connection plus a busy flag marking whether the
+     * connection is currently lent out.
+     */
+    class PooledConnection {
+        ServerImpl connection = null; // the pooled Golden connection
+        boolean busy = false;         // true while the connection is lent out
+
+        public PooledConnection(ServerImpl connection) {
+            this.connection = connection;
+        }
+
+        public ServerImpl getConnection() {
+            return connection;
+        }
+
+        public void setConnection(ServerImpl connection) {
+            this.connection = connection;
+        }
+
+        public boolean isBusy() {
+            return busy;
+        }
+
+        public void setBusy(boolean busy) {
+            this.busy = busy;
+        }
+    }
+
+}

+ 54 - 0
kafka-connectors/src/main/java/com/gyee/ygys/connector/golden/GoldenSourceConnector.java

@@ -0,0 +1,54 @@
+
+package com.gyee.ygys.connector.golden;
+
+import org.apache.kafka.common.config.ConfigDef;
+import org.apache.kafka.connect.connector.Task;
+import org.apache.kafka.connect.source.SourceConnector;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public class GoldenSourceConnector extends SourceConnector {
+  private static final Logger log = LoggerFactory.getLogger(GoldenSourceConnector.class);
+
+  private Map<String, String> configProps;
+
+  @Override
+  public String version() {
+    return this.getClass().getPackage().getImplementationVersion();
+  }
+
+  @Override
+  public void start(Map<String, String> props) {
+    log.info("\nGoldenSourceConnector\n starting.");
+    configProps = props;
+  }
+
+  @Override
+  public Class<? extends Task> taskClass() {
+    return GoldenSourceTask.class;
+  }
+
+  @Override
+  public List<Map<String, String>> taskConfigs(int maxTasks) {
+    log.info("Setting task configurations for {} workers.", maxTasks);
+    final List<Map<String, String>> configs = new ArrayList<>(maxTasks);
+    for (int i = 0; i < maxTasks; ++i) {
+      configs.add(configProps);
+    }
+    return configs;
+  }
+
+  @Override
+  public void stop() {
+  }
+
+  @Override
+  public ConfigDef config() {
+    return GoldenConfig.CONFIG_DEF;
+  }
+
+}

+ 41 - 0
kafka-connectors/src/main/java/com/gyee/ygys/connector/golden/GoldenSourceTask.java

@@ -0,0 +1,41 @@
+package com.gyee.ygys.connector.golden;
+
+import org.apache.kafka.connect.source.SourceRecord;
+import org.apache.kafka.connect.source.SourceTask;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Source task for the Golden connector. Currently a stub: it parses its
+ * configuration on start and produces no records yet.
+ */
+public class GoldenSourceTask extends SourceTask {
+    private static final Logger log = LoggerFactory.getLogger(GoldenSourceTask.class);
+
+    GoldenConfig config;
+
+    @Override
+    public String version() {
+        return new GoldenSourceConnector().version();
+    }
+
+    /** Parses the task properties into a {@link GoldenConfig}. */
+    @Override
+    public void start(Map<String, String> props) {
+        log.info("GoldenSourceTask start...");
+        config = new GoldenConfig(props);
+    }
+
+    /**
+     * Returns the next batch of records, or {@code null} when none are
+     * available (a legal answer per the SourceTask contract).
+     *
+     * <p>Fixed: the framework calls poll() in a tight loop; the previous
+     * implementation logged at INFO and returned immediately on every call,
+     * flooding the log and busy-spinning the worker thread. We now log at
+     * TRACE and back off briefly before reporting "no data".
+     */
+    @Override
+    public List<SourceRecord> poll() throws InterruptedException {
+        log.trace("poll");
+        Thread.sleep(1000); // back off so an idle task does not spin the CPU
+        return null;
+    }
+
+    @Override
+    public void stop() {
+        log.trace("Stopping");
+    }
+
+}

+ 132 - 0
kafka-connectors/src/main/java/com/gyee/ygys/connector/kudu/KuduSinkConfig.java

@@ -0,0 +1,132 @@
+/*
+ * Copyright 2016 Onfocus SAS
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gyee.ygys.connector.kudu;
+
+import org.apache.kafka.common.config.AbstractConfig;
+import org.apache.kafka.common.config.ConfigDef;
+
+import java.util.Map;
+
+/**
+ * Configuration for the Kudu sink connector. Settings fall into three groups:
+ * Connection (master address and kudu-client tuning), Data Mapping (how
+ * records are routed to Kudu tables), and Retries (error-handling budget).
+ */
+public class KuduSinkConfig extends AbstractConfig {
+
+  // --- Connection settings -------------------------------------------------
+
+  public static final String KUDU_MASTER = "kudu.master";
+  private static final String KUDU_MASTER_DOC = "Comma-separated list of \"host:port\" pairs of the masters.";
+
+  // The -1 defaults below are sentinels meaning "keep the kudu-client default".
+  public static final String KUDU_WORKER_COUNT = "kudu.worker.count";
+  private static final String KUDU_WORKER_COUNT_DOC = "Maximum number of worker threads. Defauts to \"2 * the number of available processors\".";
+  private static final int KUDU_WORKER_COUNT_DEFAULT = -1;
+
+  public static final String KUDU_OPERATION_TIMEOUT = "kudu.operation.timeout.ms";
+  private static final String KUDU_OPERATION_TIMEOUT_DOC = "Timeout used for user operations (using sessions and scanners). Defauts to -1, to apply the default value of kudu-client: 30s.";
+  private static final int KUDU_OPERATION_TIMEOUT_DEFAULT = -1;
+
+  public static final String KUDU_SOCKET_READ_TIMEOUT = "kudu.socket.read.timeout.ms";
+  private static final String KUDU_SOCKET_READ_TIMEOUT_DOC = "Maximum number of worker threads. Defauts to -1, to apply the default value of kudu-client: 10s.";
+  private static final int KUDU_SOCKET_READ_TIMEOUT_DEFAULT = -1;
+
+  // --- Data-mapping settings -----------------------------------------------
+
+  public static final String KUDU_TABLE_FIELD = "kudu.table.field";
+  private static final String KUDU_TABLE_FIELD_DOC = "Record field defining the target table name. Defaults to the topic name of the current record.";
+
+  public static final String KUDU_TABLE_FILTER = "kudu.table.filter";
+  private static final String KUDU_TABLE_FILTER_DOC = "If a table field was given, filter records containing the given string.";
+
+  public static final String KEY_INSERT = "key.insert";
+  private static final String KEY_INSERT_DOC = "Also insert the fields from the key in Kudu.";
+  private static final boolean KEY_INSERT_DEFAULT = false;
+
+  // --- Retry settings ------------------------------------------------------
+
+  public static final String MAX_RETRIES = "max.retries";
+  private static final int MAX_RETRIES_DEFAULT = 10;
+  private static final String MAX_RETRIES_DOC = "The maximum number of times to retry on errors before failing the task.";
+  private static final String MAX_RETRIES_DISPLAY = "Maximum Retries";
+
+  public static final String RETRY_BACKOFF_MS = "retry.backoff.ms";
+  private static final int RETRY_BACKOFF_MS_DEFAULT = 3000;
+  private static final String RETRY_BACKOFF_MS_DOC = "The time in milliseconds to wait following an error before a retry attempt is made.";
+  private static final String RETRY_BACKOFF_MS_DISPLAY = "Retry Backoff (millis)";
+
+  // Group names used when rendering the configuration documentation.
+  private static final String CONNECTION_GROUP = "Connection";
+  private static final String DATA_MAPPING_GROUP = "Data Mapping";
+  private static final String RETRIES_GROUP = "Retries";
+
+  private static final ConfigDef.Range NON_NEGATIVE_INT_VALIDATOR = ConfigDef.Range.atLeast(0);
+
+  // Full schema of the connector configuration; define() order within each
+  // group fixes the documentation order.
+  public static final ConfigDef CONFIG_DEF = new ConfigDef()
+    // Connection
+    .define(
+      KUDU_MASTER, ConfigDef.Type.STRING, ConfigDef.NO_DEFAULT_VALUE,
+      ConfigDef.Importance.HIGH, KUDU_MASTER_DOC,
+      CONNECTION_GROUP, 1, ConfigDef.Width.MEDIUM, KUDU_MASTER)
+    .define(
+      KUDU_WORKER_COUNT, ConfigDef.Type.INT, KUDU_WORKER_COUNT_DEFAULT,
+      ConfigDef.Importance.MEDIUM, KUDU_WORKER_COUNT_DOC,
+      CONNECTION_GROUP, 2, ConfigDef.Width.MEDIUM, KUDU_WORKER_COUNT)
+    .define(
+      KUDU_OPERATION_TIMEOUT, ConfigDef.Type.INT, KUDU_OPERATION_TIMEOUT_DEFAULT,
+      ConfigDef.Importance.LOW, KUDU_OPERATION_TIMEOUT_DOC,
+      CONNECTION_GROUP, 3, ConfigDef.Width.MEDIUM, KUDU_OPERATION_TIMEOUT)
+    .define(
+      KUDU_SOCKET_READ_TIMEOUT, ConfigDef.Type.INT, KUDU_SOCKET_READ_TIMEOUT_DEFAULT,
+      ConfigDef.Importance.LOW, KUDU_SOCKET_READ_TIMEOUT_DOC,
+      CONNECTION_GROUP, 4, ConfigDef.Width.MEDIUM, KUDU_SOCKET_READ_TIMEOUT)
+    // Data Mapping
+    .define(
+      KUDU_TABLE_FIELD, ConfigDef.Type.STRING, null,
+      ConfigDef.Importance.LOW, KUDU_TABLE_FIELD_DOC,
+      DATA_MAPPING_GROUP, 1, ConfigDef.Width.MEDIUM, KUDU_TABLE_FIELD)
+    .define(
+      KUDU_TABLE_FILTER, ConfigDef.Type.STRING, null,
+      ConfigDef.Importance.LOW, KUDU_TABLE_FILTER_DOC,
+      DATA_MAPPING_GROUP, 2, ConfigDef.Width.MEDIUM, KUDU_TABLE_FILTER)
+    .define(
+      KEY_INSERT, ConfigDef.Type.BOOLEAN, KEY_INSERT_DEFAULT,
+      ConfigDef.Importance.LOW, KEY_INSERT_DOC,
+      DATA_MAPPING_GROUP, 3, ConfigDef.Width.MEDIUM, KEY_INSERT)
+    // Retries
+    .define(MAX_RETRIES, ConfigDef.Type.INT, MAX_RETRIES_DEFAULT, NON_NEGATIVE_INT_VALIDATOR,
+            ConfigDef.Importance.MEDIUM, MAX_RETRIES_DOC,
+            RETRIES_GROUP, 1, ConfigDef.Width.SHORT, MAX_RETRIES_DISPLAY)
+    .define(RETRY_BACKOFF_MS, ConfigDef.Type.INT, RETRY_BACKOFF_MS_DEFAULT, NON_NEGATIVE_INT_VALIDATOR,
+            ConfigDef.Importance.MEDIUM, RETRY_BACKOFF_MS_DOC,
+            RETRIES_GROUP, 2, ConfigDef.Width.SHORT, RETRY_BACKOFF_MS_DISPLAY);
+
+  // Parsed values, exposed as immutable public fields for convenient access.
+  public final String kuduMaster;
+  public final Integer kuduWorkerCount;
+  public final Integer kuduOperationTimeout;
+  public final Integer kuduSocketReadTimeout;
+  public final String kuduTableField;
+  public final String kuduTableFilter;
+  public final boolean kuduKeyInsert;
+  public final int maxRetries;
+  public final int retryBackoffMs;
+
+  /**
+   * Parses and validates the given properties against {@link #CONFIG_DEF}.
+   *
+   * @param props raw connector properties
+   */
+  public KuduSinkConfig(Map<?, ?> props) {
+    super(CONFIG_DEF, props);
+    kuduMaster = getString(KUDU_MASTER);
+    kuduWorkerCount = getInt(KUDU_WORKER_COUNT);
+    kuduOperationTimeout = getInt(KUDU_OPERATION_TIMEOUT);
+    kuduSocketReadTimeout = getInt(KUDU_SOCKET_READ_TIMEOUT);
+    kuduTableField = getString(KUDU_TABLE_FIELD);
+    kuduTableFilter = getString(KUDU_TABLE_FILTER);
+    kuduKeyInsert = getBoolean(KEY_INSERT);
+    maxRetries = getInt(MAX_RETRIES);
+    retryBackoffMs = getInt(RETRY_BACKOFF_MS);
+  }
+
+  /** Prints the configuration reference in reStructuredText form. */
+  public static void main(String... args) {
+    System.out.println(CONFIG_DEF.toRst());
+  }
+}

+ 111 - 0
kafka-connectors/src/main/java/com/gyee/ygys/connector/kudu/KuduSinkConnector.java

@@ -0,0 +1,111 @@
+/*
+ * Copyright 2016 Onfocus SAS
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gyee.ygys.connector.kudu;
+
+
+import org.apache.kafka.common.config.ConfigDef;
+import org.apache.kafka.connect.connector.Task;
+import org.apache.kafka.connect.sink.SinkConnector;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public class KuduSinkConnector extends SinkConnector {
+  private static final Logger log = LoggerFactory.getLogger(KuduSinkConnector.class);
+
+  private Map<String, String> configProps;
+
+  /**
+   * Get the version of this connector.
+   *
+   * @return the version, formatted as a String
+   */
+  @Override
+  public String version() {
+    return this.getClass().getPackage().getImplementationVersion();
+  }
+
+  /**
+   * Start this Connector. This method will only be called on a clean Connector, i.e. it has
+   * either just been instantiated and initialized or {@link #stop()} has been invoked.
+   *
+   * @param props configuration settings
+   */
+  @Override
+  public void start(Map<String, String> props) {
+    log.info("\n" +
+      "   ___        __                         \n" +
+      "  /___\\_ __  / _| ___   ___ _   _ ___    \n" +
+      " //  // '_ \\| |_ / _ \\ / __| | | / __|   \n" +
+      "/ \\_//| | | |  _| (_) | (__| |_| \\__ \\   \n" +
+      "\\___/ |_| |_|_|  \\___/ \\___|\\__,_|___/   \n" +
+      "                 _       __ _       _    \n" +
+      "  /\\ /\\_   _  __| |_   _/ _(_)_ __ | | __\n" +
+      " / //_/ | | |/ _` | | | \\ \\| | '_ \\| |/ /\n" +
+      "/ __ \\| |_| | (_| | |_| |\\ \\ | | | |   < \n" +
+      "\\/  \\/ \\__,_|\\__,_|\\__,_\\__/_|_| |_|_|\\_\\\n" +
+      "                                         \n" +
+      "starting.");
+
+    configProps = props;
+  }
+
+  /**
+   * Returns the Task implementation for this Connector.
+   */
+  @Override
+  public Class<? extends Task> taskClass() {
+    return KuduSinkTask.class;
+  }
+
+  /**
+   * Returns a set of configurations for Tasks based on the current configuration,
+   * producing at most count configurations.
+   *
+   * @param maxTasks maximum number of configurations to generate
+   * @return configurations for Tasks
+   */
+  @Override
+  public List<Map<String, String>> taskConfigs(int maxTasks) {
+    log.info("Setting task configurations for {} workers.", maxTasks);
+    final List<Map<String, String>> configs = new ArrayList<>(maxTasks);
+    for (int i = 0; i < maxTasks; ++i) {
+      configs.add(configProps);
+    }
+    return configs;
+  }
+
+  /**
+   * Stop this connector.
+   */
+  @Override
+  public void stop() {
+  }
+
+  /**
+   * Define the configuration for the connector.
+   *
+   * @return The ConfigDef for this connector.
+   */
+  @Override
+  public ConfigDef config() {
+    return KuduSinkConfig.CONFIG_DEF;
+  }
+}

+ 151 - 0
kafka-connectors/src/main/java/com/gyee/ygys/connector/kudu/KuduSinkTask.java

@@ -0,0 +1,151 @@
+/*
+ * Copyright 2016 Onfocus SAS
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gyee.ygys.connector.kudu;
+
+import org.apache.kafka.clients.consumer.OffsetAndMetadata;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.connect.errors.ConnectException;
+import org.apache.kafka.connect.errors.RetriableException;
+import org.apache.kafka.connect.sink.SinkRecord;
+import org.apache.kafka.connect.sink.SinkTask;
+import org.apache.kudu.client.*;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collection;
+import java.util.Map;
+
+public class KuduSinkTask extends SinkTask {
+  private static final Logger log = LoggerFactory.getLogger(KuduSinkTask.class);
+
+  KuduSinkConfig config;
+  KuduWriter writer;
+  int remainingRetries; // retry budget left before the task is failed
+
+  /**
+   * Version of this task; matches the connector jar's Implementation-Version
+   * manifest entry.
+   */
+  @Override
+  public String version() {
+    return getClass().getPackage().getImplementationVersion();
+  }
+
+  /**
+   * Parses the task configuration, connects to Kudu, and resets the retry
+   * budget.
+   *
+   * @param props initial configuration
+   */
+  @Override
+  public void start(Map<String, String> props) {
+    log.info("Starting task");
+    config = new KuduSinkConfig(props);
+    initWriter();
+    remainingRetries = config.maxRetries;
+  }
+
+  /** Builds a fresh Kudu client (honoring any non-default tuning) and wraps it in a writer. */
+  void initWriter() {
+    log.info("Connecting to Kudu Master at {}", config.kuduMaster);
+    final KuduClient.KuduClientBuilder builder = new KuduClient.KuduClientBuilder(config.kuduMaster);
+
+    // A value of -1 means "leave the kudu-client default in place".
+    if (config.kuduWorkerCount != -1) {
+      builder.workerCount(config.kuduWorkerCount);
+    }
+    if (config.kuduOperationTimeout != -1) {
+      builder.defaultOperationTimeoutMs(config.kuduOperationTimeout);
+    }
+    if (config.kuduSocketReadTimeout != -1) {
+      builder.defaultSocketReadTimeoutMs(config.kuduSocketReadTimeout);
+    }
+
+    writer = new KuduWriter(config, builder.build());
+  }
+
+  /**
+   * Writes the given records to Kudu, translating Kudu failures into the
+   * Connect retry protocol: throttling requests from Kudu are always retried,
+   * while other failures rebuild the writer and retry until the budget
+   * ({@code max.retries}) is exhausted, at which point the task is failed.
+   *
+   * @param records the set of records to send
+   */
+  @Override
+  public void put(Collection<SinkRecord> records) {
+    if (records.isEmpty()) { return; }
+
+    if (log.isTraceEnabled()) {
+      final SinkRecord first = records.iterator().next();
+      final int recordsCount = records.size();
+      log.trace("Received {} records. First record kafka coordinates:({}-{}-{}). Writing them to Kudu...",
+        recordsCount, first.topic(), first.kafkaPartition(), first.kafkaOffset());
+    }
+
+    try {
+      writer.write(records);
+    } catch (KuduException ke) {
+      handleWriteFailure(ke, records.size());
+    }
+    // Reached only on success: restore the full retry budget.
+    remainingRetries = config.maxRetries;
+  }
+
+  /**
+   * Maps a failed write onto the Connect retry protocol. Always throws:
+   * either a {@link RetriableException} (ask the framework to retry the
+   * batch) or a {@link ConnectException} (fail the task).
+   */
+  private void handleWriteFailure(KuduException ke, int recordsCount) {
+    if (ke instanceof PleaseThrottleException) {
+      log.warn("Write of {} records failed. Kudu asks to throttle. Will retry.", recordsCount, ke);
+      throw new RetriableException(ke);
+    }
+    log.warn("Write of {} records failed, remainingRetries={}", recordsCount, remainingRetries, ke);
+    if (remainingRetries == 0) {
+      throw new ConnectException(ke);
+    }
+    // Start over with a brand-new client before the framework retries.
+    writer.close();
+    initWriter();
+    remainingRetries--;
+    context.timeout(config.retryBackoffMs);
+    throw new RetriableException(ke);
+  }
+
+  /**
+   * No extra work per offset commit: the writer flushes on its own schedule
+   * and on close.
+   *
+   * @param offsets mapping of TopicPartition to committed offset
+   */
+  @Override
+  public void flush(Map<TopicPartition, OffsetAndMetadata> offsets) {
+    // Not necessary
+  }
+
+  /** Closes the writer (which triggers a final flush) if one was created. */
+  @Override
+  public void stop() {
+    log.info("Stopping task");
+    if (writer != null) {
+      writer.close();
+    }
+  }
+}

+ 298 - 0
kafka-connectors/src/main/java/com/gyee/ygys/connector/kudu/KuduWriter.java

@@ -0,0 +1,298 @@
+/*
+ * Copyright 2016 Onfocus SAS
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gyee.ygys.connector.kudu;
+
+import org.apache.kafka.connect.data.Field;
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.connect.data.Struct;
+import org.apache.kafka.connect.errors.ConnectException;
+import org.apache.kafka.connect.sink.SinkRecord;
+import org.apache.kudu.client.*;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.*;
+import java.util.stream.Collectors;
+
+public class KuduWriter {
+  private static final Logger log = LoggerFactory.getLogger(KuduWriter.class);
+
+  private final KuduSinkConfig config;
+  private final KuduClient client;
+  private final KuduSession session;
+
+  final Map<String, KuduTable> kuduTables = new HashMap<>();
+
+  public KuduWriter(final KuduSinkConfig config, final KuduClient client) {
+    this.config = config;
+    this.client = client;
+
+    this.session = client.newSession();
+
+    // Ignore duplicates in case of redelivery
+    session.setIgnoreAllDuplicateRows(true);
+
+    // Let the client handle flushes
+    // "The writes will be sent in the background, potentially batched together with other writes from
+    // the same session. If there is not sufficient buffer space, then
+    // {@link KuduSession#apply KuduSession.apply()} may block for buffer space to be available."
+    session.setFlushMode(SessionConfiguration.FlushMode.AUTO_FLUSH_BACKGROUND);
+  }
+
+  /**
+   * Convert SinkRecord
+   *
+   * @param records
+   * @throws KuduException
+   */
+  void write(final Collection<SinkRecord> records) throws KuduException {
+    for (SinkRecord record : records) {
+      final KuduTable table = destinationTable(record);
+      if (table == null) continue;
+      final Upsert upsert = table.newUpsert();
+      final PartialRow row = upsert.getRow();
+
+      // Get column names of the Kudu table
+      final Set<String> kuduColNames = table.getSchema().getColumns().stream().map((c) -> c.getName()).collect(Collectors.toSet());
+
+      // Set the fields of the record value to a new Kudu row
+      Struct value = (Struct)record.value();
+      for (Field field : record.valueSchema().fields()) {
+        Schema.Type fieldType = field.schema().type();
+        // Check if the field name is in the Kudu table. If it's an array, the field name will be generated then checked later.
+        if (fieldType.equals(Schema.Type.ARRAY) || kuduColNames.contains(field.name())) {
+          addStructToRow(value, field, fieldType, row, kuduColNames);
+        }
+      }
+
+      // Add the fields of the record key to the Kudu row
+      if (config.kuduKeyInsert) {
+        Struct key = (Struct)record.key();
+        for (Field field : record.keySchema().fields()) {
+          Schema.Type fieldType = field.schema().type();
+          // Check if the field name is in the Kudu table. If it's an array, the field name will be generated then checked later.
+          if (fieldType.equals(Schema.Type.ARRAY) || kuduColNames.contains(field.name())) {
+            addStructToRow(key, field, fieldType, row, kuduColNames);
+          }
+        }
+      }
+
+      // Since we're in auto-flush mode, this will return immediately
+      session.apply(upsert);
+    }
+
+    flush();
+  }
+
+  /**
+   * Open or reuse a {@link KuduTable} macthing the record or the value
+   * of the record field configured with `kudu.table.field`.
+   *
+   * @param record
+   * @return
+   * @throws KuduException
+   */
+  KuduTable destinationTable(SinkRecord record) throws KuduException {
+    String tableName;
+    if (config.kuduTableField != null) {
+      tableName = ((Struct)record.value()).getString(config.kuduTableField);
+      if (config.kuduTableFilter != null && tableName.indexOf(config.kuduTableFilter) != -1) {
+        return null;
+      }
+    } else {
+      tableName = record.topic();
+    }
+    KuduTable table = kuduTables.get(tableName);
+    if (table == null) {
+      table = client.openTable(tableName);
+      kuduTables.put(tableName, table);
+    }
+    return table;
+  }
+
+  /**
+   * Force the session to flush its buffers.
+   * */
+  public void flush() throws KuduException {
+    if (session != null && !session.isClosed()) {
+      final List<OperationResponse> responses = session.flush();
+      for (OperationResponse response : responses) {
+        if (response.hasRowError()) {
+          // TODO would there be a reason to throw RetriableException ?
+          throw new ConnectException(response.getRowError().toString());
+        }
+      }
+    }
+  }
+
+  /**
+   * Close {@link KuduSession} and {@link KuduClient}.
+   * */
+  public void close() {
+    if (session != null && !session.isClosed()) {
+      try {
+        session.close();
+      } catch (KuduException ke) {
+        log.warn("Error closing the Kudu session: {}", ke.getMessage());
+      }
+    }
+    if (client != null) {
+      try {
+        client.shutdown();
+      } catch (KuduException ke) {
+        log.warn("Error closing the Kudu client: {}", ke.getMessage());
+      }
+    }
+  }
+
+  /**
+   * Convert a {@link SinkRecord} type to Kudu and add the column to the Kudu {@link PartialRow}.
+   *
+   * @param struct Struct value
+   * @param field SinkRecord Field
+   * @paran fieldType
+   * @param row The Kudu row to add the field to
+   * @return the updated Kudu row
+   **/
+  private PartialRow addStructToRow(Struct struct, Field field, Schema.Type fieldType, PartialRow row, Set<String> kuduColNames) {
+    String fieldName = field.name();
+
+    switch (fieldType) {
+      case STRING:
+        row.addString(fieldName, struct.getString(fieldName));
+        break;
+      case INT8:
+        row.addByte(fieldName, struct.getInt8(fieldName));
+        break;
+      case INT16:
+        row.addShort(fieldName, struct.getInt16(fieldName));
+        break;
+      case INT32:
+        row.addInt(fieldName, struct.getInt32(fieldName));
+        break;
+      case INT64:
+        row.addLong(fieldName, struct.getInt64(fieldName));
+        break;
+      case BOOLEAN:
+        row.addBoolean(fieldName, struct.getBoolean(fieldName));
+        break;
+      case FLOAT32:
+        row.addFloat(fieldName, struct.getFloat32(fieldName));
+        break;
+      case FLOAT64:
+        row.addDouble(fieldName, struct.getFloat64(fieldName));
+        break;
+      case BYTES:
+        row.addBinary(fieldName, struct.getBytes(fieldName));
+        break;
+      case ARRAY:
+        // Support for arrays is handled by adding an index suffix to the field name, starting with "_1".
+        ListIterator<Object> innerValues = struct.getArray(fieldName).listIterator();
+        Schema.Type innerFieldsType = field.schema().valueSchema().type();
+        switch (innerFieldsType) {
+          case STRING:
+            while (innerValues.hasNext()) {
+              String finalFieldName = fieldName + "_" + (innerValues.nextIndex()+1);
+              if (kuduColNames.contains(finalFieldName))
+                row.addString(finalFieldName, (String) innerValues.next());
+              else
+                innerValues.next(); // Consume the iterator
+            }
+            break;
+          case INT8:
+            while (innerValues.hasNext()) {
+              String finalFieldName = fieldName + "_" + (innerValues.nextIndex()+1);
+              if (kuduColNames.contains(finalFieldName))
+                row.addByte(finalFieldName, (Byte) innerValues.next());
+              else
+                innerValues.next(); // Consume the iterator
+            }
+            break;
+          case INT16:
+            while (innerValues.hasNext()) {
+              String finalFieldName = fieldName + "_" + (innerValues.nextIndex()+1);
+              if (kuduColNames.contains(finalFieldName))
+                row.addShort(finalFieldName, (Short) innerValues.next());
+              else
+                innerValues.next(); // Consume the iterator
+            }
+            break;
+          case INT32:
+            while (innerValues.hasNext()) {
+              String finalFieldName = fieldName + "_" + (innerValues.nextIndex()+1);
+              if (kuduColNames.contains(finalFieldName))
+                row.addInt(finalFieldName, (Integer) innerValues.next());
+              else
+                innerValues.next(); // Consume the iterator
+            }
+            break;
+          case INT64:
+            while (innerValues.hasNext()) {
+              String finalFieldName = fieldName + "_" + (innerValues.nextIndex()+1);
+              if (kuduColNames.contains(finalFieldName))
+                row.addLong(finalFieldName, (Long) innerValues.next());
+              else
+                innerValues.next(); // Consume the iterator
+            }
+            break;
+          case BOOLEAN:
+            while (innerValues.hasNext()) {
+              String finalFieldName = fieldName + "_" + (innerValues.nextIndex()+1);
+              if (kuduColNames.contains(finalFieldName))
+                row.addBoolean(finalFieldName, (Boolean) innerValues.next());
+              else
+                innerValues.next(); // Consume the iterator
+            }
+            break;
+          case FLOAT32:
+            while (innerValues.hasNext()) {
+              String finalFieldName = fieldName + "_" + (innerValues.nextIndex()+1);
+              if (kuduColNames.contains(finalFieldName))
+                row.addFloat(finalFieldName, (Float) innerValues.next());
+              else
+                innerValues.next(); // Consume the iterator
+            }
+            break;
+          case FLOAT64:
+            while (innerValues.hasNext()) {
+              String finalFieldName = fieldName + "_" + (innerValues.nextIndex()+1);
+              if (kuduColNames.contains(finalFieldName))
+                row.addDouble(finalFieldName, (Double) innerValues.next());
+              else
+                innerValues.next(); // Consume the iterator
+            }
+            break;
+          case BYTES:
+            while (innerValues.hasNext()) {
+              String finalFieldName = fieldName + "_" + (innerValues.nextIndex()+1);
+              if (kuduColNames.contains(finalFieldName))
+                row.addBinary(finalFieldName, (byte[]) innerValues.next());
+              else
+                innerValues.next(); // Consume the iterator
+            }
+            break;
+          default:
+            throw new ConnectException("Unsupported source data type in array field '" + fieldName + "': " + fieldType);
+        }
+        break;
+      default:
+        throw new ConnectException("Unsupported source data type: " + fieldType);
+    }
+
+    return row;
+  }
+}

+ 21 - 0
kafka-connectors/src/main/java/com/gyee/ygys/connector/redis/Constants.java

@@ -0,0 +1,21 @@
+package com.gyee.ygys.connector.redis;
+
+/**
+ * Declares various reusable constants
+ */
+public final class Constants {
+  /** Utility class */
+  private Constants() {}
+
+  public static final String VERSION = "0.1";
+
+  public static final String TOPIC_DELIMITER = ",";
+
+  public static final String CONFIG_TOPICS = "topics";
+  public static final String CONFIG_KAFKA_PARTITIONS = "kafka_partitions";
+  public static final String CONFIG_REDIS_ADDRESS = "redis_address";
+  public static final String CONFIG_NAME_LIST_KEY = "name_list_key";
+  public static final String CONFIG_GREETING_LIST_KEY = "greeting_list_key";
+
+  public static final int REDIS_QUERY_TIMEOUT = 3; // sec
+}

+ 71 - 0
kafka-connectors/src/main/java/com/gyee/ygys/connector/redis/GreeterSink.java

@@ -0,0 +1,71 @@
+package com.gyee.ygys.connector.redis;
+
+import org.apache.kafka.connect.sink.SinkConnector;
+import org.apache.kafka.connect.connector.Task;
+import org.apache.kafka.common.config.ConfigDef;
+import org.apache.kafka.common.config.ConfigDef.Type;
+import org.apache.kafka.common.config.ConfigDef.Range;
+import org.apache.kafka.common.config.ConfigDef.Importance;
+
+import java.util.List;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.HashMap;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+/**
+ * Manages GreeterTask's
+ */
+public final class GreeterSink extends SinkConnector {
+  private URI redisAddress;
+  private String greetingListKey;
+
+  @Override
+  public Class<? extends Task> taskClass() {
+    return GreeterTask.class;
+  }
+
+  @Override
+  public ConfigDef config() {
+    final ConfigDef configDef = new ConfigDef();
+    configDef.define(Constants.CONFIG_REDIS_ADDRESS, Type.STRING, "redis://localhost:6379", Importance.HIGH, "Redis address (redis://<host>:<port>)");
+    configDef.define(Constants.CONFIG_GREETING_LIST_KEY, Type.STRING, "greetings", Importance.HIGH, "Redis key for greeting list");
+
+    return configDef;
+  }
+
+  @Override
+  public List<Map<String, String>> taskConfigs(final int maxTasks) {
+    final List<Map<String, String>> configs = new LinkedList<>();
+
+    for (int i = 0; i < maxTasks; i++) {
+      final Map<String, String> config = new HashMap<>();
+      config.put(Constants.CONFIG_REDIS_ADDRESS, redisAddress.toString());
+      config.put(Constants.CONFIG_GREETING_LIST_KEY, greetingListKey);
+
+      configs.add(config);
+    }
+
+    return configs;
+  }
+
+  @Override
+  public void start(final Map<String, String> props) {
+    try {
+      redisAddress = new URI(props.get(Constants.CONFIG_REDIS_ADDRESS));
+    } catch (URISyntaxException e) {
+      throw new RuntimeException(e);
+    }
+
+    greetingListKey = props.get(Constants.CONFIG_GREETING_LIST_KEY);
+  }
+
+  @Override
+  public void stop() {}
+
+  @Override
+  public String version() {
+    return Constants.VERSION;
+  }
+}

+ 68 - 0
kafka-connectors/src/main/java/com/gyee/ygys/connector/redis/GreeterTask.java

@@ -0,0 +1,68 @@
+package com.gyee.ygys.connector.redis;
+
+import org.apache.kafka.connect.sink.SinkTask;
+import org.apache.kafka.connect.sink.SinkRecord;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.clients.consumer.OffsetAndMetadata;
+
+import redis.clients.jedis.Jedis;
+
+import org.apache.commons.codec.binary.Hex;
+
+import java.util.Collection;
+import java.util.Map;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.charset.Charset;
+
+/**
+ * Welcomes names from Kafka
+ */
+public final class GreeterTask extends SinkTask {
+  private URI redisAddress;
+  private String greetingListKey;
+
+  private Jedis jedis;
+
+  @Override
+  public void start(final Map<String, String> props) {
+    try {
+      redisAddress = new URI(props.get(Constants.CONFIG_REDIS_ADDRESS));
+    } catch (URISyntaxException e) {
+      throw new RuntimeException(e);
+    }
+
+    greetingListKey = props.get(Constants.CONFIG_GREETING_LIST_KEY);
+
+    jedis = new Jedis(redisAddress);
+    jedis.connect();
+  }
+
+  @Override
+  public void put(final Collection<SinkRecord> records) {
+    for (SinkRecord record : records) {
+      final byte[] message = (byte[]) record.value();
+
+      final String name = new String(message, Charset.forName("UTF-8"));
+
+      final String greeting = String.format("Welcome, %s", name);
+
+      if (jedis.isConnected()) {
+        jedis.lpush(greetingListKey, greeting);
+      }
+    }
+  }
+
+  @Override
+  public void flush(final Map<TopicPartition, OffsetAndMetadata> offsets) {}
+
+  @Override
+  public void stop() {
+    jedis.disconnect();
+  }
+
+  @Override
+  public String version() {
+    return Constants.VERSION;
+  }
+}

+ 78 - 0
kafka-connectors/src/main/java/com/gyee/ygys/connector/redis/NameSource.java

@@ -0,0 +1,78 @@
+package com.gyee.ygys.connector.redis;
+
+import org.apache.kafka.connect.source.SourceConnector;
+import org.apache.kafka.connect.connector.Task;
+import org.apache.kafka.common.config.ConfigDef;
+import org.apache.kafka.common.config.ConfigDef.Type;
+import org.apache.kafka.common.config.ConfigDef.Range;
+import org.apache.kafka.common.config.ConfigDef.Importance;
+
+import java.util.List;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.HashMap;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+/**
+ * Manages NameTask's
+ */
+public final class NameSource extends SourceConnector {
+  private String[] kafkaTopics;
+  private int kafkaPartitions;
+  private URI redisAddress;
+  private String nameListKey;
+
+  @Override
+  public Class<? extends Task> taskClass() {
+    return NameTask.class;
+  }
+
+  @Override
+  public ConfigDef config() {
+    final ConfigDef configDef = new ConfigDef();
+    configDef.define(Constants.CONFIG_KAFKA_PARTITIONS, Type.INT, Range.atLeast(0), Importance.LOW, "Number of available Kafka partitions");
+    configDef.define(Constants.CONFIG_REDIS_ADDRESS, Type.STRING, "redis://localhost:6379", Importance.HIGH, "Redis address (redis://<host>:<port>)");
+    configDef.define(Constants.CONFIG_NAME_LIST_KEY, Type.STRING, "names", Importance.HIGH, "Redis key for name list");
+
+    return configDef;
+  }
+
+  @Override
+  public void start(final Map<String, String> props) {
+    kafkaTopics = props.get(Constants.CONFIG_TOPICS).split(Constants.TOPIC_DELIMITER);
+    kafkaPartitions = Integer.parseInt(props.get(Constants.CONFIG_KAFKA_PARTITIONS));
+
+    try {
+      redisAddress = new URI(props.get(Constants.CONFIG_REDIS_ADDRESS));
+    } catch (URISyntaxException e) {
+      throw new RuntimeException(e);
+    }
+
+    nameListKey = props.get(Constants.CONFIG_NAME_LIST_KEY);
+  }
+
+  @Override
+  public void stop() {}
+
+  @Override
+  public List<Map<String, String>> taskConfigs(final int maxTasks) {
+    final List<Map<String, String>> configs = new LinkedList<>();
+
+    for (int i = 0; i < maxTasks; i++) {
+      final Map<String, String> config = new HashMap<>();
+      config.put(Constants.CONFIG_KAFKA_PARTITIONS, String.valueOf(kafkaPartitions));
+      config.put(Constants.CONFIG_REDIS_ADDRESS, redisAddress.toString());
+      config.put(Constants.CONFIG_NAME_LIST_KEY, nameListKey);
+
+      configs.add(config);
+    }
+
+    return configs;
+  }
+
+  @Override
+  public String version() {
+    return Constants.VERSION;
+  }
+}

+ 91 - 0
kafka-connectors/src/main/java/com/gyee/ygys/connector/redis/NameTask.java

@@ -0,0 +1,91 @@
+package com.gyee.ygys.connector.redis;
+
+import org.apache.kafka.connect.source.SourceTask;
+import org.apache.kafka.connect.source.SourceRecord;
+import org.apache.kafka.connect.data.Schema;
+
+import redis.clients.jedis.Jedis;
+import redis.clients.jedis.exceptions.JedisConnectionException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Random;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.charset.Charset;
+
+/**
+ * Sends names to Kafka
+ */
+public final class NameTask extends SourceTask {
+  private static final Logger LOG = LoggerFactory.getLogger(NameTask.class);
+
+  private Random random;
+  private String[] kafkaTopics;
+  private int kafkaPartitions;
+  private URI redisAddress;
+  private String nameListKey;
+
+  private Jedis jedis;
+
+  @Override
+  public void start(final Map<String, String> props) {
+    random = new Random();
+
+    kafkaTopics = props.get(Constants.CONFIG_TOPICS).split(Constants.TOPIC_DELIMITER);
+    kafkaPartitions = Integer.parseInt(props.get(Constants.CONFIG_KAFKA_PARTITIONS));
+
+    final String redisAddressString = props.get(Constants.CONFIG_REDIS_ADDRESS);
+
+    try {
+      redisAddress = new URI(redisAddressString);
+    } catch (URISyntaxException e) {
+      LOG.error("Error parsing URI {} {}", redisAddressString, e);
+    }
+
+    nameListKey = props.get(Constants.CONFIG_NAME_LIST_KEY);
+
+    jedis = new Jedis(redisAddress);
+    jedis.connect();
+  }
+
+  @Override
+  public List<SourceRecord> poll() {
+    final List<SourceRecord> records = new LinkedList<>();
+
+    if (jedis.isConnected()) {
+      try {
+        final List<String> entry = jedis.blpop(Constants.REDIS_QUERY_TIMEOUT, nameListKey);
+
+        final String name = entry.get(1);
+
+        if (name != null) {
+          final byte[] message = name.getBytes(Charset.forName("UTF-8"));
+
+          for (String topic : kafkaTopics) {
+            final SourceRecord record = new SourceRecord(null, null, topic, random.nextInt(kafkaPartitions), Schema.BYTES_SCHEMA, message);
+            records.add(record);
+          }
+        }
+      } catch (JedisConnectionException e) {
+        LOG.warn("Socket closed during Redis query {}", e);
+      }
+    }
+
+    return records;
+  }
+
+  @Override
+  public void stop() {
+    jedis.disconnect();
+  }
+
+  @Override
+  public String version() {
+    return Constants.VERSION;
+  }
+}

+ 4 - 0
kafka-connectors/src/main/java/com/gyee/ygys/connector/redis/package-info.java

@@ -0,0 +1,4 @@
+/**
+ * Example sources and sinks for Kafka Connect
+ */
+package com.gyee.ygys.connector.redis;

BIN
kafka-connectors/src/main/lib/commons-beanutils-1.8.3.jar


BIN
kafka-connectors/src/main/lib/commons-logging-1.1.1.jar


BIN
kafka-connectors/src/main/lib/golden-java-sdk-3.0.27.jar


+ 0 - 0
kafka-connectors/src/main/lib/protobuf-java-2.6.1.jar


Some files were not shown because too many files changed in this diff