Java Code Examples for org.influxdb.dto.BatchPoints#point()

The following examples show how to use org.influxdb.dto.BatchPoints#point(). Each example is taken from an open source project; the source file, project, and license are noted above the code.
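
All of the examples below share the same basic pattern: build a BatchPoints container for a database, append one or more Point instances to it with point(), and send the whole batch with a single write() call. The short sketch below illustrates that pattern in isolation; the connection URL, credentials, database name, measurement, and field values are placeholders chosen for illustration, not taken from any of the projects.

import java.util.concurrent.TimeUnit;

import org.influxdb.InfluxDB;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.BatchPoints;
import org.influxdb.dto.Point;

public class BatchPointsPointSketch {

    public static void main(String[] args) {
        // Placeholder connection settings; adjust them for your environment.
        InfluxDB influxDB = InfluxDBFactory.connect("http://localhost:8086", "user", "password");

        // One BatchPoints instance targets a single database/retention policy.
        BatchPoints batchPoints = BatchPoints
                .database("example_db")
                .retentionPolicy("autogen")
                .consistency(InfluxDB.ConsistencyLevel.ALL)
                .build();

        // Build a point and append it to the batch with point().
        Point point = Point.measurement("cpu")
                .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
                .tag("host", "server01")
                .addField("idle", 90.0)
                .build();
        batchPoints.point(point);

        // write() sends every accumulated point in a single request.
        influxDB.write(batchPoints);
        influxDB.close();
    }
}
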
Example 1
Source File: TicketTest.java    From influxdb-java with MIT License
/**
 * Test for ticket #39
 *
 */
@Test
public void testTicket39() {
	String dbName = "ticket39_" + System.currentTimeMillis();
	this.influxDB.query(new Query("CREATE DATABASE " + dbName));
	BatchPoints batchPoints = BatchPoints
			.database(dbName)
			.tag("async", "true")
			.retentionPolicy(TestUtils.defaultRetentionPolicy(this.influxDB.version()))
			.consistency(InfluxDB.ConsistencyLevel.ALL)
			.build();
	Point.Builder builder = Point.measurement("my_type");
	builder.addField("my_field", "string_value");
	Point point = builder.build();
	batchPoints.point(point);
	this.influxDB.write(batchPoints);
	this.influxDB.query(new Query("DROP DATABASE " + dbName));
}
 
Example 2
Source File: HttpStatServiceImpl.java    From EserKnife with Apache License 2.0
@Override
public void batchInsert(List<NodeHttpStatInfo> nodeHttpStatInfos) {
    if(InflusDbUtil.FLAG){
        if (CollectionUtils.isNotEmpty(nodeHttpStatInfos)) {
            InfluxDB influxDB = InflusDbUtil.getConnection();
            BatchPoints batchPoints = BatchPoints.database(InflusDbUtil.DATABASE)
                    .retentionPolicy("autogen").consistency(InfluxDB.ConsistencyLevel.ALL).build();
            long times = System.currentTimeMillis();
            for (NodeHttpStatInfo nodeHttpStatInfo : nodeHttpStatInfos) {
                Point point = Point.measurement("http").time(times, TimeUnit.MILLISECONDS)
                        .tag("host", nodeHttpStatInfo.getHost())
                        .tag("clusterName", nodeHttpStatInfo.getClustName())
                        .addField("createTime", DateUtil.formatYYYYMMddHHMMSS(nodeHttpStatInfo.getCreateTime()))
                        .addField("totalOpened", nodeHttpStatInfo.getTotal_opened())
                        .addField("currentOpen", nodeHttpStatInfo.getCurrent_open())
                        .build();
                batchPoints.point(point);
            }
            influxDB.write(batchPoints);
        }
    }else{
        httpStatDao.batchInsert(nodeHttpStatInfos);
    }
}
 
Example 3
Source File: InfluxDBReporterTest.java    From statsd-jvm-profiler with MIT License
@Override
protected void testCase(Object[] args) {
    assertEquals(1, args.length);

    BatchPoints actual = (BatchPoints) args[0];

    Point expectedPoint = Point.measurement("fake")
            .field(InfluxDBReporter.VALUE_COLUMN, 100L)
            .tag(TagUtil.PREFIX_TAG, "influxdb.reporter.test")
            .build();

    BatchPoints expected = BatchPoints.database("database").build();
    expected.point(expectedPoint);

    assertEquals(expected.getDatabase(), actual.getDatabase());
    assertEquals(expected.getPoints().size(), actual.getPoints().size());

    Point actualPoint = actual.getPoints().get(0);

    // All the fields on Point are private
    assertTrue(actualPoint.lineProtocol().startsWith("fake"));
    assertTrue(actualPoint.lineProtocol().contains("value=100"));
    assertTrue(actualPoint.lineProtocol().contains("prefix=influxdb.reporter.test"));
}
 
Example 4
Source File: InfluxTarget.java    From datacollector with Apache License 2.0
@Override
public void write(Batch batch) throws StageException {
  BatchPoints batchPoints = BatchPoints
      .database(conf.dbName)
      .retentionPolicy(conf.retentionPolicy)
      .consistency(conf.consistencyLevel)
      .build();

  Iterator<Record> recordIterator = batch.getRecords();

  while (recordIterator.hasNext()) {
    Record record = recordIterator.next();

    for (Point point : converter.getPoints(record)) {
      batchPoints.point(point);
    }
  }

  client.write(batchPoints);
}
 
Example 5
Source File: InfluxDB.java    From iotdb-benchmark with Apache License 2.0
@Override
public Status insertOneBatch(Batch batch) {
  BatchPoints batchPoints = BatchPoints.database(influxDbName)
      .retentionPolicy(defaultRp)
      .consistency(org.influxdb.InfluxDB.ConsistencyLevel.ALL).build();
  try {
    InfluxDataModel model;
    for (Record record : batch.getRecords()) {
      model = createDataModel(batch.getDeviceSchema(), record.getTimestamp(),
          record.getRecordDataValue());
      batchPoints.point(model.toInfluxPoint());
    }

    influxDbInstance.write(batchPoints);

    return new Status(true);
  } catch (Exception e) {
    LOGGER.warn(e.getMessage());
    return new Status(false, 0, e, e.toString());
  }
}
 
Example 6
Source File: StatsCollector.java    From cloudstack with Apache License 2.0
/**
 * Writes batches of InfluxDB database points into a given database.
 */
protected void writeBatches(InfluxDB influxDbConnection, String dbName, List<Point> points) {
    BatchPoints batchPoints = BatchPoints.database(dbName).build();
    influxDbConnection.enableBatch(BatchOptions.DEFAULTS);

    for (Point point : points) {
        batchPoints.point(point);
    }

    influxDbConnection.write(batchPoints);
}
 
Example 7
Source File: PerformanceTests.java    From influxdb-java with MIT License
@Disabled
@Test
public void testWritePerformance() {
	String dbName = "writepoints_" + System.currentTimeMillis();
	this.influxDB.query(new Query("CREATE DATABASE " + dbName));
	String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version());

	long start = System.currentTimeMillis();
	for (int i = 0; i < COUNT; i++) {

		BatchPoints batchPoints = BatchPoints
				.database(dbName)
				.tag("blubber", "bla")
				.retentionPolicy(rp)
				.build();
		for (int j = 0; j < POINT_COUNT; j++) {
			Point point = Point
					.measurement("cpu")
					.addField("idle", (double) j)
					.addField("user", 2.0 * j)
					.addField("system", 3.0 * j)
					.build();
			batchPoints.point(point);
		}

		this.influxDB.write(batchPoints);
	}
	System.out.println("WritePoints for " + COUNT + " writes of " + POINT_COUNT + " Points took:" + (System.currentTimeMillis() - start));
	this.influxDB.query(new Query("DROP DATABASE " + dbName));
}
 
Example 8
Source File: BatchOptionsTest.java    From influxdb-java with MIT License
private BatchPoints createBatchPoints(String dbName, String measurement, int n) {
  BatchPoints batchPoints = BatchPoints.database(dbName).build();
  for (int i = 1; i <= n; i++) {
    Point point = Point.measurement(measurement)
            .time(i,TimeUnit.MILLISECONDS)
            .addField("f1", (double) i)
            .addField("f2", (double) (i) * 1.1)
            .addField("f3", "f_v3").build();
    batchPoints.point(point);
  }

  return batchPoints;
}
 
Example 9
Source File: InfluxDBImpl.java    From influxdb-java with MIT License
@Override
public void write(final String database, final String retentionPolicy, final Point point) {
  if (this.batchEnabled.get()) {
    HttpBatchEntry batchEntry = new HttpBatchEntry(point, database, retentionPolicy);
    this.batchProcessor.put(batchEntry);
  } else {
    BatchPoints batchPoints = BatchPoints.database(database)
                                         .retentionPolicy(retentionPolicy).build();
    batchPoints.point(point);
    this.write(batchPoints);
    this.unBatchedCount.increment();
  }
  this.writeCount.increment();
}
 
Example 10
Source File: InfluxDBReporter.java    From statsd-jvm-profiler with MIT License
/**
 * Record multiple gauge values in InfluxDB
 *
 * @param gauges A map of gauge names to values
 */
@Override
public void recordGaugeValues(Map<String, ? extends Number> gauges) {
    long time = System.currentTimeMillis();
    BatchPoints batchPoints = BatchPoints.database(database).build();
    for (Map.Entry<String, ? extends Number> gauge: gauges.entrySet()) {
        batchPoints.point(constructPoint(time, gauge.getKey(), gauge.getValue()));
    }
    client.write(batchPoints);
}
 
Example 11
Source File: OffsetsInfluxDBDaoImpl.java    From Kafka-Insight with Apache License 2.0
@Override
public void insert() {
    InfluxDB influxDB = null;
    try {
        influxDB = InfluxDBFactory.connect(influxDBUrl);
        if (!influxDB.databaseExists(dbName)) {
            influxDB.createDatabase(dbName);
        }
        for (OffsetInfo offsetInfo : offsetInfoList) {
            String group = offsetInfo.getGroup();
            String topic = offsetInfo.getTopic();
            Long logSize = offsetInfo.getLogSize();
            Long offsets = offsetInfo.getCommittedOffset();
            Long lag = offsetInfo.getLag();
            Long timestamp = offsetInfo.getTimestamp();

            BatchPoints batchPoints = BatchPoints
                    .database(dbName)
                    .tag("group", group)
                    .tag("topic", topic)
                    .build();
            Point point = Point.measurement("offsetsConsumer")
                    .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
//                    .time(timestamp, TimeUnit.MILLISECONDS)
                    .addField("logSize", logSize)
                    .addField("offsets", offsets)
                    .addField("lag", lag)
                    .build();
            batchPoints.point(point);
            influxDB.write(batchPoints);
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        if (influxDB != null) {
            influxDB.close();
        }
    }
}
 
Example 12
Source File: CommonServiceImpl.java    From EserKnife with Apache License 2.0
@Override
public void batchInsert(List<NodeCommonStatInfo> nodeCommonStatInfos) {
    if(InflusDbUtil.FLAG){
        if (CollectionUtils.isNotEmpty(nodeCommonStatInfos)) {
            InfluxDB influxDB = InflusDbUtil.getConnection();
            BatchPoints batchPoints = BatchPoints.database(InflusDbUtil.DATABASE)
                    .retentionPolicy("autogen").consistency(InfluxDB.ConsistencyLevel.ALL).build();
            long times = System.currentTimeMillis();
            for (NodeCommonStatInfo nodeCommonStatInfo : nodeCommonStatInfos) {
                Point point = Point.measurement("common").time(times, TimeUnit.MILLISECONDS)
                        .tag("clusterName", nodeCommonStatInfo.getClusterName())
                        .addField("createTime", DateUtil.formatYYYYMMddHHMMSS(nodeCommonStatInfo.getCreateTime()))
                        .addField("nodeCount", nodeCommonStatInfo.getNodeCount())
                        .addField("dataNodeCount",nodeCommonStatInfo.getDataNodeCount())
                        .addField("docCount", nodeCommonStatInfo.getDocCounts())
                        .addField("storeSize", nodeCommonStatInfo.getStoreSize())
                        .addField("indiceCount", nodeCommonStatInfo.getIndicesCount())
                        .addField("shardCount", nodeCommonStatInfo.getShardCounts())
                        .addField("clusterStatus", nodeCommonStatInfo.getClusterStatus())
                        .build();
                batchPoints.point(point);
            }
            influxDB.write(batchPoints);
        }
    }else{
        commonDAO.batchInsert(nodeCommonStatInfos);
    }
}
 
Example 13
Source File: TransportStatServiceImpl.java    From EserKnife with Apache License 2.0
@Override
public void batchInsert(List<NodeTransportStatInfo> transportStatInfos) {
    if(InflusDbUtil.FLAG){
        if (CollectionUtils.isNotEmpty(transportStatInfos)) {
            InfluxDB influxDB = InflusDbUtil.getConnection();
            BatchPoints batchPoints = BatchPoints.database(InflusDbUtil.DATABASE)
                    .retentionPolicy("autogen").consistency(InfluxDB.ConsistencyLevel.ALL).build();
            long times = System.currentTimeMillis();
            for (NodeTransportStatInfo transportStatInfo : transportStatInfos) {
                Point point = Point.measurement("transport").time(times, TimeUnit.MILLISECONDS)
                        .tag("host", transportStatInfo.getHost())
                        .tag("clusterName", transportStatInfo.getClustName())
                        .addField("createTime", DateUtil.formatYYYYMMddHHMMSS(transportStatInfo.getCreateTime()))
                        .addField("rxCount",transportStatInfo.getRx_count())
                        .addField("rxSize",transportStatInfo.getRx_size() == null?"":transportStatInfo.getRx_size())
                        .addField("rxSizeInBytes",transportStatInfo.getRx_size_in_bytes())
                        .addField("txCount",transportStatInfo.getTx_count())
                        .addField("txSize",transportStatInfo.getTx_size() == null ?"":transportStatInfo.getTx_size())
                        .addField("txSizeInBytes",transportStatInfo.getTx_size_in_bytes())
                        .addField("serverOpen",transportStatInfo.getServer_open())
                        .build();
                batchPoints.point(point);
            }
            influxDB.write(batchPoints);
        }
    }else{
        transportStatDao.batchInsert(transportStatInfos);
    }
}
 
Example 14
Source File: ThreadPoolServiceImpl.java    From EserKnife with Apache License 2.0
@Override
public void batchInsert(List<NodeThreadPoolStatInfo> nodeThreadPoolStatInfos) {
    if(InflusDbUtil.FLAG){
        if (CollectionUtils.isNotEmpty(nodeThreadPoolStatInfos)) {
            InfluxDB influxDB = InflusDbUtil.getConnection();
            BatchPoints batchPoints = BatchPoints.database(InflusDbUtil.DATABASE)
                    .retentionPolicy("autogen").consistency(InfluxDB.ConsistencyLevel.ALL).build();
            long times = System.currentTimeMillis();
            for (NodeThreadPoolStatInfo threadPoolStatInfo : nodeThreadPoolStatInfos) {
                Point point = Point.measurement("thread_pool").time(times, TimeUnit.MILLISECONDS)
                        .tag("threadType",threadPoolStatInfo.getThreadType())
                        .tag("host", threadPoolStatInfo.getHost())
                        .tag("clusterName", threadPoolStatInfo.getClusterName())
                        .addField("createTime", DateUtil.formatYYYYMMddHHMMSS(threadPoolStatInfo.getCreateTime()))
                        .addField("largest",threadPoolStatInfo.getLargest())
                        .addField("completed",threadPoolStatInfo.getCompleted())
                        .addField("active",threadPoolStatInfo.getActive())
                        .addField("rejected",threadPoolStatInfo.getRejected())
                        .addField("threads",threadPoolStatInfo.getThreads())
                        .addField("queue",threadPoolStatInfo.getQueue())
                        .addField("intervalCompleted",threadPoolStatInfo.getIntervalCompleted())
                        .addField("intervalRejected",threadPoolStatInfo.getIntervalRejected())
                        .build();
                batchPoints.point(point);
            }
            influxDB.write(batchPoints);
        }
    }else{
        threadPoolDao.batchInsert(nodeThreadPoolStatInfos);
    }
}
 
Example 15
Source File: OsStatServiceImpl.java    From EserKnife with Apache License 2.0
@Override
public void batchInsert(List<NodeOSStatInfo> statInfos) {
    if(InflusDbUtil.FLAG){
        if(CollectionUtils.isNotEmpty(statInfos)){
            InfluxDB influxDB = InflusDbUtil.getConnection();
            BatchPoints batchPoints = BatchPoints
                    .database(InflusDbUtil.DATABASE)
                    .retentionPolicy("autogen")
                    .consistency(InfluxDB.ConsistencyLevel.ALL)
                    .build();
            long times = System.currentTimeMillis();
            for (NodeOSStatInfo nodeOSStatInfo : statInfos){
                Point point = Point.measurement("os")
                        .time(times, TimeUnit.MILLISECONDS)
                        .tag("host",nodeOSStatInfo.getHost())
                        .tag("clusterName",nodeOSStatInfo.getClusterName())
                        .addField("cpuPercent",nodeOSStatInfo.getCpuPercent())
                        .addField("loadAverage",nodeOSStatInfo.getLoadAverage())
                        .addField("memTotal",nodeOSStatInfo.getMemTotal())
                        .addField("memUsed",nodeOSStatInfo.getMemUsed())
                        .addField("memFree",nodeOSStatInfo.getMemFree())
                        .addField("memFreePercent",nodeOSStatInfo.getMemFreePercent())
                        .addField("memUsedPercent",nodeOSStatInfo.getMemUsedPercent())
                        .addField("swapTotal",nodeOSStatInfo.getSwapTotal())
                        .addField("swapUsed",nodeOSStatInfo.getSwapUsed())
                        .addField("swapFree",nodeOSStatInfo.getSwapFree())
                        .addField("createTime", DateUtil.formatYYYYMMddHHMMSS(nodeOSStatInfo.getCreateTime()))
                        .build();
                batchPoints.point(point);
            }
            influxDB.write(batchPoints);
        }
    }else{
        osStatDao.batchInsert(statInfos);
    }
}
 
Example 16
Source File: MBeansInfluxDBDaoImpl.java    From Kafka-Insight with Apache License 2.0
@Override
public void insert() {
    InfluxDB influxDB = null;
    try {
        influxDB = InfluxDBFactory.connect(influxDBUrl);
        if (!influxDB.databaseExists(dbName)) {
            influxDB.createDatabase(dbName);
        }
        for (MBeanInfo mBeanInfo : mBeanInfoList) {
            String label = mBeanInfo.getLabel();
            String topic = mBeanInfo.getTopic();
            double oneMinute = mBeanInfo.getOneMinute();
            double fiveMinute = mBeanInfo.getFiveMinute();
            double fifteenMinute = mBeanInfo.getFifteenMinute();
            double meanRate = mBeanInfo.getMeanRate();

            BatchPoints batchPoints = BatchPoints
                    .database(dbName)
                    .tag("label", label)
                    .tag("topic", topic)
                    .build();
            Point point = Point.measurement("mBeanMetric")
                    .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
//                    .time(timestamp, TimeUnit.MILLISECONDS)
                    .addField("oneMinuteRate", oneMinute)
                    .addField("fiveMinuteRate", fiveMinute)
                    .addField("fifteenMinuteRate", fifteenMinute)
                    .addField("meanRate", meanRate)
                    .build();
            batchPoints.point(point);
            influxDB.write(batchPoints);
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        if (influxDB != null) {
            influxDB.close();
        }
    }
}
 
Example 17
Source File: TestInfluxDB.java    From incubator-gobblin with Apache License 2.0
@Override
public void write(String database, String retentionPolicy, Point point) {
  BatchPoints batchPoints = BatchPoints.database(database).retentionPolicy(retentionPolicy).build();
  batchPoints.point(point);
  this.write(batchPoints);
}
 
Example 18
Source File: JvmServiceImpl.java    From EserKnife with Apache License 2.0
@Override
public void batchInsert(List<NodeJVMStatInfo> statInfos) {
    if(InflusDbUtil.FLAG){
        if(CollectionUtils.isNotEmpty(statInfos)){
            InfluxDB influxDB = InflusDbUtil.getConnection();
            BatchPoints batchPoints = BatchPoints
                    .database(InflusDbUtil.DATABASE)
                    .retentionPolicy("autogen")
                    .consistency(InfluxDB.ConsistencyLevel.ALL)
                    .build();
            long times = System.currentTimeMillis();
            for (NodeJVMStatInfo nodeJVMStatInfo : statInfos){
                Point point = Point.measurement("jvm")
                        .time(times, TimeUnit.MILLISECONDS)
                        .tag("host",nodeJVMStatInfo.getHost())
                        .tag("clusterName",nodeJVMStatInfo.getClusterName())
                        .addField("threadsCount",nodeJVMStatInfo.getThreadsCount())
                        .addField("threadsPeakCount",nodeJVMStatInfo.getThreadsPeakCount())
                        .addField("heapUsedInBytes",nodeJVMStatInfo.getHeapUsedInBytes())
                        .addField("heapUsedPercent",nodeJVMStatInfo.getHeapUsedPercent())
                        .addField("heapCommittedInBytes",nodeJVMStatInfo.getHeapCommittedInBytes())
                        .addField("heapMaxInBytes",nodeJVMStatInfo.getHeapMaxInBytes())
                        .addField("nonHeapUsedInBytes",nodeJVMStatInfo.getNonHeapUsedInBytes())
                        .addField("nonHeapCommittedInBytes",nodeJVMStatInfo.getNonHeapCommittedInBytes())
                        .addField("oldMemUsed", nodeJVMStatInfo.getOldMemUsed())
                        .addField("oldMemMax", nodeJVMStatInfo.getOldMemMax())
                        .addField("youngMemMax", nodeJVMStatInfo.getYoungMemMax())
                        .addField("youngMemUsed",nodeJVMStatInfo.getYoungMemUsed())
                        .addField("oldCollectionCount",nodeJVMStatInfo.getOldCollectionCount())
                        .addField("oldCollectionTime",nodeJVMStatInfo.getOldCollectionTime())
                        .addField("youngCollectionCount",nodeJVMStatInfo.getYoungCollectionCount())
                        .addField("youngCollectionTime",nodeJVMStatInfo.getYongCollectionTime())
                        .addField("intervalOldCollectionCount",nodeJVMStatInfo.getIntervalOldCollectionCount())
                        .addField("intervalOldCollectionTime",nodeJVMStatInfo.getIntervalOldCollectionTime())
                        .addField("bufferPoolsDirectTotalCapacity",nodeJVMStatInfo.getBufferPoolsDirectTotalCapacity())
                        .addField("bufferPoolsDirectCount",nodeJVMStatInfo.getBufferPoolsDirectCount())
                        .addField("bufferPoolsDirectUsed",nodeJVMStatInfo.getBufferPoolsDirectUsed())
                        .addField("bufferPoolsMappedTotalCapacity",nodeJVMStatInfo.getBufferPoolsMappedTotalCapacity())
                        .addField("bufferPoolsMappedCount",nodeJVMStatInfo.getBufferPoolsMappedCount())
                        .addField("bufferPoolsMappedUserd",nodeJVMStatInfo.getBufferPoolsMappedUserd())
                        .addField("collectTime", DateUtil.formatYYYYMMddHHMMSS(nodeJVMStatInfo.getCollectTime()))
                        .addField("executeTime",DateUtil.formatYYYYMMddHHMMSS(nodeJVMStatInfo.getExecuteTime()))
                        .build();
                batchPoints.point(point);
            }
            influxDB.write(batchPoints);
        }
    }else{
        jvmDao.batchInsert(statInfos);
    }
}
 
Example 19
Source File: IndiceServiceImpl.java    From EserKnife with Apache License 2.0
@Override
public void batchInsert(List<NodeIndiceStatInfo> statInfos) {
    if(InflusDbUtil.FLAG){
        if(CollectionUtils.isNotEmpty(statInfos)){
            InfluxDB influxDB = InflusDbUtil.getConnection();
            BatchPoints batchPoints = BatchPoints
                    .database(InflusDbUtil.DATABASE)
                    .retentionPolicy("autogen")
                    .consistency(InfluxDB.ConsistencyLevel.ALL)
                    .build();
            long times = System.currentTimeMillis();
            for (NodeIndiceStatInfo nodeIndiceStatInfo : statInfos){
                Point point = Point.measurement("indices")
                        .time(times, TimeUnit.MILLISECONDS)
                        .tag("host",nodeIndiceStatInfo.getHost())
                        .tag("clusterName",nodeIndiceStatInfo.getClusterName())
                        .addField("fielddataEvictions",nodeIndiceStatInfo.getFielddataEvictions())
                        .addField("fielddataMemorySizeInBytes",nodeIndiceStatInfo.getFielddataMemorySizeInBytes())
                        .addField("searchFetchTimeInMillis",nodeIndiceStatInfo.getSearchFetchTimeInMillis())
                        .addField("searchFetchTotal",nodeIndiceStatInfo.getSearchFetchTotal())
                        .addField("searchQueryTimeInMillis",nodeIndiceStatInfo.getSearchQueryTimeInMillis())
                        .addField("searchQueryTotal",nodeIndiceStatInfo.getSearchQueryTotal())
                        .addField("getMissingTimeInMillis",nodeIndiceStatInfo.getGetMissingTimeInMillis())
                        .addField("getMissingTotal",nodeIndiceStatInfo.getGetMissingTotal())
                        .addField("getExistsTimeInMillis", nodeIndiceStatInfo.getGetExistsTimeInMillis())
                        .addField("getExistsTotal", nodeIndiceStatInfo.getGetMissingTotal())
                        .addField("getTimeInMillis", nodeIndiceStatInfo.getGetTimeInMillis())
                        .addField("getTotal",nodeIndiceStatInfo.getGetTotal())
                        .addField("indexingDeleteTimeInMillis",nodeIndiceStatInfo.getIndexingDeleteTimeInMillis())
                        .addField("indexingDeleteTotal",nodeIndiceStatInfo.getIndexingDeleteTotal())
                        .addField("indexingIndexTimeInMillis",nodeIndiceStatInfo.getIndexingIndexTimeInMillis())
                        .addField("indexingIndexTotal",nodeIndiceStatInfo.getIndexingIndexTotal())
                        .addField("segmentsCount",nodeIndiceStatInfo.getSegmentsCount())
                        .addField("storeSizeInBytes",nodeIndiceStatInfo.getStoreSizeInBytes())
                        .addField("docsCount",nodeIndiceStatInfo.getDocsCount())
                        .addField("createTime", DateUtil.formatYYYYMMddHHMMSS(nodeIndiceStatInfo.getCreateTime()))
                        .build();
                batchPoints.point(point);
            }
            influxDB.write(batchPoints);
        }
    }else{
        indiceDao.batchInsert(statInfos);
    }
}