Java Code Examples for org.influxdb.InfluxDB#write()

The following examples show how to use org.influxdb.InfluxDB#write(). Each example is taken from an open-source project; the source file, project, and license are noted above the code.
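Before the project examples, here is a minimal, self-contained sketch of the write() overloads they use. The URL, credentials, and database name are placeholders rather than values from any of the projects below; "autogen" is assumed as the default retention policy.

import java.util.concurrent.TimeUnit;

import org.influxdb.InfluxDB;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.BatchPoints;
import org.influxdb.dto.Point;

public class WriteOverloadsSketch {
    public static void main(String[] args) {
        // Placeholder connection settings
        InfluxDB influxDB = InfluxDBFactory.connect("http://localhost:8086", "user", "password");
        influxDB.setDatabase("example_db");

        Point point = Point.measurement("cpu")
                .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
                .addField("idle", 90L)
                .build();

        // 1) write(Point): goes to the database set via setDatabase()
        influxDB.write(point);

        // 2) write(database, retentionPolicy, Point): explicit target
        influxDB.write("example_db", "autogen", point);

        // 3) write(BatchPoints): several points in a single request
        BatchPoints batch = BatchPoints.database("example_db")
                .retentionPolicy("autogen")
                .build();
        batch.point(point);
        influxDB.write(batch);

        influxDB.close();
    }
}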
Example 1
Source File: InfluxDbSourceTest.java    From hazelcast-jet-contrib with Apache License 2.0
private void fillCpuData(InfluxDB influxDB) {
    long lastTime = 0;
    for (int value = 0; value < VALUE_COUNT; value++) {
        // loop until the value of currentTimeMillis changes
        // workaround for https://github.com/influxdata/influxdb-java/issues/586 which prevents us from
        // assigning the time explicitly. If two items have the same time, the last one wins.
        while (System.currentTimeMillis() == lastTime) {
            sleepMillis(1);
        }
        Cpu cpu = new Cpu("localhost", (double) value);
        influxDB.write(DATABASE_NAME,
                "autogen",
                Point.measurementByPOJO(cpu.getClass())
                        .addFieldsFromPOJO(cpu)
                        .build()
        );
    }
}
 
Example 2
Source File: HttpStatServiceImpl.java    From EserKnife with Apache License 2.0
@Override
public void batchInsert(List<NodeHttpStatInfo> nodeHttpStatInfos) {
    if(InflusDbUtil.FLAG){
        if (CollectionUtils.isNotEmpty(nodeHttpStatInfos)) {
            InfluxDB influxDB = InflusDbUtil.getConnection();
            BatchPoints batchPoints = BatchPoints.database(InflusDbUtil.DATABASE)
                    .retentionPolicy("autogen").consistency(InfluxDB.ConsistencyLevel.ALL).build();
            long times = System.currentTimeMillis();
            for (NodeHttpStatInfo nodeHttpStatInfo : nodeHttpStatInfos) {
                Point point = Point.measurement("http").time(times, TimeUnit.MILLISECONDS)
                        .tag("host", nodeHttpStatInfo.getHost())
                        .tag("clusterName", nodeHttpStatInfo.getClustName())
                        .addField("createTime", DateUtil.formatYYYYMMddHHMMSS(nodeHttpStatInfo.getCreateTime()))
                        .addField("totalOpened", nodeHttpStatInfo.getTotal_opened())
                        .addField("currentOpen", nodeHttpStatInfo.getCurrent_open())
                        .build();
                batchPoints.point(point);
            }
            influxDB.write(batchPoints);
        }
    }else{
        httpStatDao.batchInsert(nodeHttpStatInfos);
    }
}
 
Example 3
Source File: InfluxDBContainerWithUserTest.java    From testcontainers-java with MIT License
@Test
public void queryForWriteAndRead() {
    InfluxDB influxDB = influxDBContainer.getNewInfluxDB();

    Point point = Point.measurement("cpu")
        .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
        .addField("idle", 90L)
        .addField("user", 9L)
        .addField("system", 1L)
        .build();
    influxDB.write(point);

    Query query = new Query("SELECT idle FROM cpu", DATABASE);
    QueryResult actual = influxDB.query(query);

    assertThat(actual, notNullValue());
    assertThat(actual.getError(), nullValue());
    assertThat(actual.getResults(), notNullValue());
    assertThat(actual.getResults().size(), is(1));

}
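The assertions above only confirm that a result came back. To read the written values out of the QueryResult, the series can be unpacked; a short sketch that builds on the actual variable from this test:

// Each Result holds a list of Series; getSeries() is null when nothing matched
QueryResult.Series series = actual.getResults().get(0).getSeries().get(0);
List<String> columns = series.getColumns();          // e.g. [time, idle]
List<List<Object>> rows = series.getValues();        // one inner list per row
Object idle = rows.get(0).get(columns.indexOf("idle"));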
 
Example 4
Source File: InfluxLogger.java    From Okra with Apache License 2.0
public static void main(String[] args) {
    String dbName = "centos_test_db";

    InfluxDB influxDB = InfluxDBFactory.connect("http://192.168.0.71:18086", "influxdbUser", "influxdbPsw");

    // Flush every 2000 Points, at least every 100ms
    influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS);

    for (int i = 0; i < 50; i++) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        Point point2 = Point.measurement("disk")
                .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
                .addField("used", Math.random() * 80L)
                .addField("free", Math.random() * 30L)
                .build();
        influxDB.write(dbName, "autogen", point2);
    }

    System.out.println();
}
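The three-argument enableBatch(actions, flushDuration, unit) used above is the older signature. Newer influxdb-java releases also accept a BatchOptions object (org.influxdb.BatchOptions, as Example 11 uses); assuming such a version, the same flush behaviour can be configured like this in place of the call above:

// Flush every 2000 points, or at least every 100 ms, using BatchOptions
influxDB.enableBatch(BatchOptions.DEFAULTS
        .actions(2000)
        .flushDuration(100));   // flushDuration is in milliseconds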
 
Example 5
Source File: InfluxDbSourceTest.java    From hazelcast-jet-contrib with Apache License 2.0
private void fillData(InfluxDB influxDB) {
    influxDB.enableBatch(500, 100, TimeUnit.MICROSECONDS);
    for (int value = 0; value < VALUE_COUNT; value++) {
        influxDB.write(DATABASE_NAME,
                "autogen",
                Point.measurement("test")
                        .time(value, TimeUnit.MILLISECONDS)
                        .addField("value", value)
                        .build()
        );
    }
}
 
Example 6
Source File: OsStatServiceImpl.java    From EserKnife with Apache License 2.0
@Override
public void batchInsert(List<NodeOSStatInfo> statInfos) {
    if(InflusDbUtil.FLAG){
        if(CollectionUtils.isNotEmpty(statInfos)){
            InfluxDB influxDB = InflusDbUtil.getConnection();
            BatchPoints batchPoints = BatchPoints
                    .database(InflusDbUtil.DATABASE)
                    .retentionPolicy("autogen")
                    .consistency(InfluxDB.ConsistencyLevel.ALL)
                    .build();
            long times = System.currentTimeMillis();
            for (NodeOSStatInfo nodeOSStatInfo : statInfos){
                Point point = Point.measurement("os")
                        .time(times, TimeUnit.MILLISECONDS)
                        .tag("host",nodeOSStatInfo.getHost())
                        .tag("clusterName",nodeOSStatInfo.getClusterName())
                        .addField("cpuPercent",nodeOSStatInfo.getCpuPercent())
                        .addField("loadAverage",nodeOSStatInfo.getLoadAverage())
                        .addField("memTotal",nodeOSStatInfo.getMemTotal())
                        .addField("memUsed",nodeOSStatInfo.getMemUsed())
                        .addField("memFree",nodeOSStatInfo.getMemFree())
                        .addField("memFreePercent",nodeOSStatInfo.getMemFreePercent())
                        .addField("memUsedPercent",nodeOSStatInfo.getMemUsedPercent())
                        .addField("swapTotal",nodeOSStatInfo.getSwapTotal())
                        .addField("swapUsed",nodeOSStatInfo.getSwapUsed())
                        .addField("swapFree",nodeOSStatInfo.getSwapFree())
                        .addField("createTime", DateUtil.formatYYYYMMddHHMMSS(nodeOSStatInfo.getCreateTime()))
                        .build();
                batchPoints.point(point);
            }
            influxDB.write(batchPoints);
        }
    }else{
        osStatDao.batchInsert(statInfos);
    }
}
 
Example 7
Source File: ThreadPoolServiceImpl.java    From EserKnife with Apache License 2.0
@Override
public void batchInsert(List<NodeThreadPoolStatInfo> nodeThreadPoolStatInfos) {
    if(InflusDbUtil.FLAG){
        if (CollectionUtils.isNotEmpty(nodeThreadPoolStatInfos)) {
            InfluxDB influxDB = InflusDbUtil.getConnection();
            BatchPoints batchPoints = BatchPoints.database(InflusDbUtil.DATABASE)
                    .retentionPolicy("autogen").consistency(InfluxDB.ConsistencyLevel.ALL).build();
            long times = System.currentTimeMillis();
            for (NodeThreadPoolStatInfo threadPoolStatInfo : nodeThreadPoolStatInfos) {
                Point point = Point.measurement("thread_pool").time(times, TimeUnit.MILLISECONDS)
                        .tag("threadType",threadPoolStatInfo.getThreadType())
                        .tag("host", threadPoolStatInfo.getHost())
                        .tag("clusterName", threadPoolStatInfo.getClusterName())
                        .addField("createTime", DateUtil.formatYYYYMMddHHMMSS(threadPoolStatInfo.getCreateTime()))
                        .addField("largest",threadPoolStatInfo.getLargest())
                        .addField("completed",threadPoolStatInfo.getCompleted())
                        .addField("active",threadPoolStatInfo.getActive())
                        .addField("rejected",threadPoolStatInfo.getRejected())
                        .addField("threads",threadPoolStatInfo.getThreads())
                        .addField("queue",threadPoolStatInfo.getQueue())
                        .addField("intervalCompleted",threadPoolStatInfo.getIntervalCompleted())
                        .addField("intervalRejected",threadPoolStatInfo.getIntervalRejected())
                        .build();
                batchPoints.point(point);
            }
            influxDB.write(batchPoints);
        }
    }else{
        threadPoolDao.batchInsert(nodeThreadPoolStatInfos);
    }
}
 
Example 8
Source File: TransportStatServiceImpl.java    From EserKnife with Apache License 2.0
@Override
public void batchInsert(List<NodeTransportStatInfo> transportStatInfos) {
    if(InflusDbUtil.FLAG){
        if (CollectionUtils.isNotEmpty(transportStatInfos)) {
            InfluxDB influxDB = InflusDbUtil.getConnection();
            BatchPoints batchPoints = BatchPoints.database(InflusDbUtil.DATABASE)
                    .retentionPolicy("autogen").consistency(InfluxDB.ConsistencyLevel.ALL).build();
            long times = System.currentTimeMillis();
            for (NodeTransportStatInfo transportStatInfo : transportStatInfos) {
                Point point = Point.measurement("transport").time(times, TimeUnit.MILLISECONDS)
                        .tag("host", transportStatInfo.getHost())
                        .tag("clusterName", transportStatInfo.getClustName())
                        .addField("createTime", DateUtil.formatYYYYMMddHHMMSS(transportStatInfo.getCreateTime()))
                        .addField("rxCount",transportStatInfo.getRx_count())
                        .addField("rxSize",transportStatInfo.getRx_size() == null?"":transportStatInfo.getRx_size())
                        .addField("rxSizeInBytes",transportStatInfo.getRx_size_in_bytes())
                        .addField("txCount",transportStatInfo.getTx_count())
                        .addField("txSize",transportStatInfo.getTx_size() == null ?"":transportStatInfo.getTx_size())
                        .addField("txSizeInBytes",transportStatInfo.getTx_size_in_bytes())
                        .addField("serverOpen",transportStatInfo.getServer_open())
                        .build();
                batchPoints.point(point);
            }
            influxDB.write(batchPoints);
        }
    }else{
        transportStatDao.batchInsert(transportStatInfos);
    }
}
 
Example 9
Source File: CommonServiceImpl.java    From EserKnife with Apache License 2.0
@Override
public void batchInsert(List<NodeCommonStatInfo> nodeCommonStatInfos) {
    if(InflusDbUtil.FLAG){
        if (CollectionUtils.isNotEmpty(nodeCommonStatInfos)) {
            InfluxDB influxDB = InflusDbUtil.getConnection();
            BatchPoints batchPoints = BatchPoints.database(InflusDbUtil.DATABASE)
                    .retentionPolicy("autogen").consistency(InfluxDB.ConsistencyLevel.ALL).build();
            long times = System.currentTimeMillis();
            for (NodeCommonStatInfo nodeCommonStatInfo : nodeCommonStatInfos) {
                Point point = Point.measurement("common").time(times, TimeUnit.MILLISECONDS)
                        .tag("clusterName", nodeCommonStatInfo.getClusterName())
                        .addField("createTime", DateUtil.formatYYYYMMddHHMMSS(nodeCommonStatInfo.getCreateTime()))
                        .addField("nodeCount", nodeCommonStatInfo.getNodeCount())
                        .addField("dataNodeCount",nodeCommonStatInfo.getDataNodeCount())
                        .addField("docCount", nodeCommonStatInfo.getDocCounts())
                        .addField("storeSize", nodeCommonStatInfo.getStoreSize())
                        .addField("indiceCount", nodeCommonStatInfo.getIndicesCount())
                        .addField("shardCount", nodeCommonStatInfo.getShardCounts())
                        .addField("clusterStatus", nodeCommonStatInfo.getClusterStatus())
                        .build();
                batchPoints.point(point);
            }
            influxDB.write(batchPoints);
        }
    }else{
        commonDAO.batchInsert(nodeCommonStatInfos);
    }
}
 
Example 10
Source File: OffsetsInfluxDBDaoImpl.java    From Kafka-Insight with Apache License 2.0
@Override
public void insert() {
    InfluxDB influxDB = null;
    try {
        influxDB = InfluxDBFactory.connect(influxDBUrl);
        if (!influxDB.databaseExists(dbName)) {
            influxDB.createDatabase(dbName);
        }
        for (OffsetInfo offsetInfo : offsetInfoList) {
            String group = offsetInfo.getGroup();
            String topic = offsetInfo.getTopic();
            Long logSize = offsetInfo.getLogSize();
            Long offsets = offsetInfo.getCommittedOffset();
            Long lag = offsetInfo.getLag();
            Long timestamp = offsetInfo.getTimestamp();

            BatchPoints batchPoints = BatchPoints
                    .database(dbName)
                    .tag("group", group)
                    .tag("topic", topic)
                    .build();
            Point point = Point.measurement("offsetsConsumer")
                    .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
//                    .time(timestamp, TimeUnit.MILLISECONDS)
                    .addField("logSize", logSize)
                    .addField("offsets", offsets)
                    .addField("lag", lag)
                    .build();
            batchPoints.point(point);
            influxDB.write(batchPoints);
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        if (influxDB != null) {
            influxDB.close();
        }
    }
}
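Note that this loop builds a fresh BatchPoints and calls write() for every offset, so each record is still sent as its own request. If the intent is to batch, the tags can be attached to the individual points instead and a single BatchPoints written after the loop; a sketch of that variant (an alternative, not the Kafka-Insight code), which applies equally to Example 14 below:

BatchPoints batchPoints = BatchPoints.database(dbName).build();
for (OffsetInfo offsetInfo : offsetInfoList) {
    batchPoints.point(Point.measurement("offsetsConsumer")
            .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
            .tag("group", offsetInfo.getGroup())    // tags move from the batch to the point
            .tag("topic", offsetInfo.getTopic())
            .addField("logSize", offsetInfo.getLogSize())
            .addField("offsets", offsetInfo.getCommittedOffset())
            .addField("lag", offsetInfo.getLag())
            .build());
}
influxDB.write(batchPoints);    // one request for all offsets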
 
Example 11
Source File: StatsCollector.java    From cloudstack with Apache License 2.0
/**
 * Writes batches of InfluxDB database points into a given database.
 */
protected void writeBatches(InfluxDB influxDbConnection, String dbName, List<Point> points) {
    BatchPoints batchPoints = BatchPoints.database(dbName).build();
    influxDbConnection.enableBatch(BatchOptions.DEFAULTS);

    for (Point point : points) {
        batchPoints.point(point);
    }

    influxDbConnection.write(batchPoints);
}
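Because influxdb-java throws an IllegalStateException if enableBatch() is called while batching is already enabled, a helper like this is usually guarded when it can run more than once per connection; a minimal sketch of such a guard (an addition, not part of the CloudStack source):

if (!influxDbConnection.isBatchEnabled()) {
    influxDbConnection.enableBatch(BatchOptions.DEFAULTS);
}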
 
Example 12
Source File: IndiceServiceImpl.java    From EserKnife with Apache License 2.0
@Override
public void batchInsert(List<NodeIndiceStatInfo> statInfos) {
    if(InflusDbUtil.FLAG){
        if(CollectionUtils.isNotEmpty(statInfos)){
            InfluxDB influxDB = InflusDbUtil.getConnection();
            BatchPoints batchPoints = BatchPoints
                    .database(InflusDbUtil.DATABASE)
                    .retentionPolicy("autogen")
                    .consistency(InfluxDB.ConsistencyLevel.ALL)
                    .build();
            long times = System.currentTimeMillis();
            for (NodeIndiceStatInfo nodeIndiceStatInfo : statInfos){
                Point point = Point.measurement("indices")
                        .time(times, TimeUnit.MILLISECONDS)
                        .tag("host",nodeIndiceStatInfo.getHost())
                        .tag("clusterName",nodeIndiceStatInfo.getClusterName())
                        .addField("fielddataEvictions",nodeIndiceStatInfo.getFielddataEvictions())
                        .addField("fielddataMemorySizeInBytes",nodeIndiceStatInfo.getFielddataMemorySizeInBytes())
                        .addField("searchFetchTimeInMillis",nodeIndiceStatInfo.getSearchFetchTimeInMillis())
                        .addField("searchFetchTotal",nodeIndiceStatInfo.getSearchFetchTotal())
                        .addField("searchQueryTimeInMillis",nodeIndiceStatInfo.getSearchQueryTimeInMillis())
                        .addField("searchQueryTotal",nodeIndiceStatInfo.getSearchQueryTotal())
                        .addField("getMissingTimeInMillis",nodeIndiceStatInfo.getGetMissingTimeInMillis())
                        .addField("getMissingTotal",nodeIndiceStatInfo.getGetMissingTotal())
                        .addField("getExistsTimeInMillis", nodeIndiceStatInfo.getGetExistsTimeInMillis())
                        .addField("getExistsTotal", nodeIndiceStatInfo.getGetMissingTotal())
                        .addField("getTimeInMillis", nodeIndiceStatInfo.getGetTimeInMillis())
                        .addField("getTotal",nodeIndiceStatInfo.getGetTotal())
                        .addField("indexingDeleteTimeInMillis",nodeIndiceStatInfo.getIndexingDeleteTimeInMillis())
                        .addField("indexingDeleteTotal",nodeIndiceStatInfo.getIndexingDeleteTotal())
                        .addField("indexingIndexTimeInMillis",nodeIndiceStatInfo.getIndexingIndexTimeInMillis())
                        .addField("indexingIndexTotal",nodeIndiceStatInfo.getIndexingIndexTotal())
                        .addField("segmentsCount",nodeIndiceStatInfo.getSegmentsCount())
                        .addField("storeSizeInBytes",nodeIndiceStatInfo.getStoreSizeInBytes())
                        .addField("docsCount",nodeIndiceStatInfo.getDocsCount())
                        .addField("createTime", DateUtil.formatYYYYMMddHHMMSS(nodeIndiceStatInfo.getCreateTime()))
                        .build();
                batchPoints.point(point);
            }
            influxDB.write(batchPoints);
        }
    }else{
        indiceDao.batchInsert(statInfos);
    }
}
 
Example 13
Source File: JvmServiceImpl.java    From EserKnife with Apache License 2.0
@Override
public void batchInsert(List<NodeJVMStatInfo> statInfos) {
    if(InflusDbUtil.FLAG){
        if(CollectionUtils.isNotEmpty(statInfos)){
            InfluxDB influxDB = InflusDbUtil.getConnection();
            BatchPoints batchPoints = BatchPoints
                    .database(InflusDbUtil.DATABASE)
                    .retentionPolicy("autogen")
                    .consistency(InfluxDB.ConsistencyLevel.ALL)
                    .build();
            long times = System.currentTimeMillis();
            for (NodeJVMStatInfo nodeJVMStatInfo : statInfos){
                Point point = Point.measurement("jvm")
                        .time(times, TimeUnit.MILLISECONDS)
                        .tag("host",nodeJVMStatInfo.getHost())
                        .tag("clusterName",nodeJVMStatInfo.getClusterName())
                        .addField("threadsCount",nodeJVMStatInfo.getThreadsCount())
                        .addField("threadsPeakCount",nodeJVMStatInfo.getThreadsPeakCount())
                        .addField("heapUsedInBytes",nodeJVMStatInfo.getHeapUsedInBytes())
                        .addField("heapUsedPercent",nodeJVMStatInfo.getHeapUsedPercent())
                        .addField("heapCommittedInBytes",nodeJVMStatInfo.getHeapCommittedInBytes())
                        .addField("heapMaxInBytes",nodeJVMStatInfo.getHeapMaxInBytes())
                        .addField("nonHeapUsedInBytes",nodeJVMStatInfo.getNonHeapUsedInBytes())
                        .addField("nonHeapCommittedInBytes",nodeJVMStatInfo.getNonHeapCommittedInBytes())
                        .addField("oldMemUsed", nodeJVMStatInfo.getOldMemUsed())
                        .addField("oldMemMax", nodeJVMStatInfo.getOldMemMax())
                        .addField("youngMemMax", nodeJVMStatInfo.getYoungMemMax())
                        .addField("youngMemUsed",nodeJVMStatInfo.getYoungMemUsed())
                        .addField("oldCollectionCount",nodeJVMStatInfo.getOldCollectionCount())
                        .addField("oldCollectionTime",nodeJVMStatInfo.getOldCollectionTime())
                        .addField("youngCollectionCount",nodeJVMStatInfo.getYoungCollectionCount())
                        .addField("youngCollectionTime",nodeJVMStatInfo.getYongCollectionTime())
                        .addField("intervalOldCollectionCount",nodeJVMStatInfo.getIntervalOldCollectionCount())
                        .addField("intervalOldCollectionTime",nodeJVMStatInfo.getIntervalOldCollectionTime())
                        .addField("bufferPoolsDirectTotalCapacity",nodeJVMStatInfo.getBufferPoolsDirectTotalCapacity())
                        .addField("bufferPoolsDirectCount",nodeJVMStatInfo.getBufferPoolsDirectCount())
                        .addField("bufferPoolsDirectUsed",nodeJVMStatInfo.getBufferPoolsDirectUsed())
                        .addField("bufferPoolsMappedTotalCapacity",nodeJVMStatInfo.getBufferPoolsMappedTotalCapacity())
                        .addField("bufferPoolsMappedCount",nodeJVMStatInfo.getBufferPoolsMappedCount())
                        .addField("bufferPoolsMappedUserd",nodeJVMStatInfo.getBufferPoolsMappedUserd())
                        .addField("collectTime", DateUtil.formatYYYYMMddHHMMSS(nodeJVMStatInfo.getCollectTime()))
                        .addField("executeTime",DateUtil.formatYYYYMMddHHMMSS(nodeJVMStatInfo.getExecuteTime()))
                        .build();
                batchPoints.point(point);
            }
            influxDB.write(batchPoints);
        }
    }else{
        jvmDao.batchInsert(statInfos);
    }
}
 
Example 14
Source File: MBeansInfluxDBDaoImpl.java    From Kafka-Insight with Apache License 2.0
@Override
public void insert() {
    InfluxDB influxDB = null;
    try {
        influxDB = InfluxDBFactory.connect(influxDBUrl);
        if (!influxDB.databaseExists(dbName)) {
            influxDB.createDatabase(dbName);
        }
        for (MBeanInfo mBeanInfo : mBeanInfoList) {
            String label = mBeanInfo.getLabel();
            String topic = mBeanInfo.getTopic();
            double oneMinute = mBeanInfo.getOneMinute();
            double fiveMinute = mBeanInfo.getFiveMinute();
            double fifteenMinute = mBeanInfo.getFifteenMinute();
            double meanRate = mBeanInfo.getMeanRate();

            BatchPoints batchPoints = BatchPoints
                    .database(dbName)
                    .tag("label", label)
                    .tag("topic", topic)
                    .build();
            Point point = Point.measurement("mBeanMetric")
                    .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
//                    .time(timestamp, TimeUnit.MILLISECONDS)
                    .addField("oneMinuteRate", oneMinute)
                    .addField("fiveMinuteRate", fiveMinute)
                    .addField("fifteenMinuteRate", fifteenMinute)
                    .addField("meanRate", meanRate)
                    .build();
            batchPoints.point(point);
            influxDB.write(batchPoints);
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        if (influxDB != null) {
            influxDB.close();
        }
    }
}
 
Example 15
Source File: InfluxDBConnectionLiveTest.java    From tutorials with MIT License
@Test
public void whenPointsWrittenPointsExists() throws Exception {

    InfluxDB connection = connectDatabase();

    String dbName = "baeldung";
    connection.createDatabase(dbName);

    // Need a retention policy before we can proceed
    connection.createRetentionPolicy("defaultPolicy", "baeldung", "30d", 1, true);

    // Since we are doing a batch thread, we need to set this as a default
    connection.setRetentionPolicy("defaultPolicy");

    // Enable batch mode
    connection.enableBatch(10, 10, TimeUnit.MILLISECONDS);

    for (int i = 0; i < 10; i++) {
        Point point = Point.measurement("memory")
                .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
                .addField("name", "server1")
                .addField("free", 4743656L)
                .addField("used", 1015096L)
                .addField("buffer", 1010467L)
                .build();

        connection.write(dbName, "defaultPolicy", point);
        Thread.sleep(2);

    }

    // Unfortunately, the sleep inside the loop doesn't always add enough time to ensure
    // that Influx's batch thread flushes all of the writes, and this sometimes fails without
    // another brief pause.
    Thread.sleep(10);

    List<com.baeldung.influxdb.MemoryPoint> memoryPointList = getPoints(connection, "Select * from memory", "baeldung");

    assertEquals(10, memoryPointList.size());

    // Turn off batch and clean up
    connection.disableBatch();
    connection.deleteDatabase("baeldung");
    connection.close();

}
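The comment above points out that the fixed sleeps only give the batch thread time to run. If the influxdb-java version in use exposes flush(), the pending batch can be pushed out explicitly before querying, which removes the timing dependency; a sketch under that assumption:

// Force buffered points to be written instead of waiting on sleep()
connection.flush();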
 
Example 16
Source File: InfluxDBConnectionLiveTest.java    From tutorials with MIT License
@Test
public void whenBatchWrittenBatchExists() {

    InfluxDB connection = connectDatabase();

    String dbName = "baeldung";
    connection.createDatabase(dbName);

    // Need a retention policy before we can proceed
    // Since we are doing batches, we need not set it
    connection.createRetentionPolicy("defaultPolicy", "baeldung", "30d", 1, true);


    BatchPoints batchPoints = BatchPoints
            .database(dbName)
            .retentionPolicy("defaultPolicy")
            .build();
    Point point1 = Point.measurement("memory")
            .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
            .addField("free", 4743656L)
            .addField("used", 1015096L)
            .addField("buffer", 1010467L)
            .build();
    Point point2 = Point.measurement("memory")
            .time(System.currentTimeMillis() - 100, TimeUnit.MILLISECONDS)
            .addField("free", 4743696L)
            .addField("used", 1016096L)
            .addField("buffer", 1008467L)
            .build();
    batchPoints.point(point1);
    batchPoints.point(point2);
    connection.write(batchPoints);

    List<MemoryPoint> memoryPointList = getPoints(connection, "Select * from memory", "baeldung");

    assertEquals(2, memoryPointList.size());
    assertTrue(4743696L == memoryPointList.get(0).getFree());


    memoryPointList = getPoints(connection, "Select * from memory order by time desc", "baeldung");

    assertEquals(2, memoryPointList.size());
    assertTrue(4743656L == memoryPointList.get(0).getFree());

    // Clean up database
    connection.deleteDatabase("baeldung");
    connection.close();
}