Java Code Examples for org.HdrHistogram.Histogram#reset()

The following examples show how to use org.HdrHistogram.Histogram#reset(). They are drawn from open source projects; the originating source file and project license are listed above each example.
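
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the typical pattern these examples follow: record values for one measurement interval, report, then call reset() so the next interval starts from zero counts.

import org.HdrHistogram.Histogram;
import java.util.concurrent.TimeUnit;

public class ResetExample
{
    public static void main(final String[] args)
    {
        // Track values up to 10 seconds (in nanoseconds) with 3 significant digits.
        final Histogram histogram = new Histogram(TimeUnit.SECONDS.toNanos(10), 3);

        for (int run = 0; run < 3; run++)
        {
            // Record some latencies for this measurement interval.
            for (int i = 0; i < 10_000; i++)
            {
                final long startNs = System.nanoTime();
                Thread.onSpinWait(); // stand-in for the operation being measured
                histogram.recordValue(System.nanoTime() - startNs);
            }

            System.out.printf("run %d: count=%d p99=%d ns%n",
                run, histogram.getTotalCount(), histogram.getValueAtPercentile(99.0));

            // Clear all recorded counts before the next interval.
            histogram.reset();
        }
    }
}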
Example 1
Source File: LatencyBenchmarkClient.java    From artio with Apache License 2.0
private void runTimedRuns(
    final SocketChannel socketChannel,
    final TestRequestEncoder testRequest,
    final HeaderEncoder header,
    final Histogram histogram)
    throws IOException
{
    histogram.reset();

    for (int i = 0; i < MESSAGES_EXCHANGED; i++)
    {
        exchangeMessage(socketChannel, testRequest, header, WARMUP_MESSAGES + i, histogram);
    }

    HistogramLogReader.prettyPrint(
        System.currentTimeMillis(), histogram, "Client in Micros", 1000);
}
 
Example 2
Source File: RainfallHistogramSink.java    From Rainfall-core with Apache License 2.0
public synchronized void reset() {
  for (HistogramHolder hh : actives) {
    hh.setDead(true);
    Collection<Histogram> histograms = hh.getHistograms();
    for (Histogram histogram : histograms) {
      histogram.reset();
    }
  }
  actives.clear();
}
 
Example 3
Source File: Util.java    From maestro-java with Apache License 2.0
public static Histogram getAccumulated(final File histogramFile) throws FileNotFoundException {
    Histogram accumulatedHistogram = null;
    DoubleHistogram accumulatedDoubleHistogram = null;

    HistogramLogReader histogramLogReader = new HistogramLogReader(histogramFile);

    int i = 0;
    while (histogramLogReader.hasNext()) {
        EncodableHistogram eh = histogramLogReader.nextIntervalHistogram();
        if (eh == null) {
            logger.error("The histogram library returned an unexpected null value");
            break;
        }

        if (i == 0) {
            if (eh instanceof DoubleHistogram) {
                accumulatedDoubleHistogram = ((DoubleHistogram) eh).copy();
                accumulatedDoubleHistogram.reset();
                accumulatedDoubleHistogram.setAutoResize(true);
            }
            else {
                accumulatedHistogram = ((Histogram) eh).copy();
                accumulatedHistogram.reset();
                accumulatedHistogram.setAutoResize(true);
            }
        }

        logger.debug("Processing histogram from point in time {} to {}",
                Instant.ofEpochMilli(eh.getStartTimeStamp()), Instant.ofEpochMilli(eh.getEndTimeStamp()));

        if (eh instanceof DoubleHistogram) {
            Objects.requireNonNull(accumulatedDoubleHistogram).add((DoubleHistogram) eh);
        }
        else {
            Objects.requireNonNull(accumulatedHistogram).add((Histogram) eh);
        }

        i++;
    }

    if (accumulatedHistogram == null) {
        throw new EmptyDataSet("The HDR data file did not contain any histogram data");
    }

    return accumulatedHistogram;
}
 
Example 4
Source File: Consumer.java    From kafka-sample-programs with Apache License 2.0
public static void main(String[] args) throws IOException {
    // set up house-keeping
    ObjectMapper mapper = new ObjectMapper();
    Histogram stats = new Histogram(1, 10000000, 2);
    Histogram global = new Histogram(1, 10000000, 2);

    // and the consumer
    KafkaConsumer<String, String> consumer;
    try (InputStream props = Resources.getResource("consumer.props").openStream()) {
        Properties properties = new Properties();
        properties.load(props);
        if (properties.getProperty("group.id") == null) {
            properties.setProperty("group.id", "group-" + new Random().nextInt(100000));
        }
        consumer = new KafkaConsumer<>(properties);
    }
    consumer.subscribe(Arrays.asList("fast-messages", "summary-markers"));
    int timeouts = 0;
    //noinspection InfiniteLoopStatement
    while (true) {
        // read records with a short timeout. If we time out, we don't really care.
        ConsumerRecords<String, String> records = consumer.poll(200);
        if (records.count() == 0) {
            timeouts++;
        } else {
            System.out.printf("Got %d records after %d timeouts\n", records.count(), timeouts);
            timeouts = 0;
        }
        for (ConsumerRecord<String, String> record : records) {
            switch (record.topic()) {
                case "fast-messages":
                    // the send time is encoded inside the message
                    JsonNode msg = mapper.readTree(record.value());
                    switch (msg.get("type").asText()) {
                        case "test":
                            long latency = (long) ((System.nanoTime() * 1e-9 - msg.get("t").asDouble()) * 1000);
                            stats.recordValue(latency);
                            global.recordValue(latency);
                            break;
                        case "marker":
                            // whenever we get a marker message, we should dump out the stats
                            // note that the number of fast messages won't necessarily be quite constant
                            System.out.printf("%d messages received in period, latency(min, max, avg, 99%%) = %d, %d, %.1f, %d (ms)\n",
                                    stats.getTotalCount(),
                                    stats.getValueAtPercentile(0), stats.getValueAtPercentile(100),
                                    stats.getMean(), stats.getValueAtPercentile(99));
                            System.out.printf("%d messages received overall, latency(min, max, avg, 99%%) = %d, %d, %.1f, %d (ms)\n",
                                    global.getTotalCount(),
                                    global.getValueAtPercentile(0), global.getValueAtPercentile(100),
                                    global.getMean(), global.getValueAtPercentile(99));

                            stats.reset();
                            break;
                        default:
                            throw new IllegalArgumentException("Illegal message type: " + msg.get("type"));
                    }
                    break;
                case "summary-markers":
                    break;
                default:
                    throw new IllegalStateException("Shouldn't be possible to get message on topic " + record.topic());
            }
        }
    }
}
 
Example 5
Source File: Consumer.java    From mapr-streams-sample-programs with Apache License 2.0
public static void main(String[] args) throws IOException {
    // set up house-keeping
    ObjectMapper mapper = new ObjectMapper();
    Histogram stats = new Histogram(1, 10000000, 2);
    Histogram global = new Histogram(1, 10000000, 2);

    final String TOPIC_FAST_MESSAGES = "/sample-stream:fast-messages";
    final String TOPIC_SUMMARY_MARKERS = "/sample-stream:summary-markers";

    // and the consumer
    KafkaConsumer<String, String> consumer;
    try (InputStream props = Resources.getResource("consumer.props").openStream()) {
        Properties properties = new Properties();
        properties.load(props);
        if (properties.getProperty("group.id") == null) {
            properties.setProperty("group.id", "group-" + new Random().nextInt(100000));
        }

        consumer = new KafkaConsumer<>(properties);
    }
    consumer.subscribe(Arrays.asList(TOPIC_FAST_MESSAGES, TOPIC_SUMMARY_MARKERS));
    int timeouts = 0;
    //noinspection InfiniteLoopStatement
    while (true) {
        // read records with a short timeout. If we time out, we don't really care.
        ConsumerRecords<String, String> records = consumer.poll(200);
        if (records.count() == 0) {
            timeouts++;
        } else {
            System.out.printf("Got %d records after %d timeouts\n", records.count(), timeouts);
            timeouts = 0;
        }
        for (ConsumerRecord<String, String> record : records) {
            switch (record.topic()) {
                case TOPIC_FAST_MESSAGES:
                    // the send time is encoded inside the message
                    JsonNode msg = mapper.readTree(record.value());
                    switch (msg.get("type").asText()) {
                        case "test":
                            long latency = (long) ((System.nanoTime() * 1e-9 - msg.get("t").asDouble()) * 1000);
                            stats.recordValue(latency);
                            global.recordValue(latency);
                            break;
                        case "marker":
                            // whenever we get a marker message, we should dump out the stats
                            // note that the number of fast messages won't necessarily be quite constant
                            System.out.printf("%d messages received in period, latency(min, max, avg, 99%%) = %d, %d, %.1f, %d (ms)\n",
                                    stats.getTotalCount(),
                                    stats.getValueAtPercentile(0), stats.getValueAtPercentile(100),
                                    stats.getMean(), stats.getValueAtPercentile(99));
                            System.out.printf("%d messages received overall, latency(min, max, avg, 99%%) = %d, %d, %.1f, %d (ms)\n",
                                    global.getTotalCount(),
                                    global.getValueAtPercentile(0), global.getValueAtPercentile(100),
                                    global.getMean(), global.getValueAtPercentile(99));

                            stats.reset();
                            break;
                        default:
                            throw new IllegalArgumentException("Illegal message type: " + msg.get("type"));
                    }
                    break;
                case TOPIC_SUMMARY_MARKERS:
                    break;
                default:
                    throw new IllegalStateException("Shouldn't be possible to get message on topic " + record.topic());
            }
        }
    }
}
 
Example 6
Source File: DBConsumer.java    From mapr-streams-sample-programs with Apache License 2.0
public static void main(String[] args) throws IOException {
  // set up house-keeping
  ObjectMapper mapper = new ObjectMapper();
  Histogram stats = new Histogram(1, 10000000, 2);
  Histogram global = new Histogram(1, 10000000, 2);

  final String TOPIC_FAST_MESSAGES = "/sample-stream:fast-messages";
  final String TOPIC_SUMMARY_MARKERS = "/sample-stream:summary-markers";


  Table fastMessagesTable = getTable("/apps/fast-messages");

  // and the consumer
  KafkaConsumer<String, String> consumer;
  try (InputStream props = Resources.getResource("consumer.props").openStream()) {
    Properties properties = new Properties();
    properties.load(props);
    // use a new group id for the dbconsumer
    if (properties.getProperty("group.id") == null) {
      properties.setProperty("group.id", "group-" + new Random().nextInt(100000));
    } else {
      String groupId = properties.getProperty("group.id");
      properties.setProperty("group.id", "db-" + groupId);
    }

    consumer = new KafkaConsumer<>(properties);
  }
  consumer.subscribe(Arrays.asList(TOPIC_FAST_MESSAGES, TOPIC_SUMMARY_MARKERS));
  int timeouts = 0;

  //noinspection InfiniteLoopStatement
  while (true) {
    // read records with a short timeout. If we time out, we don't really care.
    ConsumerRecords<String, String> records = consumer.poll(200);
    if (records.count() == 0) {
      timeouts++;
    } else {
      System.out.printf("Got %d records after %d timeouts\n", records.count(), timeouts);
      timeouts = 0;
    }
    for (ConsumerRecord<String, String> record : records) {
      switch (record.topic()) {
        case TOPIC_FAST_MESSAGES:
          // the send time is encoded inside the message
          JsonNode msg = mapper.readTree(record.value());
          switch (msg.get("type").asText()) {
            case "test":
              // create a Document and set an _id, in this case the message number (document will be updated each time)
              Document messageDocument = MapRDB.newDocument(msg);
              messageDocument.setId( Integer.toString(messageDocument.getInt("k")));
              fastMessagesTable.insertOrReplace( messageDocument );

              long latency = (long) ((System.nanoTime() * 1e-9 - msg.get("t").asDouble()) * 1000);
              stats.recordValue(latency);
              global.recordValue(latency);
              break;
            case "marker":
              // whenever we get a marker message, we should dump out the stats
              // note that the number of fast messages won't necessarily be quite constant
              System.out.printf("%d messages received in period, latency(min, max, avg, 99%%) = %d, %d, %.1f, %d (ms)\n",
                      stats.getTotalCount(),
                      stats.getValueAtPercentile(0), stats.getValueAtPercentile(100),
                      stats.getMean(), stats.getValueAtPercentile(99));
              System.out.printf("%d messages received overall, latency(min, max, avg, 99%%) = %d, %d, %.1f, %d (ms)\n",
                      global.getTotalCount(),
                      global.getValueAtPercentile(0), global.getValueAtPercentile(100),
                      global.getMean(), global.getValueAtPercentile(99));
              stats.reset();
              break;
            default:
              throw new IllegalArgumentException("Illegal message type: " + msg.get("type"));
          }
          break;
        case TOPIC_SUMMARY_MARKERS:
          break;
        default:
          throw new IllegalStateException("Shouldn't be possible to get message on topic " + record.topic());
      }
    }
  }
}
 
Example 7
Source File: LatencyUnderLoadBenchmarkClient.java    From artio with Apache License 2.0
public void run()
{
    final Histogram histogram = new Histogram(3);
    final long scaleToMicros = TimeUnit.MICROSECONDS.toNanos(1);
    final SocketChannel socketChannel = this.socketChannel;
    final MutableAsciiBuffer readFlyweight = LatencyUnderLoadBenchmarkClient.this.readFlyweight;
    final long[] sendTimes = LatencyUnderLoadBenchmarkClient.this.sendTimes;

    while (true)
    {
        final long startTime = System.currentTimeMillis();
        int lastMessagesReceived = 0;
        while (lastMessagesReceived < MESSAGES_EXCHANGED)
        {
            try
            {
                final int length = read(socketChannel);
                final long time = System.nanoTime();
                final int received = scanForReceivesMessages(readFlyweight, length);
                for (int j = 0; j < received; j++)
                {
                    final long duration = time - sendTimes[lastMessagesReceived + j];
                    histogram.recordValue(duration);
                }
                lastMessagesReceived += received;
            }
            catch (final IOException ex)
            {
                ex.printStackTrace();
                System.exit(-1);
            }
        }

        printThroughput(startTime, MESSAGES_EXCHANGED);
        HistogramLogReader.prettyPrint(
            System.currentTimeMillis(), histogram, "Benchmark", scaleToMicros);

        histogram.reset();
        await();
    }
}
 
Example 8
Source File: HistogramUtil.java    From rolling-metrics with Apache License 2.0
public static void reset(Histogram histogram) {
    if (histogram.getTotalCount() > 0) {
        histogram.reset();
    }
}
 
Example 9
Source File: SendSelectReceiveUdpPing.java    From aeron with Apache License 2.0
private void run() throws IOException
{
    final Histogram histogram = new Histogram(TimeUnit.SECONDS.toNanos(10), 3);
    final ByteBuffer buffer = ByteBuffer.allocateDirect(Configuration.MTU_LENGTH_DEFAULT);

    final DatagramChannel receiveChannel = DatagramChannel.open();
    Common.init(receiveChannel);
    receiveChannel.bind(new InetSocketAddress("localhost", Common.PONG_PORT));

    final DatagramChannel sendChannel = DatagramChannel.open();
    Common.init(sendChannel);

    final Selector selector = Selector.open();

    final IntSupplier handler =
        () ->
        {
            try
            {
                buffer.clear();
                receiveChannel.receive(buffer);

                final long receivedSequenceNumber = buffer.getLong(0);
                final long timestampNs = buffer.getLong(SIZE_OF_LONG);

                if (receivedSequenceNumber != sequenceNumber)
                {
                    throw new IllegalStateException(
                        "data Loss:" + sequenceNumber + " to " + receivedSequenceNumber);
                }

                final long durationNs = System.nanoTime() - timestampNs;
                histogram.recordValue(durationNs);
            }
            catch (final IOException ex)
            {
                ex.printStackTrace();
            }

            return 1;
        };

    receiveChannel.register(selector, OP_READ, handler);

    final AtomicBoolean running = new AtomicBoolean(true);
    SigInt.register(() -> running.set(false));

    while (running.get())
    {
        measureRoundTrip(histogram, SEND_ADDRESS, buffer, sendChannel, selector, running);

        histogram.reset();
        System.gc();
        LockSupport.parkNanos(1000 * 1000 * 1000);
    }
}