Java Code Examples for org.apache.hadoop.util.StopWatch#now()

The following examples show how to use org.apache.hadoop.util.StopWatch#now(). Each example is taken from an open-source project; the source file and license are noted above the code.
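As the examples below show, now() reads the elapsed time of a StopWatch that may still be running: with no argument it returns nanoseconds, and with a TimeUnit argument it converts that reading for you. A minimal, self-contained sketch (the class name StopWatchDemo and the sleep are illustrative only, not taken from the projects below):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.StopWatch;

public class StopWatchDemo {
  public static void main(String[] args) throws InterruptedException {
    StopWatch sw = new StopWatch().start(); // start() returns this, so calls chain
    Thread.sleep(50);                       // stand-in for the work being timed
    long nanos = sw.now();                  // elapsed time in nanoseconds
    long millis = sw.now(TimeUnit.MILLISECONDS); // the same reading, converted
    System.out.println(nanos + " ns ~ " + millis + " ms");
  }
}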
Example 1
Source File: TestJournalNode.java    From hadoop with Apache License 2.0
private void doPerfTest(int editsSize, int numEdits) throws Exception {
  byte[] data = new byte[editsSize];
  ch.newEpoch(1).get();
  ch.setEpoch(1);
  ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
  
  StopWatch sw = new StopWatch().start();
  for (int i = 1; i < numEdits; i++) {
    ch.sendEdits(1L, i, 1, data).get();
  }
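  // Read the elapsed time in milliseconds; the watch keeps running.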
  long time = sw.now(TimeUnit.MILLISECONDS);
  
  System.err.println("Wrote " + numEdits + " batches of " + editsSize +
      " bytes in " + time + "ms");
  float avgRtt = (float)time/(float)numEdits;
  long throughput = ((long)numEdits * editsSize * 1000L)/time;
  System.err.println("Time per batch: " + avgRtt + "ms");
  System.err.println("Throughput: " + throughput + " bytes/sec");
}
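Note that both the average round-trip time and the throughput are derived from a single elapsed reading taken after the loop, rather than from timing each sendEdits() call individually.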
 
Example 2
Source File: TestMultiThreadedHflush.java    From hadoop with Apache License 2.0
private void doAWrite() throws IOException {
  StopWatch sw = new StopWatch().start();
  stm.write(toWrite);
  stm.hflush();
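  // Per-write latency in microseconds, sampled from the still-running watch.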
  long micros = sw.now(TimeUnit.MICROSECONDS);
  quantiles.insert(micros);
}
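Here the watch is never stopped: now() samples the elapsed time of a running StopWatch, so no stop() call is needed before reading it.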
 
Example 3
Source File: Journal.java    From hadoop with Apache License 2.0
/**
 * Write a batch of edits to the journal.
 * {@link QJournalProtocol#journal(RequestInfo, long, long, int, byte[])}
 */
synchronized void journal(RequestInfo reqInfo,
    long segmentTxId, long firstTxnId,
    int numTxns, byte[] records) throws IOException {
  checkFormatted();
  checkWriteRequest(reqInfo);

  checkSync(curSegment != null,
      "Can't write, no segment open");
  
  if (curSegmentTxId != segmentTxId) {
    // Sanity check: it is possible that the writer will fail IPCs
    // on both the finalize() and then the start() of the next segment.
    // This could cause us to continue writing to an old segment
    // instead of rolling to a new one, which breaks one of the
    // invariants in the design. If it happens, abort the segment
    // and throw an exception.
    JournalOutOfSyncException e = new JournalOutOfSyncException(
        "Writer out of sync: it thinks it is writing segment " + segmentTxId
        + " but current segment is " + curSegmentTxId);
    abortCurSegment();
    throw e;
  }
    
  checkSync(nextTxId == firstTxnId,
      "Can't write txid " + firstTxnId + " expecting nextTxId=" + nextTxId);
  
  long lastTxnId = firstTxnId + numTxns - 1;
  if (LOG.isTraceEnabled()) {
    LOG.trace("Writing txid " + firstTxnId + "-" + lastTxnId);
  }

  // If the edit has already been marked as committed, we know
  // it has been fsynced on a quorum of other nodes, and we are
  // "catching up" with the rest. Hence we do not need to fsync.
  boolean isLagging = lastTxnId <= committedTxnId.get();
  boolean shouldFsync = !isLagging;
  
  curSegment.writeRaw(records, 0, records.length);
  curSegment.setReadyToFlush();
  StopWatch sw = new StopWatch();
  sw.start();
  curSegment.flush(shouldFsync);
  sw.stop();

  long nanoSeconds = sw.now();
  metrics.addSync(
      TimeUnit.MICROSECONDS.convert(nanoSeconds, TimeUnit.NANOSECONDS));
  long milliSeconds = TimeUnit.MILLISECONDS.convert(
      nanoSeconds, TimeUnit.NANOSECONDS);

  if (milliSeconds > WARN_SYNC_MILLIS_THRESHOLD) {
    LOG.warn("Sync of transaction range " + firstTxnId + "-" + lastTxnId +
             " took " + milliSeconds + "ms");
  }

  if (isLagging) {
    // This batch of edits has already been committed on a quorum of other
    // nodes. So, we are in "catch up" mode. This gets its own metric.
    metrics.batchesWrittenWhileLagging.incr(1);
  }
  
  metrics.batchesWritten.incr(1);
  metrics.bytesWritten.incr(records.length);
  metrics.txnsWritten.incr(numTxns);
  
  highestWrittenTxId = lastTxnId;
  nextTxId = lastTxnId + 1;
}
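Unlike the earlier examples, this code stops the watch and converts the raw nanosecond reading by hand. The now(TimeUnit) overload used above is an equivalent shorthand; a small sketch, reusing the sw variable from this example:

long microSeconds = sw.now(TimeUnit.MICROSECONDS); // same as TimeUnit.MICROSECONDS.convert(sw.now(), TimeUnit.NANOSECONDS)
long milliSeconds = sw.now(TimeUnit.MILLISECONDS); // same as TimeUnit.MILLISECONDS.convert(sw.now(), TimeUnit.NANOSECONDS)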
 