Java Code Examples for org.apache.hadoop.util.Time#monotonicNow()

The following examples show how to use org.apache.hadoop.util.Time#monotonicNow(). Each example is taken from an open-source project; the source file, project, and license are noted above it.
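Unlike System.currentTimeMillis(), Time#monotonicNow() is derived from System.nanoTime(), so the difference between two readings is immune to NTP corrections and manual wall-clock changes; an individual reading has no meaning on its own and should only be compared with other monotonicNow() values. Before the project examples, here is a minimal self-contained sketch of the elapsed-time pattern most of them follow (ElapsedTimeSketch and doWork() are hypothetical names, not part of any project below):

import org.apache.hadoop.util.Time;

public class ElapsedTimeSketch {

  public static void main(String[] args) throws Exception {
    final long startMs = Time.monotonicNow();   // monotonic reading, not wall-clock time

    doWork();                                   // the operation being timed

    final long elapsedMs = Time.monotonicNow() - startMs;
    System.out.println("doWork() took " + elapsedMs + " ms");
  }

  // Hypothetical stand-in for real work.
  private static void doWork() throws InterruptedException {
    Thread.sleep(100);
  }
}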
Example 1
Source File: ChunkUtils.java    From hadoop-ozone with Apache License 2.0
private static void writeData(ChunkBuffer data, String filename,
    long offset, long len, VolumeIOStats volumeIOStats,
    ToLongFunction<ChunkBuffer> writer) throws StorageContainerException {

  validateBufferSize(len, data.remaining());

  final long startTime = Time.monotonicNow();
  final long bytesWritten;
  try {
    bytesWritten = writer.applyAsLong(data);
  } catch (UncheckedIOException e) {
    throw wrapInStorageContainerException(e.getCause());
  }

  final long endTime = Time.monotonicNow();
  long elapsed = endTime - startTime;
  volumeIOStats.incWriteTime(elapsed);
  volumeIOStats.incWriteOpCount();
  volumeIOStats.incWriteBytes(bytesWritten);

  LOG.debug("Written {} bytes at offset {} to {} in {} ms",
      bytesWritten, offset, filename, elapsed);

  validateWriteSize(len, bytesWritten);
}
 
Example 2
Source File: ClosePipelineCommandHandler.java    From hadoop-ozone with Apache License 2.0
/**
 * Handles a given SCM command.
 *
 * @param command           - SCM Command
 * @param ozoneContainer    - Ozone Container.
 * @param context           - Current Context.
 * @param connectionManager - The SCMs that we are talking to.
 */
@Override
public void handle(SCMCommand command, OzoneContainer ozoneContainer,
    StateContext context, SCMConnectionManager connectionManager) {
  invocationCount.incrementAndGet();
  final long startTime = Time.monotonicNow();
  final DatanodeDetails dn = context.getParent().getDatanodeDetails();
  final ClosePipelineCommandProto closeCommand =
      ((ClosePipelineCommand)command).getProto();
  final HddsProtos.PipelineID pipelineID = closeCommand.getPipelineID();

  try {
    XceiverServerSpi server = ozoneContainer.getWriteChannel();
    server.removeGroup(pipelineID);
    LOG.info("Close Pipeline #{} command on datanode #{}.", pipelineID,
        dn.getUuidString());
  } catch (IOException e) {
    LOG.error("Can't close pipeline #{}", pipelineID, e);
  } finally {
    long endTime = Time.monotonicNow();
    totalTime += endTime - startTime;
  }
}
 
Example 3
Source File: PipelineSyncTask.java    From hadoop-ozone with Apache License 2.0
@Override
protected synchronized void run() {
  try {
    while (canRun()) {
      long start = Time.monotonicNow();
      List<Pipeline> pipelinesFromScm = scmClient.getPipelines();
      reconPipelineManager.initializePipelines(pipelinesFromScm);
      LOG.info("Pipeline sync Thread took {} milliseconds.",
          Time.monotonicNow() - start);
      recordSingleRunCompletion();
      wait(interval);
    }
  } catch (Throwable t) {
    LOG.error("Exception in Pipeline sync Thread.", t);
  }
}
 
Example 4
Source File: DFSClient.java    From hadoop with Apache License 2.0
void updateLastLeaseRenewal() {
  synchronized(filesBeingWritten) {
    if (filesBeingWritten.isEmpty()) {
      return;
    }
    lastLeaseRenewal = Time.monotonicNow();
  }
}
 
Example 5
Source File: CacheManager.java    From big-c with Apache License 2.0
public final void processCacheReport(final DatanodeID datanodeID,
    final List<Long> blockIds) throws IOException {
  namesystem.writeLock();
  final long startTime = Time.monotonicNow();
  final long endTime;
  try {
    final DatanodeDescriptor datanode = 
        blockManager.getDatanodeManager().getDatanode(datanodeID);
    if (datanode == null || !datanode.isAlive) {
      throw new IOException(
          "processCacheReport from dead or unregistered datanode: " +
          datanode);
    }
    processCacheReportImpl(datanode, blockIds);
  } finally {
    endTime = Time.monotonicNow();
    namesystem.writeUnlock();
  }

  // Log the block report processing stats from Namenode perspective
  final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
  if (metrics != null) {
    metrics.addCacheBlockReport((int) (endTime - startTime));
  }
  LOG.debug("Processed cache report from {}, blocks: {}, " +
      "processing time: {} msecs", datanodeID, blockIds.size(), 
      (endTime - startTime));
}
 
Example 6
Source File: DatanodeInfo.java    From hadoop-ozone with Apache License 2.0
/**
 * Updates the datanode storage reports.
 *
 * @param reports list of storage report
 */
public void updateStorageReports(List<StorageReportProto> reports) {
  // Acquire the lock before entering try so that unlock() in finally
  // only runs once the lock is actually held.
  lock.writeLock().lock();
  try {
    lastStatsUpdatedTime = Time.monotonicNow();
    storageReports = reports;
  } finally {
    lock.writeLock().unlock();
  }
}
 
Example 7
Source File: FSImageFormatProtobuf.java    From big-c with Apache License 2.0
void load(File file) throws IOException {
  long start = Time.monotonicNow();
  imgDigest = MD5FileUtils.computeMd5ForFile(file);
  RandomAccessFile raFile = new RandomAccessFile(file, "r");
  FileInputStream fin = new FileInputStream(file);
  try {
    loadInternal(raFile, fin);
    long end = Time.monotonicNow();
    LOG.info("Loaded FSImage in " + (end - start) / 1000 + " seconds.");
  } finally {
    fin.close();
    raFile.close();
  }
}
 
Example 8
Source File: SecondaryNameNode.java    From hadoop with Apache License 2.0
@Override
public String toString() {
  return getClass().getSimpleName() + " Status" 
    + "\nName Node Address      : " + nameNodeAddr
    + "\nStart Time             : " + new Date(starttime)
    + "\nLast Checkpoint        : " + (lastCheckpointTime == 0? "--":
		       ((Time.monotonicNow() - lastCheckpointTime) / 1000))
                           + " seconds ago"
    + "\nCheckpoint Period      : " + checkpointConf.getPeriod() + " seconds"
    + "\nCheckpoint Transactions: " + checkpointConf.getTxnCount()
    + "\nCheckpoint Dirs        : " + checkpointDirs
    + "\nCheckpoint Edits Dirs  : " + checkpointEditsDirs;
}
 
Example 9
Source File: TestByteArrayManager.java    From hadoop with Apache License 2.0
static long performanceTest(final int arrayLength, final int maxArrays,
    final int nThreads, final int[] sleepTimeMSs, final ByteArrayManager impl)
        throws Exception {
  final ExecutorService pool = Executors.newFixedThreadPool(nThreads);
  final List<Future<Void>> futures = new ArrayList<Future<Void>>(sleepTimeMSs.length);
  final long startTime = Time.monotonicNow();

  for(int i = 0; i < sleepTimeMSs.length; i++) {
    final long sleepTime = sleepTimeMSs[i];
    futures.add(pool.submit(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        byte[] array = impl.newByteArray(arrayLength);
        sleepMs(sleepTime);
        impl.release(array);
        return null;
      }
    }));
  }
  for(Future<Void> f : futures) {
    f.get();
  }

  final long endTime = Time.monotonicNow();
  pool.shutdown();
  return endTime - startTime;
}
 
Example 10
Source File: TestBalancer.java    From hadoop with Apache License 2.0
/**
 * Wait until balanced: each datanode gives utilization within 
 * BALANCE_ALLOWED_VARIANCE of average
 * @throws IOException
 * @throws TimeoutException
 */
static void waitForBalancer(long totalUsedSpace, long totalCapacity,
    ClientProtocol client, MiniDFSCluster cluster, Balancer.Parameters p,
    int expectedExcludedNodes) throws IOException, TimeoutException {
  long timeout = TIMEOUT;
  long failtime = (timeout <= 0L) ? Long.MAX_VALUE
      : Time.monotonicNow() + timeout;
  if (!p.nodesToBeIncluded.isEmpty()) {
    totalCapacity = p.nodesToBeIncluded.size() * CAPACITY;
  }
  if (!p.nodesToBeExcluded.isEmpty()) {
    totalCapacity -= p.nodesToBeExcluded.size() * CAPACITY;
  }
  final double avgUtilization = ((double)totalUsedSpace) / totalCapacity;
  boolean balanced;
  do {
    DatanodeInfo[] datanodeReport = 
        client.getDatanodeReport(DatanodeReportType.ALL);
    assertEquals(datanodeReport.length, cluster.getDataNodes().size());
    balanced = true;
    int actualExcludedNodeCount = 0;
    for (DatanodeInfo datanode : datanodeReport) {
      double nodeUtilization = ((double)datanode.getDfsUsed())
          / datanode.getCapacity();
      if (Dispatcher.Util.isExcluded(p.nodesToBeExcluded, datanode)) {
        assertTrue(nodeUtilization == 0);
        actualExcludedNodeCount++;
        continue;
      }
      if (!Dispatcher.Util.isIncluded(p.nodesToBeIncluded, datanode)) {
        assertTrue(nodeUtilization == 0);
        actualExcludedNodeCount++;
        continue;
      }
      if (Math.abs(avgUtilization - nodeUtilization) > BALANCE_ALLOWED_VARIANCE) {
        balanced = false;
        if (Time.monotonicNow() > failtime) {
          throw new TimeoutException(
              "Rebalancing expected avg utilization to become "
              + avgUtilization + ", but on datanode " + datanode
              + " it remains at " + nodeUtilization
              + " after more than " + TIMEOUT + " msec.");
        }
        try {
          Thread.sleep(100);
        } catch (InterruptedException ignored) {
        }
        break;
      }
    }
    assertEquals(expectedExcludedNodes, actualExcludedNodeCount);
  } while (!balanced);
}
 
Example 11
Source File: LeaseRenewer.java    From big-c with Apache License 2.0
/** Is the empty period longer than the grace period? */  
private synchronized boolean isRenewerExpired() {
  return emptyTime != Long.MAX_VALUE
      && Time.monotonicNow() - emptyTime > gracePeriod;
}
 
Example 12
Source File: OpenFileCtx.java    From hadoop with Apache License 2.0
private boolean checkStreamTimeout(long streamTimeout) {
  return Time.monotonicNow() - lastAccessTime > streamTimeout;
}
 
Example 13
Source File: TestNodeCount.java    From hadoop with Apache License 2.0
void initializeTimeout(long timeout) {
  this.timeout = timeout;
  this.failtime = Time.monotonicNow()
      + ((timeout <= 0) ? Long.MAX_VALUE : timeout);
}
 
Example 14
Source File: TopMetrics.java    From big-c with Apache License 2.0
public void report(String userName, String cmd) {
  long currTime = Time.monotonicNow();
  report(currTime, userName, cmd);
}
 
Example 15
Source File: ShellBasedIdMapping.java    From hadoop with Apache License 2.0
synchronized private boolean isExpired() {
  return Time.monotonicNow() - lastUpdateTime > timeout;
}
 
Example 16
Source File: InvalidateBlocks.java    From big-c with Apache License 2.0
/**
 * @return the remaining pending time
 */
@VisibleForTesting
long getInvalidationDelay() {
  return pendingPeriodInMs - (Time.monotonicNow() - startupTime);
}
 
Example 17
Source File: ShellBasedIdMapping.java    From big-c with Apache License 2.0
@VisibleForTesting  
synchronized public void clearNameMaps() {
  uidNameMap.clear();
  gidNameMap.clear();
  lastUpdateTime = Time.monotonicNow();
}
 
Example 18
Source File: ShortCircuitCache.java    From hadoop with Apache License 2.0
/**
 * Run the CacheCleaner thread.
 *
 * Whenever a thread requests a ShortCircuitReplica object, we will make
 * sure it gets one.  That ShortCircuitReplica object can then be re-used
 * when another thread requests a ShortCircuitReplica object for the same
 * block.  So in that sense, there is no maximum size to the cache.
 *
 * However, when a ShortCircuitReplica object is unreferenced by the
 * thread(s) that are using it, it becomes evictable.  There are two
 * separate eviction lists-- one for mmaped objects, and another for
 * non-mmaped objects.  We do this in order to avoid having the regular
 * files kick the mmaped files out of the cache too quickly.  Reusing
 * an already-existing mmap gives a huge performance boost, since the
 * page table entries don't have to be re-populated.  Both the mmap
 * and non-mmap evictable lists have maximum sizes and maximum lifespans.
 */
@Override
public void run() {
  ShortCircuitCache.this.lock.lock();
  try {
    if (ShortCircuitCache.this.closed) return;
    long curMs = Time.monotonicNow();

    if (LOG.isDebugEnabled()) {
      LOG.debug(this + ": cache cleaner running at " + curMs);
    }

    int numDemoted = demoteOldEvictableMmaped(curMs);
    int numPurged = 0;
    Long evictionTimeNs = Long.valueOf(0);
    while (true) {
      Entry<Long, ShortCircuitReplica> entry = 
          evictable.ceilingEntry(evictionTimeNs);
      if (entry == null) break;
      evictionTimeNs = entry.getKey();
      long evictionTimeMs = 
          TimeUnit.MILLISECONDS.convert(evictionTimeNs, TimeUnit.NANOSECONDS);
      if (evictionTimeMs + maxNonMmappedEvictableLifespanMs >= curMs) break;
      ShortCircuitReplica replica = entry.getValue();
      if (LOG.isTraceEnabled()) {
        LOG.trace("CacheCleaner: purging " + replica + ": " + 
              StringUtils.getStackTrace(Thread.currentThread()));
      }
      purge(replica);
      numPurged++;
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug(this + ": finishing cache cleaner run started at " +
        curMs + ".  Demoted " + numDemoted + " mmapped replicas; " +
        "purged " + numPurged + " replicas.");
    }
  } finally {
    ShortCircuitCache.this.lock.unlock();
  }
}
 
Example 19
Source File: DFSOutputStream.java    From big-c with Apache License 2.0
private LocatedBlock locateFollowingBlock(DatanodeInfo[] excludedNodes)  throws IOException {
  int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
  long sleeptime = 400;
  while (true) {
    long localstart = Time.monotonicNow();
    while (true) {
      try {
        return dfsClient.namenode.addBlock(src, dfsClient.clientName,
            block, excludedNodes, fileId, favoredNodes);
      } catch (RemoteException e) {
        IOException ue = 
          e.unwrapRemoteException(FileNotFoundException.class,
                                  AccessControlException.class,
                                  NSQuotaExceededException.class,
                                  DSQuotaExceededException.class,
                                  UnresolvedPathException.class);
        if (ue != e) {
          throw ue; // no need to retry these exceptions
        }

        if (NotReplicatedYetException.class.getName().
            equals(e.getClassName())) {
          if (retries == 0) { 
            throw e;
          } else {
            --retries;
            DFSClient.LOG.info("Exception while adding a block", e);
            long elapsed = Time.monotonicNow() - localstart;
            if (elapsed > 5000) {
              DFSClient.LOG.info("Waiting for replication for "
                  + (elapsed / 1000) + " seconds");
            }
            try {
              DFSClient.LOG.warn("NotReplicatedYetException sleeping " + src
                  + " retries left " + retries);
              Thread.sleep(sleeptime);
              sleeptime *= 2;
            } catch (InterruptedException ie) {
              DFSClient.LOG.warn("Caught exception ", ie);
            }
          }
        } else {
          throw e;
        }

      }
    }
  } 
}
 
Example 20
Source File: Lease.java    From hadoop-ozone with Apache License 2.0
/**
 * Returns the time elapsed since the creation of lease.
 *
 * @return elapsed time in milliseconds
 * @throws LeaseExpiredException
 *         If the lease has already timed out
 */
public long getElapsedTime() throws LeaseExpiredException {
  if(hasExpired()) {
    throw new LeaseExpiredException(messageForResource(resource));
  }
  return Time.monotonicNow() - creationTime;
}