org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix Java Examples

The following examples show how to use org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix. Each example comes from an open-source project; the source file and license are noted in the heading above it.
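Before the project examples, here is a minimal, self-contained sketch of the two methods they exercise: long2String, which renders a long as a human-readable size using traditional binary prefixes (K, M, G, ...), and string2long, which parses such a string back into a long. The class and method signatures match Hadoop's StringUtils; the demo class, sample values, and the expected outputs in the comments are illustrative.

import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;

public class TraditionalBinaryPrefixDemo {
  public static void main(String[] args) {
    // Render a byte count with one decimal place and "B" as the unit.
    // 1536 = 1.5 * 1024, so this should print "1.5 KB".
    System.out.println(TraditionalBinaryPrefix.long2String(1536L, "B", 1));

    // Most examples below pass an empty unit; this should print "1.5 K".
    System.out.println(TraditionalBinaryPrefix.long2String(1536L, "", 1));

    // Parse a size with a binary suffix (case-insensitive): "128k" -> 131072.
    System.out.println(TraditionalBinaryPrefix.string2long("128k"));
  }
}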
Example #1
Source File: HStore.java    From hbase with Apache License 2.0
/**
 * @param path The pathname of the tmp file into which the store was flushed
 * @return store file created.
 */
private HStoreFile commitFile(Path path, long logCacheFlushId, MonitoredTask status)
    throws IOException {
  // Write-out finished successfully, move into the right spot
  Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path);

  status.setStatus("Flushing " + this + ": reopening flushed file");
  HStoreFile sf = createStoreFileAndReader(dstPath);

  StoreFileReader r = sf.getReader();
  this.storeSize.addAndGet(r.length());
  this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes());

  if (LOG.isInfoEnabled()) {
    LOG.info("Added " + sf + ", entries=" + r.getEntries() +
      ", sequenceid=" + logCacheFlushId +
      ", filesize=" + TraditionalBinaryPrefix.long2String(r.length(), "", 1));
  }
  return sf;
}
 
Example #2
Source File: MemStoreFlusher.java    From hbase with Apache License 2.0
/**
 * @param conf the region server configuration
 * @param server the region server this flusher serves
 */
public MemStoreFlusher(final Configuration conf,
    final HRegionServer server) {
  super();
  this.conf = conf;
  this.server = server;
  this.threadWakeFrequency =
      conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
  this.blockingWaitTime = conf.getInt("hbase.hstore.blockingWaitTime",
    90000);
  int handlerCount = conf.getInt("hbase.hstore.flusher.count", 2);
  if (handlerCount < 1) {
    LOG.warn("hbase.hstore.flusher.count was configed to {} which is less than 1, corrected to 1",
        handlerCount);
    handlerCount = 1;
  }
  this.flushHandlers = new FlushHandler[handlerCount];
  LOG.info("globalMemStoreLimit="
      + TraditionalBinaryPrefix
          .long2String(this.server.getRegionServerAccounting().getGlobalMemStoreLimit(), "", 1)
      + ", globalMemStoreLimitLowMark="
      + TraditionalBinaryPrefix.long2String(
        this.server.getRegionServerAccounting().getGlobalMemStoreLimitLowMark(), "", 1)
      + ", Offheap="
      + (this.server.getRegionServerAccounting().isOffheap()));
}
 
Example #3
Source File: StripeStoreFileManager.java    From hbase with Apache License 2.0
private void debugDumpState(String string) {
  if (!LOG.isDebugEnabled()) return;
  StringBuilder sb = new StringBuilder();
  sb.append("\n" + string + "; current stripe state is as such:");
  sb.append("\n level 0 with ")
      .append(state.level0Files.size())
      .append(
        " files: "
            + TraditionalBinaryPrefix.long2String(
              StripeCompactionPolicy.getTotalFileSize(state.level0Files), "", 1) + ";");
  for (int i = 0; i < state.stripeFiles.size(); ++i) {
    String endRow = (i == state.stripeEndRows.length)
        ? "(end)" : "[" + Bytes.toString(state.stripeEndRows[i]) + "]";
    sb.append("\n stripe ending in ")
        .append(endRow)
        .append(" with ")
        .append(state.stripeFiles.get(i).size())
        .append(
          " files: "
              + TraditionalBinaryPrefix.long2String(
                StripeCompactionPolicy.getTotalFileSize(state.stripeFiles.get(i)), "", 1) + ";");
  }
  sb.append("\n").append(state.stripeFiles.size()).append(" stripes total.");
  sb.append("\n").append(getStorefileCount()).append(" files total.");
  LOG.debug(sb.toString());
}
 
Example #4
Source File: ContainersMonitorImpl.java    From hadoop with Apache License 2.0
private String formatUsageString(long currentVmemUsage, long vmemLimit,
    long currentPmemUsage, long pmemLimit) {
  return String.format("%sB of %sB physical memory used; " +
      "%sB of %sB virtual memory used",
      TraditionalBinaryPrefix.long2String(currentPmemUsage, "", 1),
      TraditionalBinaryPrefix.long2String(pmemLimit, "", 1),
      TraditionalBinaryPrefix.long2String(currentVmemUsage, "", 1),
      TraditionalBinaryPrefix.long2String(vmemLimit, "", 1));
}
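Note the empty unit argument in each long2String call: the trailing "B" comes from the format string itself, so a usage of 512 MiB should render as "512 MB".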
 
Example #5
Source File: HStore.java    From hbase with Apache License 2.0
/**
 * Similar to commit, but called in secondary region replicas for replaying the
 * flush cache from primary region. Adds the new files to the store, and drops the
 * snapshot depending on dropMemstoreSnapshot argument.
 * @param fileNames names of the flushed files
 * @param dropMemstoreSnapshot whether to drop the prepared memstore snapshot
 */
@Override
public void replayFlush(List<String> fileNames, boolean dropMemstoreSnapshot)
    throws IOException {
  List<HStoreFile> storeFiles = new ArrayList<>(fileNames.size());
  for (String file : fileNames) {
    // open the file as a store file (hfile link, etc)
    StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), file);
    HStoreFile storeFile = createStoreFileAndReader(storeFileInfo);
    storeFiles.add(storeFile);
    HStore.this.storeSize.addAndGet(storeFile.getReader().length());
    HStore.this.totalUncompressedBytes
        .addAndGet(storeFile.getReader().getTotalUncompressedBytes());
    if (LOG.isInfoEnabled()) {
      LOG.info(this + " added " + storeFile + ", entries=" + storeFile.getReader().getEntries() +
          ", sequenceid=" + storeFile.getReader().getSequenceID() + ", filesize="
          + TraditionalBinaryPrefix.long2String(storeFile.getReader().length(), "", 1));
    }
  }

  long snapshotId = -1; // -1 means do not drop
  if (dropMemstoreSnapshot && snapshot != null) {
    snapshotId = snapshot.getId();
    snapshot.close();
  }
  HStore.this.updateStorefiles(storeFiles, snapshotId);
}
 
Example #6
Source File: HStore.java    From hbase with Apache License 2.0
private void removeUnneededFiles() throws IOException {
  if (!conf.getBoolean("hbase.store.delete.expired.storefile", true)) {
    return;
  }
  if (getColumnFamilyDescriptor().getMinVersions() > 0) {
    LOG.debug("Skipping expired store file removal due to min version of {} being {}",
        this, getColumnFamilyDescriptor().getMinVersions());
    return;
  }
  this.lock.readLock().lock();
  Collection<HStoreFile> delSfs = null;
  try {
    synchronized (filesCompacting) {
      long cfTtl = getStoreFileTtl();
      if (cfTtl != Long.MAX_VALUE) {
        delSfs = storeEngine.getStoreFileManager().getUnneededFiles(
            EnvironmentEdgeManager.currentTime() - cfTtl, filesCompacting);
        addToCompactingFiles(delSfs);
      }
    }
  } finally {
    this.lock.readLock().unlock();
  }

  if (CollectionUtils.isEmpty(delSfs)) {
    return;
  }

  Collection<HStoreFile> newFiles = Collections.emptyList(); // No new files.
  writeCompactionWalRecord(delSfs, newFiles);
  replaceStoreFiles(delSfs, newFiles);
  completeCompaction(delSfs);
  LOG.info("Completed removal of " + delSfs.size() + " unnecessary (expired) file(s) in "
      + this + "; total size is "
      + TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1));
}
 
Example #7
Source File: HStore.java    From hbase with Apache License 2.0
/**
 * Log a very elaborate compaction completion message.
 * @param cr Request.
 * @param sfs Resulting files.
 * @param compactionStartTime Start time.
 */
private void logCompactionEndMessage(
    CompactionRequestImpl cr, List<HStoreFile> sfs, long now, long compactionStartTime) {
  StringBuilder message = new StringBuilder(
    "Completed" + (cr.isMajor() ? " major" : "") + " compaction of "
    + cr.getFiles().size() + (cr.isAllFiles() ? " (all)" : "") + " file(s) in "
    + this + " of " + this.getRegionInfo().getShortNameToLog() + " into ");
  if (sfs.isEmpty()) {
    message.append("none, ");
  } else {
    for (HStoreFile sf: sfs) {
      message.append(sf.getPath().getName());
      message.append("(size=");
      message.append(TraditionalBinaryPrefix.long2String(sf.getReader().length(), "", 1));
      message.append("), ");
    }
  }
  message.append("total size for store is ")
    .append(StringUtils.TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1))
    .append(". This selection was in queue for ")
    .append(StringUtils.formatTimeDiff(compactionStartTime, cr.getSelectionTime()))
    .append(", and took ").append(StringUtils.formatTimeDiff(now, compactionStartTime))
    .append(" to execute.");
  LOG.info(message.toString());
  if (LOG.isTraceEnabled()) {
    int fileCount = storeEngine.getStoreFileManager().getStorefileCount();
    long resultSize = getTotalSize(sfs);
    String traceMessage = "COMPACTION start,end,size out,files in,files out,store size,"
      + "store files [" + compactionStartTime + "," + now + "," + resultSize + ","
        + cr.getFiles().size() + "," + sfs.size() + "," +  storeSize + "," + fileCount + "]";
    LOG.trace(traceMessage);
  }
}
 
Example #8
Source File: HStore.java    From hbase with Apache License 2.0
public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException {
  LOG.info("Validating recovered hfile at {} for inclusion in store {}", path, this);
  FileSystem srcFs = path.getFileSystem(conf);
  srcFs.access(path, FsAction.READ_WRITE);
  try (HFile.Reader reader =
      HFile.createReader(srcFs, path, cacheConf, isPrimaryReplicaStore(), conf)) {
    Optional<byte[]> firstKey = reader.getFirstRowKey();
    Preconditions.checkState(firstKey.isPresent(), "First key can not be null");
    Optional<Cell> lk = reader.getLastKey();
    Preconditions.checkState(lk.isPresent(), "Last key can not be null");
    byte[] lastKey = CellUtil.cloneRow(lk.get());
    if (!this.getRegionInfo().containsRange(firstKey.get(), lastKey)) {
      throw new WrongRegionException("Recovered hfile " + path.toString() +
          " does not fit inside region " + this.getRegionInfo().getRegionNameAsString());
    }
  }

  Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path);
  HStoreFile sf = createStoreFileAndReader(dstPath);
  StoreFileReader r = sf.getReader();
  this.storeSize.addAndGet(r.length());
  this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes());

  this.lock.writeLock().lock();
  try {
    this.storeEngine.getStoreFileManager().insertNewFiles(Lists.newArrayList(sf));
  } finally {
    this.lock.writeLock().unlock();
  }

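  // Unlike most call sites here, this passes "B" as the unit, so the logged
  // filesize reads like "12.3 MB" rather than "12.3 M".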
  LOG.info("Loaded recovered hfile to {}, entries={}, sequenceid={}, filesize={}", sf,
    r.getEntries(), r.getSequenceID(), TraditionalBinaryPrefix.long2String(r.length(), "B", 1));
  return sf;
}
 
Example #9
Source File: CompactionRequestImpl.java    From hbase with Apache License 2.0
@Override
public String toString() {
  String fsList = filesToCompact.stream().filter(f -> f.getReader() != null)
      .map(f -> TraditionalBinaryPrefix.long2String(f.getReader().length(), "", 1))
      .collect(Collectors.joining(", "));

  return "regionName=" + regionName + ", storeName=" + storeName + ", fileCount=" +
      this.getFiles().size() + ", fileSize=" +
      TraditionalBinaryPrefix.long2String(totalSize, "", 1) +
      ((fsList.isEmpty()) ? "" : " (" + fsList + ")") + ", priority=" + priority + ", time=" +
      selectionTime;
}
 
Example #10
Source File: DistCpV1.java    From big-c with Apache License 2.0
/**
 * Copies single file to the path specified by tmpfile.
 * @param srcstat  src path and metadata
 * @param tmpfile  temporary file to which copy is to be done
 * @param absdst   actual destination path to which copy is to be done
 * @param reporter used to update counters and report copy progress
 * @return Number of bytes copied
 */
private long doCopyFile(FileStatus srcstat, Path tmpfile, Path absdst,
                        Reporter reporter) throws IOException {
  long bytesCopied = 0L;
  Path srcPath = srcstat.getPath();
  // open src file
  try (FSDataInputStream in = srcPath.getFileSystem(job).open(srcPath)) {
    reporter.incrCounter(Counter.BYTESEXPECTED, srcstat.getLen());
    // open tmp file
    try (FSDataOutputStream out = create(tmpfile, reporter, srcstat)) {
      LOG.info("Copying file " + srcPath + " of size " +
               srcstat.getLen() + " bytes...");
    
      // copy file
      for(int bytesRead; (bytesRead = in.read(buffer)) >= 0; ) {
        out.write(buffer, 0, bytesRead);
        bytesCopied += bytesRead;
        reporter.setStatus(
            String.format("%.2f ", bytesCopied*100.0/srcstat.getLen())
            + absdst + " [ " +
            TraditionalBinaryPrefix.long2String(bytesCopied, "", 1) + " / "
            + TraditionalBinaryPrefix.long2String(srcstat.getLen(), "", 1)
            + " ]");
      }
    }
  }
  return bytesCopied;
}
 
Example #11
Source File: DistCpV1.java    From big-c with Apache License 2.0
private long parseLong(String[] args, int offset) {
  if (offset ==  args.length) {
    throw new IllegalArgumentException("<n> not specified in " + cmd);
  }
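  // string2long accepts a trailing traditional binary prefix
  // (case-insensitive k, m, g, ...), e.g. "128k" -> 131072.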
  long n = StringUtils.TraditionalBinaryPrefix.string2long(args[offset]);
  if (n <= 0) {
    throw new IllegalArgumentException("n = " + n + " <= 0 in " + cmd);
  }
  return n;
}
 
Example #12
Source File: ContainersMonitorImpl.java    From big-c with Apache License 2.0
private String formatUsageString(long currentVmemUsage, long vmemLimit,
    long currentPmemUsage, long pmemLimit) {
  return String.format("%sB of %sB physical memory used; " +
      "%sB of %sB virtual memory used",
      TraditionalBinaryPrefix.long2String(currentPmemUsage, "", 1),
      TraditionalBinaryPrefix.long2String(pmemLimit, "", 1),
      TraditionalBinaryPrefix.long2String(currentVmemUsage, "", 1),
      TraditionalBinaryPrefix.long2String(vmemLimit, "", 1));
}
 
Example #13
Source File: DistCpV1.java    From hadoop with Apache License 2.0
/**
 * Copies single file to the path specified by tmpfile.
 * @param srcstat  src path and metadata
 * @param tmpfile  temporary file to which copy is to be done
 * @param absdst   actual destination path to which copy is to be done
 * @param reporter used to update counters and report copy progress
 * @return Number of bytes copied
 */
private long doCopyFile(FileStatus srcstat, Path tmpfile, Path absdst,
                        Reporter reporter) throws IOException {
  long bytesCopied = 0L;
  Path srcPath = srcstat.getPath();
  // open src file
  try (FSDataInputStream in = srcPath.getFileSystem(job).open(srcPath)) {
    reporter.incrCounter(Counter.BYTESEXPECTED, srcstat.getLen());
    // open tmp file
    try (FSDataOutputStream out = create(tmpfile, reporter, srcstat)) {
      LOG.info("Copying file " + srcPath + " of size " +
               srcstat.getLen() + " bytes...");
    
      // copy file
      for(int bytesRead; (bytesRead = in.read(buffer)) >= 0; ) {
        out.write(buffer, 0, bytesRead);
        bytesCopied += bytesRead;
        reporter.setStatus(
            String.format("%.2f ", bytesCopied*100.0/srcstat.getLen())
            + absdst + " [ " +
            TraditionalBinaryPrefix.long2String(bytesCopied, "", 1) + " / "
            + TraditionalBinaryPrefix.long2String(srcstat.getLen(), "", 1)
            + " ]");
      }
    }
  }
  return bytesCopied;
}
 
Example #14
Source File: DistCpV1.java    From hadoop with Apache License 2.0
private long parseLong(String[] args, int offset) {
  if (offset ==  args.length) {
    throw new IllegalArgumentException("<n> not specified in " + cmd);
  }
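  // string2long accepts a trailing traditional binary prefix
  // (case-insensitive k, m, g, ...), e.g. "128k" -> 131072.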
  long n = StringUtils.TraditionalBinaryPrefix.string2long(args[offset]);
  if (n <= 0) {
    throw new IllegalArgumentException("n = " + n + " <= 0 in " + cmd);
  }
  return n;
}
 
Example #15
Source File: DistCpV1.java    From big-c with Apache License 2.0
static String bytesString(long b) {
  return b + " bytes (" +
      TraditionalBinaryPrefix.long2String(b, "", 1) + ")";
}
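For example, bytesString(1536) should render as "1536 bytes (1.5 K)", given long2String's one-decimal formatting.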
 
Example #16
Source File: Compactor.java    From hbase with Apache License 2.0
/**
 * Extracts some details about the files to compact that are commonly needed by compactors.
 * @param filesToCompact Files.
 * @param allFiles Whether all files are included for compaction
 * @return The result.
 */
private FileDetails getFileDetails(
    Collection<HStoreFile> filesToCompact, boolean allFiles) throws IOException {
  FileDetails fd = new FileDetails();
  long oldestHFileTimestampToKeepMVCC = System.currentTimeMillis() -
    (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod);

  for (HStoreFile file : filesToCompact) {
    if(allFiles && (file.getModificationTimestamp() < oldestHFileTimestampToKeepMVCC)) {
      // when isAllFiles is true, all files are compacted so we can calculate the smallest
      // MVCC value to keep
      if(fd.minSeqIdToKeep < file.getMaxMemStoreTS()) {
        fd.minSeqIdToKeep = file.getMaxMemStoreTS();
      }
    }
    long seqNum = file.getMaxSequenceId();
    fd.maxSeqId = Math.max(fd.maxSeqId, seqNum);
    StoreFileReader r = file.getReader();
    if (r == null) {
      LOG.warn("Null reader for " + file.getPath());
      continue;
    }
    // NOTE: use getEntries when compacting instead of getFilterEntries, otherwise under-sized
    // blooms can cause progress to be miscalculated or if the user switches bloom
    // type (e.g. from ROW to ROWCOL)
    long keyCount = r.getEntries();
    fd.maxKeyCount += keyCount;
    // calculate the latest MVCC readpoint in any of the involved store files
    Map<byte[], byte[]> fileInfo = r.loadFileInfo();

    // calculate the total size of the compacted files
    fd.totalCompactedFilesSize += r.length();

    byte[] tmp = null;
    // Get and set the real MVCCReadpoint for bulk loaded files, which is the
    // SeqId number.
    if (r.isBulkLoaded()) {
      fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, r.getSequenceID());
    }
    else {
      tmp = fileInfo.get(HFile.Writer.MAX_MEMSTORE_TS_KEY);
      if (tmp != null) {
        fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp));
      }
    }
    tmp = fileInfo.get(HFileInfo.MAX_TAGS_LEN);
    if (tmp != null) {
      fd.maxTagsLength = Math.max(fd.maxTagsLength, Bytes.toInt(tmp));
    }
    // If required, calculate the earliest put timestamp of all involved storefiles.
    // This is used to remove family delete marker during compaction.
    long earliestPutTs = 0;
    if (allFiles) {
      tmp = fileInfo.get(EARLIEST_PUT_TS);
      if (tmp == null) {
        // There's a file with no information, must be an old one
        // assume we have very old puts
        fd.earliestPutTs = earliestPutTs = HConstants.OLDEST_TIMESTAMP;
      } else {
        earliestPutTs = Bytes.toLong(tmp);
        fd.earliestPutTs = Math.min(fd.earliestPutTs, earliestPutTs);
      }
    }
    tmp = fileInfo.get(TIMERANGE_KEY);
    fd.latestPutTs =
        tmp == null ? HConstants.LATEST_TIMESTAMP : TimeRangeTracker.parseFrom(tmp).getMax();
    LOG.debug("Compacting {}, keycount={}, bloomtype={}, size={}, "
            + "encoding={}, compression={}, seqNum={}{}",
        (file.getPath() == null? null: file.getPath().getName()),
        keyCount,
        r.getBloomFilterType().toString(),
        TraditionalBinaryPrefix.long2String(r.length(), "", 1),
        r.getHFileReader().getDataBlockEncoding(),
        compactionCompression,
        seqNum,
        (allFiles? ", earliestPutTs=" + earliestPutTs: ""));
  }
  return fd;
}
 
Example #17
Source File: MemStoreFlusher.java    From hbase with Apache License 2.0
private void logMsg(String type, long val, long max) {
  LOG.info("Blocking updates: {} {} is >= blocking {}", type,
      TraditionalBinaryPrefix.long2String(val, "", 1),
      TraditionalBinaryPrefix.long2String(max, "", 1));
}
 
Example #18
Source File: ContainersMonitorImpl.java    From big-c with Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  this.monitoringInterval =
      conf.getLong(YarnConfiguration.NM_CONTAINER_MON_INTERVAL_MS,
          YarnConfiguration.DEFAULT_NM_CONTAINER_MON_INTERVAL_MS);

  Class<? extends ResourceCalculatorPlugin> clazz =
      conf.getClass(YarnConfiguration.NM_CONTAINER_MON_RESOURCE_CALCULATOR, null,
          ResourceCalculatorPlugin.class);
  this.resourceCalculatorPlugin =
      ResourceCalculatorPlugin.getResourceCalculatorPlugin(clazz, conf);
  LOG.info(" Using ResourceCalculatorPlugin : "
      + this.resourceCalculatorPlugin);
  processTreeClass = conf.getClass(YarnConfiguration.NM_CONTAINER_MON_PROCESS_TREE, null,
          ResourceCalculatorProcessTree.class);
  this.conf = conf;
  LOG.info(" Using ResourceCalculatorProcessTree : "
      + this.processTreeClass);

  this.containerMetricsEnabled =
      conf.getBoolean(YarnConfiguration.NM_CONTAINER_METRICS_ENABLE,
          YarnConfiguration.DEFAULT_NM_CONTAINER_METRICS_ENABLE);
  this.containerMetricsPeriodMs =
      conf.getLong(YarnConfiguration.NM_CONTAINER_METRICS_PERIOD_MS,
          YarnConfiguration.DEFAULT_NM_CONTAINER_METRICS_PERIOD_MS);

  long configuredPMemForContainers = conf.getLong(
      YarnConfiguration.NM_PMEM_MB,
      YarnConfiguration.DEFAULT_NM_PMEM_MB) * 1024 * 1024L;

  long configuredVCoresForContainers = conf.getLong(
      YarnConfiguration.NM_VCORES,
      YarnConfiguration.DEFAULT_NM_VCORES);


  // Setting these irrespective of whether checks are enabled. Required in
  // the UI.
  // ///////// Physical memory configuration //////
  this.maxPmemAllottedForContainers = configuredPMemForContainers;
  this.maxVCoresAllottedForContainers = configuredVCoresForContainers;

  // ///////// Virtual memory configuration //////
  float vmemRatio = conf.getFloat(YarnConfiguration.NM_VMEM_PMEM_RATIO,
      YarnConfiguration.DEFAULT_NM_VMEM_PMEM_RATIO);
  Preconditions.checkArgument(vmemRatio > 0.99f,
      YarnConfiguration.NM_VMEM_PMEM_RATIO + " should be at least 1.0");
  this.maxVmemAllottedForContainers =
      (long) (vmemRatio * configuredPMemForContainers);

  pmemCheckEnabled = conf.getBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED,
      YarnConfiguration.DEFAULT_NM_PMEM_CHECK_ENABLED);
  vmemCheckEnabled = conf.getBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED,
      YarnConfiguration.DEFAULT_NM_VMEM_CHECK_ENABLED);
  LOG.info("Physical memory check enabled: " + pmemCheckEnabled);
  LOG.info("Virtual memory check enabled: " + vmemCheckEnabled);

  nodeCpuPercentageForYARN =
      NodeManagerHardwareUtils.getNodeCpuPercentage(conf);

  if (pmemCheckEnabled) {
    // Logging if actual pmem cannot be determined.
    long totalPhysicalMemoryOnNM = UNKNOWN_MEMORY_LIMIT;
    if (this.resourceCalculatorPlugin != null) {
      totalPhysicalMemoryOnNM = this.resourceCalculatorPlugin
          .getPhysicalMemorySize();
      if (totalPhysicalMemoryOnNM <= 0) {
        LOG.warn("NodeManager's totalPmem could not be calculated. "
            + "Setting it to " + UNKNOWN_MEMORY_LIMIT);
        totalPhysicalMemoryOnNM = UNKNOWN_MEMORY_LIMIT;
      }
    }

    if (totalPhysicalMemoryOnNM != UNKNOWN_MEMORY_LIMIT &&
        this.maxPmemAllottedForContainers > totalPhysicalMemoryOnNM * 0.80f) {
      LOG.warn("NodeManager configured with "
          + TraditionalBinaryPrefix.long2String(maxPmemAllottedForContainers,
              "", 1)
          + " physical memory allocated to containers, which is more than "
          + "80% of the total physical memory available ("
          + TraditionalBinaryPrefix.long2String(totalPhysicalMemoryOnNM, "",
              1) + "). Thrashing might happen.");
    }
  }
  super.serviceInit(conf);
}
 
Example #19
Source File: DistCpV1.java    From hadoop with Apache License 2.0
static String bytesString(long b) {
  return b + " bytes (" +
      TraditionalBinaryPrefix.long2String(b, "", 1) + ")";
}
 
Example #20
Source File: ContainersMonitorImpl.java    From hadoop with Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  this.monitoringInterval =
      conf.getLong(YarnConfiguration.NM_CONTAINER_MON_INTERVAL_MS,
          YarnConfiguration.DEFAULT_NM_CONTAINER_MON_INTERVAL_MS);

  Class<? extends ResourceCalculatorPlugin> clazz =
      conf.getClass(YarnConfiguration.NM_CONTAINER_MON_RESOURCE_CALCULATOR, null,
          ResourceCalculatorPlugin.class);
  this.resourceCalculatorPlugin =
      ResourceCalculatorPlugin.getResourceCalculatorPlugin(clazz, conf);
  LOG.info(" Using ResourceCalculatorPlugin : "
      + this.resourceCalculatorPlugin);
  processTreeClass = conf.getClass(YarnConfiguration.NM_CONTAINER_MON_PROCESS_TREE, null,
          ResourceCalculatorProcessTree.class);
  this.conf = conf;
  LOG.info(" Using ResourceCalculatorProcessTree : "
      + this.processTreeClass);

  this.containerMetricsEnabled =
      conf.getBoolean(YarnConfiguration.NM_CONTAINER_METRICS_ENABLE,
          YarnConfiguration.DEFAULT_NM_CONTAINER_METRICS_ENABLE);
  this.containerMetricsPeriodMs =
      conf.getLong(YarnConfiguration.NM_CONTAINER_METRICS_PERIOD_MS,
          YarnConfiguration.DEFAULT_NM_CONTAINER_METRICS_PERIOD_MS);

  long configuredPMemForContainers = conf.getLong(
      YarnConfiguration.NM_PMEM_MB,
      YarnConfiguration.DEFAULT_NM_PMEM_MB) * 1024 * 1024L;

  long configuredVCoresForContainers = conf.getLong(
      YarnConfiguration.NM_VCORES,
      YarnConfiguration.DEFAULT_NM_VCORES);

  long configuredGCoresForContainers = conf.getLong(
      YarnConfiguration.NM_GCORES,
      YarnConfiguration.DEFAULT_NM_GCORES);

  // Setting these irrespective of whether checks are enabled. Required in
  // the UI.
  // ///////// Physical memory configuration //////
  this.maxPmemAllottedForContainers = configuredPMemForContainers;
  this.maxVCoresAllottedForContainers = configuredVCoresForContainers;
  this.maxGCoresAllottedForContainers = configuredGCoresForContainers;

  // ///////// Virtual memory configuration //////
  float vmemRatio = conf.getFloat(YarnConfiguration.NM_VMEM_PMEM_RATIO,
      YarnConfiguration.DEFAULT_NM_VMEM_PMEM_RATIO);
  Preconditions.checkArgument(vmemRatio > 0.99f,
      YarnConfiguration.NM_VMEM_PMEM_RATIO + " should be at least 1.0");
  this.maxVmemAllottedForContainers =
      (long) (vmemRatio * configuredPMemForContainers);

  pmemCheckEnabled = conf.getBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED,
      YarnConfiguration.DEFAULT_NM_PMEM_CHECK_ENABLED);
  vmemCheckEnabled = conf.getBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED,
      YarnConfiguration.DEFAULT_NM_VMEM_CHECK_ENABLED);
  LOG.info("Physical memory check enabled: " + pmemCheckEnabled);
  LOG.info("Virtual memory check enabled: " + vmemCheckEnabled);

  nodeCpuPercentageForYARN =
      NodeManagerHardwareUtils.getNodeCpuPercentage(conf);

  if (pmemCheckEnabled) {
    // Logging if actual pmem cannot be determined.
    long totalPhysicalMemoryOnNM = UNKNOWN_MEMORY_LIMIT;
    if (this.resourceCalculatorPlugin != null) {
      totalPhysicalMemoryOnNM = this.resourceCalculatorPlugin
          .getPhysicalMemorySize();
      if (totalPhysicalMemoryOnNM <= 0) {
        LOG.warn("NodeManager's totalPmem could not be calculated. "
            + "Setting it to " + UNKNOWN_MEMORY_LIMIT);
        totalPhysicalMemoryOnNM = UNKNOWN_MEMORY_LIMIT;
      }
    }

    if (totalPhysicalMemoryOnNM != UNKNOWN_MEMORY_LIMIT &&
        this.maxPmemAllottedForContainers > totalPhysicalMemoryOnNM * 0.80f) {
      LOG.warn("NodeManager configured with "
          + TraditionalBinaryPrefix.long2String(maxPmemAllottedForContainers,
              "", 1)
          + " physical memory allocated to containers, which is more than "
          + "80% of the total physical memory available ("
          + TraditionalBinaryPrefix.long2String(totalPhysicalMemoryOnNM, "",
              1) + "). Thrashing might happen.");
    }
  }
  super.serviceInit(conf);
}