org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase, the enum that identifies each phase of NameNode startup tracked by StartupProgress: LOADING_FSIMAGE, LOADING_EDITS, SAVING_CHECKPOINT, and SAFEMODE. The examples are drawn from the hadoop and big-c (a fork of Hadoop) projects and from NNAnalytics, all under the Apache License 2.0; the source file is noted above each example.
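Every example below follows the same instrumentation pattern: begin a step within a phase, set the step's total, increment a counter once per unit of work, and end the step. Here is a minimal, self-contained sketch of that pattern using the same startupprogress API; the standalone StartupProgress instance, the class name, and the fixed total of 100 are illustrative assumptions — inside the NameNode the shared instance comes from NameNode.getStartupProgress(), as the examples show.

import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;

public class PhaseUsageSketch {
  public static void main(String[] args) {
    // Illustrative standalone instance; NameNode code obtains the shared
    // instance via NameNode.getStartupProgress() instead.
    StartupProgress prog = new StartupProgress();
    prog.beginPhase(Phase.LOADING_FSIMAGE);
    Step step = new Step(StepType.INODES);
    prog.beginStep(Phase.LOADING_FSIMAGE, step);
    prog.setTotal(Phase.LOADING_FSIMAGE, step, 100L);
    Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
    for (int i = 0; i < 100; i++) {
      counter.increment();  // one unit of work completed
    }
    prog.endStep(Phase.LOADING_FSIMAGE, step);
    prog.endPhase(Phase.LOADING_FSIMAGE);
  }
}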
Example #1
Source File: DelegationTokenSecretManager.java    From hadoop / big-c with Apache License 2.0
/**
 * Private helper method to save delegation tokens in fsimage.
 */
private synchronized void saveCurrentTokens(DataOutputStream out,
    String sdPath) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_TOKENS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(currentTokens.size());
  Iterator<DelegationTokenIdentifier> iter = currentTokens.keySet()
      .iterator();
  while (iter.hasNext()) {
    DelegationTokenIdentifier id = iter.next();
    id.write(out);
    DelegationTokenInformation info = currentTokens.get(id);
    out.writeLong(info.getRenewDate());
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
 
Example #2
Source File: FSEditLogLoader.java    From hadoop / big-c with Apache License 2.0
/**
 * Load an edit log, and apply the changes to the in-memory structure.
 * This is where we apply edits that we've been writing to disk all
 * along.
 */
long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId,
    StartupOption startOpt, MetaRecoveryContext recovery) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = createStartupProgressStep(edits);
  prog.beginStep(Phase.LOADING_EDITS, step);
  fsNamesys.writeLock();
  try {
    long startTime = monotonicNow();
    FSImage.LOG.info("Start loading edits file " + edits.getName());
    long numEdits = loadEditRecords(edits, false, expectedStartingTxId,
        startOpt, recovery);
    FSImage.LOG.info("Edits file " + edits.getName() 
        + " of size " + edits.length() + " edits # " + numEdits 
        + " loaded in " + (monotonicNow()-startTime)/1000 + " seconds");
    return numEdits;
  } finally {
    edits.close();
    fsNamesys.writeUnlock();
    prog.endStep(Phase.LOADING_EDITS, step);
  }
}
 
Example #3
Source File: DelegationTokenSecretManager.java    From hadoop / big-c with Apache License 2.0
/**
 * Private helper method to load delegation keys from fsimage.
 * @throws IOException on error
 */
private synchronized void loadAllKeys(DataInput in) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_KEYS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numberOfKeys = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfKeys);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numberOfKeys; i++) {
    DelegationKey value = new DelegationKey();
    value.readFields(in);
    addKey(value);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
 
Example #4
Source File: DelegationTokenSecretManager.java    From hadoop / big-c with Apache License 2.0
/**
 * Private helper method to load delegation tokens from fsimage.
 */
private synchronized void loadCurrentTokens(DataInput in)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_TOKENS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numberOfTokens = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfTokens);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numberOfTokens; i++) {
    DelegationTokenIdentifier id = new DelegationTokenIdentifier();
    id.readFields(in);
    long expiryTime = in.readLong();
    addPersistedDelegationToken(id, expiryTime);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
 
Example #5
Source File: DelegationTokenSecretManager.java    From hadoop / big-c with Apache License 2.0
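/**
 * Private helper method to save delegation keys in fsimage.
 */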
private synchronized void saveAllKeys(DataOutputStream out, String sdPath)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_KEYS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  // Note: the step total should track allKeys, the collection written below.
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, allKeys.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(allKeys.size());
  Iterator<Integer> iter = allKeys.keySet().iterator();
  while (iter.hasNext()) {
    Integer key = iter.next();
    allKeys.get(key).write(out);
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
 
Example #6
Source File: CacheManager.java    From hadoop / big-c with Apache License 2.0
/**
 * Load cache directives from the fsimage
 */
private void loadDirectives(DataInput in) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.CACHE_ENTRIES);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numDirectives = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numDirectives);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numDirectives; i++) {
    CacheDirectiveInfo info = FSImageSerialization.readCacheDirectiveInfo(in);
    // Get pool reference by looking it up in the map
    final String poolName = info.getPool();
    CacheDirective directive =
        new CacheDirective(info.getId(), info.getPath().toUri().getPath(),
            info.getReplication(), info.getExpiration().getAbsoluteMillis());
    addCacheDirective(poolName, directive);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
 
Example #7
Source File: StartupProgressMetrics.java    From hadoop / big-c with Apache License 2.0
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
  StartupProgressView prog = startupProgress.createView();
  MetricsRecordBuilder builder = collector.addRecord(
    STARTUP_PROGRESS_METRICS_INFO);

  builder.addCounter(info("ElapsedTime", "overall elapsed time"),
    prog.getElapsedTime());
  builder.addGauge(info("PercentComplete", "overall percent complete"),
    prog.getPercentComplete());

  for (Phase phase: prog.getPhases()) {
    addCounter(builder, phase, "Count", " count", prog.getCount(phase));
    addCounter(builder, phase, "ElapsedTime", " elapsed time",
      prog.getElapsedTime(phase));
    addCounter(builder, phase, "Total", " total", prog.getTotal(phase));
    addGauge(builder, phase, "PercentComplete", " percent complete",
      prog.getPercentComplete(phase));
  }
}
 
Example #8
Source File: CacheManager.java    From hadoop / big-c with Apache License 2.0
/**
 * Load cache pools from fsimage
 */
private void loadPools(DataInput in)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.CACHE_POOLS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numberOfPools = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfPools);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numberOfPools; i++) {
    addCachePool(FSImageSerialization.readCachePoolInfo(in));
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
 
Example #9
Source File: CacheManager.java    From hadoop / big-c with Apache License 2.0
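/**
 * Save cache directives to fsimage.
 */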
private void saveDirectives(DataOutputStream out, String sdPath)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.CACHE_ENTRIES, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, directivesById.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(directivesById.size());
  for (CacheDirective directive : directivesById.values()) {
    FSImageSerialization.writeCacheDirectiveInfo(out, directive.toInfo());
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
 
Example #10
Source File: CacheManager.java    From hadoop / big-c with Apache License 2.0
/**
 * Save cache pools to fsimage
 */
private void savePools(DataOutputStream out,
    String sdPath) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.CACHE_POOLS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, cachePools.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(cachePools.size());
  for (CachePool pool: cachePools.values()) {
    FSImageSerialization.writeCachePoolInfo(out, pool.getInfo(true));
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
 
Example #11
Source File: FSImage.java    From hadoop / big-c with Apache License 2.0
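/**
 * Load edits from the given streams and apply them to the namespace.
 * @return the number of transactions applied across all streams
 */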
private long loadEdits(Iterable<EditLogInputStream> editStreams,
    FSNamesystem target, StartupOption startOpt, MetaRecoveryContext recovery)
    throws IOException {
  LOG.debug("About to load edits:\n  " + Joiner.on("\n  ").join(editStreams));
  StartupProgress prog = NameNode.getStartupProgress();
  prog.beginPhase(Phase.LOADING_EDITS);
  
  long prevLastAppliedTxId = lastAppliedTxId;  
  try {    
    FSEditLogLoader loader = new FSEditLogLoader(target, lastAppliedTxId);
    
    // Load latest edits
    for (EditLogInputStream editIn : editStreams) {
      LOG.info("Reading " + editIn + " expecting start txid #" +
            (lastAppliedTxId + 1));
      try {
        loader.loadFSEdits(editIn, lastAppliedTxId + 1, startOpt, recovery);
      } finally {
        // Update lastAppliedTxId even in case of error, since some ops may
        // have been successfully applied before the error.
        lastAppliedTxId = loader.getLastAppliedTxId();
      }
      // If we are in recovery mode, we may have skipped over some txids.
      if (editIn.getLastTxId() != HdfsConstants.INVALID_TXID) {
        lastAppliedTxId = editIn.getLastTxId();
      }
    }
  } finally {
    FSEditLog.closeAllStreams(editStreams);
    // update the counts
    updateCountForQuota(target.getBlockManager().getStoragePolicySuite(),
        target.dir.rootDir);
  }
  prog.endPhase(Phase.LOADING_EDITS);
  return lastAppliedTxId - prevLastAppliedTxId;
}
 
Example #12
Source File: NameNodeLoader.java    From NNAnalytics with Apache License 2.0
/**
 * Sends the loading status as JSON to the parameter HTTP response. Copied from NameNode.
 *
 * @param resp the HTTP response
 * @throws IOException error in fetching loading status
 */
public void sendLoadingStatus(HttpServletResponse resp) throws IOException {
  String count = "count";
  String elapsedTime = "elapsedTime";
  String file = "file";
  String name = "name";
  String desc = "desc";
  String percentComplete = "percentComplete";
  String phases = "phases";
  String size = "size";
  String status = "status";
  String steps = "steps";
  String total = "total";

  StartupProgressView view = NameNode.getStartupProgress().createView();
  JsonGenerator json =
      new JsonFactory().createJsonGenerator(resp.getWriter()).useDefaultPrettyPrinter();

  try {
    json.writeStartObject();
    json.writeNumberField(elapsedTime, view.getElapsedTime());
    json.writeNumberField(percentComplete, view.getPercentComplete());
    json.writeArrayFieldStart(phases);

    for (Phase phase : view.getPhases()) {
      json.writeStartObject();
      json.writeStringField(name, phase.getName());
      json.writeStringField(desc, phase.getDescription());
      json.writeStringField(status, view.getStatus(phase).toString());
      json.writeNumberField(percentComplete, view.getPercentComplete(phase));
      json.writeNumberField(elapsedTime, view.getElapsedTime(phase));
      writeStringFieldIfNotNull(json, file, view.getFile(phase));
      writeNumberFieldIfDefined(json, size, view.getSize(phase));
      json.writeArrayFieldStart(steps);

      for (Step step : view.getSteps(phase)) {
        json.writeStartObject();
        StepType stepType = step.getType();
        if (stepType != null) {
          json.writeStringField(name, stepType.getName());
          json.writeStringField(desc, stepType.getDescription());
        }
        json.writeNumberField(count, view.getCount(phase, step));
        writeStringFieldIfNotNull(json, file, step.getFile());
        writeNumberFieldIfDefined(json, size, step.getSize());
        json.writeNumberField(total, view.getTotal(phase, step));
        json.writeNumberField(percentComplete, view.getPercentComplete(phase, step));
        json.writeNumberField(elapsedTime, view.getElapsedTime(phase, step));
        json.writeEndObject();
      }

      json.writeEndArray();
      json.writeEndObject();
    }

    json.writeEndArray();
    json.writeEndObject();
  } finally {
    IOUtils.closeStream(json);
  }
}
 
Example #13
Source File: FSImageFormat.java    From hadoop / big-c with Apache License 2.0
void save(File newFile, FSImageCompression compression) throws IOException {
  checkNotSaved();

  final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
  final INodeDirectory rootDir = sourceNamesystem.dir.rootDir;
  final long numINodes = rootDir.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getNameSpace();
  String sdPath = newFile.getParentFile().getParentFile().getAbsolutePath();
  Step step = new Step(StepType.INODES, sdPath);
  StartupProgress prog = NameNode.getStartupProgress();
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, numINodes);
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  long startTime = monotonicNow();
  //
  // Write out data
  //
  MessageDigest digester = MD5Hash.getDigester();
  FileOutputStream fout = new FileOutputStream(newFile);
  DigestOutputStream fos = new DigestOutputStream(fout, digester);
  DataOutputStream out = new DataOutputStream(fos);
  try {
    out.writeInt(LAYOUT_VERSION);
    LayoutFlags.write(out);
    // We use the non-locked version of getNamespaceInfo here since
    // the coordinating thread of saveNamespace already has read-locked
    // the namespace for us. If we attempt to take another readlock
    // from the actual saver thread, there's a potential of a
    // fairness-related deadlock. See the comments on HDFS-2223.
    out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo()
        .getNamespaceID());
    out.writeLong(numINodes);
    out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampV1());
    out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampV2());
    out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampAtblockIdSwitch());
    out.writeLong(sourceNamesystem.getBlockIdManager().getLastAllocatedBlockId());
    out.writeLong(context.getTxId());
    out.writeLong(sourceNamesystem.dir.getLastInodeId());


    sourceNamesystem.getSnapshotManager().write(out);

    // write compression info and set up compressed stream
    out = compression.writeHeaderAndWrapStream(fos);
    LOG.info("Saving image file " + newFile +
             " using " + compression);

    // save the root
    saveINode2Image(rootDir, out, false, referenceMap, counter);
    // save the rest of the nodes
    saveImage(rootDir, out, true, false, counter);
    prog.endStep(Phase.SAVING_CHECKPOINT, step);
    // Now that the step is finished, set counter equal to total to adjust
    // for possible under-counting due to reference inodes.
    prog.setCount(Phase.SAVING_CHECKPOINT, step, numINodes);
    // save files under construction
    // TODO: for HDFS-5428, since we cannot break the compatibility of
    // fsimage, we store part of the under-construction files that are only
    // in snapshots in this "under-construction-file" section. As a
    // temporary solution, we use "/.reserved/.inodes/<inodeid>" as their
    // paths, so that when loading fsimage we do not put them into the lease
    // map. In the future, we can remove this hack when we can bump the
    // layout version.
    sourceNamesystem.saveFilesUnderConstruction(out, snapshotUCMap);

    context.checkCancelled();
    sourceNamesystem.saveSecretManagerStateCompat(out, sdPath);
    context.checkCancelled();
    sourceNamesystem.getCacheManager().saveStateCompat(out, sdPath);
    context.checkCancelled();
    out.flush();
    context.checkCancelled();
    fout.getChannel().force(true);
  } finally {
    out.close();
  }

  saved = true;
  // set md5 of the saved image
  savedDigest = new MD5Hash(digester.digest());

  LOG.info("Image file " + newFile + " of size " + newFile.length()
      + " bytes saved in " + (monotonicNow() - startTime) / 1000
      + " seconds.");
}
 
Example #14
Source File: StartupProgressServlet.java    From hadoop / big-c with Apache License 2.0
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
    throws IOException {
  resp.setContentType("application/json; charset=UTF-8");
  StartupProgress prog = NameNodeHttpServer.getStartupProgressFromContext(
    getServletContext());
  StartupProgressView view = prog.createView();
  JsonGenerator json = new JsonFactory().createJsonGenerator(resp.getWriter());
  try {
    json.writeStartObject();
    json.writeNumberField(ELAPSED_TIME, view.getElapsedTime());
    json.writeNumberField(PERCENT_COMPLETE, view.getPercentComplete());
    json.writeArrayFieldStart(PHASES);

    for (Phase phase: view.getPhases()) {
      json.writeStartObject();
      json.writeStringField(NAME, phase.getName());
      json.writeStringField(DESC, phase.getDescription());
      json.writeStringField(STATUS, view.getStatus(phase).toString());
      json.writeNumberField(PERCENT_COMPLETE, view.getPercentComplete(phase));
      json.writeNumberField(ELAPSED_TIME, view.getElapsedTime(phase));
      writeStringFieldIfNotNull(json, FILE, view.getFile(phase));
      writeNumberFieldIfDefined(json, SIZE, view.getSize(phase));
      json.writeArrayFieldStart(STEPS);

      for (Step step: view.getSteps(phase)) {
        json.writeStartObject();
        StepType type = step.getType();
        if (type != null) {
          json.writeStringField(NAME, type.getName());
          json.writeStringField(DESC, type.getDescription());
        }
        json.writeNumberField(COUNT, view.getCount(phase, step));
        writeStringFieldIfNotNull(json, FILE, step.getFile());
        writeNumberFieldIfDefined(json, SIZE, step.getSize());
        json.writeNumberField(TOTAL, view.getTotal(phase, step));
        json.writeNumberField(PERCENT_COMPLETE, view.getPercentComplete(phase,
          step));
        json.writeNumberField(ELAPSED_TIME, view.getElapsedTime(phase, step));
        json.writeEndObject();
      }

      json.writeEndArray();
      json.writeEndObject();
    }

    json.writeEndArray();
    json.writeEndObject();
  } finally {
    IOUtils.cleanup(LOG, json);
  }
}
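
For reference, this servlet is registered at /startupProgress on the NameNode HTTP server and produces JSON of the following shape. The field names match the constants used in the code above; the concrete values here are invented for illustration:

{
  "elapsedTime" : 5029,
  "percentComplete" : 1.0,
  "phases" : [ {
    "name" : "LoadingFsImage",
    "desc" : "Loading fsimage",
    "status" : "COMPLETE",
    "percentComplete" : 1.0,
    "elapsedTime" : 927,
    "steps" : [ {
      "name" : "Inodes",
      "desc" : "inodes",
      "count" : 100,
      "total" : 100,
      "percentComplete" : 1.0,
      "elapsedTime" : 903
    } ]
  } ]
}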
 
Example #15
Source File: StartupProgressMetrics.java    From hadoop with Apache License 2.0
/**
 * Adds a gauge with a name built by using the specified phase's name as prefix
 * and then appending the specified suffix.
 * 
 * @param builder MetricsRecordBuilder to receive the gauge
 * @param phase Phase to add
 * @param nameSuffix String suffix of metric name
 * @param descSuffix String suffix of metric description
 * @param value float gauge value
 */
private static void addGauge(MetricsRecordBuilder builder, Phase phase,
    String nameSuffix, String descSuffix, float value) {
  MetricsInfo metricsInfo = info(phase.getName() + nameSuffix,
    phase.getDescription() + descSuffix);
  builder.addGauge(metricsInfo, value);
}
 
Example #16
Source File: StartupProgressMetrics.java    From hadoop / big-c with Apache License 2.0
/**
 * Adds a counter with a name built by using the specified phase's name as
 * prefix and then appending the specified suffix.
 * 
 * @param builder MetricsRecordBuilder to receive the counter
 * @param phase Phase to add
 * @param nameSuffix String suffix of metric name
 * @param descSuffix String suffix of metric description
 * @param value long counter value
 */
private static void addCounter(MetricsRecordBuilder builder, Phase phase,
    String nameSuffix, String descSuffix, long value) {
  MetricsInfo metricsInfo = info(phase.getName() + nameSuffix,
    phase.getDescription() + descSuffix);
  builder.addCounter(metricsInfo, value);
}
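
Together with getMetrics shown earlier, these two helpers emit a Count, ElapsedTime, and Total counter plus a PercentComplete gauge for every phase, named by concatenating the phase's display name with the suffix. Here is a tiny sketch of the resulting names, assuming the standard display name of Phase.LOADING_FSIMAGE is "LoadingFsImage":

import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;

public class MetricNameSketch {
  public static void main(String[] args) {
    // Mirrors the concatenation done by addCounter/addGauge; expected output
    // is "LoadingFsImageCount" and "Loading fsimage count".
    System.out.println(Phase.LOADING_FSIMAGE.getName() + "Count");
    System.out.println(Phase.LOADING_FSIMAGE.getDescription() + " count");
  }
}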
 