org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType, drawn from the Hadoop HDFS NameNode and related projects. Each example notes its source file, originating project, and license.
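Every example follows the same StartupProgress lifecycle: construct a Step tagged with a StepType constant (plus the storage-directory path when saving a checkpoint), begin the step within a Phase, declare the expected total, increment a Counter once per item processed, and end the step. The StepType constants exercised below are CACHE_ENTRIES, CACHE_POOLS, DELEGATION_KEYS, DELEGATION_TOKENS, and INODES. Here is a minimal sketch of that lifecycle; the method name, step type, and item count are illustrative, and the imports match the examples that follow:

private void loadItems(DataInput in) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.INODES);            // any StepType constant
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numItems = in.readInt();                      // expected item count
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numItems);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numItems; i++) {
    // ... read and apply one item ...
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}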
Example #1
Source File: DelegationTokenSecretManager.java    From hadoop with Apache License 2.0
/**
 * Private helper method to save delegation tokens to the fsimage.
 */
private synchronized void saveCurrentTokens(DataOutputStream out,
    String sdPath) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_TOKENS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(currentTokens.size());
  Iterator<DelegationTokenIdentifier> iter = currentTokens.keySet()
      .iterator();
  while (iter.hasNext()) {
    DelegationTokenIdentifier id = iter.next();
    id.write(out);
    DelegationTokenInformation info = currentTokens.get(id);
    out.writeLong(info.getRenewDate());
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
 
Example #2
Source File: CacheManager.java    From hadoop with Apache License 2.0
/**
 * Load cache directives from the fsimage
 */
private void loadDirectives(DataInput in) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.CACHE_ENTRIES);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numDirectives = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numDirectives);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numDirectives; i++) {
    CacheDirectiveInfo info = FSImageSerialization.readCacheDirectiveInfo(in);
    // Get pool reference by looking it up in the map
    final String poolName = info.getPool();
    CacheDirective directive =
        new CacheDirective(info.getId(), info.getPath().toUri().getPath(),
            info.getReplication(), info.getExpiration().getAbsoluteMillis());
    addCacheDirective(poolName, directive);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
 
Example #3
Source File: DelegationTokenSecretManager.java    From hadoop with Apache License 2.0
/**
 * Private helper method to load delegation keys from fsimage.
 * @throws IOException on error
 */
private synchronized void loadAllKeys(DataInput in) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_KEYS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numberOfKeys = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfKeys);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numberOfKeys; i++) {
    DelegationKey value = new DelegationKey();
    value.readFields(in);
    addKey(value);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
 
Example #4
Source File: DelegationTokenSecretManager.java    From hadoop with Apache License 2.0
/**
 * Private helper method to load delegation tokens from the fsimage.
 */
private synchronized void loadCurrentTokens(DataInput in)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_TOKENS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numberOfTokens = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfTokens);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numberOfTokens; i++) {
    DelegationTokenIdentifier id = new DelegationTokenIdentifier();
    id.readFields(in);
    long expiryTime = in.readLong();
    addPersistedDelegationToken(id, expiryTime);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
 
Example #5
Source File: DelegationTokenSecretManager.java    From hadoop with Apache License 2.0
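/**
 * Private helper method to save delegation keys to the fsimage.
 */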
private synchronized void saveAllKeys(DataOutputStream out, String sdPath)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_KEYS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  // This step saves delegation keys, so the total is sized from allKeys.
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, allKeys.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(allKeys.size());
  Iterator<Integer> iter = allKeys.keySet().iterator();
  while (iter.hasNext()) {
    Integer key = iter.next();
    allKeys.get(key).write(out);
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
 
Example #6
Source File: CacheManager.java    From hadoop with Apache License 2.0
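/**
 * Save cache directives to the fsimage.
 */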
private void saveDirectives(DataOutputStream out, String sdPath)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.CACHE_ENTRIES, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, directivesById.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(directivesById.size());
  for (CacheDirective directive : directivesById.values()) {
    FSImageSerialization.writeCacheDirectiveInfo(out, directive.toInfo());
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
 
Example #7
Source File: CacheManager.java    From hadoop with Apache License 2.0
/**
 * Load cache pools from fsimage
 */
private void loadPools(DataInput in)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.CACHE_POOLS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numberOfPools = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfPools);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numberOfPools; i++) {
    addCachePool(FSImageSerialization.readCachePoolInfo(in));
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
 
Example #8
Source File: CacheManager.java    From hadoop with Apache License 2.0
/**
 * Save cache pools to fsimage
 */
private void savePools(DataOutputStream out,
    String sdPath) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.CACHE_POOLS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, cachePools.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(cachePools.size());
  for (CachePool pool: cachePools.values()) {
    FSImageSerialization.writeCachePoolInfo(out, pool.getInfo(true));
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
 
Example #9
Source File: NameNodeLoader.java    From NNAnalytics with Apache License 2.0
/**
 * Sends the loading status as JSON to the given HTTP response. Copied from NameNode.
 *
 * @param resp the HTTP response
 * @throws IOException error in fetching loading status
 */
public void sendLoadingStatus(HttpServletResponse resp) throws IOException {
  String count = "count";
  String elapsedTime = "elapsedTime";
  String file = "file";
  String name = "name";
  String desc = "desc";
  String percentComplete = "percentComplete";
  String phases = "phases";
  String size = "size";
  String status = "status";
  String steps = "steps";
  String total = "total";

  StartupProgressView view = NameNode.getStartupProgress().createView();
  JsonGenerator json =
      new JsonFactory().createJsonGenerator(resp.getWriter()).useDefaultPrettyPrinter();

  try {
    json.writeStartObject();
    json.writeNumberField(elapsedTime, view.getElapsedTime());
    json.writeNumberField(percentComplete, view.getPercentComplete());
    json.writeArrayFieldStart(phases);

    for (Phase phase : view.getPhases()) {
      json.writeStartObject();
      json.writeStringField(name, phase.getName());
      json.writeStringField(desc, phase.getDescription());
      json.writeStringField(status, view.getStatus(phase).toString());
      json.writeNumberField(percentComplete, view.getPercentComplete(phase));
      json.writeNumberField(elapsedTime, view.getElapsedTime(phase));
      writeStringFieldIfNotNull(json, file, view.getFile(phase));
      writeNumberFieldIfDefined(json, size, view.getSize(phase));
      json.writeArrayFieldStart(steps);

      for (Step step : view.getSteps(phase)) {
        json.writeStartObject();
        StepType stepType = step.getType();
        if (stepType != null) {
          json.writeStringField(name, stepType.getName());
          json.writeStringField(desc, stepType.getDescription());
        }
        json.writeNumberField(count, view.getCount(phase, step));
        writeStringFieldIfNotNull(json, file, step.getFile());
        writeNumberFieldIfDefined(json, size, step.getSize());
        json.writeNumberField(total, view.getTotal(phase, step));
        json.writeNumberField(percentComplete, view.getPercentComplete(phase, step));
        json.writeNumberField(elapsedTime, view.getElapsedTime(phase, step));
        json.writeEndObject();
      }

      json.writeEndArray();
      json.writeEndObject();
    }

    json.writeEndArray();
    json.writeEndObject();
  } finally {
    IOUtils.closeStream(json);
  }
}
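For reference, the JSON document this method produces has the shape sketched below. The field names come from the code above; the concrete values and the phase/step names are illustrative, and the optional file and size fields appear only when defined:

{
  "elapsedTime" : 5000,
  "percentComplete" : 0.5,
  "phases" : [ {
    "name" : "LoadingFsImage",
    "desc" : "Loading fsimage",
    "status" : "RUNNING",
    "percentComplete" : 0.5,
    "elapsedTime" : 5000,
    "steps" : [ {
      "name" : "Inodes",
      "desc" : "inodes",
      "count" : 100,
      "total" : 200,
      "percentComplete" : 0.5,
      "elapsedTime" : 5000
    } ]
  } ]
}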
 
Example #10
Source File: FSImageFormat.java    From hadoop with Apache License 2.0
void save(File newFile, FSImageCompression compression) throws IOException {
  checkNotSaved();

  final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
  final INodeDirectory rootDir = sourceNamesystem.dir.rootDir;
  final long numINodes = rootDir.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getNameSpace();
  String sdPath = newFile.getParentFile().getParentFile().getAbsolutePath();
  Step step = new Step(StepType.INODES, sdPath);
  StartupProgress prog = NameNode.getStartupProgress();
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, numINodes);
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  long startTime = monotonicNow();
  //
  // Write out data
  //
  MessageDigest digester = MD5Hash.getDigester();
  FileOutputStream fout = new FileOutputStream(newFile);
  DigestOutputStream fos = new DigestOutputStream(fout, digester);
  DataOutputStream out = new DataOutputStream(fos);
  try {
    out.writeInt(LAYOUT_VERSION);
    LayoutFlags.write(out);
    // We use the non-locked version of getNamespaceInfo here since
    // the coordinating thread of saveNamespace already has read-locked
    // the namespace for us. If we attempt to take another readlock
    // from the actual saver thread, there's a potential of a
    // fairness-related deadlock. See the comments on HDFS-2223.
    out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo()
        .getNamespaceID());
    out.writeLong(numINodes);
    out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampV1());
    out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampV2());
    out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampAtblockIdSwitch());
    out.writeLong(sourceNamesystem.getBlockIdManager().getLastAllocatedBlockId());
    out.writeLong(context.getTxId());
    out.writeLong(sourceNamesystem.dir.getLastInodeId());


    sourceNamesystem.getSnapshotManager().write(out);

    // write compression info and set up compressed stream
    out = compression.writeHeaderAndWrapStream(fos);
    LOG.info("Saving image file " + newFile +
             " using " + compression);

    // save the root
    saveINode2Image(rootDir, out, false, referenceMap, counter);
    // save the rest of the nodes
    saveImage(rootDir, out, true, false, counter);
    prog.endStep(Phase.SAVING_CHECKPOINT, step);
    // Now that the step is finished, set counter equal to total to adjust
    // for possible under-counting due to reference inodes.
    prog.setCount(Phase.SAVING_CHECKPOINT, step, numINodes);
    // save files under construction
    // TODO: for HDFS-5428, since we cannot break the compatibility of
    // fsimage, we store part of the under-construction files that are only
    // in snapshots in this "under-construction-file" section. As a
    // temporary solution, we use "/.reserved/.inodes/<inodeid>" as their
    // paths, so that when loading fsimage we do not put them into the lease
    // map. In the future, we can remove this hack when we can bump the
    // layout version.
    sourceNamesystem.saveFilesUnderConstruction(out, snapshotUCMap);

    context.checkCancelled();
    sourceNamesystem.saveSecretManagerStateCompat(out, sdPath);
    context.checkCancelled();
    sourceNamesystem.getCacheManager().saveStateCompat(out, sdPath);
    context.checkCancelled();
    out.flush();
    context.checkCancelled();
    fout.getChannel().force(true);
  } finally {
    out.close();
  }

  saved = true;
  // set md5 of the saved image
  savedDigest = new MD5Hash(digester.digest());

  LOG.info("Image file " + newFile + " of size " + newFile.length()
      + " bytes saved in " + (monotonicNow() - startTime) / 1000
      + " seconds.");
}
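One detail worth noting in this method (an observation about the pattern, not part of the Hadoop source): endStep is called inline within the try block, so an I/O failure while writing inodes would leave the SAVING_CHECKPOINT step permanently open in the progress view. A defensive sketch that guarantees the step is closed:

prog.beginStep(Phase.SAVING_CHECKPOINT, step);
try {
  // ... write the image, incrementing the counter once per inode ...
} finally {
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}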
 
Example #11
Source File: StartupProgressServlet.java    From hadoop with Apache License 2.0
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
    throws IOException {
  resp.setContentType("application/json; charset=UTF-8");
  StartupProgress prog = NameNodeHttpServer.getStartupProgressFromContext(
    getServletContext());
  StartupProgressView view = prog.createView();
  JsonGenerator json = new JsonFactory().createJsonGenerator(resp.getWriter());
  try {
    json.writeStartObject();
    json.writeNumberField(ELAPSED_TIME, view.getElapsedTime());
    json.writeNumberField(PERCENT_COMPLETE, view.getPercentComplete());
    json.writeArrayFieldStart(PHASES);

    for (Phase phase: view.getPhases()) {
      json.writeStartObject();
      json.writeStringField(NAME, phase.getName());
      json.writeStringField(DESC, phase.getDescription());
      json.writeStringField(STATUS, view.getStatus(phase).toString());
      json.writeNumberField(PERCENT_COMPLETE, view.getPercentComplete(phase));
      json.writeNumberField(ELAPSED_TIME, view.getElapsedTime(phase));
      writeStringFieldIfNotNull(json, FILE, view.getFile(phase));
      writeNumberFieldIfDefined(json, SIZE, view.getSize(phase));
      json.writeArrayFieldStart(STEPS);

      for (Step step: view.getSteps(phase)) {
        json.writeStartObject();
        StepType type = step.getType();
        if (type != null) {
          json.writeStringField(NAME, type.getName());
          json.writeStringField(DESC, type.getDescription());
        }
        json.writeNumberField(COUNT, view.getCount(phase, step));
        writeStringFieldIfNotNull(json, FILE, step.getFile());
        writeNumberFieldIfDefined(json, SIZE, step.getSize());
        json.writeNumberField(TOTAL, view.getTotal(phase, step));
        json.writeNumberField(PERCENT_COMPLETE, view.getPercentComplete(phase,
          step));
        json.writeNumberField(ELAPSED_TIME, view.getElapsedTime(phase, step));
        json.writeEndObject();
      }

      json.writeEndArray();
      json.writeEndObject();
    }

    json.writeEndArray();
    json.writeEndObject();
  } finally {
    IOUtils.cleanup(LOG, json);
  }
}
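This servlet backs the NameNode's startup-progress HTTP endpoint. Below is a minimal client-side sketch that fetches and prints the JSON; the /startupProgress path and the host/port are assumptions for illustration, not taken from the code above:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;

public class StartupProgressClient {
  public static void main(String[] args) throws Exception {
    // Endpoint is an assumption; substitute your NameNode's HTTP address.
    URL url = new URL("http://namenode-host:50070/startupProgress");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(url.openStream(), "UTF-8"))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);
      }
    }
  }
}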