org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics. Each example is drawn from an open-source Hadoop project; the source file and originating project are noted above it.
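
Most of the examples below share one idiom: time an operation with Time.monotonicNow(), fetch the process-wide metrics object through NameNode.getNameNodeMetrics(), and record the elapsed time only if that object is non-null (it is null when the code runs outside a live NameNode, for example in some tests). A minimal sketch of the idiom, not taken from any one project, with the timed work elided:

// A minimal sketch of the record-if-present idiom used throughout the
// examples below. addBlockReport is one of several timing counters on
// NameNodeMetrics that appear in these examples.
void timedOperation() {
  final long startTime = Time.monotonicNow();
  // ... process a block report, cache report, image upload, etc. ...
  final long endTime = Time.monotonicNow();

  final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
  if (metrics != null) {  // null when running outside a live NameNode
    metrics.addBlockReport((int) (endTime - startTime));
  }
}
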
Example #1
Source File: CacheManager.java    From big-c with Apache License 2.0 (identical in hadoop)
public final void processCacheReport(final DatanodeID datanodeID,
    final List<Long> blockIds) throws IOException {
  namesystem.writeLock();
  final long startTime = Time.monotonicNow();
  final long endTime;
  try {
    final DatanodeDescriptor datanode = 
        blockManager.getDatanodeManager().getDatanode(datanodeID);
    if (datanode == null || !datanode.isAlive) {
      throw new IOException(
          "processCacheReport from dead or unregistered datanode: " +
          datanodeID);
    }
    processCacheReportImpl(datanode, blockIds);
  } finally {
    endTime = Time.monotonicNow();
    namesystem.writeUnlock();
  }

  // Log the cache report processing stats from the NameNode's perspective
  final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
  if (metrics != null) {
    metrics.addCacheBlockReport((int) (endTime - startTime));
  }
  LOG.debug("Processed cache report from {}, blocks: {}, " +
      "processing time: {} msecs", datanodeID, blockIds.size(), 
      (endTime - startTime));
}
 
Example #2
Source File: NameNode.java    From hadoop-gpu with Apache License 2.0
/**
 * Initialize name-node.
 * 
 * @param conf the configuration
 */
private void initialize(Configuration conf) throws IOException {
  InetSocketAddress socAddr = NameNode.getAddress(conf);
  int handlerCount = conf.getInt("dfs.namenode.handler.count", 10);
  
  // set service-level authorization security policy
  if (serviceAuthEnabled = 
        conf.getBoolean(
          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    PolicyProvider policyProvider = 
      (PolicyProvider)(ReflectionUtils.newInstance(
          conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
              HDFSPolicyProvider.class, PolicyProvider.class), 
          conf));
    SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
  }

  // create rpc server 
  this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
                              handlerCount, false, conf);

  // The rpc-server port can be ephemeral... ensure we have the correct info
  this.serverAddress = this.server.getListenerAddress(); 
  FileSystem.setDefaultUri(conf, getUri(serverAddress));
  LOG.info("Namenode up at: " + this.serverAddress);

  myMetrics = new NameNodeMetrics(conf, this);

  this.namesystem = new FSNamesystem(this, conf);
  startHttpServer(conf);
  this.server.start();  //start RPC server   
  startTrashEmptier(conf);
}
 
Example #3
Source File: TestEditLog.java    From big-c with Apache License 2.0 (identical in hadoop)
/**
 * Regression test for HDFS-1112/HDFS-3020. Ensures that, even if
 * logSync isn't called periodically, the edit log will sync itself.
 */
@Test
public void testAutoSync() throws Exception {
  File logDir = new File(TEST_DIR, "testAutoSync");
  logDir.mkdirs();
  FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);
  
  String oneKB = StringUtils.byteToHexString(
      new byte[500]);
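  // byteToHexString encodes 500 raw bytes as a 1,000-character hex string,
  // so each logDelete call below appends roughly 1KB to the edit log buffer.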
  
  try {
    log.openForWrite();
    NameNodeMetrics mockMetrics = Mockito.mock(NameNodeMetrics.class);
    log.setMetricsForTests(mockMetrics);

    for (int i = 0; i < 400; i++) {
      log.logDelete(oneKB, 1L, false);
    }
    // After ~400KB, we're still within the 512KB buffer size
    Mockito.verify(mockMetrics, Mockito.times(0)).addSync(Mockito.anyLong());
    
    // After ~400KB more, we should have done an automatic sync
    for (int i = 0; i < 400; i++) {
      log.logDelete(oneKB, 1L, false);
    }
    Mockito.verify(mockMetrics, Mockito.times(1)).addSync(Mockito.anyLong());

  } finally {
    log.close();
  }
}
 
Example #4
Source File: FSEditLog.java    From hadoop with Apache License 2.0 (identical in big-c)
/**
 * Used only by tests.
 */
@VisibleForTesting
void setMetricsForTests(NameNodeMetrics metrics) {
  this.metrics = metrics;
}
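
As the testAutoSync example above demonstrates, this seam lets a test inject a Mockito mock and then assert on sync behavior without a running NameNode. A condensed sketch of that usage, assuming log is a standalone FSEditLog opened for write as in the tests above:

// Condensed from the testAutoSync example above; assumes log was created
// with FSImageTestUtil.createStandaloneEditLog and opened for write.
NameNodeMetrics mockMetrics = Mockito.mock(NameNodeMetrics.class);
log.setMetricsForTests(mockMetrics);

// ... perform enough edits to overflow the edit-log buffer ...

// Exactly one automatic sync should have been recorded.
Mockito.verify(mockMetrics, Mockito.times(1)).addSync(Mockito.anyLong());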
 
Example #5
Source File: BlockManager.java    From hadoop with Apache License 2.0 (identical in big-c)
/**
 * The given storage is reporting all its blocks.
 * Update the (storage-->block list) and (block-->storage list) maps.
 *
 * @return true if all known storages of the given DN have finished reporting.
 * @throws IOException
 */
public boolean processReport(final DatanodeID nodeID,
    final DatanodeStorage storage,
    final BlockListAsLongs newReport, BlockReportContext context,
    boolean lastStorageInRpc) throws IOException {
  namesystem.writeLock();
  final long startTime = Time.monotonicNow(); //after acquiring write lock
  final long endTime;
  DatanodeDescriptor node;
  Collection<Block> invalidatedBlocks = null;

  try {
    node = datanodeManager.getDatanode(nodeID);
    if (node == null || !node.isAlive) {
      throw new IOException(
          "ProcessReport from dead or unregistered node: " + nodeID);
    }

    // To minimize startup time, we discard any second (or later) block reports
    // that we receive while still in startup phase.
    DatanodeStorageInfo storageInfo = node.getStorageInfo(storage.getStorageID());

    if (storageInfo == null) {
      // We handle this for backwards compatibility.
      storageInfo = node.updateStorage(storage);
    }
    if (namesystem.isInStartupSafeMode()
        && storageInfo.getBlockReportCount() > 0) {
      blockLog.info("BLOCK* processReport: "
          + "discarded non-initial block report from {}"
          + " because namenode still in startup phase", nodeID);
      return !node.hasStaleStorages();
    }

    if (storageInfo.getBlockReportCount() == 0) {
      // The first block report can be processed a lot more efficiently than
      // ordinary block reports.  This shortens restart times.
      processFirstBlockReport(storageInfo, newReport);
    } else {
      invalidatedBlocks = processReport(storageInfo, newReport);
    }
    
    storageInfo.receivedBlockReport();
    if (context != null) {
      storageInfo.setLastBlockReportId(context.getReportId());
      if (lastStorageInRpc) {
        int rpcsSeen = node.updateBlockReportContext(context);
        if (rpcsSeen >= context.getTotalRpcs()) {
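          // All RPCs in this block report have now been seen; any storage
          // that appeared in none of them is treated as a zombie below and
          // its replicas are removed.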
          List<DatanodeStorageInfo> zombies = node.removeZombieStorages();
          if (zombies.isEmpty()) {
            LOG.debug("processReport 0x{}: no zombie storages found.",
                Long.toHexString(context.getReportId()));
          } else {
            for (DatanodeStorageInfo zombie : zombies) {
              removeZombieReplicas(context, zombie);
            }
          }
          node.clearBlockReportContext();
        } else {
          LOG.debug("processReport 0x{}: {} more RPCs remaining in this " +
                  "report.", Long.toHexString(context.getReportId()),
              (context.getTotalRpcs() - rpcsSeen)
          );
        }
      }
    }
  } finally {
    endTime = Time.monotonicNow();
    namesystem.writeUnlock();
  }

  if (invalidatedBlocks != null) {
    for (Block b : invalidatedBlocks) {
      blockLog.info("BLOCK* processReport: {} on node {} size {} does not " +
          "belong to any file", b, node, b.getNumBytes());
    }
  }

  // Log the block report processing stats from Namenode perspective
  final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
  if (metrics != null) {
    metrics.addBlockReport((int) (endTime - startTime));
  }
  blockLog.info("BLOCK* processReport: from storage {} node {}, " +
      "blocks: {}, hasStaleStorage: {}, processing time: {} msecs", storage
      .getStorageID(), nodeID, newReport.getNumberOfBlocks(),
      node.hasStaleStorages(), (endTime - startTime));
  return !node.hasStaleStorages();
}
 
Example #6
Source File: NameNode.java    From big-c with Apache License 2.0 (identical in hadoop)
static void initMetrics(Configuration conf, NamenodeRole role) {
  metrics = NameNodeMetrics.create(conf, role);
}
 
Example #7
Source File: NameNode.java    From big-c with Apache License 2.0 (identical in hadoop)
public static NameNodeMetrics getNameNodeMetrics() {
  return metrics;
}
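
Taken together, the two methods above define the lifecycle: the NameNode registers the singleton through initMetrics during startup, and every other component reads it back through getNameNodeMetrics. (Older forks instead assign a myMetrics field directly with new NameNodeMetrics(conf, this), as Example #2 above and the RDFS examples below show.) A minimal sketch of the wiring; since initMetrics has package visibility, this call in practice happens inside NameNode itself:

// A minimal sketch of the startup wiring. HdfsConfiguration and
// NamenodeRole.NAMENODE come from the standard HDFS code base; because
// initMetrics is package-private, this code would live in the namenode package.
Configuration conf = new HdfsConfiguration();
NameNode.initMetrics(conf, NamenodeRole.NAMENODE);  // registers the singleton

// Any component can now read the singleton back and record a sample:
NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
if (metrics != null) {
  metrics.addBlockReport(42);  // e.g. a 42 ms block-report processing time
}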
 
Example #8
Source File: ImageServlet.java    From big-c with Apache License 2.0 (identical in hadoop)
@Override
protected void doPut(final HttpServletRequest request,
    final HttpServletResponse response) throws ServletException, IOException {
  try {
    ServletContext context = getServletContext();
    final FSImage nnImage = NameNodeHttpServer.getFsImageFromContext(context);
    final Configuration conf = (Configuration) getServletContext()
        .getAttribute(JspHelper.CURRENT_CONF);
    final PutImageParams parsedParams = new PutImageParams(request, response,
        conf);
    final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();

    validateRequest(context, conf, request, response, nnImage,
        parsedParams.getStorageInfoString());

    UserGroupInformation.getCurrentUser().doAs(
        new PrivilegedExceptionAction<Void>() {

          @Override
          public Void run() throws Exception {

            final long txid = parsedParams.getTxId();

            final NameNodeFile nnf = parsedParams.getNameNodeFile();

            if (!nnImage.addToCheckpointing(txid)) {
              response.sendError(HttpServletResponse.SC_CONFLICT,
                  "Either current namenode is checkpointing or another"
                      + " checkpointer is already in the process of "
                      + "uploading a checkpoint made at transaction ID "
                      + txid);
              return null;
            }
            try {
              if (nnImage.getStorage().findImageFile(nnf, txid) != null) {
                response.sendError(HttpServletResponse.SC_CONFLICT,
                    "Either current namenode has checkpointed or "
                        + "another checkpointer already uploaded an "
                        + "checkpoint for txid " + txid);
                return null;
              }

              InputStream stream = request.getInputStream();
              try {
                long start = monotonicNow();
                MD5Hash downloadImageDigest = TransferFsImage
                    .handleUploadImageRequest(request, txid,
                        nnImage.getStorage(), stream,
                        parsedParams.getFileSize(), getThrottler(conf));
                nnImage.saveDigestAndRenameCheckpointImage(nnf, txid,
                    downloadImageDigest);
                // Metrics non-null only when used inside name node
                if (metrics != null) {
                  long elapsed = monotonicNow() - start;
                  metrics.addPutImage(elapsed);
                }
                // Now that we have a new checkpoint, we might be able to
                // remove some old ones.
                nnImage.purgeOldStorage(nnf);
              } finally {
                stream.close();
              }
            } finally {
              nnImage.removeFromCheckpointing(txid);
            }
            return null;
          }

        });
  } catch (Throwable t) {
    String errMsg = "PutImage failed. " + StringUtils.stringifyException(t);
    response.sendError(HttpServletResponse.SC_GONE, errMsg);
    throw new IOException(errMsg);
  }
}
 
Example #9
Source File: TestSaveNamespace.java    From RDFS with Apache License 2.0
private void saveNamespaceWithInjectedFault(Fault fault) throws IOException {
  Configuration conf = getConf();
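  // Older API: tests populate the static myMetrics field directly, presumably
  // so NameNode code paths that record metrics can run outside a full daemon;
  // null stands in for the NameNode instance normally passed as "this".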
  NameNode.myMetrics = new NameNodeMetrics(conf, null);
  NameNode.format(conf);
  NameNode nn = new NameNode(conf);
  FSNamesystem fsn = nn.getNamesystem();

  // Replace the FSImage with a spy
  FSImage originalImage = fsn.dir.fsImage;
  FSImage spyImage = spy(originalImage);
  spyImage.imageDigest = originalImage.imageDigest;
  fsn.dir.fsImage = spyImage;

  // inject fault
  switch(fault) {
  case SAVE_FSIMAGE:
    // The spy throws a RuntimeException when writing to the second directory
    doAnswer(new FaultySaveImage()).
      when(spyImage).saveFSImage((String)anyObject(), (DataOutputStream) anyObject());
    break;
  case MOVE_CURRENT:
    // The spy throws a RuntimeException when calling moveCurrent()
    doThrow(new RuntimeException("Injected fault: moveCurrent")).
      when(spyImage).moveCurrent((StorageDirectory)anyObject());
    break;
  case MOVE_LAST_CHECKPOINT:
    // The spy throws a RuntimeException when calling moveLastCheckpoint()
    doThrow(new RuntimeException("Injected fault: moveLastCheckpoint")).
      when(spyImage).moveLastCheckpoint((StorageDirectory)anyObject());
    break;
  }

  try {
    doAnEdit(fsn, 1);

    // Save namespace - this will fail because we inject a fault.
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      fsn.saveNamespace(false, false);
    } catch (Exception e) {
      LOG.info("Test caught expected exception", e);
    }

    // Now shut down and restart the namesystem
    nn.stop();
    nn = null;

    // Start a new namesystem, which should be able to recover
    // the namespace from the previous incarnation.
    nn = new NameNode(conf);
    fsn = nn.getNamesystem();

    // Make sure the image loaded including our edit.
    checkEditExists(fsn, 1);
  } finally {
    if (nn != null) {
      nn.stop();
    }
  }
}
 
Example #10
Source File: TestSaveNamespace.java    From RDFS with Apache License 2.0
public void testSaveWhileEditsRolled() throws Exception {
  Configuration conf = getConf();
  NameNode.myMetrics = new NameNodeMetrics(conf, null);
  NameNode.format(conf);
  NameNode nn = new NameNode(conf);
  FSNamesystem fsn = nn.getNamesystem();

  // Replace the FSImage with a spy
  final FSImage originalImage = fsn.dir.fsImage;
  FSImage spyImage = spy(originalImage);
  fsn.dir.fsImage = spyImage;

  try {
    doAnEdit(fsn, 1);
    CheckpointSignature sig = fsn.rollEditLog();
    LOG.warn("Checkpoint signature: " + sig);
    // Do another edit
    doAnEdit(fsn, 2);

    // Save namespace
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fsn.saveNamespace(false, false);

    // Now shut down and restart the NN
    nn.stop();
    nn = null;

    // Start a new namesystem, which should be able to recover
    // the namespace from the previous incarnation.
    nn = new NameNode(conf);
    fsn = nn.getNamesystem();

    // Make sure the image loaded including our edits.
    checkEditExists(fsn, 1);
    checkEditExists(fsn, 2);
  } finally {
    if (nn != null) {
      nn.stop();
    }
  }
}
 
Example #11
Source File: NameNode.java    From RDFS with Apache License 2.0 (identical in hadoop-gpu)
public static NameNodeMetrics getNameNodeMetrics() {
  return myMetrics;
}
 
Example #12
Source File: NameNode.java    From RDFS with Apache License 2.0
/**
 * Initialize name-node.
 * 
 */
private void initialize() throws IOException {    
  // set service-level authorization security policy
  if (serviceAuthEnabled =
      getConf().getBoolean(
          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    PolicyProvider policyProvider = 
      (PolicyProvider)(ReflectionUtils.newInstance(
          getConf().getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
              HDFSPolicyProvider.class, PolicyProvider.class), 
          getConf()));
    SecurityUtil.setPolicy(new ConfiguredPolicy(getConf(), policyProvider));
  }

  // Check that the port is free: create a socket, bind it, and throw if the
  // port is busy. This is done before loading the namesystem, so we fail fast
  // rather than waste time.
  InetSocketAddress clientSocket = NameNode.getAddress(getConf());
  ServerSocket socket = new ServerSocket();
  socket.bind(clientSocket);
  socket.close();
  InetSocketAddress dnSocket = NameNode.getDNProtocolAddress(getConf());
  if (dnSocket != null) {
    socket = new ServerSocket();
    socket.bind(dnSocket);
    socket.close();
    //System.err.println("Tested " + dnSocket);
  }
  
  long serverVersion = ClientProtocol.versionID;
  this.clientProtocolMethodsFingerprint = ProtocolSignature
      .getMethodsSigFingerPrint(ClientProtocol.class, serverVersion);
  
  myMetrics = new NameNodeMetrics(getConf(), this);

  this.clusterName = getConf().get("dfs.cluster.name");
  this.namesystem = new FSNamesystem(this, getConf());
  // HACK: from removal of FSNamesystem.getFSNamesystem().
  JspHelper.fsn = this.namesystem;

  this.startDNServer();
  startHttpServer(getConf());
}
 