org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics. You can go to the original project or source file by following the links above each example.
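DataNodeMetrics is the metrics source a DataNode registers with the Hadoop metrics2 system; the examples below create it through the static DataNodeMetrics.create(Configuration, String) factory. As a minimal standalone sketch of the same API (assuming the Hadoop 2.x method set, where incrBytesWritten, incrBlocksWritten, and shutdown are available; the "example-dn" name is arbitrary):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;

public class DataNodeMetricsSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Register a metrics source for the given datanode display name.
    DataNodeMetrics metrics = DataNodeMetrics.create(conf, "example-dn");
    try {
      // Bump a few counters; any configured metrics2 sinks will pick them up.
      metrics.incrBytesWritten(1024 * 1024);
      metrics.incrBlocksWritten();
    } finally {
      // Unregister the source from the metrics system.
      metrics.shutdown();
    }
  }
}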
Example #1
Source File: TestBPOfferService.java    From hadoop with Apache License 2.0
@Before
public void setupMocks() throws Exception {
  mockNN1 = setupNNMock(0);
  mockNN2 = setupNNMock(1);

  // Set up a mock DN with the bare-bones configuration
  // objects, etc.
  mockDn = Mockito.mock(DataNode.class);
  Mockito.doReturn(true).when(mockDn).shouldRun();
  Configuration conf = new Configuration();
  File dnDataDir = new File(new File(TEST_BUILD_DATA, "dfs"), "data");
  conf.set(DFS_DATANODE_DATA_DIR_KEY, dnDataDir.toURI().toString());
  Mockito.doReturn(conf).when(mockDn).getConf();
  Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf();
  Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn"))
      .when(mockDn).getMetrics();

  // Set up a simulated dataset with our fake BP
  mockFSDataset = Mockito.spy(new SimulatedFSDataset(null, conf));
  mockFSDataset.addBlockPool(FAKE_BPID, conf);

  // Wire the dataset to the DN.
  Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
}
 
Example #2
Source File: DataNode.java    From RDFS with Apache License 2.0
/**
 * Initialize global settings for DN
 */
protected void initGlobalSetting(Configuration conf,
    AbstractList<File> dataDirs) throws IOException {
  this.dataDirs = dataDirs;
  this.conf = conf;
  storage = new DataStorage(this);
  
  // global DN settings
  initConfig(conf);
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);
  initIpcServer(conf);

  myMetrics = new DataNodeMetrics(conf, storage.getStorageID());
}
 
Example #3
Source File: TestDataNodeMetrics.java    From hadoop-gpu with Apache License 2.0
public void testDataNodeMetrics() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  try {
    FileSystem fs = cluster.getFileSystem();
    final long LONG_FILE_LEN = Integer.MAX_VALUE + 1L;
    DFSTestUtil.createFile(fs, new Path("/tmp.txt"),
        LONG_FILE_LEN, (short)1, 1L);
    List<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(1, datanodes.size());
    DataNode datanode = datanodes.get(0);
    DataNodeMetrics metrics = datanode.getMetrics();
    assertEquals(LONG_FILE_LEN, metrics.bytesWritten.getCurrentIntervalValue());
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
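
Note that this example uses the old pre-metrics2 counter objects (metrics.bytesWritten.getCurrentIntervalValue()). Against Hadoop 2.x, where DataNodeMetrics is a metrics2 source, the same assertion is usually written with the MetricsAsserts test helper, roughly as follows (a sketch of the pattern used in Hadoop's own TestDataNodeMetrics):

import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import org.apache.hadoop.metrics2.MetricsRecordBuilder;

// Inside the test, after writing LONG_FILE_LEN bytes through the cluster:
MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
assertCounter("BytesWritten", LONG_FILE_LEN, rb);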
 
Example #4
Source File: TestBPOfferService.java    From hadoop with Apache License 2.0
/**
 * Test datanode block pool initialization error handling.
 * Failure in initializing a block pool should not cause NPE.
 */
@Test
public void testBPInitErrorHandling() throws Exception {
  final DataNode mockDn = Mockito.mock(DataNode.class);
  Mockito.doReturn(true).when(mockDn).shouldRun();
  Configuration conf = new Configuration();
  File dnDataDir = new File(
    new File(TEST_BUILD_DATA, "testBPInitErrorHandling"), "data");
  conf.set(DFS_DATANODE_DATA_DIR_KEY, dnDataDir.toURI().toString());
  Mockito.doReturn(conf).when(mockDn).getConf();
  Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf();
  Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn"))
      .when(mockDn).getMetrics();
  final AtomicInteger count = new AtomicInteger();
  Mockito.doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      if (count.getAndIncrement() == 0) {
        throw new IOException("faked initBlockPool exception");
      }
      // The initBlockPool is called again. Now mock init is done.
      Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
      return null;
    }
  }).when(mockDn).initBlockPool(Mockito.any(BPOfferService.class));
  BPOfferService bpos = setupBPOSForNNs(mockDn, mockNN1, mockNN2);
  List<BPServiceActor> actors = bpos.getBPServiceActors();
  assertEquals(2, actors.size());
  bpos.start();
  try {
    waitForInitialization(bpos);
    // Even if one of the actors fails to initialize, the other one
    // should still finish its block report.
    waitForBlockReport(mockNN1, mockNN2);
  } finally {
    bpos.stop();
  }
}
 
Example #5
Source File: TestStuckDataNode.java    From RDFS with Apache License 2.0
/** This creates a slow writer and checks that
 * pipeline heartbeats work fine.
 */
public void testStuckDataNode() throws Exception {
  final int DATANODE_NUM = 3;
  Configuration conf = new Configuration();
  final int timeout = 8000;
  conf.setInt("dfs.socket.timeout", timeout);

  final Path p = new Path("/pipelineHeartbeat/foo");
  System.out.println("p=" + p);

  MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
  DistributedFileSystem fs = (DistributedFileSystem)cluster.getFileSystem();

  // Replace the first datanode's bytesWritten counter with a spy that
  // delays every increment, simulating a stuck datanode.
  DataNodeMetrics metrics = cluster.getDataNodes().get(0).myMetrics;
  MetricsTimeVaryingLong spyBytesWritten = spy(metrics.bytesWritten);
  DelayAnswer delayAnswer = new DelayAnswer();
  doAnswer(delayAnswer).when(spyBytesWritten).inc(anyInt());
  metrics.bytesWritten = spyBytesWritten;

  try {
    // create a new file.
    FSDataOutputStream stm = fs.create(p);
    stm.write(1);
    stm.sync();
    stm.write(2);
    stm.close();

    // verify that the entire file is good
    FSDataInputStream in = fs.open(p);
    assertEquals(1, in.read());
    assertEquals(2, in.read());
    in.close();
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example #6
Source File: DataNode.java    From hadoop with Apache License 2.0
/**
 * This method starts the data node with the specified conf.
 * 
 * @param conf - the configuration;
 *  if conf's CONFIG_PROPERTY_SIMULATED property is set,
 *  then a simulated storage-based data node is created
 * 
 * @param dataDirs - only for a non-simulated storage data node
 * @param resources - privileged resources needed when running in secure mode, or null
 * @throws IOException
 */
void startDataNode(Configuration conf, 
                   List<StorageLocation> dataDirs,
                   SecureResources resources
                   ) throws IOException {

  // settings global for all BPs in the Data Node
  this.secureResources = resources;
  synchronized (this) {
    this.dataDirs = dataDirs;
  }
  this.conf = conf;
  this.dnConf = new DNConf(conf);
  checkSecureConfig(dnConf, conf, resources);

  this.spanReceiverHost =
    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX);

  if (dnConf.maxLockedMemory > 0) {
    if (!NativeIO.POSIX.getCacheManipulator().verifyCanMlock()) {
      throw new RuntimeException(String.format(
          "Cannot start datanode because the configured max locked memory" +
          " size (%s) is greater than zero and native code is not available.",
          DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
    }
    if (Path.WINDOWS) {
      NativeIO.Windows.extendWorkingSetSize(dnConf.maxLockedMemory);
    } else {
      long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
      if (dnConf.maxLockedMemory > ulimit) {
        throw new RuntimeException(String.format(
          "Cannot start datanode because the configured max locked memory" +
          " size (%s) of %d bytes is more than the datanode's available" +
          " RLIMIT_MEMLOCK ulimit of %d bytes.",
          DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
          dnConf.maxLockedMemory,
          ulimit));
      }
    }
  }
  LOG.info("Starting DataNode with maxLockedMemory = " +
      dnConf.maxLockedMemory);

  storage = new DataStorage();
  
  // global DN settings
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);
  pauseMonitor = new JvmPauseMonitor(conf);
  pauseMonitor.start();

  // BlockPoolTokenSecretManager is required to create ipc server.
  this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();

  // Login is done by now. Set the DN user name.
  dnUserName = UserGroupInformation.getCurrentUser().getShortUserName();
  LOG.info("dnUserName = " + dnUserName);
  LOG.info("supergroup = " + supergroup);
  initIpcServer(conf);

  metrics = DataNodeMetrics.create(conf, getDisplayName());
  metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
  
  blockPoolManager = new BlockPoolManager(this);
  blockPoolManager.refreshNamenodes(conf);

  // Create the ReadaheadPool from the DataNode context so we can
  // exit without having to explicitly shutdown its thread pool.
  readaheadPool = ReadaheadPool.getInstance();
  saslClient = new SaslDataTransferClient(dnConf.conf, 
      dnConf.saslPropsResolver, dnConf.trustedChannelResolver);
  saslServer = new SaslDataTransferServer(dnConf, blockPoolTokenSecretManager);
}
 
Example #7
Source File: DataNode.java    From hadoop with Apache License 2.0
public DataNodeMetrics getMetrics() {
  return metrics;
}
 
Example #8
Source File: DataNode.java    From RDFS with Apache License 2.0
DataNodeMetrics getMetrics() {
  return myMetrics;
}
 
Example #9
Source File: DataNode.java    From hadoop-gpu with Apache License 2.0
DataNodeMetrics getMetrics() {
  return myMetrics;
}