org.apache.hadoop.hdfs.LogVerificationAppender Java Examples

The following examples show how to use org.apache.hadoop.hdfs.LogVerificationAppender. Each example is taken from an open-source project; the source file and originating project are noted above each snippet.
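All of the examples below follow the same basic pattern: attach a LogVerificationAppender to the root log4j Logger before exercising the code under test, then inspect the captured log lines afterwards. The following minimal sketch distills that pattern; the class name, the standalone main method, and the warning message are placeholders for illustration, while the countLinesWithMessage call mirrors the usage shown in the examples.

import org.apache.hadoop.hdfs.LogVerificationAppender;
import org.apache.log4j.Logger;

// Illustrative sketch only; not taken from the examples below.
public class LogVerificationSketch {
  public static void main(String[] args) {
    // Attach the verification appender to the root log4j logger so that
    // everything logged while the code under test runs is captured.
    final LogVerificationAppender appender = new LogVerificationAppender();
    final Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);
    try {
      // Stand-in for the code under test: emit a warning directly.
      Logger.getLogger(LogVerificationSketch.class).warn("something went wrong");

      // Count the captured lines containing the expected message fragment.
      int hits = appender.countLinesWithMessage("something went wrong");
      System.out.println("matching log lines: " + hits);
    } finally {
      // Detach the appender so later code is unaffected.
      logger.removeAppender(appender);
    }
  }
}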
Example #1
Source File: TestOzoneManagerHAMetadataOnly.java    From hadoop-ozone with Apache License 2.0
@Test
public void testOMRetryProxy() throws Exception {
  int maxFailoverAttempts = getOzoneClientFailoverMaxAttempts();
  // Stop all the OMs.
  for (int i = 0; i < getNumOfOMs(); i++) {
    getCluster().stopOzoneManager(i);
  }

  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);

  try {
    createVolumeTest(true);
    // After exhausting the configured maximum number of failover attempts
    // to the OMs, the RpcClient should give up.
    fail("TestOMRetryProxy should fail when there are no OMs running");
  } catch (ConnectException e) {
    Assert.assertEquals(1,
        appender.countLinesWithMessage("Failed to connect to OMs:"));
    Assert.assertEquals(maxFailoverAttempts,
        appender.countLinesWithMessage("Trying to failover"));
    Assert.assertEquals(1,
        appender.countLinesWithMessage("Attempted " +
            maxFailoverAttempts + " failovers."));
  }
}
 
Example #2
Source File: TestReplicationPolicy.java    From hadoop with Apache License 2.0
/**
 * This test case tries to choose more targets than there are available
 * nodes and checks the result.
 * @throws Exception
 */
@Test
public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
  // Make data nodes 0 and 1 unqualified for selection: not enough disk space.
  for(int i=0; i<2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
  
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  
  // Try to choose NUM_OF_DATANODES targets, which is more than the number of
  // nodes actually available.
  DatanodeStorageInfo[] targets = chooseTarget(NUM_OF_DATANODES);
  assertEquals(NUM_OF_DATANODES - 2, targets.length);

  final List<LoggingEvent> log = appender.getLog();
  assertNotNull(log);
  assertFalse(log.size() == 0);
  final LoggingEvent lastLogEntry = log.get(log.size() - 1);
  
  assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
  // Replicas should be placed on every node, but two data nodes are not
  // available, so we expect a shortfall of 2.
  assertTrue(((String) lastLogEntry.getMessage()).contains("in need of 2"));

  resetHeartbeatForStorages();
}
 
Example #3
Source File: TestReplicationPolicy.java    From big-c with Apache License 2.0
/**
 * This test case tries to choose more targets than there are available
 * nodes and checks the result.
 * @throws Exception
 */
@Test
public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
  // Make data nodes 0 and 1 unqualified for selection: not enough disk space.
  for(int i=0; i<2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
  
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  
  // Try to choose NUM_OF_DATANODES targets, which is more than the number of
  // nodes actually available.
  DatanodeStorageInfo[] targets = chooseTarget(NUM_OF_DATANODES);
  assertEquals(NUM_OF_DATANODES - 2, targets.length);

  final List<LoggingEvent> log = appender.getLog();
  assertNotNull(log);
  assertFalse(log.size() == 0);
  final LoggingEvent lastLogEntry = log.get(log.size() - 1);
  
  assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
  // Replicas should be placed on every node, but two data nodes are not
  // available, so we expect a shortfall of 2.
  assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));
  
  for(int i=0; i<2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
}
 
Example #4
Source File: TestStartup.java    From hadoop with Apache License 2.0
private void testImageChecksum(boolean compress) throws Exception {
  MiniDFSCluster cluster = null;
  if (compress) {
    config.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  }

  try {
      LOG.info("\n===========================================\n" +
               "Starting empty cluster");
      
      cluster = new MiniDFSCluster.Builder(config)
        .numDataNodes(0)
        .format(true)
        .build();
      cluster.waitActive();
      
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/test"));
      
      LOG.info("Shutting down cluster #1");
      cluster.shutdown();
      cluster = null;

      // Corrupt the md5 files in all the namedirs
      corruptFSImageMD5(true);

      // Attach our own log appender so we can verify output
      final LogVerificationAppender appender = new LogVerificationAppender();
      final Logger logger = Logger.getRootLogger();
      logger.addAppender(appender);

      // Try to start a new cluster
      LOG.info("\n===========================================\n" +
      "Starting same cluster after simulated crash");
      try {
        cluster = new MiniDFSCluster.Builder(config)
          .numDataNodes(0)
          .format(false)
          .build();
        fail("Should not have successfully started with corrupt image");
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains(
            "Failed to load an FSImage file!", ioe);
        int md5failures = appender.countExceptionsWithMessage(
            " is corrupt with MD5 checksum of ");
        // Two namedirs, so should have seen two failures
        assertEquals(2, md5failures);
      }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example #5
Source File: TestFsDatasetCache.java    From hadoop with Apache License 2.0
@Test(timeout=600000)
public void testFilesExceedMaxLockedMemory() throws Exception {
  LOG.info("beginning testFilesExceedMaxLockedMemory");

  // Create some test files that will exceed total cache capacity
  final int numFiles = 5;
  final long fileSize = CACHE_CAPACITY / (numFiles-1);

  final Path[] testFiles = new Path[numFiles];
  final HdfsBlockLocation[][] fileLocs = new HdfsBlockLocation[numFiles][];
  final long[] fileSizes = new long[numFiles];
  for (int i=0; i<numFiles; i++) {
    testFiles[i] = new Path("/testFilesExceedMaxLockedMemory-" + i);
    DFSTestUtil.createFile(fs, testFiles[i], fileSize, (short)1, 0xDFAL);
    fileLocs[i] = (HdfsBlockLocation[])fs.getFileBlockLocations(
        testFiles[i], 0, fileSize);
    // Get the file size (sum of blocks)
    long[] sizes = getBlockSizes(fileLocs[i]);
    for (int j=0; j<sizes.length; j++) {
      fileSizes[i] += sizes[j];
    }
  }

  // Cache the first n-1 files
  long total = 0;
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(cacheBlocks(fileLocs[i]));
    total = DFSTestUtil.verifyExpectedCacheUsage(
        rounder.round(total + fileSizes[i]), 4 * (i + 1), fsd);
  }

  // Caching the nth file should fail: it would exceed the max locked memory
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      int lines = appender.countLinesWithMessage(
          "more bytes in the cache: " +
          DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
      return lines > 0;
    }
  }, 500, 30000);
  // Also check the metrics for the failure
  assertTrue("Expected more than 0 failed cache attempts",
      fsd.getNumBlocksFailedToCache() > 0);

  // Uncache the n-1 files
  int curCachedBlocks = 16;
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(uncacheBlocks(fileLocs[i]));
    long uncachedBytes = rounder.round(fileSizes[i]);
    total -= uncachedBytes;
    curCachedBlocks -= uncachedBytes / BLOCK_SIZE;
    DFSTestUtil.verifyExpectedCacheUsage(total, curCachedBlocks, fsd);
  }
  LOG.info("finishing testFilesExceedMaxLockedMemory");
}
 
Example #6
Source File: TestStartup.java    From big-c with Apache License 2.0
private void testImageChecksum(boolean compress) throws Exception {
  MiniDFSCluster cluster = null;
  if (compress) {
    config.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  }

  try {
      LOG.info("\n===========================================\n" +
               "Starting empty cluster");
      
      cluster = new MiniDFSCluster.Builder(config)
        .numDataNodes(0)
        .format(true)
        .build();
      cluster.waitActive();
      
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/test"));
      
      LOG.info("Shutting down cluster #1");
      cluster.shutdown();
      cluster = null;

      // Corrupt the md5 files in all the namedirs
      corruptFSImageMD5(true);

      // Attach our own log appender so we can verify output
      final LogVerificationAppender appender = new LogVerificationAppender();
      final Logger logger = Logger.getRootLogger();
      logger.addAppender(appender);

      // Try to start a new cluster
      LOG.info("\n===========================================\n" +
      "Starting same cluster after simulated crash");
      try {
        cluster = new MiniDFSCluster.Builder(config)
          .numDataNodes(0)
          .format(false)
          .build();
        fail("Should not have successfully started with corrupt image");
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains(
            "Failed to load an FSImage file!", ioe);
        int md5failures = appender.countExceptionsWithMessage(
            " is corrupt with MD5 checksum of ");
        // Two namedirs, so should have seen two failures
        assertEquals(2, md5failures);
      }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example #7
Source File: TestFsDatasetCache.java    From big-c with Apache License 2.0
@Test(timeout=600000)
public void testFilesExceedMaxLockedMemory() throws Exception {
  LOG.info("beginning testFilesExceedMaxLockedMemory");

  // Create some test files that will exceed total cache capacity
  final int numFiles = 5;
  final long fileSize = CACHE_CAPACITY / (numFiles-1);

  final Path[] testFiles = new Path[numFiles];
  final HdfsBlockLocation[][] fileLocs = new HdfsBlockLocation[numFiles][];
  final long[] fileSizes = new long[numFiles];
  for (int i=0; i<numFiles; i++) {
    testFiles[i] = new Path("/testFilesExceedMaxLockedMemory-" + i);
    DFSTestUtil.createFile(fs, testFiles[i], fileSize, (short)1, 0xDFAL);
    fileLocs[i] = (HdfsBlockLocation[])fs.getFileBlockLocations(
        testFiles[i], 0, fileSize);
    // Get the file size (sum of blocks)
    long[] sizes = getBlockSizes(fileLocs[i]);
    for (int j=0; j<sizes.length; j++) {
      fileSizes[i] += sizes[j];
    }
  }

  // Cache the first n-1 files
  long total = 0;
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(cacheBlocks(fileLocs[i]));
    total = DFSTestUtil.verifyExpectedCacheUsage(
        rounder.round(total + fileSizes[i]), 4 * (i + 1), fsd);
  }

  // Caching the nth file should fail: it would exceed the max locked memory
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      int lines = appender.countLinesWithMessage(
          "more bytes in the cache: " +
          DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
      return lines > 0;
    }
  }, 500, 30000);
  // Also check the metrics for the failure
  assertTrue("Expected more than 0 failed cache attempts",
      fsd.getNumBlocksFailedToCache() > 0);

  // Uncache the n-1 files
  int curCachedBlocks = 16;
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(uncacheBlocks(fileLocs[i]));
    long uncachedBytes = rounder.round(fileSizes[i]);
    total -= uncachedBytes;
    curCachedBlocks -= uncachedBytes / BLOCK_SIZE;
    DFSTestUtil.verifyExpectedCacheUsage(total, curCachedBlocks, fsd);
  }
  LOG.info("finishing testFilesExceedMaxLockedMemory");
}