Java Code Examples for org.apache.hadoop.test.GenericTestUtils

The following examples show how to use org.apache.hadoop.test.GenericTestUtils. They are extracted from open source projects; the source project, source file, and license are listed above each example.
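Most of the examples below lean on a small set of GenericTestUtils methods: assertExceptionContains(String, Throwable) asserts that a throwable's message contains a given substring, waitFor(Supplier&lt;Boolean&gt;, int, int) polls a condition at a given interval until a timeout (both in milliseconds), getMethodName() returns the calling test method's name for building per-test paths, and setLogLevel adjusts logger verbosity. The following is a minimal sketch of the first three calls; the Flaky interface is a hypothetical placeholder, not a Hadoop class:

import java.util.concurrent.TimeoutException;

import org.apache.hadoop.test.GenericTestUtils;

public class GenericTestUtilsSketch {

  /** Hypothetical service, used only to illustrate the calls. */
  interface Flaky {
    boolean isReady();
    void failingCall() throws Exception;
  }

  static void demo(Flaky flaky)
      throws TimeoutException, InterruptedException {
    // Poll every 100 ms; give up with a TimeoutException after 10 s.
    GenericTestUtils.waitFor(() -> flaky.isReady(), 100, 10000);

    // Derive a per-test path, as many of the HDFS tests below do.
    String path = "/" + GenericTestUtils.getMethodName() + ".dat";

    try {
      flaky.failingCall();
    } catch (Exception e) {
      // Passes only if the exception message contains the substring.
      GenericTestUtils.assertExceptionContains("expected fragment", e);
    }
  }
}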
Example 1
Source Project: hadoop   Source File: TestZKFailoverController.java    License: Apache License 2.0
@Test(timeout=15000)
public void testGracefulFailoverToUnhealthy() throws Exception {
  try {
    cluster.start();

    cluster.waitForActiveLockHolder(0);

    // Mark it unhealthy, wait for it to exit election
    cluster.setHealthy(1, false);
    cluster.waitForElectorState(1, ActiveStandbyElector.State.INIT);
    
    // Ask for failover, it should fail, because it's unhealthy
    try {
      cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
      fail("Did not fail to graceful failover to unhealthy service!");
    } catch (ServiceFailedException sfe) {
      GenericTestUtils.assertExceptionContains(
          cluster.getService(1).toString() + 
          " is not currently healthy.", sfe);
    }
  } finally {
    cluster.stop();
  }
}
 
Example 2
Source Project: big-c   Source File: TestPermissionSymlinks.java    License: Apache License 2.0
private void doDeleteLinkParentNotWritable() throws Exception {
  // Try to delete where the symlink's parent dir is not writable
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws IOException {
        FileContext myfc = FileContext.getFileContext(conf);
        myfc.delete(link, false);
        return null;
      }
    });
    fail("Deleted symlink without write permissions on parent!");
  } catch (AccessControlException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }
}
 
Example 3
Source Project: big-c   Source File: TestLazyPersistFiles.java    License: Apache License 2.0
/**
 * Delete a lazy-persist file that has not been persisted to disk.
 * Memory is freed up and the file is gone.
 * @throws IOException
 */
@Test
public void testDeleteBeforePersist()
  throws Exception {
  startUpCluster(true, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));

  Path path = new Path("/" + METHOD_NAME + ".dat");
  makeTestFile(path, BLOCK_SIZE, true);
  LocatedBlocks locatedBlocks =
    ensureFileReplicasOnStorageType(path, RAM_DISK);

  // Delete before persist
  client.delete(path.toString(), false);
  Assert.assertFalse(fs.exists(path));

  assertThat(verifyDeletedBlocks(locatedBlocks), is(true));

  verifyRamDiskJMXMetric("RamDiskBlocksDeletedBeforeLazyPersisted", 1);
}
 
Example 4
Source Project: hadoop   Source File: TestDatanodeProtocolRetryPolicy.java    License: Apache License 2.0
private void waitForBlockReport(
    final DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      try {
        Mockito.verify(mockNN).blockReport(
            Mockito.eq(datanodeRegistration),
            Mockito.eq(POOL_ID),
            Mockito.<StorageBlockReport[]>anyObject(),
            Mockito.<BlockReportContext>anyObject());
        return true;
      } catch (Throwable t) {
        LOG.info("waiting on block report: " + t.getMessage());
        return false;
      }
    }
  }, 500, 100000);
}
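The second and third arguments to waitFor are the poll interval and the overall timeout, both in milliseconds. The anonymous Supplier predates Java 8; with a lambda the same wait shrinks considerably, as Example 5 below also shows. A sketch, reusing mockNN and the matchers from above:

GenericTestUtils.waitFor(() -> {
  try {
    // Same verification as above; true once the block report has arrived.
    Mockito.verify(mockNN).blockReport(
        Mockito.eq(datanodeRegistration),
        Mockito.eq(POOL_ID),
        Mockito.<StorageBlockReport[]>anyObject(),
        Mockito.<BlockReportContext>anyObject());
    return true;
  } catch (Throwable t) {
    return false;
  }
}, 500, 100000);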
 
Example 5
Source Project: hadoop-ozone   Source File: TestDatanodeStateMachine.java    License: Apache License 2.0
/**
 * Assert that starting the state machine executes the Init State.
 */
@Test
public void testStartStopDatanodeStateMachine() throws IOException,
    InterruptedException, TimeoutException {
  try (DatanodeStateMachine stateMachine =
      new DatanodeStateMachine(getNewDatanodeDetails(), conf, null, null)) {
    stateMachine.startDaemon();
    SCMConnectionManager connectionManager =
        stateMachine.getConnectionManager();
    GenericTestUtils.waitFor(
        () -> {
          int size = connectionManager.getValues().size();
          LOG.info("connectionManager.getValues().size() is {}", size);
          return size == 1;
        }, 1000, 30000);

    stateMachine.stopDaemon();
    assertTrue(stateMachine.isDaemonStopped());
  }
}
 
Example 6
Source Project: big-c   Source File: TestBPOfferService.java    License: Apache License 2.0
private ReceivedDeletedBlockInfo[] waitForBlockReceived(
    final ExtendedBlock fakeBlock,
    final DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
  final String fakeBlockPoolId = fakeBlock.getBlockPoolId();
  final ArgumentCaptor<StorageReceivedDeletedBlocks[]> captor =
    ArgumentCaptor.forClass(StorageReceivedDeletedBlocks[].class);
  GenericTestUtils.waitFor(new Supplier<Boolean>() {

    @Override
    public Boolean get() {
      try {
        Mockito.verify(mockNN).blockReceivedAndDeleted(
          Mockito.<DatanodeRegistration>anyObject(),
          Mockito.eq(fakeBlockPoolId),
          captor.capture());
        return true;
      } catch (Throwable t) {
        return false;
      }
    }
  }, 100, 10000);
  return captor.getValue()[0].getBlocks();
}
 
Example 7
Source Project: big-c   Source File: TestServletFilter.java    License: Apache License 2.0
/**
 * Similar to the above test case, except that it uses a different API to add
 * the filter. Regression test for HADOOP-8786.
 */
@Test
public void testContextSpecificServletFilterWhenInitThrowsException()
    throws Exception {
  Configuration conf = new Configuration();
  HttpServer2 http = createTestServer(conf);
  HttpServer2.defineFilter(http.webAppContext,
      "ErrorFilter", ErrorFilter.class.getName(),
      null, null);
  try {
    http.start();
    fail("expecting exception");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(
        "Unable to initialize WebAppContext", e);
  }
}
 
Example 8
Source Project: big-c   Source File: TestBPOfferService.java    License: Apache License 2.0
private void waitForOneToFail(final BPOfferService bpos)
    throws Exception {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      List<BPServiceActor> actors = bpos.getBPServiceActors();
      int failedcount = 0;
      for (BPServiceActor actor : actors) {
        if (!actor.isAlive()) {
          failedcount++;
        }
      }
      return failedcount == 1;
    }
  }, 100, 10000);
}
 
Example 9
Source Project: hadoop   Source File: TestEditLog.java    License: Apache License 2.0
@Test
public void testFailedOpen() throws Exception {
  File logDir = new File(TEST_DIR, "testFailedOpen");
  logDir.mkdirs();
  FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);
  try {
    FileUtil.setWritable(logDir, false);
    log.openForWrite();
    fail("Did no throw exception on only having a bad dir");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "too few journals successfully started", ioe);
  } finally {
    FileUtil.setWritable(logDir, true);
    log.close();
  }
}
 
Example 10
Source Project: hadoop-ozone   Source File: TestSCMSafeModeManager.java    License: Apache License 2.0
@Test
public void testFailWithIncorrectValueForHealthyPipelinePercent()
    throws Exception {
  try {
    OzoneConfiguration conf = createConf(100,
        0.9);
    MockNodeManager mockNodeManager = new MockNodeManager(true, 10);
    PipelineManager pipelineManager = new SCMPipelineManager(conf,
        mockNodeManager, scmMetadataStore.getPipelineTable(), queue);
    scmSafeModeManager = new SCMSafeModeManager(
        conf, containers, pipelineManager, queue);
    fail("testFailWithIncorrectValueForHealthyPipelinePercent");
  } catch (IllegalArgumentException ex) {
    GenericTestUtils.assertExceptionContains("value should be >= 0.0 and <=" +
        " 1.0", ex);
  }
}
 
Example 11
Source Project: hadoop   Source File: TestBlockRecovery.java    License: Apache License 2.0
/**
 * BlockRecoveryFI_09. some/all DNs failed to update replicas.
 *
 * @throws IOException in case of an error
 */
@Test
public void testFailedReplicaUpdate() throws IOException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  DataNode spyDN = spy(dn);
  doThrow(new IOException()).when(spyDN).updateReplicaUnderRecovery(
      block, RECOVERY_ID, BLOCK_ID, block.getNumBytes());
  try {
    spyDN.syncBlock(rBlock, initBlockRecords(spyDN));
    fail("Sync should fail");
  } catch (IOException e) {
    // Expected; verify the failure message.
    GenericTestUtils.assertExceptionContains("Cannot recover ", e);
  }
}
 
Example 12
Source Project: big-c   Source File: TestEditLogJournalFailures.java    License: Apache License 2.0
@Test
public void testAllEditsDirFailOnWrite() throws IOException {
  assertTrue(doAnEdit());
  // Invalidate both edits journals.
  invalidateEditsDirAtIndex(0, true, true);
  invalidateEditsDirAtIndex(1, true, true);
  // The NN has not terminated (no ExitException thrown)
  try {
    doAnEdit();
    fail("The previous edit could not be synced to any persistent storage, "
        + " should have halted the NN");
  } catch (RemoteException re) {
    assertTrue(re.getClassName().contains("ExitException"));
    GenericTestUtils.assertExceptionContains(
        "Could not sync enough journals to persistent storage due to " +
        "No journals available to flush. " +
        "Unsynced transactions: 1", re);
  }
}
 
Example 13
Source Project: big-c   Source File: BlockReportTestBase.java    License: Apache License 2.0
/**
 * Test writes a file and closes it.
 * A block report is generated with a bad GS for a single block.
 * The block report is then forced and the number of corrupt blocks is checked.
 *
 * @throws IOException in case of an error
 */
@Test(timeout=300000)
public void blockReport_03() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  writeFile(METHOD_NAME, FILE_SIZE, filePath);

  // all blocks belong to the same file, hence same BP
  DataNode dn = cluster.getDataNodes().get(DN_N0);
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
  sendBlockReports(dnR, poolId, reports);
  printStats();

  assertThat("Wrong number of corrupt blocks",
             cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
  assertThat("Wrong number of PendingDeletion blocks",
             cluster.getNamesystem().getPendingDeletionBlocks(), is(0L));
}
 
Example 14
Source Project: hadoop-ozone   Source File: TestMetadataStore.java    License: Apache License 2.0
@Test
public void testDestroyDB() throws IOException {
  // create a new DB to test db destroy
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);

  File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
      + "-" + storeImpl.toLowerCase() + "-toDestroy");
  MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
      .setConf(conf)
      .setCreateIfMissing(true)
      .setDbFile(dbDir)
      .build();

  dbStore.put(getBytes("key1"), getBytes("value1"));
  dbStore.put(getBytes("key2"), getBytes("value2"));

  assertFalse(dbStore.isEmpty());
  assertTrue(dbDir.exists());
  assertTrue(dbDir.listFiles().length > 0);

  dbStore.destroy();

  assertFalse(dbDir.exists());
}
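GenericTestUtils.getTestDir(String), used above to place the database, resolves a scratch directory under Hadoop's test data directory (governed by the test.build.data system property in recent Hadoop releases; treat the exact default as an assumption). A short sketch with an illustrative name:

// Resolve and create a scratch directory for this test (name is illustrative).
File scratch = GenericTestUtils.getTestDir("metadata-store-demo");
assertTrue(scratch.mkdirs() || scratch.isDirectory());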
 
Example 15
Source Project: big-c   Source File: TestFiPipelines.java    License: Apache License 2.0
/**
 * Test initiates and sets actions created by the injection framework. The
 * actions cover both aspects of sending acknowledgment packets in a pipeline.
 * Creates and closes a file of a certain length < packet size.
 * Injected actions check whether the number of visible bytes at the datanodes
 * equals the number of acknowledged bytes.
 *
 * @throws IOException in case of an error
 */
@Test
public void pipeline_04() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + METHOD_NAME);
  }

  final PipelinesTestUtil.PipelinesTest pipst =
    (PipelinesTestUtil.PipelinesTest) PipelinesTestUtil.initTest();

  pipst.fiCallSetNumBytes.set(new PipelinesTestUtil.ReceivedCheckAction(METHOD_NAME));
  pipst.fiCallSetBytesAcked.set(new PipelinesTestUtil.AckedCheckAction(METHOD_NAME));

  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  FSDataOutputStream fsOut = fs.create(filePath);
  TestPipelines.writeData(fsOut, 2);
  fs.close();
}
 
Example 16
Source Project: hadoop   Source File: TestLazyPersistFiles.java    License: Apache License 2.0
@Test
public void testLazyPersistBlocksAreSaved()
    throws IOException, InterruptedException {
  startUpCluster(true, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  // Create a test file
  makeTestFile(path, BLOCK_SIZE * 10, true);
  LocatedBlocks locatedBlocks = ensureFileReplicasOnStorageType(path, RAM_DISK);

  // Sleep for a short time to allow the lazy writer thread to do its job
  Thread.sleep(6 * LAZY_WRITER_INTERVAL_SEC * 1000);
  
  LOG.info("Verifying copy was saved to lazyPersist/");

  // Make sure that there is a saved copy of the replica on persistent
  // storage.
  ensureLazyPersistBlocksAreSaved(locatedBlocks);
}
 
Example 17
Source Project: big-c   Source File: TestBlockRecovery.java    License: Apache License 2.0
/**
 * BlockRecoveryFI_09. some/all DNs failed to update replicas.
 *
 * @throws IOException in case of an error
 */
@Test
public void testFailedReplicaUpdate() throws IOException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  DataNode spyDN = spy(dn);
  doThrow(new IOException()).when(spyDN).updateReplicaUnderRecovery(
      block, RECOVERY_ID, BLOCK_ID, block.getNumBytes());
  try {
    spyDN.syncBlock(rBlock, initBlockRecords(spyDN));
    fail("Sync should fail");
  } catch (IOException e) {
    // Expected; verify the failure message.
    GenericTestUtils.assertExceptionContains("Cannot recover ", e);
  }
}
 
Example 18
Source Project: hadoop-ozone   Source File: TestKeyDeletingService.java    License: Apache License 2.0
/**
 * In this test, we create a bunch of keys and delete them. Then we start the
 * KeyDeletingService and pass an SCMClient which does not fail. We make sure
 * that all the keys we deleted are picked up and deleted by
 * OzoneManager.
 *
 * @throws IOException on failure.
 */
@Test(timeout = 30000)
public void checkIfDeleteServiceisDeletingKeys()
    throws IOException, TimeoutException, InterruptedException {
  OzoneConfiguration conf = createConfAndInitValues();
  OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(conf);
  KeyManager keyManager =
      new KeyManagerImpl(
          new ScmBlockLocationTestingClient(null, null, 0),
          metaMgr, conf, UUID.randomUUID().toString(), null);
  keyManager.start(conf);
  final int keyCount = 100;
  createAndDeleteKeys(keyManager, keyCount, 1);
  KeyDeletingService keyDeletingService =
      (KeyDeletingService) keyManager.getDeletingService();
  GenericTestUtils.waitFor(
      () -> keyDeletingService.getDeletedKeyCount().get() >= keyCount,
      1000, 10000);
  Assert.assertTrue(keyDeletingService.getRunCount().get() > 1);
  Assert.assertEquals(
      keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).size(), 0);
}
 
Example 19
Source Project: big-c   Source File: TestOpensslCipher.java    License: Apache License 2.0
@Test(timeout=120000)
public void testDoFinalArguments() throws Exception {
  Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null);
  OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding");
  Assert.assertTrue(cipher != null);
  
  cipher.init(OpensslCipher.ENCRYPT_MODE, key, iv);
  
  // Require direct buffer
  ByteBuffer output = ByteBuffer.allocate(1024);
  
  try {
    cipher.doFinal(output);
    Assert.fail("Output buffer should be direct buffer.");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains(
        "Direct buffer is required", e);
  }
}
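The failure above is expected because ByteBuffer.allocate returns a heap buffer. A direct buffer of the same capacity satisfies the check; a sketch continuing with the same cipher instance:

// Direct buffers are accepted where heap buffers are rejected.
ByteBuffer direct = ByteBuffer.allocateDirect(1024);
cipher.doFinal(direct);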
 
Example 20
Source Project: big-c   Source File: TestMover.java    License: Apache License 2.0
/**
 * Test Mover Cli by specifying a list of files/directories using option "-p".
 * There is only one namenode (and hence name service) specified in the conf.
 */
@Test
public void testMoverCli() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration()).numDataNodes(0).build();
  try {
    final Configuration conf = cluster.getConfiguration(0);
    try {
      Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "bar");
      Assert.fail("Expected exception for illegal path bar");
    } catch (IllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("bar is not absolute", e);
    }

    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf);
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertTrue(movePaths.containsKey(nn));
    Assert.assertNull(movePaths.get(nn));

    movePaths = Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "/bar");
    namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, movePaths.size());
    nn = namenodes.iterator().next();
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
 
Example 21
Source Project: big-c   Source File: TestUTF8.java    License: Apache License 2.0
/**
 * Test that decoding invalid UTF8 throws an exception with an appropriate
 * error message.
 */
public void testInvalidUTF8() throws Exception {
  byte[] invalid = new byte[] {
      0x01, 0x02, (byte)0xff, (byte)0xff, 0x01, 0x02, 0x03, 0x04, 0x05 };
  try {
    UTF8.fromBytes(invalid);
    fail("did not throw an exception");
  } catch (UTFDataFormatException utfde) {
    GenericTestUtils.assertExceptionContains(
        "Invalid UTF8 at ffff01020304", utfde);
  }
}
 
Example 22
Source Project: big-c   Source File: TestDirectoryScanner.java    License: Apache License 2.0
@Test (timeout=300000)
public void testRetainBlockOnPersistentStorage() throws Exception {
  cluster = new MiniDFSCluster
      .Builder(CONF)
      .storageTypes(new StorageType[] { StorageType.RAM_DISK, StorageType.DEFAULT })
      .numDataNodes(1)
      .build();
  try {
    cluster.waitActive();
    DataNode dataNode = cluster.getDataNodes().get(0);
    bpid = cluster.getNamesystem().getBlockPoolId();
    fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
    client = cluster.getFileSystem().getClient();
    scanner = new DirectoryScanner(dataNode, fds, CONF);
    scanner.setRetainDiffs(true);
    FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));

    // Add a file with 1 block
    List<LocatedBlock> blocks =
        createFile(GenericTestUtils.getMethodName(), BLOCK_LENGTH, false);

    // Ensure no difference between volumeMap and disk.
    scan(1, 0, 0, 0, 0, 0);

    // Make a copy of the block on RAM_DISK and ensure that it is
    // picked up by the scanner.
    duplicateBlock(blocks.get(0).getBlock().getBlockId());
    scan(2, 1, 0, 0, 0, 0, 1);
    verifyStorageType(blocks.get(0).getBlock().getBlockId(), false);
    scan(1, 0, 0, 0, 0, 0);

  } finally {
    if (scanner != null) {
      scanner.shutdown();
      scanner = null;
    }
    cluster.shutdown();
    cluster = null;
  }
}
 
Example 23
Source Project: big-c   Source File: CryptoStreamsTestBase.java    License: Apache License 2.0
/** Test skip. */
@Test(timeout=120000)
public void testSkip() throws Exception {
  OutputStream out = getOutputStream(defaultBufferSize);
  writeData(out);
      
  // Default buffer size
  InputStream in = getInputStream(defaultBufferSize);
  byte[] result = new byte[dataLen];
  int n1 = readAll(in, result, 0, dataLen / 3);
  Assert.assertEquals(n1, ((Seekable) in).getPos());
  
  long skipped = in.skip(dataLen / 3);
  int n2 = readAll(in, result, 0, dataLen);
  
  Assert.assertEquals(dataLen, n1 + skipped + n2);
  byte[] readData = new byte[n2];
  System.arraycopy(result, 0, readData, 0, n2);
  byte[] expectedData = new byte[n2];
  System.arraycopy(data, dataLen - n2, expectedData, 0, n2);
  Assert.assertArrayEquals(readData, expectedData);
  
  try {
    skipped = in.skip(-3);
    Assert.fail("Skip Negative length should fail.");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("Negative skip length", e);
  }
  
  // Skip after EOF
  skipped = in.skip(3);
  Assert.assertEquals(skipped, 0);
  
  in.close();
}
 
Example 24
Source Project: big-c   Source File: DFSTestUtil.java    License: Apache License 2.0
public static void setNameNodeLogLevel(Level level) {
  GenericTestUtils.setLogLevel(FSNamesystem.LOG, level);
  GenericTestUtils.setLogLevel(BlockManager.LOG, level);
  GenericTestUtils.setLogLevel(LeaseManager.LOG, level);
  GenericTestUtils.setLogLevel(NameNode.LOG, level);
  GenericTestUtils.setLogLevel(NameNode.stateChangeLog, level);
  GenericTestUtils.setLogLevel(NameNode.blockStateChangeLog, level);
}
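A caller would typically invoke this once during test setup; a minimal sketch, assuming JUnit 4 and Log4j (Level.ALL is just one choice of verbosity):

// Raise NameNode-side logging for the duration of the test run.
@Before
public void setUp() {
  DFSTestUtil.setNameNodeLogLevel(org.apache.log4j.Level.ALL);
}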
 
Example 25
Source Project: hadoop   Source File: TestLazyPersistFiles.java    License: Apache License 2.0
/**
 * If one or more replicas of a lazyPersist file are lost, then the file
 * must be discarded by the NN, instead of being kept around as a
 * 'corrupt' file.
 */
@Test
public void testLazyPersistFilesAreDiscarded()
    throws IOException, InterruptedException {
  startUpCluster(true, 2);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");

  makeTestFile(path1, BLOCK_SIZE, true);
  ensureFileReplicasOnStorageType(path1, RAM_DISK);

  // Stop the DataNode and sleep for the time it takes the NN to
  // detect the DN as being dead.
  cluster.shutdownDataNodes();
  Thread.sleep(30000L);
  assertThat(cluster.getNamesystem().getNumDeadDataNodes(), is(1));

  // Next, wait for the replication monitor to mark the file as corrupt
  Thread.sleep(2 * DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT * 1000);

  // Wait for the LazyPersistFileScrubber to run
  Thread.sleep(2 * LAZY_WRITE_FILE_SCRUBBER_INTERVAL_SEC * 1000);

  // Ensure that path1 does not exist anymore.
  assert(!fs.exists(path1));

  // We should have zero blocks that need replication.
  assertThat(cluster.getNameNode()
                    .getNamesystem()
                    .getBlockManager()
                    .getUnderReplicatedBlocksCount(),
             is(0L));
}
 
Example 26
Source Project: hadoop   Source File: TestShortCircuitCache.java    License: Apache License 2.0
@Test(timeout=60000)
public void testDataXceiverCleansUpSlotsOnFailure() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testDataXceiverCleansUpSlotsOnFailure", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path TEST_PATH1 = new Path("/test_file1");
  final Path TEST_PATH2 = new Path("/test_file2");
  final int TEST_FILE_LEN = 4096;
  final int SEED = 0xFADE1;
  DFSTestUtil.createFile(fs, TEST_PATH1, TEST_FILE_LEN,
      (short)1, SEED);
  DFSTestUtil.createFile(fs, TEST_PATH2, TEST_FILE_LEN,
      (short)1, SEED);

  // The first read should allocate one shared memory segment and slot.
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);

  // The second read should fail, and we should only have 1 segment and 1 slot
  // left.
  fs.getClient().getConf().brfFailureInjector =
      new TestCleanupFailureInjector();
  try {
    DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
  } catch (Throwable t) {
    GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
        "testing, but we failed to do a non-TCP read.", t);
  }
  checkNumberOfSegmentsAndSlots(1, 1,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());
  cluster.shutdown();
  sockDir.close();
}
 
Example 27
Source Project: hadoop   Source File: TestRbwSpaceReservation.java    License: Apache License 2.0
@SuppressWarnings("unchecked")
@Test(timeout = 30000)
public void testRBWFileCreationError() throws Exception {

  final short replication = 1;
  startCluster(BLOCK_SIZE, replication, -1);

  final FsVolumeImpl fsVolumeImpl = (FsVolumeImpl) cluster.getDataNodes()
      .get(0).getFSDataset().getVolumes().get(0);
  final String methodName = GenericTestUtils.getMethodName();
  final Path file = new Path("/" + methodName + ".01.dat");

  // Mock BlockPoolSlice so that RBW file creation throws an IOException
  BlockPoolSlice blockPoolSlice = Mockito.mock(BlockPoolSlice.class);
  Mockito.when(blockPoolSlice.createRbwFile((Block) Mockito.any()))
      .thenThrow(new IOException("Synthetic IO Exception Throgh MOCK"));

  Field field = FsVolumeImpl.class.getDeclaredField("bpSlices");
  field.setAccessible(true);
  Map<String, BlockPoolSlice> bpSlices = (Map<String, BlockPoolSlice>) field
      .get(fsVolumeImpl);
  bpSlices.put(fsVolumeImpl.getBlockPoolList()[0], blockPoolSlice);

  try {
    // Write 1 byte to the file
    FSDataOutputStream os = fs.create(file, replication);
    os.write(new byte[1]);
    os.hsync();
    os.close();
    fail("Expecting IOException file creation failure");
  } catch (IOException e) {
    // Exception can be ignored (expected)
  }

  // Ensure RBW space reserved is released
  assertTrue("Expected ZERO but got " + fsVolumeImpl.getReservedForRbw(),
      fsVolumeImpl.getReservedForRbw() == 0);
}
 
Example 28
Source Project: big-c   Source File: TestLazyPersistFiles.java    License: Apache License 2.0
@Test
public void testPolicyNotSetByDefault() throws IOException {
  startUpCluster(false, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, 0, false);
  // Stat the file and check that the LAZY_PERSIST policy is not
  // returned back.
  HdfsFileStatus status = client.getFileInfo(path.toString());
  assertThat(status.getStoragePolicy(), not(LAZY_PERSIST_POLICY_ID));
}
 
Example 29
Source Project: hadoop-ozone   Source File: TestDatanodeVersionFile.java    License: Apache License 2.0
@Test
public void testVerifyLayOut() throws IOException {
  int invalidLayOutVersion = 100;
  dnVersionFile = new DatanodeVersionFile(
      storageID, clusterID, datanodeUUID, cTime, invalidLayOutVersion);
  dnVersionFile.createVersionFile(versionFile);
  Properties props = dnVersionFile.readFrom(versionFile);

  try {
    HddsVolumeUtil.getLayOutVersion(props, versionFile);
    fail("Test failure in testVerifyLayOut");
  } catch (InconsistentStorageStateException ex) {
    GenericTestUtils.assertExceptionContains("Invalid layOutVersion.", ex);
  }
}
 
Example 30
Source Project: big-c   Source File: TestJournal.java    License: Apache License 2.0
@Test (timeout = 10000)
public void testJournalLocking() throws Exception {
  Assume.assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
  StorageDirectory sd = journal.getStorage().getStorageDir(0);
  File lockFile = new File(sd.getRoot(), Storage.STORAGE_FILE_LOCK);
  
  // Journal should be locked, since the format() call locks it.
  GenericTestUtils.assertExists(lockFile);

  journal.newEpoch(FAKE_NSINFO,  1);
  try {
    new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
        mockErrorReporter);
    fail("Did not fail to create another journal in same dir");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "Cannot lock storage", ioe);
  }
  
  journal.close();
  
  // Journal should no longer be locked after the close() call.
  // Hence, should be able to create a new Journal in the same dir.
  Journal journal2 = new Journal(conf, TEST_LOG_DIR, JID,
      StartupOption.REGULAR, mockErrorReporter);
  journal2.newEpoch(FAKE_NSINFO, 2);
  journal2.close();
}