Java Code Examples for org.apache.hadoop.fs.FileSystem#close()

The following examples show how to use org.apache.hadoop.fs.FileSystem#close(). They are taken from open-source projects; the source file and project are noted above each example.
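Before the examples, here is a minimal sketch of the typical pattern: obtain a FileSystem, use it, and call close() in a finally block. The class name, URI, and path below are hypothetical placeholders chosen for illustration. Note that FileSystem.get() may return a cached instance shared with other callers, which is why Example 1 sets fs.hdfs.impl.disable.cache to true before closing.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemCloseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Uncomment to get a private (non-cached) instance that is safe to close
    // without affecting other users of the same filesystem URI:
    // conf.set("fs.hdfs.impl.disable.cache", "true");
    URI uri = URI.create("hdfs://localhost:8020"); // hypothetical cluster address
    FileSystem fs = FileSystem.get(uri, conf);
    try {
      fs.mkdirs(new Path("/tmp/example"));
    } finally {
      // Releases the client's resources; after close() the instance must not be reused.
      fs.close();
    }
  }
}
 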
Example 1
Source File: TestHttpFSWithKerberos.java    From big-c with Apache License 2.0
@SuppressWarnings("deprecation")
private void testDelegationTokenWithFS(Class fileSystemClass)
  throws Exception {
  createHttpFSServer();
  Configuration conf = new Configuration();
  conf.set("fs.webhdfs.impl", fileSystemClass.getName());
  conf.set("fs.hdfs.impl.disable.cache", "true");
  URI uri = new URI( "webhdfs://" +
                     TestJettyHelper.getJettyURL().toURI().getAuthority());
  FileSystem fs = FileSystem.get(uri, conf);
  Token<?> tokens[] = fs.addDelegationTokens("foo", null);
  fs.close();
  Assert.assertEquals(1, tokens.length);
  fs = FileSystem.get(uri, conf);
  ((DelegationTokenRenewer.Renewable) fs).setDelegationToken(tokens[0]);
  fs.listStatus(new Path("/"));
  fs.close();
}
 
Example 2
Source File: TestInitializeSharedEdits.java    From big-c with Apache License 2.0
private void assertCanStartHaNameNodes(String pathSuffix)
    throws ServiceFailedException, IOException, URISyntaxException,
    InterruptedException {
  // Now should be able to start both NNs. Pass "false" here so that we don't
  // try to waitActive on all NNs, since the second NN doesn't exist yet.
  cluster.restartNameNode(0, false);
  cluster.restartNameNode(1, true);
  
  // Make sure HA is working.
  cluster.getNameNode(0).getRpcServer().transitionToActive(
      new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
  FileSystem fs = null;
  try {
    Path newPath = new Path(TEST_PATH, pathSuffix);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(newPath));
    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
        cluster.getNameNode(1));
    assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
        newPath.toString(), false).isDir());
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}
 
Example 3
Source File: TestInitializeSharedEdits.java    From hadoop with Apache License 2.0
private void assertCanStartHaNameNodes(String pathSuffix)
    throws ServiceFailedException, IOException, URISyntaxException,
    InterruptedException {
  // Now should be able to start both NNs. Pass "false" here so that we don't
  // try to waitActive on all NNs, since the second NN doesn't exist yet.
  cluster.restartNameNode(0, false);
  cluster.restartNameNode(1, true);
  
  // Make sure HA is working.
  cluster.getNameNode(0).getRpcServer().transitionToActive(
      new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
  FileSystem fs = null;
  try {
    Path newPath = new Path(TEST_PATH, pathSuffix);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(newPath));
    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
        cluster.getNameNode(1));
    assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
        newPath.toString(), false).isDir());
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}
 
Example 4
Source File: TestFileSystemCaching.java    From big-c with Apache License 2.0
@Test
public void testDeleteOnExit() throws IOException {
  FileSystem mockFs = mock(FileSystem.class);
  FileSystem fs = new FilterFileSystem(mockFs);
  Path path = new Path("/a");

  // delete on close if path does exist
  when(mockFs.getFileStatus(eq(path))).thenReturn(new FileStatus());
  assertTrue(fs.deleteOnExit(path));
  verify(mockFs).getFileStatus(eq(path));
  reset(mockFs);
  when(mockFs.getFileStatus(eq(path))).thenReturn(new FileStatus());
  fs.close();
  verify(mockFs).getFileStatus(eq(path));
  verify(mockFs).delete(eq(path), eq(true));
}
 
Example 5
Source File: YarnJobDescriptor.java    From sylph with Apache License 2.0
@Override
public void run()
{
    LOG.info("Cancelling deployment from Deployment Failure Hook");
    failSessionDuringDeployment(yarnClient, yarnApplication);
    LOG.info("Deleting files in {}.", yarnFilesDir);
    try {
        FileSystem fs = FileSystem.get(yarnClient.getConfig());

        if (!fs.delete(yarnFilesDir, true)) {
            throw new IOException("Deleting files in " + yarnFilesDir + " was unsuccessful");
        }

        fs.close();
    }
    catch (IOException e) {
        LOG.error("Failed to delete Flink Jar and configuration files in HDFS", e);
    }
}
 
Example 6
Source File: FrameworkUtils.java    From data-polygamy with BSD 3-Clause "New" or "Revised" License
public static void createDir(String dir, Configuration conf, boolean s3) throws IOException {
    if (s3) {
        Path path = new Path(dir);
        FileSystem fs = FileSystem.get(path.toUri(), conf);
        
        if (!fs.exists(path))
            fs.mkdirs(path);
        
        fs.close();
    } else {
        FileSystem hdfs = FileSystem.get(new Configuration());
        Path hdfsFile = new Path(hdfs.getHomeDirectory() + "/" + dir);

        if (!hdfs.exists(hdfsFile))
            hdfs.mkdirs(hdfsFile);
    }
}
 
Example 7
Source File: TestShortCircuitLocalRead.java    From big-c with Apache License 2.0
/**
 * Test that file data can be read by reading the block
 * through RemoteBlockReader.
 * @throws IOException
 */
public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum, int size, String shortCircuitUser,
                                                        int readOffset, boolean shortCircuitFails) throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
           .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  // check that / exists
  Path path = new Path("/");
  URI uri = cluster.getURI();
  assertTrue("/ should be a directory", fs.getFileStatus(path)
              .isDirectory() == true);

  byte[] fileData = AppendTestUtil.randomBytes(seed, size);
  Path file1 = new Path("filelocal.dat");
  FSDataOutputStream stm = createFile(fs, file1, 1);

  stm.write(fileData);
  stm.close();
  try {
    checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser, 
        conf, shortCircuitFails);
    // RemoteBlockReader does not support read(ByteBuffer bf)
    assertTrue("RemoteBlockReader unsupported method read(ByteBuffer bf) error",
                  checkUnsupportedMethod(fs, file1, fileData, readOffset));
  } catch(IOException e) {
    throw new IOException("doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
  } catch(InterruptedException inEx) {
    throw inEx;
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 8
Source File: BaseTestHttpFSWith.java    From big-c with Apache License 2.0
private void testOpen() throws Exception {
  FileSystem fs = FileSystem.get(getProxiedFSConf());
  Path path = new Path(getProxiedFSTestDir(), "foo.txt");
  OutputStream os = fs.create(path);
  os.write(1);
  os.close();
  fs.close();
  fs = getHttpFSFileSystem();
  InputStream is = fs.open(new Path(path.toUri().getPath()));
  Assert.assertEquals(is.read(), 1);
  is.close();
  fs.close();
}
 
Example 9
Source File: DremioFileSystemCache.java    From dremio-oss with Apache License 2.0
/**
 * Close all FileSystem instances in the Cache.
 * @param onlyAutomatic only close those that are marked for automatic closing
 */
public synchronized void closeAll(boolean onlyAutomatic) throws IOException {
  List<IOException> exceptions = new ArrayList<>();

  // Make a copy of the keys in the map since we'll be modifying
  // the map while iterating over it, which isn't safe.
  List<Key> keys = new ArrayList<Key>();
  keys.addAll(map.keySet());

  for (Key key : keys) {
    final FileSystem fs = map.get(key);

    if (onlyAutomatic && !toAutoClose.contains(key)) {
      continue;
    }

    //remove from cache
    map.remove(key);
    toAutoClose.remove(key);

    if (fs != null) {
      try {
        fs.close();
      }
      catch(IOException ioe) {
        exceptions.add(ioe);
      }
    }
  }

  if (!exceptions.isEmpty()) {
    throw MultipleIOException.createIOException(exceptions);
  }
}
 
Example 10
Source File: BaseTestHttpFSWith.java    From big-c with Apache License 2.0
private void testMkdirs() throws Exception {
  Path path = new Path(getProxiedFSTestDir(), "foo");
  FileSystem fs = getHttpFSFileSystem();
  fs.mkdirs(path);
  fs.close();
  fs = FileSystem.get(getProxiedFSConf());
  Assert.assertTrue(fs.exists(path));
  fs.close();
}
 
Example 11
Source File: TestMiniMRDFSCaching.java    From big-c with Apache License 2.0
public void testWithDFS() throws IOException {
  MiniMRCluster mr = null;
  MiniDFSCluster dfs = null;
  FileSystem fileSys = null;
  try {
    JobConf conf = new JobConf();
    dfs = new MiniDFSCluster.Builder(conf).build();
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(2, fileSys.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fileSys);
    // run the wordcount example with caching
    TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
                                          "/testing/wc/output",
                                          "/cachedir",
                                          mr.createJobConf(),
                                          "The quick brown fox\nhas many silly\n"
                                          + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
    // launch MR cache with symlinks
    ret = MRCaching.launchMRCache("/testing/wc/input",
                                  "/testing/wc/output",
                                  "/cachedir",
                                  mr.createJobConf(),
                                  "The quick brown fox\nhas many silly\n"
                                  + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
  } finally {
    if (fileSys != null) {
      fileSys.close();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
 
Example 12
Source File: HdfsRm.java    From BigData-In-Practice with Apache License 2.0
/**
 * List all files under the given directory.
 */
public static void listFiles(String remotePathStr, boolean recursive) throws IOException {
    Path remotePath = new Path(remotePathStr);
    FileSystem fileSystem = SysUtil.getFileSystem();

    RemoteIterator<LocatedFileStatus> iterator = fileSystem.listFiles(remotePath, recursive);
    System.out.println(String.format("All files under directory '%s':", remotePathStr));
    while (iterator.hasNext()) {
        System.out.println(iterator.next());
    }
    fileSystem.close();
}
 
Example 13
Source File: TestShortCircuitLocalRead.java    From hadoop with Apache License 2.0
/**
 * Test that file data can be read by reading the block
 * through RemoteBlockReader.
 * @throws IOException
 */
public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum, int size, String shortCircuitUser,
                                                        int readOffset, boolean shortCircuitFails) throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
           .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  // check that / exists
  Path path = new Path("/");
  URI uri = cluster.getURI();
  assertTrue("/ should be a directory", fs.getFileStatus(path)
              .isDirectory() == true);

  byte[] fileData = AppendTestUtil.randomBytes(seed, size);
  Path file1 = new Path("filelocal.dat");
  FSDataOutputStream stm = createFile(fs, file1, 1);

  stm.write(fileData);
  stm.close();
  try {
    checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser, 
        conf, shortCircuitFails);
    // RemoteBlockReader does not support read(ByteBuffer bf)
    assertTrue("RemoteBlockReader unsupported method read(ByteBuffer bf) error",
                  checkUnsupportedMethod(fs, file1, fileData, readOffset));
  } catch(IOException e) {
    throw new IOException("doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
  } catch(InterruptedException inEx) {
    throw inEx;
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 14
Source File: TestHttpsFileSystem.java    From hadoop with Apache License 2.0
@Test
public void testSWebHdfsFileSystem() throws Exception {
  FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, "swebhdfs");
  final Path f = new Path("/testswebhdfs");
  FSDataOutputStream os = fs.create(f);
  os.write(23);
  os.close();
  Assert.assertTrue(fs.exists(f));
  InputStream is = fs.open(f);
  Assert.assertEquals(23, is.read());
  is.close();
  fs.close();
}
 
Example 15
Source File: TestCheckpoint.java    From hadoop with Apache License 2.0
/**
 * Test that a fault while downloading edits the first time after the 2NN
 * starts up does not prevent future checkpointing.
 */
@Test(timeout = 30000)
public void testEditFailureOnFirstCheckpoint() throws IOException {
  Configuration conf = new HdfsConfiguration();
  SecondaryNameNode secondary = null;
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    fs.mkdirs(new Path("test-file-1"));
    
    // Make sure the on-disk fsimage on the NN has txid > 0.
    FSNamesystem fsns = cluster.getNamesystem();
    fsns.enterSafeMode(false);
    fsns.saveNamespace();
    fsns.leaveSafeMode();
    
    secondary = startSecondaryNameNode(conf);

    // Cause edit rename to fail during next checkpoint
    Mockito.doThrow(new IOException("Injecting failure before edit rename"))
        .when(faultInjector).beforeEditsRename();
    
    try {
      secondary.doCheckpoint();
      fail("Fault injection failed.");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "Injecting failure before edit rename", ioe);
    }
    Mockito.reset(faultInjector);
    
    // Next checkpoint should succeed
    secondary.doCheckpoint();
  } finally {
    if (secondary != null) {
      secondary.shutdown();
    }
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
    Mockito.reset(faultInjector);
  }
}
 
Example 16
Source File: TestDFSUpgradeWithHA.java    From big-c with Apache License 2.0
/**
 * Ensure that an admin cannot finalize an HA upgrade without at least one NN
 * being active.
 */
@Test
public void testCannotFinalizeIfNoActive() throws IOException,
    URISyntaxException {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();

    File sharedDir = new File(cluster.getSharedEditsDir(0, 1));
    
    // No upgrade is in progress at the moment.
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    checkPreviousDirExistence(sharedDir, false);
    
    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));
    
    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkPreviousDirExistence(sharedDir, true);
    
    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));
    
    // Restart NN0 without the -upgrade flag, to make sure that works.
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
    cluster.restartNameNode(0, false);
    
    // Make sure we can still do FS ops after upgrading.
    cluster.transitionToActive(0);
    assertTrue(fs.mkdirs(new Path("/foo3")));
    
    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    
    // Now restart NN1 and make sure that we can do ops against that as well.
    cluster.restartNameNode(1);
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    assertTrue(fs.mkdirs(new Path("/foo4")));
    
    assertCTimesEqual(cluster);
    
    // Now there's no active NN.
    cluster.transitionToStandby(1);

    try {
      runFinalizeCommand(cluster);
      fail("Should not have been able to finalize upgrade with no NN active");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "Cannot finalize with no NameNode active", ioe);
    }
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 17
Source File: TestFileCreation.java    From big-c with Apache License 2.0
/**
 * Test deleteOnExit
 */
@Test
public void testDeleteOnExit() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  FileSystem localfs = FileSystem.getLocal(conf);

  try {

    // Creates files in HDFS and local file system.
    //
    Path file1 = new Path("filestatus.dat");
    Path file2 = new Path("filestatus2.dat");
    Path file3 = new Path("filestatus3.dat");
    FSDataOutputStream stm1 = createFile(fs, file1, 1);
    FSDataOutputStream stm2 = createFile(fs, file2, 1);
    FSDataOutputStream stm3 = createFile(localfs, file3, 1);
    System.out.println("DeleteOnExit: Created files.");

    // write to files and close. Purposely, do not close file2.
    writeFile(stm1);
    writeFile(stm3);
    stm1.close();
    stm2.close();
    stm3.close();

    // set delete on exit flag on files.
    fs.deleteOnExit(file1);
    fs.deleteOnExit(file2);
    localfs.deleteOnExit(file3);

    // close the file system. This should make the above files
    // disappear.
    fs.close();
    localfs.close();
    fs = null;
    localfs = null;

    // reopen file system and verify that file does not exist.
    fs = cluster.getFileSystem();
    localfs = FileSystem.getLocal(conf);

    assertTrue(file1 + " still exists inspite of deletOnExit set.",
               !fs.exists(file1));
    assertTrue(file2 + " still exists inspite of deletOnExit set.",
               !fs.exists(file2));
    assertTrue(file3 + " still exists inspite of deletOnExit set.",
               !localfs.exists(file3));
    System.out.println("DeleteOnExit successful.");

  } finally {
    IOUtils.closeStream(fs);
    IOUtils.closeStream(localfs);
    cluster.shutdown();
  }
}
 
Example 18
Source File: TestEncryptedTransfer.java    From hadoop with Apache License 2.0
@Test
public void testEncryptedReadWithAES() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY,
        "AES/CTR/NoPadding");
    cluster = new MiniDFSCluster.Builder(conf).build();

    FileSystem fs = getFileSystem(conf);
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    FileChecksum checksum = fs.getFileChecksum(TEST_PATH);
    fs.close();
    cluster.shutdown();

    setEncryptionConfigKeys(conf);

    cluster = new MiniDFSCluster.Builder(conf)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .format(false)
        .startupOption(StartupOption.REGULAR)
        .build();

    fs = getFileSystem(conf);
    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(SaslDataTransferServer.class));
    LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(DataTransferSaslUtil.class));
    try {
      assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
      assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
    } finally {
      logs.stopCapturing();
      logs1.stopCapturing();
    }

    fs.close();

    if (resolverClazz == null) {
      // Test client and server negotiate cipher option
      GenericTestUtils.assertMatches(logs.getOutput(),
          "Server using cipher suite");
      // Check the IOStreamPair
      GenericTestUtils.assertMatches(logs1.getOutput(),
          "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 19
Source File: TestDFSUpgradeWithHA.java    From big-c with Apache License 2.0
@Test
public void testRollbackWithJournalNodes() throws IOException,
    URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();
    
    // No upgrade is in progress at the moment.
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    checkJnPreviousDirExistence(qjCluster, false);
    
    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));

    final long cidBeforeUpgrade = getCommittedTxnIdValue(qjCluster);

    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);
    
    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));

    final long cidDuringUpgrade = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidDuringUpgrade > cidBeforeUpgrade);

    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    
    cluster.restartNameNode(1);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, true);
    checkJnPreviousDirExistence(qjCluster, true);
    assertCTimesEqual(cluster);
    
    // Shut down the NNs, but deliberately leave the JNs up and running.
    Collection<URI> nn1NameDirs = cluster.getNameDirs(0);
    cluster.shutdown();

    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, Joiner.on(",").join(nn1NameDirs));
    NameNode.doRollback(conf, false);

    final long cidAfterRollback = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidBeforeUpgrade < cidAfterRollback);
    // make sure the committedTxnId has been reset correctly after rollback
    assertTrue(cidDuringUpgrade > cidAfterRollback);

    // The rollback operation should have rolled back the first NN's local
    // dirs, and the shared dir, but not the other NN's dirs. Those have to be
    // done by bootstrapping the standby.
    checkNnPreviousDirExistence(cluster, 0, false);
    checkJnPreviousDirExistence(qjCluster, false);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}
 
Example 20
Source File: TestReadSlowDataNode.java    From RDFS with Apache License 2.0
/**
 * Test that copy on write for blocks works correctly
 * 
 * @throws NoSuchFieldException
 * @throws SecurityException
 * @throws IllegalAccessException
 * @throws IllegalArgumentException
 */
public void testSlowDn() throws IOException, SecurityException,
    NoSuchFieldException, IllegalArgumentException, IllegalAccessException {
  Configuration conf = new Configuration();
  conf.setLong("dfs.min.read.speed.bps", 1024 * 200);

  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  FileSystem fs = cluster.getFileSystem();
  FSDataInputStream in = null;
  try {

    // create a new file, write to it and close it.
    //
    Path file1 = new Path("/filestatus.dat");
    FSDataOutputStream stm = createFile(fs, file1, 2);
    writeFile(stm);
    stm.close();

    in = fs.open(file1);
    in.readByte();

    DFSInputStream dfsClientIn = findDFSClientInputStream(in);      
    Field blockReaderField = DFSInputStream.class.getDeclaredField("blockReader");
    blockReaderField.setAccessible(true);
    BlockReader blockReader = (BlockReader) blockReaderField.get(dfsClientIn);

    blockReader.setArtificialSlowdown(1000);
    blockReader.isReadLocal = false;
    blockReader.isReadRackLocal = false;
    blockReader.ENABLE_THROW_FOR_SLOW = true;
    for (int i = 0; i < 1024; i++) {
      in.readByte();
    }

    blockReader.setArtificialSlowdown(0);
    for (int i = 1024; i < fileSize - 1; i++) {
      in.readByte();
    }

    ConcurrentHashMap<DatanodeInfo, DatanodeInfo> deadNodes = getDeadNodes(dfsClientIn);
    TestCase.assertEquals(1, deadNodes.size());
  } finally {
    if (in != null) {
      in.close();
    }
    fs.close();
    cluster.shutdown();
  }
}