Java Code Examples for org.apache.hadoop.fs.FileUtil#setWritable()

The following examples show how to use org.apache.hadoop.fs.FileUtil#setWritable(). They are taken from open source projects; the originating project and source file are noted above each example.
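All of these tests follow the same basic pattern: revoke write access on a directory to force a failure in the code under test, then restore it in a finally block so the directory can be cleaned up. Below is a minimal, self-contained sketch of that pattern; the class name and scratch-directory name are hypothetical, chosen only for illustration.

import java.io.File;

import org.apache.hadoop.fs.FileUtil;

public class SetWritableSketch {
  public static void main(String[] args) {
    // Hypothetical scratch directory for the demonstration.
    File dir = new File(System.getProperty("java.io.tmpdir"), "setwritable-demo");
    dir.mkdirs();
    try {
      // Revoke write access; creating or renaming files inside the
      // directory should now fail on platforms that enforce this.
      FileUtil.setWritable(dir, false);
      System.out.println("writable after revoke? " + dir.canWrite());
    } finally {
      // Always restore write access, or fullyDelete() may be unable
      // to remove the directory's contents.
      FileUtil.setWritable(dir, true);
      FileUtil.fullyDelete(dir);
    }
  }
}

Note that several examples below guard against platform differences: directory write permissions are not enforced uniformly on Windows, so some tests skip themselves or assert Windows-specific behavior.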
Example 1
Source File: TestFileJournalManager.java    From hadoop with Apache License 2.0
/**
 * Tests that internal renames are done using native code on platforms that
 * have it.  The native rename includes more detailed information about the
 * failure, which can be useful for troubleshooting.
 */
@Test
public void testDoPreUpgradeIOError() throws IOException {
  File storageDir = new File(TestEditLog.TEST_DIR, "preupgradeioerror");
  List<URI> editUris = Collections.singletonList(storageDir.toURI());
  NNStorage storage = setupEdits(editUris, 5);
  StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
  assertNotNull(sd);
  // Make the storage directory read-only so that renaming current to previous.tmp fails.
  FileUtil.setWritable(storageDir, false);
  FileJournalManager jm = null;
  try {
    jm = new FileJournalManager(conf, sd, storage);
    exception.expect(IOException.class);
    if (NativeCodeLoader.isNativeCodeLoaded()) {
      exception.expectMessage("failure in native rename");
    }
    jm.doPreUpgrade();
  } finally {
    IOUtils.cleanup(LOG, jm);
    // Restore permissions on the storage directory so it can be deleted.
    FileUtil.setWritable(storageDir, true);
    FileUtil.fullyDelete(storageDir);
  }
}
 
Example 2
Source File: TestEditLog.java    From hadoop with Apache License 2.0
@Test
public void testFailedOpen() throws Exception {
  File logDir = new File(TEST_DIR, "testFailedOpen");
  logDir.mkdirs();
  FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);
  try {
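    // Make the log directory read-only so that opening the edit log for write fails.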
    FileUtil.setWritable(logDir, false);
    log.openForWrite();
    fail("Did no throw exception on only having a bad dir");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "too few journals successfully started", ioe);
  } finally {
    FileUtil.setWritable(logDir, true);
    log.close();
  }
}
 
Example 3
Source File: TestAtomicFileOutputStream.java    From hadoop with Apache License 2.0
@Test
public void testFailToRename() throws IOException {
  assumeTrue(Shell.WINDOWS);
  OutputStream fos = null;
  try {
    fos = new AtomicFileOutputStream(DST_FILE);
    fos.write(TEST_STRING.getBytes());
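    // Make the containing directory read-only so the rename performed by close() fails.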
    FileUtil.setWritable(TEST_DIR, false);
    exception.expect(IOException.class);
    exception.expectMessage("failure in native rename");
    try {
      fos.close();
    } finally {
      fos = null;
    }
  } finally {
    IOUtils.cleanup(null, fos);
    FileUtil.setWritable(TEST_DIR, true);
  }
}
 
Example 4
Source File: TestCheckpoint.java    From hadoop with Apache License 2.0
@Test
public void testNameDirError() throws IOException {
  LOG.info("Starting testNameDirError");
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
      .build();
  
  Collection<URI> nameDirs = cluster.getNameDirs(0);
  cluster.shutdown();
  cluster = null;
  
  for (URI nameDirUri : nameDirs) {
    File dir = new File(nameDirUri.getPath());
    
    try {
      // Simulate the mount going read-only
      FileUtil.setWritable(dir, false);
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
          .format(false).build();
      fail("NN should have failed to start with " + dir + " set unreadable");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "storage directory does not exist or is not accessible", ioe);
    } finally {
      cleanup(cluster);
      cluster = null;
      FileUtil.setWritable(dir, true);
    }
  }
}
 
Example 5
Source File: TestDataNodeVolumeFailure.java    From hadoop with Apache License 2.0
@After
public void tearDown() throws Exception {
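  // Restore write access to directories the test may have made read-only,
  // then shut down the cluster.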
  if (data_fail != null) {
    FileUtil.setWritable(data_fail, true);
  }
  if (failedDir != null) {
    FileUtil.setWritable(failedDir, true);
  }
  if (cluster != null) {
    cluster.shutdown();
  }
}
 
Example 6
Source File: TestDiskError.java    From hadoop with Apache License 2.0
/**
 * Test to check that a DN goes down when all its volumes have failed.
 */
@Test
public void testShutdown() throws Exception {
  if (System.getProperty("os.name").startsWith("Windows")) {
    /**
     * This test depends on OS not allowing file creations on a directory
     * that does not have write permissions for the user. Apparently it is 
     * not the case on Windows (at least under Cygwin), and possibly AIX.
     * This is disabled on Windows.
     */
    return;
  }
  // Bring up two more datanodes
  cluster.startDataNodes(conf, 2, true, null, null);
  cluster.waitActive();
  final int dnIndex = 0;
  String bpid = cluster.getNamesystem().getBlockPoolId();
  File storageDir = cluster.getInstanceStorageDir(dnIndex, 0);
  File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
  storageDir = cluster.getInstanceStorageDir(dnIndex, 1);
  File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
  try {
    // Make both data directories of the first datanode read-only.
    assertTrue("Couldn't chmod local vol", dir1.setReadOnly());
    assertTrue("Couldn't chmod local vol", dir2.setReadOnly());

    // Create files until the first datanode is reported down.
    DataNode dn = cluster.getDataNodes().get(dnIndex);
    for (int i = 0; dn.isDatanodeUp(); i++) {
      Path fileName = new Path("/test.txt"+i);
      DFSTestUtil.createFile(fs, fileName, 1024, (short)2, 1L);
      DFSTestUtil.waitReplication(fs, fileName, (short)2);
      fs.delete(fileName, true);
    }
  } finally {
    // Restore the directories' original permissions.
    FileUtil.setWritable(dir1, true);
    FileUtil.setWritable(dir2, true);
  }
}
 
Example 7
Source File: TemporarySocketDirectory.java    From hadoop with Apache License 2.0
public TemporarySocketDirectory() {
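  // Create a uniquely named directory for socket files under java.io.tmpdir.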
  String tmp = System.getProperty("java.io.tmpdir", "/tmp");
  dir = new File(tmp, "socks." + (System.currentTimeMillis() +
      "." + (new Random().nextInt())));
  dir.mkdirs();
  FileUtil.setWritable(dir, true);
}
 
Example 8
Source File: TestCryptoStreamsForLocalFS.java    From hadoop with Apache License 2.0
@After
public void cleanUp() throws IOException {
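  // Restore write access first so fullyDelete() can remove the directory.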
  FileUtil.setWritable(base, true);
  FileUtil.fullyDelete(base);
  assertFalse(base.exists());
}
 