Java Code Examples for org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset#setFactory()

The following examples show how to use org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset#setFactory(). Each example comes from an open source project; the source file and license are listed above the code.
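All of the examples below share one pattern: call SimulatedFSDataset.setFactory(conf) on a Configuration before building a MiniDFSCluster, so the DataNodes started from that configuration use a simulated, in-memory block store instead of local disk. As a minimal sketch of that pattern (the class name SimulatedFsDatasetUsageSketch and the file path are illustrative assumptions, not taken from any of the projects below):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;

public class SimulatedFsDatasetUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();

    // Register the simulated FsDataset factory so DataNodes started from
    // this configuration keep block data in memory rather than on disk.
    SimulatedFSDataset.setFactory(conf);

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .build();
    FileSystem fs = cluster.getFileSystem();
    try {
      // Write a small file against the simulated storage and check its length.
      Path file = new Path("/simulated-example.dat");
      FSDataOutputStream out = fs.create(file);
      out.write(new byte[] {1, 2, 3, 4});
      out.close();
      System.out.println("File length: " + fs.getFileStatus(file).getLen());
    } finally {
      fs.close();
      cluster.shutdown();
    }
  }
}

Because the dataset is simulated, block metadata (such as lengths and capacity) is tracked but real file contents are not persisted, which is why the tests below typically guard the call behind a simulatedStorage flag.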
Example 1
Source File: TestPread.java    From big-c with Apache License 2.0
private void dfsPreadTest(Configuration conf, boolean disableTransferTo, boolean verifyChecksum)
    throws IOException {
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
  conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
  // Set short retry timeouts so this test runs faster
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  if (disableTransferTo) {
    conf.setBoolean("dfs.datanode.transferTo.allowed", false);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fileSys = cluster.getFileSystem();
  fileSys.setVerifyChecksum(verifyChecksum);
  try {
    Path file1 = new Path("preadtest.dat");
    writeFile(fileSys, file1);
    pReadFile(fileSys, file1);
    datanodeRestartTest(cluster, fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example 2
Source File: TestFileAppend.java    From hadoop with Apache License 2.0
/**
 * FileNotFoundException is expected for appending to a non-existing file
 * 
 * @throws FileNotFoundException as the result
 */
@Test(expected = FileNotFoundException.class)
public void testFileNotFound() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    Path file1 = new Path("/nonexistingfile.dat");
    fs.append(file1);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 3
Source File: TestFileAppend4.java    From hadoop with Apache License 2.0
@Before
public void setUp() throws Exception {
  this.conf = new Configuration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }

  // lower heartbeat interval for fast recognition of DN death
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
  // handle under-replicated blocks quickly (for replication asserts)
  conf.setInt(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 5);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
  
  // handle failures in the DFSClient pipeline quickly
  // (for cluster.shutdown(); fs.close() idiom)
  conf.setInt("ipc.client.connect.max.retries", 1);
}
 
Example 4
Source File: TestFileAppend4.java    From big-c with Apache License 2.0
@Before
public void setUp() throws Exception {
  this.conf = new Configuration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }

  // lower heartbeat interval for fast recognition of DN death
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
  // handle under-replicated blocks quickly (for replication asserts)
  conf.setInt(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 5);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
  
  // handle failures in the DFSClient pipeline quickly
  // (for cluster.shutdown(); fs.close() idiom)
  conf.setInt("ipc.client.connect.max.retries", 1);
}
 
Example 5
Source File: TestSmallBlock.java    From big-c with Apache License 2.0
/**
 * Tests small block size in DFS.
 */
@Test
public void testSmallBlock() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  conf.set(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, "1");
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path file1 = new Path("smallblocktest.dat");
    writeFile(fileSys, file1);
    checkFile(fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example 6
Source File: TestFileAppend.java    From big-c with Apache License 2.0
/**
 * FileNotFoundException is expected for appending to a non-existing file
 * 
 * @throws FileNotFoundException as the result
 */
@Test(expected = FileNotFoundException.class)
public void testFileNotFound() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    Path file1 = new Path("/nonexistingfile.dat");
    fs.append(file1);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 7
Source File: TestBalancer.java    From big-c with Apache License 2.0
static void initConf(Configuration conf) {
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);
  SimulatedFSDataset.setFactory(conf);
  conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
}
 
Example 8
Source File: TestFileCreation.java    From big-c with Apache License 2.0
/**
 * Test that all open files are closed when client dies abnormally.
 */
@Test
public void testDFSClientDeath() throws IOException, InterruptedException {
  Configuration conf = new HdfsConfiguration();
  System.out.println("Testing adbornal client death.");
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  DFSClient dfsclient = dfs.dfs;
  try {

    // create a new file in home directory. Do not close it.
    //
    Path file1 = new Path("/clienttest.dat");
    FSDataOutputStream stm = createFile(fs, file1, 1);
    System.out.println("Created file clienttest.dat");

    // write to file
    writeFile(stm);

    // close the dfsclient before closing the output stream.
    // This should close all existing files.
    dfsclient.close();

    // reopen file system and verify that file exists.
    assertTrue(file1 + " does not exist.", 
        AppendTestUtil.createHdfsWithDifferentUsername(conf).exists(file1));
  } finally {
    cluster.shutdown();
  }
}
 
Example 9
Source File: TestFileCreation.java    From big-c with Apache License 2.0
/**
 * Test deleteOnExit
 */
@Test
public void testDeleteOnExit() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  FileSystem localfs = FileSystem.getLocal(conf);

  try {

    // Creates files in HDFS and local file system.
    //
    Path file1 = new Path("filestatus.dat");
    Path file2 = new Path("filestatus2.dat");
    Path file3 = new Path("filestatus3.dat");
    FSDataOutputStream stm1 = createFile(fs, file1, 1);
    FSDataOutputStream stm2 = createFile(fs, file2, 1);
    FSDataOutputStream stm3 = createFile(localfs, file3, 1);
    System.out.println("DeleteOnExit: Created files.");

    // write to files and close them. Purposely, do not write to file2.
    writeFile(stm1);
    writeFile(stm3);
    stm1.close();
    stm2.close();
    stm3.close();

    // set delete on exit flag on files.
    fs.deleteOnExit(file1);
    fs.deleteOnExit(file2);
    localfs.deleteOnExit(file3);

    // close the file system. This should make the above files
    // disappear.
    fs.close();
    localfs.close();
    fs = null;
    localfs = null;

    // reopen file system and verify that file does not exist.
    fs = cluster.getFileSystem();
    localfs = FileSystem.getLocal(conf);

    assertTrue(file1 + " still exists inspite of deletOnExit set.",
               !fs.exists(file1));
    assertTrue(file2 + " still exists inspite of deletOnExit set.",
               !fs.exists(file2));
    assertTrue(file3 + " still exists inspite of deletOnExit set.",
               !localfs.exists(file3));
    System.out.println("DeleteOnExit successful.");

  } finally {
    IOUtils.closeStream(fs);
    IOUtils.closeStream(localfs);
    cluster.shutdown();
  }
}
 
Example 10
Source File: TestShortCircuitLocalRead.java    From big-c with Apache License 2.0
@Test(timeout=10000)
public void testSkipWithVerifyChecksum() throws IOException {
  int size = blockSize;
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, false);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      "/tmp/testSkipWithVerifyChecksum._PORT");
  DomainSocket.disableBindPathValidation();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    // check that / exists
    Path path = new Path("/");
    assertTrue("/ should be a directory", fs.getFileStatus(path)
        .isDirectory() == true);
    
    byte[] fileData = AppendTestUtil.randomBytes(seed, size*3);
    // create a new file in home directory. Do not close it.
    Path file1 = new Path("filelocal.dat");
    FSDataOutputStream stm = createFile(fs, file1, 1);

    // write to file
    stm.write(fileData);
    stm.close();
    
    // now test the skip function
    FSDataInputStream instm = fs.open(file1);
    byte[] actual = new byte[fileData.length];
    // read something from the block first, otherwise BlockReaderLocal.skip()
    // will not be invoked
    int nread = instm.read(actual, 0, 3);
    long skipped = 2*size+3;
    instm.seek(skipped);
    nread = instm.read(actual, (int)(skipped + nread), 3);
    instm.close();
      
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 11
Source File: TestShortCircuitLocalRead.java    From big-c with Apache License 2.0
/**
 * Test that file data can be read by reading the block file
 * directly from the local store.
 */
public void doTestShortCircuitReadImpl(boolean ignoreChecksum, int size,
    int readOffset, String shortCircuitUser, String readingUser,
    boolean legacyShortCircuitFails) throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
      ignoreChecksum);
  // Set a random client context name so that we don't share a cache with
  // other invocations of this function.
  conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,
      UUID.randomUUID().toString());
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(),
        "TestShortCircuitLocalRead._PORT.sock").getAbsolutePath());
  if (shortCircuitUser != null) {
    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
        shortCircuitUser);
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
  }
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    // check that / exists
    Path path = new Path("/");
    assertTrue("/ should be a directory", fs.getFileStatus(path)
        .isDirectory() == true);
    
    byte[] fileData = AppendTestUtil.randomBytes(seed, size);
    Path file1 = fs.makeQualified(new Path("filelocal.dat"));
    FSDataOutputStream stm = createFile(fs, file1, 1);
    stm.write(fileData);
    stm.close();
    
    URI uri = cluster.getURI();
    checkFileContent(uri, file1, fileData, readOffset, readingUser, conf,
        legacyShortCircuitFails);
    checkFileContentDirect(uri, file1, fileData, readOffset, readingUser,
        conf, legacyShortCircuitFails);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 12
Source File: TestFileCreation.java    From big-c with Apache License 2.0
/**
 * Test if file creation and disk space consumption works right
 * @param netIf the local interface, if any, clients should use to access DNs
 * @param useDnHostname whether the client should contact DNs by hostname
 */
public void checkFileCreation(String netIf, boolean useDnHostname)
    throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (netIf != null) {
    conf.set(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES, netIf);
  }
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, useDnHostname);
  if (useDnHostname) {
    // Since the mini cluster only listens on the loopback we have to
    // ensure the hostname used to access DNs maps to the loopback. We
    // do this by telling the DN to advertise localhost as its hostname
    // instead of the default hostname.
    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
  }
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .checkDataNodeHostConfig(true)
    .build();
  FileSystem fs = cluster.getFileSystem();
  try {

    //
    // check that / exists
    //
    Path path = new Path("/");
    System.out.println("Path : \"" + path.toString() + "\"");
    System.out.println(fs.getFileStatus(path).isDirectory()); 
    assertTrue("/ should be a directory", 
               fs.getFileStatus(path).isDirectory());

    //
    // Create a directory inside /, then try to overwrite it
    //
    Path dir1 = new Path("/test_dir");
    fs.mkdirs(dir1);
    System.out.println("createFile: Creating " + dir1.getName() + 
      " for overwrite of existing directory.");
    try {
      fs.create(dir1, true); // Create path, overwrite=true
      fs.close();
      assertTrue("Did not prevent directory from being overwritten.", false);
    } catch (FileAlreadyExistsException e) {
      // expected
    }

    //
    // create a new file in home directory. Do not close it.
    //
    Path file1 = new Path("filestatus.dat");
    Path parent = file1.getParent();
    fs.mkdirs(parent);
    DistributedFileSystem dfs = (DistributedFileSystem)fs;
    dfs.setQuota(file1.getParent(), 100L, blockSize*5);
    FSDataOutputStream stm = createFile(fs, file1, 1);

    // verify that file exists in FS namespace
    assertTrue(file1 + " should be a file", 
               fs.getFileStatus(file1).isFile());
    System.out.println("Path : \"" + file1 + "\"");

    // write to file
    writeFile(stm);

    stm.close();

    // verify that file size has changed to the full size
    long len = fs.getFileStatus(file1).getLen();
    assertTrue(file1 + " should be of size " + fileSize +
               " but found to be of size " + len, 
                len == fileSize);
    
    // verify the disk space the file occupied
    long diskSpace = dfs.getContentSummary(file1.getParent()).getLength();
    assertEquals(file1 + " should take " + fileSize + " bytes disk space " +
        "but found to take " + diskSpace + " bytes", fileSize, diskSpace);
    
    // Check storage usage 
    // can't check capacities for real storage since the OS file system may be changing under us.
    if (simulatedStorage) {
      DataNode dn = cluster.getDataNodes().get(0);
      FsDatasetSpi<?> dataset = DataNodeTestUtils.getFSDataset(dn);
      assertEquals(fileSize, dataset.getDfsUsed());
      assertEquals(SimulatedFSDataset.DEFAULT_CAPACITY-fileSize,
          dataset.getRemaining());
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example 13
Source File: TestLargeBlock.java    From hadoop with Apache License 2.0
/**
 * Test that we can write to and read from large blocks
 * @param blockSize size of the block
 * @throws IOException in case of errors
 */
public void runTest(final long blockSize) throws IOException {

  // write a file that is slightly larger than 1 block
  final long fileSize = blockSize + 1L;

  Configuration conf = new Configuration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {

    // create a new file in test data directory
    Path file1 = new Path("/tmp/TestLargeBlock", blockSize + ".dat");
    FSDataOutputStream stm = createFile(fs, file1, 1, blockSize);
    LOG.info("File " + file1 + " created with file size " +
        fileSize +
        " blocksize " + blockSize);

    // verify that file exists in FS namespace
    assertTrue(file1 + " should be a file", 
                fs.getFileStatus(file1).isFile());

    // write to file
    writeFile(stm, fileSize);
    LOG.info("File " + file1 + " written to.");

    // close file
    stm.close();
    LOG.info("File " + file1 + " closed.");

    // Make sure a client can read it
    checkFullFile(fs, file1, fileSize);

    // verify that file size has changed
    long len = fs.getFileStatus(file1).getLen();
    assertTrue(file1 + " should be of size " +  fileSize +
               " but found to be of size " + len, 
                len == fileSize);

  } finally {
    cluster.shutdown();
  }
}
 
Example 14
Source File: TestFileCreation.java    From hadoop with Apache License 2.0
/**
 * Test that the filesystem removes the last block from a file if its
 * lease expires.
 */
@Test
public void testFileCreationError2() throws IOException {
  long leasePeriod = 1000;
  System.out.println("testFileCreationError2 start");
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  // create cluster
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = cluster.getFileSystem();
    DFSClient client = dfs.dfs;

    // create a new file.
    //
    Path file1 = new Path("/filestatus.dat");
    createFile(dfs, file1, 1);
    System.out.println("testFileCreationError2: "
                       + "Created file filestatus.dat with one replicas.");

    LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                file1.toString(), 0, Long.MAX_VALUE);
    System.out.println("testFileCreationError2: "
        + "The file has " + locations.locatedBlockCount() + " blocks.");

    // add one block to the file
    LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
        client.clientName, null, null, INodeId.GRANDFATHER_INODE_ID, null);
    System.out.println("testFileCreationError2: "
        + "Added block " + location.getBlock());

    locations = client.getNamenode().getBlockLocations(file1.toString(), 
                                                  0, Long.MAX_VALUE);
    int count = locations.locatedBlockCount();
    System.out.println("testFileCreationError2: "
        + "The file now has " + count + " blocks.");
    
    // set the soft and hard limit to be 1 second so that the
    // namenode triggers lease recovery
    cluster.setLeasePeriod(leasePeriod, leasePeriod);

    // wait for the lease to expire
    try {
      Thread.sleep(5 * leasePeriod);
    } catch (InterruptedException e) {
    }

    // verify that the last block was synchronized.
    locations = client.getNamenode().getBlockLocations(file1.toString(), 
                                                  0, Long.MAX_VALUE);
    System.out.println("testFileCreationError2: "
        + "locations = " + locations.locatedBlockCount());
    assertEquals(0, locations.locatedBlockCount());
    System.out.println("testFileCreationError2 successful");
  } finally {
    IOUtils.closeStream(dfs);
    cluster.shutdown();
  }
}
 
Example 15
Source File: TestFileCreation.java    From hadoop with Apache License 2.0
/**
 * Test that a file which is open for write is overwritten by another
 * client. Regression test for HDFS-3755.
 */
@Test
public void testOverwriteOpenForWrite() throws Exception {
  Configuration conf = new HdfsConfiguration();
  SimulatedFSDataset.setFactory(conf);
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();

  UserGroupInformation otherUgi = UserGroupInformation.createUserForTesting(
      "testuser", new String[]{"testgroup"});
  FileSystem fs2 = otherUgi.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(cluster.getConfiguration(0));
    }
  });

  String metricsName = RPC_DETAILED_METRICS + cluster.getNameNodePort();

  try {
    Path p = new Path("/testfile");
    FSDataOutputStream stm1 = fs.create(p);
    stm1.write(1);

    assertCounter("CreateNumOps", 1L, getMetrics(metricsName));

    // Create file again without overwrite
    try {
      fs2.create(p, false);
      fail("Did not throw!");
    } catch (IOException abce) {
      GenericTestUtils.assertExceptionContains("Failed to CREATE_FILE", abce);
    }
    assertCounter("AlreadyBeingCreatedExceptionNumOps",
        1L, getMetrics(metricsName));
    FSDataOutputStream stm2 = fs2.create(p, true);
    stm2.write(2);
    stm2.close();
    
    try {
      stm1.close();
      fail("Should have exception closing stm1 since it was deleted");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains("No lease on /testfile", ioe);
      GenericTestUtils.assertExceptionContains("File does not exist.", ioe);
    }
    
  } finally {
    IOUtils.closeStream(fs);
    IOUtils.closeStream(fs2);
    cluster.shutdown();
  }
}
 
Example 16
Source File: TestFileCreation.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Test deleteOnExit
 */
@Test
public void testDeleteOnExit() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  FileSystem localfs = FileSystem.getLocal(conf);

  try {

    // Creates files in HDFS and local file system.
    //
    Path file1 = new Path("filestatus.dat");
    Path file2 = new Path("filestatus2.dat");
    Path file3 = new Path("filestatus3.dat");
    FSDataOutputStream stm1 = createFile(fs, file1, 1);
    FSDataOutputStream stm2 = createFile(fs, file2, 1);
    FSDataOutputStream stm3 = createFile(localfs, file3, 1);
    System.out.println("DeleteOnExit: Created files.");

    // write to files and close them. Purposely, do not write to file2.
    writeFile(stm1);
    writeFile(stm3);
    stm1.close();
    stm2.close();
    stm3.close();

    // set delete on exit flag on files.
    fs.deleteOnExit(file1);
    fs.deleteOnExit(file2);
    localfs.deleteOnExit(file3);

    // close the file system. This should make the above files
    // disappear.
    fs.close();
    localfs.close();
    fs = null;
    localfs = null;

    // reopen file system and verify that file does not exist.
    fs = cluster.getFileSystem();
    localfs = FileSystem.getLocal(conf);

    assertTrue(file1 + " still exists inspite of deletOnExit set.",
               !fs.exists(file1));
    assertTrue(file2 + " still exists inspite of deletOnExit set.",
               !fs.exists(file2));
    assertTrue(file3 + " still exists inspite of deletOnExit set.",
               !localfs.exists(file3));
    System.out.println("DeleteOnExit successful.");

  } finally {
    IOUtils.closeStream(fs);
    IOUtils.closeStream(localfs);
    cluster.shutdown();
  }
}
 
Example 17
Source File: TestFileCreation.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Test if file creation and disk space consumption works right
 * @param netIf the local interface, if any, clients should use to access DNs
 * @param useDnHostname whether the client should contact DNs by hostname
 */
public void checkFileCreation(String netIf, boolean useDnHostname)
    throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (netIf != null) {
    conf.set(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES, netIf);
  }
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, useDnHostname);
  if (useDnHostname) {
    // Since the mini cluster only listens on the loopback we have to
    // ensure the hostname used to access DNs maps to the loopback. We
    // do this by telling the DN to advertise localhost as its hostname
    // instead of the default hostname.
    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
  }
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .checkDataNodeHostConfig(true)
    .build();
  FileSystem fs = cluster.getFileSystem();
  try {

    //
    // check that / exists
    //
    Path path = new Path("/");
    System.out.println("Path : \"" + path.toString() + "\"");
    System.out.println(fs.getFileStatus(path).isDirectory()); 
    assertTrue("/ should be a directory", 
               fs.getFileStatus(path).isDirectory());

    //
    // Create a directory inside /, then try to overwrite it
    //
    Path dir1 = new Path("/test_dir");
    fs.mkdirs(dir1);
    System.out.println("createFile: Creating " + dir1.getName() + 
      " for overwrite of existing directory.");
    try {
      fs.create(dir1, true); // Create path, overwrite=true
      fs.close();
      assertTrue("Did not prevent directory from being overwritten.", false);
    } catch (FileAlreadyExistsException e) {
      // expected
    }

    //
    // create a new file in home directory. Do not close it.
    //
    Path file1 = new Path("filestatus.dat");
    Path parent = file1.getParent();
    fs.mkdirs(parent);
    DistributedFileSystem dfs = (DistributedFileSystem)fs;
    dfs.setQuota(file1.getParent(), 100L, blockSize*5);
    FSDataOutputStream stm = createFile(fs, file1, 1);

    // verify that file exists in FS namespace
    assertTrue(file1 + " should be a file", 
               fs.getFileStatus(file1).isFile());
    System.out.println("Path : \"" + file1 + "\"");

    // write to file
    writeFile(stm);

    stm.close();

    // verify that file size has changed to the full size
    long len = fs.getFileStatus(file1).getLen();
    assertTrue(file1 + " should be of size " + fileSize +
               " but found to be of size " + len, 
                len == fileSize);
    
    // verify the disk space the file occupied
    long diskSpace = dfs.getContentSummary(file1.getParent()).getLength();
    assertEquals(file1 + " should take " + fileSize + " bytes disk space " +
        "but found to take " + diskSpace + " bytes", fileSize, diskSpace);
    
    // Check storage usage 
    // can't check capacities for real storage since the OS file system may be changing under us.
    if (simulatedStorage) {
      DataNode dn = cluster.getDataNodes().get(0);
      FsDatasetSpi<?> dataset = DataNodeTestUtils.getFSDataset(dn);
      assertEquals(fileSize, dataset.getDfsUsed());
      assertEquals(SimulatedFSDataset.DEFAULT_CAPACITY-fileSize,
          dataset.getRemaining());
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example 18
Source File: TestReplication.java    From big-c with Apache License 2.0
/**
 * Tests replication in DFS.
 */
public void runReplication(boolean simulated) throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
  if (simulated) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                             .numDataNodes(numDatanodes)
                                             .racks(racks).build();
  cluster.waitActive();
  
  InetSocketAddress addr = new InetSocketAddress("localhost",
                                                 cluster.getNameNodePort());
  DFSClient client = new DFSClient(addr, conf);
  
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  assertEquals("Number of Datanodes ", numDatanodes, info.length);
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path file1 = new Path("/smallblocktest.dat");
    writeFile(fileSys, file1, 3);
    checkFile(fileSys, file1, 3);
    cleanupFile(fileSys, file1);
    writeFile(fileSys, file1, 10);
    checkFile(fileSys, file1, 10);
    cleanupFile(fileSys, file1);
    writeFile(fileSys, file1, 4);
    checkFile(fileSys, file1, 4);
    cleanupFile(fileSys, file1);
    writeFile(fileSys, file1, 1);
    checkFile(fileSys, file1, 1);
    cleanupFile(fileSys, file1);
    writeFile(fileSys, file1, 2);
    checkFile(fileSys, file1, 2);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example 19
Source File: TestShortCircuitLocalRead.java    From hadoop with Apache License 2.0 4 votes vote down vote up
@Test(timeout=10000)
public void testSkipWithVerifyChecksum() throws IOException {
  int size = blockSize;
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, false);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      "/tmp/testSkipWithVerifyChecksum._PORT");
  DomainSocket.disableBindPathValidation();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    // check that / exists
    Path path = new Path("/");
    assertTrue("/ should be a directory", fs.getFileStatus(path)
        .isDirectory() == true);
    
    byte[] fileData = AppendTestUtil.randomBytes(seed, size*3);
    // create a new file in home directory. Do not close it.
    Path file1 = new Path("filelocal.dat");
    FSDataOutputStream stm = createFile(fs, file1, 1);

    // write to file
    stm.write(fileData);
    stm.close();
    
    // now test the skip function
    FSDataInputStream instm = fs.open(file1);
    byte[] actual = new byte[fileData.length];
    // read something from the block first, otherwise BlockReaderLocal.skip()
    // will not be invoked
    int nread = instm.read(actual, 0, 3);
    long skipped = 2*size+3;
    instm.seek(skipped);
    nread = instm.read(actual, (int)(skipped + nread), 3);
    instm.close();
      
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 20
Source File: TestShortCircuitLocalRead.java    From hadoop with Apache License 2.0
/**
 * Test that file data can be read by reading the block file
 * directly from the local store.
 */
public void doTestShortCircuitReadImpl(boolean ignoreChecksum, int size,
    int readOffset, String shortCircuitUser, String readingUser,
    boolean legacyShortCircuitFails) throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
      ignoreChecksum);
  // Set a random client context name so that we don't share a cache with
  // other invocations of this function.
  conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,
      UUID.randomUUID().toString());
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(),
        "TestShortCircuitLocalRead._PORT.sock").getAbsolutePath());
  if (shortCircuitUser != null) {
    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
        shortCircuitUser);
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
  }
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    // check that / exists
    Path path = new Path("/");
    assertTrue("/ should be a directory", fs.getFileStatus(path)
        .isDirectory() == true);
    
    byte[] fileData = AppendTestUtil.randomBytes(seed, size);
    Path file1 = fs.makeQualified(new Path("filelocal.dat"));
    FSDataOutputStream stm = createFile(fs, file1, 1);
    stm.write(fileData);
    stm.close();
    
    URI uri = cluster.getURI();
    checkFileContent(uri, file1, fileData, readOffset, readingUser, conf,
        legacyShortCircuitFails);
    checkFileContentDirect(uri, file1, fileData, readOffset, readingUser,
        conf, legacyShortCircuitFails);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}