Java Code Examples for org.apache.hadoop.fs.FSDataInputStream#setDropBehind()

The following examples show how to use org.apache.hadoop.fs.FSDataInputStream#setDropBehind(). You can vote up the examples you find useful or vote down the ones you don't, and you can go to the original project or source file by following the link above each example. Related API usage is listed on the sidebar.
Example 1
Source File: TestCachingStrategy.java — from the Apache Hadoop project (Apache License 2.0)
/**
 * Reads up to {@code length} bytes from an HDFS file, optionally applying a
 * drop-behind caching hint to the stream before reading.
 *
 * @param fs         the file system to read from
 * @param p          path of the file to read
 * @param length     maximum number of bytes to read
 * @param dropBehind drop-behind hint to set on the stream, or {@code null}
 *                   to leave the stream's default caching strategy
 * @return the number of bytes actually read (may be less than {@code length}
 *         if EOF is reached first)
 * @throws Exception if the file cannot be opened or read
 */
static long readHdfsFile(FileSystem fs, Path p, long length,
    Boolean dropBehind) throws Exception {
  FSDataInputStream fis = null;
  long totalRead = 0;
  try {
    fis = fs.open(p);
    if (dropBehind != null) {
      fis.setDropBehind(dropBehind);
    }
    byte[] buf = new byte[8192]; // was 8196 (likely a typo); use 8 KiB
    while (length > 0) {
      int amt = (length > buf.length) ? buf.length : (int) length;
      int ret = fis.read(buf, 0, amt);
      if (ret == -1) {
        return totalRead; // EOF before 'length' bytes were read
      }
      totalRead += ret;
      length -= ret;
    }
    // BUG FIX: the original fell through to a bare
    // 'throw new RuntimeException("unreachable")' after a complete read of
    // exactly 'length' bytes, so a fully successful read always threw.
    return totalRead;
  } catch (IOException e) {
    // BUG FIX: the original logged and swallowed the exception, then ran on
    // into the "unreachable" throw with the cause lost. Log and rethrow so
    // callers see the real failure (signature already declares Exception).
    LOG.error("ioexception", e);
    throw e;
  } finally {
    if (fis != null) {
      fis.close();
    }
  }
}
 
Example 2
Source File: TestCachingStrategy.java — from the Apache Hadoop project (Apache License 2.0)
/**
 * Regression test: seeking must still work after setDropBehind() has
 * discarded the stream's current BlockReader.
 */
@Test(timeout=120000)
public void testSeekAfterSetDropBehind() throws Exception {
  LOG.info("testSeekAfterSetDropBehind");
  final String testPath = "/test";
  final int testPathLen = MAX_TEST_FILE_LEN;
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    // Bring up a single-datanode mini cluster for the test.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    createHdfsFile(fs, new Path(testPath), testPathLen, false);
    // Verify that a seek still succeeds once setDropBehind has cleared
    // the stream's BlockReader.
    try (FSDataInputStream in = fs.open(new Path(testPath))) {
      Assert.assertTrue(in.read() != -1); // forces a BlockReader to exist
      in.setDropBehind(false);            // clears the BlockReader
      in.seek(2);                         // must not fail
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 3
Source File: TestCachingStrategy.java — from the big-c project (Apache License 2.0)
/**
 * Reads up to {@code length} bytes from an HDFS file, optionally applying a
 * drop-behind caching hint to the stream before reading.
 *
 * @param fs         the file system to read from
 * @param p          path of the file to read
 * @param length     maximum number of bytes to read
 * @param dropBehind drop-behind hint to set on the stream, or {@code null}
 *                   to leave the stream's default caching strategy
 * @return the number of bytes actually read (may be less than {@code length}
 *         if EOF is reached first)
 * @throws Exception if the file cannot be opened or read
 */
static long readHdfsFile(FileSystem fs, Path p, long length,
    Boolean dropBehind) throws Exception {
  FSDataInputStream fis = null;
  long totalRead = 0;
  try {
    fis = fs.open(p);
    if (dropBehind != null) {
      fis.setDropBehind(dropBehind);
    }
    byte[] buf = new byte[8192]; // was 8196 (likely a typo); use 8 KiB
    while (length > 0) {
      int amt = (length > buf.length) ? buf.length : (int) length;
      int ret = fis.read(buf, 0, amt);
      if (ret == -1) {
        return totalRead; // EOF before 'length' bytes were read
      }
      totalRead += ret;
      length -= ret;
    }
    // BUG FIX: the original fell through to a bare
    // 'throw new RuntimeException("unreachable")' after a complete read of
    // exactly 'length' bytes, so a fully successful read always threw.
    return totalRead;
  } catch (IOException e) {
    // BUG FIX: the original logged and swallowed the exception, then ran on
    // into the "unreachable" throw with the cause lost. Log and rethrow so
    // callers see the real failure (signature already declares Exception).
    LOG.error("ioexception", e);
    throw e;
  } finally {
    if (fis != null) {
      fis.close();
    }
  }
}
 
Example 4
Source File: TestCachingStrategy.java — from the big-c project (Apache License 2.0)
/**
 * Regression test: seeking must still work after setDropBehind() has
 * discarded the stream's current BlockReader.
 */
@Test(timeout=120000)
public void testSeekAfterSetDropBehind() throws Exception {
  LOG.info("testSeekAfterSetDropBehind");
  final String testPath = "/test";
  final int testPathLen = MAX_TEST_FILE_LEN;
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    // Bring up a single-datanode mini cluster for the test.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    createHdfsFile(fs, new Path(testPath), testPathLen, false);
    // Verify that a seek still succeeds once setDropBehind has cleared
    // the stream's BlockReader.
    try (FSDataInputStream in = fs.open(new Path(testPath))) {
      Assert.assertTrue(in.read() != -1); // forces a BlockReader to exist
      in.setDropBehind(false);            // clears the BlockReader
      in.seek(2);                         // must not fail
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}