Java Code Examples for org.apache.hadoop.fs.FSDataInputStream#read()

The following examples show how to use org.apache.hadoop.fs.FSDataInputStream#read(). Each example is taken from an open source project; its source file and license are noted in the heading above it.
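
As a quick orientation before the project examples, here is a minimal sketch of the pattern most of them share: open a file, read into a buffer until read() returns -1, and close the stream. The configuration and path below are hypothetical placeholders.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReadLoopSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example.txt"); // hypothetical path
    byte[] buf = new byte[4096];
    try (FSDataInputStream in = fs.open(path)) {
      int n;
      // read(byte[]) returns the number of bytes read, or -1 at end of stream
      while ((n = in.read(buf)) > -1) {
        System.out.write(buf, 0, n);
      }
      System.out.flush();
    }
  }
}
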
Example 1
Source File: TestHftpFileSystem.java    From RDFS with Apache License 2.0
public void readHftpFile(
  boolean strictContentLength, boolean sendContentLength
)
  throws IOException, URISyntaxException {
  int bufSize = 128 * 1024;
  byte[] buf = DFSTestUtil.generateSequentialBytes(0, bufSize);
  final ByteArrayInputStream inputStream = new ByteArrayInputStream(buf);
  final long contentLength = bufSize + 1;
  Configuration conf = new Configuration();

  conf.setBoolean(HftpFileSystem.STRICT_CONTENT_LENGTH, strictContentLength);

  HftpFileSystem fileSystem =
    new MockHftpFileSystem(
      sendContentLength ? contentLength : null, inputStream, conf
    );
  FSDataInputStream dataInputStream = fileSystem.open(new Path("dont-care"));
  byte[] readBuf = new byte[1024];

  while (dataInputStream.read(readBuf) > -1) {
    // drain the stream; read(byte[]) returns -1 at EOF
  }

  dataInputStream.close();
}
 
Example 2
Source File: AbstractContractOpenTest.java    From big-c with Apache License 2.0
@Test
public void testOpenFileTwice() throws Throwable {
  describe("verify that two opened file streams are independent");
  Path path = path("testopenfiletwice.txt");
  byte[] block = dataset(TEST_FILE_LEN, 0, 255);
  //this file now has a simple rule: offset => value
  createFile(getFileSystem(), path, false, block);
  //open first
  FSDataInputStream instream1 = getFileSystem().open(path);
  int c = instream1.read();
  assertEquals(0,c);
  FSDataInputStream instream2 = null;
  try {
    instream2 = getFileSystem().open(path);
    assertEquals("first read of instream 2", 0, instream2.read());
    assertEquals("second read of instream 1", 1, instream1.read());
    instream1.close();
    assertEquals("second read of instream 2", 1, instream2.read());
    //close instream1 again
    instream1.close();
  } finally {
    IOUtils.closeStream(instream1);
    IOUtils.closeStream(instream2);
  }
}
 
Example 3
Source File: TestRaidNode.java    From RDFS with Apache License 2.0
private void validateFile(FileSystem fileSys, Path name1, Path name2, long crc) 
  throws IOException {

  FileStatus stat1 = fileSys.getFileStatus(name1);
  FileStatus stat2 = fileSys.getFileStatus(name2);
  assertTrue(" Length of file " + name1 + " is " + stat1.getLen() + 
             " is different from length of file " + name1 + " " + stat2.getLen(),
             stat1.getLen() == stat2.getLen());

  CRC32 newcrc = new CRC32();
  FSDataInputStream stm = fileSys.open(name2);
  final byte[] b = new byte[4192];
  int num = 0;
  while (num >= 0) {
    num = stm.read(b);
    if (num < 0) {
      break;
    }
    newcrc.update(b, 0, num);
  }
  stm.close();
  if (newcrc.getValue() != crc) {
    fail("CRC mismatch of files " + name1 + " with file " + name2);
  }
}
 
Example 4
Source File: TestBlockMissingException.java    From RDFS with Apache License 2.0
private void validateFile(FileSystem fileSys, Path name)
  throws IOException {

  FSDataInputStream stm = fileSys.open(name);
  final byte[] b = new byte[4192];
  int num = 0;
  boolean gotException = false;

  try {
    while (num >= 0) {
      num = stm.read(b);
      if (num < 0) {
        break;
      }
    }
  } catch (BlockMissingException e) {
    gotException = true;
  }
  stm.close();
  assertTrue("Expected BlockMissingException ", gotException);
}
 
Example 5
Source File: TestSwiftFileSystemExtendedContract.java    From stocator with Apache License 2.0
@Test(timeout = TestConstants.SWIFT_TEST_TIMEOUT)
public void testWriteReadFile() throws Exception {
  final Path f = new Path(getBaseURI() + "/test/test");
  final FSDataOutputStream fsDataOutputStream = sFileSystem.create(f);
  final String message = "Test string";
  fsDataOutputStream.write(message.getBytes());
  fsDataOutputStream.close();
  assertExists("created file", f);
  FSDataInputStream open = null;
  try {
    open = sFileSystem.open(f);
    final byte[] bytes = new byte[512];
    final int read = open.read(bytes);
    final byte[] buffer = new byte[read];
    System.arraycopy(bytes, 0, buffer, 0, read);
    assertEquals(message, new String(buffer));
  } finally {
    sFileSystem.delete(f, false);
    IOUtils.closeStream(open);
  }
}
 
Example 6
Source File: TestSwiftFileSystemRename.java    From big-c with Apache License 2.0
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRenameFile() throws Exception {
  assumeRenameSupported();

  final Path old = new Path("/test/alice/file");
  final Path newPath = new Path("/test/bob/file");
  fs.mkdirs(newPath.getParent());
  final FSDataOutputStream fsDataOutputStream = fs.create(old);
  final byte[] message = "Some data".getBytes();
  fsDataOutputStream.write(message);
  fsDataOutputStream.close();

  assertTrue(fs.exists(old));
  rename(old, newPath, true, false, true);

  final FSDataInputStream bobStream = fs.open(newPath);
  final byte[] bytes = new byte[512];
  final int read = bobStream.read(bytes);
  bobStream.close();
  final byte[] buffer = new byte[read];
  System.arraycopy(bytes, 0, buffer, 0, read);
  assertEquals(new String(message), new String(buffer));
}
 
Example 7
Source File: BlockIOUtils.java    From hbase with Apache License 2.0
/**
 * Read at least <code>necessaryLen</code> bytes from an input stream and,
 * if possible, up to <code>extraLen</code> more. Analogous to
 * {@link IOUtils#readFully(InputStream, byte[], int, int)}, but uses positional read and
 * specifies a number of "extra" bytes that would be desirable but not absolutely necessary to
 * read.
 * @param buff ByteBuff to read into.
 * @param dis the input stream to read from
 * @param position the position within the stream from which to start reading
 * @param necessaryLen the number of bytes that are absolutely necessary to read
 * @param extraLen the number of extra bytes that would be nice to read
 * @return true if and only if extraLen is > 0 and reading those extra bytes was successful
 * @throws IOException if failed to read the necessary bytes
 */
public static boolean preadWithExtra(ByteBuff buff, FSDataInputStream dis, long position,
    int necessaryLen, int extraLen) throws IOException {
  int remain = necessaryLen + extraLen;
  byte[] buf = new byte[remain];
  int bytesRead = 0;
  while (bytesRead < necessaryLen) {
    int ret = dis.read(position + bytesRead, buf, bytesRead, remain);
    if (ret < 0) {
      throw new IOException("Premature EOF from inputStream (positional read returned " + ret
          + ", was trying to read " + necessaryLen + " necessary bytes and " + extraLen
          + " extra bytes, successfully read " + bytesRead);
    }
    bytesRead += ret;
    remain -= ret;
  }
  // Copy the bytes from on-heap bytes[] to ByteBuffer[] now, and after resolving HDFS-3246, we
  // will read the bytes to ByteBuffer[] directly without allocating any on-heap byte[].
  // TODO I keep the bytes copy here, because I want to abstract the ByteBuffer[]
  // preadWithExtra method for the upper layer, only need to refactor this method if the
  // ByteBuffer pread is OK.
  copyToByteBuff(buf, 0, bytesRead, buff);
  return (extraLen > 0) && (bytesRead == necessaryLen + extraLen);
}
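
A detail worth noting in this example: the four-argument read(position, buffer, offset, length) used above comes from the PositionedReadable interface and does not advance the stream's current position, which is what makes it safe to mix with, or run concurrently alongside, sequential reads. A minimal sketch of the contrast, assuming a hypothetical file of at least 116 bytes:

import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PreadVsSequentialSketch {
  static void demo(FileSystem fs) throws IOException {
    byte[] buf = new byte[16];
    try (FSDataInputStream in = fs.open(new Path("/tmp/example.bin"))) {
      in.readFully(100L, buf, 0, buf.length);  // positional read: position unchanged
      System.out.println("pos after pread = " + in.getPos());  // still 0
      in.seek(100L);
      int n = in.read(buf, 0, buf.length);     // sequential read: advances position
      System.out.println("read " + n + " bytes, pos = " + in.getPos());  // 100 + n
    }
  }
}
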
 
Example 8
Source File: DFSTestUtil.java    From hadoop with Apache License 2.0
/**
 * Verify that two files have different contents.
 *
 * @param fs The file system containing the two files.
 * @param p1 The path of the first file.
 * @param p2 The path of the second file.
 * @param len The length of the two files.
 * @throws IOException
 */
public static void verifyFilesNotEqual(FileSystem fs, Path p1, Path p2,
    int len)
        throws IOException {
  final FSDataInputStream in1 = fs.open(p1);
  final FSDataInputStream in2 = fs.open(p2);
  try {
    for (int i = 0; i < len; i++) {
      if (in1.read() != in2.read()) {
        return;
      }
    }
    fail("files are equal, but should not be");
  } finally {
    in1.close();
    in2.close();
  }
}
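
A subtlety this example relies on: the no-argument read() returns the next byte widened to an int in the range 0 to 255, reserving -1 exclusively for end of stream, so the byte-by-byte comparison above never confuses stored data with EOF. A small self-contained illustration of the convention:

import java.io.IOException;
import java.io.InputStream;

public class SingleByteReadSketch {
  // read() widens each byte to an int in [0, 255]; -1 only ever means EOF,
  // so a stored byte of (byte) 0xFF comes back as 255, never as -1.
  static int nextByte(InputStream in) throws IOException {
    int b = in.read();
    if (b == -1) {
      return -1;   // end of stream
    }
    return b;      // safe to narrow with (byte) b once EOF is ruled out
  }
}
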
 
Example 9
Source File: AbstractContractOpenTest.java    From hadoop with Apache License 2.0
@Test
public void testOpenFileTwice() throws Throwable {
  describe("verify that two opened file streams are independent");
  Path path = path("testopenfiletwice.txt");
  byte[] block = dataset(TEST_FILE_LEN, 0, 255);
  //this file now has a simple rule: offset => value
  createFile(getFileSystem(), path, false, block);
  //open first
  FSDataInputStream instream1 = getFileSystem().open(path);
  int c = instream1.read();
  assertEquals(0,c);
  FSDataInputStream instream2 = null;
  try {
    instream2 = getFileSystem().open(path);
    assertEquals("first read of instream 2", 0, instream2.read());
    assertEquals("second read of instream 1", 1, instream1.read());
    instream1.close();
    assertEquals("second read of instream 2", 1, instream2.read());
    //close instream1 again
    instream1.close();
  } finally {
    IOUtils.closeStream(instream1);
    IOUtils.closeStream(instream2);
  }
}
 
Example 10
Source File: TestFileLink.java    From hbase with Apache License 2.0
private static void skipBuffer(FSDataInputStream in, byte v) throws IOException {
  byte[] data = new byte[8192];
  try {
    int n;
    while ((n = in.read(data)) == data.length) {
      for (int i = 0; i < data.length; ++i) {
        if (data[i] != v)
          throw new Exception("File changed");
      }
    }
  } catch (Exception e) {
    // intentionally swallowed: a short or changed read simply ends the skip
  }
}
 
Example 11
Source File: TestClientProtocolForPipelineRecovery.java    From hadoop with Apache License 2.0
/** Test whether corrupt replicas are detected correctly during pipeline
 * recoveries.
 */
@Test
public void testPipelineRecoveryForLastBlock() throws IOException {
  DFSClientFaultInjector faultInjector
      = Mockito.mock(DFSClientFaultInjector.class);
  DFSClientFaultInjector oldInjector = DFSClientFaultInjector.instance;
  DFSClientFaultInjector.instance = faultInjector;
  Configuration conf = new HdfsConfiguration();

  conf.setInt(DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY, 3);
  MiniDFSCluster cluster = null;

  try {
    int numDataNodes = 3;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    FileSystem fileSys = cluster.getFileSystem();

    Path file = new Path("dataprotocol1.dat");
    Mockito.when(faultInjector.failPacket()).thenReturn(true);
    DFSTestUtil.createFile(fileSys, file, 68000000L, (short)numDataNodes, 0L);

    // At this point, NN should have accepted only valid replicas.
    // Read should succeed.
    FSDataInputStream in = fileSys.open(file);
    try {
      int c = in.read();
      // Test will fail with BlockMissingException if NN does not update the
      // replica state based on the latest report.
    } catch (org.apache.hadoop.hdfs.BlockMissingException bme) {
      Assert.fail("Block is missing because the file was closed with"
          + " corrupt replicas.");
    }
  } finally {
    DFSClientFaultInjector.instance = oldInjector;
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 12
Source File: AppendTestUtil.java    From big-c with Apache License 2.0
public static void check(DistributedFileSystem fs, Path p, int position,
    int length) throws IOException {
  byte[] buf = new byte[length];
  int i = 0;
  try {
    FSDataInputStream in = fs.open(p);
    // positional read; the test assumes the full range comes back in one call
    in.read(position, buf, 0, buf.length);
    for(i = position; i < length + position; i++) {
      assertEquals((byte) i, buf[i - position]);
    }
    in.close();
  } catch(IOException ioe) {
    throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
  }
}
 
Example 13
Source File: TestFiDataTransferProtocol2.java    From hadoop with Apache License 2.0
/**
 * 1. create files with dfs
 * 2. write MIN_N_PACKET to MAX_N_PACKET packets
 * 3. close file
 * 4. open the same file
 * 5. read the bytes and compare results
 */
private static void writeSeveralPackets(String methodName) throws IOException {
  final Random r = FiTestUtil.RANDOM.get();
  final int nPackets = FiTestUtil.nextRandomInt(MIN_N_PACKET, MAX_N_PACKET + 1);
  final int lastPacketSize = FiTestUtil.nextRandomInt(1, PACKET_SIZE + 1);
  final int size = (nPackets - 1)*PACKET_SIZE + lastPacketSize;

  FiTestUtil.LOG.info("size=" + size + ", nPackets=" + nPackets
      + ", lastPacketSize=" + lastPacketSize);

  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
      ).numDataNodes(REPLICATION + 2).build();
  final FileSystem dfs = cluster.getFileSystem();
  try {
    final Path p = new Path("/" + methodName + "/foo");
    final FSDataOutputStream out = createFile(dfs, p);

    final long seed = r.nextLong();
    final Random ran = new Random(seed);
    ran.nextBytes(bytes);
    out.write(bytes, 0, size);
    out.close();

    final FSDataInputStream in = dfs.open(p);
    int totalRead = 0;
    int nRead = 0;
    while ((nRead = in.read(toRead, totalRead, size - totalRead)) > 0) {
      totalRead += nRead;
    }
    Assert.assertEquals("Cannot read file.", size, totalRead);
    for (int i = 0; i < size; i++) {
      Assert.assertTrue("File content differ.", bytes[i] == toRead[i]);
    }
  }
  finally {
    dfs.close();
    cluster.shutdown();
  }
}
 
Example 14
Source File: TestFsDatasetCacheRevocation.java    From big-c with Apache License 2.0
/**
 * Test that when we have an uncache request, and the client refuses to release
 * the replica for a long time, we will un-mlock it.
 */
@Test(timeout=120000)
public void testRevocation() throws Exception {
  assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
  BlockReaderTestUtil.enableHdfsCachingTracing();
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  Configuration conf = getDefaultConf();
  // Set a really short revocation timeout.
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS, 250L);
  // Poll very often
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS, 2L);
  MiniDFSCluster cluster = null;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem dfs = cluster.getFileSystem();

  // Create and cache a file.
  final String TEST_FILE = "/test_file2";
  DFSTestUtil.createFile(dfs, new Path(TEST_FILE),
      BLOCK_SIZE, (short)1, 0xcafe);
  dfs.addCachePool(new CachePoolInfo("pool"));
  long cacheDirectiveId =
      dfs.addCacheDirective(new CacheDirectiveInfo.Builder().
          setPool("pool").setPath(new Path(TEST_FILE)).
          setReplication((short) 1).build());
  FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
  DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);

  // Mmap the file.
  FSDataInputStream in = dfs.open(new Path(TEST_FILE));
  ByteBuffer buf =
      in.read(null, BLOCK_SIZE, EnumSet.noneOf(ReadOption.class));

  // Attempt to uncache file.  The file should get uncached.
  LOG.info("removing cache directive {}", cacheDirectiveId);
  dfs.removeCacheDirective(cacheDirectiveId);
  LOG.info("finished removing cache directive {}", cacheDirectiveId);
  Thread.sleep(1000);
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);

  // Cleanup
  in.releaseBuffer(buf);
  in.close();
  cluster.shutdown();
}
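
The read(bufferPool, maxLength, opts) overload used above comes from the HasEnhancedByteBufferAccess interface: it returns a ByteBuffer that may be a zero-copy mmap of cached block data, and every buffer it returns must eventually be handed back via releaseBuffer(), as the test does during cleanup. Below is a minimal sketch using a real buffer pool rather than null; the path is hypothetical.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.EnumSet;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.io.ElasticByteBufferPool;

public class ZeroCopyReadSketch {
  static void demo(FileSystem fs) throws IOException {
    ElasticByteBufferPool pool = new ElasticByteBufferPool();
    try (FSDataInputStream in = fs.open(new Path("/tmp/example.bin"))) {
      // Returns a zero-copy buffer when the block is cached and checksums can
      // be skipped; otherwise falls back to a buffer drawn from the pool.
      // Returns null at end of stream.
      ByteBuffer buf = in.read(pool, 4096, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      if (buf != null) {
        try {
          System.out.println("read " + buf.remaining() + " bytes");
        } finally {
          in.releaseBuffer(buf);  // required for every buffer returned by read()
        }
      }
    }
  }
}
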
 
Example 15
Source File: TestHFlush.java    From hadoop with Apache License 2.0
/**
 * Starts a new cluster with the given Configuration; creates a file with the
 * specified block_size and writes 10 equal sections to it, calling
 * hflush/hsync after each write; throws an IOException in case of an error.
 * 
 * @param conf cluster configuration
 * @param fileName name of the file to be created and processed as required
 * @param block_size value to be used for the file's creation
 * @param replicas is the number of replicas
 * @param isSync hsync or hflush         
 * @param syncFlags specify the semantic of the sync/flush
 * @throws IOException in case of any errors
 */
public static void doTheJob(Configuration conf, final String fileName,
    long block_size, short replicas, boolean isSync,
    EnumSet<SyncFlag> syncFlags) throws IOException {
  byte[] fileContent;
  final int SECTIONS = 10;

  fileContent = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                             .numDataNodes(replicas).build();
  // Make sure we work with DFS in order to utilize all its functionality
  DistributedFileSystem fileSystem = cluster.getFileSystem();

  FSDataInputStream is;
  try {
    Path path = new Path(fileName);
    final String pathName = new Path(fileSystem.getWorkingDirectory(), path)
        .toUri().getPath();
    FSDataOutputStream stm = fileSystem.create(path, false, 4096, replicas,
        block_size);
    System.out.println("Created file " + fileName);

    int tenth = AppendTestUtil.FILE_SIZE/SECTIONS;
    int rounding = AppendTestUtil.FILE_SIZE - tenth * SECTIONS;
    for (int i=0; i<SECTIONS; i++) {
      System.out.println("Writing " + (tenth * i) + " to "
          + (tenth * (i + 1)) + " section to file " + fileName);
      // write to the file
      stm.write(fileContent, tenth * i, tenth);
      
      // Wait while hflush/hsync pushes all packets through built pipeline
      if (isSync) {
        ((DFSOutputStream)stm.getWrappedStream()).hsync(syncFlags);
      } else {
        ((DFSOutputStream)stm.getWrappedStream()).hflush();
      }
      
      // Check file length if updatelength is required
      if (isSync && syncFlags.contains(SyncFlag.UPDATE_LENGTH)) {
        long currentFileLength = fileSystem.getFileStatus(path).getLen();
        assertEquals(
          "File size doesn't match for hsync/hflush with updating the length",
          tenth * (i + 1), currentFileLength);
      } else if (isSync && syncFlags.contains(SyncFlag.END_BLOCK)) {
        LocatedBlocks blocks = fileSystem.dfs.getLocatedBlocks(pathName, 0);
        assertEquals(i + 1, blocks.getLocatedBlocks().size());
      }

      byte [] toRead = new byte[tenth];
      byte [] expected = new byte[tenth];
      System.arraycopy(fileContent, tenth * i, expected, 0, tenth);
      // Open the same file for read. Need to create new reader after every write operation(!)
      is = fileSystem.open(path);
      is.seek(tenth * i);
      int readBytes = is.read(toRead, 0, tenth);
      System.out.println("Has read " + readBytes);
      assertTrue("Should've get more bytes", (readBytes > 0) && (readBytes <= tenth));
      is.close();
      checkData(toRead, 0, readBytes, expected, "Partial verification");
    }
    System.out.println("Writing " + (tenth * SECTIONS) + " to " + (tenth * SECTIONS + rounding) + " section to file " + fileName);
    stm.write(fileContent, tenth * SECTIONS, rounding);
    stm.close();

    assertEquals("File size doesn't match ", AppendTestUtil.FILE_SIZE, fileSystem.getFileStatus(path).getLen());
    AppendTestUtil.checkFullFile(fileSystem, path, fileContent.length, fileContent, "hflush()");
  } finally {
    fileSystem.close();
    cluster.shutdown();
  }
}
 
Example 16
Source File: TestTruncatedInputBug.java    From RDFS with Apache License 2.0
/**
 * When mark() is used on a BufferedInputStream, the request
 * size on the checksum file system can be small. However, the
 * checksum file system currently depends on the request size being
 * >= bytesPerSum to work properly.
 */
public void testTruncatedInputBug() throws IOException {
  final int ioBufSize = 512;
  final int fileSize = ioBufSize*4;
  int filePos = 0;

  Configuration conf = new Configuration();
  conf.setInt("io.file.buffer.size", ioBufSize);
  FileSystem fileSys = FileSystem.getLocal(conf);

  try {
    // First create a test input file.
    Path testFile = new Path(TEST_ROOT_DIR, "HADOOP-1489");
    writeFile(fileSys, testFile, fileSize);
    assertTrue(fileSys.exists(testFile));
    assertTrue(fileSys.getLength(testFile) == fileSize);

    // Now read the file for ioBufSize bytes
    FSDataInputStream in = fileSys.open(testFile, ioBufSize);
    // seek beyond data buffered by open
    filePos += ioBufSize * 2 + (ioBufSize - 10);  
    in.seek(filePos);

    // read 4 more bytes before marking
    for (int i = 0; i < 4; ++i) {  
      if (in.read() == -1) {
        break;
      }
      ++filePos;
    }

    // Now set mark() to trigger the bug
    // NOTE: in the fixed code, mark() does nothing (not supported) and
    //   hence won't trigger this bug.
    in.mark(1);
    System.out.println("MARKED");
    
    // Try to read the rest
    while (filePos < fileSize) {
      if (in.read() == -1) {
        break;
      }
      ++filePos;
    }
    in.close();

    System.out.println("Read " + filePos + " bytes."
                       + " file size=" + fileSize);
    assertTrue(filePos == fileSize);

  } finally {
    try {
      fileSys.close();
    } catch (Exception e) {
      // noop
    }
  }
}
 
Example 17
Source File: TestDFSIsUnderConstruction.java    From RDFS with Apache License 2.0
public void testSecondLastBlockNotReceived() throws Exception {
  String fileName = "/testSecondLastBlockNotReceived";
  Path growingFile = new Path(fileName); 
  FSDataInputStream fis = null;
  FSDataOutputStream fos = fs.create(growingFile, false, 1024, (short)1, 1024);
  try {
    int fileLength = 2096;
    AppendTestUtil.write(fos, 0, fileLength);
    fos.sync();

    fis = fs.open(growingFile);
    for (int i = 0; i < fileLength; i++) {
      fis.read();
    }
    fis.close();

    FSNamesystem fsns = cluster.getNameNode().namesystem;
    INode[] inodes = fsns.dir.getExistingPathINodes(fileName);
    BlockInfo[] bis = ((INodeFile) (inodes[inodes.length - 1])).getBlocks();
    bis[bis.length - 2].setNumBytes(1);

    try {
      fis = fs.open(growingFile);
      TestCase.fail();
    } catch (IOException e) {
      // expected: open fails while the second-to-last block is under-reported
    }
    bis[bis.length - 2].setNumBytes(1024);

    bis[bis.length - 1].setNumBytes(1);
    fis = fs.open(growingFile);
    for (int i = 0; i < fileLength; i++) {
      fis.read();
    }
  } finally {
    if (fos != null) {
      fos.close();
    }
    if (fis != null) {
      fis.close();
    }
  }
}
 
Example 18
Source File: FooterGatherer.java    From Bats with Apache License 2.0
private static final void readFully(FSDataInputStream stream, long start, byte[] output, int offset, int len) throws IOException{
  int bytesRead = 0;
  while(bytesRead > -1 && bytesRead < len){
    bytesRead += stream.read(start+bytesRead, output, offset + bytesRead, len-bytesRead);
  }
}
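
One caveat with the loop above: it does not guard against end of stream. If read() returns -1 before len bytes arrive, bytesRead is decremented rather than the error surfaced, and the method can return with the buffer only partially filled. A more defensive variant, shown here as a sketch rather than the project's code, fails fast on premature EOF; note that FSDataInputStream also ships a built-in readFully(long, byte[], int, int) with the same guarantee.

import java.io.EOFException;
import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;

public class SafeReadFullySketch {
  static void readFully(FSDataInputStream stream, long start, byte[] output,
      int offset, int len) throws IOException {
    int bytesRead = 0;
    while (bytesRead < len) {
      int n = stream.read(start + bytesRead, output, offset + bytesRead,
          len - bytesRead);
      if (n < 0) {
        throw new EOFException("Premature EOF after " + bytesRead + " of "
            + len + " bytes");
      }
      bytesRead += n;
    }
  }
}
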
 
Example 19
Source File: TestSnapshotFileLength.java    From hadoop with Apache License 2.0
/**
 * Added as part of HDFS-5343.
 * Verifies that the cat command on a snapshot path cannot read a file
 * beyond the snapshot file length.
 * @throws Exception
 */
@Test (timeout = 600000)
public void testSnapshotFileLengthWithCatCommand() throws Exception {

  FSDataInputStream fis = null;
  FileStatus fileStatus = null;

  int bytesRead;
  byte[] buffer = new byte[BLOCKSIZE * 8];

  hdfs.mkdirs(sub);
  Path file1 = new Path(sub, file1Name);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);

  hdfs.allowSnapshot(sub);
  hdfs.createSnapshot(sub, snapshot1);

  DFSTestUtil.appendFile(hdfs, file1, BLOCKSIZE);

  // Make sure we can read the entire file via its non-snapshot path.
  fileStatus = hdfs.getFileStatus(file1);
  assertEquals("Unexpected file length", BLOCKSIZE * 2, fileStatus.getLen());
  fis = hdfs.open(file1);
  bytesRead = fis.read(buffer, 0, buffer.length);
  assertEquals("Unexpected # bytes read", BLOCKSIZE * 2, bytesRead);
  fis.close();

  Path file1snap1 =
      SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
  fis = hdfs.open(file1snap1);
  fileStatus = hdfs.getFileStatus(file1snap1);
  assertEquals(fileStatus.getLen(), BLOCKSIZE);
  // Make sure we can only read up to the snapshot length.
  bytesRead = fis.read(buffer, 0, buffer.length);
  assertEquals("Unexpected # bytes read", BLOCKSIZE, bytesRead);
  fis.close();

  PrintStream outBackup = System.out;
  PrintStream errBackup = System.err;
  ByteArrayOutputStream bao = new ByteArrayOutputStream();
  System.setOut(new PrintStream(bao));
  System.setErr(new PrintStream(bao));
  // Make sure we can cat the file up to the snapshot length
  FsShell shell = new FsShell();
  try {
    ToolRunner.run(conf, shell, new String[] { "-cat",
    "/TestSnapshotFileLength/sub1/.snapshot/snapshot1/file1" });
    assertEquals("Unexpected # bytes from -cat", BLOCKSIZE, bao.size());
  } finally {
    System.setOut(outBackup);
    System.setErr(errBackup);
  }
}
 
Example 20
Source File: TestKosmosFileSystem.java    From RDFS with Apache License 2.0
public void testFileIO() throws Exception {
    Path subDir1 = new Path("dir.1");
    Path file1 = new Path("dir.1/foo.1");

    kosmosFileSystem.mkdirs(baseDir);
    assertTrue(kosmosFileSystem.isDirectory(baseDir));
    kosmosFileSystem.setWorkingDirectory(baseDir);

    kosmosFileSystem.mkdirs(subDir1);

    FSDataOutputStream s1 = kosmosFileSystem.create(file1, true, 4096, (short) 1, (long) 4096, null);

    int bufsz = 4096;
    byte[] data = new byte[bufsz];

    for (int i = 0; i < data.length; i++)
        data[i] = (byte) (i % 16);

    // write 4 bytes and read them back; read API should return a byte per call
    s1.write(32);
    s1.write(32);
    s1.write(32);
    s1.write(32);
    // write some data
    s1.write(data, 0, data.length);
    // flush out the changes
    s1.close();

    // Read the stuff back and verify it is correct
    FSDataInputStream s2 = kosmosFileSystem.open(file1, 4096);
    int v;

    v = s2.read();
    assertEquals(v, 32);
    v = s2.read();
    assertEquals(v, 32);
    v = s2.read();
    assertEquals(v, 32);
    v = s2.read();
    assertEquals(v, 32);

    assertEquals(s2.available(), data.length);

    byte[] buf = new byte[bufsz];
    s2.read(buf, 0, buf.length);
    for (int i = 0; i < data.length; i++)
        assertEquals(data[i], buf[i]);

    assertEquals(s2.available(), 0);

    s2.close();

    kosmosFileSystem.delete(file1, true);
    assertFalse(kosmosFileSystem.exists(file1));        
    kosmosFileSystem.delete(subDir1, true);
    assertFalse(kosmosFileSystem.exists(subDir1));        
    kosmosFileSystem.delete(baseDir, true);
    assertFalse(kosmosFileSystem.exists(baseDir));        
}