Java Code Examples for org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset#DEFAULT_DATABYTE

The following examples show how to use org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset#DEFAULT_DATABYTE. You can vote up the examples you like and vote down the ones you don't, and you can go to the original project or source file by following the links above each example. Related API usage is listed in the sidebar.
Example 1
Source File: TestSmallBlock.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies the contents of {@code name}: every byte must match either the
 * simulated-storage fill byte or the pseudo-random stream produced from
 * {@code seed}, depending on the {@code simulatedStorage} flag.
 *
 * @param fileSys file system holding the file
 * @param name    path of the file to verify
 * @throws IOException if the file or its block locations cannot be read
 */
private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  // This test writes with a tiny block size, so the file spans fileSize
  // blocks (one byte per block).
  assertEquals("Number of blocks", fileSize, locations.length);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    // SimulatedFSDataset serves the same constant byte for every offset.
    java.util.Arrays.fill(expected, SimulatedFSDataset.DEFAULT_DATABYTE);
  } else {
    // Same seed as the writer regenerates the identical byte sequence.
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  // try-with-resources closes the stream even if a check below fails.
  try (FSDataInputStream stm = fileSys.open(name)) {
    stm.readFully(0, actual);
    checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  }
}
 
Example 2
Source File: TestSmallBlock.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies the contents of {@code name}: every byte must match either the
 * simulated-storage fill byte or the pseudo-random stream produced from
 * {@code seed}, depending on the {@code simulatedStorage} flag.
 *
 * @param fileSys file system holding the file
 * @param name    path of the file to verify
 * @throws IOException if the file or its block locations cannot be read
 */
private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  // This test writes with a tiny block size, so the file spans fileSize
  // blocks (one byte per block).
  assertEquals("Number of blocks", fileSize, locations.length);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    // SimulatedFSDataset serves the same constant byte for every offset.
    java.util.Arrays.fill(expected, SimulatedFSDataset.DEFAULT_DATABYTE);
  } else {
    // Same seed as the writer regenerates the identical byte sequence.
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  // try-with-resources closes the stream even if a check below fails.
  try (FSDataInputStream stm = fileSys.open(name)) {
    stm.readFully(0, actual);
    checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  }
}
 
Example 3
Source File: TestSmallBlock.java    From RDFS with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies the contents of {@code name}: every byte must match either the
 * simulated-storage fill byte or the pseudo-random stream produced from
 * {@code seed}, depending on the {@code simulatedStorage} flag.
 *
 * @param fileSys file system holding the file
 * @param name    path of the file to verify
 * @throws IOException if the file or its block locations cannot be read
 */
private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  // This test writes with a tiny block size, so the file spans fileSize
  // blocks (one byte per block).
  assertEquals("Number of blocks", fileSize, locations.length);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    // SimulatedFSDataset serves the same constant byte for every offset.
    java.util.Arrays.fill(expected, SimulatedFSDataset.DEFAULT_DATABYTE);
  } else {
    // Same seed as the writer regenerates the identical byte sequence.
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  // try-with-resources closes the stream even if a check below fails.
  try (FSDataInputStream stm = fileSys.open(name)) {
    stm.readFully(0, actual);
    checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  }
}
 
Example 4
Source File: TestSmallBlock.java    From hadoop-gpu with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies the contents of {@code name}: every byte must match either the
 * simulated-storage fill byte or the pseudo-random stream produced from
 * {@code seed}, depending on the {@code simulatedStorage} flag.
 *
 * @param fileSys file system holding the file
 * @param name    path of the file to verify
 * @throws IOException if the file or its block locations cannot be read
 */
private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  // This test writes with a tiny block size, so the file spans fileSize
  // blocks (one byte per block).
  assertEquals("Number of blocks", fileSize, locations.length);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    // SimulatedFSDataset serves the same constant byte for every offset.
    java.util.Arrays.fill(expected, SimulatedFSDataset.DEFAULT_DATABYTE);
  } else {
    // Same seed as the writer regenerates the identical byte sequence.
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  // try-with-resources closes the stream even if a check below fails.
  try (FSDataInputStream stm = fileSys.open(name)) {
    stm.readFully(0, actual);
    checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  }
}
 
Example 5
Source File: TestFileAppend.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Waits until every full block of {@code name} is reported with at least
 * {@code repl} replicas, then verifies the file contents against either the
 * simulated-storage fill byte or the locally buffered {@code fileContents}.
 *
 * @param fileSys file system holding the file
 * @param name    path of the file to verify
 * @param repl    minimum replica count expected for every block
 * @throws IOException if block locations or file data cannot be read
 */
private void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers can observe the interruption.
      Thread.currentThread().interrupt();
    }
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, AppendTestUtil.FILE_SIZE);
    if (locations.length < AppendTestUtil.NUM_BLOCKS) {
      System.out.println("Number of blocks found " + locations.length);
      done = false;
      continue;
    }
    for (int idx = 0; idx < AppendTestUtil.NUM_BLOCKS; idx++) {
      if (locations[idx].getHosts().length < repl) {
        System.out.println("Block index " + idx + " not yet replicated.");
        done = false;
        break;
      }
    }
  }
  byte[] expected =
      new byte[AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE];
  if (simulatedStorage) {
    // SimulatedFSDataset serves the same constant byte for every offset.
    java.util.Arrays.fill(expected, SimulatedFSDataset.DEFAULT_DATABYTE);
  } else {
    System.arraycopy(fileContents, 0, expected, 0, expected.length);
  }
  // do a sanity check. Read the file
  // do not check file status since the file is not yet closed.
  AppendTestUtil.checkFullFile(fileSys, name,
      AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE,
      expected, "Read 1", false);
}
 
Example 6
Source File: TestFileAppend.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Waits until every full block of {@code name} is reported with at least
 * {@code repl} replicas, then verifies the file contents against either the
 * simulated-storage fill byte or the locally buffered {@code fileContents}.
 *
 * @param fileSys file system holding the file
 * @param name    path of the file to verify
 * @param repl    minimum replica count expected for every block
 * @throws IOException if block locations or file data cannot be read
 */
private void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers can observe the interruption.
      Thread.currentThread().interrupt();
    }
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, AppendTestUtil.FILE_SIZE);
    if (locations.length < AppendTestUtil.NUM_BLOCKS) {
      System.out.println("Number of blocks found " + locations.length);
      done = false;
      continue;
    }
    for (int idx = 0; idx < AppendTestUtil.NUM_BLOCKS; idx++) {
      if (locations[idx].getHosts().length < repl) {
        System.out.println("Block index " + idx + " not yet replicated.");
        done = false;
        break;
      }
    }
  }
  byte[] expected =
      new byte[AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE];
  if (simulatedStorage) {
    // SimulatedFSDataset serves the same constant byte for every offset.
    java.util.Arrays.fill(expected, SimulatedFSDataset.DEFAULT_DATABYTE);
  } else {
    System.arraycopy(fileContents, 0, expected, 0, expected.length);
  }
  // do a sanity check. Read the file
  // do not check file status since the file is not yet closed.
  AppendTestUtil.checkFullFile(fileSys, name,
      AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE,
      expected, "Read 1", false);
}
 
Example 7
Source File: TestFileLocalRead.java    From RDFS with Apache License 2.0 5 votes vote down vote up
/**
 * Waits until every full block of {@code name} is reported with at least
 * {@code repl} replicas, then verifies the file contents against either the
 * simulated-storage fill byte or the seeded random byte stream.
 *
 * @param fileSys file system holding the file
 * @param name    path of the file to verify
 * @param repl    minimum replica count expected for every block
 * @throws IOException if block locations or file data cannot be read
 */
private void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers can observe the interruption.
      Thread.currentThread().interrupt();
    }
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, fileSize);
    if (locations.length < numBlocks) {
      done = false;
      continue;
    }
    for (int idx = 0; idx < locations.length; idx++) {
      if (locations[idx].getHosts().length < repl) {
        done = false;
        break;
      }
    }
  }
  final byte[] expected;
  if (simulatedStorage) {
    expected = new byte[numBlocks * blockSize];
    // SimulatedFSDataset serves the same constant byte for every offset.
    java.util.Arrays.fill(expected, SimulatedFSDataset.DEFAULT_DATABYTE);
  } else {
    expected = AppendTestUtil.randomBytes(seed, numBlocks*blockSize);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[numBlocks * blockSize];
  System.out.println("Verifying file ");
  // try-with-resources closes the stream even if the read fails.
  try (FSDataInputStream stm = fileSys.open(name)) {
    stm.readFully(0, actual);
  }
  checkData(actual, 0, expected, "Read 1");
}
 
Example 8
Source File: TestFileCreation.java    From RDFS with Apache License 2.0 5 votes vote down vote up
/**
 * Waits until every full block of {@code name} is reported with at least
 * {@code repl} replicas, then verifies the file contents against either the
 * simulated-storage fill byte or the seeded random byte stream.
 *
 * @param fileSys file system holding the file
 * @param name    path of the file to verify
 * @param repl    minimum replica count expected for every block
 * @throws IOException if block locations or file data cannot be read
 */
public void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers can observe the interruption.
      Thread.currentThread().interrupt();
    }
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, fileSize);
    if (locations.length < numBlocks) {
      done = false;
      continue;
    }
    for (int idx = 0; idx < locations.length; idx++) {
      if (locations[idx].getHosts().length < repl) {
        done = false;
        break;
      }
    }
  }
  final byte[] expected;
  if (simulatedStorage) {
    expected = new byte[numBlocks * blockSize];
    // SimulatedFSDataset serves the same constant byte for every offset.
    java.util.Arrays.fill(expected, SimulatedFSDataset.DEFAULT_DATABYTE);
  } else {
    expected = AppendTestUtil.randomBytes(seed, numBlocks*blockSize);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[numBlocks * blockSize];
  // try-with-resources closes the stream even if the read fails.
  try (FSDataInputStream stm = fileSys.open(name)) {
    stm.readFully(0, actual);
  }
  checkData(actual, 0, expected, "Read 1");
}
 
Example 9
Source File: TestFileCreation.java    From hadoop-gpu with Apache License 2.0 5 votes vote down vote up
/**
 * Waits until every full block of {@code name} is reported with at least
 * {@code repl} replicas, then verifies the file contents against either the
 * simulated-storage fill byte or the seeded random byte stream.
 *
 * @param fileSys file system holding the file
 * @param name    path of the file to verify
 * @param repl    minimum replica count expected for every block
 * @throws IOException if block locations or file data cannot be read
 */
private void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers can observe the interruption.
      Thread.currentThread().interrupt();
    }
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, fileSize);
    if (locations.length < numBlocks) {
      done = false;
      continue;
    }
    for (int idx = 0; idx < locations.length; idx++) {
      if (locations[idx].getHosts().length < repl) {
        done = false;
        break;
      }
    }
  }
  final byte[] expected;
  if (simulatedStorage) {
    expected = new byte[numBlocks * blockSize];
    // SimulatedFSDataset serves the same constant byte for every offset.
    java.util.Arrays.fill(expected, SimulatedFSDataset.DEFAULT_DATABYTE);
  } else {
    expected = AppendTestUtil.randomBytes(seed, numBlocks*blockSize);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[numBlocks * blockSize];
  // try-with-resources closes the stream even if the read fails.
  try (FSDataInputStream stm = fileSys.open(name)) {
    stm.readFully(0, actual);
  }
  checkData(actual, 0, expected, "Read 1");
}
 
Example 10
Source File: TestPread.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Exercises positional reads (preads) at and across block boundaries of a
 * 12-block file, checking every byte against the expected pattern
 * (constant fill byte for simulated storage, seeded random stream otherwise).
 * Also verifies that reading past end-of-file raises an IOException.
 *
 * @param fileSys file system holding the file
 * @param name    path of the 12-block file to read
 * @throws IOException if any in-bounds read fails
 */
private void pReadFile(FileSystem fileSys, Path name) throws IOException {
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[12 * blockSize];
  if (simulatedStorage) {
    // SimulatedFSDataset serves the same constant byte for every offset.
    java.util.Arrays.fill(expected, SimulatedFSDataset.DEFAULT_DATABYTE);
  } else {
    // Same seed as the writer regenerates the identical byte sequence.
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read first 4K bytes
  byte[] actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  // now do a pread for the first 8K bytes
  actual = new byte[8192];
  doPread(stm, 0L, actual, 0, 8192);
  checkAndEraseData(actual, 0, expected, "Pread Test 1");
  // Now check to see if the normal read returns 4K-8K byte range
  actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 4096, expected, "Pread Test 2");
  // Now see if we can cross a single block boundary successfully
  // read 4K bytes from blockSize - 2K offset
  stm.readFully(blockSize - 2048, actual, 0, 4096);
  checkAndEraseData(actual, (blockSize - 2048), expected, "Pread Test 3");
  // now see if we can cross two block boundaries successfully
  // read blockSize + 4K bytes from blockSize - 2K offset
  actual = new byte[blockSize + 4096];
  stm.readFully(blockSize - 2048, actual);
  checkAndEraseData(actual, (blockSize - 2048), expected, "Pread Test 4");
  // now see if we can cross two block boundaries that are not cached
  // read blockSize + 4K bytes from 10*blockSize - 2K offset
  actual = new byte[blockSize + 4096];
  stm.readFully(10 * blockSize - 2048, actual);
  checkAndEraseData(actual, (10 * blockSize - 2048), expected, "Pread Test 5");
  // now check that even after all these preads, we can still read
  // bytes 8K-12K
  actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 8192, expected, "Pread Test 6");
  // done
  stm.close();
  // check block location caching: reopen and pread from several blocks
  stm = fileSys.open(name);
  stm.readFully(1, actual, 0, 4096);
  stm.readFully(4*blockSize, actual, 0, 4096);
  stm.readFully(7*blockSize, actual, 0, 4096);
  actual = new byte[3*4096];
  stm.readFully(0*blockSize, actual, 0, 3*4096);
  checkAndEraseData(actual, 0, expected, "Pread Test 7");
  actual = new byte[8*4096];
  stm.readFully(3*blockSize, actual, 0, 8*4096);
  checkAndEraseData(actual, 3*blockSize, expected, "Pread Test 8");
  // read the tail
  stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize/2);
  IOException res = null;
  try { // read beyond the end of the file
    stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize);
  } catch (IOException e) {
    // expected: pread past EOF must fail
    res = e;
  }
  assertTrue("Error reading beyond file boundary.", res != null);

  stm.close();
}
 
Example 11
Source File: TestPread.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * Exercises positional reads (preads) at and across block boundaries of a
 * 12-block file, checking every byte against the expected pattern
 * (constant fill byte for simulated storage, seeded random stream otherwise).
 * Also verifies that reading past end-of-file raises an IOException.
 *
 * @param fileSys file system holding the file
 * @param name    path of the 12-block file to read
 * @throws IOException if any in-bounds read fails
 */
private void pReadFile(FileSystem fileSys, Path name) throws IOException {
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[12 * blockSize];
  if (simulatedStorage) {
    // SimulatedFSDataset serves the same constant byte for every offset.
    java.util.Arrays.fill(expected, SimulatedFSDataset.DEFAULT_DATABYTE);
  } else {
    // Same seed as the writer regenerates the identical byte sequence.
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read first 4K bytes
  byte[] actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  // now do a pread for the first 8K bytes
  actual = new byte[8192];
  doPread(stm, 0L, actual, 0, 8192);
  checkAndEraseData(actual, 0, expected, "Pread Test 1");
  // Now check to see if the normal read returns 4K-8K byte range
  actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 4096, expected, "Pread Test 2");
  // Now see if we can cross a single block boundary successfully
  // read 4K bytes from blockSize - 2K offset
  stm.readFully(blockSize - 2048, actual, 0, 4096);
  checkAndEraseData(actual, (blockSize - 2048), expected, "Pread Test 3");
  // now see if we can cross two block boundaries successfully
  // read blockSize + 4K bytes from blockSize - 2K offset
  actual = new byte[blockSize + 4096];
  stm.readFully(blockSize - 2048, actual);
  checkAndEraseData(actual, (blockSize - 2048), expected, "Pread Test 4");
  // now see if we can cross two block boundaries that are not cached
  // read blockSize + 4K bytes from 10*blockSize - 2K offset
  actual = new byte[blockSize + 4096];
  stm.readFully(10 * blockSize - 2048, actual);
  checkAndEraseData(actual, (10 * blockSize - 2048), expected, "Pread Test 5");
  // now check that even after all these preads, we can still read
  // bytes 8K-12K
  actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 8192, expected, "Pread Test 6");
  // done
  stm.close();
  // check block location caching: reopen and pread from several blocks
  stm = fileSys.open(name);
  stm.readFully(1, actual, 0, 4096);
  stm.readFully(4*blockSize, actual, 0, 4096);
  stm.readFully(7*blockSize, actual, 0, 4096);
  actual = new byte[3*4096];
  stm.readFully(0*blockSize, actual, 0, 3*4096);
  checkAndEraseData(actual, 0, expected, "Pread Test 7");
  actual = new byte[8*4096];
  stm.readFully(3*blockSize, actual, 0, 8*4096);
  checkAndEraseData(actual, 3*blockSize, expected, "Pread Test 8");
  // read the tail
  stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize/2);
  IOException res = null;
  try { // read beyond the end of the file
    stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize);
  } catch (IOException e) {
    // expected: pread past EOF must fail
    res = e;
  }
  assertTrue("Error reading beyond file boundary.", res != null);

  stm.close();
}
 
Example 12
Source File: TestFileAppend.java    From RDFS with Apache License 2.0 4 votes vote down vote up
/**
 * Waits until every full block of {@code name} is reported with at least
 * {@code repl} replicas, then verifies the file contents against either the
 * simulated-storage fill byte or the locally buffered {@code fileContents}.
 *
 * @param fileSys file system holding the file
 * @param name    path of the file to verify
 * @param repl    minimum replica count expected for every block
 * @throws IOException if block locations or file data cannot be read
 */
private void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers can observe the interruption.
      Thread.currentThread().interrupt();
    }
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, fileSize);
    if (locations.length < numBlocks) {
      System.out.println("Number of blocks found " + locations.length);
      done = false;
      continue;
    }
    for (int idx = 0; idx < numBlocks; idx++) {
      if (locations[idx].getHosts().length < repl) {
        System.out.println("Block index " + idx + " not yet replicated.");
        done = false;
        break;
      }
    }
  }
  byte[] expected = new byte[numBlocks * blockSize];
  if (simulatedStorage) {
    // SimulatedFSDataset serves the same constant byte for every offset.
    java.util.Arrays.fill(expected, SimulatedFSDataset.DEFAULT_DATABYTE);
  } else {
    System.arraycopy(fileContents, 0, expected, 0, expected.length);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[numBlocks * blockSize];
  // try-with-resources closes the stream (the original leaked it).
  try (FSDataInputStream stm = fileSys.open(name)) {
    stm.readFully(0, actual);
  }
  checkData(actual, 0, expected, "Read 1");
}
 
Example 13
Source File: TestScatterGather.java    From RDFS with Apache License 2.0 4 votes vote down vote up
/**
 * Exercises scatter-gather positional reads at and across block boundaries
 * of a 12-block file, checking every byte against the expected pattern
 * (constant fill byte for simulated storage, seeded random stream otherwise).
 * Also verifies that a normal read past end-of-file raises an IOException.
 *
 * @param fileSys file system holding the file
 * @param name    path of the 12-block file to read
 * @throws IOException if any in-bounds read fails
 */
private void pReadFile(FileSystem fileSys, Path name) throws IOException {
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[(int)(12*blockSize)];
  if (simulatedStorage) {
    // SimulatedFSDataset serves the same constant byte for every offset.
    java.util.Arrays.fill(expected, SimulatedFSDataset.DEFAULT_DATABYTE);
  } else {
    // Same seed as the writer regenerates the identical byte sequence.
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. pread the first 4K bytes
  List<ByteBuffer> rlist = stm.readFullyScatterGather(0, 4096);
  checkAndEraseData(rlist, 4096, 0, expected, "Read Sanity Test");

  // now do a pread for the first 8K bytes
  byte[] actual = new byte[8192];
  doPread(stm, 0L, actual, 0, 8192);
  checkAndEraseData(actual, 0, expected, "Pread Test 1");

  // Now check to see if the normal read returns 0K-8K byte range
  actual = new byte[8192];
  stm.readFully(actual);
  checkAndEraseData(actual, 0, expected, "Pread Test 2");

  // Now see if we can cross a single block boundary successfully
  // read 4K bytes from blockSize - 2K offset
  rlist = stm.readFullyScatterGather(blockSize - 2048, 4096);
  checkAndEraseData(rlist, 4096, (int)(blockSize-2048), expected, "Pread Test 3");

  // now see if we can cross two block boundaries successfully
  // read blockSize + 4K bytes from blockSize - 2K offset
  int size = (int)(blockSize+4096);
  rlist = stm.readFullyScatterGather(blockSize - 2048, size);
  checkAndEraseData(rlist, size, (int)(blockSize-2048), expected, "Pread Test 4");

  // now see if we can cross two block boundaries that are not cached
  // read blockSize + 4K bytes from 10*blockSize - 2K offset
  size = (int)(blockSize+4096);
  rlist = stm.readFullyScatterGather(10*blockSize - 2048, size);
  checkAndEraseData(rlist, size, (int)(10*blockSize-2048), expected, "Pread Test 5");

  // now check that even after all these preads, we can still read
  // bytes 8K-12K
  actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 8192, expected, "Pread Test 6");

  // pread beyond the end of the file. It should return the last half block.
  size = blockSize/2;
  rlist = stm.readFullyScatterGather(11*blockSize+size, blockSize);
  // NOTE(review): label "Pread Test 5" is duplicated from the earlier check
  // (likely copy-paste); kept as-is to preserve the original output.
  checkAndEraseData(rlist, size, (int)(11*blockSize+size), expected, "Pread Test 5");

  IOException res = null;
  try { // normal read beyond the end of the file
    stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize);
  } catch (IOException e) {
    // expected: read past EOF must fail
    res = e;
  }
  assertTrue("Error reading beyond file boundary.", res != null);

  stm.close();
}
 
Example 14
Source File: TestPread.java    From RDFS with Apache License 2.0 4 votes vote down vote up
/**
 * Exercises positional reads (preads) at and across block boundaries of a
 * 12-block file, checking every byte against the expected pattern
 * (constant fill byte for simulated storage, seeded random stream otherwise).
 * Also verifies that reading past end-of-file raises an IOException.
 *
 * @param fileSys file system holding the file
 * @param name    path of the 12-block file to read
 * @throws IOException if any in-bounds read fails
 */
private void pReadFile(FileSystem fileSys, Path name) throws IOException {
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[(int)(12*blockSize)];
  if (simulatedStorage) {
    // SimulatedFSDataset serves the same constant byte for every offset.
    java.util.Arrays.fill(expected, SimulatedFSDataset.DEFAULT_DATABYTE);
  } else {
    // Same seed as the writer regenerates the identical byte sequence.
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read first 4K bytes
  byte[] actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  // now do a pread for the first 8K bytes
  actual = new byte[8192];
  doPread(stm, 0L, actual, 0, 8192);
  checkAndEraseData(actual, 0, expected, "Pread Test 1");
  // Now check to see if the normal read returns 4K-8K byte range
  actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 4096, expected, "Pread Test 2");
  // Now see if we can cross a single block boundary successfully
  // read 4K bytes from blockSize - 2K offset
  stm.readFully(blockSize - 2048, actual, 0, 4096);
  checkAndEraseData(actual, (int)(blockSize-2048), expected, "Pread Test 3");
  // now see if we can cross two block boundaries successfully
  // read blockSize + 4K bytes from blockSize - 2K offset
  actual = new byte[(int)(blockSize+4096)];
  stm.readFully(blockSize - 2048, actual);
  checkAndEraseData(actual, (int)(blockSize-2048), expected, "Pread Test 4");
  // now see if we can cross two block boundaries that are not cached
  // read blockSize + 4K bytes from 10*blockSize - 2K offset
  actual = new byte[(int)(blockSize+4096)];
  stm.readFully(10*blockSize - 2048, actual);
  checkAndEraseData(actual, (int)(10*blockSize-2048), expected, "Pread Test 5");
  // now check that even after all these preads, we can still read
  // bytes 8K-12K
  actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 8192, expected, "Pread Test 6");
  // done
  stm.close();
  // check block location caching: reopen and pread from several blocks
  stm = fileSys.open(name);
  stm.readFully(1, actual, 0, 4096);
  stm.readFully(4*blockSize, actual, 0, 4096);
  stm.readFully(7*blockSize, actual, 0, 4096);
  actual = new byte[3*4096];
  stm.readFully(0*blockSize, actual, 0, 3*4096);
  checkAndEraseData(actual, 0, expected, "Pread Test 7");
  actual = new byte[8*4096];
  stm.readFully(3*blockSize, actual, 0, 8*4096);
  checkAndEraseData(actual, 3*blockSize, expected, "Pread Test 8");
  // read the tail
  stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize/2);
  IOException res = null;
  try { // read beyond the end of the file
    stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize);
  } catch (IOException e) {
    // expected: pread past EOF must fail
    res = e;
  }
  assertTrue("Error reading beyond file boundary.", res != null);

  stm.close();
}
 
Example 15
Source File: TestFileAppend.java    From hadoop-gpu with Apache License 2.0 4 votes vote down vote up
/**
 * Waits until every full block of {@code name} is reported with at least
 * {@code repl} replicas, then verifies the file contents against either the
 * simulated-storage fill byte or the locally buffered {@code fileContents}.
 *
 * @param fileSys file system holding the file
 * @param name    path of the file to verify
 * @param repl    minimum replica count expected for every block
 * @throws IOException if block locations or file data cannot be read
 */
private void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers can observe the interruption.
      Thread.currentThread().interrupt();
    }
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, fileSize);
    if (locations.length < numBlocks) {
      System.out.println("Number of blocks found " + locations.length);
      done = false;
      continue;
    }
    for (int idx = 0; idx < numBlocks; idx++) {
      if (locations[idx].getHosts().length < repl) {
        System.out.println("Block index " + idx + " not yet replicated.");
        done = false;
        break;
      }
    }
  }
  byte[] expected = new byte[numBlocks * blockSize];
  if (simulatedStorage) {
    // SimulatedFSDataset serves the same constant byte for every offset.
    java.util.Arrays.fill(expected, SimulatedFSDataset.DEFAULT_DATABYTE);
  } else {
    System.arraycopy(fileContents, 0, expected, 0, expected.length);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[numBlocks * blockSize];
  // try-with-resources closes the stream (the original leaked it).
  try (FSDataInputStream stm = fileSys.open(name)) {
    stm.readFully(0, actual);
  }
  checkData(actual, 0, expected, "Read 1");
}
 
Example 16
Source File: TestPread.java    From hadoop-gpu with Apache License 2.0 4 votes vote down vote up
/**
 * Exercises positional reads (preads) at and across block boundaries of a
 * 12-block file, checking every byte against the expected pattern
 * (constant fill byte for simulated storage, seeded random stream otherwise).
 * Also verifies that reading past end-of-file raises an IOException.
 *
 * @param fileSys file system holding the file
 * @param name    path of the 12-block file to read
 * @throws IOException if any in-bounds read fails
 */
private void pReadFile(FileSystem fileSys, Path name) throws IOException {
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[(int)(12*blockSize)];
  if (simulatedStorage) {
    // SimulatedFSDataset serves the same constant byte for every offset.
    java.util.Arrays.fill(expected, SimulatedFSDataset.DEFAULT_DATABYTE);
  } else {
    // Same seed as the writer regenerates the identical byte sequence.
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read first 4K bytes
  byte[] actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  // now do a pread for the first 8K bytes
  actual = new byte[8192];
  doPread(stm, 0L, actual, 0, 8192);
  checkAndEraseData(actual, 0, expected, "Pread Test 1");
  // Now check to see if the normal read returns 4K-8K byte range
  actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 4096, expected, "Pread Test 2");
  // Now see if we can cross a single block boundary successfully
  // read 4K bytes from blockSize - 2K offset
  stm.readFully(blockSize - 2048, actual, 0, 4096);
  checkAndEraseData(actual, (int)(blockSize-2048), expected, "Pread Test 3");
  // now see if we can cross two block boundaries successfully
  // read blockSize + 4K bytes from blockSize - 2K offset
  actual = new byte[(int)(blockSize+4096)];
  stm.readFully(blockSize - 2048, actual);
  checkAndEraseData(actual, (int)(blockSize-2048), expected, "Pread Test 4");
  // now see if we can cross two block boundaries that are not cached
  // read blockSize + 4K bytes from 10*blockSize - 2K offset
  actual = new byte[(int)(blockSize+4096)];
  stm.readFully(10*blockSize - 2048, actual);
  checkAndEraseData(actual, (int)(10*blockSize-2048), expected, "Pread Test 5");
  // now check that even after all these preads, we can still read
  // bytes 8K-12K
  actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 8192, expected, "Pread Test 6");
  // done
  stm.close();
  // check block location caching: reopen and pread from several blocks
  stm = fileSys.open(name);
  stm.readFully(1, actual, 0, 4096);
  stm.readFully(4*blockSize, actual, 0, 4096);
  stm.readFully(7*blockSize, actual, 0, 4096);
  actual = new byte[3*4096];
  stm.readFully(0*blockSize, actual, 0, 3*4096);
  checkAndEraseData(actual, 0, expected, "Pread Test 7");
  actual = new byte[8*4096];
  stm.readFully(3*blockSize, actual, 0, 8*4096);
  checkAndEraseData(actual, 3*blockSize, expected, "Pread Test 8");
  // read the tail
  stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize/2);
  IOException res = null;
  try { // read beyond the end of the file
    stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize);
  } catch (IOException e) {
    // expected: pread past EOF must fail
    res = e;
  }
  assertTrue("Error reading beyond file boundary.", res != null);

  stm.close();
}