org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset. Each example is drawn from an open source project; the source file, project, and license are noted above the code.
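Most of the examples share a single pattern: the simulated dataset is enabled on the test Configuration before a MiniDFSCluster is built, so the cluster's DataNodes keep only block metadata in memory and serve a fixed fill byte on read instead of real data. The condensed sketch below illustrates that pattern with the newer SimulatedFSDataset.setFactory style used in the hadoop and big-c examples; the class name, file path, and payload size are illustrative only, and the older RDFS and hadoop-gpu examples enable simulation via conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true) instead.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;

public class SimulatedFSDatasetSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Route every DataNode in the mini cluster to the in-memory simulated dataset
    SimulatedFSDataset.setFactory(conf);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    FileSystem fs = cluster.getFileSystem();
    try {
      // Write a small file; the simulated dataset tracks its length but discards the payload
      Path file = new Path("/simulated-sketch.dat");
      FSDataOutputStream out = fs.create(file);
      out.write(new byte[1024]);
      out.close();
      // Reads come back as the repeating fill byte, which is why the tests below compare
      // against SimulatedFSDataset.DEFAULT_DATABYTE when simulatedStorage is enabled
      FSDataInputStream in = fs.open(file);
      int firstByte = in.read();
      in.close();
      System.out.println(firstByte == SimulatedFSDataset.DEFAULT_DATABYTE);  // expected: true
    } finally {
      fs.close();
      cluster.shutdown();
    }
  }
}
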
Example #1
Source File: TestPread.java    From big-c with Apache License 2.0
private void dfsPreadTest(Configuration conf, boolean disableTransferTo, boolean verifyChecksum)
    throws IOException {
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
  conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
  // Set short retry timeouts so this test runs faster
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  if (disableTransferTo) {
    conf.setBoolean("dfs.datanode.transferTo.allowed", false);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fileSys = cluster.getFileSystem();
  fileSys.setVerifyChecksum(verifyChecksum);
  try {
    Path file1 = new Path("preadtest.dat");
    writeFile(fileSys, file1);
    pReadFile(fileSys, file1);
    datanodeRestartTest(cluster, fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example #2
Source File: MiniDFSCluster.java    From hadoop with Apache License 2.0
/**
 * Multiple-NameNode version of injectBlocks.
 */
public void injectBlocks(int nameNodeIndex, int dataNodeIndex,
    Iterable<Block> blocksToInject) throws IOException {
  if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
  final FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
  if (!(dataSet instanceof SimulatedFSDataset)) {
    throw new IOException("injectBlocks is valid only for SimulatedFSDataset");
  }
  String bpid = getNamesystem(nameNodeIndex).getBlockPoolId();
  SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
  sdataset.injectBlocks(bpid, blocksToInject);
  dataNodes.get(dataNodeIndex).datanode.scheduleAllBlockReport(0);
}
 
Example #3
Source File: MiniDFSCluster.java    From hadoop with Apache License 2.0
/**
 * This method is valid only if the data nodes have simulated data.
 * @param dataNodeIndex - index of the data node to inject into; the index is the same as for getDataNodes()
 * @param blocksToInject - the blocks to inject
 * @param bpid - (optional) the block pool id to use for injecting blocks.
 *             If not supplied then it is queried from the in-process NameNode.
 * @throws IOException if the dataset is not a SimulatedFSDataset,
 *             or if any of the blocks already exist in the data node
 */
public void injectBlocks(int dataNodeIndex,
    Iterable<Block> blocksToInject, String bpid) throws IOException {
  if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
  final FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
  if (!(dataSet instanceof SimulatedFSDataset)) {
    throw new IOException("injectBlocks is valid only for SimulatedFSDataset");
  }
  if (bpid == null) {
    bpid = getNamesystem().getBlockPoolId();
  }
  SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
  sdataset.injectBlocks(bpid, blocksToInject);
  dataNodes.get(dataNodeIndex).datanode.scheduleAllBlockReport(0);
}
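
A hedged usage sketch (not taken from any one test): once the DataNodes are backed by SimulatedFSDataset, a test can pre-populate a DataNode with blocks and let injectBlocks push them to the NameNode via the block report it schedules. The block ids, lengths, and generation stamps below are illustrative, and the helper assumes imports of org.apache.hadoop.hdfs.protocol.Block and java.util.Arrays.

// Hypothetical helper: injects two fabricated blocks into DataNode 0 of a running cluster
// whose storage was configured with SimulatedFSDataset.setFactory(conf) before startup.
static void injectTwoBlocks(MiniDFSCluster cluster) throws IOException {
  Iterable<Block> blocksToInject = Arrays.asList(
      new Block(1, 4096, 1001),   // illustrative blockId, numBytes, generation stamp
      new Block(2, 4096, 1001));
  cluster.injectBlocks(0, blocksToInject, null);  // null bpid: queried from the in-process NameNode
}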
 
Example #4
Source File: TestFileAppend4.java    From hadoop with Apache License 2.0
@Before
public void setUp() throws Exception {
  this.conf = new Configuration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }

  // lower heartbeat interval for fast recognition of DN death
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
  // handle under-replicated blocks quickly (for replication asserts)
  conf.setInt(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 5);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
  
  // handle failures in the DFSClient pipeline quickly
  // (for cluster.shutdown(); fs.close() idiom)
  conf.setInt("ipc.client.connect.max.retries", 1);
}
 
Example #5
Source File: TestSimulatedFSDataset.java    From hadoop-gpu with Apache License 2.0
public void testInvalidate() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  int bytesAdded = addSomeBlocks(fsdataset);
  Block[] deleteBlocks = new Block[2];
  deleteBlocks[0] = new Block(1, 0, 0);
  deleteBlocks[1] = new Block(2, 0, 0);
  fsdataset.invalidate(deleteBlocks);
  checkInvalidBlock(deleteBlocks[0]);
  checkInvalidBlock(deleteBlocks[1]);
  long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
  assertEquals(bytesAdded-sizeDeleted, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity()-bytesAdded+sizeDeleted, fsdataset.getRemaining());

  // Now make sure the rest of the blocks are valid
  for (int i=3; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(b));
  }
}
 
Example #6
Source File: TestSmallBlock.java    From hadoop with Apache License 2.0
/**
 * Tests small block size in DFS.
 */
@Test
public void testSmallBlock() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  conf.set(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, "1");
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path file1 = new Path("smallblocktest.dat");
    writeFile(fileSys, file1);
    checkFile(fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example #7
Source File: TestSmallBlock.java    From hadoop with Apache License 2.0
private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  assertEquals("Number of blocks", fileSize, locations.length);
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; ++i) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  stm.close();
}
 
Example #8
Source File: TestSmallBlock.java    From hadoop-gpu with Apache License 2.0
private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  assertEquals("Number of blocks", fileSize, locations.length);
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; ++i) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  stm.close();
}
 
Example #9
Source File: TestPread.java    From hadoop with Apache License 2.0
private void dfsPreadTest(Configuration conf, boolean disableTransferTo, boolean verifyChecksum)
    throws IOException {
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
  conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
  // Set short retry timeouts so this test runs faster
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  if (disableTransferTo) {
    conf.setBoolean("dfs.datanode.transferTo.allowed", false);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fileSys = cluster.getFileSystem();
  fileSys.setVerifyChecksum(verifyChecksum);
  try {
    Path file1 = new Path("preadtest.dat");
    writeFile(fileSys, file1);
    pReadFile(fileSys, file1);
    datanodeRestartTest(cluster, fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example #10
Source File: TestFileAppend.java    From big-c with Apache License 2.0
/**
 * FileNotFoundException is expected for appending to a non-existing file
 * 
 * @throws FileNotFoundException as the result
 */
@Test(expected = FileNotFoundException.class)
public void testFileNotFound() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    Path file1 = new Path("/nonexistingfile.dat");
    fs.append(file1);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example #11
Source File: TestSmallBlock.java    From big-c with Apache License 2.0
/**
 * Tests small block size in DFS.
 */
@Test
public void testSmallBlock() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  conf.set(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, "1");
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path file1 = new Path("smallblocktest.dat");
    writeFile(fileSys, file1);
    checkFile(fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example #12
Source File: MiniDFSCluster.java    From big-c with Apache License 2.0
/**
 * This method is valid only if the data nodes have simulated data.
 * @param dataNodeIndex - index of the data node to inject into; the index is the same as for getDataNodes()
 * @param blocksToInject - the blocks to inject
 * @param bpid - (optional) the block pool id to use for injecting blocks.
 *             If not supplied then it is queried from the in-process NameNode.
 * @throws IOException if the dataset is not a SimulatedFSDataset,
 *             or if any of the blocks already exist in the data node
 */
public void injectBlocks(int dataNodeIndex,
    Iterable<Block> blocksToInject, String bpid) throws IOException {
  if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
  final FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
  if (!(dataSet instanceof SimulatedFSDataset)) {
    throw new IOException("injectBlocks is valid only for SimulatedFSDataset");
  }
  if (bpid == null) {
    bpid = getNamesystem().getBlockPoolId();
  }
  SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
  sdataset.injectBlocks(bpid, blocksToInject);
  dataNodes.get(dataNodeIndex).datanode.scheduleAllBlockReport(0);
}
 
Example #13
Source File: TestSmallBlock.java    From big-c with Apache License 2.0
private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  assertEquals("Number of blocks", fileSize, locations.length);
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; ++i) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  stm.close();
}
 
Example #14
Source File: MiniDFSCluster.java    From big-c with Apache License 2.0
/**
 * Multiple-NameNode version of injectBlocks.
 */
public void injectBlocks(int nameNodeIndex, int dataNodeIndex,
    Iterable<Block> blocksToInject) throws IOException {
  if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
  final FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
  if (!(dataSet instanceof SimulatedFSDataset)) {
    throw new IOException("injectBlocks is valid only for SimulatedFSDataset");
  }
  String bpid = getNamesystem(nameNodeIndex).getBlockPoolId();
  SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
  sdataset.injectBlocks(bpid, blocksToInject);
  dataNodes.get(dataNodeIndex).datanode.scheduleAllBlockReport(0);
}
 
Example #15
Source File: TestSimulatedFSDataset.java    From RDFS with Apache License 2.0
public void testInvalidate() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  int bytesAdded = addSomeBlocks(fsdataset);
  Block[] deleteBlocks = new Block[2];
  deleteBlocks[0] = new Block(1, 0, 0);
  deleteBlocks[1] = new Block(2, 0, 0);
  fsdataset.invalidate(0,deleteBlocks);
  checkInvalidBlock(deleteBlocks[0]);
  checkInvalidBlock(deleteBlocks[1]);
  long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
  assertEquals(bytesAdded-sizeDeleted, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity()-bytesAdded+sizeDeleted, fsdataset.getRemaining());

  // Now make sure the rest of the blocks are valid
  for (int i=3; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(0, b, false));
  }
}
 
Example #16
Source File: TestFileAppend.java    From hadoop with Apache License 2.0
/**
 * FileNotFoundException is expected for appending to a non-existing file
 * 
 * @throws FileNotFoundException as the result
 */
@Test(expected = FileNotFoundException.class)
public void testFileNotFound() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    Path file1 = new Path("/nonexistingfile.dat");
    fs.append(file1);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example #17
Source File: TestSmallBlock.java    From RDFS with Apache License 2.0
private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  assertEquals("Number of blocks", fileSize, locations.length);
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; ++i) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  stm.close();
}
 
Example #18
Source File: TestFileAppend4.java    From big-c with Apache License 2.0
@Before
public void setUp() throws Exception {
  this.conf = new Configuration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }

  // lower heartbeat interval for fast recognition of DN death
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
  // handle under-replicated blocks quickly (for replication asserts)
  conf.setInt(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 5);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
  
  // handle failures in the DFSClient pipeline quickly
  // (for cluster.shutdown(); fs.close() idiom)
  conf.setInt("ipc.client.connect.max.retries", 1);
}
 
Example #19
Source File: TestFileAppend4.java    From RDFS with Apache License 2.0
@Override
public void setUp() throws Exception {
  this.conf = new Configuration();
  if (simulatedStorage) {
    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  }
  conf.setBoolean("dfs.support.append", true);

  // lower heartbeat interval for fast recognition of DN death
  conf.setInt("heartbeat.recheck.interval", 1000);
  conf.setInt("dfs.heartbeat.interval", 1);
  conf.setInt("dfs.socket.timeout", 5000);
  // handle under-replicated blocks quickly (for replication asserts)
  conf.setInt("dfs.replication.pending.timeout.sec", 5);
  conf.setInt("dfs.replication.interval", 1);
  // handle failures in the DFSClient pipeline quickly
  // (for cluster.shutdown(); fs.close() idiom)
  conf.setInt("ipc.client.connect.max.retries", 1);
  conf.setInt("dfs.client.block.recovery.retries", 1);
  // Delay blockReceived calls from DNs to be more similar to a real
  // cluster. 10ms is enough so that client often gets there first.
  conf.setInt("dfs.datanode.artificialBlockReceivedDelay", 10);
}
 
Example #20
Source File: TestSimulatedFSDataset.java    From hadoop-gpu with Apache License 2.0
public void testGetBlockReport() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  Block[] blockReport = fsdataset.getBlockReport();
  assertEquals(0, blockReport.length);
  int bytesAdded = addSomeBlocks(fsdataset);
  blockReport = fsdataset.getBlockReport();
  assertEquals(NUMBLOCKS, blockReport.length);
  for (Block b: blockReport) {
    assertNotNull(b);
    assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
  }
}
 
Example #21
Source File: TestReplication.java    From RDFS with Apache License 2.0
/**
 * Tests replication in DFS.
 */
public void runReplication(boolean simulated) throws IOException {
  Configuration conf = new Configuration();
  conf.setBoolean("dfs.replication.considerLoad", false);
  if (simulated) {
    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  }
  MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, racks);
  cluster.waitActive();
  
  InetSocketAddress addr = new InetSocketAddress("localhost",
                                                 cluster.getNameNodePort());
  DFSClient client = new DFSClient(addr, conf);
  
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  assertEquals("Number of Datanodes ", numDatanodes, info.length);
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path file1 = new Path("/smallblocktest.dat");
    writeFile(fileSys, file1, 3);
    checkFile(fileSys, file1, 3);
    cleanupFile(fileSys, file1);
    writeFile(fileSys, file1, 10);
    checkFile(fileSys, file1, 10);
    cleanupFile(fileSys, file1);
    writeFile(fileSys, file1, 4);
    checkFile(fileSys, file1, 4);
    cleanupFile(fileSys, file1);
    writeFile(fileSys, file1, 1);
    checkFile(fileSys, file1, 1);
    cleanupFile(fileSys, file1);
    writeFile(fileSys, file1, 2);
    checkFile(fileSys, file1, 2);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example #22
Source File: TestSimulatedFSDataset.java    From hadoop-gpu with Apache License 2.0
public void testWriteRead() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  addSomeBlocks(fsdataset);
  for (int i=1; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(b));
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
    checkBlockDataAndSize(fsdataset, b, blockIdToLen(i));
  }
}
 
Example #23
Source File: TestSimulatedFSDataset.java    From RDFS with Apache License 2.0
public void testWriteRead() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  addSomeBlocks(fsdataset);
  for (int i=1; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(0, b, false));
    assertEquals(blockIdToLen(i), fsdataset.getFinalizedBlockLength(0,b));
    checkBlockDataAndSize(fsdataset, b, blockIdToLen(i));
  }
}
 
Example #24
Source File: TestSimulatedFSDataset.java    From RDFS with Apache License 2.0
void  checkBlockDataAndSize(FSDatasetInterface fsdataset, 
            Block b, long expectedLen) throws IOException { 
  InputStream input = fsdataset.getBlockInputStream(0,b);
  long lengthRead = 0;
  int data;
  while ((data = input.read()) != -1) {
    assertEquals(SimulatedFSDataset.DEFAULT_DATABYTE, data);
    lengthRead++;
  }
  assertEquals(expectedLen, lengthRead);
}
 
Example #25
Source File: TestSimulatedFSDataset.java    From RDFS with Apache License 2.0
public void testStorageUsage() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  assertEquals(fsdataset.getDfsUsed(), 0);
  assertEquals(fsdataset.getRemaining(), fsdataset.getCapacity());
  int bytesAdded = addSomeBlocks(fsdataset);
  assertEquals(bytesAdded, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity()-bytesAdded, fsdataset.getRemaining());
}
 
Example #26
Source File: TestFileLocalRead.java    From RDFS with Apache License 2.0
/**
 * Test that file data can be read by reading the block file
 * directly from the local store.
 */
public void testFileLocalRead() throws IOException {
  Configuration conf = new Configuration();
  conf.setBoolean("dfs.read.shortcircuit", true);
  if (simulatedStorage) {
    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  }
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  FileSystem fs = cluster.getFileSystem();
  try {

    //
    // check that / exists
    //
    Path path = new Path("/");
    System.out.println("Path : \"" + path.toString() + "\"");
    System.out.println(fs.getFileStatus(path).isDir()); 
    assertTrue("/ should be a directory", 
               fs.getFileStatus(path).isDir() == true);

    // 
    // create a new file in home directory
    //
    Path file1 = new Path("filelocal.dat");
    FSDataOutputStream stm = createFile(fs, file1, 1);

    // write to file
    writeFile(stm);
    stm.close();

    // Make sure a client can read it before it is closed.
    checkFile(fs, file1, 1);

  } finally {
    cluster.shutdown();
  }
}
 
Example #27
Source File: TestFileLocalRead.java    From RDFS with Apache License 2.0
private void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {}
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, fileSize);
    if (locations.length < numBlocks) {
      done = false;
      continue;
    }
    for (int idx = 0; idx < locations.length; idx++) {
      if (locations[idx].getHosts().length < repl) {
        done = false;
        break;
      }
    }
  }
  FSDataInputStream stm = fileSys.open(name);
  final byte[] expected;
  if (simulatedStorage) {
    expected = new byte[numBlocks * blockSize];
    for (int i= 0; i < expected.length; i++) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    expected = AppendTestUtil.randomBytes(seed, numBlocks*blockSize);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[numBlocks * blockSize];
  System.out.println("Verifying file ");
  stm.readFully(0, actual);
  stm.close();
  checkData(actual, 0, expected, "Read 1");
}
 
Example #28
Source File: MiniDFSCluster.java    From RDFS with Apache License 2.0
/**
 * This method is valid only if the data nodes have simulated data.
 * @param dataNodeIndex - index of the data node to inject into; the index is the same as for getDataNodes()
 * @param blocksToInject - the blocks to inject
 * @throws IOException if the dataset is not a SimulatedFSDataset,
 *             or if any of the blocks already exist in the data node
 */
public void injectBlocks(int dataNodeIndex, Block[] blocksToInject) throws IOException {
  if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  FSDatasetInterface dataSet = dataNodes.get(dataNodeIndex).datanode.getFSDataset();
  if (!(dataSet instanceof SimulatedFSDataset)) {
    throw new IOException("injectBlocks is valid only for SimulatedFSDataset");
  }
  SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
  sdataset.injectBlocks(getNameNode().getNamespaceID(), blocksToInject);
  dataNodes.get(dataNodeIndex).datanode.scheduleNSBlockReport(0);
}
 
Example #29
Source File: TestReplication.java    From hadoop-gpu with Apache License 2.0
/**
 * Tests replication in DFS.
 */
public void runReplication(boolean simulated) throws IOException {
  Configuration conf = new Configuration();
  conf.setBoolean("dfs.replication.considerLoad", false);
  if (simulated) {
    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  }
  MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, racks);
  cluster.waitActive();
  
  InetSocketAddress addr = new InetSocketAddress("localhost",
                                                 cluster.getNameNodePort());
  DFSClient client = new DFSClient(addr, conf);
  
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  assertEquals("Number of Datanodes ", numDatanodes, info.length);
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path file1 = new Path("/smallblocktest.dat");
    writeFile(fileSys, file1, 3);
    checkFile(fileSys, file1, 3);
    cleanupFile(fileSys, file1);
    writeFile(fileSys, file1, 10);
    checkFile(fileSys, file1, 10);
    cleanupFile(fileSys, file1);
    writeFile(fileSys, file1, 4);
    checkFile(fileSys, file1, 4);
    cleanupFile(fileSys, file1);
    writeFile(fileSys, file1, 1);
    checkFile(fileSys, file1, 1);
    cleanupFile(fileSys, file1);
    writeFile(fileSys, file1, 2);
    checkFile(fileSys, file1, 2);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example #30
Source File: TestFileAppend.java    From hadoop with Apache License 2.0
private void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {;}
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, AppendTestUtil.FILE_SIZE);
    if (locations.length < AppendTestUtil.NUM_BLOCKS) {
      System.out.println("Number of blocks found " + locations.length);
      done = false;
      continue;
    }
    for (int idx = 0; idx < AppendTestUtil.NUM_BLOCKS; idx++) {
      if (locations[idx].getHosts().length < repl) {
        System.out.println("Block index " + idx + " not yet replciated.");
        done = false;
        break;
      }
    }
  }
  byte[] expected = 
      new byte[AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE];
  if (simulatedStorage) {
    for (int i= 0; i < expected.length; i++) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    System.arraycopy(fileContents, 0, expected, 0, expected.length);
  }
  // do a sanity check. Read the file
  // do not check file status since the file is not yet closed.
  AppendTestUtil.checkFullFile(fileSys, name,
      AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE,
      expected, "Read 1", false);
}