Java Code Examples for org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset

The following examples show how to use org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset. They are extracted from open source projects; the originating project and source file are noted above each example.
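
Most of the examples below share the same setup: enable the simulated dataset on the test Configuration before starting a MiniDFSCluster, so every DataNode keeps its block data in memory rather than on disk. The following is a minimal sketch of that pattern, not taken from any single project; it assumes the usual HDFS test imports, and the test name is illustrative.

@Test
public void testWithSimulatedStorage() throws IOException {
  Configuration conf = new HdfsConfiguration();
  // Swap the default on-disk FsDataset for the in-memory SimulatedFSDataset
  // in every DataNode started by the cluster below.
  SimulatedFSDataset.setFactory(conf);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    // Exercise HDFS as usual; simulated block reads return
    // SimulatedFSDataset.DEFAULT_DATABYTE instead of real file data.
    fs.mkdirs(new Path("/simulated"));
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
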
Example 1
Source Project: hadoop   Source File: TestFileAppend.java    License: Apache License 2.0
/**
 * FileNotFoundException is expected for appending to a non-existing file
 * 
 * @throws FileNotFoundException as the result
 */
@Test(expected = FileNotFoundException.class)
public void testFileNotFound() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    Path file1 = new Path("/nonexistingfile.dat");
    fs.append(file1);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 2
Source Project: hadoop   Source File: TestSmallBlock.java    License: Apache License 2.0
private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  assertEquals("Number of blocks", fileSize, locations.length);
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; ++i) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  stm.close();
}
 
Example 3
Source Project: hadoop   Source File: TestSmallBlock.java    License: Apache License 2.0
/**
 * Tests small block size in DFS.
 */
@Test
public void testSmallBlock() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  conf.set(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, "1");
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path file1 = new Path("smallblocktest.dat");
    writeFile(fileSys, file1);
    checkFile(fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example 4
Source Project: hadoop   Source File: MiniDFSCluster.java    License: Apache License 2.0
/**
 * This method is valid only if the data nodes have simulated data
 * @param dataNodeIndex - data node in which to inject; the index is the same as for getDataNodes()
 * @param blocksToInject - the blocks
 * @param bpid - (optional) the block pool id to use for injecting blocks.
 *             If not supplied then it is queried from the in-process NameNode.
 * @throws IOException
 *             if the dataset is not a SimulatedFSDataset, or
 *             if any of the blocks already exist in the data node
 */
public void injectBlocks(int dataNodeIndex,
    Iterable<Block> blocksToInject, String bpid) throws IOException {
  if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
  final FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
  if (!(dataSet instanceof SimulatedFSDataset)) {
    throw new IOException("injectBlocks is valid only for SimulatedFSDataset");
  }
  if (bpid == null) {
    bpid = getNamesystem().getBlockPoolId();
  }
  SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
  sdataset.injectBlocks(bpid, blocksToInject);
  dataNodes.get(dataNodeIndex).datanode.scheduleAllBlockReport(0);
}
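
As a usage illustration, a test that already has a MiniDFSCluster running with simulated storage could inject a couple of blocks into data node 0 roughly as follows. This is a sketch, not code from the project: the block ids and sizes are hypothetical, java.util.Arrays is assumed to be imported, and only the APIs shown on this page are used.

// 'cluster' is assumed to be a MiniDFSCluster whose DataNodes use SimulatedFSDataset.
// Block(blockId, numBytes, generationStamp) values are purely illustrative.
Iterable<Block> blocksToInject = Arrays.asList(new Block(1, 4096, 0), new Block(2, 4096, 0));
// Passing null for bpid makes injectBlocks query the block pool id from the
// in-process NameNode, as described in the javadoc above.
cluster.injectBlocks(0, blocksToInject, null);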
 
Example 5
Source Project: hadoop   Source File: MiniDFSCluster.java    License: Apache License 2.0
/**
 * Multiple-NameNode version of injectBlocks.
 */
public void injectBlocks(int nameNodeIndex, int dataNodeIndex,
    Iterable<Block> blocksToInject) throws IOException {
  if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
  final FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
  if (!(dataSet instanceof SimulatedFSDataset)) {
    throw new IOException("injectBlocks is valid only for SimulatedFSDataset");
  }
  String bpid = getNamesystem(nameNodeIndex).getBlockPoolId();
  SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
  sdataset.injectBlocks(bpid, blocksToInject);
  dataNodes.get(dataNodeIndex).datanode.scheduleAllBlockReport(0);
}
 
Example 6
Source Project: hadoop   Source File: TestFileAppend4.java    License: Apache License 2.0
@Before
public void setUp() throws Exception {
  this.conf = new Configuration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }

  // lower heartbeat interval for fast recognition of DN death
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
  // handle under-replicated blocks quickly (for replication asserts)
  conf.setInt(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 5);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
  
  // handle failures in the DFSClient pipeline quickly
  // (for cluster.shutdown(); fs.close() idiom)
  conf.setInt("ipc.client.connect.max.retries", 1);
}
 
Example 7
Source Project: hadoop   Source File: TestPread.java    License: Apache License 2.0
private void dfsPreadTest(Configuration conf, boolean disableTransferTo, boolean verifyChecksum)
    throws IOException {
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
  conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
  // Set short retry timeouts so this test runs faster
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  if (disableTransferTo) {
    conf.setBoolean("dfs.datanode.transferTo.allowed", false);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fileSys = cluster.getFileSystem();
  fileSys.setVerifyChecksum(verifyChecksum);
  try {
    Path file1 = new Path("preadtest.dat");
    writeFile(fileSys, file1);
    pReadFile(fileSys, file1);
    datanodeRestartTest(cluster, fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example 8
Source Project: big-c   Source File: TestFileAppend.java    License: Apache License 2.0
/**
 * FileNotFoundException is expected for appending to a non-existing file
 * 
 * @throws FileNotFoundException as the result
 */
@Test(expected = FileNotFoundException.class)
public void testFileNotFound() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    Path file1 = new Path("/nonexistingfile.dat");
    fs.append(file1);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 9
Source Project: big-c   Source File: TestSmallBlock.java    License: Apache License 2.0
private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  assertEquals("Number of blocks", fileSize, locations.length);
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; ++i) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  stm.close();
}
 
Example 10
Source Project: big-c   Source File: TestSmallBlock.java    License: Apache License 2.0
/**
 * Tests small block size in DFS.
 */
@Test
public void testSmallBlock() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  conf.set(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, "1");
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path file1 = new Path("smallblocktest.dat");
    writeFile(fileSys, file1);
    checkFile(fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example 11
Source Project: big-c   Source File: MiniDFSCluster.java    License: Apache License 2.0
/**
 * This method is valid only if the data nodes have simulated data
 * @param dataNodeIndex - data node in which to inject; the index is the same as for getDataNodes()
 * @param blocksToInject - the blocks
 * @param bpid - (optional) the block pool id to use for injecting blocks.
 *             If not supplied then it is queried from the in-process NameNode.
 * @throws IOException
 *             if the dataset is not a SimulatedFSDataset, or
 *             if any of the blocks already exist in the data node
 */
public void injectBlocks(int dataNodeIndex,
    Iterable<Block> blocksToInject, String bpid) throws IOException {
  if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
  final FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
  if (!(dataSet instanceof SimulatedFSDataset)) {
    throw new IOException("injectBlocks is valid only for SimulatedFSDataset");
  }
  if (bpid == null) {
    bpid = getNamesystem().getBlockPoolId();
  }
  SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
  sdataset.injectBlocks(bpid, blocksToInject);
  dataNodes.get(dataNodeIndex).datanode.scheduleAllBlockReport(0);
}
 
Example 12
Source Project: big-c   Source File: MiniDFSCluster.java    License: Apache License 2.0
/**
 * Multiple-NameNode version of injectBlocks.
 */
public void injectBlocks(int nameNodeIndex, int dataNodeIndex,
    Iterable<Block> blocksToInject) throws IOException {
  if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
  final FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
  if (!(dataSet instanceof SimulatedFSDataset)) {
    throw new IOException("injectBlocks is valid only for SimulatedFSDataset");
  }
  String bpid = getNamesystem(nameNodeIndex).getBlockPoolId();
  SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
  sdataset.injectBlocks(bpid, blocksToInject);
  dataNodes.get(dataNodeIndex).datanode.scheduleAllBlockReport(0);
}
 
Example 13
Source Project: big-c   Source File: TestFileAppend4.java    License: Apache License 2.0
@Before
public void setUp() throws Exception {
  this.conf = new Configuration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }

  // lower heartbeat interval for fast recognition of DN death
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
  // handle under-replicated blocks quickly (for replication asserts)
  conf.setInt(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 5);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
  
  // handle failures in the DFSClient pipeline quickly
  // (for cluster.shutdown(); fs.close() idiom)
  conf.setInt("ipc.client.connect.max.retries", 1);
}
 
Example 14
Source Project: big-c   Source File: TestPread.java    License: Apache License 2.0
private void dfsPreadTest(Configuration conf, boolean disableTransferTo, boolean verifyChecksum)
    throws IOException {
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
  conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
  // Set short retry timeouts so this test runs faster
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  if (disableTransferTo) {
    conf.setBoolean("dfs.datanode.transferTo.allowed", false);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fileSys = cluster.getFileSystem();
  fileSys.setVerifyChecksum(verifyChecksum);
  try {
    Path file1 = new Path("preadtest.dat");
    writeFile(fileSys, file1);
    pReadFile(fileSys, file1);
    datanodeRestartTest(cluster, fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example 15
Source Project: RDFS   Source File: TestSmallBlock.java    License: Apache License 2.0
private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  assertEquals("Number of blocks", fileSize, locations.length);
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; ++i) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  stm.close();
}
 
Example 16
Source Project: RDFS   Source File: TestFileAppend4.java    License: Apache License 2.0
@Override
  public void setUp() throws Exception {
    this.conf = new Configuration();
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    conf.setBoolean("dfs.support.append", true);

    // lower heartbeat interval for fast recognition of DN death
    conf.setInt("heartbeat.recheck.interval", 1000);
    conf.setInt("dfs.heartbeat.interval", 1);
    conf.setInt("dfs.socket.timeout", 5000);
    // handle under-replicated blocks quickly (for replication asserts)
//    conf.set("dfs.replication.pending.timeout.sec", Integer.toString(5));
    conf.setInt("dfs.replication.pending.timeout.sec", 5);
    conf.setInt("dfs.replication.interval", 1);
    // handle failures in the DFSClient pipeline quickly
    // (for cluster.shutdown(); fs.close() idiom)
    conf.setInt("ipc.client.connect.max.retries", 1);
    conf.setInt("dfs.client.block.recovery.retries", 1);
    // Delay blockReceived calls from DNs to be more similar to a real
    // cluster. 10ms is enough so that client often gets there first.
    conf.setInt("dfs.datanode.artificialBlockReceivedDelay", 10);
  }
 
Example 17
Source Project: RDFS   Source File: TestSimulatedFSDataset.java    License: Apache License 2.0
public void testInvalidate() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  int bytesAdded = addSomeBlocks(fsdataset);
  Block[] deleteBlocks = new Block[2];
  deleteBlocks[0] = new Block(1, 0, 0);
  deleteBlocks[1] = new Block(2, 0, 0);
  fsdataset.invalidate(0,deleteBlocks);
  checkInvalidBlock(deleteBlocks[0]);
  checkInvalidBlock(deleteBlocks[1]);
  long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
  assertEquals(bytesAdded-sizeDeleted, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity()-bytesAdded+sizeDeleted, fsdataset.getRemaining());

  // Now make sure the rest of the blocks are valid
  for (int i=3; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(0, b, false));
  }
}
 
Example 18
Source Project: hadoop-gpu   Source File: TestSmallBlock.java    License: Apache License 2.0
private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  assertEquals("Number of blocks", fileSize, locations.length);
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; ++i) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  stm.close();
}
 
Example 19
Source Project: hadoop-gpu   Source File: TestSimulatedFSDataset.java    License: Apache License 2.0
public void testInvalidate() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  int bytesAdded = addSomeBlocks(fsdataset);
  Block[] deleteBlocks = new Block[2];
  deleteBlocks[0] = new Block(1, 0, 0);
  deleteBlocks[1] = new Block(2, 0, 0);
  fsdataset.invalidate(deleteBlocks);
  checkInvalidBlock(deleteBlocks[0]);
  checkInvalidBlock(deleteBlocks[1]);
  long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
  assertEquals(bytesAdded-sizeDeleted, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity()-bytesAdded+sizeDeleted, fsdataset.getRemaining());

  // Now make sure the rest of the blocks are valid
  for (int i=3; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(b));
  }
}
 
Example 20
Source Project: hadoop   Source File: TestFileAppend.java    License: Apache License 2.0
private void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {;}
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, AppendTestUtil.FILE_SIZE);
    if (locations.length < AppendTestUtil.NUM_BLOCKS) {
      System.out.println("Number of blocks found " + locations.length);
      done = false;
      continue;
    }
    for (int idx = 0; idx < AppendTestUtil.NUM_BLOCKS; idx++) {
      if (locations[idx].getHosts().length < repl) {
        System.out.println("Block index " + idx + " not yet replicated.");
        done = false;
        break;
      }
    }
  }
  byte[] expected = 
      new byte[AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE];
  if (simulatedStorage) {
    for (int i= 0; i < expected.length; i++) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    System.arraycopy(fileContents, 0, expected, 0, expected.length);
  }
  // do a sanity check. Read the file
  // do not check file status since the file is not yet closed.
  AppendTestUtil.checkFullFile(fileSys, name,
      AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE,
      expected, "Read 1", false);
}
 
Example 21
Source Project: hadoop   Source File: TestBalancer.java    License: Apache License 2.0
static void initConf(Configuration conf) {
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);
  SimulatedFSDataset.setFactory(conf);
  conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
}
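
A typical caller would apply this helper to a fresh configuration before building the cluster. The lines below are a sketch of that usage, not taken from the original test; the data node count is arbitrary.

Configuration conf = new HdfsConfiguration();
initConf(conf);  // enables SimulatedFSDataset plus fast heartbeat/replication settings
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();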
 
Example 22
Source Project: hadoop   Source File: TestFileCreation.java    License: Apache License 2.0
/**
 * Test that all open files are closed when client dies abnormally.
 */
@Test
public void testDFSClientDeath() throws IOException, InterruptedException {
  Configuration conf = new HdfsConfiguration();
  System.out.println("Testing abnormal client death.");
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  DFSClient dfsclient = dfs.dfs;
  try {

    // create a new file in home directory. Do not close it.
    //
    Path file1 = new Path("/clienttest.dat");
    FSDataOutputStream stm = createFile(fs, file1, 1);
    System.out.println("Created file clienttest.dat");

    // write to file
    writeFile(stm);

    // close the dfsclient before closing the output stream.
    // This should close all existing files.
    dfsclient.close();

    // reopen file system and verify that file exists.
    assertTrue(file1 + " does not exist.", 
        AppendTestUtil.createHdfsWithDifferentUsername(conf).exists(file1));
  } finally {
    cluster.shutdown();
  }
}
 
Example 23
Source Project: big-c   Source File: TestFileAppend.java    License: Apache License 2.0
private void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {;}
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, AppendTestUtil.FILE_SIZE);
    if (locations.length < AppendTestUtil.NUM_BLOCKS) {
      System.out.println("Number of blocks found " + locations.length);
      done = false;
      continue;
    }
    for (int idx = 0; idx < AppendTestUtil.NUM_BLOCKS; idx++) {
      if (locations[idx].getHosts().length < repl) {
        System.out.println("Block index " + idx + " not yet replicated.");
        done = false;
        break;
      }
    }
  }
  byte[] expected = 
      new byte[AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE];
  if (simulatedStorage) {
    for (int i= 0; i < expected.length; i++) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    System.arraycopy(fileContents, 0, expected, 0, expected.length);
  }
  // do a sanity check. Read the file
  // do not check file status since the file is not yet closed.
  AppendTestUtil.checkFullFile(fileSys, name,
      AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE,
      expected, "Read 1", false);
}
 
Example 24
Source Project: big-c   Source File: TestBalancer.java    License: Apache License 2.0
static void initConf(Configuration conf) {
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);
  SimulatedFSDataset.setFactory(conf);
  conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
}
 
Example 25
Source Project: big-c   Source File: TestFileCreation.java    License: Apache License 2.0
/**
 * Test that all open files are closed when client dies abnormally.
 */
@Test
public void testDFSClientDeath() throws IOException, InterruptedException {
  Configuration conf = new HdfsConfiguration();
  System.out.println("Testing abnormal client death.");
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  DFSClient dfsclient = dfs.dfs;
  try {

    // create a new file in home directory. Do not close it.
    //
    Path file1 = new Path("/clienttest.dat");
    FSDataOutputStream stm = createFile(fs, file1, 1);
    System.out.println("Created file clienttest.dat");

    // write to file
    writeFile(stm);

    // close the dfsclient before closing the output stream.
    // This should close all existing files.
    dfsclient.close();

    // reopen file system and verify that file exists.
    assertTrue(file1 + " does not exist.", 
        AppendTestUtil.createHdfsWithDifferentUsername(conf).exists(file1));
  } finally {
    cluster.shutdown();
  }
}
 
Example 26
Source Project: RDFS   Source File: MiniDFSCluster.java    License: Apache License 2.0
/**
 * This method is valid only if the data nodes have simulated data
 * @param dataNodeIndex - data node in which to inject; the index is the same as for getDataNodes()
 * @param blocksToInject - the blocks
 * @throws IOException
 *             if the dataset is not a SimulatedFSDataset, or
 *             if any of the blocks already exist in the data node
 */
public void injectBlocks(int dataNodeIndex, Block[] blocksToInject) throws IOException {
  if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  FSDatasetInterface dataSet = dataNodes.get(dataNodeIndex).datanode.getFSDataset();
  if (!(dataSet instanceof SimulatedFSDataset)) {
    throw new IOException("injectBlocks is valid only for SimulatedFSDataset");
  }
  SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
  sdataset.injectBlocks(getNameNode().getNamespaceID(), blocksToInject);
  dataNodes.get(dataNodeIndex).datanode.scheduleNSBlockReport(0);
}
 
Example 27
Source Project: RDFS   Source File: TestFileLocalRead.java    License: Apache License 2.0
private void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {}
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, fileSize);
    if (locations.length < numBlocks) {
      done = false;
      continue;
    }
    for (int idx = 0; idx < locations.length; idx++) {
      if (locations[idx].getHosts().length < repl) {
        done = false;
        break;
      }
    }
  }
  FSDataInputStream stm = fileSys.open(name);
  final byte[] expected;
  if (simulatedStorage) {
    expected = new byte[numBlocks * blockSize];
    for (int i= 0; i < expected.length; i++) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    expected = AppendTestUtil.randomBytes(seed, numBlocks*blockSize);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[numBlocks * blockSize];
  System.out.println("Verifying file ");
  stm.readFully(0, actual);
  stm.close();
  checkData(actual, 0, expected, "Read 1");
}
 
Example 28
Source Project: RDFS   Source File: TestFileLocalRead.java    License: Apache License 2.0
/**
 * Test that file data can be read by reading the block file
 * directly from the local store.
 */
public void testFileLocalRead() throws IOException {
  Configuration conf = new Configuration();
  conf.setBoolean("dfs.read.shortcircuit", true);
  if (simulatedStorage) {
    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  }
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  FileSystem fs = cluster.getFileSystem();
  try {

    //
    // check that / exists
    //
    Path path = new Path("/");
    System.out.println("Path : \"" + path.toString() + "\"");
    System.out.println(fs.getFileStatus(path).isDir()); 
    assertTrue("/ should be a directory", 
               fs.getFileStatus(path).isDir() == true);

    // 
    // create a new file in home directory. Do not close it.
    //
    Path file1 = new Path("filelocal.dat");
    FSDataOutputStream stm = createFile(fs, file1, 1);

    // write to file
    writeFile(stm);
    stm.close();

    // Make sure a client can read it before it is closed.
    checkFile(fs, file1, 1);

  } finally {
    cluster.shutdown();
  }
}
 
Example 29
Source Project: RDFS   Source File: TestSimulatedFSDataset.java    License: Apache License 2.0
public void testStorageUsage() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  assertEquals(fsdataset.getDfsUsed(), 0);
  assertEquals(fsdataset.getRemaining(), fsdataset.getCapacity());
  int bytesAdded = addSomeBlocks(fsdataset);
  assertEquals(bytesAdded, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity()-bytesAdded,  fsdataset.getRemaining());
  
}
 
Example 30
Source Project: RDFS   Source File: TestSimulatedFSDataset.java    License: Apache License 2.0
void  checkBlockDataAndSize(FSDatasetInterface fsdataset, 
            Block b, long expectedLen) throws IOException { 
  InputStream input = fsdataset.getBlockInputStream(0,b);
  long lengthRead = 0;
  int data;
  while ((data = input.read()) != -1) {
    assertEquals(SimulatedFSDataset.DEFAULT_DATABYTE, data);
    lengthRead++;
  }
  assertEquals(expectedLen, lengthRead);
}