Java Code Examples for org.apache.hadoop.fs.FileSystem.getFileBlockLocations()

The following Java code examples show how to use the getFileBlockLocations() method of the org.apache.hadoop.fs.FileSystem class. The examples are taken from open-source projects.
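Before the project examples below, here is a minimal, self-contained sketch of the basic call pattern (the path and output format are placeholders, not taken from any of the projects): look up the file's FileStatus, then ask the FileSystem for the BlockLocation array covering a byte range of the file.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationsDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);       // default file system from the configuration
    Path file = new Path("/tmp/example.txt");   // placeholder path

    FileStatus status = fs.getFileStatus(file);
    // Offset 0 and length = file length cover every block of the file.
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, status.getLen());

    for (BlockLocation loc : locations) {
      System.out.println("offset=" + loc.getOffset()
          + " length=" + loc.getLength()
          + " hosts=" + String.join(",", loc.getHosts()));
    }
    fs.close();
  }
}

Most of the examples below follow this same pattern and differ mainly in how they consume the returned BlockLocation array: checking replica counts, computing block distributions, or building input splits.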
Example 1
Project: hadoop   File: TestSmallBlock.java
private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  assertEquals("Number of blocks", fileSize, locations.length);
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; ++i) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  stm.close();
}
 
Example 2
Project: hadoop   File: TestFileConcurrentReader.java
private void waitForBlocks(FileSystem fileSys, Path name)
  throws IOException {
  // wait until we have at least one block in the file to read.
  boolean done = false;

  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
    }
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, blockSize);
    if (locations.length < 1) {
      done = false;
      continue;
    }
  }
}
 
Example 3
Project: hadoop   File: TestNativeAzureFileSystemBlockLocations.java
private static BlockLocation[] getBlockLocationsOutput(int fileSize,
    int blockSize, long start, long len, String blockLocationHost)
    throws Exception {
  Configuration conf = new Configuration();
  conf.set(NativeAzureFileSystem.AZURE_BLOCK_SIZE_PROPERTY_NAME, ""
      + blockSize);
  if (blockLocationHost != null) {
    conf.set(NativeAzureFileSystem.AZURE_BLOCK_LOCATION_HOST_PROPERTY_NAME,
        blockLocationHost);
  }
  AzureBlobStorageTestAccount testAccount = AzureBlobStorageTestAccount
      .createMock(conf);
  FileSystem fs = testAccount.getFileSystem();
  Path testFile = createTestFile(fs, fileSize);
  FileStatus stat = fs.getFileStatus(testFile);
  BlockLocation[] locations = fs.getFileBlockLocations(stat, start, len);
  testAccount.cleanup();
  return locations;
}
 
Example 4
Project: hadoop   File: MultiFileSplit.java
public String[] getLocations() throws IOException {
  HashSet<String> hostSet = new HashSet<String>();
  for (Path file : getPaths()) {
    FileSystem fs = file.getFileSystem(getJob());
    FileStatus status = fs.getFileStatus(file);
    BlockLocation[] blkLocations = fs.getFileBlockLocations(status,
                                        0, status.getLen());
    if (blkLocations != null && blkLocations.length > 0) {
      addToSet(hostSet, blkLocations[0].getHosts());
    }
  }
  return hostSet.toArray(new String[hostSet.size()]);
}
 
Example 5
Project: hadoop   File: CombineFileInputFormat.java
protected BlockLocation[] getFileBlockLocations(
  FileSystem fs, FileStatus stat) throws IOException {
  if (stat instanceof LocatedFileStatus) {
    return ((LocatedFileStatus) stat).getBlockLocations();
  }
  return fs.getFileBlockLocations(stat, 0, stat.getLen());
}
 
Example 6
Project: hadoop   File: DFSTestUtil.java
/**
 * Wait for the given file to reach the given replication factor.
 * @throws TimeoutException if we fail to sufficiently replicate the file
 */
public static void waitReplication(FileSystem fs, Path fileName, short replFactor)
    throws IOException, InterruptedException, TimeoutException {
  boolean correctReplFactor;
  final int ATTEMPTS = 40;
  int count = 0;

  do {
    correctReplFactor = true;
    BlockLocation locs[] = fs.getFileBlockLocations(
      fs.getFileStatus(fileName), 0, Long.MAX_VALUE);
    count++;
    for (int j = 0; j < locs.length; j++) {
      String[] hostnames = locs[j].getNames();
      if (hostnames.length != replFactor) {
        correctReplFactor = false;
        System.out.println("Block " + j + " of file " + fileName
            + " has replication factor " + hostnames.length
            + " (desired " + replFactor + "); locations "
            + Joiner.on(' ').join(hostnames));
        Thread.sleep(1000);
        break;
      }
    }
    if (correctReplFactor) {
      System.out.println("All blocks of file " + fileName
          + " verified to have replication factor " + replFactor);
    }
  } while (!correctReplFactor && count < ATTEMPTS);

  if (count == ATTEMPTS) {
    throw new TimeoutException("Timed out waiting for " + fileName +
        " to reach " + replFactor + " replicas");
  }
}
 
Example 7
Project: hadoop   File: TestFileAppend.java
private void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {;}
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, AppendTestUtil.FILE_SIZE);
    if (locations.length < AppendTestUtil.NUM_BLOCKS) {
      System.out.println("Number of blocks found " + locations.length);
      done = false;
      continue;
    }
    for (int idx = 0; idx < AppendTestUtil.NUM_BLOCKS; idx++) {
      if (locations[idx].getHosts().length < repl) {
        System.out.println("Block index " + idx + " not yet replciated.");
        done = false;
        break;
      }
    }
  }
  byte[] expected = 
      new byte[AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE];
  if (simulatedStorage) {
    for (int i= 0; i < expected.length; i++) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    System.arraycopy(fileContents, 0, expected, 0, expected.length);
  }
  // do a sanity check. Read the file
  // do not check file status since the file is not yet closed.
  AppendTestUtil.checkFullFile(fileSys, name,
      AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE,
      expected, "Read 1", false);
}
 
Example 8
Project: hadoop   File: TestSafeMode.java
void checkGetBlockLocationsWorks(FileSystem fs, Path fileName) throws IOException {
  FileStatus stat = fs.getFileStatus(fileName);
  try {  
    fs.getFileBlockLocations(stat, 0, 1000);
  } catch (SafeModeException e) {
    assertTrue("Should have not got safemode exception", false);
  } catch (RemoteException re) {
    assertTrue("Should have not got safemode exception", false);   
  }    
}
 
Example 9
Project: ditb   File: FSUtils.java
/**
 * Compute HDFS blocks distribution of a given file, or a portion of the file
 * @param fs file system
 * @param status file status of the file
 * @param start start position of the portion
 * @param length length of the portion
 * @return The HDFS blocks distribution
 */
static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
  final FileSystem fs, FileStatus status, long start, long length)
  throws IOException {
  HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
  BlockLocation [] blockLocations =
    fs.getFileBlockLocations(status, start, length);
  for(BlockLocation bl : blockLocations) {
    String [] hosts = bl.getHosts();
    long len = bl.getLength();
    blocksDistribution.addHostsAndBlockWeight(hosts, len);
  }

  return blocksDistribution;
}
 
Example 10
Project: aliyun-maxcompute-data-collectors   File: CombineFileInputFormat.java
protected BlockLocation[] getFileBlockLocations(
  FileSystem fs, FileStatus stat) throws IOException {
  return fs.getFileBlockLocations(stat, 0, stat.getLen());
}
 
Example 11
Project: hadoop   File: TestMRCJCFileInputFormat.java
public void testLocality() throws Exception {
  JobConf job = new JobConf(conf);
  dfs = newDFSCluster(job);
  FileSystem fs = dfs.getFileSystem();
  System.out.println("FileSystem " + fs.getUri());

  Path inputDir = new Path("/foo/");
  String fileName = "part-0000";
  createInputs(fs, inputDir, fileName);

  // split it using a file input format
  TextInputFormat.addInputPath(job, inputDir);
  TextInputFormat inFormat = new TextInputFormat();
  inFormat.configure(job);
  InputSplit[] splits = inFormat.getSplits(job, 1);
  FileStatus fileStatus = fs.getFileStatus(new Path(inputDir, fileName));
  BlockLocation[] locations =
    fs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
  System.out.println("Made splits");

  // make sure that each split is a block and the locations match
  for(int i=0; i < splits.length; ++i) {
    FileSplit fileSplit = (FileSplit) splits[i];
    System.out.println("File split: " + fileSplit);
    for (String h: fileSplit.getLocations()) {
      System.out.println("Location: " + h);
    }
    System.out.println("Block: " + locations[i]);
    assertEquals(locations[i].getOffset(), fileSplit.getStart());
    assertEquals(locations[i].getLength(), fileSplit.getLength());
    String[] blockLocs = locations[i].getHosts();
    String[] splitLocs = fileSplit.getLocations();
    assertEquals(2, blockLocs.length);
    assertEquals(2, splitLocs.length);
    assertTrue((blockLocs[0].equals(splitLocs[0]) &&
                blockLocs[1].equals(splitLocs[1])) ||
               (blockLocs[1].equals(splitLocs[0]) &&
                blockLocs[0].equals(splitLocs[1])));
  }

  assertEquals("Expected value of " + FileInputFormat.NUM_INPUT_FILES,
               1, job.getLong(FileInputFormat.NUM_INPUT_FILES, 0));
}
 
Example 12
Project: hadoop   File: CombineFileInputFormat.java
OneFileInfo(FileStatus stat, Configuration conf,
            boolean isSplitable,
            HashMap<String, List<OneBlockInfo>> rackToBlocks,
            HashMap<OneBlockInfo, String[]> blockToNodes,
            HashMap<String, Set<OneBlockInfo>> nodeToBlocks,
            HashMap<String, Set<String>> rackToNodes,
            long maxSize)
            throws IOException {
  this.fileSize = 0;

  // get block locations from file system
  BlockLocation[] locations;
  if (stat instanceof LocatedFileStatus) {
    locations = ((LocatedFileStatus) stat).getBlockLocations();
  } else {
    FileSystem fs = stat.getPath().getFileSystem(conf);
    locations = fs.getFileBlockLocations(stat, 0, stat.getLen());
  }
  // create a list of all block and their locations
  if (locations == null) {
    blocks = new OneBlockInfo[0];
  } else {

    if(locations.length == 0 && !stat.isDirectory()) {
      locations = new BlockLocation[] { new BlockLocation() };
    }

    if (!isSplitable) {
      // if the file is not splitable, just create the one block with
      // full file length
      blocks = new OneBlockInfo[1];
      fileSize = stat.getLen();
      blocks[0] = new OneBlockInfo(stat.getPath(), 0, fileSize,
          locations[0].getHosts(), locations[0].getTopologyPaths());
    } else {
      ArrayList<OneBlockInfo> blocksList = new ArrayList<OneBlockInfo>(
          locations.length);
      for (int i = 0; i < locations.length; i++) {
        fileSize += locations[i].getLength();

        // each split can be a maximum of maxSize
        long left = locations[i].getLength();
        long myOffset = locations[i].getOffset();
        long myLength = 0;
        do {
          if (maxSize == 0) {
            myLength = left;
          } else {
            if (left > maxSize && left < 2 * maxSize) {
              // if remainder is between max and 2*max - then
              // instead of creating splits of size max, left-max we
              // create splits of size left/2 and left/2. This is
              // a heuristic to avoid creating really really small
              // splits.
              myLength = left / 2;
            } else {
              myLength = Math.min(maxSize, left);
            }
          }
          OneBlockInfo oneblock = new OneBlockInfo(stat.getPath(),
              myOffset, myLength, locations[i].getHosts(),
              locations[i].getTopologyPaths());
          left -= myLength;
          myOffset += myLength;

          blocksList.add(oneblock);
        } while (left > 0);
      }
      blocks = blocksList.toArray(new OneBlockInfo[blocksList.size()]);
    }
    
    populateBlockInfo(blocks, rackToBlocks, blockToNodes, 
                      nodeToBlocks, rackToNodes);
  }
}
 
Example 13
Project: hadoop   File: TestHostsFiles.java
@Test
public void testHostsExcludeInUI() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

  // Two blocks and four racks
  String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Decommission one of the hosts with the block, this should cause 
    // the block to get replicated to another host on the same rack,
    // otherwise the rack policy is violated.
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    String name = locs[0].getNames()[0];
    String names = name + "\n" + "localhost:42\n";
    LOG.info("adding '" + names + "' to exclude file " + excludeFile.toUri().getPath());
    DFSTestUtil.writeFile(localFileSys, excludeFile, name);
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);

    // Check the block still has sufficient # replicas across racks
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName =
        new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
    assertTrue("Live nodes should contain the decommissioned node",
        nodes.contains("Decommissioned"));
  } finally {
    cluster.shutdown();
  }
}
 
Example 14
Project: hadoop   File: TestBlocksWithNotEnoughRacks.java
@Test
public void testNodeDecomissionRespectsRackPolicy() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

  // Two blocks and four racks
  String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Decommission one of the hosts with the block, this should cause 
    // the block to get replicated to another host on the same rack,
    // otherwise the rack policy is violated.
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    String name = locs[0].getNames()[0];
    DFSTestUtil.writeFile(localFileSys, excludeFile, name);
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);

    // Check the block still has sufficient # replicas across racks
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Example 15
Project: hadoop   File: TestBlocksWithNotEnoughRacks.java
@Test
public void testNodeDecomissionWithOverreplicationRespectsRackPolicy() 
    throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 5;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());

  // All hosts are on two racks, only one host on /rack2
  String racks[] = {"/rack1", "/rack2", "/rack1", "/rack1", "/rack1"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Lower the replication factor so the blocks are over replicated
    REPLICATION_FACTOR = 2;
    fs.setReplication(filePath, REPLICATION_FACTOR);

    // Decommission one of the hosts with the block that is not on
    // the lone host on rack2 (if we decommission that host it would
    // be impossible to respect the rack policy).
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    for (String top : locs[0].getTopologyPaths()) {
      if (!top.startsWith("/rack2")) {
        String name = top.substring("/rack1".length()+1);
        DFSTestUtil.writeFile(localFileSys, excludeFile, name);
        ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
        DFSTestUtil.waitForDecommission(fs, name);
        break;
      }
    }

    // Check the block still has sufficient # replicas across racks,
    // ie we didn't remove the replica on the host on /rack1.
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Example 16
Project: hadoop   File: TestOverReplicatedBlocks.java
/**
 * The test verifies that replica for deletion is chosen on a node,
 * with the oldest heartbeat, when this heartbeat is larger than the
 * tolerable heartbeat interval.
 * It creates a file with several blocks and replication 4.
 * The last DN is configured to send heartbeats rarely.
 * 
 * Test waits until the tolerable heartbeat interval expires, and reduces
 * replication of the file. All replica deletions should be scheduled for the
 * last node. No replicas will actually be deleted, since last DN doesn't
 * send heartbeats. 
 */
@Test
public void testChooseReplicaToDelete() throws Exception {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK_SIZE);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    fs = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 300);
    cluster.startDataNodes(conf, 1, true, null, null, null);
    DataNode lastDN = cluster.getDataNodes().get(3);
    DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
        lastDN, namesystem.getBlockPoolId());
    String lastDNid = dnReg.getDatanodeUuid();

    final Path fileName = new Path("/foo2");
    DFSTestUtil.createFile(fs, fileName, SMALL_FILE_LENGTH, (short)4, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short)4);

    // Wait for tolerable number of heartbeats plus one
    DatanodeDescriptor nodeInfo = null;
    long lastHeartbeat = 0;
    long waitTime = DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000 *
      (DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT + 1);
    do {
      nodeInfo = namesystem.getBlockManager().getDatanodeManager()
          .getDatanode(dnReg);
      lastHeartbeat = nodeInfo.getLastUpdateMonotonic();
    } while (monotonicNow() - lastHeartbeat < waitTime);
    fs.setReplication(fileName, (short)3);

    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(fileName), 0, Long.MAX_VALUE);

    // All replicas for deletion should be scheduled on lastDN.
    // And should not actually be deleted, because lastDN does not heartbeat.
    namesystem.readLock();
    Collection<Block> dnBlocks = 
      namesystem.getBlockManager().excessReplicateMap.get(lastDNid);
    assertEquals("Replicas on node " + lastDNid + " should have been deleted",
        SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks.size());
    namesystem.readUnlock();
    for(BlockLocation location : locs)
      assertEquals("Block should still have 4 replicas",
          4, location.getNames().length);
  } finally {
    if(fs != null) fs.close();
    if(cluster != null) cluster.shutdown();
  }
}
 
Example 17
Project: hadoop   File: TestDataNodeHotSwapVolumes.java
/** Return the number of replicas for a given block in the file. */
private static int getNumReplicas(FileSystem fs, Path file,
    int blockIdx) throws IOException {
  BlockLocation locs[] = fs.getFileBlockLocations(file, 0, Long.MAX_VALUE);
  return locs.length < blockIdx + 1 ? 0 : locs[blockIdx].getNames().length;
}
 
Example 18
Project: hadoop   File: TestDatanodeDeath.java
static private void checkFile(FileSystem fileSys, Path name, int repl,
                       int numblocks, int filesize, long seed)
  throws IOException {
  boolean done = false;
  int attempt = 0;

  long len = fileSys.getFileStatus(name).getLen();
  assertTrue(name + " should be of size " + filesize +
             " but found to be of size " + len, 
             len == filesize);

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    attempt++;
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {}
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, filesize);

    if (locations.length < numblocks) {
      if (attempt > 100) {
        System.out.println("File " + name + " has only " +
                           locations.length + " blocks, " +
                           " but is expected to have " + numblocks +
                           " blocks.");
      }
      done = false;
      continue;
    }
    for (int idx = 0; idx < locations.length; idx++) {
      if (locations[idx].getHosts().length < repl) {
        if (attempt > 100) {
          System.out.println("File " + name + " has " +
                             locations.length + " blocks: " +
                             " The " + idx + " block has only " +
                             locations[idx].getHosts().length + 
                             " replicas but is expected to have " 
                             + repl + " replicas.");
        }
        done = false;
        break;
      }
    }
  }
  FSDataInputStream stm = fileSys.open(name);
  final byte[] expected = AppendTestUtil.randomBytes(seed, fileSize);

  // do a sanity check. Read the file
  byte[] actual = new byte[filesize];
  stm.readFully(0, actual);
  checkData(actual, 0, expected, "Read 1");
}