Java Code Examples for org.apache.hadoop.fs.FileSystem.setReplication()

The following are Java code examples showing how to use the setReplication() method of the org.apache.hadoop.fs.FileSystem class. All of the examples below are taken from the Apache Hadoop project.
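As a starting point, here is a minimal, self-contained sketch of the call itself; the configuration, path, and replication value are illustrative assumptions rather than values taken from the project examples. setReplication(Path src, short replication) requests a new replica count for an existing file and returns a boolean indicating whether the request was accepted (the base FileSystem implementation is a no-op that returns true; HDFS records the new target and re-replicates in the background).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetReplicationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();               // picks up fs.defaultFS, e.g. an HDFS URI
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/tmp/example.txt");               // hypothetical existing file
    boolean accepted = fs.setReplication(file, (short) 3);  // ask for 3 replicas
    System.out.println("setReplication accepted: " + accepted);
    fs.close();
  }
}

Because HDFS applies the change asynchronously, several of the tests below follow setReplication() with DFSTestUtil.waitReplication() or similar helpers before asserting on replica counts.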
Example 1
Project: hadoop   File: BaseTestHttpFSWith.java
private void testSetReplication() throws Exception {
  FileSystem fs = FileSystem.get(getProxiedFSConf());
  Path path = new Path(getProxiedFSTestDir(), "foo.txt");
  OutputStream os = fs.create(path);
  os.write(1);
  os.close();
  fs.setReplication(path, (short) 2);
  fs.close();

  fs = getHttpFSFileSystem();
  fs.setReplication(path, (short) 1);
  fs.close();

  fs = FileSystem.get(getProxiedFSConf());
  FileStatus status1 = fs.getFileStatus(path);
  fs.close();
  Assert.assertEquals(status1.getReplication(), (short) 1);
}
 
Example 2
Project: hadoop   File: TestOverReplicatedBlocks.java
/**
 * Test that an over-replicated block gets invalidated when decreasing the
 * replication factor for a partial (still-open) block.
 */
@Test
public void testInvalidateOverReplicatedBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
      .build();
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
    FSDataOutputStream out = fs.create(p, (short) 2);
    out.writeBytes("HDFS-3119: " + p);
    out.hsync();
    fs.setReplication(p, (short) 1);
    out.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
    assertEquals("Expected only one live replica for the block", 1, bm
        .countNodes(block.getLocalBlock()).liveReplicas());
  } finally {
    cluster.shutdown();
  }
}
 
Example 3
Project: hadoop   File: DistCpV1.java
/**
 * Increase the replication factor of _distcp_src_files to
 * sqrt(min(maxMapsOnCluster, numMaps)). This reduces the chance that distcp
 * fails because no replica of _distcp_src_files is available for some map
 * tasks to read.
 */
private static void setReplication(Configuration conf, JobConf jobConf,
                       Path srcfilelist, int numMaps) throws IOException {
  int numMaxMaps = new JobClient(jobConf).getClusterStatus().getMaxMapTasks();
  short replication = (short) Math.ceil(
                              Math.sqrt(Math.min(numMaxMaps, numMaps)));
  FileSystem fs = srcfilelist.getFileSystem(conf);
  FileStatus srcStatus = fs.getFileStatus(srcfilelist);

  if (srcStatus.getReplication() < replication) {
    if (!fs.setReplication(srcfilelist, replication)) {
      throw new IOException("Unable to increase the replication of file " +
                            srcfilelist);
    }
  }
}
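To make the formula in the comment above concrete (the numbers are made up for illustration): with 400 map slots on the cluster and 1,000 maps requested, min(400, 1000) = 400 and ceil(sqrt(400)) = 20, so _distcp_src_files is bumped to a replication factor of 20 only if its current replication is lower than that.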
 
Example 4
Project: hadoop   File: UtilsForTests.java
static void writeFile(NameNode namenode, Configuration conf, Path name, 
                      short replication)
    throws IOException, TimeoutException, InterruptedException {
  FileSystem fileSys = FileSystem.get(conf);
  SequenceFile.Writer writer = 
    SequenceFile.createWriter(fileSys, conf, name, 
                              BytesWritable.class, BytesWritable.class,
                              CompressionType.NONE);
  writer.append(new BytesWritable(), new BytesWritable());
  writer.close();
  fileSys.setReplication(name, replication);
  DFSTestUtil.waitReplication(fileSys, name, replication);
}
 
Example 5
Project: hadoop   File: JobSplitWriter.java
private static FSDataOutputStream createFile(FileSystem fs, Path splitFile, 
    Configuration job)  throws IOException {
  FSDataOutputStream out = FileSystem.create(fs, splitFile, 
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION));
  int replication = job.getInt(Job.SUBMIT_REPLICATION, 10);
  fs.setReplication(splitFile, (short)replication);
  writeSplitHeader(out);
  return out;
}
 
Example 6
Project: hadoop   File: DFSTestUtil.java
void setReplication(FileSystem fs, String topdir, short value) 
                                            throws IOException {
  Path root = new Path(topdir);
  for (int idx = 0; idx < nFiles; idx++) {
    Path fPath = new Path(root, files[idx].getName());
    fs.setReplication(fPath, value);
  }
}
 
Example 7
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreserveDefaults() throws IOException {
  FileSystem fs = FileSystem.get(config);
  
  // preserve replication, block size, user, group, permission, 
  // checksum type and timestamps    
  EnumSet<FileAttribute> attributes = 
      DistCpUtils.unpackAttributes(
          DistCpOptionSwitch.PRESERVE_STATUS_DEFAULT.substring(1));

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");

  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);
  
  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertTrue(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertTrue(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertTrue(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertTrue(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertTrue(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertTrue(srcStatus.getReplication() == dstStatus.getReplication());
}
 
Example 8
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreserveReplicationOnDirectory() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.REPLICATION);

  Path dst = new Path("/tmp/abc");
  Path src = new Path("/tmp/src");

  createDirectory(fs, src);
  createDirectory(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  // Replication shouldn't apply to dirs so this should still be 0 == 0
  Assert.assertTrue(srcStatus.getReplication() == dstStatus.getReplication());
}
 
Example 9
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreserveNothingOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.noneOf(FileAttribute.class);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");

  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
 
Example 10
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreservePermissionOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.PERMISSION);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");

  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertTrue(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
 
Example 11
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreserveGroupOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.GROUP);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");

  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertTrue(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
 
Example 12
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreserveUserOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.USER);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");

  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertTrue(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
 
Example 13
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreserveReplicationOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.REPLICATION);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");

  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertTrue(srcStatus.getReplication() == dstStatus.getReplication());
}
 
Example 14
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreserveTimestampOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.TIMES);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");

  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertTrue(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertTrue(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
 
Example 15
Project: hadoop   File: TestStandbyIsHot.java
@Test(timeout=60000)
public void testStandbyIsHot() throws Exception {
  Configuration conf = new Configuration();
  // We read from the standby to watch block locations
  HAUtil.setAllowStandbyReads(conf, true);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(3)
    .build();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);
    
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    
    Thread.sleep(1000);
    System.err.println("==================================");
    DFSTestUtil.writeFile(fs, TEST_FILE_PATH, TEST_FILE_DATA);
    // Have to force an edit log roll so that the standby catches up
    nn1.getRpcServer().rollEditLog();
    System.err.println("==================================");

    // Block locations should show up on standby.
    LOG.info("Waiting for block locations to appear on standby node");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 3);

    // Trigger immediate heartbeats and block reports so
    // that the active "trusts" all of the DNs
    cluster.triggerHeartbeats();
    cluster.triggerBlockReports();

    // Change replication
    LOG.info("Changing replication to 1");
    fs.setReplication(TEST_FILE_PATH, (short)1);
    BlockManagerTestUtil.computeAllPendingWork(
        nn1.getNamesystem().getBlockManager());
    waitForBlockLocations(cluster, nn1, TEST_FILE, 1);

    nn1.getRpcServer().rollEditLog();
    
    LOG.info("Waiting for lowered replication to show up on standby");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 1);
    
    // Change back to 3
    LOG.info("Changing replication to 3");
    fs.setReplication(TEST_FILE_PATH, (short)3);
    BlockManagerTestUtil.computeAllPendingWork(
        nn1.getNamesystem().getBlockManager());
    nn1.getRpcServer().rollEditLog();
    
    LOG.info("Waiting for higher replication to show up on standby");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 3);
    
  } finally {
    cluster.shutdown();
  }
}
 
Example 16
Project: hadoop   File: TestBlocksWithNotEnoughRacks.java
@Test
public void testNodeDecomissionWithOverreplicationRespectsRackPolicy() 
    throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 5;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());

  // All hosts are on two racks, only one host on /rack2
  String racks[] = {"/rack1", "/rack2", "/rack1", "/rack1", "/rack1"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Lower the replication factor so the blocks are over replicated
    REPLICATION_FACTOR = 2;
    fs.setReplication(filePath, REPLICATION_FACTOR);

    // Decommission one of the hosts with the block that is not on
    // the lone host on rack2 (if we decommission that host it would
    // be impossible to respect the rack policy).
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    for (String top : locs[0].getTopologyPaths()) {
      if (!top.startsWith("/rack2")) {
        String name = top.substring("/rack1".length()+1);
        DFSTestUtil.writeFile(localFileSys, excludeFile, name);
        ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
        DFSTestUtil.waitForDecommission(fs, name);
        break;
      }
    }

    // Check the block still has sufficient # replicas across racks,
    // ie we didn't remove the replica on the host on /rack1.
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Example 17
Project: hadoop   File: TestOverReplicatedBlocks.java
/**
 * The test verifies that the replica chosen for deletion lives on the node
 * with the oldest heartbeat, when that heartbeat is older than the
 * tolerable heartbeat interval.
 * It creates a file with several blocks and replication 4.
 * The last DN is configured to send heartbeats rarely.
 *
 * The test waits until the tolerable heartbeat interval expires, then reduces
 * the replication of the file. All replica deletions should be scheduled on the
 * last node. No replicas will actually be deleted, since the last DN doesn't
 * send heartbeats.
 */
@Test
public void testChooseReplicaToDelete() throws Exception {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK_SIZE);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    fs = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 300);
    cluster.startDataNodes(conf, 1, true, null, null, null);
    DataNode lastDN = cluster.getDataNodes().get(3);
    DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
        lastDN, namesystem.getBlockPoolId());
    String lastDNid = dnReg.getDatanodeUuid();

    final Path fileName = new Path("/foo2");
    DFSTestUtil.createFile(fs, fileName, SMALL_FILE_LENGTH, (short)4, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short)4);

    // Wait for tolerable number of heartbeats plus one
    DatanodeDescriptor nodeInfo = null;
    long lastHeartbeat = 0;
    long waitTime = DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000 *
      (DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT + 1);
    do {
      nodeInfo = namesystem.getBlockManager().getDatanodeManager()
          .getDatanode(dnReg);
      lastHeartbeat = nodeInfo.getLastUpdateMonotonic();
    } while (monotonicNow() - lastHeartbeat < waitTime);
    fs.setReplication(fileName, (short)3);

    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(fileName), 0, Long.MAX_VALUE);

    // All replicas for deletion should be scheduled on lastDN.
    // And should not actually be deleted, because lastDN does not heartbeat.
    namesystem.readLock();
    Collection<Block> dnBlocks = 
      namesystem.getBlockManager().excessReplicateMap.get(lastDNid);
    assertEquals("Replicas on node " + lastDNid + " should have been deleted",
        SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks.size());
    namesystem.readUnlock();
    for(BlockLocation location : locs)
      assertEquals("Block should still have 4 replicas",
          4, location.getNames().length);
  } finally {
    if(fs != null) fs.close();
    if(cluster != null) cluster.shutdown();
  }
}
 
Example 18
Project: hadoop   File: TestReplication.java
private void testBadBlockReportOnTransfer(
    boolean corruptBlockByDeletingBlockFile) throws Exception {
  Configuration conf = new HdfsConfiguration();
  FileSystem fs = null;
  DFSClient dfsClient = null;
  LocatedBlocks blocks = null;
  int replicaCount = 0;
  short replFactor = 1;
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dfsClient = new DFSClient(new InetSocketAddress("localhost",
                            cluster.getNameNodePort()), conf);

  // Create file with replication factor of 1
  Path file1 = new Path("/tmp/testBadBlockReportOnTransfer/file1");
  DFSTestUtil.createFile(fs, file1, 1024, replFactor, 0);
  DFSTestUtil.waitReplication(fs, file1, replFactor);

  // Corrupt the block belonging to the created file
  ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);

  int blockFilesCorrupted =
      corruptBlockByDeletingBlockFile?
          cluster.corruptBlockOnDataNodesByDeletingBlockFile(block) :
            cluster.corruptBlockOnDataNodes(block);       

  assertEquals("Corrupted too few blocks", replFactor, blockFilesCorrupted); 

  // Increase replication factor, this should invoke transfer request
  // Receiving datanode fails on checksum and reports it to namenode
  replFactor = 2;
  fs.setReplication(file1, replFactor);

  // Now get block details and check if the block is corrupt
  blocks = dfsClient.getNamenode().
            getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
  while (!blocks.get(0).isCorrupt()) {
    try {
      LOG.info("Waiting until block is marked as corrupt...");
      Thread.sleep(1000);
    } catch (InterruptedException ie) {
    }
    blocks = dfsClient.getNamenode().
              getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
  }
  replicaCount = blocks.get(0).getLocations().length;
  assertTrue(replicaCount == 1);
  cluster.shutdown();
}
 
Example 19
Project: hadoop   File: TestReplication.java
private void changeBlockLen(MiniDFSCluster cluster, int lenDelta)
    throws IOException, InterruptedException, TimeoutException {
  final Path fileName = new Path("/file1");
  final short REPLICATION_FACTOR = (short)1;
  final FileSystem fs = cluster.getFileSystem();
  final int fileLen = fs.getConf().getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
  DFSTestUtil.createFile(fs, fileName, fileLen, REPLICATION_FACTOR, 0);
  DFSTestUtil.waitReplication(fs, fileName, REPLICATION_FACTOR);

  ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);

  // Change the length of a replica
  for (int i=0; i<cluster.getDataNodes().size(); i++) {
    if (DFSTestUtil.changeReplicaLength(cluster, block, i, lenDelta)) {
      break;
    }
  }

  // increase the file's replication factor
  fs.setReplication(fileName, (short)(REPLICATION_FACTOR+1));

  // block replication triggers corrupt block detection
  DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", 
      cluster.getNameNodePort()), fs.getConf());
  LocatedBlocks blocks = dfsClient.getNamenode().getBlockLocations(
      fileName.toString(), 0, fileLen);
  if (lenDelta < 0) { // replica truncated
    while (!blocks.get(0).isCorrupt() ||
        REPLICATION_FACTOR != blocks.get(0).getLocations().length) {
      Thread.sleep(100);
      blocks = dfsClient.getNamenode().getBlockLocations(
          fileName.toString(), 0, fileLen);
    }
  } else { // no corruption detected; block replicated
    while (REPLICATION_FACTOR + 1 != blocks.get(0).getLocations().length) {
      Thread.sleep(100);
      blocks = dfsClient.getNamenode().getBlockLocations(
          fileName.toString(), 0, fileLen);
    }
  }
  fs.delete(fileName, true);
}
 
Example 20
Project: hadoop   File: FSOperations.java
/**
 * Executes the filesystem operation.
 *
 * @param fs filesystem instance to use.
 *
 * @return <code>true</code> if the replication value was set,
 *         <code>false</code> otherwise.
 *
 * @throws IOException thrown if an IO error occurred.
 */
@Override
@SuppressWarnings("unchecked")
public JSONObject execute(FileSystem fs) throws IOException {
  boolean ret = fs.setReplication(path, replication);
  JSONObject json = new JSONObject();
  json.put(HttpFSFileSystem.SET_REPLICATION_JSON, ret);
  return json;
}