Java Code Examples for org.apache.hadoop.hdfs.DistributedFileSystem#close()

The following examples show how to use org.apache.hadoop.hdfs.DistributedFileSystem#close(). Each example is taken from an open-source project; the source file, originating project, and license are noted above each snippet.
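Before the individual examples, here is a minimal sketch of the pattern they share: obtain the file system, use it, and close it in a finally block (or with try-with-resources, since FileSystem implements java.io.Closeable). The NameNode URI and path below are placeholders, not values from any example.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class DfsCloseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode URI; adjust for a real cluster.
    DistributedFileSystem dfs = (DistributedFileSystem)
        FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    try {
      dfs.mkdirs(new Path("/tmp/close-sketch"));
    } finally {
      // Releases the client's RPC connections, leases, and cached sockets.
      dfs.close();
    }
  }
}

Note that FileSystem.get normally returns a cached instance per URI and user, so closing it also closes it for other callers in the same JVM; if a private, independently closeable instance is needed, the fs.hdfs.impl.disable.cache setting can be used to bypass the FileSystem cache.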
Example 1
Source File: TestBootstrapStandbyWithQJM.java    From hadoop with Apache License 2.0
@Before
public void setup() throws Exception {
  Configuration conf = new Configuration();
  // Turn off IPC client caching, so that the suite can handle
  // the restart of the daemons between test cases.
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      0);

  MiniQJMHACluster miniQjmHaCluster = new MiniQJMHACluster.Builder(conf).build();
  cluster = miniQjmHaCluster.getDfsCluster();
  jCluster = miniQjmHaCluster.getJournalCluster();
  
  // make nn0 active
  cluster.transitionToActive(0);
  // do something to generate in-progress edit log data
  DistributedFileSystem dfs = (DistributedFileSystem) 
      HATestUtil.configureFailoverFs(cluster, conf);
  dfs.mkdirs(new Path("/test2"));
  dfs.close();
}
 
Example 2
Source File: TestBootstrapStandbyWithQJM.java    From big-c with Apache License 2.0
@Before
public void setup() throws Exception {
  Configuration conf = new Configuration();
  // Turn off IPC client caching, so that the suite can handle
  // the restart of the daemons between test cases.
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      0);

  MiniQJMHACluster miniQjmHaCluster = new MiniQJMHACluster.Builder(conf).build();
  cluster = miniQjmHaCluster.getDfsCluster();
  jCluster = miniQjmHaCluster.getJournalCluster();
  
  // make nn0 active
  cluster.transitionToActive(0);
  // do something to generate in-progress edit log data
  DistributedFileSystem dfs = (DistributedFileSystem) 
      HATestUtil.configureFailoverFs(cluster, conf);
  dfs.mkdirs(new Path("/test2"));
  dfs.close();
}
 
Example 3
Source File: TestMRCJCSocketFactory.java    From hadoop with Apache License 2.0
private void closeDfs(DistributedFileSystem dfs) {
  try {
    if (dfs != null)
      dfs.close();

  } catch (Exception ignored) {
    // nothing we can do
    ignored.printStackTrace();
  }
}
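
Since FileSystem implements java.io.Closeable, a try-with-resources block (Java 7+) is an alternative to a null-checked helper like closeDfs above. A minimal sketch, assuming the same imports as the sketch after the introduction and a placeholder URI and path:

Configuration conf = new Configuration();
try (DistributedFileSystem dfs = (DistributedFileSystem)
         FileSystem.get(URI.create("hdfs://localhost:8020"), conf)) {
  dfs.mkdirs(new Path("/tmp/example"));   // any cluster operations
}   // dfs.close() is called automatically, even if an operation throws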
 
Example 4
Source File: TestStuckDataNode.java    From RDFS with Apache License 2.0
/** This creates a slow writer and checks to see
  * if pipeline heartbeats work fine
  */
public void testStuckDataNode() throws Exception {
  final int DATANODE_NUM = 3;
  Configuration conf = new Configuration();
  final int timeout = 8000;
  conf.setInt("dfs.socket.timeout", timeout);

  final Path p = new Path("/pipelineHeartbeat/foo");
  System.out.println("p=" + p);

  MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
  DistributedFileSystem fs = (DistributedFileSystem)cluster.getFileSystem();

  DataNodeMetrics metrics = cluster.getDataNodes().get(0).myMetrics;
  MetricsTimeVaryingLong spyBytesWritten = spy(metrics.bytesWritten);
  DelayAnswer delayAnswer = new DelayAnswer();
  doAnswer(delayAnswer).when(spyBytesWritten).inc(anyInt());
  metrics.bytesWritten = spyBytesWritten;

  try {
    // create a new file.
    FSDataOutputStream stm = fs.create(p);
    stm.write(1);
    stm.sync();
    stm.write(2);
    stm.close();

    // verify that entire file is good
    FSDataInputStream in = fs.open(p);
    assertEquals(1, in.read());
    assertEquals(2, in.read());
    in.close();
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 5
Source File: TestBlockMissingException.java    From RDFS with Apache License 2.0
/**
 * Test DFS Raid
 */
public void testBlockMissingException() throws Exception {
  LOG.info("Test testBlockMissingException started.");
  long blockSize = 1024L;
  int numBlocks = 4;
  conf = new Configuration();
  try {
    dfs = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
    dfs.waitActive();
    fileSys = (DistributedFileSystem)dfs.getFileSystem();
    Path file1 = new Path("/user/dhruba/raidtest/file1");
    createOldFile(fileSys, file1, 1, numBlocks, blockSize);

    // extract block locations from File system. Wait till file is closed.
    LocatedBlocks locations = null;
    locations = fileSys.dfs.namenode.getBlockLocations(file1.toString(),
                                                           0, numBlocks * blockSize);
    // remove block of file
    LOG.info("Remove first block of file");
    corruptBlock(file1, locations.get(0).getBlock());

    // validate that the system throws BlockMissingException
    validateFile(fileSys, file1);
  } finally {
    if (fileSys != null) fileSys.close();
    if (dfs != null) dfs.shutdown();
  }
  LOG.info("Test testBlockMissingException completed.");
}
 
Example 6
Source File: TestByteBufLineReader.java    From tajo with Apache License 2.0
@Test(timeout = 120000)
public void testReaderWithDFS() throws Exception {
  final Configuration conf = TestFileTablespace.getTestHdfsConfiguration();

  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2).format(true).build();

  TajoConf tajoConf = new TajoConf(conf);
  tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

  Path tablePath = new Path("/testReaderWithDFS");
  Path filePath = new Path(tablePath, "data.dat");
  try {
    DistributedFileSystem fs = cluster.getFileSystem();
    FSDataOutputStream out = fs.create(filePath, true);
    out.write(LINE.getBytes(Charset.defaultCharset()));
    out.write('\n');
    out.close();

    assertTrue(fs.exists(filePath));
    FSDataInputStream inputStream = fs.open(filePath);
    assertTrue(inputStream.getWrappedStream() instanceof ByteBufferReadable);

    ByteBufLineReader lineReader = new ByteBufLineReader(new FSDataInputChannel(inputStream));
    assertEquals(LINE, lineReader.readLine());
    lineReader.seek(0);
    assertEquals(LINE, lineReader.readLine());
    assertNull(lineReader.readLine());

    lineReader.close();
    fs.close();
  } finally {
    cluster.shutdown();
  }
}
 
Example 7
Source File: TestHASafeMode.java    From big-c with Apache License 2.0
/**
 * DFS#isInSafeMode should check the active NameNode's safemode in an HA-enabled cluster (HDFS-3507).
 * 
 * @throws Exception
 */
@Test
public void testIsInSafemode() throws Exception {
  // Check for the standby nn without client failover.
  NameNode nn2 = cluster.getNameNode(1);
  assertTrue("nn2 should be in standby state", nn2.isStandbyState());

  InetSocketAddress nameNodeAddress = nn2.getNameNodeAddress();
  Configuration conf = new Configuration();
  DistributedFileSystem dfs = new DistributedFileSystem();
  try {
    dfs.initialize(
        URI.create("hdfs://" + nameNodeAddress.getHostName() + ":"
            + nameNodeAddress.getPort()), conf);
    dfs.isInSafeMode();
    fail("StandBy should throw exception for isInSafeMode");
  } catch (IOException e) {
    if (e instanceof RemoteException) {
      IOException sbException = ((RemoteException) e).unwrapRemoteException();
      assertTrue("StandBy nn should not support isInSafeMode",
          sbException instanceof StandbyException);
    } else {
      throw e;
    }
  } finally {
    if (null != dfs) {
      dfs.close();
    }
  }

  // Check with Client FailOver
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  DistributedFileSystem dfsWithFailOver = (DistributedFileSystem) fs;
  assertTrue("ANN should be in SafeMode", dfsWithFailOver.isInSafeMode());

  cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
  assertFalse("ANN should be out of SafeMode", dfsWithFailOver.isInSafeMode());
}
 
Example 8
Source File: TestMRCJCSocketFactory.java    From big-c with Apache License 2.0
private void closeDfs(DistributedFileSystem dfs) {
  try {
    if (dfs != null)
      dfs.close();

  } catch (Exception ignored) {
    // nothing we can do
    ignored.printStackTrace();
  }
}
 
Example 9
Source File: TestHASafeMode.java    From hadoop with Apache License 2.0
/**
 * DFS#isInSafeMode should check the active NameNode's safemode in an HA-enabled cluster (HDFS-3507).
 * 
 * @throws Exception
 */
@Test
public void testIsInSafemode() throws Exception {
  // Check for the standby nn without client failover.
  NameNode nn2 = cluster.getNameNode(1);
  assertTrue("nn2 should be in standby state", nn2.isStandbyState());

  InetSocketAddress nameNodeAddress = nn2.getNameNodeAddress();
  Configuration conf = new Configuration();
  DistributedFileSystem dfs = new DistributedFileSystem();
  try {
    dfs.initialize(
        URI.create("hdfs://" + nameNodeAddress.getHostName() + ":"
            + nameNodeAddress.getPort()), conf);
    dfs.isInSafeMode();
    fail("StandBy should throw exception for isInSafeMode");
  } catch (IOException e) {
    if (e instanceof RemoteException) {
      IOException sbException = ((RemoteException) e).unwrapRemoteException();
      assertTrue("StandBy nn should not support isInSafeMode",
          sbException instanceof StandbyException);
    } else {
      throw e;
    }
  } finally {
    if (null != dfs) {
      dfs.close();
    }
  }

  // Check with Client FailOver
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  DistributedFileSystem dfsWithFailOver = (DistributedFileSystem) fs;
  assertTrue("ANN should be in SafeMode", dfsWithFailOver.isInSafeMode());

  cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
  assertFalse("ANN should be out of SafeMode", dfsWithFailOver.isInSafeMode());
}
 
Example 10
Source File: HDFSTool.java    From WIFIProbe with Apache License 2.0
public static void concat(String dir) throws IOException {
  String directory = NodeConfig.HDFS_PATH + dir;
  Configuration conf = new Configuration();
  DistributedFileSystem fs =
      (DistributedFileSystem) FileSystem.get(URI.create(directory), conf);
  FileStatus[] fileList = fs.listStatus(new Path(directory));

  if (fileList.length >= 2) {

    ArrayList<Path> srcs = new ArrayList<Path>(fileList.length);
    for (FileStatus fileStatus : fileList) {
      if (fileStatus.isFile() &&
          (fileStatus.getLen() & ~fileStatus.getBlockSize()) < fileStatus.getBlockSize() / 2) {
        srcs.add(fileStatus.getPath());
      }
    }

    if (srcs.size() >= 2) {
      Logger.println("come to here");
      Path appended = srcs.get(0);
      Path[] sources = new Path[srcs.size() - 1];
      for (int i = 0; i < srcs.size() - 1; i++) {
        sources[i] = srcs.get(i + 1);
      }
      Logger.println(fs == null);
      Logger.println(appended == null);
      Logger.println(sources == null);
      fs.concat(appended, sources);
      Logger.println("concat to : " + appended.getName());
      Logger.println(Arrays.toString(sources));
    }

    fs.close();
  }
}
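
One caveat about the example above: fs is closed only when the length check passes and the body completes, so an exception from listStatus or concat leaves the client open. A sketch of the same method with the close moved into a finally block (NodeConfig and the selection/concat logic are assumed to be as in the original example):

public static void concat(String dir) throws IOException {
  String directory = NodeConfig.HDFS_PATH + dir;
  Configuration conf = new Configuration();
  DistributedFileSystem fs =
      (DistributedFileSystem) FileSystem.get(URI.create(directory), conf);
  try {
    FileStatus[] fileList = fs.listStatus(new Path(directory));
    // ... same small-file selection and fs.concat(...) logic as above ...
  } finally {
    fs.close();   // runs whether or not the concat succeeds
  }
}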
 
Example 11
Source File: TestBootstrapStandbyWithBKJM.java    From hadoop with Apache License 2.0
/**
 * While bootstrapping, in_progress transaction entries should be skipped.
 * Bootstrap usage for BKJM: "-force", "-nonInteractive", "-skipSharedEditsCheck"
 */
@Test
public void testBootstrapStandbyWithActiveNN() throws Exception {
  // make nn0 active
  cluster.transitionToActive(0);
 
  // do ops and generate in-progress edit log data
  Configuration confNN1 = cluster.getConfiguration(1);
  DistributedFileSystem dfs = (DistributedFileSystem) HATestUtil
      .configureFailoverFs(cluster, confNN1);
  for (int i = 1; i <= 10; i++) {
    dfs.mkdirs(new Path("/test" + i));
  }
  dfs.close();

  // shutdown nn1 and delete its edit log files
  cluster.shutdownNameNode(1);
  deleteEditLogIfExists(confNN1);
  cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_ENTER, true);
  cluster.getNameNodeRpc(0).saveNamespace();
  cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, true);

  // check without -skipSharedEditsCheck, Bootstrap should fail for BKJM
  // immediately after saveNamespace
  int rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive" },
    confNN1);
  Assert.assertEquals("Mismatches return code", 6, rc);

  // check with -skipSharedEditsCheck
  rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive",
      "-skipSharedEditsCheck" }, confNN1);
  Assert.assertEquals("Mismatches return code", 0, rc);

  // Checkpoint as fast as we can, in a tight loop.
  confNN1.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
  cluster.restartNameNode(1);
  cluster.transitionToStandby(1);
 
  NameNode nn0 = cluster.getNameNode(0);
  HATestUtil.waitForStandbyToCatchUp(nn0, cluster.getNameNode(1));
  long expectedCheckpointTxId = NameNodeAdapter.getNamesystem(nn0)
      .getFSImage().getMostRecentCheckpointTxId();
  HATestUtil.waitForCheckpoint(cluster, 1,
      ImmutableList.of((int) expectedCheckpointTxId));

  // Should have copied over the namespace
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1,
      ImmutableList.of((int) expectedCheckpointTxId));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}
 
Example 12
Source File: TestEnhancedByteBufferAccess.java    From big-c with Apache License 2.0
@Test
public void testClientMmapDisable() throws Exception {
  HdfsConfiguration conf = initZeroCopyTest();
  conf.setBoolean(DFS_CLIENT_MMAP_ENABLED, false);
  MiniDFSCluster cluster = null;
  final Path TEST_PATH = new Path("/a");
  final int TEST_FILE_LENGTH = 16385;
  final int RANDOM_SEED = 23453;
  final String CONTEXT = "testClientMmapDisable";
  FSDataInputStream fsIn = null;
  DistributedFileSystem fs = null;
  conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);

  try {
    // With DFS_CLIENT_MMAP_ENABLED set to false, we should not do memory
    // mapped reads.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_PATH,
        TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
    DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
    fsIn = fs.open(TEST_PATH);
    try {
      fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.fail("expected zero-copy read to fail when client mmaps " +
          "were disabled.");
    } catch (UnsupportedOperationException e) {
    }
  } finally {
    if (fsIn != null) fsIn.close();
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }

  fsIn = null;
  fs = null;
  cluster = null;
  try {
    // Now try again with DFS_CLIENT_MMAP_CACHE_SIZE == 0.  It should work.
    conf.setBoolean(DFS_CLIENT_MMAP_ENABLED, true);
    conf.setInt(DFS_CLIENT_MMAP_CACHE_SIZE, 0);
    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT + ".1");
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_PATH,
        TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
    DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
    fsIn = fs.open(TEST_PATH);
    ByteBuffer buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    fsIn.releaseBuffer(buf);
    // Test EOF behavior
    IOUtils.skipFully(fsIn, TEST_FILE_LENGTH - 1);
    buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(null, buf);
  } finally {
    if (fsIn != null) fsIn.close();
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }
}
 
Example 13
Source File: TestFsck.java    From hadoop with Apache License 2.0
/**
 * Tests that the # of misreplaced replicas is correct
 * @throws IOException
 */
@Test
public void testFsckMisPlacedReplicas() throws IOException {
  // Desired replication factor
  final short REPL_FACTOR = 2;
  // Number of replicas to actually start
  short NUM_DN = 2;
  // Number of blocks to write
  final short NUM_BLOCKS = 3;
  // Set a small-ish blocksize
  final long blockSize = 512;
  
  String [] racks = {"/rack1", "/rack1"};
  String [] hosts = {"host1", "host2"};
  
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  
  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  
  try {
    // Startup a minicluster
    cluster = 
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
        .racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    
    // Create a file that will be intentionally under-replicated
    final String pathString = new String("/testfile");
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
    
    // Create an under-replicated file
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    // Add a new node on different rack, so previous blocks' replicas 
    // are considered to be misplaced
    nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
    NUM_DN++;
    
    Map<String,String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
        NUM_DN, remoteAddress);
    
    // Run the fsck and check the Result
    final HdfsFileStatus file = 
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result res = new Result(conf);
    fsck.check(pathString, file, res);
    // check misReplicatedBlock number.
    assertEquals(res.numMisReplicatedBlocks, NUM_BLOCKS);
  } finally {
    if(dfs != null) {
      dfs.close();
    }
    if(cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 14
Source File: TestFsck.java    From hadoop with Apache License 2.0
/**
 * Tests that the # of missing block replicas and expected replicas is correct
 * @throws IOException
 */
@Test
public void testFsckMissingReplicas() throws IOException {
  // Desired replication factor
  // Set this higher than NUM_REPLICAS so it's under-replicated
  final short REPL_FACTOR = 2;
  // Number of replicas to actually start
  final short NUM_REPLICAS = 1;
  // Number of blocks to write
  final short NUM_BLOCKS = 3;
  // Set a small-ish blocksize
  final long blockSize = 512;
  
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  
  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  
  try {
    // Startup a minicluster
    cluster = 
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_REPLICAS).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    
    // Create a file that will be intentionally under-replicated
    final String pathString = new String("/testfile");
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
    
    // Create an under-replicated file
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    Map<String,String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
        NUM_REPLICAS, remoteAddress);
    
    // Run the fsck and check the Result
    final HdfsFileStatus file = 
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result res = new Result(conf);
    fsck.check(pathString, file, res);
    // Also print the output from the fsck, for ex post facto sanity checks
    System.out.println(result.toString());
    assertEquals(res.missingReplicas, 
        (NUM_BLOCKS*REPL_FACTOR) - (NUM_BLOCKS*NUM_REPLICAS));
    assertEquals(res.numExpectedReplicas, NUM_BLOCKS*REPL_FACTOR);
  } finally {
    if(dfs != null) {
      dfs.close();
    }
    if(cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 15
Source File: TestFsck.java    From big-c with Apache License 2.0
/**
 * Tests that the # of missing block replicas and expected replicas is correct
 * @throws IOException
 */
@Test
public void testFsckMissingReplicas() throws IOException {
  // Desired replication factor
  // Set this higher than NUM_REPLICAS so it's under-replicated
  final short REPL_FACTOR = 2;
  // Number of replicas to actually start
  final short NUM_REPLICAS = 1;
  // Number of blocks to write
  final short NUM_BLOCKS = 3;
  // Set a small-ish blocksize
  final long blockSize = 512;
  
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  
  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  
  try {
    // Startup a minicluster
    cluster = 
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_REPLICAS).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    
    // Create a file that will be intentionally under-replicated
    final String pathString = new String("/testfile");
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
    
    // Create an under-replicated file
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    Map<String,String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
        NUM_REPLICAS, remoteAddress);
    
    // Run the fsck and check the Result
    final HdfsFileStatus file = 
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result res = new Result(conf);
    fsck.check(pathString, file, res);
    // Also print the output from the fsck, for ex post facto sanity checks
    System.out.println(result.toString());
    assertEquals(res.missingReplicas, 
        (NUM_BLOCKS*REPL_FACTOR) - (NUM_BLOCKS*NUM_REPLICAS));
    assertEquals(res.numExpectedReplicas, NUM_BLOCKS*REPL_FACTOR);
  } finally {
    if(dfs != null) {
      dfs.close();
    }
    if(cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 16
Source File: TestFsck.java    From big-c with Apache License 2.0
/**
 * Tests that the # of misreplaced replicas is correct
 * @throws IOException
 */
@Test
public void testFsckMisPlacedReplicas() throws IOException {
  // Desired replication factor
  final short REPL_FACTOR = 2;
  // Number of replicas to actually start
  short NUM_DN = 2;
  // Number of blocks to write
  final short NUM_BLOCKS = 3;
  // Set a small-ish blocksize
  final long blockSize = 512;
  
  String [] racks = {"/rack1", "/rack1"};
  String [] hosts = {"host1", "host2"};
  
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  
  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  
  try {
    // Startup a minicluster
    cluster = 
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
        .racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    
    // Create a file that will be intentionally under-replicated
    final String pathString = new String("/testfile");
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
    
    // Create an under-replicated file
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    // Add a new node on different rack, so previous blocks' replicas 
    // are considered to be misplaced
    nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
    NUM_DN++;
    
    Map<String,String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
        NUM_DN, remoteAddress);
    
    // Run the fsck and check the Result
    final HdfsFileStatus file = 
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result res = new Result(conf);
    fsck.check(pathString, file, res);
    // check misReplicatedBlock number.
    assertEquals(res.numMisReplicatedBlocks, NUM_BLOCKS);
  } finally {
    if(dfs != null) {
      dfs.close();
    }
    if(cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 17
Source File: TestBootstrapStandbyWithBKJM.java    From big-c with Apache License 2.0
/**
 * While bootstrapping, in_progress transaction entries should be skipped.
 * Bootstrap usage for BKJM: "-force", "-nonInteractive", "-skipSharedEditsCheck"
 */
@Test
public void testBootstrapStandbyWithActiveNN() throws Exception {
  // make nn0 active
  cluster.transitionToActive(0);
 
  // do ops and generate in-progress edit log data
  Configuration confNN1 = cluster.getConfiguration(1);
  DistributedFileSystem dfs = (DistributedFileSystem) HATestUtil
      .configureFailoverFs(cluster, confNN1);
  for (int i = 1; i <= 10; i++) {
    dfs.mkdirs(new Path("/test" + i));
  }
  dfs.close();

  // shutdown nn1 and delete its edit log files
  cluster.shutdownNameNode(1);
  deleteEditLogIfExists(confNN1);
  cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_ENTER, true);
  cluster.getNameNodeRpc(0).saveNamespace();
  cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, true);

  // check without -skipSharedEditsCheck, Bootstrap should fail for BKJM
  // immediately after saveNamespace
  int rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive" },
    confNN1);
  Assert.assertEquals("Mismatches return code", 6, rc);

  // check with -skipSharedEditsCheck
  rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive",
      "-skipSharedEditsCheck" }, confNN1);
  Assert.assertEquals("Mismatches return code", 0, rc);

  // Checkpoint as fast as we can, in a tight loop.
  confNN1.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
  cluster.restartNameNode(1);
  cluster.transitionToStandby(1);
 
  NameNode nn0 = cluster.getNameNode(0);
  HATestUtil.waitForStandbyToCatchUp(nn0, cluster.getNameNode(1));
  long expectedCheckpointTxId = NameNodeAdapter.getNamesystem(nn0)
      .getFSImage().getMostRecentCheckpointTxId();
  HATestUtil.waitForCheckpoint(cluster, 1,
      ImmutableList.of((int) expectedCheckpointTxId));

  // Should have copied over the namespace
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1,
      ImmutableList.of((int) expectedCheckpointTxId));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}
 
Example 18
Source File: TestEnhancedByteBufferAccess.java    From hadoop with Apache License 2.0
@Test
public void testClientMmapDisable() throws Exception {
  HdfsConfiguration conf = initZeroCopyTest();
  conf.setBoolean(DFS_CLIENT_MMAP_ENABLED, false);
  MiniDFSCluster cluster = null;
  final Path TEST_PATH = new Path("/a");
  final int TEST_FILE_LENGTH = 16385;
  final int RANDOM_SEED = 23453;
  final String CONTEXT = "testClientMmapDisable";
  FSDataInputStream fsIn = null;
  DistributedFileSystem fs = null;
  conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);

  try {
    // With DFS_CLIENT_MMAP_ENABLED set to false, we should not do memory
    // mapped reads.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_PATH,
        TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
    DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
    fsIn = fs.open(TEST_PATH);
    try {
      fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.fail("expected zero-copy read to fail when client mmaps " +
          "were disabled.");
    } catch (UnsupportedOperationException e) {
    }
  } finally {
    if (fsIn != null) fsIn.close();
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }

  fsIn = null;
  fs = null;
  cluster = null;
  try {
    // Now try again with DFS_CLIENT_MMAP_CACHE_SIZE == 0.  It should work.
    conf.setBoolean(DFS_CLIENT_MMAP_ENABLED, true);
    conf.setInt(DFS_CLIENT_MMAP_CACHE_SIZE, 0);
    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT + ".1");
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_PATH,
        TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
    DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
    fsIn = fs.open(TEST_PATH);
    ByteBuffer buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    fsIn.releaseBuffer(buf);
    // Test EOF behavior
    IOUtils.skipFully(fsIn, TEST_FILE_LENGTH - 1);
    buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(null, buf);
  } finally {
    if (fsIn != null) fsIn.close();
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }
}
 
Example 19
Source File: OfflineEditsViewerHelper.java    From RDFS with Apache License 2.0
/**
 * Run file operations to create edits for all op codes
 * to be tested.
 */
private void runOperations() throws IOException {

  LOG.info("Creating edits by performing fs operations");
  // no explicit check; if it's not a DistributedFileSystem the cast throws, which is what we want
  DistributedFileSystem dfs =
    (DistributedFileSystem)cluster.getFileSystem();
  // OP_ADD 0, OP_SET_GENSTAMP 10
  Path pathFileCreate = new Path("/file_create");
  FSDataOutputStream s = dfs.create(pathFileCreate);
  // OP_CLOSE 9
  s.close();
  // OP_RENAME 1
  Path pathFileMoved = new Path("/file_moved");
  dfs.rename(pathFileCreate, pathFileMoved);
  // OP_DELETE 2
  dfs.delete(pathFileMoved, false);
  // OP_MKDIR 3
  Path pathDirectoryMkdir = new Path("/directory_mkdir");
  dfs.mkdirs(pathDirectoryMkdir);
  // OP_SET_REPLICATION 4
  s = dfs.create(pathFileCreate);
  s.close();
  dfs.setReplication(pathFileCreate, (short)1);
  // OP_SET_PERMISSIONS 7
  Short permission = 0777;
  dfs.setPermission(pathFileCreate, new FsPermission(permission));
  // OP_SET_OWNER 8
  dfs.setOwner(pathFileCreate, new String("newOwner"), null);
  // OP_CLOSE 9 see above
  // OP_SET_GENSTAMP 10 see above
  // OP_SET_NS_QUOTA 11 obsolete
  // OP_CLEAR_NS_QUOTA 12 obsolete
  // OP_TIMES 13
  long mtime = 1285195527000L; // Wed, 22 Sep 2010 22:45:27 GMT
  long atime = mtime;
  dfs.setTimes(pathFileCreate, mtime, atime);
  // OP_SET_QUOTA 14
  dfs.setQuota(pathDirectoryMkdir, 1000L, FSConstants.QUOTA_DONT_SET);
  // OP_CONCAT_DELETE 16
  Path   pathConcatTarget = new Path("/file_concat_target");
  Path[] pathConcatFiles  = new Path[2];
  pathConcatFiles[0]      = new Path("/file_concat_0");
  pathConcatFiles[1]      = new Path("/file_concat_1");

  long  length      = blockSize * 3; // multiple of blocksize for concat
  short replication = 1;
  long  seed        = 1;

  DFSTestUtil.createFile(dfs, pathConcatTarget, length, replication, seed);
  DFSTestUtil.createFile(dfs, pathConcatFiles[0], length, replication, seed);
  DFSTestUtil.createFile(dfs, pathConcatFiles[1], length, replication, seed);
  dfs.concat(pathConcatTarget, pathConcatFiles, false);

  // sync to disk, otherwise we parse partial edits
  cluster.getNameNode().getFSImage().getEditLog().logSync();
  dfs.close();
}