Java Code Examples for org.apache.hadoop.hdfs.DistributedFileSystem#create()

The following examples show how to use org.apache.hadoop.hdfs.DistributedFileSystem#create(). Each example is taken from an open source project; the source file and license are noted above the code.
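
Before the project examples, here is a minimal, hypothetical usage sketch of the basic call pattern: obtain a DistributedFileSystem, call create() for a new Path, write through the returned FSDataOutputStream, and close it. The class name and the hdfs://localhost:8020 URI below are illustrative assumptions, not taken from any of the projects that follow.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class CreateExample {
  public static void main(String[] args) throws Exception {
    // Assumes an HDFS namenode is reachable at this (example) URI.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    // create() returns an FSDataOutputStream for the new file.
    Path file = new Path("/tmp/create-example.txt");
    FSDataOutputStream out = dfs.create(file);
    try {
      out.writeBytes("hello hdfs");
      out.hsync(); // optionally persist to the datanodes before closing
    } finally {
      out.close();
    }

    dfs.close();
  }
}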
Example 1
Source File: TestSaveNamespace.java    From hadoop with Apache License 2.0
/**
 * Test that saving the namespace succeeds when a parent directory is renamed
 * while a file under it has an open lease and the destination directory exists.
 * This is a regression test for HDFS-2827.
 */
@Test
public void testSaveNamespaceWithRenamedLease() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
      .numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
  OutputStream out = null;
  try {
    fs.mkdirs(new Path("/test-target"));
    out = fs.create(new Path("/test-source/foo")); // don't close
    fs.rename(new Path("/test-source/"), new Path("/test-target/"));

    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    cluster.getNameNodeRpc().saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  } finally {
    IOUtils.cleanup(LOG, out, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 2
Source File: TestStochasticLoadBalancerHeterogeneousCostRules.java    From hbase with Apache License 2.0
@Test
public void testLoadingFomHDFS() throws Exception {
  HTU.startMiniDFSCluster(3);
  try {
    MiniDFSCluster cluster = HTU.getDFSCluster();
    DistributedFileSystem fs = cluster.getFileSystem();
    // Write the rules file to HDFS
    Path path = new Path(fs.getHomeDirectory(), DEFAULT_RULES_FILE_NAME);
    FSDataOutputStream stream = fs.create(path);
    stream.write("server1 10".getBytes());
    stream.flush();
    stream.close();

    Configuration configuration = HTU.getConfiguration();

    // point the cost function at the rules file and load it
    configuration.set(
      HeterogeneousRegionCountCostFunction.HBASE_MASTER_BALANCER_HETEROGENEOUS_RULES_FILE,
      path.toString());
    this.costFunction = new HeterogeneousRegionCountCostFunction(configuration);
    this.costFunction.loadRules();
    Assert.assertEquals(1, this.costFunction.getNumberOfRulesLoaded());
  } finally {
    HTU.shutdownMiniCluster();
  }
}
 
Example 3
Source File: TestMover.java    From hadoop with Apache License 2.0
@Test
public void testScheduleSameBlock() throws IOException {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(4).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testScheduleSameBlock/file";
    
    {
      final FSDataOutputStream out = dfs.create(new Path(file));
      out.writeChars("testScheduleSameBlock");
      out.close();
    }

    final Mover mover = newMover(conf);
    mover.init();
    final Mover.Processor processor = mover.new Processor();

    final LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    final List<MLocation> locations = MLocation.toLocations(lb);
    final MLocation ml = locations.get(0);
    final DBlock db = mover.newDBlock(lb.getBlock().getLocalBlock(), locations);

    final List<StorageType> storageTypes = new ArrayList<StorageType>(
        Arrays.asList(StorageType.DEFAULT, StorageType.DEFAULT));
    Assert.assertTrue(processor.scheduleMoveReplica(db, ml, storageTypes));
    Assert.assertFalse(processor.scheduleMoveReplica(db, ml, storageTypes));
  } finally {
    cluster.shutdown();
  }
}
 
Example 4
Source File: TestMover.java    From hadoop with Apache License 2.0
@Test
public void testMoverFailedRetry() throws Exception {
  // HDFS-8147
  final Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .storageTypes(
          new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE},
              {StorageType.DISK, StorageType.ARCHIVE},
              {StorageType.DISK, StorageType.ARCHIVE}}).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testMoverFailedRetry";
    // write to DISK
    final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
    out.writeChars("testMoverFailedRetry");
    out.close();

    // Delete the block file so the block move will fail with FileNotFoundException
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
    // move to ARCHIVE
    dfs.setStoragePolicy(new Path(file), "COLD");
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] {"-p", file.toString()});
    Assert.assertEquals("Movement should fail after some retry",
        ExitStatus.IO_EXCEPTION.getExitCode(), rc);
  } finally {
    cluster.shutdown();
  }
}
 
Example 5
Source File: TestStuckDataNode.java    From RDFS with Apache License 2.0
/** This creates a slow writer and checks to see
 * if pipeline heartbeats work fine
 */
public void testStuckDataNode() throws Exception {
  final int DATANODE_NUM = 3;
  Configuration conf = new Configuration();
  final int timeout = 8000;
  conf.setInt("dfs.socket.timeout", timeout);

  final Path p = new Path("/pipelineHeartbeat/foo");
  System.out.println("p=" + p);

  MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
  DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();

  // Stub the datanode's bytesWritten metric so that incrementing it stalls,
  // making the datanode appear stuck to the writer pipeline.
  DataNodeMetrics metrics = cluster.getDataNodes().get(0).myMetrics;
  MetricsTimeVaryingLong spyBytesWritten = spy(metrics.bytesWritten);
  DelayAnswer delayAnswer = new DelayAnswer();
  doAnswer(delayAnswer).when(spyBytesWritten).inc(anyInt());
  metrics.bytesWritten = spyBytesWritten;

  try {
    // create a new file.
    FSDataOutputStream stm = fs.create(p);
    stm.write(1);
    stm.sync();
    stm.write(2);
    stm.close();

    // verify that entire file is good
    FSDataInputStream in = fs.open(p);
    assertEquals(1, in.read());
    assertEquals(2, in.read());
    in.close();
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 6
Source File: TestEmptyAbandonBlock.java    From RDFS with Apache License 2.0
@Test
public void testAbandon() throws Exception {
  NameNode nn = cluster.getNameNode();
  DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
  String fileName = "/testAbandon";
  fs.create(new Path(fileName));
  LocatedBlock lbk = nn.addBlock(fileName, fs.getClient().getClientName());
  INodeFileUnderConstruction cons = (INodeFileUnderConstruction) nn.namesystem.dir
      .getInode(fileName);
  cons.setTargets(null);
  nn.abandonBlock(lbk.getBlock(), fileName, fs.getClient().getClientName());
  assertEquals(0, nn.getBlockLocations(fileName, 0, Long.MAX_VALUE)
      .locatedBlockCount());
}
 
Example 7
Source File: TestHBaseWalOnEC.java    From hbase with Apache License 2.0
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  try {
    MiniDFSCluster cluster = UTIL.startMiniDFSCluster(3); // Need 3 DNs for RS-3-2 policy
    DistributedFileSystem fs = cluster.getFileSystem();

    Method enableAllECPolicies =
      DFSTestUtil.class.getMethod("enableAllECPolicies", DistributedFileSystem.class);
    enableAllECPolicies.invoke(null, fs);

    DFSClient client = fs.getClient();
    Method setErasureCodingPolicy =
      DFSClient.class.getMethod("setErasureCodingPolicy", String.class, String.class);
    setErasureCodingPolicy.invoke(client, "/", "RS-3-2-1024k"); // try a built-in policy

    try (FSDataOutputStream out = fs.create(new Path("/canary"))) {
      // If this comes back as having hflush then some test setup assumption is wrong.
      // Fail the test so that a developer has to look and triage
      assertFalse("Did not enable EC!", out.hasCapability(StreamCapabilities.HFLUSH));
    }
  } catch (NoSuchMethodException e) {
    // We're not testing anything interesting if EC is not available, so skip the rest of the test
    Assume.assumeNoException("Using an older version of hadoop; EC not available.", e);
  }

  UTIL.getConfiguration().setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, true);

}
 
Example 8
Source File: TestDataNodeMetrics.java    From hadoop with Apache License 2.0
@Test
public void testReceivePacketMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final int interval = 1;
  conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();

    Path testFile = new Path("/testFlushNanosMetric.txt");
    FSDataOutputStream fout = fs.create(testFile);
    fout.write(new byte[1]);
    fout.hsync();
    fout.close();
    List<DataNode> datanodes = cluster.getDataNodes();
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder dnMetrics = getMetrics(datanode.getMetrics().name());
    // Expect two flushes, 1 for the flush that occurs after writing, 
    // 1 that occurs on closing the data and metadata files.
    assertCounter("FlushNanosNumOps", 2L, dnMetrics);
    // Expect two syncs, one from the hsync, one on close.
    assertCounter("FsyncNanosNumOps", 2L, dnMetrics);
    // Wait for at least 1 rollover
    Thread.sleep((interval + 1) * 1000);
    // Check the receivePacket percentiles that should be non-zero
    String sec = interval + "s";
    assertQuantileGauges("FlushNanos" + sec, dnMetrics);
    assertQuantileGauges("FsyncNanos" + sec, dnMetrics);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Example 9
Source File: TestFsck.java    From big-c with Apache License 2.0
private void writeFile(final DistributedFileSystem dfs,
    Path dir, String fileName) throws IOException {
  Path filePath = new Path(dir.toString() + Path.SEPARATOR + fileName);
  final FSDataOutputStream out = dfs.create(filePath);
  out.writeChars("teststring");
  out.close();
}
 
Example 10
Source File: TestMover.java    From big-c with Apache License 2.0
@Test
public void testMoverFailedRetry() throws Exception {
  // HDFS-8147
  final Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .storageTypes(
          new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE},
              {StorageType.DISK, StorageType.ARCHIVE},
              {StorageType.DISK, StorageType.ARCHIVE}}).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testMoverFailedRetry";
    // write to DISK
    final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
    out.writeChars("testMoverFailedRetry");
    out.close();

    // Delete the block file so the block move will fail with FileNotFoundException
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
    // move to ARCHIVE
    dfs.setStoragePolicy(new Path(file), "COLD");
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] {"-p", file.toString()});
    Assert.assertEquals("Movement should fail after some retry",
        ExitStatus.IO_EXCEPTION.getExitCode(), rc);
  } finally {
    cluster.shutdown();
  }
}
 
Example 11
Source File: TestTransferRbw.java    From big-c with Apache License 2.0
@Test
public void testTransferRbw() throws Exception {
  final HdfsConfiguration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
      ).numDataNodes(REPLICATION).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem fs = cluster.getFileSystem();

    //create a file, write some data and leave it open. 
    final Path p = new Path("/foo");
    final int size = (1 << 16) + RAN.nextInt(1 << 16);
    LOG.info("size = " + size);
    final FSDataOutputStream out = fs.create(p, REPLICATION);
    final byte[] bytes = new byte[1024];
    for(int remaining = size; remaining > 0; ) {
      RAN.nextBytes(bytes);
      final int len = bytes.length < remaining? bytes.length: remaining;
      out.write(bytes, 0, len);
      out.hflush();
      remaining -= len;
    }

    //get the RBW
    final ReplicaBeingWritten oldrbw;
    final DataNode newnode;
    final DatanodeInfo newnodeinfo;
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    {
      final DataNode oldnode = cluster.getDataNodes().get(0);
      oldrbw = getRbw(oldnode, bpid);
      LOG.info("oldrbw = " + oldrbw);
      
      //add a datanode
      cluster.startDataNodes(conf, 1, true, null, null);
      newnode = cluster.getDataNodes().get(REPLICATION);
      
      final DatanodeInfo oldnodeinfo;
      {
        final DatanodeInfo[] datatnodeinfos = cluster.getNameNodeRpc(
            ).getDatanodeReport(DatanodeReportType.LIVE);
        Assert.assertEquals(2, datatnodeinfos.length);
        int i = 0;
        for(DatanodeRegistration dnReg = newnode.getDNRegistrationForBP(bpid);
            i < datatnodeinfos.length && !datatnodeinfos[i].equals(dnReg); i++);
        Assert.assertTrue(i < datatnodeinfos.length);
        newnodeinfo = datatnodeinfos[i];
        oldnodeinfo = datatnodeinfos[1 - i];
      }
      
      //transfer RBW
      final ExtendedBlock b = new ExtendedBlock(bpid, oldrbw.getBlockId(), oldrbw.getBytesAcked(),
          oldrbw.getGenerationStamp());
      final BlockOpResponseProto s = DFSTestUtil.transferRbw(
          b, DFSClientAdapter.getDFSClient(fs), oldnodeinfo, newnodeinfo);
      Assert.assertEquals(Status.SUCCESS, s.getStatus());
    }

    //check new rbw
    final ReplicaBeingWritten newrbw = getRbw(newnode, bpid);
    LOG.info("newrbw = " + newrbw);
    Assert.assertEquals(oldrbw.getBlockId(), newrbw.getBlockId());
    Assert.assertEquals(oldrbw.getGenerationStamp(), newrbw.getGenerationStamp());
    Assert.assertEquals(oldrbw.getVisibleLength(), newrbw.getVisibleLength());

    LOG.info("DONE");
  } finally {
    cluster.shutdown();
  }
}
 
Example 12
Source File: TestFSImage.java    From big-c with Apache License 2.0
private void testPersistHelper(Configuration conf) throws IOException {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNamesystem();
    DistributedFileSystem fs = cluster.getFileSystem();

    final Path dir = new Path("/abc/def");
    final Path file1 = new Path(dir, "f1");
    final Path file2 = new Path(dir, "f2");

    // create an empty file f1
    fs.create(file1).close();

    // create an under-construction file f2
    FSDataOutputStream out = fs.create(file2);
    out.writeBytes("hello");
    ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet
        .of(SyncFlag.UPDATE_LENGTH));

    // checkpoint
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

    cluster.restartNameNode();
    cluster.waitActive();
    fs = cluster.getFileSystem();

    assertTrue(fs.isDirectory(dir));
    assertTrue(fs.exists(file1));
    assertTrue(fs.exists(file2));

    // check internals of file2
    INodeFile file2Node = fsn.dir.getINode4Write(file2.toString()).asFile();
    assertEquals("hello".length(), file2Node.computeFileSize());
    assertTrue(file2Node.isUnderConstruction());
    BlockInfoContiguous[] blks = file2Node.getBlocks();
    assertEquals(1, blks.length);
    assertEquals(BlockUCState.UNDER_CONSTRUCTION, blks[0].getBlockUCState());
    // check lease manager
    Lease lease = fsn.leaseManager.getLeaseByPath(file2.toString());
    Assert.assertNotNull(lease);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 13
Source File: TestOfflineImageViewer.java    From big-c with Apache License 2.0
@BeforeClass
public static void createOriginalFSImage() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
        "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();

    // Create a reasonable namespace
    for (int i = 0; i < NUM_DIRS; i++) {
      Path dir = new Path("/dir" + i);
      hdfs.mkdirs(dir);
      writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
      for (int j = 0; j < FILES_PER_DIR; j++) {
        Path file = new Path(dir, "file" + j);
        FSDataOutputStream o = hdfs.create(file);
        o.write(23);
        o.close();

        writtenFiles.put(file.toString(),
            pathToFileEntry(hdfs, file.toString()));
      }
    }

    // Create an empty directory
    Path emptydir = new Path("/emptydir");
    hdfs.mkdirs(emptydir);
    writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));

    //Create a directory whose name should be escaped in XML
    Path invalidXMLDir = new Path("/dirContainingInvalidXMLChar\u0000here");
    hdfs.mkdirs(invalidXMLDir);

    // Get delegation tokens so we log the delegation token op
    Token<?>[] delegationTokens = hdfs
        .addDelegationTokens(TEST_RENEWER, null);
    for (Token<?> t : delegationTokens) {
      LOG.debug("got token " + t);
    }

    final Path snapshot = new Path("/snapshot");
    hdfs.mkdirs(snapshot);
    hdfs.allowSnapshot(snapshot);
    hdfs.mkdirs(new Path("/snapshot/1"));
    hdfs.delete(snapshot, true);

    // Set XAttrs so the fsimage contains XAttr ops
    final Path xattr = new Path("/xattr");
    hdfs.mkdirs(xattr);
    hdfs.setXAttr(xattr, "user.a1", new byte[]{ 0x31, 0x32, 0x33 });
    hdfs.setXAttr(xattr, "user.a2", new byte[]{ 0x37, 0x38, 0x39 });
    // OIV should be able to handle empty value XAttrs
    hdfs.setXAttr(xattr, "user.a3", null);
    writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));

    // Write results to the fsimage file
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    hdfs.saveNamespace();

    // Determine location of fsimage file
    originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
        .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
    if (originalFsimage == null) {
      throw new RuntimeException("Didn't generate or can't find fsimage");
    }
    LOG.debug("original FS image file is " + originalFsimage);
  } finally {
    if (cluster != null)
      cluster.shutdown();
  }
}
 
Example 14
Source File: TestHASafeMode.java    From big-c with Apache License 2.0
/** Test NN crash and client crash/stuck immediately after block allocation */
@Test(timeout = 100000)
public void testOpenFileWhenNNAndClientCrashAfterAddBlock() throws Exception {
  cluster.getConfiguration(0).set(
      DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "1.0f");
  String testData = "testData";
  // to make sure we write the full block before creating dummy block at NN.
  cluster.getConfiguration(0).setInt("io.bytes.per.checksum",
      testData.length());
  cluster.restartNameNode(0);
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);
    cluster.transitionToStandby(1);
    DistributedFileSystem dfs = cluster.getFileSystem(0);
    String pathString = "/tmp1.txt";
    Path filePath = new Path(pathString);
    FSDataOutputStream create = dfs.create(filePath,
        FsPermission.getDefault(), true, 1024, (short) 3, testData.length(),
        null);
    create.write(testData.getBytes());
    create.hflush();
    long fileId = ((DFSOutputStream)create.
        getWrappedStream()).getFileId();
    FileStatus fileStatus = dfs.getFileStatus(filePath);
    DFSClient client = DFSClientAdapter.getClient(dfs);
    // add one dummy block at NN, but not write to DataNode
    ExtendedBlock previousBlock =
        DFSClientAdapter.getPreviousBlock(client, fileId);
    DFSClientAdapter.getNamenode(client).addBlock(
        pathString,
        client.getClientName(),
        new ExtendedBlock(previousBlock),
        new DatanodeInfo[0],
        DFSClientAdapter.getFileId((DFSOutputStream) create
            .getWrappedStream()), null);
    cluster.restartNameNode(0, true);
    cluster.restartDataNode(0);
    cluster.transitionToActive(0);
    // let the block reports be processed.
    Thread.sleep(2000);
    FSDataInputStream is = dfs.open(filePath);
    is.close();
    dfs.recoverLease(filePath);// initiate recovery
    assertTrue("Recovery also should be success", dfs.recoverLease(filePath));
  } finally {
    cluster.shutdown();
  }
}
 
Example 15
Source File: OfflineEditsViewerHelper.java    From RDFS with Apache License 2.0
/**
 * Run file operations to create edits for all op codes
 * to be tested.
 */
private void runOperations() throws IOException {

  LOG.info("Creating edits by performing fs operations");
  // no check, if it's not it throws an exception which is what we want
  DistributedFileSystem dfs =
    (DistributedFileSystem)cluster.getFileSystem();
  // OP_ADD 0, OP_SET_GENSTAMP 10
  Path pathFileCreate = new Path("/file_create");
  FSDataOutputStream s = dfs.create(pathFileCreate);
  // OP_CLOSE 9
  s.close();
  // OP_RENAME 1
  Path pathFileMoved = new Path("/file_moved");
  dfs.rename(pathFileCreate, pathFileMoved);
  // OP_DELETE 2
  dfs.delete(pathFileMoved, false);
  // OP_MKDIR 3
  Path pathDirectoryMkdir = new Path("/directory_mkdir");
  dfs.mkdirs(pathDirectoryMkdir);
  // OP_SET_REPLICATION 4
  s = dfs.create(pathFileCreate);
  s.close();
  dfs.setReplication(pathFileCreate, (short)1);
  // OP_SET_PERMISSIONS 7
  Short permission = 0777;
  dfs.setPermission(pathFileCreate, new FsPermission(permission));
  // OP_SET_OWNER 8
  dfs.setOwner(pathFileCreate, new String("newOwner"), null);
  // OP_CLOSE 9 see above
  // OP_SET_GENSTAMP 10 see above
  // OP_SET_NS_QUOTA 11 obsolete
  // OP_CLEAR_NS_QUOTA 12 obsolete
  // OP_TIMES 13
  long mtime = 1285195527000L; // Wed, 22 Sep 2010 22:45:27 GMT
  long atime = mtime;
  dfs.setTimes(pathFileCreate, mtime, atime);
  // OP_SET_QUOTA 14
  dfs.setQuota(pathDirectoryMkdir, 1000L, FSConstants.QUOTA_DONT_SET);
  // OP_CONCAT_DELETE 16
  Path   pathConcatTarget = new Path("/file_concat_target");
  Path[] pathConcatFiles  = new Path[2];
  pathConcatFiles[0]      = new Path("/file_concat_0");
  pathConcatFiles[1]      = new Path("/file_concat_1");

  long  length      = blockSize * 3; // multiple of blocksize for concat
  short replication = 1;
  long  seed        = 1;

  DFSTestUtil.createFile(dfs, pathConcatTarget, length, replication, seed);
  DFSTestUtil.createFile(dfs, pathConcatFiles[0], length, replication, seed);
  DFSTestUtil.createFile(dfs, pathConcatFiles[1], length, replication, seed);
  dfs.concat(pathConcatTarget, pathConcatFiles, false);

  // sync to disk, otherwise we parse partial edits
  cluster.getNameNode().getFSImage().getEditLog().logSync();
  dfs.close();
}
 
Example 16
Source File: DatanodeBenThread.java    From RDFS with Apache License 2.0
public void write() throws Exception {
  long endTime = System.currentTimeMillis() + rtc.max_time;
  long currentId = 0;
  FSDataOutputStream out = null;
  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  while (System.currentTimeMillis() < endTime
      && currentId < rtc.max_files) {
    if (running_type == RUNNING_TYPE.PREPARE) {
      // Exit once the number of files reaches the minimum limit
      if (getNumberOfFiles() > rtc.min_file) 
        break;
    }
    Path fileName = new Path(outputPath, file_prefix + currentId);
    try { 
      out = dfs.create(fileName,
                       FsPermission.getDefault(),
                       false,
                       dfs.getConf().getInt("io.file.buffer.size", 4096),
                       (short)replication,
                       dfs.getDefaultBlockSize(),
                       dfs.getConf().getInt("io.bytes.per.checksum", 512),
                       null,
                       rtc.victims);
      long size = 0;
      while (true) {
        rb.nextBytes(buffer);
        tb.getTokens(rtc.buffer_size);
        out.write(buffer, 0, rtc.buffer_size);
        size += rtc.buffer_size;
        if (System.currentTimeMillis() > endTime 
            || size + rtc.buffer_size > max_size) {
          // Roll the file
          out.close();
          out = null;
          currentId++;
          files_processed++;
          processed_size += size;
          write_size += size;
          Path fullName = fs.makeQualified(fileName);
          BlockLocation bl = dfs.getClient().getBlockLocations(
              fullName.toUri().getPath(), 0L, 1L)[0];
          String hosts = "";
          for (String host: bl.getHosts()) {
            hosts += host + " ";
          }
          LOG.info("[close (" + size + "B)] " + hosts + " file " + fullName);
          break;
        }
      }
    } catch (Exception e) {
      LOG.error("Error in writing file:" + fileName, e);
      this.errors.add(e);
    } finally {
      IOUtils.closeStream(out);
    }
  }
}
 
Example 17
Source File: TestTransferRbw.java    From hadoop with Apache License 2.0
@Test
public void testTransferRbw() throws Exception {
  final HdfsConfiguration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
      ).numDataNodes(REPLICATION).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem fs = cluster.getFileSystem();

    //create a file, write some data and leave it open. 
    final Path p = new Path("/foo");
    final int size = (1 << 16) + RAN.nextInt(1 << 16);
    LOG.info("size = " + size);
    final FSDataOutputStream out = fs.create(p, REPLICATION);
    final byte[] bytes = new byte[1024];
    for(int remaining = size; remaining > 0; ) {
      RAN.nextBytes(bytes);
      final int len = bytes.length < remaining? bytes.length: remaining;
      out.write(bytes, 0, len);
      out.hflush();
      remaining -= len;
    }

    //get the RBW
    final ReplicaBeingWritten oldrbw;
    final DataNode newnode;
    final DatanodeInfo newnodeinfo;
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    {
      final DataNode oldnode = cluster.getDataNodes().get(0);
      oldrbw = getRbw(oldnode, bpid);
      LOG.info("oldrbw = " + oldrbw);
      
      //add a datanode
      cluster.startDataNodes(conf, 1, true, null, null);
      newnode = cluster.getDataNodes().get(REPLICATION);
      
      final DatanodeInfo oldnodeinfo;
      {
        final DatanodeInfo[] datatnodeinfos = cluster.getNameNodeRpc(
            ).getDatanodeReport(DatanodeReportType.LIVE);
        Assert.assertEquals(2, datatnodeinfos.length);
        int i = 0;
        for(DatanodeRegistration dnReg = newnode.getDNRegistrationForBP(bpid);
            i < datatnodeinfos.length && !datatnodeinfos[i].equals(dnReg); i++);
        Assert.assertTrue(i < datatnodeinfos.length);
        newnodeinfo = datatnodeinfos[i];
        oldnodeinfo = datatnodeinfos[1 - i];
      }
      
      //transfer RBW
      final ExtendedBlock b = new ExtendedBlock(bpid, oldrbw.getBlockId(), oldrbw.getBytesAcked(),
          oldrbw.getGenerationStamp());
      final BlockOpResponseProto s = DFSTestUtil.transferRbw(
          b, DFSClientAdapter.getDFSClient(fs), oldnodeinfo, newnodeinfo);
      Assert.assertEquals(Status.SUCCESS, s.getStatus());
    }

    //check new rbw
    final ReplicaBeingWritten newrbw = getRbw(newnode, bpid);
    LOG.info("newrbw = " + newrbw);
    Assert.assertEquals(oldrbw.getBlockId(), newrbw.getBlockId());
    Assert.assertEquals(oldrbw.getGenerationStamp(), newrbw.getGenerationStamp());
    Assert.assertEquals(oldrbw.getVisibleLength(), newrbw.getVisibleLength());

    LOG.info("DONE");
  } finally {
    cluster.shutdown();
  }
}
 
Example 18
Source File: TestHSync.java    From hadoop with Apache License 2.0
private void testHSyncOperation(boolean testWithAppend) throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final DistributedFileSystem fs = cluster.getFileSystem();

  final Path p = new Path("/testHSync/foo");
  final int len = 1 << 16;
  FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
      4096, (short) 1, len, null);
  if (testWithAppend) {
    // re-open the file with append call
    out.close();
    out = fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.SYNC_BLOCK),
        4096, null);
  }
  out.hflush();
  // hflush does not sync
  checkSyncMetric(cluster, 0);
  out.hsync();
  // hsync on empty file does nothing
  checkSyncMetric(cluster, 0);
  out.write(1);
  checkSyncMetric(cluster, 0);
  out.hsync();
  checkSyncMetric(cluster, 1);
  // avoiding repeated hsyncs is a potential future optimization
  out.hsync();
  checkSyncMetric(cluster, 2);
  out.hflush();
  // hflush still does not sync
  checkSyncMetric(cluster, 2);
  out.close();
  // close is sync'ing
  checkSyncMetric(cluster, 3);

  // same with a file created without SYNC_BLOCK
  out = fs.create(p, FsPermission.getDefault(),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      4096, (short) 1, len, null);
  out.hsync();
  checkSyncMetric(cluster, 3);
  out.write(1);
  checkSyncMetric(cluster, 3);
  out.hsync();
  checkSyncMetric(cluster, 4);
  // repeated hsyncs
  out.hsync();
  checkSyncMetric(cluster, 5);
  out.close();
  // close does not sync (not opened with SYNC_BLOCK)
  checkSyncMetric(cluster, 5);
  cluster.shutdown();
}
 
Example 19
Source File: TestFSImage.java    From hadoop with Apache License 2.0
private void testPersistHelper(Configuration conf) throws IOException {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNamesystem();
    DistributedFileSystem fs = cluster.getFileSystem();

    final Path dir = new Path("/abc/def");
    final Path file1 = new Path(dir, "f1");
    final Path file2 = new Path(dir, "f2");

    // create an empty file f1
    fs.create(file1).close();

    // create an under-construction file f2
    FSDataOutputStream out = fs.create(file2);
    out.writeBytes("hello");
    ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet
        .of(SyncFlag.UPDATE_LENGTH));

    // checkpoint
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

    cluster.restartNameNode();
    cluster.waitActive();
    fs = cluster.getFileSystem();

    assertTrue(fs.isDirectory(dir));
    assertTrue(fs.exists(file1));
    assertTrue(fs.exists(file2));

    // check internals of file2
    INodeFile file2Node = fsn.dir.getINode4Write(file2.toString()).asFile();
    assertEquals("hello".length(), file2Node.computeFileSize());
    assertTrue(file2Node.isUnderConstruction());
    BlockInfoContiguous[] blks = file2Node.getBlocks();
    assertEquals(1, blks.length);
    assertEquals(BlockUCState.UNDER_CONSTRUCTION, blks[0].getBlockUCState());
    // check lease manager
    Lease lease = fsn.leaseManager.getLeaseByPath(file2.toString());
    Assert.assertNotNull(lease);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 20
Source File: TestHASafeMode.java    From hadoop with Apache License 2.0
/** Test NN crash and client crash/stuck immediately after block allocation */
@Test(timeout = 100000)
public void testOpenFileWhenNNAndClientCrashAfterAddBlock() throws Exception {
  cluster.getConfiguration(0).set(
      DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "1.0f");
  String testData = "testData";
  // to make sure we write the full block before creating dummy block at NN.
  cluster.getConfiguration(0).setInt("io.bytes.per.checksum",
      testData.length());
  cluster.restartNameNode(0);
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);
    cluster.transitionToStandby(1);
    DistributedFileSystem dfs = cluster.getFileSystem(0);
    String pathString = "/tmp1.txt";
    Path filePath = new Path(pathString);
    FSDataOutputStream create = dfs.create(filePath,
        FsPermission.getDefault(), true, 1024, (short) 3, testData.length(),
        null);
    create.write(testData.getBytes());
    create.hflush();
    long fileId = ((DFSOutputStream)create.
        getWrappedStream()).getFileId();
    FileStatus fileStatus = dfs.getFileStatus(filePath);
    DFSClient client = DFSClientAdapter.getClient(dfs);
    // add one dummy block at NN, but not write to DataNode
    ExtendedBlock previousBlock =
        DFSClientAdapter.getPreviousBlock(client, fileId);
    DFSClientAdapter.getNamenode(client).addBlock(
        pathString,
        client.getClientName(),
        new ExtendedBlock(previousBlock),
        new DatanodeInfo[0],
        DFSClientAdapter.getFileId((DFSOutputStream) create
            .getWrappedStream()), null);
    cluster.restartNameNode(0, true);
    cluster.restartDataNode(0);
    cluster.transitionToActive(0);
    // let the block reports be processed.
    Thread.sleep(2000);
    FSDataInputStream is = dfs.open(filePath);
    is.close();
    dfs.recoverLease(filePath);// initiate recovery
    assertTrue("Recovery also should be success", dfs.recoverLease(filePath));
  } finally {
    cluster.shutdown();
  }
}