org.apache.hadoop.hdfs.DFSOutputStream Java Examples

The following examples show how to use org.apache.hadoop.hdfs.DFSOutputStream. The source file and the project each example comes from are noted above its code.
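Most of the examples below share one pattern: obtain an FSDataOutputStream from the FileSystem API, unwrap the underlying DFSOutputStream with getWrappedStream(), and call an HDFS-specific operation such as hsync(EnumSet&lt;SyncFlag&gt;) or getPipeline(). The following is a minimal sketch of that pattern, not taken from any of the projects below; it assumes a Hadoop 2.x client, an HDFS default filesystem in the Configuration, and a hypothetical path.

import java.io.OutputStream;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

public class DFSOutputStreamSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS cluster; otherwise the wrapped stream is not a DFSOutputStream.
    FileSystem fs = FileSystem.get(conf);
    FSDataOutputStream out = fs.create(new Path("/tmp/dfsoutputstream-sketch.txt"));
    try {
      out.writeBytes("hello");
      OutputStream wrapped = out.getWrappedStream();
      if (wrapped instanceof DFSOutputStream) {
        // HDFS-specific hsync: also persists the new file length on the NameNode.
        ((DFSOutputStream) wrapped).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
      } else {
        // Not HDFS (or wrapped, e.g. by encryption); fall back to the generic hsync.
        out.hsync();
      }
    } finally {
      out.close();
    }
  }
}
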
Example #1
Source File: TestBlocksScheduledCounter.java    From RDFS with Apache License 2.0
public void testBlocksScheduledCounter() throws IOException {
  
  MiniDFSCluster cluster = new MiniDFSCluster(new Configuration(), 1, 
                                              true, null);
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  
  // open a file and write a few bytes:
  FSDataOutputStream out = fs.create(new Path("/testBlockScheduledCounter"));
  for (int i=0; i<1024; i++) {
    out.write(i);
  }
  // flush to make sure a block is allocated.
  ((DFSOutputStream)(out.getWrappedStream())).sync();
  
  ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
  cluster.getNameNode().namesystem.DFSNodesStatus(dnList, dnList);
  DatanodeDescriptor dn = dnList.get(0);
  
  assertEquals(1, dn.getBlocksScheduled());
 
  // close the file and the counter should go to zero.
  out.close();   
  assertEquals(0, dn.getBlocksScheduled());
}
 
Example #2
Source File: TestRetryCacheWithHA.java    From hadoop with Apache License 2.0
@Override
void invoke() throws Exception {
  DatanodeInfo[] newNodes = new DatanodeInfo[2];
  newNodes[0] = nodes[0];
  newNodes[1] = nodes[1];
  String[] storageIDs = {"s0", "s1"};
  
  client.getNamenode().updatePipeline(client.getClientName(), oldBlock,
      newBlock, newNodes, storageIDs);
  // close() can fail if out.close() commits the block after the block-received
  // notifications from the DataNodes.
  // Since the datanodes and the output stream still have the old genstamps, these
  // blocks will be marked as corrupt after HDFS-5723 if the RECEIVED
  // notifications reach the namenode first, and close() will fail.
  DFSTestUtil.abortStream((DFSOutputStream) out.getWrappedStream());
}
 
Example #3
Source File: TestRenameWithSnapshots.java    From hadoop with Apache License 2.0
/**
 * Similar to testRenameUCFileInSnapshot, but does the renaming first and then
 * appends to the file without closing it. Unit test for HDFS-5425.
 */
@Test
public void testAppendFileAfterRenameInSnapshot() throws Exception {
  final Path test = new Path("/test");
  final Path foo = new Path(test, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
  // rename bar --> bar2
  final Path bar2 = new Path(foo, "bar2");
  hdfs.rename(bar, bar2);
  // append to the file and keep it under construction.
  FSDataOutputStream out = hdfs.append(bar2);
  out.writeByte(0);
  ((DFSOutputStream) out.getWrappedStream()).hsync(
      EnumSet.of(SyncFlag.UPDATE_LENGTH));

  // save namespace and restart
  restartClusterAndCheckImage(true);
}
 
Example #4
Source File: TestRenameWithSnapshots.java    From big-c with Apache License 2.0
/**
 * Similar to testRenameUCFileInSnapshot, but does the renaming first and then
 * appends to the file without closing it. Unit test for HDFS-5425.
 */
@Test
public void testAppendFileAfterRenameInSnapshot() throws Exception {
  final Path test = new Path("/test");
  final Path foo = new Path(test, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
  // rename bar --> bar2
  final Path bar2 = new Path(foo, "bar2");
  hdfs.rename(bar, bar2);
  // append to the file and keep it under construction.
  FSDataOutputStream out = hdfs.append(bar2);
  out.writeByte(0);
  ((DFSOutputStream) out.getWrappedStream()).hsync(
      EnumSet.of(SyncFlag.UPDATE_LENGTH));

  // save namespace and restart
  restartClusterAndCheckImage(true);
}
 
Example #5
Source File: TestRetryCacheWithHA.java    From big-c with Apache License 2.0
@Override
void invoke() throws Exception {
  DatanodeInfo[] newNodes = new DatanodeInfo[2];
  newNodes[0] = nodes[0];
  newNodes[1] = nodes[1];
  String[] storageIDs = {"s0", "s1"};
  
  client.getNamenode().updatePipeline(client.getClientName(), oldBlock,
      newBlock, newNodes, storageIDs);
  // close() can fail if out.close() commits the block after the block-received
  // notifications from the DataNodes.
  // Since the datanodes and the output stream still have the old genstamps, these
  // blocks will be marked as corrupt after HDFS-5723 if the RECEIVED
  // notifications reach the namenode first, and close() will fail.
  DFSTestUtil.abortStream((DFSOutputStream) out.getWrappedStream());
}
 
Example #6
Source File: Hdfs.java    From hadoop with Apache License 2.0
@Override
public HdfsDataOutputStream createInternal(Path f,
    EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
    int bufferSize, short replication, long blockSize, Progressable progress,
    ChecksumOpt checksumOpt, boolean createParent) throws IOException {

  final DFSOutputStream dfsos = dfs.primitiveCreate(getUriPath(f),
    absolutePermission, createFlag, createParent, replication, blockSize,
    progress, bufferSize, checksumOpt);
  return dfs.createWrappedOutputStream(dfsos, statistics,
      dfsos.getInitialLen());
}
 
Example #7
Source File: DistributedFileSystem.java    From RDFS with Apache License 2.0
/** This optional operation is not yet supported. */
public FSDataOutputStream append(Path f, int bufferSize,
    Progressable progress) throws IOException {

  DFSOutputStream op = (DFSOutputStream)dfs.append(getPathName(f), bufferSize, progress);
  return new FSDataOutputStream(op, statistics, op.getInitialLen());
}
 
Example #8
Source File: TestParallelRBW.java    From RDFS with Apache License 2.0
private void createFile(FileSystem fs, FSDataOutputStream out,
    String fileName, int fileLen) throws IOException {
  Random random = new Random(fileName.hashCode());
  byte buffer[] = new byte[fileLen];
  random.nextBytes(buffer);
  out.write(buffer);
  out.sync();
  ((DFSOutputStream) out.getWrappedStream()).abortForTests();
}
 
Example #9
Source File: TestFileAppend4.java    From RDFS with Apache License 2.0
private void runDNRestartCorruptType(CorruptionType corrupt) throws Exception {
  cluster = new MiniDFSCluster(conf, 3, true, null);
  FileSystem fs1 = cluster.getFileSystem();
  try {
    short rep = 3; // replication
    assertTrue(BLOCK_SIZE%4 == 0);

    file1 = new Path("/dnDeath.dat");

    // write 1/2 block & close
    stm = fs1.create(file1, true, 1024, rep, 4096);
    AppendTestUtil.write(stm, 0, 1024);
    stm.sync();
    loseLeases(fs1);

    DFSOutputStream dfso = (DFSOutputStream)stm.getWrappedStream();
    dfso.abortForTests();

    // close the primary DN
    DataNodeProperties badDN = cluster.stopDataNode(0);

    // Truncate the block on the primary DN
    corruptDataNode(0, corrupt);

    // Start the DN back up
    cluster.restartDataNode(badDN);

    // Recover the lease
    FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf());
    recoverFile(fs2);

    assertFileSize(fs2, 1024);
    checkFile(fs2, 1024);
  } finally {
    // explicitly do not shut down fs1, since it's been frozen up by
    // killing the DataStreamer and not allowing recovery
    cluster.shutdown();
  }
}
 
Example #10
Source File: TestDatanodeUpgrade.java    From RDFS with Apache License 2.0
private void createFile(FileSystem fs, FSDataOutputStream out,
    String fileName, int fileLen) throws IOException {
  Random random = new Random(fileName.hashCode());
  byte buffer[] = new byte[fileLen];
  random.nextBytes(buffer);
  out.write(buffer);
  out.sync();
  ((DFSOutputStream) out.getWrappedStream()).abortForTests();
}
 
Example #11
Source File: TestNameNodeIdempotence.java    From RDFS with Apache License 2.0
/**
 * Test that the addBlock() name-node RPC is idempotent.
 */
public void testIdepotentCallsAddBlock() throws IOException {
  ClientProtocol nn = cluster.getNameNode();
  FileSystem fs = cluster.getFileSystem();
  DFSClient dfsclient = ((DistributedFileSystem) fs).dfs;

  String src = "/testNameNodeFingerprintSent1.txt";
  // Path f = new Path(src);

  DFSOutputStream dos = (DFSOutputStream) dfsclient.create(src, true,
      (short) 1, 512L);

  FSDataOutputStream a_out = new FSDataOutputStream(dos); // fs.create(f);

  for (int i = 0; i < 512; i++) {
    a_out.writeBytes("bc");
  }
  a_out.flush();

  LocatedBlocks lb = nn.getBlockLocations(src, 256, 257);
  LocatedBlock lb1 = nn.addBlockAndFetchMetaInfo(src, dfsclient.clientName,
      null, null, 512L, lb.getLocatedBlocks().get(lb.locatedBlockCount() - 1)
          .getBlock());
  LocatedBlock lb2 = nn.addBlockAndFetchMetaInfo(src, dfsclient.clientName,
      null, null, 512L, lb.getLocatedBlocks().get(lb.locatedBlockCount() - 1)
          .getBlock());
  TestCase.assertTrue("blocks: " + lb1.getBlock() + " and " + lb2.getBlock(),
      lb1.getBlock().equals(lb2.getBlock()));
}
 
Example #12
Source File: TestCloseFile.java    From RDFS with Apache License 2.0
public void testRestartNameNode(boolean waitSafeMode) throws Exception {
  String file = "/testRestartNameNode" + waitSafeMode;

  // Create a file and write data.
  FSDataOutputStream out = fs.create(new Path(file));
  String clientName = ((DistributedFileSystem) fs).getClient().getClientName();
  byte[] buffer = new byte[FILE_LEN];
  random.nextBytes(buffer);
  out.write(buffer);
  ((DFSOutputStream) out.getWrappedStream()).sync();

  // Now shutdown the namenode and try to close the file.
  cluster.shutdownNameNode(0);
  Thread closeThread = new CloseThread(out, file, clientName);
  closeThread.start();
  Thread.sleep(CLOSE_FILE_TIMEOUT / 4);

  // Restart the namenode and verify the close file worked.
  if (!waitSafeMode) {
    cluster.restartNameNode(0, new String[]{}, false);
    cluster.getNameNode().setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  } else {
    cluster.restartNameNode(0);
  }
  closeThread.join(5000);
  assertTrue(pass);
}
 
Example #13
Source File: FSHLog.java    From hbase with Apache License 2.0
/**
 * This method gets the pipeline for the current WAL.
 */
@Override
DatanodeInfo[] getPipeline() {
  if (this.hdfs_out != null) {
    if (this.hdfs_out.getWrappedStream() instanceof DFSOutputStream) {
      return ((DFSOutputStream) this.hdfs_out.getWrappedStream()).getPipeline();
    }
  }
  return new DatanodeInfo[0];
}
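The same unwrapping approach works outside HBase: any code holding an FSDataOutputStream can inspect the current write pipeline. Below is a hedged sketch with a hypothetical helper; note that getPipeline() may return null until a block has been allocated, which Example #29 below handles by polling.

import java.io.OutputStream;
import java.util.Arrays;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

final class PipelineUtil {
  /** Hypothetical helper: describe the datanodes in the stream's current write pipeline. */
  static String describePipeline(FSDataOutputStream out) {
    OutputStream wrapped = out.getWrappedStream();
    if (wrapped instanceof DFSOutputStream) {
      DatanodeInfo[] pipeline = ((DFSOutputStream) wrapped).getPipeline();
      if (pipeline != null) {
        return Arrays.toString(pipeline);
      }
    }
    return "no pipeline (not an HDFS stream, or no block allocated yet)";
  }
}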
 
Example #14
Source File: TestAddBlock.java    From big-c with Apache License 2.0
/**
 * Test adding new blocks without closing the corresponding file
 */
@Test
public void testAddBlockUC() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path file1 = new Path("/file1");
  DFSTestUtil.createFile(fs, file1, BLOCKSIZE - 1, REPLICATION, 0L);
  
  FSDataOutputStream out = null;
  try {
    // append files without closing the streams
    out = fs.append(file1);
    String appendContent = "appending-content";
    out.writeBytes(appendContent);
    ((DFSOutputStream) out.getWrappedStream()).hsync(
        EnumSet.of(SyncFlag.UPDATE_LENGTH));
    
    // restart NN
    cluster.restartNameNode(true);
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    
    INodeFile fileNode = fsdir.getINode4Write(file1.toString()).asFile();
    BlockInfoContiguous[] fileBlocks = fileNode.getBlocks();
    assertEquals(2, fileBlocks.length);
    assertEquals(BLOCKSIZE, fileBlocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, fileBlocks[0].getBlockUCState());
    assertEquals(appendContent.length() - 1, fileBlocks[1].getNumBytes());
    assertEquals(BlockUCState.UNDER_CONSTRUCTION,
        fileBlocks[1].getBlockUCState());
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
 
Example #15
Source File: TestDiskspaceQuotaUpdate.java    From big-c with Apache License 2.0
/**
 * Test if the quota can be correctly updated when file length is updated
 * through fsync
 */
@Test (timeout=60000)
public void testUpdateQuotaForFSync() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(dfs, bar, BLOCKSIZE, REPLICATION, 0L);
  dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);

  FSDataOutputStream out = dfs.append(bar);
  out.write(new byte[BLOCKSIZE / 4]);
  ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));

  INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  QuotaCounts quota = fooNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed();
  long ns = quota.getNameSpace();
  long ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals(BLOCKSIZE * 2 * REPLICATION, ds); // file is under construction

  out.write(new byte[BLOCKSIZE / 4]);
  out.close();

  fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns);
  assertEquals((BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, ds);

  // append another block
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);

  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals((BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, ds);
}
 
Example #16
Source File: HdfsDataOutputStream.java    From big-c with Apache License 2.0
/**
 * Sync buffered data to DataNodes (flush to disk devices).
 * 
 * @param syncFlags
 *          Indicates the detailed semantics and actions of the hsync.
 * @throws IOException
 * @see FSDataOutputStream#hsync()
 */
public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
  OutputStream wrappedStream = getWrappedStream();
  if (wrappedStream instanceof CryptoOutputStream) {
    ((CryptoOutputStream) wrappedStream).flush();
    wrappedStream = ((CryptoOutputStream) wrappedStream).getWrappedStream();
  }
  ((DFSOutputStream) wrappedStream).hsync(syncFlags);
}
 
Example #17
Source File: BaseRecordHandler.java    From DataLink with Apache License 2.0
private void hsync(FSDataOutputStream fsOut) throws Exception {
    // When calling hsync, SyncFlag.UPDATE_LENGTH must be set; otherwise RDD or MR jobs
    // cannot read the data that has been written.
    // See:
    // https://issues.cloudera.org/browse/DISTRO-696;
    // http://www.hypertable.com/documentation/administrator_guide/hdfs_and_durability
    // https://blog.csdn.net/leen0304/article/details/77854052?locationNum=10&fps=1
    // https://issues.apache.org/jira/browse/HDFS-11915
    if (fsOut instanceof HdfsDataOutputStream) {
        ((HdfsDataOutputStream) fsOut).hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
    } else if (fsOut.getWrappedStream() instanceof DFSOutputStream) {
        ((DFSOutputStream) fsOut.getWrappedStream()).hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
    } else {
        fsOut.hsync();
    }
}
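The order of the checks above is worth noting: an HdfsDataOutputStream already exposes the flag-aware hsync directly, the DFSOutputStream cast covers HDFS streams reached through a plain FSDataOutputStream, and the final fsOut.hsync() keeps the method usable against FileSystem implementations that are not HDFS at all.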
 
Example #18
Source File: HdfsDataOutputStream.java    From hadoop with Apache License 2.0
/**
 * Sync buffered data to DataNodes (flush to disk devices).
 * 
 * @param syncFlags
 *          Indicates the detailed semantics and actions of the hsync.
 * @throws IOException
 * @see FSDataOutputStream#hsync()
 */
public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
  OutputStream wrappedStream = getWrappedStream();
  if (wrappedStream instanceof CryptoOutputStream) {
    ((CryptoOutputStream) wrappedStream).flush();
    wrappedStream = ((CryptoOutputStream) wrappedStream).getWrappedStream();
  }
  ((DFSOutputStream) wrappedStream).hsync(syncFlags);
}
 
Example #19
Source File: TestDiskspaceQuotaUpdate.java    From hadoop with Apache License 2.0
/**
 * Test if the quota can be correctly updated when file length is updated
 * through fsync
 */
@Test (timeout=60000)
public void testUpdateQuotaForFSync() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(dfs, bar, BLOCKSIZE, REPLICATION, 0L);
  dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);

  FSDataOutputStream out = dfs.append(bar);
  out.write(new byte[BLOCKSIZE / 4]);
  ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));

  INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  QuotaCounts quota = fooNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed();
  long ns = quota.getNameSpace();
  long ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals(BLOCKSIZE * 2 * REPLICATION, ds); // file is under construction

  out.write(new byte[BLOCKSIZE / 4]);
  out.close();

  fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns);
  assertEquals((BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, ds);

  // append another block
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);

  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals((BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, ds);
}
 
Example #20
Source File: TestAddBlock.java    From hadoop with Apache License 2.0
/**
 * Test adding new blocks without closing the corresponding file
 */
@Test
public void testAddBlockUC() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path file1 = new Path("/file1");
  DFSTestUtil.createFile(fs, file1, BLOCKSIZE - 1, REPLICATION, 0L);
  
  FSDataOutputStream out = null;
  try {
    // append files without closing the streams
    out = fs.append(file1);
    String appendContent = "appending-content";
    out.writeBytes(appendContent);
    ((DFSOutputStream) out.getWrappedStream()).hsync(
        EnumSet.of(SyncFlag.UPDATE_LENGTH));
    
    // restart NN
    cluster.restartNameNode(true);
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    
    INodeFile fileNode = fsdir.getINode4Write(file1.toString()).asFile();
    BlockInfoContiguous[] fileBlocks = fileNode.getBlocks();
    assertEquals(2, fileBlocks.length);
    assertEquals(BLOCKSIZE, fileBlocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, fileBlocks[0].getBlockUCState());
    assertEquals(appendContent.length() - 1, fileBlocks[1].getNumBytes());
    assertEquals(BlockUCState.UNDER_CONSTRUCTION,
        fileBlocks[1].getBlockUCState());
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
 
Example #21
Source File: Hdfs.java    From big-c with Apache License 2.0
@Override
public HdfsDataOutputStream createInternal(Path f,
    EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
    int bufferSize, short replication, long blockSize, Progressable progress,
    ChecksumOpt checksumOpt, boolean createParent) throws IOException {

  final DFSOutputStream dfsos = dfs.primitiveCreate(getUriPath(f),
    absolutePermission, createFlag, createParent, replication, blockSize,
    progress, bufferSize, checksumOpt);
  return dfs.createWrappedOutputStream(dfsos, statistics,
      dfsos.getInitialLen());
}
 
Example #22
Source File: TestFSImage.java    From big-c with Apache License 2.0
private void testPersistHelper(Configuration conf) throws IOException {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNamesystem();
    DistributedFileSystem fs = cluster.getFileSystem();

    final Path dir = new Path("/abc/def");
    final Path file1 = new Path(dir, "f1");
    final Path file2 = new Path(dir, "f2");

    // create an empty file f1
    fs.create(file1).close();

    // create an under-construction file f2
    FSDataOutputStream out = fs.create(file2);
    out.writeBytes("hello");
    ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet
        .of(SyncFlag.UPDATE_LENGTH));

    // checkpoint
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

    cluster.restartNameNode();
    cluster.waitActive();
    fs = cluster.getFileSystem();

    assertTrue(fs.isDirectory(dir));
    assertTrue(fs.exists(file1));
    assertTrue(fs.exists(file2));

    // check internals of file2
    INodeFile file2Node = fsn.dir.getINode4Write(file2.toString()).asFile();
    assertEquals("hello".length(), file2Node.computeFileSize());
    assertTrue(file2Node.isUnderConstruction());
    BlockInfoContiguous[] blks = file2Node.getBlocks();
    assertEquals(1, blks.length);
    assertEquals(BlockUCState.UNDER_CONSTRUCTION, blks[0].getBlockUCState());
    // check lease manager
    Lease lease = fsn.leaseManager.getLeaseByPath(file2.toString());
    Assert.assertNotNull(lease);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example #23
Source File: HdfsDataOutputStream.java    From hadoop with Apache License 2.0
public HdfsDataOutputStream(DFSOutputStream out, FileSystem.Statistics stats,
    long startPosition) throws IOException {
  super(out, stats, startPosition);
}
 
Example #24
Source File: HdfsDataOutputStream.java    From hadoop with Apache License 2.0
public HdfsDataOutputStream(DFSOutputStream out, FileSystem.Statistics stats
    ) throws IOException {
  this(out, stats, 0L);
}
 
Example #25
Source File: TestRbwReportSafeMode.java    From RDFS with Apache License 2.0
@Test
public void testRBW() throws Exception {
  String fileName = "/testRBW";
  FSDataOutputStream out = fs.create(new Path(fileName));
  // Create RBW.
  byte[] buffer = new byte[1024 * 10 + 100];
  Random r = new Random();
  r.nextBytes(buffer);
  out.write(buffer);
  out.sync();

  cluster.restartNameNode(0, new String[] {}, false);
  ((DFSOutputStream) out.getWrappedStream()).abortForTests();

  // Send multiple RBW reports.
  waitForBlocks();
  cluster.restartDataNodes();
  Thread.sleep(10000);

  System.out.println("Restarts done");
  FSNamesystem namesystem = cluster.getNameNode().namesystem;

  long totalBlocks = namesystem.getBlocksTotal();
  long safeBlocks = namesystem.getSafeBlocks();
  long startTime = System.currentTimeMillis();
  while (totalBlocks != safeBlocks
      && (System.currentTimeMillis() - startTime < 15000)) {
    Thread.sleep(1000);
    System.out.println("Waiting for blocks, Total : " + totalBlocks
        + " Safe : " + safeBlocks);
    totalBlocks = namesystem.getBlocksTotal();
    safeBlocks = namesystem.getSafeBlocks();
  }

  assertEquals(11, totalBlocks);
  assertEquals(totalBlocks, safeBlocks);
  for (DataNode dn : cluster.getDataNodes()) {
    assertEquals(1, dn.data.getBlocksBeingWrittenReport(cluster.getNameNode()
        .getNamespaceID()).length);
  }
}
 
Example #26
Source File: TestFileAppend4.java    From RDFS with Apache License 2.0
public void testFullClusterPowerLoss() throws Exception {
  cluster = new MiniDFSCluster(conf, 2, true, null);
  FileSystem fs1 = cluster.getFileSystem();
  try {
    short rep = 2; // replication
    assertTrue(BLOCK_SIZE%4 == 0);

    file1 = new Path("/dnDeath.dat");

    // write 1/2 block & close
    stm = fs1.create(file1, true, 1024, rep, 4096);
    AppendTestUtil.write(stm, 0, 1024);
    stm.sync();
    loseLeases(fs1);

    DFSOutputStream dfso = (DFSOutputStream)stm.getWrappedStream();
    dfso.abortForTests();

    // close the DNs
    DataNodeProperties badDN = cluster.stopDataNode(0);
    DataNodeProperties badDN2 = cluster.stopDataNode(0); // what was 1 is now 0
    assertNotNull(badDN);
    assertNotNull(badDN2);

    // Truncate one of them as if its journal got corrupted
    corruptDataNode(0, CorruptionType.TRUNCATE_BLOCK_HALF);

    // Start the DN back up
    cluster.restartDataNode(badDN);
    cluster.restartDataNode(badDN2);

    // Wait for a heartbeat to make sure we get the initial block
    // report of the replicasBeingWritten
    cluster.waitForDNHeartbeat(0, 10000);
    cluster.waitForDNHeartbeat(1, 10000);

    // Recover the lease
    FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf());
    recoverFile(fs2);

    assertFileSize(fs2, 512);
    checkFile(fs2, 512);
  } finally {
    // explicitly do not shut down fs1, since it's been frozen up by
    // killing the DataStreamer and not allowing recovery
    cluster.shutdown();
  }
}
 
Example #27
Source File: HdfsDataOutputStream.java    From hadoop with Apache License 2.0
public HdfsDataOutputStream(CryptoOutputStream out, FileSystem.Statistics stats,
    long startPosition) throws IOException {
  super(out, stats, startPosition);
  Preconditions.checkArgument(out.getWrappedStream() instanceof DFSOutputStream,
      "CryptoOutputStream should wrap a DFSOutputStream");
}
 
Example #28
Source File: TestStorageMover.java    From hadoop with Apache License 2.0
/**
 * Move an open file into archival storage
 */
@Test
public void testMigrateOpenFileToArchival() throws Exception {
  LOG.info("testMigrateOpenFileToArchival");
  final Path fooDir = new Path("/foo");
  Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
  policyMap.put(fooDir, COLD);
  NamespaceScheme nsScheme = new NamespaceScheme(Arrays.asList(fooDir), null,
      BLOCK_SIZE, null, policyMap);
  ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
      NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
  MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
  test.setupCluster();

  // create an open file
  banner("writing to file /foo/bar");
  final Path barFile = new Path(fooDir, "bar");
  DFSTestUtil.createFile(test.dfs, barFile, BLOCK_SIZE, (short) 1, 0L);
  FSDataOutputStream out = test.dfs.append(barFile);
  out.writeBytes("hello, ");
  ((DFSOutputStream) out.getWrappedStream()).hsync();

  try {
    banner("start data migration");
    test.setStoragePolicy(); // set /foo to COLD
    test.migrate();

    // make sure the under construction block has not been migrated
    LocatedBlocks lbs = test.dfs.getClient().getLocatedBlocks(
        barFile.toString(), BLOCK_SIZE);
    LOG.info("Locations: " + lbs);
    List<LocatedBlock> blks = lbs.getLocatedBlocks();
    Assert.assertEquals(1, blks.size());
    Assert.assertEquals(1, blks.get(0).getLocations().length);

    banner("finish the migration, continue writing");
    // make sure the writing can continue
    out.writeBytes("world!");
    ((DFSOutputStream) out.getWrappedStream()).hsync();
    IOUtils.cleanup(LOG, out);

    lbs = test.dfs.getClient().getLocatedBlocks(
        barFile.toString(), BLOCK_SIZE);
    LOG.info("Locations: " + lbs);
    blks = lbs.getLocatedBlocks();
    Assert.assertEquals(1, blks.size());
    Assert.assertEquals(1, blks.get(0).getLocations().length);

    banner("finish writing, starting reading");
    // check the content of /foo/bar
    FSDataInputStream in = test.dfs.open(barFile);
    byte[] buf = new byte[13];
    // read from offset 1024
    in.readFully(BLOCK_SIZE, buf, 0, buf.length);
    IOUtils.cleanup(LOG, in);
    Assert.assertEquals("hello, world!", new String(buf));
  } finally {
    test.shutdownCluster();
  }
}
 
Example #29
Source File: TestDataNodeMetrics.java    From big-c with Apache License 2.0
/**
 * Tests that round-trip acks in a datanode write pipeline are correctly 
 * measured. 
 */
@Test
public void testRoundTripAckMetric() throws Exception {
  final int datanodeCount = 2;
  final int interval = 1;
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
      datanodeCount).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    // Open a file and get the head of the pipeline
    Path testFile = new Path("/testRoundTripAckMetric.txt");
    FSDataOutputStream fsout = fs.create(testFile, (short) datanodeCount);
    DFSOutputStream dout = (DFSOutputStream) fsout.getWrappedStream();
    // Slow down the writes to catch the write pipeline
    dout.setChunksPerPacket(5);
    dout.setArtificialSlowdown(3000);
    fsout.write(new byte[10000]);
    DatanodeInfo[] pipeline = null;
    int count = 0;
    while (pipeline == null && count < 5) {
      pipeline = dout.getPipeline();
      System.out.println("Waiting for pipeline to be created.");
      Thread.sleep(1000);
      count++;
    }
    // Get the head node that should be receiving downstream acks
    DatanodeInfo headInfo = pipeline[0];
    DataNode headNode = null;
    for (DataNode datanode : cluster.getDataNodes()) {
      if (datanode.getDatanodeId().equals(headInfo)) {
        headNode = datanode;
        break;
      }
    }
    assertNotNull("Could not find the head of the datanode write pipeline", 
        headNode);
    // Close the file and wait for the metrics to rollover
    Thread.sleep((interval + 1) * 1000);
    // Check the ack was received
    MetricsRecordBuilder dnMetrics = getMetrics(headNode.getMetrics()
        .name());
    assertTrue("Expected non-zero number of acks", 
        getLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0);
    assertQuantileGauges("PacketAckRoundTripTimeNanos" + interval
        + "s", dnMetrics);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example #30
Source File: TestDataNodeRollingUpgrade.java    From big-c with Apache License 2.0
@Test (timeout=600000)
// Test that DatanodeXceiver has correct peer-dataxceiver pairs for sending OOB messages
public void testDatanodePeersXceiver() throws Exception {
  try {
    startCluster();

    // Create files in DFS.
    String testFile1 = "/" + GenericTestUtils.getMethodName() + ".01.dat";
    String testFile2 = "/" + GenericTestUtils.getMethodName() + ".02.dat";
    String testFile3 = "/" + GenericTestUtils.getMethodName() + ".03.dat";

    DFSClient client1 = new DFSClient(NameNode.getAddress(conf), conf);
    DFSClient client2 = new DFSClient(NameNode.getAddress(conf), conf);
    DFSClient client3 = new DFSClient(NameNode.getAddress(conf), conf);

    DFSOutputStream s1 = (DFSOutputStream) client1.create(testFile1, true);
    DFSOutputStream s2 = (DFSOutputStream) client2.create(testFile2, true);
    DFSOutputStream s3 = (DFSOutputStream) client3.create(testFile3, true);

    byte[] toWrite = new byte[1024*1024*8];
    Random rb = new Random(1111);
    rb.nextBytes(toWrite);
    s1.write(toWrite, 0, 1024*1024*8);
    s1.flush();
    s2.write(toWrite, 0, 1024*1024*8);
    s2.flush();
    s3.write(toWrite, 0, 1024*1024*8);
    s3.flush();       

    assertTrue(dn0.getXferServer().getNumPeersXceiver() == dn0.getXferServer()
        .getNumPeersXceiver());
    s1.close();
    s2.close();
    s3.close();
    assertTrue(dn0.getXferServer().getNumPeersXceiver() == dn0.getXferServer()
        .getNumPeersXceiver());
    client1.close();
    client2.close();
    client3.close();      
  } finally {
    shutdownCluster();
  }
}