org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag Java Examples

The following examples show how to use org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag. Each example is taken from a real project; the source file, originating project, and license are noted above the code.
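
Before the project examples, here is a minimal, hypothetical sketch (it is not taken from any of the projects below, and the class name, path, and configuration are placeholders) of the pattern most of the examples follow: write to an HDFS output stream, then pass SyncFlag.UPDATE_LENGTH to hsync so that the NameNode also records the new file length, falling back to the plain hsync() when the stream is not an HdfsDataOutputStream.

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

public class SyncFlagSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS cluster; against a non-HDFS filesystem the
    // instanceof check below fails and the code falls back to the generic hsync().
    FileSystem fs = FileSystem.get(conf);
    FSDataOutputStream out = fs.create(new Path("/tmp/syncflag-sketch"));
    try {
      out.writeBytes("some data");
      if (out instanceof HdfsDataOutputStream) {
        // Sync the data to the DataNodes and update the file length on the NameNode.
        ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
      } else {
        out.hsync();
      }
    } finally {
      out.close();
    }
  }
}

Without UPDATE_LENGTH, hsync() persists the data on the DataNodes but does not update the file length recorded on the NameNode, which is why most of the examples below pass the flag explicitly.
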
Example #1
Source File: TestRetryCacheWithHA.java    From hadoop with Apache License 2.0
@Override
void prepare() throws Exception {
  final Path filePath = new Path(file);
  DFSTestUtil.createFile(dfs, filePath, BlockSize, DataNodes, 0);
  // append to the file and leave the last block under construction
  out = this.client.append(file, BlockSize, EnumSet.of(CreateFlag.APPEND),
      null, null);
  byte[] appendContent = new byte[100];
  new Random().nextBytes(appendContent);
  out.write(appendContent);
  ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  
  LocatedBlocks blks = dfs.getClient()
      .getLocatedBlocks(file, BlockSize + 1);
  assertEquals(1, blks.getLocatedBlocks().size());
  nodes = blks.get(0).getLocations();
  oldBlock = blks.get(0).getBlock();
  
  LocatedBlock newLbk = client.getNamenode().updateBlockForPipeline(
      oldBlock, client.getClientName());
  newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(),
      oldBlock.getBlockId(), oldBlock.getNumBytes(), 
      newLbk.getBlock().getGenerationStamp());
}
 
Example #2
Source File: SequenceFile.java    From gemfirexd-oss with Apache License 2.0
public void hsyncWithSizeUpdate() throws IOException {
  if (out != null) {
    if (out instanceof HdfsDataOutputStream) {
      try {
        ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
      } catch (NoSuchMethodError e) {
        // We are probably running against older hadoop jars that do not have the
        // hsync(SyncFlag) overload. Fall back to the hsync variant that does not update the size.
        out.hsync();
      }
    }
    else {
        out.hsync();
    }
  }
}
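
The NoSuchMethodError fallback above is a binary-compatibility guard: the code is compiled against a Hadoop version that has the hsync(EnumSet<SyncFlag>) overload, so when the same jar runs against older Hadoop libraries that predate the overload the JVM raises NoSuchMethodError at the call site, and the method degrades to the plain hsync() that does not update the length.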
 
Example #3
Source File: TestRenameWithSnapshots.java    From hadoop with Apache License 2.0
/**
 * Similar to testRenameUCFileInSnapshot, but does the rename first and then
 * appends to the file without closing it. Unit test for HDFS-5425.
 */
@Test
public void testAppendFileAfterRenameInSnapshot() throws Exception {
  final Path test = new Path("/test");
  final Path foo = new Path(test, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
  // rename bar --> bar2
  final Path bar2 = new Path(foo, "bar2");
  hdfs.rename(bar, bar2);
  // append to the file and keep it under construction.
  FSDataOutputStream out = hdfs.append(bar2);
  out.writeByte(0);
  ((DFSOutputStream) out.getWrappedStream()).hsync(
      EnumSet.of(SyncFlag.UPDATE_LENGTH));

  // save namespace and restart
  restartClusterAndCheckImage(true);
}
 
Example #4
Source File: TestRetryCacheWithHA.java    From big-c with Apache License 2.0
@Override
void prepare() throws Exception {
  final Path filePath = new Path(file);
  DFSTestUtil.createFile(dfs, filePath, BlockSize, DataNodes, 0);
  // append to the file and leave the last block under construction
  out = this.client.append(file, BlockSize, EnumSet.of(CreateFlag.APPEND),
      null, null);
  byte[] appendContent = new byte[100];
  new Random().nextBytes(appendContent);
  out.write(appendContent);
  ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  
  LocatedBlocks blks = dfs.getClient()
      .getLocatedBlocks(file, BlockSize + 1);
  assertEquals(1, blks.getLocatedBlocks().size());
  nodes = blks.get(0).getLocations();
  oldBlock = blks.get(0).getBlock();
  
  LocatedBlock newLbk = client.getNamenode().updateBlockForPipeline(
      oldBlock, client.getClientName());
  newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(),
      oldBlock.getBlockId(), oldBlock.getNumBytes(), 
      newLbk.getBlock().getGenerationStamp());
}
 
Example #5
Source File: TestHFlush.java    From big-c with Apache License 2.0
/**
 * Test hsync (with updating the block length in the NameNode) while no data
 * has actually been written yet.
 */
@Test
public void hSyncUpdateLength_00() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
      2).build();
  DistributedFileSystem fileSystem =
      cluster.getFileSystem();
  
  try {
    Path path = new Path(fName);
    FSDataOutputStream stm = fileSystem.create(path, true, 4096, (short) 2,
        AppendTestUtil.BLOCK_SIZE);
    System.out.println("Created file " + path.toString());
    ((DFSOutputStream) stm.getWrappedStream()).hsync(EnumSet
        .of(SyncFlag.UPDATE_LENGTH));
    long currentFileLength = fileSystem.getFileStatus(path).getLen();
    assertEquals(0L, currentFileLength);
    stm.close();
  } finally {
    fileSystem.close();
    cluster.shutdown();
  }
}
 
Example #6
Source File: TestRenameWithSnapshots.java    From big-c with Apache License 2.0
/**
 * Similar to testRenameUCFileInSnapshot, but does the rename first and then
 * appends to the file without closing it. Unit test for HDFS-5425.
 */
@Test
public void testAppendFileAfterRenameInSnapshot() throws Exception {
  final Path test = new Path("/test");
  final Path foo = new Path(test, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
  // rename bar --> bar2
  final Path bar2 = new Path(foo, "bar2");
  hdfs.rename(bar, bar2);
  // append to the file and keep it under construction.
  FSDataOutputStream out = hdfs.append(bar2);
  out.writeByte(0);
  ((DFSOutputStream) out.getWrappedStream()).hsync(
      EnumSet.of(SyncFlag.UPDATE_LENGTH));

  // save namespace and restart
  restartClusterAndCheckImage(true);
}
 
Example #7
Source File: TestHFlush.java    From hadoop with Apache License 2.0
/**
 * Test hsync (with updating the block length in the NameNode) while no data
 * has actually been written yet.
 */
@Test
public void hSyncUpdateLength_00() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
      2).build();
  DistributedFileSystem fileSystem =
      cluster.getFileSystem();
  
  try {
    Path path = new Path(fName);
    FSDataOutputStream stm = fileSystem.create(path, true, 4096, (short) 2,
        AppendTestUtil.BLOCK_SIZE);
    System.out.println("Created file " + path.toString());
    ((DFSOutputStream) stm.getWrappedStream()).hsync(EnumSet
        .of(SyncFlag.UPDATE_LENGTH));
    long currentFileLength = fileSystem.getFileStatus(path).getLen();
    assertEquals(0L, currentFileLength);
    stm.close();
  } finally {
    fileSystem.close();
    cluster.shutdown();
  }
}
 
Example #8
Source File: TestHFlush.java    From big-c with Apache License 2.0
/**
 * The test uses
 * {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)} 
 * to write a file with a custom block size so the writes will happen
 * across block and checksum boundaries.
 */
@Test
public void hFlush_03() throws IOException {
  Configuration conf = new HdfsConfiguration();
  int customPerChecksumSize = 400;
  int customBlockSize = customPerChecksumSize * 3;
  // Modify default filesystem settings
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);

  doTheJob(conf, fName, customBlockSize, (short) 2, false,
      EnumSet.noneOf(SyncFlag.class));
}
 
Example #9
Source File: OpenFileCtx.java    From big-c with Apache License 2.0
/**
 * Check the commit status with the given offset
 * @param commitOffset the offset to commit
 * @param channel the channel to return response
 * @param xid the xid of the commit request
 * @param preOpAttr the preOp attribute
 * @param fromRead whether the commit is triggered from read request
 * @return one commit status: COMMIT_FINISHED, COMMIT_WAIT,
 * COMMIT_INACTIVE_CTX, COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR
 */
public COMMIT_STATUS checkCommit(DFSClient dfsClient, long commitOffset,
    Channel channel, int xid, Nfs3FileAttributes preOpAttr, boolean fromRead) {
  if (!fromRead) {
    Preconditions.checkState(channel != null && preOpAttr != null);
    // Keep stream active
    updateLastAccessTime();
  }
  Preconditions.checkState(commitOffset >= 0);

  COMMIT_STATUS ret = checkCommitInternal(commitOffset, channel, xid,
      preOpAttr, fromRead);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Got commit status: " + ret.name());
  }
  // Do the sync outside the lock
  if (ret == COMMIT_STATUS.COMMIT_DO_SYNC
      || ret == COMMIT_STATUS.COMMIT_FINISHED) {
    try {
      // Sync file data and length
      fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
      ret = COMMIT_STATUS.COMMIT_FINISHED; // Remove COMMIT_DO_SYNC status 
      // Nothing to do for metadata since attr related change is pass-through
    } catch (ClosedChannelException cce) {
      if (pendingWrites.isEmpty()) {
        ret = COMMIT_STATUS.COMMIT_FINISHED;
      } else {
        ret = COMMIT_STATUS.COMMIT_ERROR;
      }
    } catch (IOException e) {
      LOG.error("Got stream error during data sync: " + e);
      // Do nothing. Stream will be closed eventually by StreamMonitor.
      // status = Nfs3Status.NFS3ERR_IO;
      ret = COMMIT_STATUS.COMMIT_ERROR;
    }
  }
  return ret;
}
 
Example #10
Source File: OpenFileCtx.java    From hadoop with Apache License 2.0
/**
 * Check the commit status with the given offset
 * @param commitOffset the offset to commit
 * @param channel the channel to return response
 * @param xid the xid of the commit request
 * @param preOpAttr the preOp attribute
 * @param fromRead whether the commit is triggered from read request
 * @return one commit status: COMMIT_FINISHED, COMMIT_WAIT,
 * COMMIT_INACTIVE_CTX, COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR
 */
public COMMIT_STATUS checkCommit(DFSClient dfsClient, long commitOffset,
    Channel channel, int xid, Nfs3FileAttributes preOpAttr, boolean fromRead) {
  if (!fromRead) {
    Preconditions.checkState(channel != null && preOpAttr != null);
    // Keep stream active
    updateLastAccessTime();
  }
  Preconditions.checkState(commitOffset >= 0);

  COMMIT_STATUS ret = checkCommitInternal(commitOffset, channel, xid,
      preOpAttr, fromRead);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Got commit status: " + ret.name());
  }
  // Do the sync outside the lock
  if (ret == COMMIT_STATUS.COMMIT_DO_SYNC
      || ret == COMMIT_STATUS.COMMIT_FINISHED) {
    try {
      // Sync file data and length
      fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
      ret = COMMIT_STATUS.COMMIT_FINISHED; // Remove COMMIT_DO_SYNC status 
      // Nothing to do for metadata since attr related change is pass-through
    } catch (ClosedChannelException cce) {
      if (pendingWrites.isEmpty()) {
        ret = COMMIT_STATUS.COMMIT_FINISHED;
      } else {
        ret = COMMIT_STATUS.COMMIT_ERROR;
      }
    } catch (IOException e) {
      LOG.error("Got stream error during data sync: " + e);
      // Do nothing. Stream will be closed eventually by StreamMonitor.
      // status = Nfs3Status.NFS3ERR_IO;
      ret = COMMIT_STATUS.COMMIT_ERROR;
    }
  }
  return ret;
}
 
Example #11
Source File: DFSOutputStream.java    From big-c with Apache License 2.0
@Override
public void hsync() throws IOException {
  TraceScope scope =
      dfsClient.getPathTraceScope("hsync", src);
  try {
    flushOrSync(true, EnumSet.noneOf(SyncFlag.class));
  } finally {
    scope.close();
  }
}
 
Example #12
Source File: TestFSImageWithSnapshot.java    From big-c with Apache License 2.0
/**
 * Test fsimage loading while there is a file under construction.
 */
@Test (timeout=60000)
public void testLoadImageWithAppending() throws Exception {
  Path sub1 = new Path(dir, "sub1");
  Path sub1file1 = new Path(sub1, "sub1file1");
  Path sub1file2 = new Path(sub1, "sub1file2");
  DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
  
  hdfs.allowSnapshot(dir);
  hdfs.createSnapshot(dir, "s0");
  
  HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));      
  
  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
 
Example #13
Source File: TestAddBlock.java    From big-c with Apache License 2.0
/**
 * Test adding new blocks without closing the corresponding file.
 */
@Test
public void testAddBlockUC() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path file1 = new Path("/file1");
  DFSTestUtil.createFile(fs, file1, BLOCKSIZE - 1, REPLICATION, 0L);
  
  FSDataOutputStream out = null;
  try {
    // append files without closing the streams
    out = fs.append(file1);
    String appendContent = "appending-content";
    out.writeBytes(appendContent);
    ((DFSOutputStream) out.getWrappedStream()).hsync(
        EnumSet.of(SyncFlag.UPDATE_LENGTH));
    
    // restart NN
    cluster.restartNameNode(true);
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    
    INodeFile fileNode = fsdir.getINode4Write(file1.toString()).asFile();
    BlockInfoContiguous[] fileBlocks = fileNode.getBlocks();
    assertEquals(2, fileBlocks.length);
    assertEquals(BLOCKSIZE, fileBlocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, fileBlocks[0].getBlockUCState());
    assertEquals(appendContent.length() - 1, fileBlocks[1].getNumBytes());
    assertEquals(BlockUCState.UNDER_CONSTRUCTION,
        fileBlocks[1].getBlockUCState());
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
 
Example #14
Source File: TestHFlush.java    From big-c with Apache License 2.0
/**
 * The test uses
 * {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)} 
 * to write a file with a custom block size so the writes will happen
 * across block boundaries.
 */
@Test
public void hFlush_02() throws IOException {
  Configuration conf = new HdfsConfiguration();
  int customPerChecksumSize = 512;
  int customBlockSize = customPerChecksumSize * 3;
  // Modify default filesystem settings
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);

  doTheJob(conf, fName, customBlockSize, (short) 2, false,
      EnumSet.noneOf(SyncFlag.class));
}
 
Example #15
Source File: DFSOutputStream.java    From big-c with Apache License 2.0
/**
 * Flushes out to all replicas of the block. The data is in the buffers
 * of the DNs but not necessarily in the DN's OS buffers.
 *
 * It is a synchronous operation. When it returns,
 * it guarantees that flushed data become visible to new readers. 
 * It is not guaranteed that data has been flushed to 
 * persistent store on the datanode. 
 * Block allocations are persisted on namenode.
 */
@Override
public void hflush() throws IOException {
  TraceScope scope =
      dfsClient.getPathTraceScope("hflush", src);
  try {
    flushOrSync(false, EnumSet.noneOf(SyncFlag.class));
  } finally {
    scope.close();
  }
}
 
Example #16
Source File: TestHFlush.java    From big-c with Apache License 2.0
/**
 * The test calls
 * {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}
 * while requiring the semantic of {@link SyncFlag#UPDATE_LENGTH}.
 * Similar to {@link #hFlush_02()}, it writes a file with a custom block
 * size so the writes will happen across block boundaries.
 */
@Test
public void hSyncUpdateLength_02() throws IOException {
  Configuration conf = new HdfsConfiguration();
  int customPerChecksumSize = 512;
  int customBlockSize = customPerChecksumSize * 3;
  // Modify default filesystem settings
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);

  doTheJob(conf, fName, customBlockSize, (short) 2, true,
      EnumSet.of(SyncFlag.UPDATE_LENGTH));
}
 
Example #17
Source File: TestHFlush.java    From big-c with Apache License 2.0
@Test
public void hSyncEndBlock_02() throws IOException {
  Configuration conf = new HdfsConfiguration();
  int customPerChecksumSize = 512;
  int customBlockSize = customPerChecksumSize * 3;
  // Modify default filesystem settings
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);

  doTheJob(conf, fName, customBlockSize, (short) 2, true,
      EnumSet.of(SyncFlag.END_BLOCK));
}
 
Example #18
Source File: TestHFlush.java    From big-c with Apache License 2.0
/**
 * The test calls
 * {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}
 * while requiring the semantic of {@link SyncFlag#UPDATE_LENGTH}.
 * Similar to {@link #hFlush_03()}, it writes a file with a custom block
 * size so the writes will happen across block and checksum boundaries.
 */
@Test
public void hSyncUpdateLength_03() throws IOException {
  Configuration conf = new HdfsConfiguration();
  int customPerChecksumSize = 400;
  int customBlockSize = customPerChecksumSize * 3;
  // Modify default filesystem settings
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);

  doTheJob(conf, fName, customBlockSize, (short) 2, true,
      EnumSet.of(SyncFlag.UPDATE_LENGTH));
}
 
Example #19
Source File: TestHFlush.java    From big-c with Apache License 2.0
@Test
public void hSyncEndBlock_03() throws IOException {
  Configuration conf = new HdfsConfiguration();
  int customPerChecksumSize = 400;
  int customBlockSize = customPerChecksumSize * 3;
  // Modify default filesystem settings
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);

  doTheJob(conf, fName, customBlockSize, (short) 2, true,
      EnumSet.of(SyncFlag.END_BLOCK));
}
 
Example #20
Source File: HdfsBolt.java    From storm-hdfs with Apache License 2.0
@Override
public void execute(Tuple tuple) {
    try {
        byte[] bytes = this.format.format(tuple);
        synchronized (this.writeLock) {
            out.write(bytes);
            this.offset += bytes.length;

            if (this.syncPolicy.mark(tuple, this.offset)) {
                if (this.out instanceof HdfsDataOutputStream) {
                    ((HdfsDataOutputStream) this.out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
                } else {
                    this.out.hsync();
                }
                this.syncPolicy.reset();
            }
        }

        this.collector.ack(tuple);

        if(this.rotationPolicy.mark(tuple, this.offset)){
            rotateOutputFile(); // synchronized
            this.offset = 0;
            this.rotationPolicy.reset();
        }
    } catch (IOException e) {
        LOG.warn("write/sync failed.", e);
        this.collector.fail(tuple);
    }
}
 
Example #21
Source File: HdfsBolt.java    From jstorm with Apache License 2.0
@Override
protected void syncTuples() throws IOException {
    LOG.debug("Attempting to sync all data to filesystem");
    if (this.out instanceof HdfsDataOutputStream) {
        ((HdfsDataOutputStream) this.out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    } else {
        this.out.hsync();
    }
}
 
Example #22
Source File: TestHFlush.java    From hadoop with Apache License 2.0
/**
 * The test uses
 * {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)} 
 * to write a file with a custom block size so the writes will happen
 * across block boundaries.
 */
@Test
public void hFlush_02() throws IOException {
  Configuration conf = new HdfsConfiguration();
  int customPerChecksumSize = 512;
  int customBlockSize = customPerChecksumSize * 3;
  // Modify default filesystem settings
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);

  doTheJob(conf, fName, customBlockSize, (short) 2, false,
      EnumSet.noneOf(SyncFlag.class));
}
 
Example #23
Source File: DFSOutputStream.java    From hadoop with Apache License 2.0
/**
 * Flushes out to all replicas of the block. The data is in the buffers
 * of the DNs but not necessarily in the DN's OS buffers.
 *
 * It is a synchronous operation. When it returns,
 * it guarantees that flushed data become visible to new readers. 
 * It is not guaranteed that data has been flushed to 
 * persistent store on the datanode. 
 * Block allocations are persisted on namenode.
 */
@Override
public void hflush() throws IOException {
  TraceScope scope =
      dfsClient.getPathTraceScope("hflush", src);
  try {
    flushOrSync(false, EnumSet.noneOf(SyncFlag.class));
  } finally {
    scope.close();
  }
}
 
Example #24
Source File: DFSOutputStream.java    From hadoop with Apache License 2.0
@Override
public void hsync() throws IOException {
  TraceScope scope =
      dfsClient.getPathTraceScope("hsync", src);
  try {
    flushOrSync(true, EnumSet.noneOf(SyncFlag.class));
  } finally {
    scope.close();
  }
}
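
Read together, the two DFSOutputStream examples above show that hflush() and hsync() take the same path through flushOrSync; the difference is the boolean argument (false flushes the data to the DataNodes, true additionally syncs it to disk). Both pass an empty SyncFlag set, so neither updates the length on the NameNode, which is why callers that need the length updated use the hsync overload taking EnumSet.of(SyncFlag.UPDATE_LENGTH), as in the other examples.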
 
Example #25
Source File: TestFSImageWithSnapshot.java    From hadoop with Apache License 2.0
/**
 * Test fsimage loading while there is a file under construction.
 */
@Test (timeout=60000)
public void testLoadImageWithAppending() throws Exception {
  Path sub1 = new Path(dir, "sub1");
  Path sub1file1 = new Path(sub1, "sub1file1");
  Path sub1file2 = new Path(sub1, "sub1file2");
  DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
  
  hdfs.allowSnapshot(dir);
  hdfs.createSnapshot(dir, "s0");
  
  HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));      
  
  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
 
Example #26
Source File: TestAddBlock.java    From hadoop with Apache License 2.0
/**
 * Test adding new blocks without closing the corresponding file.
 */
@Test
public void testAddBlockUC() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path file1 = new Path("/file1");
  DFSTestUtil.createFile(fs, file1, BLOCKSIZE - 1, REPLICATION, 0L);
  
  FSDataOutputStream out = null;
  try {
    // append files without closing the streams
    out = fs.append(file1);
    String appendContent = "appending-content";
    out.writeBytes(appendContent);
    ((DFSOutputStream) out.getWrappedStream()).hsync(
        EnumSet.of(SyncFlag.UPDATE_LENGTH));
    
    // restart NN
    cluster.restartNameNode(true);
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    
    INodeFile fileNode = fsdir.getINode4Write(file1.toString()).asFile();
    BlockInfoContiguous[] fileBlocks = fileNode.getBlocks();
    assertEquals(2, fileBlocks.length);
    assertEquals(BLOCKSIZE, fileBlocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, fileBlocks[0].getBlockUCState());
    assertEquals(appendContent.length() - 1, fileBlocks[1].getNumBytes());
    assertEquals(BlockUCState.UNDER_CONSTRUCTION,
        fileBlocks[1].getBlockUCState());
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
 
Example #27
Source File: TestHFlush.java    From hadoop with Apache License 2.0
@Test
public void hSyncEndBlock_03() throws IOException {
  Configuration conf = new HdfsConfiguration();
  int customPerChecksumSize = 400;
  int customBlockSize = customPerChecksumSize * 3;
  // Modify default filesystem settings
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);

  doTheJob(conf, fName, customBlockSize, (short) 2, true,
      EnumSet.of(SyncFlag.END_BLOCK));
}
 
Example #28
Source File: TestHFlush.java    From hadoop with Apache License 2.0
/**
 * The test uses
 * {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)} 
 * to write a file with a custom block size so the writes will happen
 * across block and checksum boundaries.
 */
@Test
public void hFlush_03() throws IOException {
  Configuration conf = new HdfsConfiguration();
  int customPerChecksumSize = 400;
  int customBlockSize = customPerChecksumSize * 3;
  // Modify default filesystem settings
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);

  doTheJob(conf, fName, customBlockSize, (short) 2, false,
      EnumSet.noneOf(SyncFlag.class));
}
 
Example #29
Source File: TestHFlush.java    From hadoop with Apache License 2.0
/**
 * The test calls
 * {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}
 * while requiring the semantic of {@link SyncFlag#UPDATE_LENGTH}.
 * Similar to {@link #hFlush_03()}, it writes a file with a custom block
 * size so the writes will happen across block and checksum boundaries.
 */
@Test
public void hSyncUpdateLength_03() throws IOException {
  Configuration conf = new HdfsConfiguration();
  int customPerChecksumSize = 400;
  int customBlockSize = customPerChecksumSize * 3;
  // Modify default filesystem settings
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);

  doTheJob(conf, fName, customBlockSize, (short) 2, true,
      EnumSet.of(SyncFlag.UPDATE_LENGTH));
}