Java Code Examples for org.apache.hadoop.fs.FileSystem.append()

The following are Java code examples showing how to use the append() method of the org.apache.hadoop.fs.FileSystem class. The examples are drawn from open source projects.
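For orientation, the pattern common to all of the examples below is: obtain a FileSystem for the target Path, call append() to get an FSDataOutputStream positioned at the end of an existing file, write, and close the stream. The snippet below is a minimal sketch of that pattern (the class name, path, and written bytes are placeholders, not taken from any of the projects); note that append() throws an IOException if the file does not exist or if the underlying file system does not support append.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path p = new Path("/tmp/append-sketch.log");   // placeholder path
    FileSystem fs = p.getFileSystem(conf);

    // append() requires an existing file; fall back to create() otherwise.
    FSDataOutputStream out = fs.exists(p) ? fs.append(p) : fs.create(p);
    try {
      out.write("one more record\n".getBytes("UTF-8"));
      out.hflush();   // make the appended bytes visible to new readers
    } finally {
      out.close();
    }
    fs.close();
  }
}
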
Example 1
Project: flume-release-1.7.0   File: HDFSSequenceFile.java
protected void open(Path dstPath, CompressionCodec codeC,
    CompressionType compType, Configuration conf, FileSystem hdfs)
        throws IOException {
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem)hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }
  if (conf.getBoolean("hdfs.append.support", false) == true && hdfs.isFile(dstPath)) {
    outStream = hdfs.append(dstPath);
  } else {
    outStream = hdfs.create(dstPath);
  }
  writer = SequenceFile.createWriter(conf, outStream,
      serializer.getKeyClass(), serializer.getValueClass(), compType, codeC);

  registerCurrentStream(outStream, hdfs, dstPath);
}
 
Example 2
Project: hadoop   File: TestFileAppendRestart.java
private void writeAndAppend(FileSystem fs, Path p,
    int lengthForCreate, int lengthForAppend) throws IOException {
  // Creating a file with 4096 blockSize to write multiple blocks
  FSDataOutputStream stream = fs.create(
      p, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
  try {
    AppendTestUtil.write(stream, 0, lengthForCreate);
    stream.close();
    
    stream = fs.append(p);
    AppendTestUtil.write(stream, lengthForCreate, lengthForAppend);
    stream.close();
  } finally {
    IOUtils.closeStream(stream);
  }
  
  int totalLength = lengthForCreate + lengthForAppend; 
  assertEquals(totalLength, fs.getFileStatus(p).getLen());
}
 
Example 3
Project: hadoop   File: TestFileAppend.java
/**
 * FileNotFoundException is expected for appending to a non-existing file
 * 
 * @throws FileNotFoundException as the result
 */
@Test(expected = FileNotFoundException.class)
public void testFileNotFound() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    Path file1 = new Path("/nonexistingfile.dat");
    fs.append(file1);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 4
Project: hadoop   File: TestAppendDifferentChecksum.java
private void appendWithTwoFs(Path p, FileSystem fs1, FileSystem fs2)
    throws IOException {
  FSDataOutputStream stm = fs1.create(p);
  try {
    AppendTestUtil.write(stm, 0, SEGMENT_LENGTH);
  } finally {
    stm.close();
  }
  
  stm = fs2.append(p);
  try {
    AppendTestUtil.write(stm, SEGMENT_LENGTH, SEGMENT_LENGTH);
  } finally {
    stm.close();
  }    
}
 
Example 5
Project: hadoop   File: TestReadWhileWriting.java
/** Try opening a file for append. */
private static FSDataOutputStream append(FileSystem fs, Path p) throws Exception {
  for(int i = 0; i < 10; i++) {
    try {
      return fs.append(p);
    } catch(RemoteException re) {
      if (re.getClassName().equals(RecoveryInProgressException.class.getName())) {
        AppendTestUtil.LOG.info("Will sleep and retry, i=" + i +", p="+p, re);
        Thread.sleep(1000);
      }
      else
        throw re;
    }
  }
  throw new IOException("Cannot append to " + p);
}
 
Example 6
Project: hadoop-oss   File: RollingFileSystemSink.java
/**
 * Test whether the file system supports append and return the answer.
 * @param fs the target file system
 */
private boolean checkAppend(FileSystem fs) {
  boolean canAppend = true;

  try {
    fs.append(basePath);
  } catch (IOException ex) {
    if (ex.getMessage().equals("Not supported")) {
      canAppend = false;
    }
  }

  return canAppend;
}
 
Example 7
Project: kafka-connect-hdfs   File: WALFile.java
Writer(Configuration conf, Option... opts) throws IOException {
  BlockSizeOption blockSizeOption =
      Options.getOption(BlockSizeOption.class, opts);
  BufferSizeOption bufferSizeOption =
      Options.getOption(BufferSizeOption.class, opts);
  ReplicationOption replicationOption =
      Options.getOption(ReplicationOption.class, opts);

  FileOption fileOption = Options.getOption(FileOption.class, opts);
  AppendIfExistsOption appendIfExistsOption = Options.getOption(
      AppendIfExistsOption.class, opts);
  StreamOption streamOption = Options.getOption(StreamOption.class, opts);

  // check consistency of options
  if ((fileOption == null) == (streamOption == null)) {
    throw new IllegalArgumentException("file or stream must be specified");
  }
  if (fileOption == null && (blockSizeOption != null ||
                             bufferSizeOption != null ||
                             replicationOption != null)) {
    throw new IllegalArgumentException("file modifier options not " +
                                       "compatible with stream");
  }

  FSDataOutputStream out;
  boolean ownStream = fileOption != null;
  if (ownStream) {
    Path p = fileOption.getValue();
    FileSystem fs;
    fs = p.getFileSystem(conf);
    int bufferSize = bufferSizeOption == null ? getBufferSize(conf) :
                     bufferSizeOption.getValue();
    short replication = replicationOption == null ?
                        fs.getDefaultReplication(p) :
                        (short) replicationOption.getValue();
    long blockSize = blockSizeOption == null ? fs.getDefaultBlockSize(p) :
                     blockSizeOption.getValue();

    if (appendIfExistsOption != null && appendIfExistsOption.getValue()
        && fs.exists(p)) {
      // Read the file and verify header details
      try (WALFile.Reader reader =
               new WALFile.Reader(conf, WALFile.Reader.file(p), new Reader.OnlyHeaderOption())){
        if (reader.getVersion() != VERSION[3]) {
          throw new VersionMismatchException(VERSION[3], reader.getVersion());
        }
        sync = reader.getSync();
      }
      out = fs.append(p, bufferSize);
      this.appendMode = true;
    } else {
      out = fs.create(p, true, bufferSize, replication, blockSize);
    }
  } else {
    out = streamOption.getValue();
  }

  init(conf, out, ownStream);
}
 
Example 8
Project: flume-release-1.7.0   File: HDFSDataStream.java
protected void doOpen(Configuration conf, Path dstPath, FileSystem hdfs) throws IOException {
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem)hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }

  boolean appending = false;
  if (conf.getBoolean("hdfs.append.support", false) == true && hdfs.isFile(dstPath)) {
    outStream = hdfs.append(dstPath);
    appending = true;
  } else {
    outStream = hdfs.create(dstPath);
  }

  serializer = EventSerializerFactory.getInstance(
      serializerType, serializerContext, outStream);
  if (appending && !serializer.supportsReopen()) {
    outStream.close();
    serializer = null;
    throw new IOException("serializer (" + serializerType +
        ") does not support append");
  }

  // must call superclass to check for replication issues
  registerCurrentStream(outStream, hdfs, dstPath);

  if (appending) {
    serializer.afterReopen();
  } else {
    serializer.afterCreate();
  }
}
 
Example 9
Project: Transwarp-Sample-Code   File: HDFSDataStream.java
protected void doOpen(Configuration conf,
  Path dstPath, FileSystem hdfs) throws
  IOException {
  if(useRawLocalFileSystem) {
    if(hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem)hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }

  boolean appending = false;
  if (conf.getBoolean("hdfs.append.support", false) == true && hdfs.isFile
          (dstPath)) {
    outStream = hdfs.append(dstPath);
    appending = true;
  } else {
    outStream = hdfs.create(dstPath);
  }

  serializer = EventSerializerFactory.getInstance(
      serializerType, serializerContext, outStream);
  if (appending && !serializer.supportsReopen()) {
    outStream.close();
    serializer = null;
    throw new IOException("serializer (" + serializerType +
        ") does not support append");
  }

  // must call superclass to check for replication issues
  registerCurrentStream(outStream, hdfs, dstPath);

  if (appending) {
    serializer.afterReopen();
  } else {
    serializer.afterCreate();
  }
}
 
Example 10
Project: hadoop   File: TestFileAppend.java
/** Test two consecutive appends on a file with a full block. */
@Test
public void testAppendTwice() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final FileSystem fs1 = cluster.getFileSystem();
  final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
  try {

    final Path p = new Path("/testAppendTwice/foo");
    final int len = 1 << 16;
    final byte[] fileContents = AppendTestUtil.initBuffer(len);

    {
      // create a new file with a full block.
      FSDataOutputStream out = fs2.create(p, true, 4096, (short)1, len);
      out.write(fileContents, 0, len);
      out.close();
    }

    //1st append does not add any data so that the last block remains full
    //and the last block in INodeFileUnderConstruction is a BlockInfo
    //but not BlockInfoUnderConstruction. 
    fs2.append(p);
    
    //2nd append should get AlreadyBeingCreatedException
    fs1.append(p);
    Assert.fail();
  } catch(RemoteException re) {
    AppendTestUtil.LOG.info("Got an exception:", re);
    Assert.assertEquals(AlreadyBeingCreatedException.class.getName(),
        re.getClassName());
  } finally {
    fs2.close();
    fs1.close();
    cluster.shutdown();
  }
}
 
Example 11
Project: hadoop   File: TestAppendDifferentChecksum.java
/**
 * Test which randomly alternates between appending with
 * CRC32 and with CRC32C, crossing several block boundaries.
 * Then, checks that all of the data can be read back correctly.
 */
@Test(timeout=RANDOM_TEST_RUNTIME*2)
public void testAlgoSwitchRandomized() throws IOException {
  FileSystem fsWithCrc32 = createFsWithChecksum("CRC32", 512);
  FileSystem fsWithCrc32C = createFsWithChecksum("CRC32C", 512);

  Path p = new Path("/testAlgoSwitchRandomized");
  long seed = Time.now();
  System.out.println("seed: " + seed);
  Random r = new Random(seed);
  
  // Create empty to start
  IOUtils.closeStream(fsWithCrc32.create(p));
  
  long st = Time.now();
  int len = 0;
  while (Time.now() - st < RANDOM_TEST_RUNTIME) {
    int thisLen = r.nextInt(500);
    FileSystem fs = (r.nextBoolean() ? fsWithCrc32 : fsWithCrc32C);
    FSDataOutputStream stm = fs.append(p);
    try {
      AppendTestUtil.write(stm, len, thisLen);
    } finally {
      stm.close();
    }
    len += thisLen;
  }
  
  AppendTestUtil.check(fsWithCrc32, p, len);
  AppendTestUtil.check(fsWithCrc32C, p, len);
}
 
Example 12
Project: hadoop   File: TestFileAppend4.java
private void recoverFile(final FileSystem fs) throws Exception {
  LOG.info("Recovering File Lease");

  // set the soft limit to be 1 second so that the
  // namenode triggers lease recovery upon append request
  cluster.setLeasePeriod(1000, HdfsConstants.LEASE_HARDLIMIT_PERIOD);

  // Trying recovery
  int tries = 60;
  boolean recovered = false;
  FSDataOutputStream out = null;
  while (!recovered && tries-- > 0) {
    try {
      out = fs.append(file1);
      LOG.info("Successfully opened for append");
      recovered = true;
    } catch (IOException e) {
      LOG.info("Failed open for append, waiting on lease recovery");
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ex) {
        // ignore it and try again
      }
    }
  }
  if (out != null) {
    out.close();
  }
  if (!recovered) {
    fail("Recovery should take < 1 min");
  }
  LOG.info("Past out lease recovery");
}
 
Example 13
Project: hadoop   File: TestPersistBlocks.java
/**
 * Earlier versions of HDFS didn't persist block allocation to the edit log.
 * This makes sure that we can still load an edit log when the OP_CLOSE
 * is the opcode which adds all of the blocks. This is a regression
 * test for HDFS-2773.
 * This test uses a tarred pseudo-distributed cluster from Hadoop 1.0
 * which has a multi-block file. This is similar to the tests in
 * {@link TestDFSUpgradeFromImage} but none of those images include
 * a multi-block file.
 */
@Test
public void testEarlierVersionEditLog() throws Exception {
  final Configuration conf = new HdfsConfiguration();
      
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_1_0_MULTIBLOCK_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-1.0");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  File dataDir = new File(dfsDir, "data");
  GenericTestUtils.assertExists(dataDir);
  
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir.getAbsolutePath());
  
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
    .format(false)
    .manageDataDfsDirs(false)
    .manageNameDfsDirs(false)
    .numDataNodes(1)
    .startupOption(StartupOption.UPGRADE)
    .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/user/todd/4blocks");
    // Read it without caring about the actual data within - we just need
    // to make sure that the block states and locations are OK.
    DFSTestUtil.readFile(fs, testPath);
    
    // Ensure that we can append to it - if the blocks were in some funny
    // state we'd get some kind of issue here. 
    FSDataOutputStream stm = fs.append(testPath);
    try {
      stm.write(1);
    } finally {
      IOUtils.closeStream(stm);
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example 14
Project: flume-release-1.7.0   File: HDFSCompressedDataStream.java
@Override
public void open(String filePath, CompressionCodec codec,
    CompressionType cType) throws IOException {
  Configuration conf = new Configuration();
  Path dstPath = new Path(filePath);
  FileSystem hdfs = dstPath.getFileSystem(conf);
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem)hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }
  boolean appending = false;
  if (conf.getBoolean("hdfs.append.support", false) == true && hdfs.isFile(dstPath)) {
    fsOut = hdfs.append(dstPath);
    appending = true;
  } else {
    fsOut = hdfs.create(dstPath);
  }
  if (compressor == null) {
    compressor = CodecPool.getCompressor(codec, conf);
  }
  cmpOut = codec.createOutputStream(fsOut, compressor);
  serializer = EventSerializerFactory.getInstance(serializerType,
      serializerContext, cmpOut);
  if (appending && !serializer.supportsReopen()) {
    cmpOut.close();
    serializer = null;
    throw new IOException("serializer (" + serializerType
        + ") does not support append");
  }

  registerCurrentStream(fsOut, hdfs, dstPath);

  if (appending) {
    serializer.afterReopen();
  } else {
    serializer.afterCreate();
  }
  isFinished = false;
}
 
Example 15
Project: hadoop   File: TestDecommission.java
@Test(timeout=120000)
public void testDecommissionWithOpenfile() throws IOException, InterruptedException {
  LOG.info("Starting test testDecommissionWithOpenfile");
  
  //At most 4 nodes will be decommissioned
  startCluster(1, 7, conf);
      
  FileSystem fileSys = cluster.getFileSystem(0);
  FSNamesystem ns = cluster.getNamesystem(0);
  
  String openFile = "/testDecommissionWithOpenfile.dat";
         
  writeFile(fileSys, new Path(openFile), (short)3);   
  // make sure the file was open for write
  FSDataOutputStream fdos =  fileSys.append(new Path(openFile)); 
  
  LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(cluster.getNameNode(0), openFile, 0, fileSize);
            
  DatanodeInfo[] dnInfos4LastBlock = lbs.getLastLocatedBlock().getLocations();
  DatanodeInfo[] dnInfos4FirstBlock = lbs.get(0).getLocations();
  
  ArrayList<String> nodes = new ArrayList<String>();
  ArrayList<DatanodeInfo> dnInfos = new ArrayList<DatanodeInfo>();

  DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
  for (DatanodeInfo datanodeInfo : dnInfos4FirstBlock) {
    DatanodeInfo found = datanodeInfo;
    for (DatanodeInfo dif: dnInfos4LastBlock) {
      if (datanodeInfo.equals(dif)) {
        found = null;
      }
    }
    if (found != null) {
      nodes.add(found.getXferAddr());
      dnInfos.add(dm.getDatanode(found));
    }
  }
  //decommission one of the 3 nodes which have last block
  nodes.add(dnInfos4LastBlock[0].getXferAddr());
  dnInfos.add(dm.getDatanode(dnInfos4LastBlock[0]));
  
  writeConfigFile(excludeFile, nodes);
  refreshNodes(ns, conf);  
  for (DatanodeInfo dn : dnInfos) {
    waitNodeState(dn, AdminStates.DECOMMISSIONED);
  }           

  fdos.close();
}
 
Example 16
Project: Transwarp-Sample-Code   File: HDFSCompressedDataStream.java
@Override
public void open(String filePath, CompressionCodec codec,
    CompressionType cType) throws IOException {
  Configuration conf = new Configuration();
  Path dstPath = new Path(filePath);
  FileSystem hdfs = dstPath.getFileSystem(conf);
  if(useRawLocalFileSystem) {
    if(hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem)hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }
  boolean appending = false;
  if (conf.getBoolean("hdfs.append.support", false) == true && hdfs.isFile
  (dstPath)) {
    fsOut = hdfs.append(dstPath);
    appending = true;
  } else {
    fsOut = hdfs.create(dstPath);
  }
  if(compressor == null) {
    compressor = CodecPool.getCompressor(codec, conf);
  }
  cmpOut = codec.createOutputStream(fsOut, compressor);
  serializer = EventSerializerFactory.getInstance(serializerType,
      serializerContext, cmpOut);
  if (appending && !serializer.supportsReopen()) {
    cmpOut.close();
    serializer = null;
    throw new IOException("serializer (" + serializerType
        + ") does not support append");
  }

  registerCurrentStream(fsOut, hdfs, dstPath);

  if (appending) {
    serializer.afterReopen();
  } else {
    serializer.afterCreate();
  }
  isFinished = false;
}
 
Example 17
Project: hadoop   File: TestPersistBlocks.java
@Test
public void testRestartWithAppend() throws IOException {
  final Configuration conf = new HdfsConfiguration();
  // Turn off persistent IPC, so that the DFSClient can survive NN restart
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      0);
  MiniDFSCluster cluster = null;

  FSDataOutputStream stream;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    FileSystem fs = cluster.getFileSystem();
    NameNode.getAddress(conf).getPort();
    // Creating a file with 4096 blockSize to write multiple blocks
    stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
    stream.write(DATA_BEFORE_RESTART, 0, DATA_BEFORE_RESTART.length / 2);
    stream.close();
    stream = fs.append(FILE_PATH, BLOCK_SIZE);
    stream.write(DATA_BEFORE_RESTART, DATA_BEFORE_RESTART.length / 2,
        DATA_BEFORE_RESTART.length / 2);
    stream.close();
    
    assertEquals(DATA_BEFORE_RESTART.length,
        fs.getFileStatus(FILE_PATH).getLen());
    
    cluster.restartNameNode();
    
    assertEquals(DATA_BEFORE_RESTART.length,
        fs.getFileStatus(FILE_PATH).getLen());
    
    FSDataInputStream readStream = fs.open(FILE_PATH);
    try {
      byte[] verifyBuf = new byte[DATA_BEFORE_RESTART.length];
      IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
      assertArrayEquals(DATA_BEFORE_RESTART, verifyBuf);
    } finally {
      IOUtils.closeStream(readStream);
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example 18
Project: hadoop   File: TestEncryptionZones.java
private void appendOneByte(FileSystem fs, Path p) throws IOException {
  final FSDataOutputStream out = fs.append(p);
  out.write((byte) 0x123);
  out.close();
}
 
Example 19
Project: hadoop   File: TestFileAppend.java
/** Tests appending after soft-limit expires. */
@Test
public void testAppendAfterSoftLimit() 
    throws IOException, InterruptedException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
  //Set small soft-limit for lease
  final long softLimit = 1L;
  final long hardLimit = 9999999L;

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .build();
  cluster.setLeasePeriod(softLimit, hardLimit);
  cluster.waitActive();

  FileSystem fs = cluster.getFileSystem();
  FileSystem fs2 = new DistributedFileSystem();
  fs2.initialize(fs.getUri(), conf);

  final Path testPath = new Path("/testAppendAfterSoftLimit");
  final byte[] fileContents = AppendTestUtil.initBuffer(32);

  // create a new file without closing
  FSDataOutputStream out = fs.create(testPath);
  out.write(fileContents);

  //Wait for > soft-limit
  Thread.sleep(250);

  try {
    FSDataOutputStream appendStream2 = fs2.append(testPath);
    appendStream2.write(fileContents);
    appendStream2.close();
    assertEquals(fileContents.length, fs.getFileStatus(testPath).getLen());
  } finally {
    fs.close();
    fs2.close();
    cluster.shutdown();
  }
}
 
Example 20
Project: hadoop   File: TestBlockTokenWithDFS.java
/**
 * testing that APPEND operation can handle token expiration when
 * re-establishing pipeline is needed
 */
@Test
public void testAppend() throws Exception {
  MiniDFSCluster cluster = null;
  int numDataNodes = 2;
  Configuration conf = getConf(numDataNodes);

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());

    final NameNode nn = cluster.getNameNode();
    final BlockManager bm = nn.getNamesystem().getBlockManager();
    final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();

    // set a short token lifetime (1 second)
    SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
    Path fileToAppend = new Path(FILE_TO_APPEND);
    FileSystem fs = cluster.getFileSystem();

    // write a one-byte file
    FSDataOutputStream stm = writeFile(fs, fileToAppend,
        (short) numDataNodes, BLOCK_SIZE);
    stm.write(rawData, 0, 1);
    stm.close();
    // open the file again for append
    stm = fs.append(fileToAppend);
    int mid = rawData.length - 1;
    stm.write(rawData, 1, mid - 1);
    stm.hflush();

    /*
     * wait till token used in stm expires
     */
    Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);
    while (!SecurityTestUtil.isBlockTokenExpired(token)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
      }
    }

    // remove a datanode to force re-establishing pipeline
    cluster.stopDataNode(0);
    // append the rest of the file
    stm.write(rawData, mid, rawData.length - mid);
    stm.close();
    // check if append is successful
    FSDataInputStream in5 = fs.open(fileToAppend);
    assertTrue(checkFile1(in5));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}