Java Code Examples for org.apache.hadoop.fs.FSDataOutputStream.close()

The following are Java code examples showing how to use close() of the org.apache.hadoop.fs.FSDataOutputStream class. You can vote up the examples you find useful; votes help surface better examples.
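Before the examples, here is a minimal sketch of the pattern most of them follow: obtain a FileSystem, create an FSDataOutputStream, write, and call close() to flush buffers and release the underlying resources. The class name, path, and payload below are placeholders for illustration, not taken from any of the projects listed here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FSDataOutputStreamCloseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);            // default file system from the configuration
    Path path = new Path("/tmp/close-example.txt");  // placeholder path
    FSDataOutputStream out = fs.create(path, true);  // overwrite if the file already exists
    try {
      out.writeBytes("hello, close()\n");            // write some data
    } finally {
      out.close();                                   // always close, even if the write fails
    }
    fs.delete(path, false);                          // clean up the placeholder file
  }
}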
Example 1
Project: hadoop   File: TestTFile.java   (6 votes)
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
 
Example 2
Project: hadoop   File: TestVLong.java   (6 votes)
public void testVLongRandom() throws IOException {
  int count = 1024 * 1024;
  long data[] = new long[count];
  Random rng = new Random();
  for (int i = 0; i < data.length; ++i) {
    int shift = rng.nextInt(Long.SIZE) + 1;
    long mask = (1L << shift) - 1;
    long a = ((long) rng.nextInt()) << 32;
    long b = ((long) rng.nextInt()) & 0xffffffffL;
    data[i] = (a + b) & mask;
  }
  
  FSDataOutputStream out = fs.create(path);
  for (int i = 0; i < data.length; ++i) {
    Utils.writeVLong(out, data[i]);
  }
  out.close();

  FSDataInputStream in = fs.open(path);
  for (int i = 0; i < data.length; ++i) {
    Assert.assertEquals(Utils.readVLong(in), data[i]);
  }
  in.close();
  fs.delete(path, false);
}
 
Example 3
Project: hadoop   File: TestMerge.java   (6 votes)
private void copyPartitions(Path mapOutputPath, Path indexPath)
  throws IOException {
  FileSystem localFs = FileSystem.getLocal(jobConf);
  FileSystem rfs = ((LocalFileSystem)localFs).getRaw();
  FSDataOutputStream rawOutput = rfs.create(mapOutputPath, true, BUF_SIZE);
  SpillRecord spillRecord = new SpillRecord(numberOfPartitions);
  IndexRecord indexRecord = new IndexRecord();
  for (int i = 0; i < numberOfPartitions; i++) {
    indexRecord.startOffset = rawOutput.getPos();
    byte buffer[] = outStreams[i].toByteArray();
    IFileOutputStream checksumOutput = new IFileOutputStream(rawOutput);
    checksumOutput.write(buffer);
    // Write checksum.
    checksumOutput.finish();
    // Write index record
    indexRecord.rawLength = (long)buffer.length;
    indexRecord.partLength = rawOutput.getPos() - indexRecord.startOffset;
    spillRecord.putIndex(indexRecord, i);
    reporter.progress();
  }
  rawOutput.close();
  spillRecord.writeToFile(indexPath, jobConf);
}
 
Example 4
Project: hadoop   File: TestFavoredNodesEndToEnd.java   (6 votes)
@Test(timeout = 180000)
public void testFavoredNodesEndToEndForAppend() throws Exception {
  // create 10 files with random preferred nodes
  for (int i = 0; i < NUM_FILES; i++) {
    Random rand = new Random(System.currentTimeMillis() + i);
    // pass a newly created rand so as to get a uniform distribution each time
    // without too many collisions (look at the do-while loop in getDatanodes)
    InetSocketAddress datanode[] = getDatanodes(rand);
    Path p = new Path("/filename" + i);
    // create and close the file.
    dfs.create(p, FsPermission.getDefault(), true, 4096, (short) 3, 4096L,
        null, null).close();
    // re-open for append
    FSDataOutputStream out = dfs.append(p, EnumSet.of(CreateFlag.APPEND),
        4096, null, datanode);
    out.write(SOME_BYTES);
    out.close();
    BlockLocation[] locations = getBlockLocations(p);
    // verify the files got created in the right nodes
    for (BlockLocation loc : locations) {
      String[] hosts = loc.getNames();
      String[] hosts1 = getStringForInetSocketAddrs(datanode);
      assertTrue(compareNodes(hosts, hosts1));
    }
  }
}
 
Example 5
Project: ditb   File: SnapshotDescriptionUtils.java   (6 votes)
/**
 * Write the snapshot description into the working directory of a snapshot
 * @param snapshot description of the snapshot being taken
 * @param workingDir working directory of the snapshot
 * @param fs {@link FileSystem} on which the snapshot should be taken
 * @throws IOException if we can't reach the filesystem and the file cannot be cleaned up on
 *           failure
 */
public static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingDir, FileSystem fs)
    throws IOException {
  FsPermission perms = FSUtils.getFilePermissions(fs, fs.getConf(),
    HConstants.DATA_FILE_UMASK_KEY);
  Path snapshotInfo = new Path(workingDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE);
  try {
    FSDataOutputStream out = FSUtils.create(fs, snapshotInfo, perms, true);
    try {
      snapshot.writeTo(out);
    } finally {
      out.close();
    }
  } catch (IOException e) {
    // if we get an exception, try to remove the snapshot info
    if (!fs.delete(snapshotInfo, false)) {
      String msg = "Couldn't delete snapshot info file: " + snapshotInfo;
      LOG.error(msg);
      throw new IOException(msg);
    }
  }
}
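Since FSDataOutputStream implements java.io.Closeable, the inner try/finally above can also be written with try-with-resources on Java 7+. A sketch of the same idea, reusing the example's own FSUtils.create call and omitting the outer cleanup logic:

try (FSDataOutputStream out = FSUtils.create(fs, snapshotInfo, perms, true)) {
  snapshot.writeTo(out);   // out.close() runs automatically, even if writeTo throws
}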
 
Example 6
Project: hadoop-oss   File: TestChecksumFileSystem.java   (6 votes)
@Test
public void testMultiChunkFile() throws Exception {
  Path testPath = new Path(TEST_ROOT_DIR, "testMultiChunk");
  FSDataOutputStream fout = localFs.create(testPath);
  for (int i = 0; i < 1000; i++) {
    fout.write(("testing" + i).getBytes());
  }
  fout.close();

  // Exercise some boundary cases - a divisor of the chunk size
  // the chunk size, 2x chunk size, and +/-1 around these.
  readFile(localFs, testPath, 128);
  readFile(localFs, testPath, 511);
  readFile(localFs, testPath, 512);
  readFile(localFs, testPath, 513);
  readFile(localFs, testPath, 1023);
  readFile(localFs, testPath, 1024);
  readFile(localFs, testPath, 1025);
}
 
Example 7
Project: hadoop   File: TestSwiftFileSystemPartitionedUploads.java   (5 votes)
/**
 * Test sticks up a very large partitioned file and verifies that
 * it comes back unchanged.
 * @throws Throwable
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testManyPartitionedFile() throws Throwable {
  final Path path = new Path("/test/testManyPartitionedFile");

  int len = PART_SIZE_BYTES * 15;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  out.write(src, 0, src.length);
  int expected =
    getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
  out.close();
  assertPartitionsWritten("write completed", out, expected);
  assertEquals("too few bytes written", len,
               SwiftNativeFileSystem.getBytesWritten(out));
  assertEquals("too few bytes uploaded", len,
               SwiftNativeFileSystem.getBytesUploaded(out));
  //now we verify that the data comes back. If it
  //doesn't, it means that the ordering of the partitions
  //isn't right
  byte[] dest = readDataset(fs, path, len);
  //compare data
  SwiftTestUtils.compareByteArrays(src, dest, len);
  //finally, check the data
  FileStatus[] stats = fs.listStatus(path);
  assertEquals("wrong entry count in "
               + SwiftTestUtils.dumpStats(path.toString(), stats),
               expected, stats.length);
}
 
Example 8
Project: aliyun-maxcompute-data-collectors   File: HCatalogTestUtils.java   (5 votes)
private void createInputFile(Path path, int rowCount)
  throws IOException {
  if (fs.exists(path)) {
    fs.delete(path, true);
  }
  FSDataOutputStream os = fs.create(path);
  for (int i = 0; i < rowCount; i++) {
    String s = i + "\n";
    os.writeChars(s);
  }
  os.close();
}
 
Example 9
Project: hadoop-oss   File: TestTFile.java   (5 votes)
@Test
public void testMetaBlocks() throws IOException {
  Path mFile = new Path(ROOT, "meta.tfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  Writer writer = new Writer(fout, minBlockSize, "none", null, conf);
  someTestingWithMetaBlock(writer, "none");
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = new Reader(fin, fs.getFileStatus(mFile).getLen(), conf);
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
 
Example 10
Project: hadoop   File: TestHftpFileSystem.java   (5 votes)
@Test
public void testReadClosedStream() throws IOException {
  final Path testFile = new Path("/testfile+2");
  FSDataOutputStream os = hdfs.create(testFile, true);
  os.writeBytes("0123456789");
  os.close();

  // ByteRangeInputStream delays opens until reads. Make sure it doesn't
  // open a closed stream that has never been opened
  FSDataInputStream in = hftpFs.open(testFile);
  in.close();
  checkClosedStream(in);
  checkClosedStream(in.getWrappedStream());

  // force the stream to connect and then close it
  in = hftpFs.open(testFile);
  int ch = in.read();
  assertEquals('0', ch);
  in.close();
  checkClosedStream(in);
  checkClosedStream(in.getWrappedStream());

  // make sure seeking doesn't automagically reopen the stream
  in.seek(4);
  checkClosedStream(in);
  checkClosedStream(in.getWrappedStream());
}
 
Example 11
Project: ditb   File: TestFSUtils.java   (5 votes)
private void WriteDataToHDFS(FileSystem fs, Path file, int dataSize)
  throws Exception {
  FSDataOutputStream out = fs.create(file);
  byte [] data = new byte[dataSize];
  out.write(data, 0, dataSize);
  out.close();
}
 
Example 12
Project: hadoop   File: TestCombineFileInputFormat.java   (5 votes)
/**
 * Test when input files are from non-default file systems
 */
@Test
public void testForNonDefaultFileSystem() throws Throwable {
  Configuration conf = new Configuration();

  // use a fake file system scheme as default
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, DUMMY_FS_URI);

  // default fs path
  assertEquals(DUMMY_FS_URI, FileSystem.getDefaultUri(conf).toString());
  // add a local file
  Path localPath = new Path("testFile1");
  FileSystem lfs = FileSystem.getLocal(conf);
  FSDataOutputStream dos = lfs.create(localPath);
  dos.writeChars("Local file for CFIF");
  dos.close();

  Job job = Job.getInstance(conf);
  FileInputFormat.setInputPaths(job, lfs.makeQualified(localPath));
  DummyInputFormat inFormat = new DummyInputFormat();
  List<InputSplit> splits = inFormat.getSplits(job);
  assertTrue(splits.size() > 0);
  for (InputSplit s : splits) {
    CombineFileSplit cfs = (CombineFileSplit)s;
    for (Path p : cfs.getPaths()) {
      assertEquals(p.toUri().getScheme(), "file");
    }
  }
}
 
Example 13
Project: hadoop   File: TestFileCreation.java   (4 votes)
/**
 * Test deleteOnExit
 */
@Test
public void testDeleteOnExit() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  FileSystem localfs = FileSystem.getLocal(conf);

  try {

    // Creates files in HDFS and local file system.
    //
    Path file1 = new Path("filestatus.dat");
    Path file2 = new Path("filestatus2.dat");
    Path file3 = new Path("filestatus3.dat");
    FSDataOutputStream stm1 = createFile(fs, file1, 1);
    FSDataOutputStream stm2 = createFile(fs, file2, 1);
    FSDataOutputStream stm3 = createFile(localfs, file3, 1);
    System.out.println("DeleteOnExit: Created files.");

    // write to files and close. Purposely, do not close file2.
    writeFile(stm1);
    writeFile(stm3);
    stm1.close();
    stm2.close();
    stm3.close();

    // set delete on exit flag on files.
    fs.deleteOnExit(file1);
    fs.deleteOnExit(file2);
    localfs.deleteOnExit(file3);

    // close the file system. This should make the above files
    // disappear.
    fs.close();
    localfs.close();
    fs = null;
    localfs = null;

    // reopen file system and verify that file does not exist.
    fs = cluster.getFileSystem();
    localfs = FileSystem.getLocal(conf);

    assertTrue(file1 + " still exists in spite of deleteOnExit set.",
               !fs.exists(file1));
    assertTrue(file2 + " still exists in spite of deleteOnExit set.",
               !fs.exists(file2));
    assertTrue(file3 + " still exists in spite of deleteOnExit set.",
               !localfs.exists(file3));
    System.out.println("DeleteOnExit successful.");

  } finally {
    IOUtils.closeStream(fs);
    IOUtils.closeStream(localfs);
    cluster.shutdown();
  }
}
 
Example 14
Project: hadoop-oss   File: TestChecksumFileSystem.java   (4 votes)
@Test
public void testVerifyChecksum() throws Exception {    
  Path testPath = new Path(TEST_ROOT_DIR, "testPath");
  Path testPath11 = new Path(TEST_ROOT_DIR, "testPath11");
  FSDataOutputStream fout = localFs.create(testPath);
  fout.write("testing".getBytes());
  fout.close();
  
  fout = localFs.create(testPath11);
  fout.write("testing you".getBytes());
  fout.close();

  // Exercise some boundary cases - a divisor of the chunk size
  // the chunk size, 2x chunk size, and +/-1 around these.
  readFile(localFs, testPath, 128);
  readFile(localFs, testPath, 511);
  readFile(localFs, testPath, 512);
  readFile(localFs, testPath, 513);
  readFile(localFs, testPath, 1023);
  readFile(localFs, testPath, 1024);
  readFile(localFs, testPath, 1025);

  localFs.delete(localFs.getChecksumFile(testPath), true);
  assertTrue("checksum deleted", !localFs.exists(localFs.getChecksumFile(testPath)));
  
  //copying the wrong checksum file
  FileUtil.copy(localFs, localFs.getChecksumFile(testPath11), localFs, 
      localFs.getChecksumFile(testPath),false,true,localFs.getConf());
  assertTrue("checksum exists", localFs.exists(localFs.getChecksumFile(testPath)));
  
  boolean errorRead = false;
  try {
    readFile(localFs, testPath, 1024);
  }catch(ChecksumException ie) {
    errorRead = true;
  }
  assertTrue("error reading", errorRead);
  
  //now setting verify false, the read should succeed
  localFs.setVerifyChecksum(false);
  String str = readFile(localFs, testPath, 1024).toString();
  assertTrue("read", "testing".equals(str));
}
 
Example 15
Project: hadoop   File: TestPipelinesFailover.java   (4 votes)
private void doWriteOverFailoverTest(TestScenario scenario,
    MethodToTestIdempotence methodToTest) throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  // Don't check replication periodically.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
  
  FSDataOutputStream stm = null;
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(3)
    .build();
  try {
    int sizeWritten = 0;
    
    cluster.waitActive();
    cluster.transitionToActive(0);
    Thread.sleep(500);

    LOG.info("Starting with NN 0 active");
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    stm = fs.create(TEST_PATH);
    
    // write a block and a half
    AppendTestUtil.write(stm, 0, BLOCK_AND_A_HALF);
    sizeWritten += BLOCK_AND_A_HALF;
    
    // Make sure all of the blocks are written out before failover.
    stm.hflush();

    LOG.info("Failing over to NN 1");
    scenario.run(cluster);

    // NOTE: explicitly do *not* make any further metadata calls
    // to the NN here. The next IPC call should be to allocate the next
    // block. Any other call would notice the failover and not test
    // idempotence of the operation (HDFS-3031)
    
    FSNamesystem ns1 = cluster.getNameNode(1).getNamesystem();
    BlockManagerTestUtil.updateState(ns1.getBlockManager());
    assertEquals(0, ns1.getPendingReplicationBlocks());
    assertEquals(0, ns1.getCorruptReplicaBlocks());
    assertEquals(0, ns1.getMissingBlocksCount());

    // If we're testing allocateBlock()'s idempotence, write another
    // block and a half, so we have to allocate a new block.
    // Otherwise, don't write anything, so our next RPC will be
    // completeFile() if we're testing idempotence of that operation.
    if (methodToTest == MethodToTestIdempotence.ALLOCATE_BLOCK) {
      // write another block and a half
      AppendTestUtil.write(stm, sizeWritten, BLOCK_AND_A_HALF);
      sizeWritten += BLOCK_AND_A_HALF;
    }
    
    stm.close();
    stm = null;
    
    AppendTestUtil.check(fs, TEST_PATH, sizeWritten);
  } finally {
    IOUtils.closeStream(stm);
    cluster.shutdown();
  }
}
 
Example 16
Project: hadoop   File: TestRbwSpaceReservation.java   (4 votes)
private void createFileAndTestSpaceReservation(
    final String fileNamePrefix, final int fileBlockSize)
    throws IOException, InterruptedException {
  // Enough for 1 block + meta files + some delta.
  final long configuredCapacity = fileBlockSize * 2 - 1;
  startCluster(BLOCK_SIZE, 1, configuredCapacity);
  FSDataOutputStream out = null;
  Path path = new Path("/" + fileNamePrefix + ".dat");

  try {
    out = fs.create(path, false, 4096, (short) 1, fileBlockSize);

    byte[] buffer = new byte[rand.nextInt(fileBlockSize / 4)];
    out.write(buffer);
    out.hsync();
    int bytesWritten = buffer.length;

    // Check that space was reserved for a full block minus the bytesWritten.
    assertThat(singletonVolume.getReservedForRbw(),
               is((long) fileBlockSize - bytesWritten));
    out.close();
    out = null;

    // Check that the reserved space has been released since we closed the
    // file.
    assertThat(singletonVolume.getReservedForRbw(), is(0L));

    // Reopen the file for appends and write 1 more byte.
    out = fs.append(path);
    out.write(buffer);
    out.hsync();
    bytesWritten += buffer.length;

    // Check that space was again reserved for a full block minus the
    // bytesWritten so far.
    assertThat(singletonVolume.getReservedForRbw(),
               is((long) fileBlockSize - bytesWritten));

    // Write once again and again verify the available space. This ensures
    // that the reserved space is progressively adjusted to account for bytes
    // written to disk.
    out.write(buffer);
    out.hsync();
    bytesWritten += buffer.length;
    assertThat(singletonVolume.getReservedForRbw(),
               is((long) fileBlockSize - bytesWritten));
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
 
Example 17
Project: hadoop-oss   File: Credentials.java   (4 votes)
public void writeTokenStorageFile(Path filename, 
                                  Configuration conf) throws IOException {
  FSDataOutputStream os = filename.getFileSystem(conf).create(filename);
  writeTokenStorageToStream(os);
  os.close();
}
 
Example 18
Project: hadoop   File: TestFileAppend.java   (4 votes)
/** Tests appending after soft-limit expires. */
@Test
public void testAppendAfterSoftLimit() 
    throws IOException, InterruptedException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
  //Set small soft-limit for lease
  final long softLimit = 1L;
  final long hardLimit = 9999999L;

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .build();
  cluster.setLeasePeriod(softLimit, hardLimit);
  cluster.waitActive();

  FileSystem fs = cluster.getFileSystem();
  FileSystem fs2 = new DistributedFileSystem();
  fs2.initialize(fs.getUri(), conf);

  final Path testPath = new Path("/testAppendAfterSoftLimit");
  final byte[] fileContents = AppendTestUtil.initBuffer(32);

  // create a new file without closing
  FSDataOutputStream out = fs.create(testPath);
  out.write(fileContents);

  //Wait for > soft-limit
  Thread.sleep(250);

  try {
    FSDataOutputStream appendStream2 = fs2.append(testPath);
    appendStream2.write(fileContents);
    appendStream2.close();
    assertEquals(fileContents.length, fs.getFileStatus(testPath).getLen());
  } finally {
    fs.close();
    fs2.close();
    cluster.shutdown();
  }
}
 
Example 19
Project: ditb   File: TestChecksum.java   (4 votes)
protected void testChecksumInternals(boolean useTags) throws IOException {
  Compression.Algorithm algo = NONE;
  for (boolean pread : new boolean[] { false, true }) {
    for (int bytesPerChecksum : BYTES_PER_CHECKSUM) {
      Path path = new Path(TEST_UTIL.getDataTestDir(), "checksumChunk_" + 
                           algo + bytesPerChecksum);
      FSDataOutputStream os = fs.create(path);
      HFileContext meta = new HFileContextBuilder()
                          .withCompression(algo)
                          .withIncludesMvcc(true)
                          .withIncludesTags(useTags)
                          .withHBaseCheckSum(true)
                          .withBytesPerCheckSum(bytesPerChecksum)
                          .build();
      HFileBlock.Writer hbw = new HFileBlock.Writer(null,
         meta);

      // write one block. The block has data
      // that is at least 6 times more than the checksum chunk size
      long dataSize = 0;
      DataOutputStream dos = hbw.startWriting(BlockType.DATA);
      for (; dataSize < 6 * bytesPerChecksum;) {
        for (int i = 0; i < 1234; ++i) {
          dos.writeInt(i);
          dataSize += 4;
        }
      }
      hbw.writeHeaderAndData(os);
      long totalSize = hbw.getOnDiskSizeWithHeader();
      os.close();

      long expectedChunks = ChecksumUtil.numChunks(
                             dataSize + HConstants.HFILEBLOCK_HEADER_SIZE,
                             bytesPerChecksum);
      LOG.info("testChecksumChunks: pread=" + pread +
                 ", bytesPerChecksum=" + bytesPerChecksum +
                 ", fileSize=" + totalSize +
                 ", dataSize=" + dataSize +
                 ", expectedChunks=" + expectedChunks);

      // Verify hbase checksums. 
      assertEquals(true, hfs.useHBaseChecksum());

      // Read data back from file.
      FSDataInputStream is = fs.open(path);
      FSDataInputStream nochecksum = hfs.getNoChecksumFs().open(path);
      meta = new HFileContextBuilder()
             .withCompression(algo)
             .withIncludesMvcc(true)
             .withIncludesTags(useTags)
             .withHBaseCheckSum(true)
             .withBytesPerCheckSum(bytesPerChecksum)
             .build();
      HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(new FSDataInputStreamWrapper(
          is, nochecksum), totalSize, hfs, path, meta);
      HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
      is.close();
      b.sanityCheck();
      assertEquals(dataSize, b.getUncompressedSizeWithoutHeader());

      // verify that we have the expected number of checksum chunks
      assertEquals(totalSize, HConstants.HFILEBLOCK_HEADER_SIZE + dataSize +
                   expectedChunks * HFileBlock.CHECKSUM_SIZE);

      // assert that we did not encounter hbase checksum verification failures
      assertEquals(0, HFile.getChecksumFailuresCount());
    }
  }
}
 
Example 20
Project: hadoop   File: TestOutOfBandAzureBlobOperationsLive.java   (4 votes)
@Test
public void outOfBandFolder_create_rootDir() throws Exception {
  Path targetFile = new Path("/newInRoot");
  FSDataOutputStream s2 = fs.create(targetFile);
  s2.close();
}