Java Code Examples for org.apache.hadoop.fs.FileSystem.setVerifyChecksum()

The following are Java code examples showing how to use the setVerifyChecksum() method of the org.apache.hadoop.fs.FileSystem class. setVerifyChecksum(boolean) controls whether the client verifies checksums when reading files from a checksummed file system. Each example below is taken from an open-source project.
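As a minimal sketch of the basic pattern (the class name and path below are placeholders, not part of any project): disable verification before opening the stream, then restore the default afterwards, since FileSystem.get() can return a cached instance shared with other callers.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class VerifyChecksumSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Turn off client-side checksum verification for subsequent reads.
    fs.setVerifyChecksum(false);
    try (FSDataInputStream in = fs.open(new Path("/tmp/example.dat"))) {  // placeholder path
      byte[] buf = new byte[4096];
      int n = in.read(buf); // data is returned without CRC validation
      System.out.println("Read " + n + " bytes with verification disabled");
    } finally {
      // Restore the default so other users of the (possibly cached) instance are unaffected.
      fs.setVerifyChecksum(true);
    }
  }
}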
Example 1
Project: hadoop   File: TestFSInputChecker.java
/**
 * Tests read/seek/getPos/skip operations for an input stream.
 */
private void testChecker(FileSystem fileSys, boolean readCS)
    throws Exception {
  Path file = new Path("try.dat");
  writeFile(fileSys, file);

  try {
    if (!readCS) {
      fileSys.setVerifyChecksum(false);
    }

    stm = fileSys.open(file);  // 'stm' is an input-stream field of the enclosing test class
    checkReadAndGetPos();
    checkSeek();
    checkSkip();
    //checkMark
    assertFalse(stm.markSupported());
    stm.close();
  } finally {
    if (!readCS) {
      fileSys.setVerifyChecksum(true);
    }
    cleanupFile(fileSys, file);
  }
}
 
Example 2
Project: hadoop   File: TestPread.java
private void dfsPreadTest(Configuration conf, boolean disableTransferTo, boolean verifyChecksum)
    throws IOException {
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
  conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
  // Set short retry timeouts so this test runs faster
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  if (disableTransferTo) {
    conf.setBoolean("dfs.datanode.transferTo.allowed", false);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fileSys = cluster.getFileSystem();
  fileSys.setVerifyChecksum(verifyChecksum);
  try {
    Path file1 = new Path("preadtest.dat");
    writeFile(fileSys, file1);
    pReadFile(fileSys, file1);
    datanodeRestartTest(cluster, fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example 3
Project: hadoop   File: DumpTypedBytes.java
/**
 * The main driver for <code>DumpTypedBytes</code>.
 */
public int run(String[] args) throws Exception {
  if (args.length == 0) {
    System.err.println("Too few arguments!");
    printUsage();
    return 1;
  }
  Path pattern = new Path(args[0]);
  FileSystem fs = pattern.getFileSystem(getConf());
  fs.setVerifyChecksum(true);
  for (Path p : FileUtil.stat2Paths(fs.globStatus(pattern), pattern)) {
    List<FileStatus> inputFiles = new ArrayList<FileStatus>();
    FileStatus status = fs.getFileStatus(p);
    if (status.isDirectory()) {
      FileStatus[] files = fs.listStatus(p);
      Collections.addAll(inputFiles, files);
    } else {
      inputFiles.add(status);
    }
    // Note: dumps only the first path matched by the glob before returning.
    return dumpTypedBytes(inputFiles);
  }
  return -1;
}
 
Example 4
Project: flume-release-1.7.0   File: TestHDFSEventSink.java
/**
 * This test simulates what happens when a batch of events is written to a compressed sequence
 * file (and thus hsync'd to HDFS) but the file is not yet closed.
 *
 * When this happens, the data that we wrote should still be readable.
 */
@Test
public void testBlockCompressSequenceFileWriterSync() throws IOException, EventDeliveryException {
  String hdfsPath = testPath + "/sequenceFileWriterSync";
  FileSystem fs = FileSystem.get(new Configuration());
  // Since we are reading a partial file, we don't want to use checksums
  fs.setVerifyChecksum(false);
  fs.setWriteChecksum(false);

  // Compression codecs that don't require native hadoop libraries
  String[] codecs = {"BZip2Codec", "DeflateCodec"};

  for (String codec : codecs) {
    sequenceFileWriteAndVerifyEvents(fs, hdfsPath, codec, Collections.singletonList(
        "single-event"
    ));

    sequenceFileWriteAndVerifyEvents(fs, hdfsPath, codec, Arrays.asList(
        "multiple-events-1",
        "multiple-events-2",
        "multiple-events-3",
        "multiple-events-4",
        "multiple-events-5"
    ));
  }

  fs.close();
}