org.apache.hadoop.io.IOUtils Java Examples

The following examples show how to use org.apache.hadoop.io.IOUtils. Each example is taken from an open-source project; the source file, originating project, and license are noted above the code.
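Before the individual project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; all paths are hypothetical and a reachable default FileSystem is assumed) of the IOUtils calls that recur throughout them: copyBytes for stream-to-stream copies, readFully for exact-length reads, and closeStream/cleanup for quiet cleanup in finally blocks.

import java.io.InputStream;
import java.io.OutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class IOUtilsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Copy one file to another; the buffer size comes from io.file.buffer.size
    // in conf, and the streams are left open (last argument is false).
    InputStream in = fs.open(new Path("/tmp/in.txt"));        // hypothetical path
    OutputStream out = fs.create(new Path("/tmp/out.txt"));   // hypothetical path
    try {
      IOUtils.copyBytes(in, out, conf, false);
    } finally {
      // closeStream ignores close-time IOExceptions, so it is safe in finally.
      IOUtils.closeStream(in);
      IOUtils.closeStream(out);
    }

    // Read an exact number of bytes; readFully throws an IOException if the
    // stream ends early (assumes the file holds at least 16 bytes).
    byte[] header = new byte[16];
    InputStream again = fs.open(new Path("/tmp/out.txt"));
    try {
      IOUtils.readFully(again, header, 0, header.length);
    } finally {
      // cleanup closes each argument, logging failures to the given Log (null here).
      IOUtils.cleanup(null, again);
    }
  }
}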
Example #1
Source File: TestLineReader.java    From tajo with Apache License 2.0
@Test
public void testByteBufLineReaderWithoutTerminating() throws IOException {
  String path = JavaResourceUtil.getResourceURL("dataset/testLineText.txt").getFile();
  File file = new File(path);
  String data = FileUtil.readTextFile(file);

  ByteBufInputChannel channel = new ByteBufInputChannel(new FileInputStream(file));
  ByteBufLineReader reader = new ByteBufLineReader(channel);

  long totalRead = 0;
  int i = 0;
  AtomicInteger bytes = new AtomicInteger();
  for(;;){
    ByteBuf buf = reader.readLineBuf(bytes);
    totalRead += bytes.get();
    if(buf == null) break;
    i++;
  }
  IOUtils.cleanup(null, reader);
  assertFalse(channel.isOpen());
  assertEquals(file.length(), totalRead);
  assertEquals(file.length(), reader.readBytes());
  assertEquals(data.split("\n").length, i);
}
 
Example #2
Source File: HDFSUtil.java    From jMetalSP with MIT License
public boolean writeFile(String text, String path) {
  boolean result = false;
  if (text != null && path != null) {
    try {
      fs = FileSystem.get(conf);
      InputStream in = org.apache.commons.io.IOUtils.toInputStream(text, "UTF-8");
      OutputStream out = fs.create(new Path(path));
      IOUtils.copyBytes(in, out, conf);
      result = true;
    } catch (Exception ex) {
      ex.printStackTrace();
      result = false;
    }

  }
  return result;
}
 
Example #3
Source File: TestRumenJobTraces.java    From RDFS with Apache License 2.0
static private <T extends DeepCompare> void jsonFileMatchesGold(
    FileSystem lfs, Path result, Path gold, Class<? extends T> clazz,
    String fileDescription) throws IOException {
  JsonObjectMapperParser<T> goldParser =
      new JsonObjectMapperParser<T>(gold, clazz, new Configuration());
  InputStream resultStream = lfs.open(result);
  JsonObjectMapperParser<T> resultParser =
      new JsonObjectMapperParser<T>(resultStream, clazz);
  try {
    while (true) {
      DeepCompare goldJob = goldParser.getNext();
      DeepCompare resultJob = resultParser.getNext();
      if ((goldJob == null) || (resultJob == null)) {
        assertTrue(goldJob == resultJob);
        break;
      }

      try {
        resultJob.deepCompare(goldJob, new TreePath(null, "<root>"));
      } catch (DeepInequalityException e) {
        String error = e.path.toString();

        assertFalse(fileDescription + " mismatches: " + error, true);
      }
    }
  } finally {
    IOUtils.cleanup(null, goldParser, resultParser);
  }
}
 
Example #4
Source File: SparkBuildDictionary.java    From kylin with Apache License 2.0
private void checkSnapshot(CubeManager cubeManager, CubeSegment cubeSegment) {
    List<DimensionDesc> dimensionDescs = cubeSegment.getCubeDesc().getDimensions();
    for (DimensionDesc dimensionDesc : dimensionDescs) {
        TableRef lookup = dimensionDesc.getTableRef();
        String tableIdentity = lookup.getTableIdentity();
        if (cubeSegment.getModel().isLookupTable(tableIdentity) && !cubeSegment.getCubeDesc().isExtSnapshotTable(tableIdentity)) {
            logger.info("Checking snapshot of {}", lookup);
            try {
                JoinDesc join = cubeSegment.getModel().getJoinsTree().getJoinByPKSide(lookup);
                ILookupTable table = cubeManager.getLookupTable(cubeSegment, join);
                if (table != null) {
                    IOUtils.closeStream(table);
                }
            } catch (Throwable th) {
                throw new RuntimeException(String.format(Locale.ROOT, "Checking snapshot of %s failed.", lookup), th);
            }
        }
    }
}
 
Example #5
Source File: TestFileJournalManager.java    From big-c with Apache License 2.0
/**
 * Make sure that in-progress streams aren't counted if we don't ask for
 * them.
 */
@Test
public void testExcludeInProgressStreams() throws CorruptionException,
    IOException {
  File f = new File(TestEditLog.TEST_DIR + "/excludeinprogressstreams");
  
  // Don't close the edit log once the files have been set up.
  NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()), 
                                 10, false);
  StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
  
  FileJournalManager jm = new FileJournalManager(conf, sd, storage);
  
  // If we exclude the in-progress stream, we should only have 100 tx.
  assertEquals(100, getNumberOfTransactions(jm, 1, false, false));
  
  EditLogInputStream elis = getJournalInputStream(jm, 90, false);
  try {
    FSEditLogOp lastReadOp = null;
    while ((lastReadOp = elis.readOp()) != null) {
      assertTrue(lastReadOp.getTransactionId() <= 100);
    }
  } finally {
    IOUtils.cleanup(LOG, elis);
  }
}
 
Example #6
Source File: TestStickyBit.java    From big-c with Apache License 2.0
/**
 * Ensure that even if a file is in a directory with the sticky bit on,
 * another user can write to that file (assuming correct permissions).
 */
private void confirmCanAppend(Configuration conf, Path p) throws Exception {
  // Write a file to the new tmp directory as a regular user
  Path file = new Path(p, "foo");
  writeFile(hdfsAsUser1, file);
  hdfsAsUser1.setPermission(file, new FsPermission((short) 0777));

  // Log onto cluster as another user and attempt to append to file
  Path file2 = new Path(p, "foo");
  FSDataOutputStream h = null;
  try {
    h = hdfsAsUser2.append(file2);
    h.write("Some more data".getBytes());
    h.close();
    h = null;
  } finally {
    IOUtils.cleanup(null, h);
  }
}
 
Example #7
Source File: INode.java    From hadoop with Apache License 2.0
public InputStream serialize() throws IOException {
  ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  DataOutputStream out = new DataOutputStream(bytes);
  try {
    out.writeByte(fileType.ordinal());
    if (isFile()) {
      out.writeInt(blocks.length);
      for (int i = 0; i < blocks.length; i++) {
        out.writeLong(blocks[i].getId());
        out.writeLong(blocks[i].getLength());
      }
    }
    out.close();
    out = null;
  } finally {
    IOUtils.closeStream(out);
  }
  return new ByteArrayInputStream(bytes.toByteArray());
}
 
Example #8
Source File: ThriftStockAvgFileReader.java    From hiped2 with Apache License 2.0
public static void readFromProtoBuf(InputStream inputStream)
    throws IOException {

  ThriftBlockReader<StockAvg> reader =
      new ThriftBlockReader<StockAvg>(
          inputStream, new TypeRef<StockAvg>() {
      });

  StockAvg stock;
  while ((stock = reader.readNext()) != null) {
    System.out.println(ToStringBuilder.reflectionToString(stock));

  }

  IOUtils.closeStream(inputStream);
}
 
Example #9
Source File: TestAtomicFileOutputStream.java    From hadoop with Apache License 2.0
@Test
public void testFailToRename() throws IOException {
  assumeTrue(Shell.WINDOWS);
  OutputStream fos = null;
  try {
    fos = new AtomicFileOutputStream(DST_FILE);
    fos.write(TEST_STRING.getBytes());
    FileUtil.setWritable(TEST_DIR, false);
    exception.expect(IOException.class);
    exception.expectMessage("failure in native rename");
    try {
      fos.close();
    } finally {
      fos = null;
    }
  } finally {
    IOUtils.cleanup(null, fos);
    FileUtil.setWritable(TEST_DIR, true);
  }
}
 
Example #10
Source File: CryptoUtils.java    From big-c with Apache License 2.0
/**
 * Wraps a given FSDataInputStream with a CryptoInputStream. The size of the
 * data buffer required for the stream is specified by the
 * "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
 * variable.
 * 
 * @param conf
 * @param in
 * @return FSDataInputStream
 * @throws IOException
 */
public static FSDataInputStream wrapIfNecessary(Configuration conf,
    FSDataInputStream in) throws IOException {
  if (isEncryptedSpillEnabled(conf)) {
    CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf);
    int bufferSize = getBufferSize(conf);
    // The first 8 bytes are not used here, but they still have to be consumed
    // because the output side always writes them before the IV.
    IOUtils.readFully(in, new byte[8], 0, 8);
    byte[] iv = 
        new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()];
    IOUtils.readFully(in, iv, 0, 
        cryptoCodec.getCipherSuite().getAlgorithmBlockSize());
    if (LOG.isDebugEnabled()) {
      LOG.debug("IV read from Stream ["
          + Base64.encodeBase64URLSafeString(iv) + "]");
    }
    return new CryptoFSDataInputStream(in, cryptoCodec, bufferSize,
        getEncryptionKey(), iv);
  } else {
    return in;
  }
}
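For orientation, a hedged caller-side sketch of the method above (imports as in the surrounding examples; the spill-file path is hypothetical, and the real callers are MapReduce's shuffle and spill-merge code paths):

Configuration conf = new Configuration();
FileSystem localFs = FileSystem.getLocal(conf);
// Hypothetical intermediate spill file written by a map task.
FSDataInputStream raw = localFs.open(new Path("/tmp/spill0.out"));
// Returns a CryptoFSDataInputStream when encrypted spill is enabled,
// otherwise hands back the original stream unchanged.
FSDataInputStream in = CryptoUtils.wrapIfNecessary(conf, raw);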
 
Example #11
Source File: TestMapFile.java    From big-c with Apache License 2.0
/**
 * Test throwing {@code IOException} in the {@code MapFile.Writer} constructor.
 */
@Test
public void testWriteWithFailDirCreation() {
  String ERROR_MESSAGE = "Mkdirs failed to create directory";
  Path dirName = new Path(TEST_DIR, "fail.mapfile");
  MapFile.Writer writer = null;
  try {
    FileSystem fs = FileSystem.getLocal(conf);
    FileSystem spyFs = spy(fs);
    Path pathSpy = spy(dirName);
    when(pathSpy.getFileSystem(conf)).thenReturn(spyFs);
    when(spyFs.mkdirs(dirName)).thenReturn(false);

    writer = new MapFile.Writer(conf, pathSpy,
        MapFile.Writer.keyClass(IntWritable.class),
        MapFile.Writer.valueClass(Text.class));
    fail("testWriteWithFailDirCreation error !!!");
  } catch (IOException ex) {
    assertTrue("testWriteWithFailDirCreation ex error !!!", ex.getMessage()
        .startsWith(ERROR_MESSAGE));
  } finally {
    IOUtils.cleanup(null, writer);
  }
}
 
Example #12
Source File: BlockSender.java    From big-c with Apache License 2.0
/**
 * Read checksum into given buffer
 * @param buf buffer to read the checksum into
 * @param checksumOffset offset at which to write the checksum into buf
 * @param checksumLen length of checksum to write
 * @throws IOException on error
 */
private void readChecksum(byte[] buf, final int checksumOffset,
    final int checksumLen) throws IOException {
  if (checksumSize <= 0 && checksumIn == null) {
    return;
  }
  try {
    checksumIn.readFully(buf, checksumOffset, checksumLen);
  } catch (IOException e) {
    LOG.warn(" Could not read or failed to veirfy checksum for data"
        + " at offset " + offset + " for block " + block, e);
    IOUtils.closeStream(checksumIn);
    checksumIn = null;
    if (corruptChecksumOk) {
      if (checksumOffset < checksumLen) {
        // Just fill the array with zeros.
        Arrays.fill(buf, checksumOffset, checksumLen, (byte) 0);
      }
    } else {
      throw e;
    }
  }
}
 
Example #13
Source File: HdfsUtil.java    From rainbow with Apache License 2.0
public void upFile(File localFile, String hdfsPath) throws IOException
{
    InputStream in = new BufferedInputStream(new FileInputStream(localFile));
    OutputStream out = fileSystem.create(new Path(hdfsPath));
    try
    {
        IOUtils.copyBytes(in, out, conf);
    } catch (Exception e)
    {
        e.printStackTrace();
    } finally
    {
        IOUtils.closeStream(in);
        IOUtils.closeStream(out);
    }
}
 
Example #14
Source File: HdfsUtil.java    From rainbow with Apache License 2.0
public void upFile(InputStream fileInputStream, String hdfsPath)
        throws IOException
{
    InputStream in = new BufferedInputStream(fileInputStream);
    OutputStream out = fileSystem.create(new Path(hdfsPath));
    try
    {
        IOUtils.copyBytes(in, out, conf);
    } catch (Exception e)
    {
        e.printStackTrace();
    } finally
    {
        // close Stream
        IOUtils.closeStream(in);
        IOUtils.closeStream(out);
    }
}
 
Example #15
Source File: VersionInfo.java    From big-c with Apache License 2.0
protected VersionInfo(String component) {
  info = new Properties();
  String versionInfoFile = component + "-version-info.properties";
  InputStream is = null;
  try {
    is = Thread.currentThread().getContextClassLoader()
      .getResourceAsStream(versionInfoFile);
    if (is == null) {
      throw new IOException("Resource not found");
    }
    info.load(is);
  } catch (IOException ex) {
    LogFactory.getLog(getClass()).warn("Could not read '" +
        versionInfoFile + "', " + ex.toString(), ex);
  } finally {
    IOUtils.closeStream(is);
  }
}
 
Example #16
Source File: TagCompressionContext.java    From hbase with Apache License 2.0
/**
 * Uncompress tags from the InputStream and writes to the destination array.
 * @param src Stream where the compressed tags are available
 * @param dest Destination array where to write the uncompressed tags
 * @param offset Offset in destination where the tags are to be written
 * @param length Length of all tag bytes
 * @throws IOException
 */
public void uncompressTags(InputStream src, byte[] dest, int offset, int length)
    throws IOException {
  int endOffset = offset + length;
  while (offset < endOffset) {
    byte status = (byte) src.read();
    if (status == Dictionary.NOT_IN_DICTIONARY) {
      int tagLen = StreamUtils.readRawVarint32(src);
      offset = Bytes.putAsShort(dest, offset, tagLen);
      IOUtils.readFully(src, dest, offset, tagLen);
      tagDict.addEntry(dest, offset, tagLen);
      offset += tagLen;
    } else {
      short dictIdx = StreamUtils.toShort(status, (byte) src.read());
      byte[] entry = tagDict.getEntry(dictIdx);
      if (entry == null) {
        throw new IOException("Missing dictionary entry for index " + dictIdx);
      }
      offset = Bytes.putAsShort(dest, offset, entry.length);
      System.arraycopy(entry, 0, dest, offset, entry.length);
      offset += entry.length;
    }
  }
}
 
Example #17
Source File: SupportResource.java    From dremio-oss with Apache License 2.0
@POST
@Path("download")
@Consumes(MediaType.APPLICATION_JSON)
public Response downloadData(@PathParam("jobId") JobId jobId)
    throws IOException, UserNotFoundException, JobResourceNotFoundException {
  final DownloadDataResponse response;
  try {
    final ImmutableSupportRequest request = new ImmutableSupportRequest.Builder()
      .setUserId(context.getUserPrincipal().getName())
      .setJobId(jobId)
      .build();
    response = supportService.downloadSupportRequest(request);
  } catch (JobNotFoundException e) {
    throw JobResourceNotFoundException.fromJobNotFoundException(e);
  }
  final StreamingOutput streamingOutput = new StreamingOutput() {
    @Override
    public void write(OutputStream output) throws IOException, WebApplicationException {
      IOUtils.copyBytes(response.getInput(), output, 4096, true);
    }
  };
  return Response.ok(streamingOutput, MediaType.APPLICATION_OCTET_STREAM)
    .header("Content-Disposition", "attachment; filename=\"" + response.getFileName() + "\"").build();
}
 
Example #18
Source File: FsVolumeImpl.java    From big-c with Apache License 2.0
/**
 * Get the next subdirectory within the block pool slice.
 *
 * @return         The next subdirectory within the block pool slice, or
 *                   null if there are no more.
 */
private String getNextSubDir(String prev, File dir)
      throws IOException {
  List<String> children =
      IOUtils.listDirectory(dir, SubdirFilter.INSTANCE);
  cache = null;
  cacheMs = 0;
  if (children.size() == 0) {
    LOG.trace("getNextSubDir({}, {}): no subdirectories found in {}",
        storageID, bpid, dir.getAbsolutePath());
    return null;
  }
  Collections.sort(children);
  String nextSubDir = nextSorted(children, prev);
  if (nextSubDir == null) {
    LOG.trace("getNextSubDir({}, {}): no more subdirectories found in {}",
        storageID, bpid, dir.getAbsolutePath());
  } else {
    LOG.trace("getNextSubDir({}, {}): picking next subdirectory {} " +
        "within {}", storageID, bpid, nextSubDir, dir.getAbsolutePath());
  }
  return nextSubDir;
}
 
Example #19
Source File: TestFiHftp.java    From hadoop with Apache License 2.0
private static byte[] createFile(FileSystem fs, Path name, long length, 
    short replication, long blocksize) throws IOException {
  final FSDataOutputStream out = fs.create(name, false, 4096,
      replication, blocksize);
  try {
    for(long n = length; n > 0; ) {
      ran.nextBytes(buffer);
      final int w = n < buffer.length? (int)n: buffer.length;
      out.write(buffer, 0, w);
      md5.update(buffer, 0, w);
      n -= w;
    }
  } finally {
    IOUtils.closeStream(out);
  }
  return md5.digest();
}
 
Example #20
Source File: FileUtil.java    From hadoop with Apache License 2.0
/** Copy all files in a directory to one output file (merge). */
public static boolean copyMerge(FileSystem srcFS, Path srcDir, 
                                FileSystem dstFS, Path dstFile, 
                                boolean deleteSource,
                                Configuration conf, String addString) throws IOException {
  dstFile = checkDest(srcDir.getName(), dstFS, dstFile, false);

  if (!srcFS.getFileStatus(srcDir).isDirectory())
    return false;
 
  OutputStream out = dstFS.create(dstFile);
  
  try {
    FileStatus contents[] = srcFS.listStatus(srcDir);
    Arrays.sort(contents);
    for (int i = 0; i < contents.length; i++) {
      if (contents[i].isFile()) {
        InputStream in = srcFS.open(contents[i].getPath());
        try {
          IOUtils.copyBytes(in, out, conf, false);
          if (addString!=null)
            out.write(addString.getBytes("UTF-8"));
              
        } finally {
          in.close();
        } 
      }
    }
  } finally {
    out.close();
  }
  

  if (deleteSource) {
    return srcFS.delete(srcDir, true);
  } else {
    return true;
  }
}
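A hedged usage sketch for the method above (imports as in the surrounding examples; directory and file names are illustrative, and copyMerge was deprecated and later removed from FileUtil in Hadoop 3.x, so this applies to the 2.x API shown here):

Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
// Concatenate every regular file under /data/parts into /data/merged.txt,
// appending a newline after each part and keeping the source directory.
boolean merged = FileUtil.copyMerge(fs, new Path("/data/parts"),
    fs, new Path("/data/merged.txt"),
    false,      // deleteSource: keep /data/parts
    conf,
    "\n");      // addString: separator written after every input file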
 
Example #21
Source File: ReplicaInfo.java    From big-c with Apache License 2.0
/**
 * Copy specified file into a temporary file. Then rename the
 * temporary file to the original name. This will cause any
 * hardlinks to the original file to be removed. The temporary
 * files are created in the same directory. The temporary files will
 * be recovered (especially on Windows) on datanode restart.
 */
private void unlinkFile(File file, Block b) throws IOException {
  File tmpFile = DatanodeUtil.createTmpFile(b, DatanodeUtil.getUnlinkTmpFile(file));
  try {
    FileInputStream in = new FileInputStream(file);
    try {
      FileOutputStream out = new FileOutputStream(tmpFile);
      try {
        IOUtils.copyBytes(in, out, 16*1024);
      } finally {
        out.close();
      }
    } finally {
      in.close();
    }
    if (file.length() != tmpFile.length()) {
      throw new IOException("Copy of file " + file + " size " + file.length()+
                            " into file " + tmpFile +
                            " resulted in a size of " + tmpFile.length());
    }
    FileUtil.replaceFile(tmpFile, file);
  } catch (IOException e) {
    boolean done = tmpFile.delete();
    if (!done) {
      DataNode.LOG.info("detachFile failed to delete temporary file " +
                        tmpFile);
    }
    throw e;
  }
}
 
Example #22
Source File: TestDataTransferKeepalive.java    From big-c with Apache License 2.0
/**
 * Test for the case where the client begins to read a long block, but doesn't
 * read bytes off the stream quickly. The datanode should time out sending the
 * chunks and the transceiver should die, even if it has a long keepalive.
 */
@Test(timeout=300000)
public void testSlowReader() throws Exception {
  // Set a client socket cache expiry time much longer than 
  // the datanode-side expiration time.
  final long CLIENT_EXPIRY_MS = 600000L;
  Configuration clientConf = new Configuration(conf);
  clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, CLIENT_EXPIRY_MS);
  clientConf.set(DFS_CLIENT_CONTEXT, "testSlowReader");
  DistributedFileSystem fs =
      (DistributedFileSystem)FileSystem.get(cluster.getURI(),
          clientConf);
  // Restart the DN with a shorter write timeout.
  DataNodeProperties props = cluster.stopDataNode(0);
  props.conf.setInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
      WRITE_TIMEOUT);
  props.conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
      120000);
  assertTrue(cluster.restartDataNode(props, true));
  dn = cluster.getDataNodes().get(0);
  // Wait for heartbeats to avoid a startup race where we
  // try to write the block while the DN is still starting.
  cluster.triggerHeartbeats();
  
  DFSTestUtil.createFile(fs, TEST_FILE, 1024*1024*8L, (short)1, 0L);
  FSDataInputStream stm = fs.open(TEST_FILE);
  stm.read();
  assertXceiverCount(1);

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    public Boolean get() {
      // DN should time out in sendChunks, and this should force
      // the xceiver to exit.
      return getXceiverCountWithoutServer() == 0;
    }
  }, 500, 50000);

  IOUtils.closeStream(stm);
}
 
Example #23
Source File: RemoteFetcher.java    From tajo with Apache License 2.0
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause)
    throws Exception {
  if (cause instanceof ReadTimeoutException) {
    LOG.warn(cause.getMessage(), cause);
  } else {
    LOG.error("Fetch failed :", cause);
  }

  // this fetch will be retried
  IOUtils.cleanup(LOG, fc, raf);
  endFetch(FetcherState.FETCH_FAILED);
  ctx.close();
}
 
Example #24
Source File: BlurIndexSimpleWriter.java    From incubator-retired-blur with Apache License 2.0
private void closeWriter() {
  _writeLock.lock();
  try {
    if (_lastWrite.get() + _maxWriterIdle < System.currentTimeMillis()) {
      BlurIndexWriter writer = _writer.getAndSet(null);
      if (writer != null) {
        LOG.info("Closing idle writer for table [{0}] shard [{1}]", _tableContext.getTable(),
            _shardContext.getShard());
        IOUtils.cleanup(LOG, writer);
      }
    }
  } finally {
    _writeLock.unlock();
  }
}
 
Example #25
Source File: TestBlockReaderLocalLegacy.java    From big-c with Apache License 2.0
@Test
public void testBothOldAndNewShortCircuitConfigured() throws Exception {
  final short REPL_FACTOR = 1;
  final int FILE_LENGTH = 512;
  Assume.assumeTrue(null == DomainSocket.getLoadingFailureReason());
  TemporarySocketDirectory socketDir = new TemporarySocketDirectory();
  HdfsConfiguration conf = getConfiguration(socketDir);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  socketDir.close();
  FileSystem fs = cluster.getFileSystem();

  Path path = new Path("/foo");
  byte orig[] = new byte[FILE_LENGTH];
  for (int i = 0; i < orig.length; i++) {
    orig[i] = (byte)(i%10);
  }
  FSDataOutputStream fos = fs.create(path, (short)1);
  fos.write(orig);
  fos.close();
  DFSTestUtil.waitReplication(fs, path, REPL_FACTOR);
  FSDataInputStream fis = cluster.getFileSystem().open(path);
  byte buf[] = new byte[FILE_LENGTH];
  IOUtils.readFully(fis, buf, 0, FILE_LENGTH);
  fis.close();
  Assert.assertArrayEquals(orig, buf);
  Arrays.equals(orig, buf);
  cluster.shutdown();
}
 
Example #26
Source File: TestContainerSmallFile.java    From hadoop-ozone with Apache License 2.0
@AfterClass
public static void shutdown() throws InterruptedException {
  if (cluster != null) {
    cluster.shutdown();
  }
  IOUtils.cleanupWithLogger(null, storageContainerLocationClient);
}
 
Example #27
Source File: FileContextTestWrapper.java    From hadoop with Apache License 2.0
public byte[] readFile(Path path, int len) throws IOException {
  DataInputStream dis = fc.open(path);
  byte[] buffer = new byte[len];
  IOUtils.readFully(dis, buffer, 0, len);
  dis.close();
  return buffer;
}
 
Example #28
Source File: TestFSInputChecker.java    From hadoop with Apache License 2.0
private void testFileCorruption(LocalFileSystem fileSys) throws IOException {
  // create a file and verify that checksum corruption results in 
  // a checksum exception on LocalFS
  
  String dir = PathUtils.getTestDirName(getClass());
  Path file = new Path(dir + "/corruption-test.dat");
  Path crcFile = new Path(dir + "/.corruption-test.dat.crc");
  
  writeFile(fileSys, file);
  
  int fileLen = (int)fileSys.getFileStatus(file).getLen();
  
  byte [] buf = new byte[fileLen];

  InputStream in = fileSys.open(file);
  IOUtils.readFully(in, buf, 0, buf.length);
  in.close();
  
  // check .crc corruption
  checkFileCorruption(fileSys, file, crcFile);
  fileSys.delete(file, true);
  
  writeFile(fileSys, file);
  
  // check data corruption
  checkFileCorruption(fileSys, file, file);
  
  fileSys.delete(file, true);
}
 
Example #29
Source File: TestQJMWithFaults.java    From big-c with Apache License 2.0
/**
 * Run through the creation of a log without any faults injected,
 * and count how many RPCs are made to each node. This sets the
 * bounds for the other test cases, so they can exhaustively explore
 * the space of potential failures.
 */
private static long determineMaxIpcNumber() throws Exception {
  Configuration conf = new Configuration();
  MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf).build();
  QuorumJournalManager qjm = null;
  long ret;
  try {
    qjm = createInjectableQJM(cluster);
    qjm.format(FAKE_NSINFO);
    doWorkload(cluster, qjm);
    
    SortedSet<Integer> ipcCounts = Sets.newTreeSet();
    for (AsyncLogger l : qjm.getLoggerSetForTests().getLoggersForTests()) {
      InvocationCountingChannel ch = (InvocationCountingChannel)l;
      ch.waitForAllPendingCalls();
      ipcCounts.add(ch.getRpcCount());
    }

    // All of the loggers should have sent the same number of RPCs, since there
    // were no failures.
    assertEquals(1, ipcCounts.size());
    
    ret = ipcCounts.first();
    LOG.info("Max IPC count = " + ret);
  } finally {
    IOUtils.closeStream(qjm);
    cluster.shutdown();
  }
  return ret;
}
 
Example #30
Source File: MD5FileUtils.java    From big-c with Apache License 2.0
/**
 * Read dataFile and compute its MD5 checksum.
 */
public static MD5Hash computeMd5ForFile(File dataFile) throws IOException {
  InputStream in = new FileInputStream(dataFile);
  try {
    MessageDigest digester = MD5Hash.getDigester();
    DigestInputStream dis = new DigestInputStream(in, digester);
    IOUtils.copyBytes(dis, new IOUtils.NullOutputStream(), 128*1024);
    
    return new MD5Hash(digester.digest());
  } finally {
    IOUtils.closeStream(in);
  }
}
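A short caller sketch for the helper above (imports as in the surrounding examples; the image path is hypothetical):

File fsimage = new File("/data/dfs/name/current/fsimage");   // hypothetical path
MD5Hash md5 = MD5FileUtils.computeMd5ForFile(fsimage);
System.out.println("MD5(" + fsimage + ") = " + md5);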