Java Code Examples for org.apache.hadoop.io.compress.CodecPool#returnDecompressor()

The following examples show how to use org.apache.hadoop.io.compress.CodecPool#returnDecompressor(). These examples are extracted from open source projects; where known, the source project and file are noted above each example.
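
Before the individual examples, it helps to see the lifecycle the pool expects: borrow a Decompressor with CodecPool.getDecompressor(), use it to wrap the input stream, and hand it back with CodecPool.returnDecompressor() in a finally block so it is returned even when reading fails. The sketch below illustrates that pattern; the class name, method name, and file-copying logic are illustrative assumptions, not code taken from any of the projects listed.

// A minimal sketch, assuming a caller supplies a Configuration, an input
// Path, and an OutputStream; the class and method names are hypothetical.
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.Decompressor;

public class CodecPoolUsageSketch {

  public static void copyDecompressed(Configuration conf, Path inputPath,
      OutputStream out) throws IOException {
    // Resolve a codec from the file suffix (e.g. .gz); null means uncompressed
    CompressionCodec codec = new CompressionCodecFactory(conf).getCodec(inputPath);
    InputStream in = inputPath.getFileSystem(conf).open(inputPath);
    Decompressor decompressor = null;
    try {
      if (codec != null) {
        // Borrow a pooled (possibly native) decompressor instead of allocating one
        decompressor = CodecPool.getDecompressor(codec);
        in = codec.createInputStream(in, decompressor);
      }
      IOUtils.copyBytes(in, out, conf, false);
    } finally {
      in.close();
      if (decompressor != null) {
        // Return the instance so the pool can hand it to the next caller
        CodecPool.returnDecompressor(decompressor);
        decompressor = null;
      }
    }
  }
}

Most of the examples below follow the same shape: the decompressor is returned on the close() or cleanup path, and the field is nulled out afterwards so that a second close() cannot return the same instance twice.
 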
Example 1
Source Project: hadoop   File: IFile.java    License: Apache License 2.0
public void close() throws IOException {
  // Close the underlying stream
  in.close();
  
  // Release the buffer
  dataIn = null;
  buffer = null;
  if(readRecordsCounter != null) {
    readRecordsCounter.increment(numRecordsRead);
  }

  // Return the decompressor
  if (decompressor != null) {
    decompressor.reset();
    CodecPool.returnDecompressor(decompressor);
    decompressor = null;
  }
}
 
Example 2
Source Project: ojai   File: JSONFileRecordReader.java    License: Apache License 2.0
@Override
public void close() throws IOException {
  try {
    documentStream.close();
  } catch (Exception e) {
    throw new IOException(
        "Error closing document Stream in JsonFileRecordReader", e);
  }
  try {
    if (inputStream != null) {
      inputStream.close();
    }
  } finally {
    if (decompressor != null) {
      CodecPool.returnDecompressor(decompressor);
      decompressor = null;
    }
  }
}
 
Example 3
@Override
public synchronized void close() throws IOException {
  try {
    if (officeReader != null) {
      officeReader.close();
    }
  } finally {
    // Return this decompressor
    if (decompressor != null) {
      CodecPool.returnDecompressor(decompressor);
      decompressor = null;
    }
    // Close the reader for linked workbooks, which returns their decompressors
    if (this.currentHFR != null) {
      currentHFR.close();
    }
  }
  // Do not close the filesystem! It will cause exceptions in Spark
}
 
Example 4
Source Project: presto   File: HadoopDecompressor.java    License: Apache License 2.0
@Override
public void destroy()
{
    if (destroyed) {
        return;
    }
    destroyed = true;
    CodecPool.returnDecompressor(decompressor);
}
 
Example 5
Source Project: pxf   File: ChunkRecordReader.java    License: Apache License 2.0
/**
 * Closes the input stream.
 */
@Override
public synchronized void close() throws IOException {
    try {
        if (in != null) {
            in.close();
        }
    } finally {
        if (decompressor != null) {
            CodecPool.returnDecompressor(decompressor);
        }
    }
}
 
Example 6
Source Project: RDFS   File: HadoopLogsAnalyzer.java    License: Apache License 2.0
private boolean setNextDirectoryInputStream() throws FileNotFoundException,
    IOException {
  if (input != null) {
    input.close();
    LOG.info("File closed: "+currentFileName);
    input = null;
  }

  if (inputCodec != null) {
    CodecPool.returnDecompressor(inputDecompressor);
    inputDecompressor = null;
    inputCodec = null;
  }

  ++inputDirectoryCursor;

  if (inputDirectoryCursor >= inputDirectoryFiles.length) {
    return false;
  }

  fileFirstLine = true;

  currentFileName = inputDirectoryFiles[inputDirectoryCursor];

  LOG.info("\nOpening file " + currentFileName
      + "  *************************** .");
  LOG.info("This file, " + (inputDirectoryCursor + 1) + "/"
      + inputDirectoryFiles.length + ", starts with line " + lineNumber
      + ".");

  input = maybeUncompressedPath(new Path(inputDirectoryPath, currentFileName));

  return input != null;
}
 
Example 7
Source Project: hadoop   File: LineRecordReader.java    License: Apache License 2.0
public synchronized void close() throws IOException {
  try {
    if (in != null) {
      in.close();
    }
  } finally {
    if (decompressor != null) {
      CodecPool.returnDecompressor(decompressor);
      decompressor = null;
    }
  }
}
 
Example 8
private void closeResource() throws IOException {
  try {
    if(stream_workbook != null) {
      stream_workbook.close();
      stream_workbook = null;
    }
  } finally {
    if (decompressor != null) {
      CodecPool.returnDecompressor(decompressor);
      decompressor = null;
    }
  }
}
 
Example 9
Source Project: incubator-tez   File: IFile.java    License: Apache License 2.0
/**
 * Read entire ifile content to memory.
 *
 * @param buffer destination buffer for the ifile contents
 * @param in stream positioned at the start of the ifile data
 * @param compressedLength length of the data, including the ifile header
 * @param codec compression codec, or null if the data is not compressed
 * @param ifileReadAhead whether read-ahead is enabled
 * @param ifileReadAheadLength number of bytes to read ahead
 * @throws IOException if reading or decompression fails
 */
public static void readToMemory(byte[] buffer, InputStream in, int compressedLength,
    CompressionCodec codec, boolean ifileReadAhead, int ifileReadAheadLength)
    throws IOException {
  boolean isCompressed = IFile.Reader.isCompressedFlagEnabled(in);
  IFileInputStream checksumIn = new IFileInputStream(in,
      compressedLength - IFile.HEADER.length, ifileReadAhead,
      ifileReadAheadLength);
  in = checksumIn;
  Decompressor decompressor = null;
  if (isCompressed && codec != null) {
    decompressor = CodecPool.getDecompressor(codec);
    if (decompressor != null) {
      decompressor.reset();
      in = codec.createInputStream(checksumIn, decompressor);
    } else {
      LOG.warn("Could not obtain decompressor from CodecPool");
      in = checksumIn;
    }
  }
  try {
    IOUtils.readFully(in, buffer, 0, buffer.length - IFile.HEADER.length);
  } catch (IOException ioe) {
    IOUtils.cleanup(LOG, in);
    throw ioe;
  } finally {
    if (decompressor != null) {
      decompressor.reset();
      CodecPool.returnDecompressor(decompressor);
    }
  }
}
 
Example 10
Source Project: hadoop   File: Compression.java    License: Apache License 2.0
public void returnDecompressor(Decompressor decompressor) {
  if (decompressor != null) {
    if(LOG.isDebugEnabled()) {
      LOG.debug("Returned a decompressor: " + decompressor.hashCode());
    }
    CodecPool.returnDecompressor(decompressor);
  }
}
 
Example 11
@AfterEach
public void tearDown() throws IOException {
  // Remove input and output directory
  dfsCluster.getFileSystem().delete(DFS_INPUT_DIR, true);
  dfsCluster.getFileSystem().delete(DFS_OUTPUT_DIR, true);
  // Return any open decompressors to the pool
  for (Decompressor currentDecompressor : this.openDecompressors) {
    if (currentDecompressor != null) {
      CodecPool.returnDecompressor(currentDecompressor);
    }
  }
}
 
Example 12
Source Project: big-c   File: FixedLengthRecordReader.java    License: Apache License 2.0
@Override
public synchronized void close() throws IOException {
  try {
    if (inputStream != null) {
      inputStream.close();
      inputStream = null;
    }
  } finally {
    if (decompressor != null) {
      CodecPool.returnDecompressor(decompressor);
      decompressor = null;
    }
  }
}
 
Example 13
@Override
public synchronized void close() throws IOException {
  try {
    if (inputStream != null) {
      inputStream.close();
      inputStream = null;
    }
  } finally {
    if (decompressor != null) {
      CodecPool.returnDecompressor(decompressor);
      decompressor = null;
    }
  }
}
 
Example 14
Source Project: big-c   File: HadoopLogsAnalyzer.java    License: Apache License 2.0
private boolean setNextDirectoryInputStream() throws FileNotFoundException,
    IOException {
  if (input != null) {
    input.close();
    LOG.info("File closed: " + currentFileName);
    input = null;
  }

  if (inputCodec != null) {
    CodecPool.returnDecompressor(inputDecompressor);
    inputDecompressor = null;
    inputCodec = null;
  }

  ++inputDirectoryCursor;

  if (inputDirectoryCursor >= inputDirectoryFiles.length) {
    return false;
  }

  fileFirstLine = true;

  currentFileName = inputDirectoryFiles[inputDirectoryCursor];

  LOG.info("\nOpening file " + currentFileName
      + "  *************************** .");
  LOG.info("This file, " + (inputDirectoryCursor + 1) + "/"
      + inputDirectoryFiles.length + ", starts with line " + lineNumber
      + ".");

  input =
      maybeUncompressedPath(new Path(inputDirectoryPath, currentFileName));

  return true;
}
 
Example 15
Source Project: incubator-hivemall   File: HadoopUtils.java    License: Apache License 2.0
@Override
public void close() throws IOException {
    super.close();
    if (decompressor != null) {
        CodecPool.returnDecompressor(decompressor);
        this.decompressor = null;
    }
}
 
Example 16
@Override
public void close() throws IOException {
    this.mReader.close();
    CodecPool.returnDecompressor(mDecompressor);
    mDecompressor = null;
}
 
Example 17
Source Project: RDFS   File: Compression.java    License: Apache License 2.0
public void returnDecompressor(Decompressor decompressor) {
  if (decompressor != null) {
    LOG.debug("Returned a decompressor: " + decompressor.hashCode());
    CodecPool.returnDecompressor(decompressor);
  }
}
 
Example 18
Source Project: tez   File: IFile.java    License: Apache License 2.0
/**
 * Read entire ifile content to memory.
 *
 * @param buffer destination buffer for the ifile contents
 * @param in stream positioned at the start of the ifile data
 * @param compressedLength length of the data, including the ifile header
 * @param codec compression codec, or null if the data is not compressed
 * @param ifileReadAhead whether read-ahead is enabled
 * @param ifileReadAheadLength number of bytes to read ahead
 * @throws IOException if reading or decompression fails
 */
public static void readToMemory(byte[] buffer, InputStream in, int compressedLength,
    CompressionCodec codec, boolean ifileReadAhead, int ifileReadAheadLength)
    throws IOException {
  boolean isCompressed = IFile.Reader.isCompressedFlagEnabled(in);
  IFileInputStream checksumIn = new IFileInputStream(in,
      compressedLength - IFile.HEADER.length, ifileReadAhead,
      ifileReadAheadLength);
  in = checksumIn;
  Decompressor decompressor = null;
  if (isCompressed && codec != null) {
    decompressor = CodecPool.getDecompressor(codec);
    if (decompressor != null) {
      decompressor.reset();
      in = getDecompressedInputStreamWithBufferSize(codec, checksumIn, decompressor, compressedLength);
    } else {
      LOG.warn("Could not obtain decompressor from CodecPool");
      in = checksumIn;
    }
  }
  try {
    IOUtils.readFully(in, buffer, 0, buffer.length - IFile.HEADER.length);
    /*
     * We've gotten the amount of data we were expecting. Verify the
     * decompressor has nothing more to offer. This action also forces the
     * decompressor to read any trailing bytes that weren't critical for
     * decompression, which is necessary to keep the stream in sync.
     */
    if (in.read() >= 0) {
      throw new IOException("Unexpected extra bytes from input stream");
    }
  } catch (IOException ioe) {
    if(in != null) {
      try {
        in.close();
      } catch(IOException e) {
        if(LOG.isDebugEnabled()) {
          LOG.debug("Exception in closing " + in, e);
        }
      }
    }
    throw ioe;
  } finally {
    if (decompressor != null) {
      decompressor.reset();
      CodecPool.returnDecompressor(decompressor);
    }
  }
}