Java Code Examples for org.apache.hadoop.fs.FileSystem.Statistics

The following are Java code examples showing how to use the Statistics class of org.apache.hadoop.fs.FileSystem. You can vote up the examples you like; your votes help us surface more good examples.
Example 1
Project: hadoop-oss   File: SFTPInputStream.java   Source Code and License Vote up 6 votes
/**
 * Creates an input stream reading from an open SFTP channel.
 *
 * @param stream the underlying stream to read from; must not be null
 * @param channel the SFTP channel the stream belongs to; must be connected
 * @param stats filesystem statistics object supplied by the caller
 */
SFTPInputStream(InputStream stream, ChannelSftp channel,
    FileSystem.Statistics stats) {
  // Reject an unusable stream or a dead channel before touching any state.
  if (stream == null) {
    throw new IllegalArgumentException(E_NULL_INPUTSTREAM);
  }
  if (channel == null || !channel.isConnected()) {
    throw new IllegalArgumentException(E_CLIENT_NULL);
  }

  this.stats = stats;
  this.channel = channel;
  this.wrappedStream = stream;

  // Fresh stream: open, positioned at the very beginning.
  this.closed = false;
  this.pos = 0;
}
 
Example 2
Project: hadoop   File: TestEvents.java   Source Code and License Vote up 6 votes
/**
 * Serializes a fixed sequence of job-history events through an
 * {@code EventWriter} and returns the resulting bytes for the test to
 * parse back.
 *
 * NOTE(review): {@code getSetupAttemptFieledEvent} and
 * {@code getCleanupAttemptFiledEvent} look like misspellings of
 * "...Failed..."; the names belong to sibling helpers in this test
 * class, so they are left untouched here.
 *
 * @return the serialized event stream as a byte array
 * @throws Exception propagated from event construction or writing
 */
private byte[] getEvents() throws Exception {
  ByteArrayOutputStream output = new ByteArrayOutputStream();
  // The Statistics argument is required by the FSDataOutputStream
  // constructor; the scheme string "scheme" is an arbitrary placeholder.
  FSDataOutputStream fsOutput = new FSDataOutputStream(output,
      new FileSystem.Statistics("scheme"));
  EventWriter writer = new EventWriter(fsOutput);
  // Write one event of each kind, in a fixed order the test relies on.
  writer.write(getJobPriorityChangedEvent());
  writer.write(getJobStatusChangedEvent());
  writer.write(getTaskUpdatedEvent());
  writer.write(getReduceAttemptKilledEvent());
  writer.write(getJobKilledEvent());
  writer.write(getSetupAttemptStartedEvent());
  writer.write(getTaskAttemptFinishedEvent());
  writer.write(getSetupAttemptFieledEvent());
  writer.write(getSetupAttemptKilledEvent());
  writer.write(getCleanupAttemptStartedEvent());
  writer.write(getCleanupAttemptFinishedEvent());
  writer.write(getCleanupAttemptFiledEvent());
  writer.write(getCleanupAttemptKilledEvent());

  writer.flush();
  writer.close();

  return output.toByteArray();
}
 
Example 3
Project: hadoop   File: DFSClient.java   Source Code and License Vote up 6 votes
/**
 * Wraps the stream in a CryptoOutputStream when the underlying file
 * carries encryption info; otherwise returns the stream in a plain
 * wrapper.
 *
 * @param dfsos the raw DFS output stream
 * @param statistics filesystem statistics handed to the wrapper
 * @param startPos starting position within the file
 * @return an HdfsDataOutputStream, crypto-wrapped when required
 * @throws IOException if crypto setup fails
 */
public HdfsDataOutputStream createWrappedOutputStream(DFSOutputStream dfsos,
    FileSystem.Statistics statistics, long startPos) throws IOException {
  final FileEncryptionInfo feInfo = dfsos.getFileEncryptionInfo();
  if (feInfo == null) {
    // No FileEncryptionInfo: the file is not encrypted, wrap as-is.
    return new HdfsDataOutputStream(dfsos, statistics, startPos);
  }
  // Encrypted file: validate the protocol version, then layer a crypto
  // stream over the raw stream. Only one version exists today, so no
  // version-specific branching is needed.
  getCryptoProtocolVersion(feInfo);
  final CryptoCodec codec = getCryptoCodec(conf, feInfo);
  final KeyVersion decrypted = decryptEncryptedDataEncryptionKey(feInfo);
  final CryptoOutputStream cryptoOut = new CryptoOutputStream(
      dfsos, codec, decrypted.getMaterial(), feInfo.getIV(), startPos);
  return new HdfsDataOutputStream(cryptoOut, statistics, startPos);
}
 
Example 4
Project: hadoop   File: NativeAzureFileSystemBaseTest.java   Source Code and License Vote up 6 votes
/**
 * Verifies that the "wasb" Statistics counters track bytes written and
 * bytes read through the filesystem, and are unchanged by a delete.
 */
@Test
public void testStatistics() throws Exception {
  FileSystem.clearStatistics();
  FileSystem.Statistics wasbStats = FileSystem.getStatistics("wasb",
      NativeAzureFileSystem.class);
  // Counters were just cleared, so both must start at zero.
  assertEquals(0, wasbStats.getBytesRead());
  assertEquals(0, wasbStats.getBytesWritten());

  // Writing eight bytes should bump only the write counter.
  Path testFile = new Path("testStats");
  writeString(testFile, "12345678");
  assertEquals(8, wasbStats.getBytesWritten());
  assertEquals(0, wasbStats.getBytesRead());

  // Reading the content back should bump only the read counter.
  String contents = readString(testFile);
  assertEquals("12345678", contents);
  assertEquals(8, wasbStats.getBytesRead());
  assertEquals(8, wasbStats.getBytesWritten());

  // Deleting the file must leave both byte counters untouched.
  assertTrue(fs.delete(testFile, true));
  assertEquals(8, wasbStats.getBytesRead());
  assertEquals(8, wasbStats.getBytesWritten());
}
 
Example 5
Project: hadoop   File: S3AInputStream.java   Source Code and License Vote up 5 votes
/**
 * Creates an S3A input stream positioned at the start of the object.
 *
 * @param bucket S3 bucket name
 * @param key S3 object key
 * @param contentLength length of the object in bytes
 * @param client S3 client used for requests
 * @param stats filesystem statistics object supplied by the caller
 */
public S3AInputStream(String bucket, String key, long contentLength, AmazonS3Client client,
                      FileSystem.Statistics stats) {
  this.client = client;
  this.bucket = bucket;
  this.key = key;
  this.contentLength = contentLength;
  this.stats = stats;
  // wrappedStream starts null — presumably the object stream is opened
  // on demand elsewhere (not visible in this fragment).
  this.wrappedStream = null;
  this.closed = false;
  this.pos = 0;
}
 
Example 6
Project: hadoop-oss   File: FTPInputStream.java   Source Code and License Vote up 5 votes
/**
 * Creates an FTP-backed input stream starting at position zero.
 *
 * @param stream the data stream from the FTP server; must not be null
 * @param client the FTP client owning the transfer; must be connected
 * @param stats filesystem statistics object supplied by the caller
 */
public FTPInputStream(InputStream stream, FTPClient client,
    FileSystem.Statistics stats) {
  // Fail fast on an unusable stream or a disconnected client.
  if (stream == null) {
    throw new IllegalArgumentException("Null InputStream");
  }
  if (client == null || !client.isConnected()) {
    throw new IllegalArgumentException("FTP client null or not connected");
  }

  this.stats = stats;
  this.client = client;
  this.wrappedStream = stream;

  // Fresh stream: open, positioned at the beginning.
  this.closed = false;
  this.pos = 0;
}
 
Example 7
Project: hadoop   File: SwiftNativeInputStream.java   Source Code and License Vote up 5 votes
/**
 * Opens a Swift-backed input stream over the object at {@code path}.
 *
 * @param storeNative store used to fetch the object
 * @param statistics filesystem statistics object supplied by the caller
 * @param path path of the object to read
 * @param bufferSize read buffer size; must be positive
 * @throws IOException if the object cannot be retrieved from the store
 */
public SwiftNativeInputStream(SwiftNativeFileSystemStore storeNative,
    FileSystem.Statistics statistics, Path path, long bufferSize)
        throws IOException {
  this.statistics = statistics;
  this.nativeStore = storeNative;
  this.path = path;
  // A non-positive buffer size is a caller error.
  if (bufferSize <= 0) {
    throw new IllegalArgumentException("Invalid buffer size");
  }
  this.bufferSize = bufferSize;
  // Eagerly open the object's HTTP stream for the initial buffer fill.
  this.httpStream = storeNative.getObject(path).getInputStream();
}
 
Example 8
Project: hadoop   File: Task.java   Source Code and License Vote up 5 votes
/**
 * Gets handles to all Statistics instances whose scheme matches the
 * scheme of the given path.
 *
 * @param path the path; qualified against its filesystem before the
 *   scheme is extracted.
 * @param conf the configuration used to resolve the path's filesystem.
 * @return the matching Statistics instances; empty (never null) when no
 *   registered statistics use the path's scheme.
 * @throws IOException if the filesystem for {@code path} cannot be
 *   created from {@code conf}.
 */
protected static List<Statistics> getFsStatistics(Path path, Configuration conf) throws IOException {
  // Diamond operator: the element type is fixed by the declared List type.
  List<Statistics> matchedStats = new ArrayList<>();
  path = path.getFileSystem(conf).makeQualified(path);
  String scheme = path.toUri().getScheme();
  for (Statistics stats : FileSystem.getAllStatistics()) {
    // equals(null) is simply false, so a scheme-less URI matches nothing.
    if (stats.getScheme().equals(scheme)) {
      matchedStats.add(stats);
    }
  }
  return matchedStats;
}
 
Example 9
Project: hadoop   File: Task.java   Source Code and License Vote up 5 votes
/**
 * Aggregates byte and operation totals across every tracked
 * FileSystem.Statistics instance and publishes them to the framework
 * counters for this scheme. Counter handles are resolved lazily on
 * first use and cached in fields.
 */
void updateCounters() {
  // Resolve each counter handle the first time through; subsequent
  // calls reuse the cached field.
  if (readBytesCounter == null) {
    readBytesCounter =
        counters.findCounter(scheme, FileSystemCounter.BYTES_READ);
  }
  if (writeBytesCounter == null) {
    writeBytesCounter =
        counters.findCounter(scheme, FileSystemCounter.BYTES_WRITTEN);
  }
  if (readOpsCounter == null) {
    readOpsCounter =
        counters.findCounter(scheme, FileSystemCounter.READ_OPS);
  }
  if (largeReadOpsCounter == null) {
    largeReadOpsCounter =
        counters.findCounter(scheme, FileSystemCounter.LARGE_READ_OPS);
  }
  if (writeOpsCounter == null) {
    writeOpsCounter =
        counters.findCounter(scheme, FileSystemCounter.WRITE_OPS);
  }

  // Sum the totals over every statistics object for this scheme.
  long readBytes = 0;
  long writeBytes = 0;
  long readOps = 0;
  long largeReadOps = 0;
  long writeOps = 0;
  for (FileSystem.Statistics s : stats) {
    readBytes += s.getBytesRead();
    writeBytes += s.getBytesWritten();
    readOps += s.getReadOps();
    largeReadOps += s.getLargeReadOps();
    writeOps += s.getWriteOps();
  }

  // Publish the aggregated values.
  readBytesCounter.setValue(readBytes);
  writeBytesCounter.setValue(writeBytes);
  readOpsCounter.setValue(readOps);
  largeReadOpsCounter.setValue(largeReadOps);
  writeOpsCounter.setValue(writeOps);
}
 
Example 10
Project: dremio-oss   File: FSDataOutputStreamWrapper.java   Source Code and License Vote up 4 votes
/**
 * Wraps an existing FSDataOutputStream, delegating to the superclass
 * and keeping a direct handle to the underlying stream.
 *
 * @param os the stream to wrap
 * @param stats filesystem statistics passed through to the superclass
 * @param startPosition initial position passed through to the superclass
 * @throws IOException propagated from the superclass constructor
 */
public FSDataOutputStreamWrapper(FSDataOutputStream os, FileSystem.Statistics stats,
    long startPosition) throws IOException {
  super(os, stats, startPosition);
  underlyingOS = os;
}
 
Example 11
Project: monarch   File: ADataOutputStream.java   Source Code and License Vote up 4 votes
/**
 * Wraps an output stream and records the statistics object and the
 * starting position for later position tracking.
 *
 * @param out the stream to wrap
 * @param stats filesystem statistics object supplied by the caller
 * @param pos initial write position
 * @throws IOException propagated from the superclass constructor
 */
public PositionCache(OutputStream out, FileSystem.Statistics stats, long pos)
    throws IOException {
  super(out);
  statistics = stats;
  position = pos;
}
 
Example 12
Project: monarch   File: ADataOutputStream.java   Source Code and License Vote up 4 votes
/**
 * Builds an output stream whose write position is tracked by an
 * internal PositionCache wrapper, starting at {@code startPosition}.
 *
 * @param out the stream to write to
 * @param stats filesystem statistics handed to the PositionCache
 * @param startPosition initial write position
 * @throws IOException propagated from PositionCache construction
 */
public ADataOutputStream(OutputStream out, FileSystem.Statistics stats, long startPosition)
    throws IOException {
  super(new ADataOutputStream.PositionCache(out, stats, startPosition));
  wrappedStream = out;
}
 
Example 13
Project: hadoop   File: Task.java   Source Code and License Vote up 4 votes
/**
 * Binds this updater to a filesystem scheme and the Statistics
 * instances collected for it.
 *
 * @param stats statistics objects to aggregate
 * @param scheme filesystem scheme the statistics belong to
 */
FileSystemStatisticUpdater(List<FileSystem.Statistics> stats, String scheme) {
  this.stats = stats;
  this.scheme = scheme;
}
 
Example 14
Project: dremio-oss   File: FSDataOutputStreamWrapper.java   Source Code and License Vote up 4 votes
/**
 * Convenience constructor: wraps the stream starting at position 0.
 *
 * @param os the stream to wrap
 * @param stats filesystem statistics passed to the main constructor
 * @throws IOException propagated from the main constructor
 */
public FSDataOutputStreamWrapper(FSDataOutputStream os, FileSystem.Statistics stats)
    throws IOException {
  this(os, stats, 0);
}
 
Example 15
Project: hadoop   File: DFSClient.java   Source Code and License Vote up 4 votes
/**
 * Wraps the stream in a CryptoOutputStream if the underlying file is
 * encrypted, starting at position 0.
 *
 * @param dfsos the raw DFS output stream
 * @param statistics filesystem statistics handed to the wrapper
 * @return an HdfsDataOutputStream from the three-argument overload
 * @throws IOException propagated from the three-argument overload
 */
public HdfsDataOutputStream createWrappedOutputStream(DFSOutputStream dfsos,
    FileSystem.Statistics statistics) throws IOException {
  return createWrappedOutputStream(dfsos, statistics, 0);
}
 
Example 16
Project: hadoop   File: HdfsDataOutputStream.java   Source Code and License Vote up 4 votes
/**
 * Creates a stream over a raw DFSOutputStream, delegating entirely to
 * the superclass.
 *
 * @param out the DFS output stream to wrap
 * @param stats filesystem statistics passed through to the superclass
 * @param startPosition initial position passed through to the superclass
 * @throws IOException propagated from the superclass constructor
 */
public HdfsDataOutputStream(DFSOutputStream out, FileSystem.Statistics stats,
    long startPosition) throws IOException {
  super(out, stats, startPosition);
}
 
Example 17
Project: hadoop   File: S3AFastOutputStream.java   Source Code and License Vote up 4 votes
/**
 * Creates a fast OutputStream that uploads to S3 from memory.
 * For MultiPartUploads, as soon as sufficient bytes have been written to
 * the stream a part is uploaded immediately (by using the low-level
 * multi-part upload API on the AmazonS3Client).
 *
 * @param client AmazonS3Client used for S3 calls
 * @param fs S3AFilesystem
 * @param bucket S3 bucket name
 * @param key S3 key name
 * @param progress report progress in order to prevent timeouts
 * @param statistics track FileSystem.Statistics on the performed operations
 * @param cannedACL used CannedAccessControlList
 * @param serverSideEncryptionAlgorithm algorithm for server side encryption
 * @param partSize size of a single part in a multi-part upload (except
 * last part)
 * @param multiPartThreshold files at least this size use multi-part upload
 * @param threadPoolExecutor executor that runs the part uploads
 * @throws IOException declared for subclass/caller compatibility; this
 *   constructor body itself performs no I/O
 */
public S3AFastOutputStream(AmazonS3Client client, S3AFileSystem fs,
    String bucket, String key, Progressable progress,
    FileSystem.Statistics statistics, CannedAccessControlList cannedACL,
    String serverSideEncryptionAlgorithm, long partSize,
    long multiPartThreshold, ThreadPoolExecutor threadPoolExecutor)
    throws IOException {
  this.bucket = bucket;
  this.key = key;
  this.client = client;
  this.fs = fs;
  this.cannedACL = cannedACL;
  this.statistics = statistics;
  this.serverSideEncryptionAlgorithm = serverSideEncryptionAlgorithm;
  //Ensure limit as ByteArrayOutputStream size cannot exceed Integer.MAX_VALUE
  // partSize and multiPartThreshold are stored as int, so cap anything
  // larger than Integer.MAX_VALUE and warn instead of overflowing.
  if (partSize > Integer.MAX_VALUE) {
    this.partSize = Integer.MAX_VALUE;
    LOG.warn("s3a: MULTIPART_SIZE capped to ~2.14GB (maximum allowed size " +
        "when using 'FAST_UPLOAD = true')");
  } else {
    this.partSize = (int) partSize;
  }
  if (multiPartThreshold > Integer.MAX_VALUE) {
    this.multiPartThreshold = Integer.MAX_VALUE;
    LOG.warn("s3a: MIN_MULTIPART_THRESHOLD capped to ~2.14GB (maximum " +
        "allowed size when using 'FAST_UPLOAD = true')");
  } else {
    this.multiPartThreshold = (int) multiPartThreshold;
  }
  // The in-memory buffer may grow up to the multi-part threshold.
  this.bufferLimit = this.multiPartThreshold;
  this.closed = false;
  // Clamp the configured initial buffer size into [default, bufferLimit].
  // NOTE(review): the check is "< 0", so a configured size of 0 is
  // accepted as-is — confirm whether that is intentional.
  int initialBufferSize = this.fs.getConf()
      .getInt(Constants.FAST_BUFFER_SIZE, Constants.DEFAULT_FAST_BUFFER_SIZE);
  if (initialBufferSize < 0) {
    LOG.warn("s3a: FAST_BUFFER_SIZE should be a positive number. Using " +
        "default value");
    initialBufferSize = Constants.DEFAULT_FAST_BUFFER_SIZE;
  } else if (initialBufferSize > this.bufferLimit) {
    LOG.warn("s3a: automatically adjusting FAST_BUFFER_SIZE to not " +
        "exceed MIN_MULTIPART_THRESHOLD");
    initialBufferSize = this.bufferLimit;
  }
  this.buffer = new ByteArrayOutputStream(initialBufferSize);
  this.executorService = MoreExecutors.listeningDecorator(threadPoolExecutor);
  this.multiPartUpload = null;
  this.progressListener = new ProgressableListener(progress);
  if (LOG.isDebugEnabled()){
    LOG.debug("Initialized S3AFastOutputStream for bucket '{}' key '{}'",
        bucket, key);
  }
}
 
Example 18
Project: hadoop   File: HdfsDataOutputStream.java   Source Code and License Vote up 4 votes
/**
 * Creates a stream over a CryptoOutputStream, which itself must wrap a
 * DFSOutputStream.
 *
 * NOTE(review): the Preconditions check necessarily runs after the
 * super() call (Java requires super first), so the superclass is
 * constructed before an invalid argument is rejected.
 *
 * @param out crypto stream whose wrapped stream must be a DFSOutputStream
 * @param stats filesystem statistics passed through to the superclass
 * @param startPosition initial position passed through to the superclass
 * @throws IOException propagated from the superclass constructor
 */
public HdfsDataOutputStream(CryptoOutputStream out, FileSystem.Statistics stats,
    long startPosition) throws IOException {
  super(out, stats, startPosition);
  Preconditions.checkArgument(out.getWrappedStream() instanceof DFSOutputStream,
      "CryptoOutputStream should wrap a DFSOutputStream");
}
 
Example 19
Project: hadoop   File: DFSClient.java   Source Code and License Vote up 3 votes
/**
 * Create an input stream that obtains a nodelist from the
 * namenode, and then reads from all the right places.  Creates
 * inner subclass of InputStream that does the right out-of-band
 * work.
 *
 * Note: the {@code stats} parameter is ignored — this overload simply
 * delegates to the three-argument form.
 * @deprecated Use {@link #open(String, int, boolean)} instead.
 */
@Deprecated
public DFSInputStream open(String src, int buffersize, boolean verifyChecksum,
                           FileSystem.Statistics stats)
    throws IOException, UnresolvedLinkException {
  return open(src, buffersize, verifyChecksum);
}
 
Example 20
Project: hadoop   File: DFSClient.java   Source Code and License Vote up 3 votes
/**
 * Append to an existing HDFS file.  
 * 
 * @param src file name
 * @param buffersize buffer size
 * @param flag indicates whether to append data to a new block instead of
 *             the last block
 * @param progress for reporting write-progress; null is acceptable.
 * @param statistics file system statistics; null is acceptable.
 * @return an output stream for writing into the file
 * 
 * @see ClientProtocol#append(String, String, EnumSetWritable)
 */
public HdfsDataOutputStream append(final String src, final int buffersize,
    EnumSet<CreateFlag> flag, final Progressable progress,
    final Progressable_unused_ignore_this_line_not_present
    final FileSystem.Statistics statistics) throws IOException {
  // Open the raw append stream, then wrap it (adding encryption if the
  // file requires it) starting at the file's current length.
  final DFSOutputStream out = append(src, buffersize, flag, null, progress);
  return createWrappedOutputStream(out, statistics, out.getInitialLen());
}