Java Code Examples for org.apache.hadoop.fs.FileSystem.Statistics

The following examples show how to use org.apache.hadoop.fs.FileSystem.Statistics. They are extracted from open source projects; the project, source file, and license for each are listed above the example.
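
Before the project-specific examples, here is a minimal, self-contained sketch of the pattern they all rely on: each FileSystem keeps per-scheme Statistics counters, and FileSystem.getAllStatistics() exposes them. The temporary path below is illustrative, not taken from any of the projects that follow.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Path;

public class StatisticsQuickStart {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Any I/O performed through a FileSystem updates the Statistics
    // registered for that file system's scheme.
    Path path = new Path("/tmp/statistics-demo.txt");  // illustrative path
    try (FSDataOutputStream out = fs.create(path, true)) {
      out.writeBytes("hello statistics");
    }

    // One Statistics instance exists per scheme ("file", "hdfs", ...).
    for (Statistics stats : FileSystem.getAllStatistics()) {
      System.out.println(stats.getScheme()
          + ": bytesRead=" + stats.getBytesRead()
          + ", bytesWritten=" + stats.getBytesWritten()
          + ", readOps=" + stats.getReadOps()
          + ", writeOps=" + stats.getWriteOps());
    }

    fs.delete(path, true);
  }
}
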
Example 1
Source Project: hadoop   Source File: MapTask.java    License: Apache License 2.0
TrackedRecordReader(TaskReporter reporter, JobConf job) 
  throws IOException {
  inputRecordCounter = reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
  fileInputByteCounter = reporter.getCounter(FileInputFormatCounter.BYTES_READ);
  this.reporter = reporter;
  
  List<Statistics> matchedStats = null;
  if (this.reporter.getInputSplit() instanceof FileSplit) {
    matchedStats = getFsStatistics(((FileSplit) this.reporter
        .getInputSplit()).getPath(), job);
  }
  fsStats = matchedStats;

  bytesInPrev = getInputBytes(fsStats);
  rawIn = job.getInputFormat().getRecordReader(reporter.getInputSplit(),
      job, reporter);
  bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
 
Example 2
Source Project: hadoop   Source File: MapTask.java    License: Apache License 2.0
NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
    org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
    TaskReporter reporter,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.reporter = reporter;
  this.inputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_INPUT_RECORDS);
  this.fileInputByteCounter = reporter
      .getCounter(FileInputFormatCounter.BYTES_READ);

  List<Statistics> matchedStats = null;
  if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
    matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
        .getPath(), taskContext.getConfiguration());
  }
  fsStats = matchedStats;

  long bytesInPrev = getInputBytes(fsStats);
  this.real = inputFormat.createRecordReader(split, taskContext);
  long bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
 
Example 3
Source Project: hadoop   Source File: MapTask.java    License: Apache License 2.0
@SuppressWarnings("unchecked")
NewDirectOutputCollector(MRJobConfig jobContext,
    JobConf job, TaskUmbilicalProtocol umbilical, TaskReporter reporter) 
throws IOException, ClassNotFoundException, InterruptedException {
  this.reporter = reporter;
  mapOutputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_OUTPUT_RECORDS);
  fileOutputByteCounter = reporter
      .getCounter(FileOutputFormatCounter.BYTES_WRITTEN);

  List<Statistics> matchedStats = null;
  if (outputFormat instanceof org.apache.hadoop.mapreduce.lib.output.FileOutputFormat) {
    matchedStats = getFsStatistics(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
        .getOutputPath(taskContext), taskContext.getConfiguration());
  }
  fsStats = matchedStats;

  long bytesOutPrev = getOutputBytes(fsStats);
  out = outputFormat.getRecordWriter(taskContext);
  long bytesOutCurr = getOutputBytes(fsStats);
  fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
 
Example 4
Source Project: hadoop   Source File: MapTask.java    License: Apache License 2.0
@SuppressWarnings("unchecked")
public void init(MapOutputCollector.Context context
                ) throws IOException, ClassNotFoundException {
  this.reporter = context.getReporter();
  JobConf job = context.getJobConf();
  String finalName = getOutputName(getPartition());
  FileSystem fs = FileSystem.get(job);

  OutputFormat<K, V> outputFormat = job.getOutputFormat();   
  mapOutputRecordCounter = reporter.getCounter(TaskCounter.MAP_OUTPUT_RECORDS);
  
  fileOutputByteCounter = reporter
      .getCounter(FileOutputFormatCounter.BYTES_WRITTEN);

  List<Statistics> matchedStats = null;
  if (outputFormat instanceof FileOutputFormat) {
    matchedStats = getFsStatistics(FileOutputFormat.getOutputPath(job), job);
  }
  fsStats = matchedStats;

  long bytesOutPrev = getOutputBytes(fsStats);
  out = job.getOutputFormat().getRecordWriter(fs, job, finalName, reporter);
  long bytesOutCurr = getOutputBytes(fsStats);
  fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
 
Example 5
Source Project: hadoop   Source File: ReduceTask.java    License: Apache License 2.0
@SuppressWarnings("unchecked")
NewTrackingRecordWriter(ReduceTask reduce,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.outputRecordCounter = reduce.reduceOutputCounter;
  this.fileOutputByteCounter = reduce.fileOutputByteCounter;

  List<Statistics> matchedStats = null;
  if (reduce.outputFormat instanceof org.apache.hadoop.mapreduce.lib.output.FileOutputFormat) {
    matchedStats = getFsStatistics(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
        .getOutputPath(taskContext), taskContext.getConfiguration());
  }

  fsStats = matchedStats;

  long bytesOutPrev = getOutputBytes(fsStats);
  this.real = (org.apache.hadoop.mapreduce.RecordWriter<K, V>) reduce.outputFormat
      .getRecordWriter(taskContext);
  long bytesOutCurr = getOutputBytes(fsStats);
  fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
 
Example 6
Source Project: hadoop   Source File: FCStatisticsBaseTest.java    License: Apache License 2.0
@Test
public void testStatistics() throws IOException, URISyntaxException {
  URI fsUri = getFsUri();
  Statistics stats = FileContext.getStatistics(fsUri);
  Assert.assertEquals(0, stats.getBytesRead());
  Path filePath = fileContextTestHelper.getTestRootPath(fc, "file1");
  createFile(fc, filePath, numBlocks, blockSize);

  Assert.assertEquals(0, stats.getBytesRead());
  verifyWrittenBytes(stats);
  FSDataInputStream fstr = fc.open(filePath);
  byte[] buf = new byte[blockSize];
  int bytesRead = fstr.read(buf, 0, blockSize);
  fstr.read(0, buf, 0, blockSize);
  Assert.assertEquals(blockSize, bytesRead);
  verifyReadBytes(stats);
  verifyWrittenBytes(stats);
  verifyReadBytes(FileContext.getStatistics(getFsUri()));
  Map<URI, Statistics> statsMap = FileContext.getAllStatistics();
  URI exactUri = getSchemeAuthorityUri();
  verifyWrittenBytes(statsMap.get(exactUri));
  fc.delete(filePath, true);
}
 
Example 7
Source Project: big-c   Source File: ReduceTask.java    License: Apache License 2.0
@SuppressWarnings({ "deprecation", "unchecked" })
public OldTrackingRecordWriter(ReduceTask reduce, JobConf job,
    TaskReporter reporter, String finalName) throws IOException {
  this.reduceOutputCounter = reduce.reduceOutputCounter;
  this.fileOutputByteCounter = reduce.fileOutputByteCounter;
  List<Statistics> matchedStats = null;
  if (job.getOutputFormat() instanceof FileOutputFormat) {
    matchedStats = getFsStatistics(FileOutputFormat.getOutputPath(job), job);
  }
  fsStats = matchedStats;

  FileSystem fs = FileSystem.get(job);
  long bytesOutPrev = getOutputBytes(fsStats);
  this.real = job.getOutputFormat().getRecordWriter(fs, job, finalName,
      reporter);
  long bytesOutCurr = getOutputBytes(fsStats);
  fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
 
Example 8
Source Project: stocator   Source File: SwiftAPIClient.java    License: Apache License 2.0
/**
 * Direct HTTP PUT request without the JOSS package
 *
 * @param objName name of the object
 * @param contentType content type
 * @param metadata user metadata to attach to the object
 * @param statistics the FileSystem.Statistics to update as data is written
 * @param overwrite whether an existing object is being overwritten
 * @return FSDataOutputStream that streams data to the object
 * @throws IOException if the output stream cannot be created
 */
@Override
public FSDataOutputStream createObject(String objName, String contentType,
    Map<String, String> metadata, Statistics statistics, boolean overwrite) throws IOException {
  final URL url = new URL(mJossAccount.getAccessURL() + "/" + getURLEncodedObjName(objName));
  LOG.debug("PUT {}. Content-Type : {}", url.toString(), contentType);

  // When overwriting an object, cached metadata will be outdated
  String cachedName = getObjName(container + "/", objName);
  objectCache.remove(cachedName);

  try {
    final OutputStream sos;
    if (nonStreamingUpload) {
      sos = new SwiftNoStreamingOutputStream(mJossAccount, url, contentType,
          metadata, swiftConnectionManager, this);
    } else {
      sos = new SwiftOutputStream(mJossAccount, url, contentType,
          metadata, swiftConnectionManager);
    }
    return new FSDataOutputStream(sos, statistics);
  } catch (IOException e) {
    LOG.error(e.getMessage());
    throw e;
  }
}
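
The key line above is the last one: wrapping the raw OutputStream in an FSDataOutputStream together with the caller's Statistics object is what makes bytes written through the stream count toward getBytesWritten(). A minimal sketch of that wiring, with a ByteArrayOutputStream standing in for the Swift stream and an illustrative scheme name:

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem.Statistics;

public class StatisticsWiringSketch {
  public static void main(String[] args) throws IOException {
    Statistics statistics = new Statistics("swift");  // illustrative scheme
    ByteArrayOutputStream sink = new ByteArrayOutputStream();

    // FSDataOutputStream forwards writes to the wrapped stream and adds
    // the byte count to the supplied Statistics.
    try (FSDataOutputStream out = new FSDataOutputStream(sink, statistics)) {
      out.write(new byte[128]);
    }

    System.out.println(statistics.getBytesWritten());  // expected: 128
  }
}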
 
Example 9
Source Project: hadoop   Source File: Task.java    License: Apache License 2.0
/**
 * Gets a handle to the Statistics instances for the file systems whose
 * scheme matches that of the given path.
 * 
 * @param path the path.
 * @param conf the configuration to extract the scheme from if not part of 
 *   the path.
 * @return a list of matching Statistics instances; empty if none match the
 *   scheme.
 */
protected static List<Statistics> getFsStatistics(Path path, Configuration conf) throws IOException {
  List<Statistics> matchedStats = new ArrayList<FileSystem.Statistics>();
  path = path.getFileSystem(conf).makeQualified(path);
  String scheme = path.toUri().getScheme();
  for (Statistics stats : FileSystem.getAllStatistics()) {
    if (stats.getScheme().equals(scheme)) {
      matchedStats.add(stats);
    }
  }
  return matchedStats;
}
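
Every MapTask and ReduceTask example above uses this helper the same way: snapshot the matched byte counts, perform the call that opens a reader or writer, then charge the difference to a counter. A self-contained sketch of that snapshot pattern follows; the input path is hypothetical, and the matching logic is inlined so it runs outside of Task:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Path;

public class FsStatisticsSnapshot {
  // Inlined equivalent of getFsStatistics(...) combined with the
  // getInputBytes(...) summation used in the MapTask examples.
  static long bytesReadForScheme(Path path, Configuration conf) throws IOException {
    String scheme = path.getFileSystem(conf).makeQualified(path).toUri().getScheme();
    long bytesRead = 0;
    for (Statistics stats : FileSystem.getAllStatistics()) {
      if (stats.getScheme().equals(scheme)) {
        bytesRead += stats.getBytesRead();
      }
    }
    return bytesRead;
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path input = new Path("/tmp/some-input");  // hypothetical path
    long bytesInPrev = bytesReadForScheme(input, conf);
    // ... open a record reader and consume the split here ...
    long bytesInCurr = bytesReadForScheme(input, conf);
    System.out.println("bytes to charge to BYTES_READ: " + (bytesInCurr - bytesInPrev));
  }
}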
 
Example 10
Source Project: hadoop   Source File: Task.java    License: Apache License 2.0
void updateCounters() {
  if (readBytesCounter == null) {
    readBytesCounter = counters.findCounter(scheme,
        FileSystemCounter.BYTES_READ);
  }
  if (writeBytesCounter == null) {
    writeBytesCounter = counters.findCounter(scheme,
        FileSystemCounter.BYTES_WRITTEN);
  }
  if (readOpsCounter == null) {
    readOpsCounter = counters.findCounter(scheme,
        FileSystemCounter.READ_OPS);
  }
  if (largeReadOpsCounter == null) {
    largeReadOpsCounter = counters.findCounter(scheme,
        FileSystemCounter.LARGE_READ_OPS);
  }
  if (writeOpsCounter == null) {
    writeOpsCounter = counters.findCounter(scheme,
        FileSystemCounter.WRITE_OPS);
  }
  long readBytes = 0;
  long writeBytes = 0;
  long readOps = 0;
  long largeReadOps = 0;
  long writeOps = 0;
  for (FileSystem.Statistics stat: stats) {
    readBytes = readBytes + stat.getBytesRead();
    writeBytes = writeBytes + stat.getBytesWritten();
    readOps = readOps + stat.getReadOps();
    largeReadOps = largeReadOps + stat.getLargeReadOps();
    writeOps = writeOps + stat.getWriteOps();
  }
  readBytesCounter.setValue(readBytes);
  writeBytesCounter.setValue(writeBytes);
  readOpsCounter.setValue(readOps);
  largeReadOpsCounter.setValue(largeReadOps);
  writeOpsCounter.setValue(writeOps);
}
 
Example 11
Source Project: hadoop   Source File: MapTask.java    License: Apache License 2.0
private long getInputBytes(List<Statistics> stats) {
  if (stats == null) return 0;
  long bytesRead = 0;
  for (Statistics stat: stats) {
    bytesRead = bytesRead + stat.getBytesRead();
  }
  return bytesRead;
}
 
Example 12
Source Project: hadoop   Source File: MapTask.java    License: Apache License 2.0
private long getOutputBytes(List<Statistics> stats) {
  if (stats == null) return 0;
  long bytesWritten = 0;
  for (Statistics stat: stats) {
    bytesWritten = bytesWritten + stat.getBytesWritten();
  }
  return bytesWritten;
}
 
Example 13
Source Project: hadoop-gpu   Source File: Task.java    License: Apache License 2.0
private synchronized void updateCounters() {
  for(Statistics stat: FileSystem.getAllStatistics()) {
    String uriScheme = stat.getScheme();
    FileSystemStatisticUpdater updater = statisticUpdaters.get(uriScheme);
    if (updater == null) { // a new FileSystem has been found in the cache
      updater = new FileSystemStatisticUpdater(uriScheme, stat);
      statisticUpdaters.put(uriScheme, updater);
    }
    updater.updateCounters();      
  }
}
 
Example 14
Source Project: hadoop   Source File: ReduceTask.java    License: Apache License 2.0
private long getOutputBytes(List<Statistics> stats) {
  if (stats == null) return 0;
  long bytesWritten = 0;
  for (Statistics stat: stats) {
    bytesWritten = bytesWritten + stat.getBytesWritten();
  }
  return bytesWritten;
}
 
Example 15
Source Project: hadoop   Source File: AbstractFileSystem.java    License: Apache License 2.0
/**
 * Prints statistics for all file systems.
 */
public static synchronized void printStatistics() {
  for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) {
    System.out.println("  FileSystem " + pair.getKey().getScheme() + "://"
        + pair.getKey().getAuthority() + ": " + pair.getValue());
  }
}
 
Example 16
Source Project: hadoop   Source File: AbstractFileSystem.java    License: Apache License 2.0
protected static synchronized Map<URI, Statistics> getAllStatistics() {
  Map<URI, Statistics> statsMap = new HashMap<URI, Statistics>(
      STATISTICS_TABLE.size());
  for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) {
    URI key = pair.getKey();
    Statistics value = pair.getValue();
    Statistics newStatsObj = new Statistics(value);
    statsMap.put(URI.create(key.toString()), newStatsObj);
  }
  return statsMap;
}
 
Example 17
Source Project: hadoop   Source File: TestLocalFileSystem.java    License: Apache License 2.0
@Test(timeout = 1000)
public void testStatistics() throws Exception {
  int fileSchemeCount = 0;
  for (Statistics stats : FileSystem.getAllStatistics()) {
    if (stats.getScheme().equals("file")) {
      fileSchemeCount++;
    }
  }
  assertEquals(1, fileSchemeCount);
}
 