Java Code Examples for org.apache.hadoop.mapred.Reporter#NULL

The following examples show how to use org.apache.hadoop.mapred.Reporter#NULL. Each example is drawn from an open-source project; the source file, project, and license are noted above the code.
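Reporter.NULL is the no-op Reporter of the classic org.apache.hadoop.mapred API: its callbacks, such as setStatus(), progress(), and incrCounter(), do nothing. That makes it the standard stand-in wherever a signature demands a Reporter but no live task context exists, which is the common thread through the examples below. The following minimal sketch drives a reducer by hand with it; SumReducer and the inline collector are illustrative, not taken from any of the projects that follow.

import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

public class SumReducer extends MapReduceBase
    implements Reducer<Text, IntWritable, Text, IntWritable> {

  @Override
  public void reduce(Text key, Iterator<IntWritable> values,
      OutputCollector<Text, IntWritable> output, Reporter reporter)
      throws IOException {
    int sum = 0;
    while (values.hasNext()) {
      sum += values.next().get();
      reporter.progress(); // a no-op when the caller passed Reporter.NULL
    }
    output.collect(key, new IntWritable(sum));
  }

  // Drive the reducer directly, the way a unit test (or the combiner
  // path in Examples 1 and 2) would, with no live task context.
  public static void main(String[] args) throws IOException {
    Iterator<IntWritable> values = Arrays.asList(
        new IntWritable(1), new IntWritable(2), new IntWritable(3)).iterator();
    OutputCollector<Text, IntWritable> collector =
        (k, v) -> System.out.println(k + "\t" + v);
    new SumReducer().reduce(new Text("k"), values, collector, Reporter.NULL);
  }
}

Because every callback on Reporter.NULL does nothing, the reducer behaves exactly as it would inside a job, minus the progress bookkeeping.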
Example 1
Source File: MergeManagerImpl.java    From hadoop with Apache License 2.0
private void combineAndSpill(
    RawKeyValueIterator kvIter,
    Counters.Counter inCounter) throws IOException {
  JobConf job = jobConf;
  Reducer combiner = ReflectionUtils.newInstance(combinerClass, job);
  Class<K> keyClass = (Class<K>) job.getMapOutputKeyClass();
  Class<V> valClass = (Class<V>) job.getMapOutputValueClass();
  RawComparator<K> comparator = 
    (RawComparator<K>)job.getCombinerKeyGroupingComparator();
  try {
    CombineValuesIterator values = new CombineValuesIterator(
        kvIter, comparator, keyClass, valClass, job, Reporter.NULL,
        inCounter);
    // Progress is not reported from the combiner here: the no-op
    // Reporter satisfies both the iterator constructor and the
    // reduce() signature.
    while (values.more()) {
      combiner.reduce(values.getKey(), values, combineCollector,
                      Reporter.NULL);
      values.nextKey();
    }
  } finally {
    combiner.close();
  }
}
 
Example 2
Source File: MergeManagerImpl.java    From big-c with Apache License 2.0
private void combineAndSpill(
    RawKeyValueIterator kvIter,
    Counters.Counter inCounter) throws IOException {
  JobConf job = jobConf;
  Reducer combiner = ReflectionUtils.newInstance(combinerClass, job);
  Class<K> keyClass = (Class<K>) job.getMapOutputKeyClass();
  Class<V> valClass = (Class<V>) job.getMapOutputValueClass();
  RawComparator<K> comparator = 
    (RawComparator<K>)job.getCombinerKeyGroupingComparator();
  try {
    CombineValuesIterator values = new CombineValuesIterator(
        kvIter, comparator, keyClass, valClass, job, Reporter.NULL,
        inCounter);
    // Progress is not reported from the combiner here: the no-op
    // Reporter satisfies both the iterator constructor and the
    // reduce() signature.
    while (values.more()) {
      combiner.reduce(values.getKey(), values, combineCollector,
                      Reporter.NULL);
      values.nextKey();
    }
  } finally {
    combiner.close();
  }
}
 
Example 3
Source File: ParquetRecordWriterUtil.java    From presto with Apache License 2.0
private static RecordWriter createParquetWriter(Path target, JobConf conf, Properties properties)
        throws IOException
{
    if (conf.get(DataWritableWriteSupport.PARQUET_HIVE_SCHEMA) == null) {
        List<String> columnNames = Splitter.on(',').splitToList(properties.getProperty(IOConstants.COLUMNS));
        List<TypeInfo> columnTypes = getTypeInfosFromTypeString(properties.getProperty(IOConstants.COLUMNS_TYPES));
        MessageType schema = HiveSchemaConverter.convert(columnNames, columnTypes);
        setParquetSchema(conf, schema);
    }

    ParquetOutputFormat<ParquetHiveRecord> outputFormat = new ParquetOutputFormat<>(new DataWritableWriteSupport());

    return new ParquetRecordWriterWrapper(outputFormat, conf, target.toString(), Reporter.NULL, properties);
}
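Here Presto builds Hive's Parquet RecordWriter outside of any running MapReduce task, so there is no meaningful Reporter to pass; Reporter.NULL exists precisely to satisfy such constructor parameters.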
 
Example 4
Source File: TestMRAppWithCombiner.java    From hadoop with Apache License 2.0
public void reduce(K key, Iterator<V> values, OutputCollector<K, V> output,
    Reporter reporter) throws IOException {
  if (Reporter.NULL == reporter) {
    Assert.fail("A valid Reporter should have been used, but Reporter.NULL was used");
  }
}
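This test exercises the constant from the opposite direction: it fails if the framework passed Reporter.NULL to the combiner's reduce() where a real Reporter was expected.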
 
Example 5
Source File: PipeMapRed.java    From hadoop with Apache License 2.0
void waitOutputThreads() throws IOException {
  try {
    if (outThread_ == null) {
      // This happens only when the reducer has empty input, so reduce()
      // is never called in this task. The reducer may still generate
      // output, which is very uncommon and arguably need not be
      // supported; we don't write that output to HDFS, but we consume
      // it anyway so the reducer cannot hang forever.

      OutputCollector collector = new OutputCollector() {
        public void collect(Object key, Object value)
          throws IOException {
          //just consume it, no need to write the record anywhere
        }
      };
      Reporter reporter = Reporter.NULL; // dummy reporter
      startOutputThreads(collector, reporter);
    }
    int exitVal = sim.waitFor();
    // how'd it go?
    if (exitVal != 0) {
      if (nonZeroExitIsFailure_) {
        throw new RuntimeException("PipeMapRed.waitOutputThreads(): subprocess failed with code "
                                   + exitVal);
      } else {
        LOG.info("PipeMapRed.waitOutputThreads(): subprocess exited with code "
            + exitVal + " in " + PipeMapRed.class.getName());
      }
    }
    if (outThread_ != null) {
      outThread_.join(joinDelay_);
    }
    if (errThread_ != null) {
      errThread_.join(joinDelay_);
    }
    if (outerrThreadsThrowable != null) {
      throw new RuntimeException(outerrThreadsThrowable);
    }
  } catch (InterruptedException e) {
    //ignore
  }
}
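This is the fullest illustration of the stand-in pattern: a throwaway OutputCollector that discards every record is paired with Reporter.NULL so the output threads can be started purely to drain the subprocess and keep it from blocking.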
 
Example 6
Source File: TestMRAppWithCombiner.java    From big-c with Apache License 2.0
public void reduce(K key, Iterator<V> values, OutputCollector<K, V> output,
    Reporter reporter) throws IOException {
  if (Reporter.NULL == reporter) {
    Assert.fail("A valid Reporter should have been used, but Reporter.NULL was used");
  }
}
 
Example 7
Source File: PipeMapRed.java    From big-c with Apache License 2.0
void waitOutputThreads() throws IOException {
  try {
    if (outThread_ == null) {
      // This happens only when the reducer has empty input, so reduce()
      // is never called in this task. The reducer may still generate
      // output, which is very uncommon and arguably need not be
      // supported; we don't write that output to HDFS, but we consume
      // it anyway so the reducer cannot hang forever.

      OutputCollector collector = new OutputCollector() {
        public void collect(Object key, Object value)
          throws IOException {
          //just consume it, no need to write the record anywhere
        }
      };
      Reporter reporter = Reporter.NULL; // dummy reporter
      startOutputThreads(collector, reporter);
    }
    int exitVal = sim.waitFor();
    // how'd it go?
    if (exitVal != 0) {
      if (nonZeroExitIsFailure_) {
        throw new RuntimeException("PipeMapRed.waitOutputThreads(): subprocess failed with code "
                                   + exitVal);
      } else {
        LOG.info("PipeMapRed.waitOutputThreads(): subprocess exited with code "
            + exitVal + " in " + PipeMapRed.class.getName());
      }
    }
    if (outThread_ != null) {
      outThread_.join(joinDelay_);
    }
    if (errThread_ != null) {
      errThread_.join(joinDelay_);
    }
    if (outerrThreadsThrowable != null) {
      throw new RuntimeException(outerrThreadsThrowable);
    }
  } catch (InterruptedException e) {
    //ignore
  }
}
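Examples 8 and 11 below repeat this method from the RDFS and hadoop-gpu forks; apart from reporting the non-zero exit code through logprintln instead of LOG.info (and dropping the throws clause), the code is the same.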
 
Example 8
Source File: PipeMapRed.java    From RDFS with Apache License 2.0
void waitOutputThreads() {
  try {
    if (outThread_ == null) {
      // This happens only when the reducer has empty input, so reduce()
      // is never called in this task. The reducer may still generate
      // output, which is very uncommon and arguably need not be
      // supported; we don't write that output to HDFS, but we consume
      // it anyway so the reducer cannot hang forever.

      OutputCollector collector = new OutputCollector() {
        public void collect(Object key, Object value)
          throws IOException {
          //just consume it, no need to write the record anywhere
        }
      };
      Reporter reporter = Reporter.NULL; // dummy reporter
      startOutputThreads(collector, reporter);
    }
    int exitVal = sim.waitFor();
    // how'd it go?
    if (exitVal != 0) {
      if (nonZeroExitIsFailure_) {
        throw new RuntimeException("PipeMapRed.waitOutputThreads(): subprocess failed with code "
                                   + exitVal);
      } else {
        logprintln("PipeMapRed.waitOutputThreads(): subprocess exited with code " + exitVal
                   + " in " + PipeMapRed.class.getName());
      }
    }
    if (outThread_ != null) {
      outThread_.join(joinDelay_);
    }
    if (errThread_ != null) {
      errThread_.join(joinDelay_);
    }
    if (outerrThreadsThrowable != null) {
      throw new RuntimeException(outerrThreadsThrowable);
    }
  } catch (InterruptedException e) {
    //ignore
  }
}
 
Example 9
Source File: JobContextImpl.java    From incubator-tez with Apache License 2.0
public JobContextImpl(JobConf conf, TezDAGID dagId) {
  this(conf, dagId, Reporter.NULL);
}
 
Example 10
Source File: JobContextImpl.java    From tez with Apache License 2.0
public JobContextImpl(JobConf conf, TezDAGID dagId) {
  this(conf, dagId, Reporter.NULL);
}
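Examples 9 and 10 are the same convenience constructor in incubator-tez and its graduated successor tez: a caller with no progress to report gets Reporter.NULL supplied as the default third argument.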
 
Example 11
Source File: PipeMapRed.java    From hadoop-gpu with Apache License 2.0
void waitOutputThreads() {
  try {
    if (outThread_ == null) {
      // This happens only when the reducer has empty input, so reduce()
      // is never called in this task. The reducer may still generate
      // output, which is very uncommon and arguably need not be
      // supported; we don't write that output to HDFS, but we consume
      // it anyway so the reducer cannot hang forever.

      OutputCollector collector = new OutputCollector() {
        public void collect(Object key, Object value)
          throws IOException {
          //just consume it, no need to write the record anywhere
        }
      };
      Reporter reporter = Reporter.NULL; // dummy reporter
      startOutputThreads(collector, reporter);
    }
    int exitVal = sim.waitFor();
    // how'd it go?
    if (exitVal != 0) {
      if (nonZeroExitIsFailure_) {
        throw new RuntimeException("PipeMapRed.waitOutputThreads(): subprocess failed with code "
                                   + exitVal);
      } else {
        logprintln("PipeMapRed.waitOutputThreads(): subprocess exited with code " + exitVal
                   + " in " + PipeMapRed.class.getName());
      }
    }
    if (outThread_ != null) {
      outThread_.join(joinDelay_);
    }
    if (errThread_ != null) {
      errThread_.join(joinDelay_);
    }
    if (outerrThreadsThrowable != null) {
      throw new RuntimeException(outerrThreadsThrowable);
    }
  } catch (InterruptedException e) {
    //ignore
  }
}