Java Code Examples for org.apache.hadoop.util.ReflectionUtils#newInstance()

The following examples show how to use org.apache.hadoop.util.ReflectionUtils#newInstance(). Each example is drawn from an open-source project; the source file, project, and license are noted above the code.
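Before the project-specific examples, a minimal self-contained sketch of the typical pattern may help. The property name my.plugin.class, the MyPlugin interface, and the DefaultPlugin class below are hypothetical and used only for illustration: ReflectionUtils.newInstance(clazz, conf) invokes the class's no-argument constructor and, when the resulting object implements Configurable, hands it the supplied Configuration via setConf().

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.ReflectionUtils;

public class NewInstanceSketch {

  /** Hypothetical plugin interface, used only for this illustration. */
  public interface MyPlugin {
    void start();
  }

  /** Receives the Configuration because Configured implements Configurable. */
  public static class DefaultPlugin extends Configured implements MyPlugin {
    @Override
    public void start() {
      // getConf() was populated by ReflectionUtils.newInstance(...).
      System.out.println("buffer size = " + getConf().getInt("plugin.buffer.size", 4096));
    }
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // "my.plugin.class" is a made-up key; the examples below read real project-specific keys.
    Class<? extends MyPlugin> clazz =
        conf.getClass("my.plugin.class", DefaultPlugin.class, MyPlugin.class);
    MyPlugin plugin = ReflectionUtils.newInstance(clazz, conf);
    plugin.start();
  }
}

Most of the examples below follow this same shape: resolve a Class object from a Configuration (or from Class.forName), then let ReflectionUtils.newInstance construct and configure the instance.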
Example 1
Source File: ResourceManager.java    From hadoop with Apache License 2.0
protected ReservationSystem createReservationSystem() {
  String reservationClassName =
      conf.get(YarnConfiguration.RM_RESERVATION_SYSTEM_CLASS,
          AbstractReservationSystem.getDefaultReservationSystem(scheduler));
  if (reservationClassName == null) {
    return null;
  }
  LOG.info("Using ReservationSystem: " + reservationClassName);
  try {
    Class<?> reservationClazz = Class.forName(reservationClassName);
    if (ReservationSystem.class.isAssignableFrom(reservationClazz)) {
      return (ReservationSystem) ReflectionUtils.newInstance(
          reservationClazz, this.conf);
    } else {
      throw new YarnRuntimeException("Class: " + reservationClassName
          + " not instance of " + ReservationSystem.class.getCanonicalName());
    }
  } catch (ClassNotFoundException e) {
    throw new YarnRuntimeException(
        "Could not instantiate ReservationSystem: " + reservationClassName, e);
  }
}
 
Example 2
Source File: CodecMap.java    From aliyun-maxcompute-data-collectors with Apache License 2.0
/**
 * Given a codec name, instantiate the concrete implementation
 * class that implements it.
 * @throws com.cloudera.sqoop.io.UnsupportedCodecException if a codec cannot
 * be found with the supplied name.
 */
public static CompressionCodec getCodec(String codecName,
  Configuration conf) throws com.cloudera.sqoop.io.UnsupportedCodecException {
  // Try standard Hadoop mechanism first
  CompressionCodec codec = getCodecByName(codecName, conf);
  if (codec != null) {
    return codec;
  }
  // Fall back to Sqoop mechanism
  String codecClassName = null;
  try {
    codecClassName = getCodecClassName(codecName);
    if (null == codecClassName) {
      return null;
    }
    Class<? extends CompressionCodec> codecClass =
        (Class<? extends CompressionCodec>)
        conf.getClassByName(codecClassName);
    return (CompressionCodec) ReflectionUtils.newInstance(
        codecClass, conf);
  } catch (ClassNotFoundException cnfe) {
    throw new com.cloudera.sqoop.io.UnsupportedCodecException(
        "Cannot find codec class "
        + codecClassName + " for codec " + codecName);
  }
}
 
Example 3
Source File: TabletSplitSplit.java    From datawave with Apache License 2.0
/**
 * {@inheritDoc}
 * 
 * @throws IOException
 *             If the child InputSplit cannot be read, typically for failing access checks.
 */
@SuppressWarnings("unchecked")
// Generic array assignment
public void readFields(DataInput in) throws IOException {
    table = WritableUtils.readString(in);
    int card = WritableUtils.readVInt(in);
    if (splits == null || splits.length != card) {
        splits = new InputSplit[card];
    }
    Class<? extends InputSplit>[] cls = new Class[card];
    try {
        for (int i = 0; i < card; ++i) {
            cls[i] = Class.forName(Text.readString(in)).asSubclass(InputSplit.class);
        }
        for (int i = 0; i < card; ++i) {
            splits[i] = ReflectionUtils.newInstance(cls[i], null);
            if (splits[i] instanceof Writable) {
                ((Writable) splits[i]).readFields(in);
            }
        }
    } catch (ClassNotFoundException e) {
        throw (IOException) new IOException("Failed split init").initCause(e);
    }
}
 
Example 4
Source File: TotalOrderPartitioner.java    From RDFS with Apache License 2.0
/**
 * Read the cut points from the given IFile.
 * @param fs The file system
 * @param p The path to read
 * @param keyClass The map output key class
 * @param job The job config
 * @throws IOException
 */
                               // matching key types enforced by passing in
@SuppressWarnings("unchecked") // map output key class
private K[] readPartitions(FileSystem fs, Path p, Class<K> keyClass,
    JobConf job) throws IOException {
  SequenceFile.Reader reader = new SequenceFile.Reader(fs, p, job);
  ArrayList<K> parts = new ArrayList<K>();
  K key = (K) ReflectionUtils.newInstance(keyClass, job);
  NullWritable value = NullWritable.get();
  while (reader.next(key, value)) {
    parts.add(key);
    key = (K) ReflectionUtils.newInstance(keyClass, job);
  }
  reader.close();
  return parts.toArray((K[])Array.newInstance(keyClass, parts.size()));
}
 
Example 5
Source File: HttpFSFileSystem.java    From big-c with Apache License 2.0
/**
 * Called after a new FileSystem instance is constructed.
 *
 * @param name a uri whose authority section names the host, port, etc. for this FileSystem
 * @param conf the configuration
 */
@Override
public void initialize(URI name, Configuration conf) throws IOException {
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

  //the real user is the one that has the Kerberos credentials needed for
  //SPNEGO to work
  realUser = ugi.getRealUser();
  if (realUser == null) {
    realUser = UserGroupInformation.getLoginUser();
  }
  super.initialize(name, conf);
  try {
    uri = new URI(name.getScheme() + "://" + name.getAuthority());
  } catch (URISyntaxException ex) {
    throw new IOException(ex);
  }

  Class<? extends DelegationTokenAuthenticator> klass =
      getConf().getClass("httpfs.authenticator.class",
          KerberosDelegationTokenAuthenticator.class,
          DelegationTokenAuthenticator.class);
  DelegationTokenAuthenticator authenticator =
      ReflectionUtils.newInstance(klass, getConf());
  authURL = new DelegationTokenAuthenticatedURL(authenticator);
}
 
Example 6
Source File: HttpServer2.java    From knox with Apache License 2.0
/** Get an array of FilterConfiguration specified in the conf */
private static FilterInitializer[] getFilterInitializers(Configuration conf) {
  if (conf == null) {
    return null;
  }

  Class<?>[] classes = conf.getClasses(FILTER_INITIALIZER_PROPERTY);
  if (classes == null) {
    return null;
  }

  FilterInitializer[] initializers = new FilterInitializer[classes.length];
  for(int i = 0; i < classes.length; i++) {
    initializers[i] = (FilterInitializer)ReflectionUtils.newInstance(
        classes[i], conf);
  }
  return initializers;
}
 
Example 7
Source File: HttpServer.java    From hadoop-gpu with Apache License 2.0
/** Get an array of FilterConfiguration specified in the conf */
private static FilterInitializer[] getFilterInitializers(Configuration conf) {
  if (conf == null) {
    return null;
  }

  Class<?>[] classes = conf.getClasses(FILTER_INITIALIZER_PROPERTY);
  if (classes == null) {
    return null;
  }

  FilterInitializer[] initializers = new FilterInitializer[classes.length];
  for(int i = 0; i < classes.length; i++) {
    initializers[i] = (FilterInitializer)ReflectionUtils.newInstance(
        classes[i], conf);
  }
  return initializers;
}
 
Example 8
Source File: HttpServer.java    From hadoop with Apache License 2.0
/** Get an array of FilterConfiguration specified in the conf */
private static FilterInitializer[] getFilterInitializers(Configuration conf) {
  if (conf == null) {
    return null;
  }

  Class<?>[] classes = conf.getClasses(FILTER_INITIALIZER_PROPERTY);
  if (classes == null) {
    return null;
  }

  FilterInitializer[] initializers = new FilterInitializer[classes.length];
  for(int i = 0; i < classes.length; i++) {
    initializers[i] = (FilterInitializer)ReflectionUtils.newInstance(
        classes[i], conf);
  }
  return initializers;
}
 
Example 9
Source File: JobConf.java    From big-c with Apache License 2.0
/**
 * Get the {@link InputFormat} implementation for the map-reduce job,
 * defaults to {@link TextInputFormat} if not specified explicitly.
 * 
 * @return the {@link InputFormat} implementation for the map-reduce job.
 */
public InputFormat getInputFormat() {
  return ReflectionUtils.newInstance(getClass("mapred.input.format.class",
                                                           TextInputFormat.class,
                                                           InputFormat.class),
                                                  this);
}
 
Example 10
Source File: Codec.java    From RDFS with Apache License 2.0
public ErasureCode createErasureCode(Configuration conf) {
  // Resolve the erasure code implementation class
  Class<?> erasureCode = null;
  try {
    erasureCode = conf.getClass(ERASURE_CODE_KEY_PREFIX + this.id,
          conf.getClassByName(this.erasureCodeClass));
  } catch (ClassNotFoundException e) {
    throw new RuntimeException(e);
  }
  ErasureCode code = (ErasureCode) ReflectionUtils.newInstance(erasureCode,
      conf);
  code.init(this);
  return code;
}
 
Example 11
Source File: Client.java    From RDFS with Apache License 2.0
private void receiveResponse() {
  if (shouldCloseConnection.get()) {
    return;
  }
  touch();
  
  try {
    int id = in.readInt();                    // try to read an id

    if (LOG.isDebugEnabled())
      LOG.debug(getName() + " got value #" + id);

    Call call = calls.get(id);

    int state = in.readInt();     // read call status
    if (state == Status.SUCCESS.state) {
      Writable value = ReflectionUtils.newInstance(valueClass, conf);
      value.readFields(in);                 // read value
      call.setValue(value);
      calls.remove(id);
    } else if (state == Status.ERROR.state) {
      call.setException(new RemoteException(WritableUtils.readString(in),
                                            WritableUtils.readString(in)));
      calls.remove(id);
    } else if (state == Status.FATAL.state) {
      // Close the connection
      markClosed(new RemoteException(WritableUtils.readString(in), 
                                     WritableUtils.readString(in)));
    }
  } catch (IOException e) {
    markClosed(e);
  } catch (Throwable te) {
    markClosed((IOException)new IOException().initCause(te));
  }
}
 
Example 12
Source File: MRCombiner.java    From incubator-tez with Apache License 2.0
private void runNewCombiner(final TezRawKeyValueIterator rawIter, final Writer writer) throws InterruptedException, IOException {
  
  RecordWriter recordWriter = new RecordWriter() {

    @Override
    public void write(Object key, Object value) throws IOException,
        InterruptedException {
      writer.append(key, value);
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException,
        InterruptedException {
      // Will be closed by whoever invokes the combiner.
    }
  };
  
  Class<? extends org.apache.hadoop.mapreduce.Reducer> reducerClazz = (Class<? extends org.apache.hadoop.mapreduce.Reducer>) conf
      .getClass(MRJobConfig.COMBINE_CLASS_ATTR, null,
          org.apache.hadoop.mapreduce.Reducer.class);
  org.apache.hadoop.mapreduce.Reducer reducer = ReflectionUtils.newInstance(reducerClazz, conf);
  
  org.apache.hadoop.mapreduce.Reducer.Context reducerContext =
      createReduceContext(
          conf,
          mrTaskAttemptID,
          rawIter,
          new MRCounters.MRCounter(combineInputKeyCounter),
          new MRCounters.MRCounter(combineInputValueCounter),
          recordWriter,
          reporter,
          (RawComparator)comparator,
          keyClass,
          valClass);
  
  reducer.run(reducerContext);
  recordWriter.close(reducerContext);
}
 
Example 13
Source File: MapProcessor.java    From tez with Apache License 2.0
void runOldMapper(
    final JobConf job,
    final MRTaskReporter reporter,
    final MRInputLegacy input,
    final KeyValueWriter output
    ) throws IOException, InterruptedException {

  // Initialize input in-line since it sets parameters which may be used by the processor.
  // Done only for MRInput.
  // TODO use new method in MRInput to get required info
  //input.initialize(job, master);
  
  InputSplit inputSplit = input.getOldInputSplit();
  
  updateJobWithSplit(job, inputSplit);

  RecordReader in = new OldRecordReader(input);

  OutputCollector collector = new OldOutputCollector(output);

  MapRunnable runner =
      (MapRunnable)ReflectionUtils.newInstance(job.getMapRunnerClass(), job);

  runner.run(in, collector, (Reporter)reporter);
  
  // Set progress to 1.0f if there was no exception.
  reporter.setProgress(1.0f);
  // start the sort phase only if there are reducers
  this.statusUpdate();
}
 
Example 14
Source File: DelegatingOutputFormat.java    From aliyun-maxcompute-data-collectors with Apache License 2.0
public DelegatingRecordWriter(TaskAttemptContext context)
    throws ClassNotFoundException {

  this.conf = context.getConfiguration();

  @SuppressWarnings("unchecked")
  Class<? extends FieldMapProcessor> procClass =
      (Class<? extends FieldMapProcessor>)
      conf.getClass(DELEGATE_CLASS_KEY, null);
  this.mapProcessor = ReflectionUtils.newInstance(procClass, this.conf);
}
 
Example 15
Source File: TestRecovery.java    From hadoop with Apache License 2.0
private void writeBadOutput(TaskAttempt attempt, Configuration conf)
  throws Exception {
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, 
      TypeConverter.fromYarn(attempt.getID()));
 
  TextOutputFormat<?, ?> theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat
      .getRecordWriter(tContext);
  
  NullWritable nullWritable = NullWritable.get();
  try {
    theRecordWriter.write(key2, val2);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val2);
    theRecordWriter.write(nullWritable, val1);
    theRecordWriter.write(key1, nullWritable);
    theRecordWriter.write(key2, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key1, val1);
  } finally {
    theRecordWriter.close(tContext);
  }
  
  OutputFormat outputFormat = ReflectionUtils.newInstance(
      tContext.getOutputFormatClass(), conf);
  OutputCommitter committer = outputFormat.getOutputCommitter(tContext);
  committer.commitTask(tContext);
}
 
Example 16
Source File: GenericWritable.java    From hadoop with Apache License 2.0
@Override
public void readFields(DataInput in) throws IOException {
  type = in.readByte();
  Class<? extends Writable> clazz = getTypes()[type & 0xff];
  try {
    instance = ReflectionUtils.newInstance(clazz, conf);
  } catch (Exception e) {
    e.printStackTrace();
    throw new IOException("Cannot initialize the class: " + clazz);
  }
  instance.readFields(in);
}
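The type byte read above indexes into the array returned by getTypes(), which each concrete GenericWritable subclass must supply. A minimal hypothetical subclass (class name and member types chosen only for illustration) could look like this:

import org.apache.hadoop.io.GenericWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

public class ExampleGenericWritable extends GenericWritable {
  // The byte written by GenericWritable is an index into this array, which is
  // why readFields() masks it with 0xff before the lookup.
  @SuppressWarnings("unchecked")
  private static final Class<? extends Writable>[] TYPES = new Class[] {
      IntWritable.class,
      Text.class
  };

  @Override
  protected Class<? extends Writable>[] getTypes() {
    return TYPES;
  }
}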
 
Example 17
Source File: Client.java    From hadoop-gpu with Apache License 2.0
private void receiveResponse() {
  if (shouldCloseConnection.get()) {
    return;
  }
  touch();
  
  try {
    int id = in.readInt();                    // try to read an id

    if (LOG.isDebugEnabled())
      LOG.debug(getName() + " got value #" + id);

    Call call = calls.remove(id);

    int state = in.readInt();     // read call status
    if (state == Status.SUCCESS.state) {
      Writable value = ReflectionUtils.newInstance(valueClass, conf);
      value.readFields(in);                 // read value
      call.setValue(value);
    } else if (state == Status.ERROR.state) {
      call.setException(new RemoteException(WritableUtils.readString(in),
                                            WritableUtils.readString(in)));
    } else if (state == Status.FATAL.state) {
      // Close the connection
      markClosed(new RemoteException(WritableUtils.readString(in), 
                                     WritableUtils.readString(in)));
    }
  } catch (IOException e) {
    markClosed(e);
  }
}
 
Example 18
Source File: WritableComparator.java    From hadoop with Apache License 2.0
/** Construct a new {@link WritableComparable} instance. */
public WritableComparable newKey() {
  return ReflectionUtils.newInstance(keyClass, conf);
}
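newKey() gives callers a fresh, reflectively constructed key that they then populate with readFields(). A small self-contained sketch of that deserialize-into-a-new-instance pattern, using LongWritable purely for illustration:

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.util.ReflectionUtils;

public class NewKeySketch {
  public static void main(String[] args) throws IOException {
    // Serialize a key ...
    DataOutputBuffer out = new DataOutputBuffer();
    new LongWritable(42L).write(out);

    // ... then deserialize it into a freshly allocated instance, mirroring how
    // a comparator uses newKey() to obtain a reusable deserialization target.
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    LongWritable key = ReflectionUtils.newInstance(LongWritable.class, null);
    key.readFields(in);
    System.out.println(key.get()); // prints 42
  }
}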
 
Example 19
Source File: TestMRKeyValueTextInputFormat.java    From hadoop with Apache License 2.0
@Test
public void testSplitableCodecs() throws Exception {
  final Job job = Job.getInstance(defaultConf);
  final Configuration conf = job.getConfiguration();

  // Create the codec
  CompressionCodec codec = null;
  try {
    codec = (CompressionCodec)
    ReflectionUtils.newInstance(conf.getClassByName("org.apache.hadoop.io.compress.BZip2Codec"), conf);
  } catch (ClassNotFoundException cnfe) {
    throw new IOException("Illegal codec!");
  }
  Path file = new Path(workDir, "test"+codec.getDefaultExtension());

  int seed = new Random().nextInt();
  LOG.info("seed = " + seed);
  Random random = new Random(seed);

  localFs.delete(workDir, true);
  FileInputFormat.setInputPaths(job, workDir);

  final int MAX_LENGTH = 500000;
  FileInputFormat.setMaxInputSplitSize(job, MAX_LENGTH / 20);
  // for a variety of lengths
  for (int length = 0; length < MAX_LENGTH;
       length += random.nextInt(MAX_LENGTH / 4) + 1) {

    LOG.info("creating; entries = " + length);

    // create a file with length entries
    Writer writer =
      new OutputStreamWriter(codec.createOutputStream(localFs.create(file)));
    try {
      for (int i = 0; i < length; i++) {
        writer.write(Integer.toString(i * 2));
        writer.write("\t");
        writer.write(Integer.toString(i));
        writer.write("\n");
      }
    } finally {
      writer.close();
    }

    // try splitting the file in a variety of sizes
    KeyValueTextInputFormat format = new KeyValueTextInputFormat();
    assertTrue("KVTIF claims not splittable", format.isSplitable(job, file));
    for (int i = 0; i < 3; i++) {
      int numSplits = random.nextInt(MAX_LENGTH / 2000) + 1;
      LOG.info("splitting: requesting = " + numSplits);
      List<InputSplit> splits = format.getSplits(job);
      LOG.info("splitting: got =        " + splits.size());

      // check each split
      BitSet bits = new BitSet(length);
      for (int j = 0; j < splits.size(); j++) {
        LOG.debug("split["+j+"]= " + splits.get(j));
        TaskAttemptContext context = MapReduceTestUtil.
          createDummyMapTaskAttemptContext(job.getConfiguration());
        RecordReader<Text, Text> reader = format.createRecordReader(
          splits.get(j), context);
        Class<?> clazz = reader.getClass();
        MapContext<Text, Text, Text, Text> mcontext =
          new MapContextImpl<Text, Text, Text, Text>(job.getConfiguration(),
          context.getTaskAttemptID(), reader, null, null,
          MapReduceTestUtil.createDummyReporter(), splits.get(j));
        reader.initialize(splits.get(j), mcontext);

        Text key = null;
        Text value = null;
        try {
          int count = 0;
          while (reader.nextKeyValue()) {
            key = reader.getCurrentKey();
            value = reader.getCurrentValue();
            final int k = Integer.parseInt(key.toString());
            final int v = Integer.parseInt(value.toString());
            assertEquals("Bad key", 0, k % 2);
            assertEquals("Mismatched key/value", k / 2, v);
            LOG.debug("read " + k + "," + v);
            assertFalse(k + "," + v + " in multiple partitions.",bits.get(v));
            bits.set(v);
            count++;
          }
          if (count > 0) {
            LOG.info("splits["+j+"]="+splits.get(j)+" count=" + count);
          } else {
            LOG.debug("splits["+j+"]="+splits.get(j)+" count=" + count);
          }
        } finally {
          reader.close();
        }
      }
      assertEquals("Some keys in no partition.", length, bits.cardinality());
    }

  }
}
 
Example 20
Source File: Application.java    From RDFS with Apache License 2.0
/**
 * Start the child process to handle the task for us.
 * @param conf the task's configuration
 * @param recordReader the fake record reader to update progress with
 * @param output the collector to send output to
 * @param reporter the reporter for the task
 * @param outputKeyClass the class of the output keys
 * @param outputValueClass the class of the output values
 * @throws IOException
 * @throws InterruptedException
 */
Application(JobConf conf, 
            RecordReader<FloatWritable, NullWritable> recordReader, 
            OutputCollector<K2,V2> output, Reporter reporter,
            Class<? extends K2> outputKeyClass,
            Class<? extends V2> outputValueClass
            ) throws IOException, InterruptedException {
  serverSocket = new ServerSocket(0);
  Map<String, String> env = new HashMap<String,String>();
  // add TMPDIR environment variable with the value of java.io.tmpdir
  env.put("TMPDIR", System.getProperty("java.io.tmpdir"));
  env.put("hadoop.pipes.command.port", 
          Integer.toString(serverSocket.getLocalPort()));
  List<String> cmd = new ArrayList<String>();
  String interpretor = conf.get("hadoop.pipes.executable.interpretor");
  if (interpretor != null) {
    cmd.add(interpretor);
  }

  String executable = DistributedCache.getLocalCacheFiles(conf)[0].toString();
  FileUtil.chmod(executable, "a+x");
  cmd.add(executable);
  // wrap the command in a stdout/stderr capture
  TaskAttemptID taskid = TaskAttemptID.forName(conf.get("mapred.task.id"));
  File stdout = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDOUT);
  File stderr = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDERR);
  long logLength = TaskLog.getTaskLogLength(conf);
  cmd = TaskLog.captureOutAndError(null, cmd, stdout, stderr, logLength,
      false);

  process = runClient(cmd, env);
  clientSocket = serverSocket.accept();
  handler = new OutputHandler<K2, V2>(output, reporter, recordReader);
  K2 outputKey = (K2)
    ReflectionUtils.newInstance(outputKeyClass, conf);
  V2 outputValue = (V2) 
    ReflectionUtils.newInstance(outputValueClass, conf);
  downlink = new BinaryProtocol<K1, V1, K2, V2>(clientSocket, handler, 
                                outputKey, outputValue, conf);
  downlink.start();
  downlink.setJobConf(conf);
}