Java Code Examples for org.apache.hadoop.util.StringInterner

The following examples show how to use org.apache.hadoop.util.StringInterner. They are extracted from open source projects; each example lists its source project, source file, and license.
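Before the examples, here is a minimal, self-contained sketch of what StringInterner.weakIntern does (the class name and sample strings are illustrative, not taken from any of the projects below): equal strings are collapsed into a single canonical instance held through weak references, so frequently repeated values such as node addresses, counter names, and queue names share one String object that can still be garbage collected once it is no longer referenced.

import org.apache.hadoop.util.StringInterner;

public class StringInternerDemo {
  public static void main(String[] args) {
    // Two distinct String objects with equal contents.
    String a = new String("node-17:8042");
    String b = new String("node-17:8042");
    System.out.println(a == b);                  // false: different objects

    // weakIntern returns the canonical instance for equal contents.
    String internedA = StringInterner.weakIntern(a);
    String internedB = StringInterner.weakIntern(b);
    System.out.println(internedA == internedB);  // true: same instance
  }
}

The examples that follow apply this same call wherever a deserialized or event-supplied string is likely to recur many times across tasks.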
Example 1
Source Project: hadoop   Source File: TaskAttemptListenerImpl.java    License: Apache License 2.0
@Override
public void reportDiagnosticInfo(TaskAttemptID taskAttemptID, String diagnosticInfo)
    throws IOException {
  diagnosticInfo = StringInterner.weakIntern(diagnosticInfo);
  LOG.info("Diagnostics report from " + taskAttemptID.toString() + ": "
      + diagnosticInfo);

  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);
  taskHeartbeatHandler.progressing(attemptID);

  // This is mainly used for cases where we want to propagate exception traces
  // of tasks that fail.

  // This call exists as a hadoop mapreduce legacy wherein all changes in
  // counters/progress/phase/output-size are reported through statusUpdate()
  // call but not diagnosticInformation.
  context.getEventHandler().handle(
      new TaskAttemptDiagnosticsUpdateEvent(attemptID, diagnosticInfo));
}
 
Example 2
Source Project: hadoop   Source File: TaskAttemptImpl.java    License: Apache License 2.0
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
 
Example 3
Source Project: hadoop   Source File: TaskAttemptImpl.java    License: Apache License 2.0
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, 
    TaskAttemptEvent event) {
  // unregister it from TaskAttemptListener so that it stops listening
  // for it
  taskAttempt.taskAttemptListener.unregister(
      taskAttempt.attemptId, taskAttempt.jvmID);

  if (event instanceof TaskAttemptKillEvent) {
    taskAttempt.addDiagnosticInfo(
        ((TaskAttemptKillEvent) event).getMessage());
  }

  taskAttempt.reportedStatus.progress = 1.0f;
  taskAttempt.updateProgressSplits();
  //send the cleanup event to containerLauncher
  taskAttempt.eventHandler.handle(new ContainerLauncherEvent(
      taskAttempt.attemptId, 
      taskAttempt.container.getId(), StringInterner
          .weakIntern(taskAttempt.container.getNodeId().toString()),
      taskAttempt.container.getContainerToken(),
      ContainerLauncher.EventType.CONTAINER_REMOTE_CLEANUP));
}
 
Example 4
Source Project: incubator-tez   Source File: TezYARNUtils.java    License: Apache License 2.0
public static String getFrameworkClasspath(Configuration conf) {
  Map<String, String> environment = new HashMap<String, String>();

  TezYARNUtils.addToEnvironment(environment,
      Environment.CLASSPATH.name(),
      Environment.PWD.$(),
      File.pathSeparator);

  TezYARNUtils.addToEnvironment(environment,
      Environment.CLASSPATH.name(),
      Environment.PWD.$() + File.separator + "*",
      File.pathSeparator);

  // Add YARN/COMMON/HDFS jars and conf locations to path
  for (String c : conf.getStrings(
      YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
    TezYARNUtils.addToEnvironment(environment, Environment.CLASSPATH.name(),
        c.trim(), File.pathSeparator);
  }
  return StringInterner.weakIntern(environment.get(Environment.CLASSPATH.name()));
}
 
Example 5
Source Project: hadoop   Source File: TaskStatus.java    License: Apache License 2.0
public void readFields(DataInput in) throws IOException {
  this.taskid.readFields(in);
  setProgress(in.readFloat());
  this.numSlots = in.readInt();
  this.runState = WritableUtils.readEnum(in, State.class);
  setDiagnosticInfo(StringInterner.weakIntern(Text.readString(in)));
  setStateString(StringInterner.weakIntern(Text.readString(in)));
  this.phase = WritableUtils.readEnum(in, Phase.class); 
  this.startTime = in.readLong(); 
  this.finishTime = in.readLong(); 
  counters = new Counters();
  this.includeAllCounters = in.readBoolean();
  this.outputSize = in.readLong();
  counters.readFields(in);
  nextRecordRange.readFields(in);
}
 
Example 6
Source Project: tez   Source File: TaskSpec.java    License: Apache License 2.0
public TaskSpec(TezTaskAttemptID taskAttemptID,
    String dagName, String vertexName,
    int vertexParallelism,
    ProcessorDescriptor processorDescriptor,
    List<InputSpec> inputSpecList, List<OutputSpec> outputSpecList,
    @Nullable List<GroupInputSpec> groupInputSpecList, Configuration taskConf) {
  Objects.requireNonNull(taskAttemptID, "taskAttemptID is null");
  Objects.requireNonNull(dagName, "dagName is null");
  Objects.requireNonNull(vertexName, "vertexName is null");
  Objects.requireNonNull(processorDescriptor, "processorDescriptor is null");
  Objects.requireNonNull(inputSpecList, "inputSpecList is null");
  Objects.requireNonNull(outputSpecList, "outputSpecList is null");
  this.taskAttemptId = taskAttemptID;
  this.dagName = StringInterner.weakIntern(dagName);
  this.vertexName = StringInterner.weakIntern(vertexName);
  this.processorDescriptor = processorDescriptor;
  this.inputSpecList = inputSpecList;
  this.outputSpecList = outputSpecList;
  this.groupInputSpecList = groupInputSpecList;
  this.vertexParallelism = vertexParallelism;
  this.taskConf = taskConf;
}
 
Example 7
Source Project: hadoop   Source File: TaskReport.java    License: Apache License 2.0
public void readFields(DataInput in) throws IOException {
  this.taskid.readFields(in);
  this.progress = in.readFloat();
  this.state = StringInterner.weakIntern(Text.readString(in));
  this.startTime = in.readLong(); 
  this.finishTime = in.readLong();
  
  diagnostics = WritableUtils.readStringArray(in);
  counters = new Counters();
  counters.readFields(in);
  currentStatus = WritableUtils.readEnum(in, TIPStatus.class);
  if (currentStatus == TIPStatus.RUNNING) {
    int num = WritableUtils.readVInt(in);    
    for (int i = 0; i < num; i++) {
      TaskAttemptID t = new TaskAttemptID();
      t.readFields(in);
      runningAttempts.add(t);
    }
  } else if (currentStatus == TIPStatus.COMPLETE) {
    successfulAttempt.readFields(in);
  }
}
 
Example 8
Source Project: hadoop   Source File: JobHistoryParser.java    License: Apache License 2.0
private void handleReduceAttemptFinishedEvent
(ReduceAttemptFinishedEvent event) {
  TaskInfo taskInfo = info.tasksMap.get(event.getTaskId());
  TaskAttemptInfo attemptInfo = 
    taskInfo.attemptsMap.get(event.getAttemptId());
  attemptInfo.finishTime = event.getFinishTime();
  attemptInfo.status = StringInterner.weakIntern(event.getTaskStatus());
  attemptInfo.state = StringInterner.weakIntern(event.getState());
  attemptInfo.shuffleFinishTime = event.getShuffleFinishTime();
  attemptInfo.sortFinishTime = event.getSortFinishTime();
  attemptInfo.counters = event.getCounters();
  attemptInfo.hostname = StringInterner.weakIntern(event.getHostname());
  attemptInfo.port = event.getPort();
  attemptInfo.rackname = StringInterner.weakIntern(event.getRackName());
  info.completedTaskAttemptsMap.put(event.getAttemptId(), attemptInfo);
}
 
Example 9
Source Project: big-c   Source File: TaskImpl.java    License: Apache License 2.0
private void handleTaskAttemptCompletion(TaskAttemptId attemptId,
    TaskAttemptCompletionEventStatus status) {
  TaskAttempt attempt = attempts.get(attemptId);
  //raise the completion event only if the container is assigned
  // to nextAttemptNumber
  if (attempt.getNodeHttpAddress() != null) {
    TaskAttemptCompletionEvent tce = recordFactory
        .newRecordInstance(TaskAttemptCompletionEvent.class);
    tce.setEventId(-1);
    String scheme = (encryptedShuffle) ? "https://" : "http://";
    tce.setMapOutputServerAddress(StringInterner.weakIntern(scheme
       + attempt.getNodeHttpAddress().split(":")[0] + ":"
       + attempt.getShufflePort()));
    tce.setStatus(status);
    tce.setAttemptId(attempt.getID());
    int runTime = 0;
    if (attempt.getFinishTime() != 0 && attempt.getLaunchTime() != 0) {
      runTime = (int) (attempt.getFinishTime() - attempt.getLaunchTime());
    }
    tce.setAttemptRunTime(runTime);
    
    //raise the event to job so that it adds the completion event to its
    //data structures
    eventHandler.handle(new JobTaskAttemptCompletedEvent(tce));
  }
}
 
Example 10
Source Project: big-c   Source File: QueueInfo.java    License: Apache License 2.0
@Override
public void readFields(DataInput in) throws IOException {
  queueName = StringInterner.weakIntern(Text.readString(in));
  queueState = WritableUtils.readEnum(in, QueueState.class);
  schedulingInfo = StringInterner.weakIntern(Text.readString(in));
  int length = in.readInt();
  stats = new JobStatus[length];
  for (int i = 0; i < length; i++) {
    stats[i] = new JobStatus();
    stats[i].readFields(in);
  }
  int count = in.readInt();
  children.clear();
  for (int i = 0; i < count; i++) {
    QueueInfo childQueueInfo = new QueueInfo();
    childQueueInfo.readFields(in);
    children.add(childQueueInfo);
  }
}
 
Example 11
/**
 * Helper method to create a CompositeInputAttemptIdentifier from the
 * shuffle payload of a data movement event.
 *
 * @param targetIndex index of the target input
 * @param targetIndexCount number of consecutive target indices covered
 * @param version attempt number (version) of the source attempt
 * @param shufflePayload the data movement event payload
 * @return CompositeInputAttemptIdentifier
 */
private CompositeInputAttemptIdentifier constructInputAttemptIdentifier(int targetIndex, int targetIndexCount, int version,
                                                                        DataMovementEventPayloadProto shufflePayload) {
  String pathComponent = (shufflePayload.hasPathComponent()) ? StringInterner.weakIntern(shufflePayload.getPathComponent()) : null;
  int spillEventId = shufflePayload.getSpillId();
  CompositeInputAttemptIdentifier srcAttemptIdentifier = null;
  if (shufflePayload.hasSpillId()) {
    boolean lastEvent = shufflePayload.getLastEvent();
    InputAttemptIdentifier.SPILL_INFO info = (lastEvent) ? InputAttemptIdentifier.SPILL_INFO
        .FINAL_UPDATE : InputAttemptIdentifier.SPILL_INFO.INCREMENTAL_UPDATE;
    srcAttemptIdentifier =
        new CompositeInputAttemptIdentifier(targetIndex, version, pathComponent, false, info, spillEventId, targetIndexCount);
  } else {
    srcAttemptIdentifier =
        new CompositeInputAttemptIdentifier(targetIndex, version, pathComponent, targetIndexCount);
  }
  return srcAttemptIdentifier;
}
 
Example 12
Source Project: Flink-CEPplus   Source File: Utils.java    License: Apache License 2.0
/**
 * Copied method from org.apache.hadoop.yarn.util.Apps.
 * It was broken by YARN-1824 (2.4.0) and fixed for 2.4.1
 * by https://issues.apache.org/jira/browse/YARN-1931
 */
public static void addToEnvironment(Map<String, String> environment,
		String variable, String value) {
	String val = environment.get(variable);
	if (val == null) {
		val = value;
	} else {
		val = val + File.pathSeparator + value;
	}
	environment.put(StringInterner.weakIntern(variable),
			StringInterner.weakIntern(val));
}
 
Example 13
Source Project: tez   Source File: AbstractCounterGroup.java    License: Apache License 2.0
@Override
public synchronized void readFields(DataInput in) throws IOException {
  displayName = StringInterner.weakIntern(Text.readString(in));
  counters.clear();
  int size = WritableUtils.readVInt(in);
  for (int i = 0; i < size; i++) {
    T counter = newCounter();
    counter.readFields(in);
    counters.put(counter.getName(), counter);
    limits.incrCounters();
  }
}
 
Example 14
Source Project: hadoop   Source File: Apps.java    License: Apache License 2.0
@Public
@Unstable
public static void addToEnvironment(
    Map<String, String> environment,
    String variable, String value, String classPathSeparator) {
  String val = environment.get(variable);
  if (val == null) {
    val = value;
  } else {
    val = val + classPathSeparator + value;
  }
  environment.put(StringInterner.weakIntern(variable), 
      StringInterner.weakIntern(val));
}
 
Example 15
Source Project: hadoop   Source File: TaskAttemptImpl.java    License: Apache License 2.0
@Override
public String getAssignedContainerMgrAddress() {
  readLock.lock();
  try {
    return container == null ? null : StringInterner.weakIntern(container
      .getNodeId().toString());
  } finally {
    readLock.unlock();
  }
}
 
Example 16
Source Project: hadoop   Source File: ContainerRemoteLaunchEvent.java    License: Apache License 2.0
public ContainerRemoteLaunchEvent(TaskAttemptId taskAttemptID,
    ContainerLaunchContext containerLaunchContext,
    Container allocatedContainer, Task remoteTask) {
  super(taskAttemptID, allocatedContainer.getId(), StringInterner
    .weakIntern(allocatedContainer.getNodeId().toString()),
    allocatedContainer.getContainerToken(),
    ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH);
  this.allocatedContainer = allocatedContainer;
  this.containerLaunchContext = containerLaunchContext;
  this.task = remoteTask;
}
 
Example 17
Source Project: hadoop   Source File: JobProfile.java    License: Apache License 2.0
public void readFields(DataInput in) throws IOException {
  jobid.readFields(in);
  this.jobFile = StringInterner.weakIntern(Text.readString(in));
  this.url = StringInterner.weakIntern(Text.readString(in));
  this.user = StringInterner.weakIntern(Text.readString(in));
  this.name = StringInterner.weakIntern(Text.readString(in));
  this.queueName = StringInterner.weakIntern(Text.readString(in));
}
 
Example 18
Source Project: hadoop   Source File: Task.java    License: Apache License 2.0
public void readFields(DataInput in) throws IOException {
  jobFile = StringInterner.weakIntern(Text.readString(in));
  taskId = TaskAttemptID.read(in);
  partition = in.readInt();
  numSlotsRequired = in.readInt();
  taskStatus.readFields(in);
  skipRanges.readFields(in);
  currentRecIndexIterator = skipRanges.skipRangeIterator();
  currentRecStartIndex = currentRecIndexIterator.next();
  skipping = in.readBoolean();
  jobCleanup = in.readBoolean();
  if (jobCleanup) {
    jobRunStateForCleanup = 
      WritableUtils.readEnum(in, JobStatus.State.class);
  }
  jobSetup = in.readBoolean();
  writeSkipRecs = in.readBoolean();
  taskCleanup = in.readBoolean();
  if (taskCleanup) {
    setPhase(TaskStatus.Phase.CLEANUP);
  }
  user = StringInterner.weakIntern(Text.readString(in));
  int len = in.readInt();
  encryptedSpillKey = new byte[len];
  extraData.readFields(in);
  in.readFully(encryptedSpillKey);
}
 
Example 19
Source Project: hadoop   Source File: MapTask.java    License: Apache License 2.0
@SuppressWarnings("unchecked")
private <T> T getSplitDetails(Path file, long offset)
    throws IOException {
  FileSystem fs = file.getFileSystem(conf);
  FSDataInputStream inFile = fs.open(file);
  inFile.seek(offset);
  String className = StringInterner.weakIntern(Text.readString(inFile));
  Class<T> cls;
  try {
    cls = (Class<T>) conf.getClassByName(className);
  } catch (ClassNotFoundException ce) {
    IOException wrap = new IOException("Split class " + className + 
                                        " not found");
    wrap.initCause(ce);
    throw wrap;
  }
  SerializationFactory factory = new SerializationFactory(conf);
  Deserializer<T> deserializer = 
    (Deserializer<T>) factory.getDeserializer(cls);
  deserializer.open(inFile);
  T split = deserializer.deserialize(null);
  long pos = inFile.getPos();
  getCounters().findCounter(
      TaskCounter.SPLIT_RAW_BYTES).increment(pos - offset);
  inFile.close();
  return split;
}
 
Example 20
Source Project: hadoop   Source File: ClusterStatus.java    License: Apache License 2.0
public void readFields(DataInput in) throws IOException {
  numActiveTrackers = in.readInt();
  int numTrackerNames = in.readInt();
  if (numTrackerNames > 0) {
    for (int i = 0; i < numTrackerNames; i++) {
      String name = StringInterner.weakIntern(Text.readString(in));
      activeTrackers.add(name);
    }
  }
  numBlacklistedTrackers = in.readInt();
  int blackListTrackerInfoSize = in.readInt();
  if(blackListTrackerInfoSize > 0) {
    for (int i = 0; i < blackListTrackerInfoSize; i++) {
      BlackListInfo info = new BlackListInfo();
      info.readFields(in);
      blacklistedTrackersInfo.add(info);
    }
  }
  numExcludedNodes = in.readInt();
  ttExpiryInterval = in.readLong();
  map_tasks = in.readInt();
  reduce_tasks = in.readInt();
  max_map_tasks = in.readInt();
  max_reduce_tasks = in.readInt();
  status = WritableUtils.readEnum(in, JobTrackerStatus.class);
  grayListedTrackers = in.readInt();
}
 
Example 21
Source Project: tez   Source File: TaskSpec.java    License: Apache License 2.0
@Override
public void readFields(DataInput in) throws IOException {
  taskAttemptId = TezTaskAttemptID.readTezTaskAttemptID(in);
  dagName = StringInterner.weakIntern(in.readUTF());
  vertexName = StringInterner.weakIntern(in.readUTF());
  vertexParallelism = in.readInt();
  // TODO TEZ-305 convert this to PB
  processorDescriptor = new ProcessorDescriptor();
  processorDescriptor.readFields(in);
  int numInputSpecs = in.readInt();
  inputSpecList = new ArrayList<InputSpec>(numInputSpecs);
  for (int i = 0; i < numInputSpecs; i++) {
    InputSpec inputSpec = new InputSpec();
    inputSpec.readFields(in);
    inputSpecList.add(inputSpec);
  }
  int numOutputSpecs = in.readInt();
  outputSpecList = new ArrayList<OutputSpec>(numOutputSpecs);
  for (int i = 0; i < numOutputSpecs; i++) {
    OutputSpec outputSpec = new OutputSpec();
    outputSpec.readFields(in);
    outputSpecList.add(outputSpec);
  }
  boolean hasGroupInputs = in.readBoolean();
  if (hasGroupInputs) {
    int numGroups = in.readInt();
    groupInputSpecList = Lists.newArrayListWithCapacity(numGroups);
    for (int i = 0; i < numGroups; ++i) {
      GroupInputSpec group = new GroupInputSpec();
      group.readFields(in);
      groupInputSpecList.add(group);
    }
  }
  boolean hasVertexConf = in.readBoolean();
  if (hasVertexConf) {
    taskConf = new Configuration(false);
    taskConf.readFields(in);
  }
}
 
Example 22
Source Project: tez   Source File: OutputSpec.java    License: Apache License 2.0
@Override
public void readFields(DataInput in) throws IOException {
  destinationVertexName = StringInterner.weakIntern(in.readUTF());
  physicalEdgeCount = in.readInt();
  outputDescriptor = new OutputDescriptor();
  outputDescriptor.readFields(in);
}
 
Example 23
Source Project: hadoop   Source File: GenericCounter.java    License: Apache License 2.0
@Override
public synchronized void readFields(DataInput in) throws IOException {
  name = StringInterner.weakIntern(Text.readString(in));
  displayName = in.readBoolean() ? 
      StringInterner.weakIntern(Text.readString(in)) : name;
  value = WritableUtils.readVLong(in);
}
 
Example 24
Source Project: hadoop   Source File: TaggedInputSplit.java    License: Apache License 2.0
private Class<?> readClass(DataInput in) throws IOException {
  String className = StringInterner.weakIntern(Text.readString(in));
  try {
    return conf.getClassByName(className);
  } catch (ClassNotFoundException e) {
    throw new RuntimeException("readObject can't find class", e);
  }
}
 
Example 25
Source Project: hadoop   Source File: EventReader.java    License: Apache License 2.0
static Counters fromAvro(JhCounters counters) {
  Counters result = new Counters();
  if (counters != null) {
    for (JhCounterGroup g : counters.groups) {
      CounterGroup group =
          result.addGroup(StringInterner.weakIntern(g.name.toString()), 
              StringInterner.weakIntern(g.displayName.toString()));
      for (JhCounter c : g.counts) {
        group.addCounter(StringInterner.weakIntern(c.name.toString()), 
            StringInterner.weakIntern(c.displayName.toString()), c.value);
      }
    }
  }
  return result;
}