Java Code Examples for org.apache.hadoop.util.StringUtils#stringifyException()

The following examples show how to use org.apache.hadoop.util.StringUtils#stringifyException(). You can go to the original project or source file by following the link above each example.
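Before diving into the examples, it helps to know what the method does: StringUtils.stringifyException(Throwable) renders a throwable's message and full stack trace as a single String, suitable for embedding in log lines or error reports. A minimal sketch, assuming hadoop-common is on the classpath:

import org.apache.hadoop.util.StringUtils;

public class StringifyDemo {
  public static void main(String[] args) {
    try {
      Integer.parseInt("not a number");
    } catch (NumberFormatException e) {
      // The returned String contains the exception class, message, and
      // every stack frame, exactly as printStackTrace would print them.
      String trace = StringUtils.stringifyException(e);
      System.out.println(trace);
    }
  }
}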
Example 1
Source File: MultiThreadedWriterWithACL.java    From hbase with Apache License 2.0
private void recordFailure(final Table table, final Put put, final long keyBase,
    final long start, IOException e) {
  failedKeySet.add(keyBase);
  String exceptionInfo;
  if (e instanceof RetriesExhaustedWithDetailsException) {
    RetriesExhaustedWithDetailsException aggEx = (RetriesExhaustedWithDetailsException) e;
    exceptionInfo = aggEx.getExhaustiveDescription();
  } else {
    // stringifyException renders the message and full stack trace into a
    // String, so no manual StringWriter/PrintWriter plumbing is needed.
    exceptionInfo = StringUtils.stringifyException(e);
  }
  LOG.error("Failed to insert: " + keyBase + " after " + (System.currentTimeMillis() - start)
      + "ms; region information: " + getRegionDebugInfoSafe(table, put.getRow()) + "; errors: "
      + exceptionInfo);
}
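For reference, stringifyException is essentially a thin wrapper around Throwable#printStackTrace (paraphrased from hadoop-common; the exact source may differ slightly across versions), which is why no extra StringWriter/PrintWriter plumbing is needed at call sites like the one above:

public static String stringifyException(Throwable e) {
  StringWriter stm = new StringWriter();
  PrintWriter wrt = new PrintWriter(stm);
  e.printStackTrace(wrt);
  wrt.close();
  return stm.toString();
}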
 
Example 2
Source File: Command.java    From hadoop with Apache License 2.0
/**
 * Display an exception prefaced with the command name.  Also increments
 * the error count for the command which will result in a non-zero exit
 * code.
 * @param e exception to display
 */
public void displayError(Exception e) {
  // build up a list of exceptions that occurred
  exceptions.add(e);
  
  String errorMessage = e.getLocalizedMessage();
  if (errorMessage == null) {
    // this is an unexpected condition, so dump the whole exception since
    // it's probably a nasty internal error where the backtrace would be
    // useful
    errorMessage = StringUtils.stringifyException(e);
    LOG.debug(errorMessage);
  } else {
    errorMessage = errorMessage.split("\n", 2)[0];
  }
  displayError(errorMessage);
}
 
Example 3
Source File: TextOutputReader.java    From big-c with Apache License 2.0
private void splitKeyVal(byte[] line, int length, Text key, Text val)
  throws IOException {
  // Need to find numKeyFields separators
  int pos = UTF8ByteArrayUtils.findBytes(line, 0, length, separator);
  for(int k=1; k<numKeyFields && pos!=-1; k++) {
    pos = UTF8ByteArrayUtils.findBytes(line, pos + separator.length, 
      length, separator);
  }
  try {
    if (pos == -1) {
      key.set(line, 0, length);
      val.set("");
    } else {
      StreamKeyValUtil.splitKeyVal(line, 0, length, key, val, pos,
        separator.length);
    }
  } catch (CharacterCodingException e) {
    throw new IOException(StringUtils.stringifyException(e));
  }
}
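One caveat with this pattern: wrapping the stringified trace in new IOException(String) flattens the original exception into text and drops the cause chain. A hedged alternative sketch (not what the big-c code above does) is to pass the CharacterCodingException as the cause, so callers can still walk the chain:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.CodingErrorAction;
import java.nio.charset.StandardCharsets;

public class StrictDecode {
  static String decodeStrict(byte[] bytes) throws IOException {
    try {
      // REPORT makes the decoder throw CharacterCodingException on bad
      // input instead of silently replacing malformed sequences.
      return StandardCharsets.UTF_8.newDecoder()
          .onMalformedInput(CodingErrorAction.REPORT)
          .onUnmappableCharacter(CodingErrorAction.REPORT)
          .decode(ByteBuffer.wrap(bytes))
          .toString();
    } catch (CharacterCodingException e) {
      // Preserve the cause chain rather than flattening it to a String.
      throw new IOException("Malformed UTF-8 input", e);
    }
  }
}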
 
Example 4
Source File: Task.java    From hadoop with Apache License 2.0
/**
 * Report a fatal error to the parent (task) tracker.
 */
protected void reportFatalError(TaskAttemptID id, Throwable throwable, 
                                String logMsg) {
  LOG.fatal(logMsg);
  
  if (ShutdownHookManager.get().isShutdownInProgress()) {
    return;
  }
  
  Throwable tCause = throwable.getCause();
  String cause = tCause == null 
                 ? StringUtils.stringifyException(throwable)
                 : StringUtils.stringifyException(tCause);
  try {
    umbilical.fatalError(id, cause);
  } catch (IOException ioe) {
    LOG.fatal("Failed to contact the tasktracker", ioe);
    System.exit(-1);
  }
}
 
Example 5
Source File: DistCh.java    From RDFS with Apache License 2.0
/** Run a FileOperation */
public void map(Text key, FileOperation value,
    OutputCollector<WritableComparable<?>, Text> out, Reporter reporter
    ) throws IOException {
  try {
    value.run(jobconf);
    ++succeedcount;
    reporter.incrCounter(Counter.SUCCEED, 1);
  } catch (IOException e) {
    ++failcount;
    reporter.incrCounter(Counter.FAIL, 1);

    String s = "FAIL: " + value + ", " + StringUtils.stringifyException(e);
    out.collect(null, new Text(s));
    LOG.info(s);
  } finally {
    reporter.setStatus(getCountString());
  }
}
 
Example 6
Source File: Command.java    From big-c with Apache License 2.0
/**
 * Display an exception prefaced with the command name.  Also increments
 * the error count for the command which will result in a non-zero exit
 * code.
 * @param e exception to display
 */
public void displayError(Exception e) {
  // build up a list of exceptions that occurred
  exceptions.add(e);
  
  String errorMessage = e.getLocalizedMessage();
  if (errorMessage == null) {
    // this is an unexpected condition, so dump the whole exception since
    // it's probably a nasty internal error where the backtrace would be
    // useful
    errorMessage = StringUtils.stringifyException(e);
    LOG.debug(errorMessage);
  } else {
    errorMessage = errorMessage.split("\n", 2)[0];
  }
  displayError(errorMessage);
}
 
Example 7
Source File: DataNode.java    From RDFS with Apache License 2.0
/**
 * Initializes the {@link #data}. The initialization is done only once, when
 * the handshake with the first namenode is completed.
 */
private synchronized void initFsDataSet(Configuration conf,
    AbstractList<File> dataDirs, int numNamespaces) throws IOException {
  if (data != null) { // Already initialized
    return;
  }

  // get version and id info from the name-node
  boolean simulatedFSDataset = 
    conf.getBoolean("dfs.datanode.simulateddatastorage", false);

  if (simulatedFSDataset) {
    storage.createStorageID(selfAddr.getPort());
    // it would have been better to pass storage as a parameter to
    // constructor below - need to augment ReflectionUtils used below.
    conf.set("dfs.datanode.StorageId", storage.getStorageID());
    try {
      data = (FSDatasetInterface) ReflectionUtils.newInstance(
          Class.forName(
          "org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset"),
          conf);
    } catch (ClassNotFoundException e) {
      throw new IOException(StringUtils.stringifyException(e));
    }
  } else {
    data = new FSDataset(this, conf, numNamespaces);
  }
}
 
Example 8
Source File: RpcExecutor.java    From hbase with Apache License 2.0
private void run(CallRunner cr) {
  MonitoredRPCHandler status = RpcServer.getStatus();
  cr.setStatus(status);
  try {
    this.activeHandlerCount.incrementAndGet();
    cr.run();
  } catch (Throwable e) {
    if (e instanceof Error) {
      int failedCount = failedHandlerCount.incrementAndGet();
      if (this.handlerFailureThreshhold >= 0
          && failedCount > handlerCount * this.handlerFailureThreshhold) {
        String message = "Number of failed RpcServer handler runs exceeded threshhold "
            + this.handlerFailureThreshhold + "; reason: " + StringUtils.stringifyException(e);
        if (abortable != null) {
          abortable.abort(message, e);
        } else {
          LOG.error("Error but can't abort because abortable is null: "
              + StringUtils.stringifyException(e));
          throw e;
        }
      } else {
        LOG.warn("Handler errors " + StringUtils.stringifyException(e));
      }
    } else {
      LOG.warn("Handler  exception " + StringUtils.stringifyException(e));
    }
  } finally {
    this.activeHandlerCount.decrementAndGet();
  }
}
 
Example 9
Source File: TajoContainerProxy.java    From incubator-tajo with Apache License 2.0
@Override
public synchronized void stopContainer() {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Release TajoWorker Resource: " + executionBlockId + "," + containerID + ", state:" + this.state);
  }
  if(isCompletelyDone()) {
    LOG.info("Container already stopped:" + containerID);
    return;
  }
  if(this.state == ContainerState.PREP) {
    this.state = ContainerState.KILLED_BEFORE_LAUNCH;
  } else {
    try {
      TajoWorkerContainer tajoWorkerContainer = ((TajoWorkerContainer)container);
      releaseWorkerResource(context, executionBlockId, tajoWorkerContainer.getId());
      context.getResourceAllocator().removeContainer(containerID);
      this.state = ContainerState.DONE;
    } catch (Throwable t) {
      // ignore the cleanup failure
      String message = "cleanup failed for container "
          + this.containerID + " : "
          + StringUtils.stringifyException(t);
      LOG.warn(message);
      this.state = ContainerState.DONE;
      return;
    }
  }
}
 
Example 10
Source File: ExpressionFactory.java    From examples with Apache License 2.0
/**
 * Invokes "static void registerExpression(ExpressionFactory)" on the given
 * class. This method abstracts the contract between the factory and the
 * expression class. Do not assume that directly invoking registerExpression
 * on the given class will have the same effect.
 * @param expressionClass class to allow an opportunity to register
 */
public void registerExpression(Class<? extends Expression> expressionClass) {
  try {
    // getMethod never returns null; a missing method surfaces as a
    // NoSuchMethodException, caught and rethrown below.
    Method register = expressionClass.getMethod(
        "registerExpression", ExpressionFactory.class);
    register.invoke(null, this);
  } catch (Exception e) {
    throw new RuntimeException(StringUtils.stringifyException(e));
  }
}
 
Example 11
Source File: DistCp.java    From RDFS with Apache License 2.0
/** Map method. Copies one file from source file system to destination.
 * @param key src len
 * @param value FilePair (FileStatus src, Path dst)
 * @param out Log of failed copies
 * @param reporter
 */
public void map(LongWritable key,
                FilePairComparable value,
                OutputCollector<FilePairComparable, Text> out,
                Reporter reporter) throws IOException {
  final FileStatus srcstat = value.input;
  final Path relativedst = new Path(value.output);
  try {
    copy(value, out, reporter);
  } catch (IOException e) {
    ++failcount;
    reporter.incrCounter(Counter.FAIL, 1);
    updateStatus(reporter);
    final String sfailure = "FAIL " + relativedst + " : " +
                      StringUtils.stringifyException(e);
    out.collect(value, new Text(sfailure));
    LOG.info(sfailure);
    try {
      for (int i = 0; i < 3; ++i) {
        try {
          final Path tmp = new Path(attemptTmpRoot, relativedst);
          if (destFileSys.delete(tmp, true))
            break;
        } catch (Throwable ex) {
          // ignore, we are just cleaning up
          LOG.debug("Ignoring cleanup exception", ex);
        }
        // update status, so we don't get timed out
        updateStatus(reporter);
        Thread.sleep(3 * 1000);
      }
    } catch (InterruptedException inte) {
      throw (IOException)new IOException().initCause(inte);
    }
  } finally {
    updateStatus(reporter);
  }
}
 
Example 12
Source File: PredicateHandlerTest.java    From accumulo-hive-storage-manager with Apache License 2.0
@Test
public void iteratorIgnoreRowIDFields() {
    setup();
    ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, false);
    ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "aaa");
    List<ExprNodeDesc> children = Lists.newArrayList();
    children.add(column);
    children.add(constant);
    ExprNodeDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, new GenericUDFOPEqualOrLessThan(), children);
    assertNotNull(node);

    ExprNodeDesc column2 = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, false);
    ExprNodeDesc constant2 = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "bbb");
    List<ExprNodeDesc> children2 = Lists.newArrayList();
    children2.add(column2);
    children2.add(constant2);
    ExprNodeDesc node2 = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, new GenericUDFOPGreaterThan(), children2);
    assertNotNull(node2);


    List<ExprNodeDesc> bothFilters = Lists.newArrayList();
    bothFilters.add(node);
    bothFilters.add(node2);
    ExprNodeDesc both = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, new GenericUDFOPAnd(), bothFilters);

    String filterExpr = Utilities.serializeExpression(both);
    conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, filterExpr);
    try {
        List<IteratorSetting> iterators = handler.getIterators(conf);
        assertEquals(0, iterators.size());
    } catch (SerDeException e) {
        // Fail with the full trace; a bare stringifyException call would
        // discard its result and silently swallow the error.
        fail(StringUtils.stringifyException(e));
    }
}
 
Example 13
Source File: MapWritable.java    From anthelion with Apache License 2.0
/**
 * Copy constructor. This constructor makes a deep copy, using serialization /
 * deserialization to break any possible references to contained objects.
 * 
 * @param map map to copy from
 */
public MapWritable(MapWritable map) {
  if (map != null) {
    try {
      DataOutputBuffer dob = new DataOutputBuffer();
      map.write(dob);
      DataInputBuffer dib = new DataInputBuffer();
      dib.reset(dob.getData(), dob.getLength());
      readFields(dib);
    } catch (IOException e) {
      throw new IllegalArgumentException("this map cannot be copied: " +
              StringUtils.stringifyException(e));
    }
  }
}
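A hypothetical usage of the copy constructor above (the key and value types here are illustrative; this assumes a Nutch-style MapWritable that accepts arbitrary Writable keys and values):

MapWritable original = new MapWritable();
original.put(new Text("fetchTime"), new LongWritable(1700000000L));

// Deep copy: the entries are round-tripped through serialization, so
// mutating the copy cannot affect the original.
MapWritable copy = new MapWritable(original);
copy.put(new Text("fetchTime"), new LongWritable(0L));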
 
Example 14
Source File: QueryMasterTask.java    From tajo with Apache License 2.0
public String getErrorMessage() {
  if (isInitError()) {
    return StringUtils.stringifyException(initError);
  } else {
    return null;
  }
}
 
Example 15
Source File: PipeMapper.java    From hadoop-gpu with Apache License 2.0
public void map(Object key, Object value, OutputCollector output, Reporter reporter) throws IOException {
  if (outerrThreadsThrowable != null) {
    mapRedFinished();
    throw new IOException("MROutput/MRErrThread failed:"
        + StringUtils.stringifyException(outerrThreadsThrowable));
  }
  try {
    // 1/4 Hadoop in
    numRecRead_++;
    maybeLogRecord();
    if (debugFailDuring_ && numRecRead_ == 3) {
      throw new IOException("debugFailDuring_");
    }

    // 2/4 Hadoop to Tool
    if (numExceptions_ == 0) {
      if (!this.ignoreKey) {
        write(key);
        clientOut_.write(getInputSeparator());
      }
      write(value);
      clientOut_.write('\n');
      if(skipping) {
        //flush the streams on every record input if running in skip mode
        //so that we don't buffer other records surrounding a bad record. 
        clientOut_.flush();
      }
    } else {
      numRecSkipped_++;
    }
  } catch (IOException io) {
    numExceptions_++;
    if (numExceptions_ > 1 || numRecWritten_ < minRecWrittenToEnableSkip_) {
      // terminate with failure
      String msg = logFailure(io);
      appendLogToJobLog("failure");
      mapRedFinished();
      throw new IOException(msg);
    } else {
      // terminate with success:
      // swallow input records although the stream processor failed/closed
    }
  }
}
 
Example 16
Source File: RMFatalEvent.java    From big-c with Apache License 2.0
public RMFatalEvent(RMFatalEventType rmFatalEventType, Exception cause) {
  super(rmFatalEventType);
  this.cause = StringUtils.stringifyException(cause);
}
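The constructor stores the stringified trace rather than the Throwable itself, so the event can be logged or forwarded as plain text. A hypothetical call site (the dispatcher chain and enum constant are assumed from the Hadoop codebase, not shown in this snippet):

// Raise a fatal event; the stack trace travels inside the event as a String.
rmContext.getDispatcher().getEventHandler().handle(
    new RMFatalEvent(RMFatalEventType.STATE_STORE_OP_FAILED,
        new IOException("state store connection lost")));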
 
Example 17
Source File: YarnContainerProxy.java    From incubator-tajo with Apache License 2.0
@Override
public synchronized void stopContainer() {
  if (isCompletelyDone()) {
    return;
  }
  if (this.state == ContainerState.PREP) {
    this.state = ContainerState.KILLED_BEFORE_LAUNCH;
  } else {
    LOG.info("KILLING " + containerID);

    ContainerManagementProtocol proxy = null;
    try {
      proxy = getCMProxy(this.containerID, this.containerMgrAddress,
          this.containerToken);

      // kill the remote container if already launched
      List<ContainerId> willBeStoppedIds = new ArrayList<ContainerId>();
      willBeStoppedIds.add(this.containerID);
      StopContainersRequest stopRequests = Records.newRecord(StopContainersRequest.class);
      stopRequests.setContainerIds(willBeStoppedIds);
      proxy.stopContainers(stopRequests);
      // If stopContainers returns without an error, assume the stop request
      // made it over to the NodeManager.
      context.getResourceAllocator().removeContainer(containerID);
    } catch (Throwable t) {
      // ignore the cleanup failure
      String message = "cleanup failed for container "
          + this.containerID + " : "
          + StringUtils.stringifyException(t);
      LOG.warn(message);
      this.state = ContainerState.DONE;
      return;
    } finally {
      if (proxy != null) {
        yarnRPC.stopProxy(proxy, conf);
      }
    }
    this.state = ContainerState.DONE;
  }
}
 
Example 18
Source File: YarnContainerProxy.java    From incubator-tajo with Apache License 2.0
@Override
@SuppressWarnings("unchecked")
public synchronized void launch(ContainerLaunchContext commonContainerLaunchContext) {
  LOG.info("Launching Container with Id: " + containerID);
  if(this.state == ContainerState.KILLED_BEFORE_LAUNCH) {
    state = ContainerState.DONE;
    LOG.error("Container (" + containerID + " was killed before it was launched");
    return;
  }

  ContainerManagementProtocol proxy = null;
  try {

    proxy = getCMProxy(containerID, containerMgrAddress,
        containerToken);

    // Construct the actual Container
    ContainerLaunchContext containerLaunchContext = createContainerLaunchContext(commonContainerLaunchContext);

    // Now launch the actual container
    List<StartContainerRequest> startRequestList = new ArrayList<StartContainerRequest>();
    StartContainerRequest startRequest = Records
        .newRecord(StartContainerRequest.class);
    startRequest.setContainerLaunchContext(containerLaunchContext);
    startRequestList.add(startRequest);
    StartContainersRequest startRequests = Records.newRecord(StartContainersRequest.class);
    startRequests.setStartContainerRequests(startRequestList);
    StartContainersResponse response = proxy.startContainers(startRequests);

    ByteBuffer portInfo = response.getAllServicesMetaData().get(PullServerAuxService.PULLSERVER_SERVICEID);

    if(portInfo != null) {
      port = PullServerAuxService.deserializeMetaData(portInfo);
    }

    LOG.info("PullServer port returned by ContainerManager for "
        + containerID + " : " + port);

    if(port < 0) {
      this.state = ContainerState.FAILED;
      throw new IllegalStateException("Invalid shuffle port number "
          + port + " returned for " + containerID);
    }

    this.state = ContainerState.RUNNING;
    this.hostName = containerMgrAddress.split(":")[0];
    context.getResourceAllocator().addContainer(containerID, this);
  } catch (Throwable t) {
    String message = "Container launch failed for " + containerID + " : "
        + StringUtils.stringifyException(t);
    this.state = ContainerState.FAILED;
    LOG.error(message);
  } finally {
    if (proxy != null) {
      yarnRPC.stopProxy(proxy, conf);
    }
  }
}
 
Example 19
Source File: DistCpV1.java    From big-c with Apache License 2.0
/** Map method. Copies one file from source file system to destination.
 * @param key src len
 * @param value FilePair (FileStatus src, Path dst)
 * @param out Log of failed copies
 * @param reporter
 */
public void map(LongWritable key,
                FilePair value,
                OutputCollector<WritableComparable<?>, Text> out,
                Reporter reporter) throws IOException {
  final FileStatus srcstat = value.input;
  final Path relativedst = new Path(value.output);
  try {
    copyWithRetries(srcstat, relativedst, out, reporter);
  } catch (IOException e) {
    ++failcount;
    reporter.incrCounter(Counter.FAIL, 1);
    updateStatus(reporter);
    final String sfailure = "FAIL " + relativedst + " : " +
                      StringUtils.stringifyException(e);
    out.collect(null, new Text(sfailure));
    LOG.info(sfailure);
    if (e instanceof FileNotFoundException) {
      final String s = "Possible Cause for failure: Either the filesystem "
                       + srcstat.getPath().getFileSystem(job)
                       + " is not accessible or the file is deleted";
      LOG.error(s);
      out.collect(null, new Text(s));
    }

    try {
      for (int i = 0; i < 3; ++i) {
        try {
          final Path tmp = new Path(job.get(TMP_DIR_LABEL), relativedst);
          if (destFileSys.delete(tmp, true))
            break;
        } catch (Throwable ex) {
          // ignore, we are just cleaning up
          LOG.debug("Ignoring cleanup exception", ex);
        }
        // update status, so we don't get timed out
        updateStatus(reporter);
        Thread.sleep(3 * 1000);
      }
    } catch (InterruptedException inte) {
      throw (IOException)new IOException().initCause(inte);
    }
  } finally {
    updateStatus(reporter);
  }
}
 
Example 20
Source File: TaskReporter.java    From incubator-tez with Apache License 2.0
/**
 * Sends out the final events for a task failure: a status update carrying
 * the current counters and progress, followed by the failure event itself.
 * @param taskAttemptID the failed task attempt
 * @param t the throwable that caused the failure
 * @param diagnostics diagnostic message; if null, the stringified stack
 *          trace of t is used instead
 * @param srcMeta metadata identifying the source of the failure event;
 *          if null, the update event metadata is used
 * @return the result of the heartbeat that carries the two events
 * @throws IOException
 *           indicates an RPC communication failure.
 * @throws TezException
 *           indicates an exception somewhere in the AM.
 */
private boolean taskFailed(TezTaskAttemptID taskAttemptID, Throwable t, String diagnostics,
    EventMetaData srcMeta) throws IOException, TezException {
  TezEvent statusUpdateEvent = new TezEvent(new TaskStatusUpdateEvent(task.getCounters(),
      task.getProgress()), updateEventMetadata);
  if (diagnostics == null) {
    diagnostics = StringUtils.stringifyException(t);
  }
  TezEvent taskAttemptFailedEvent = new TezEvent(new TaskAttemptFailedEvent(diagnostics),
      srcMeta == null ? updateEventMetadata : srcMeta);
  return heartbeat(Lists.newArrayList(statusUpdateEvent, taskAttemptFailedEvent));
}