backtype.storm.topology.FailedException Java Examples

The following examples show how to use backtype.storm.topology.FailedException. They are extracted from open source projects; the originating project and source file are noted above each example.
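In backtype.storm, FailedException (and its subclass ReportedFailedException, which additionally reports the error to Storm's error-reporting mechanism) is thrown from a bolt or state implementation to tell the framework that the current tuple or batch could not be processed and should be failed and replayed. Before the examples, here is a minimal sketch of the pattern; the StoreBolt class and its store() helper are hypothetical, not taken from any of the projects below:

import java.io.IOException;

import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.FailedException;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Tuple;

// Hypothetical bolt: a transient I/O failure is wrapped in FailedException so
// the executor fails the tuple and the spout replays it.
public class StoreBolt extends BaseBasicBolt {

    @Override
    public void execute(Tuple input, BasicOutputCollector collector) {
        try {
            store(input.getString(0)); // hypothetical persistence call
        } catch (IOException e) {
            // Throw ReportedFailedException instead if the error should also
            // be surfaced via reportError, not just trigger a replay.
            throw new FailedException("Transient store failure, replaying tuple", e);
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // this bolt emits nothing
    }

    private void store(String value) throws IOException {
        // hypothetical I/O that may fail transiently
    }
}

As the examples below show, the same exception also serves as a control-flow signal inside Storm itself: framework code such as TridentBoltExecutor and BatchBoltExecutor catches FailedException and turns it into a fail() on the corresponding tuple or batch.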
Example #1
Source File: AccumuloEventStorageBoltTest.java    From cognition with Apache License 2.0
@Test
public void testProcessException(
    @Injectable LogRecord record,
    @Injectable BatchWriter eventWriter,
    @Injectable Mutation event,
    @Injectable MutationsRejectedException e) throws Exception {
  bolt.eventWriter = eventWriter;

  new Expectations(bolt) {{
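    // Make the mocked writer reject the mutation so process() must reset the
    // writer and rethrow the failure as a FailedException.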
    bolt.getEventMutation(record);
    result = event;
    eventWriter.addMutation(event);
    result = e;
    bolt.resetEventWriter();
  }};

  thrown.expect(FailedException.class);
  bolt.process(record);
}
 
Example #2
Source File: TridentSpoutExecutor.java    From jstorm with Apache License 2.0
@Override
public void execute(BatchInfo info, Tuple input) {
    // there won't be a BatchInfo for the success stream
    TransactionAttempt attempt = (TransactionAttempt) input.getValue(0);
    if (input.getSourceStreamId().equals(MasterBatchCoordinator.COMMIT_STREAM_ID)) {
        if (attempt.equals(_activeBatches.get(attempt.getTransactionId()))) {
            ((ICommitterTridentSpout.Emitter) _emitter).commit(attempt);
            _activeBatches.remove(attempt.getTransactionId());
        } else {
            throw new FailedException("Received commit for different transaction attempt");
        }
    } else if (input.getSourceStreamId().equals(MasterBatchCoordinator.SUCCESS_STREAM_ID)) {
        // valid to delete before what's been committed since
        // those batches will never be accessed again
        _activeBatches.headMap(attempt.getTransactionId()).clear();
        _emitter.success(attempt);
    } else {
        _collector.setBatch(info.batchId);
        _emitter.emitBatch(attempt, input.getValue(1), _collector);
        _activeBatches.put(attempt.getTransactionId(), attempt);
    }
}
 
Example #3
Source File: TridentBoltExecutor.java    From jstorm with Apache License 2.0
private boolean finishBatch(TrackedBatch tracked, Tuple finishTuple) {
    boolean success = true;
    try {
        _bolt.finishBatch(tracked.info);
        String stream = COORD_STREAM(tracked.info.batchGroup);
        _collector.flush();
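        // Emit the per-task emitted-tuple count on the coordination stream so
        // downstream executors know how many tuples to expect for this batch.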
        for (Integer task : tracked.condition.targetTasks) {
            _collector.emitDirect(task, stream, finishTuple, new Values(tracked.info.batchId,
                    Utils.get(tracked.taskEmittedTuples, task, 0)));
        }
        if (tracked.delayedAck != null) {
            _collector.ack(tracked.delayedAck);
            tracked.delayedAck = null;
        }
    } catch (FailedException e) {
        failBatch(tracked, e);
        success = false;
    }
    _batches.remove(tracked.info.batchId.getId());
    return success;
}
 
Example #4
Source File: WindowsStateUpdater.java    From jstorm with Apache License 2.0
@Override
public void updateState(WindowsState state, List<TridentTuple> tuples, TridentCollector collector) {
    Long currentTxId = state.getCurrentTxId();
    LOG.debug("Removing triggers using WindowStateUpdater, txnId: [{}] ", currentTxId);
    for (TridentTuple tuple : tuples) {
        try {
            Object fieldValue = tuple.getValueByField(WindowTridentProcessor.TRIGGER_FIELD_NAME);
            if (!(fieldValue instanceof WindowTridentProcessor.TriggerInfo)) {
                throw new IllegalClassException(WindowTridentProcessor.TriggerInfo.class, fieldValue.getClass());
            }
            WindowTridentProcessor.TriggerInfo triggerInfo = (WindowTridentProcessor.TriggerInfo) fieldValue;
            String triggerCompletedKey = WindowTridentProcessor.getWindowTriggerInprocessIdPrefix(triggerInfo.windowTaskId) + currentTxId;

            LOG.debug("Removing trigger key [{}] and trigger completed key [{}] from store: [{}]", triggerInfo, triggerCompletedKey, windowsStore);

            windowsStore.removeAll(Lists.newArrayList(triggerInfo.generateTriggerKey(), triggerCompletedKey));
        } catch (Exception ex) {
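            // Report the error for visibility, then fail the whole batch so
            // Trident replays it.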
            LOG.warn(ex.getMessage());
            collector.reportError(ex);
            throw new FailedException(ex);
        }
    }
}
 
Example #5
Source File: SimpleBatchTestBolt.java    From jstorm with Apache License 2.0
@Override
public byte[] commit(BatchId batchId) throws FailedException {
    System.out.println("SimpleBatchTestBolt #commit");
    // LOG.info("$$$$Receive BatchId " + batchId);
    // if (currentBatchId == null) {
    //     currentBatchId = batchId;
    // } else if (currentBatchId.getId() >= batchId.getId()) {
    //     LOG.info("Current BatchId is " + currentBatchId + ", receive:" + batchId);
    //     throw new RuntimeException();
    // }
    // currentBatchId = batchId;

    // AtomicLong counter = (AtomicLong) counters.remove(batchId);
    // if (counter == null) {
    //     counter = new AtomicLong(0);
    // }

    // LOG.info("Flush " + id + "," + counter);
    // return Utils.serialize(batchId);
    return null;
}
 
Example #6
Source File: HBaseState.java    From storm-hbase with Apache License 2.0
public List<List<Values>> batchRetrieve(List<TridentTuple> tridentTuples) {
    List<List<Values>> batchRetrieveResult = Lists.newArrayList();
    List<Get> gets = Lists.newArrayList();
    for (TridentTuple tuple : tridentTuples) {
        byte[] rowKey = options.mapper.rowKey(tuple);
        gets.add(hBaseClient.constructGetRequests(rowKey, options.projectionCriteria));
    }

    try {
        Result[] results = hBaseClient.batchGet(gets);
        for (Result result : results) {
            List<Values> values = options.rowToStormValueMapper.toValues(result);
            batchRetrieveResult.add(values);
        }
    } catch (Exception e) {
        LOG.warn("Batch get operation failed. Triggering replay.", e);
        throw new FailedException(e);
    }
    return batchRetrieveResult;
}
 
Example #7
Source File: HBaseState.java    From storm-hbase with Apache License 2.0
public void updateState(List<TridentTuple> tuples, TridentCollector collector) {
    List<Mutation> mutations = Lists.newArrayList();

    for (TridentTuple tuple : tuples) {
        byte[] rowKey = options.mapper.rowKey(tuple);
        ColumnList cols = options.mapper.columns(tuple);
        mutations.addAll(hBaseClient.constructMutationReq(rowKey, cols, options.durability));
    }

    try {
        hBaseClient.batchMutate(mutations);
    } catch (Exception e) {
        LOG.warn("Batch write failed but some requests might have succeeded. Triggering replay.", e);
        throw new FailedException(e);
    }
}
 
Example #8
Source File: HBaseMapState.java    From storm-hbase with Apache License 2.0
@Override
public List<T> multiGet(List<List<Object>> keys) {
    List<Get> gets = new ArrayList<Get>();
    for (List<Object> key : keys) {
        LOG.info("Partition: {}, GET: {}", this.partitionNum, key);
        Get get = new Get(toRowKey(key));
        get.addColumn(this.options.columnFamily.getBytes(), this.options.qualifier.getBytes());
        gets.add(get);
    }

    List<T> retval = new ArrayList<T>();
    try {
        Result[] results = this.table.get(gets);
        for (Result result : results) {
            byte[] value = result.getValue(this.options.columnFamily.getBytes(), this.options.qualifier.getBytes());
            if (value != null) {
                retval.add(this.serializer.deserialize(value));
            } else {
                retval.add(null);
            }
        }
    } catch (IOException e) {
        throw new FailedException("IOException while reading from HBase.", e);
    }
    return retval;
}
 
Example #9
Source File: ESIndexState.java    From storm-trident-elasticsearch with Apache License 2.0
public void bulkUpdateIndices(List<TridentTuple> inputs, TridentTupleMapper<Document<T>> mapper, BulkResponseHandler handler) {
    BulkRequestBuilder bulkRequest = client.prepareBulk();
    for (TridentTuple input : inputs) {
        Document<T> doc = mapper.map(input);
        byte[] source = serializeSourceOrFail(doc);
        IndexRequestBuilder request = client.prepareIndex(doc.getName(), doc.getType(), doc.getId()).setSource(source);

        if(doc.getParentId() != null) {
            request.setParent(doc.getParentId());
        }
        bulkRequest.add(request);
    }

    if (bulkRequest.numberOfActions() > 0) {
        try {
            handler.handle(bulkRequest.execute().actionGet());
        } catch (ElasticsearchException e) {
            LOGGER.error("error while executing bulk request to elasticsearch");
            throw new FailedException("Failed to store data into elasticsearch", e);
        }
    }
}
 
Example #10
Source File: ElasticSearchJsonBolt.java    From cognition with Apache License 2.0
void addField(XContentBuilder source,
              Entry<String, String> entry,
              Map<String, Map<String, String>> fieldTypeMappings) throws IOException {
  String key = entry.getKey();
  String value = entry.getValue();

  if (StringUtils.isBlank(value)) {
    logger.debug("Skipping blank value for key: {}", key);
    return;
  }

  String cleanedKey = ElasticsearchUtil.cleanKey(key);

  Map<String, String> fieldTypeMapping = getFieldTypeMapping(fieldTypeMappings, cleanedKey);

  try {
    addFieldByType(source, cleanedKey, value, fieldTypeMapping);
  } catch (NumberFormatException | ParseException e) {
    logger.error("Failed to parse entry - {}:{}", key, value);
    throw new FailedException(e);
  }
}
 
Example #11
Source File: LineRegexReplaceInRegionBolt.java    From cognition with Apache License 2.0
void populateLogRecord(LogRecord logRecord, String record) {
  try (CSVReader csvReader = new CSVReader(new StringReader(record), delimiter)) {
    String[] values = csvReader.readNext();

    int fieldSize = Math.min(fieldList.size(), values.length);
    for (int i = 0; i < fieldSize; i++) {
      String field = fieldList.get(i);
      String value = values[i];
      logRecord.setValue(field, value);
    }
  } catch (IOException e) {
    logger.error("Failed to parse line: {}", record);
    throw new FailedException(e);
  }
}
 
Example #12
Source File: HdfsState.java    From jstorm with Apache License 2.0
@Override
public void commit(Long txId) {
    try {
        options.doCommit(txId);
    } catch (IOException e) {
        LOG.warn("Commit failed due to IOException. Failing the batch.", e);
        throw new FailedException(e);
    }
}
 
Example #13
Source File: TransactionalSpoutBatchExecutor.java    From jstorm with Apache License 2.0
@Override
public void execute(Tuple input) {
    TransactionAttempt attempt = (TransactionAttempt) input.getValue(0);
    try {
        if (input.getSourceStreamId().equals(TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID)) {
            if (attempt.equals(_activeTransactions.get(attempt.getTransactionId()))) {
                ((ICommitterTransactionalSpout.Emitter) _emitter).commit(attempt);
                _activeTransactions.remove(attempt.getTransactionId());
                _collector.ack(input);
            } else {
                _collector.fail(input);
            }
        } else {
            _emitter.emitBatch(attempt, input.getValue(1), _collector);
            _activeTransactions.put(attempt.getTransactionId(), attempt);
            _collector.ack(input);
            BigInteger committed = (BigInteger) input.getValue(2);
            if (committed != null) {
                // valid to delete before what's been committed since
                // those batches will never be accessed again
                _activeTransactions.headMap(committed).clear();
                _emitter.cleanupBefore(committed);
            }
        }
    } catch (FailedException e) {
        LOG.warn("Failed to emit batch for transaction", e);
        _collector.fail(input);
    }
}
 
Example #14
Source File: BatchBoltExecutor.java    From jstorm with Apache License 2.0
@Override
public void execute(Tuple input) {
    Object id = input.getValue(0);
    IBatchBolt bolt = getBatchBolt(id);
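    // A FailedException from the batch bolt fails the input tuple, which
    // causes the whole batch to be replayed.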
    try {
        bolt.execute(input);
        _collector.ack(input);
    } catch (FailedException e) {
        LOG.error("Failed to process tuple in batch", e);
        _collector.fail(input);
    }
}
 
Example #15
Source File: AccumuloEventStorageBolt.java    From cognition with Apache License 2.0
@Override
protected void process(LogRecord record) {
  Mutation event = getEventMutation(record);
  try {
    eventWriter.addMutation(event);
  } catch (MutationsRejectedException e) {
    logger.error("Failed to store row. Reseting Writer", e);
    resetEventWriter();
    throw new FailedException(e);
  }
}
 
Example #16
Source File: ElasticSearchJsonBolt.java    From cognition with Apache License 2.0
@Override
public final void execute(Tuple input, BasicOutputCollector collector) {
  LogRecord record = (LogRecord) input.getValueByField(AbstractLogRecordBolt.RECORD);

  try {
    indexRecord(record);
  } catch (Exception e) {
    logger.error("Error indexing record", e);
    throw new FailedException("Error indexing record", e);
  }
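  // Indexing succeeded; emit the record downstream and its JSON form on the
  // dedicated ES_JSON stream.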
  collector.emit(new Values(record));
  collector.emit(ES_JSON, new Values(record.getValue(esJsonField)));
}
 
Example #17
Source File: TridentBoltExecutor.java    From jstorm with Apache License 2.0
private void failBatch(TrackedBatch tracked, FailedException e) {
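    // Only a ReportedFailedException is surfaced via reportError; a plain
    // FailedException fails the batch without reporting the error.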
    if (e != null && e instanceof ReportedFailedException) {
        _collector.reportError(e);
    }
    tracked.failed = true;
    if (tracked.delayedAck != null) {
        _collector.fail(tracked.delayedAck);
        tracked.delayedAck = null;
    }
}
 
Example #18
Source File: CoordinatedBolt.java    From jstorm with Apache License 2.0
public void handlePrepareCommit(Tuple tuple) {
    basicCollector.setContext(tuple);
    try {
        BatchId id = (BatchId) tuple.getValue(0);
        ((IPrepareCommit) delegate).prepareCommit(id, basicCollector);
        collector.ack(tuple);
    } catch (FailedException e) {
        if (e instanceof ReportedFailedException) {
            collector.reportError(e);
        }
        collector.fail(tuple);
    }
}
 
Example #19
Source File: CoordinatedBolt.java    From jstorm with Apache License 2.0
public void handleRegular(Tuple tuple) {
    basicCollector.setContext(tuple);
    try {
        delegate.execute(tuple, basicCollector);
        collector.ack(tuple);
    } catch (FailedException e) {
        if (e instanceof ReportedFailedException) {
            collector.reportError(e);
        }
        collector.fail(tuple);
    }
}
 
Example #20
Source File: HdfsState.java    From jstorm with Apache License 2.0
public void updateState(List<TridentTuple> tuples, TridentCollector tridentCollector) {
    try {
        this.options.execute(tuples);
    } catch (IOException e) {
        LOG.warn("Failing batch due to IOException.", e);
        throw new FailedException(e);
    }
}
 
Example #21
Source File: MetaSimpleClient.java    From jstorm with Apache License 2.0
@Override
public void revert(BatchId id, byte[] commitResult) {
    try {
        switchOffsetMap();
        updateOffsetToZk(currentOffsets);
    } catch (Exception e) {
        LOG.warn("Failed to update offset to ZK", e);
        throw new FailedException(e);
    }
}
 
Example #22
Source File: MetaSimpleClient.java    From jstorm with Apache License 2.0
@Override
public byte[] commit(BatchId id) throws FailedException {
    try {
        updateOffsetToZk(currentOffsets);
        switchOffsetMap();
    } catch (Exception e) {
        LOG.warn("Failed to update offset to ZK", e);
        throw new FailedException(e);
    }
    return null;
}
 
Example #23
Source File: ElasticSearchJsonBolt.java    From cognition with Apache License 2.0
@Override
public String build(Map<String, String> fields) throws ParseException {
  String value = fields.get(fieldName);
  if (StringUtils.isBlank(value)) {
    logger.error("Blank date field for time series index name: {}", fieldName);
    throw new FailedException("Blank date field for time series index name " + fieldName);
  } else {
    TemporalAccessor date = inputDateFormatter.parse(value);
    return String.format("%s%s", indexName, outputDateFormatter.format(date));
  }
}
 
Example #24
Source File: InOrderDeliveryTest.java    From jstorm with Apache License 2.0
@Override
public void execute(Tuple tuple, BasicOutputCollector collector) {
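    // c1 identifies the stream/key and c2 is its sequence number; any value
    // other than the expected next number means out-of-order delivery.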
    Integer c1 = tuple.getInteger(0);
    Integer c2 = tuple.getInteger(1);
    Integer exp = expected.get(c1);
    if (exp == null)
        exp = 0;
    if (c2.intValue() != exp.intValue()) {
        System.out.println(c1 + " " + c2 + " != " + exp);
        throw new FailedException(c1 + " " + c2 + " != " + exp);
    }
    exp = c2 + 1;
    expected.put(c1, exp);
}
 
Example #25
Source File: HdfsState.java    From storm-hdfs with Apache License 2.0
public void updateState(List<TridentTuple> tuples, TridentCollector tridentCollector) {
    try {
        this.options.execute(tuples);
    } catch (IOException e) {
        LOG.warn("Failing batch due to IOException.", e);
        throw new FailedException(e);
    }
}
 
Example #26
Source File: EsIndexBolt.java    From cognition with Apache License 2.0
@Override
public void execute(Tuple input) {
  try {
    esBolt.execute(input);
  } catch (Exception e) {
    logger.error("Index failed with tuple {}", input);
    logger.error("Index failed", e);
    throw new FailedException(e);
  }
}
 
Example #27
Source File: CsvHdfsBolt.java    From cognition with Apache License 2.0
@Override
protected void execute(Tuple tuple, RecordCollector collector) {
  String metadataJson = new String((byte[]) tuple.getValue(0));

  try {
    FileMetadata fileMetadata = FileMetadata.parseJson(metadataJson);

    String filename = fileMetadata.getFilename();
    String hdfsPath = fileMetadata.getHdfsPath();
    String fileType = fileMetadata.getFileType();

    if (isBlank(filename) || isBlank(hdfsPath) || isBlank(fileType)) {
      LOGGER.error("Incomplete file metadata. Requires: filename, hdfsPath and fileType. {}", fileMetadata);
      throw new FailedException("Incomplete file metadata: " + fileMetadata);
    }

    try (FSDataInputStream fsDataInputStream = fileSystem.open(new Path(hdfsPath));
         InputStreamReader fileReader = new InputStreamReader(fsDataInputStream)) {

      CsvLogRecordParser parser = new CsvLogRecordParser(csvParserConfig);
      parser.parse(fileReader, fileType, logRecord -> {
        logRecord.setValue("filename", filename);
        logRecord.setValue("fileType", fileType);
        collector.emit(logRecord);
      });
    }
  } catch (IOException e) {
    LOGGER.error(metadataJson, e);
    throw new FailedException(e);
  }
}
 
Example #28
Source File: CsvUrlBolt.java    From cognition with Apache License 2.0
@Override
protected void execute(Tuple tuple, RecordCollector collector) {
  String metadataJson = new String((byte[]) tuple.getValue(0));
  File tempFile = null;

  try {
    FileMetadata fileMetadata = FileMetadata.parseJson(metadataJson);

    String filename = fileMetadata.getFilename();
    String fileUrl = fileMetadata.getFileUrl();
    String fileType = fileMetadata.getFileType();

    if (isBlank(filename) || isBlank(fileUrl) || isBlank(fileType)) {
      LOGGER.error("Incomplete file metadata. Requires: filename, fileUrl and fileType. {}", fileMetadata);
      throw new FailedException("Incomplete file metadata: " + fileMetadata);
    }

    tempFile = File.createTempFile("csv", null);
    FileUtils.copyURLToFile(new URL(fileUrl), tempFile);

    try (FileReader fileReader = new FileReader(tempFile)) {

      CsvLogRecordParser parser = new CsvLogRecordParser(csvParserConfig);
      parser.parse(fileReader, fileType, logRecord -> {
        logRecord.setValue("filename", filename);
        logRecord.setValue("fileType", fileType);
        logRecord.setValue("fileUrl", fileUrl);
        collector.emit(logRecord);
      });
    }
  } catch (IOException e) {
    LOGGER.error(metadataJson, e);
    throw new FailedException(e);
  } finally {
    FileUtils.deleteQuietly(tempFile);
  }
}
 
Example #29
Source File: ESIndexState.java    From storm-trident-elasticsearch with Apache License 2.0
protected byte[] serializeSourceOrFail(Document<T> doc) {
    try {
        return serializer.serialize(doc.getSource());
    } catch (IOException e) {
        LOGGER.error("Error while serializing document source", e);
        throw new FailedException("Failed to serialize source as byte[]", e);
    }
}
 
Example #30
Source File: BatchMetaSpout.java    From jstorm with Apache License 2.0
@Override
public byte[] commit(BatchId id) throws FailedException {
    return metaClient.commit(id);
}