Java Code Examples for org.apache.storm.tuple.Tuple

The following are top-voted examples showing how to use org.apache.storm.tuple.Tuple. These examples are extracted from open-source projects. You can vote up the examples you like; your votes will be used by our system to surface more high-quality examples.
Example 1
Project: DBus   File: DispatcherBolt.java   View source code 6 votes vote down vote up
@Override
public void execute(Tuple input) {
    // Tick tuples carry no data; acknowledge and skip them.
    if (TupleUtils.isTick(input)) {
        collector.ack(input);
        return;
    }
    try {
        // Route the tuple to the handler registered for its command type.
        Command command = (Command) input.getValueByField(EmitFields.COMMAND);
        handlerManager.getHandler(command).handle(input);
        this.collector.ack(input);
    } catch (Exception e) {
        // Fail the tuple so it is replayed, and surface the error to Storm.
        this.collector.fail(input);
        this.collector.reportError(e);
        logger.error("Process Error!", e);
    }

}
 
Example 2
Project: DStream   File: PredictorBolt.java   View source code 6 votes vote down vote up
public void execute(Tuple tuple) {
    final String word = tuple.getStringByField(Constraints.wordFileds);
    Integer count = tuple.getIntegerByField(Constraints.coinCountFileds);

    // Feed the word/count observation into the hot-key predictor.
    predictorHotKeyUtil.PredictorHotKey(word,count);

    if(predictorHotKeyUtil.isHotKey(word))
        collector.emit(new Values(word,1));

    predictorHotKeyUtil.SynopsisHashMapRandomDump(new DumpRemoveHandler() {
        @Override
        public void dumpRemove(String key) {
            // Bug fix: emit the dumped key rather than the captured outer
            // "word" — the callback parameter was ignored, so the current
            // word was re-emitted once per dumped entry.
            collector.emit(new Values(key,1));
        }
    });

    collector.ack(tuple);
}
 
Example 3
Project: SQLonStorm   File: GroupByBolt.java   View source code 6 votes vote down vote up
/**
 * Computes the aggregate function max over one group of tuples.
 *
 * @param isJoin    whether the query contains a join (affects field naming)
 * @param tupleList the tuples belonging to one group
 * @param parameter the max(...) argument, e.g. max(tab1.A); only one argument is supported
 * @return the maximum of the integer field values; Integer.MIN_VALUE when the
 *         group contains no non-empty values
 */
private int getResultofMax(boolean isJoin, List<Tuple> tupleList, TCItem parameter) {
    // The field key is loop-invariant: qualified "table.col" for joins, bare column otherwise.
    String fieldKey = isJoin
            ? parameter.getTableName() + "." + parameter.getColName()
            : parameter.getColName();
    int maxRes = Integer.MIN_VALUE;
    for (Tuple tuple : tupleList) {
        String valueStr = tuple.getStringByField(fieldKey);
        if (!valueStr.isEmpty()) {
            // parseInt avoids the boxing performed by Integer.valueOf.
            maxRes = Math.max(maxRes, Integer.parseInt(valueStr));
        }
    }
    return maxRes;
}
 
Example 4
Project: storm_spring_boot_demo   File: RollingCountAggBolt.java   View source code 6 votes vote down vote up
@Override
public void execute(Tuple tuple) {
  // Partial count for one object, produced by a single upstream task.
  Object obj = tuple.getValue(0);
  long count = tuple.getLong(1);
  int source = tuple.getSourceTask();
  // computeIfAbsent replaces the get/null-check/put sequence; track the
  // latest partial count reported by each source task for this object.
  Map<Integer, Long> subCounts = counts.computeIfAbsent(obj, k -> new HashMap<Integer, Long>());
  subCounts.put(source, count);
  // Emit the sum of all known partial counts for this key.
  long sum = 0;
  for (Long val : subCounts.values()) {
    sum += val;
  }
  collector.emit(new Values(obj, sum));
}
 
Example 5
Project: open-kilda   File: OFELinkBoltTest.java   View source code 6 votes vote down vote up
@Test
public void invalidJsonForDiscoveryFilter() throws CmdLineException, ConfigurationException {
    // Build the bolt under test from the topology configuration.
    OFEventWFMTopology manager = new OFEventWFMTopology(makeLaunchEnvironment());
    TopologyConfig config = manager.getConfig();
    OFELinkBolt bolt = new OFELinkBolt(config);

    // Mock the context so the bolt can resolve its input stream metadata.
    TopologyContext context = Mockito.mock(TopologyContext.class);
    Mockito.when(context.getComponentId(TASK_ID_BOLT))
            .thenReturn(COMPONENT_ID_SOURCE);
    Mockito.when(context.getComponentOutputFields(COMPONENT_ID_SOURCE, STREAM_ID_INPUT))
            .thenReturn(KafkaMessage.FORMAT);

    OutputCollectorMock collectorMock = Mockito.spy(new OutputCollectorMock());
    bolt.prepare(stormConfig(), context, new OutputCollector(collectorMock));
    bolt.initState(new InMemoryKeyValueState<>());

    // Malformed JSON must not crash the bolt; the tuple is still acked.
    Tuple tuple = new TupleImpl(context, new Values("{\"corrupted-json"), TASK_ID_BOLT, STREAM_ID_INPUT);
    bolt.doWork(tuple);

    Mockito.verify(collectorMock).ack(tuple);
}
 
Example 6
Project: patent-crawler   File: StatusUpdaterBolt.java   View source code 6 votes vote down vote up
/**
 * Do not ack the tuple straight away! wait to get the confirmation that it
 * worked
 **/
public void ack(Tuple t, String url) {
    // All access to the waitAck cache is serialized on the cache itself.
    synchronized (waitAck) {
        // Pending tuples are keyed by the SHA-256 of their URL.
        String sha256hex = org.apache.commons.codec.digest.DigestUtils
                .sha256Hex(url);
        List<Tuple> tt = waitAck.getIfPresent(sha256hex);
        if (tt == null) {
            // check that there has been no removal of the entry since
            Metadata metadata = (Metadata) t.getValueByField("metadata");
            if (metadata.getFirstValue("es.status.skipped.sending") != null) {
                LOG.debug(
                        "Indexing skipped for {} with ID {} but key removed since",
                        url, sha256hex);
                // ack straight away!
                super.ack(t, url);
                return;
            }
            tt = new LinkedList<>();
        }
        // Park the tuple until confirmation arrives; it is acked or failed later.
        tt.add(t);
        waitAck.put(sha256hex, tt);
        LOG.debug("Added to waitAck {} with ID {} total {}", url,
                sha256hex, tt.size());
    }
}
 
Example 7
Project: Mastering-Apache-Storm   File: StormHBaseBolt.java   View source code 6 votes vote down vote up
public void execute(Tuple input, BasicOutputCollector collector) {
	// Build the column-family -> (qualifier -> value) structure HBase expects.
	Map<String, Object> personal = new HashMap<String, Object>();
	personal.put("firstName", input.getValueByField("firstName"));
	personal.put("lastName", input.getValueByField("lastName"));

	Map<String, Object> company = new HashMap<String, Object>();
	company.put("companyName", input.getValueByField("companyName"));

	Map<String, Map<String, Object>> record = new HashMap<String, Map<String, Object>>();
	record.put("personal", personal);
	record.put("company", company);

	// Persist the row under a random UUID row key.
	hbaseOperations.insert(record, UUID.randomUUID().toString());
}
 
Example 8
Project: storm-clickhouse   File: ClickhouseInsertBolt.java   View source code 6 votes vote down vote up
@SuppressWarnings("rawtypes")
@Override
protected void process(Tuple tuple) {
    try {
        // Map the tuple into a single row of columns.
        List<List<Column>> rows = new ArrayList<List<Column>>();
        rows.add(jdbcMapper.getColumns(tuple));
        // Use the explicit insert query when no table name is configured.
        if (StringUtils.isBlank(tableName)) {
            this.jdbcClient.executeInsertQuery(this.insertQuery, rows);
        } else {
            this.jdbcClient.insert(this.tableName, rows);
        }
        this.collector.ack(tuple);
    } catch (Exception e) {
        // Report the error and fail the tuple so Storm replays it.
        this.collector.reportError(e);
        this.collector.fail(tuple);
    }
}
 
Example 9
Project: patent-crawler   File: RedirectionBolt.java   View source code 6 votes vote down vote up
@Override
public void execute(Tuple tuple) {
    String url = tuple.getStringByField("url");
    byte[] content = tuple.getBinaryByField("content");
    Metadata metadata = (Metadata) tuple.getValueByField("metadata");
    String text = tuple.getStringByField("text");

    Values values = new Values(url, content, metadata, text);

    // No extracted text yet: route to the "tika" stream for parsing;
    // otherwise pass straight through on the default stream.
    if (StringUtils.isBlank(text)) {
        collector.emit("tika", tuple, values);
    } else {
        collector.emit(tuple, values);
    }

    collector.ack(tuple);
}
 
Example 10
Project: open-kilda   File: ConfirmationBolt.java   View source code 6 votes vote down vote up
@Override
public void execute(Tuple input, BasicOutputCollector collector) {
    if (input.contains(FIELD_SWITCH_ID)) {
        String switchId = (String) input.getValueByField(FIELD_SWITCH_ID);
        // Reuse the already-extracted value instead of reading the field twice.
        logger.debug("switchId=" + switchId);

        if (isSwitchConfirmed(switchId)) {
            collector.emit(new Values(switchId));
        } else {
            logger.warn("could not confirm switch with id " + switchId);
            // TODO - any action here?
        }
    } else {
        logger.error(FIELD_SWITCH_ID + " not found in tuple " + input);
    }
}
 
Example 11
Project: DBus   File: HeartbeatHandler.java   View source code 6 votes vote down vote up
@Override
public void handle(Tuple tuple) {
    // Payload carried by the tuple: a batch of heartbeat messages.
    EmitData data = (EmitData) tuple.getValueByField(Constants.EmitFields.DATA);
    List<PairWrapper<String, Object>> wrapperList = data.get(EmitData.MESSAGE);
    if (wrapperList != null && !wrapperList.isEmpty()) {
        for (PairWrapper<String, Object> wrapper : wrapperList) {
            HeartbeatPulse pulse = HeartbeatPulse.build(wrapper.pairs2map());
            if (logger.isDebugEnabled()) {
                // Debug-only diagnostics: decode the packet to log its send time.
                Object offset = data.get(EmitData.OFFSET);
                HeartBeatPacket packet = HeartBeatPacket.parse(pulse.getPacket());
                SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
                String groupId = tuple.getStringByField(Constants.EmitFields.GROUP_FIELD);
                logger.debug("[heartbeat] {} offset:{} ts:{}, time:{}", groupId, offset == null ? -1 : offset, packet.getTxtime(), format.format(new Date(packet.getTxtime())));
            }
            // Record the pulse with the heartbeat reporter.
            reporter.mark(pulse);
        }
    }

    // Delegate to the wrapped handler, then ack the tuple.
    handler.handle(tuple);
    this.listener.getOutputCollector().ack(tuple);
}
 
Example 12
Project: DStream   File: WordCounterBolt.java   View source code 6 votes vote down vote up
@Override
public void execute(Tuple tuple) {
    // Count of tuples processed by this bolt instance (monitoring counter).
    boltstatus++;

    String word = tuple.getStringByField(Constraints.wordFileds);
    if (!word.isEmpty()) {
        // Map.merge replaces the get/null-check/increment/put sequence.
        Long count = counts.merge(word, 1L, Long::sum);
        // Anchor the emit to the input tuple for replay on downstream failure.
        outputCollector.emit(tuple,new Values(word,count));
    }
    outputCollector.ack(tuple);
}
 
Example 13
Project: open-kilda   File: CrudBolt.java   View source code 6 votes vote down vote up
/**
 * Handles a flow restore request: recomputes the flow's path, updates the
 * flow if already cached or creates it otherwise, then emits the restored
 * flow on the UPDATE stream.
 *
 * @param message command message wrapping a FlowRestoreRequest payload
 * @param tuple   the tuple the emitted values are anchored to
 * @throws IOException if the outgoing FlowInfoData cannot be serialized to JSON
 */
private void handleRestoreRequest(CommandMessage message, Tuple tuple) throws IOException {
    ImmutablePair<Flow, Flow> requestedFlow = ((FlowRestoreRequest) message.getData()).getPayload();

    ImmutablePair<PathInfoData, PathInfoData> path = pathComputer.getPath(requestedFlow.getLeft());
    logger.info("Restored flow path: {}", path);

    // A multi-switch flow with an empty computed path cannot be restored.
    if (!flowCache.isOneSwitchFlow(requestedFlow) && pathComputer.isEmpty(path)) {
        throw new MessageException(message.getCorrelationId(), System.currentTimeMillis(),
                ErrorType.CREATION_FAILURE, "Could not restore flow", "Path was not found");
    }

    // Update in place when the flow is already cached, otherwise create it.
    ImmutablePair<Flow, Flow> flow;
    if (flowCache.cacheContainsFlow(requestedFlow.getLeft().getFlowId())) {
        flow = flowCache.updateFlow(requestedFlow, path);
    } else {
        flow = flowCache.createFlow(requestedFlow, path);
    }
    logger.info("Restored flow: {}", flow);

    Values topology = new Values(Utils.MAPPER.writeValueAsString(
            new FlowInfoData(requestedFlow.getLeft().getFlowId(), flow,
                    FlowOperation.UPDATE, message.getCorrelationId())));
    outputCollector.emit(StreamType.UPDATE.toString(), tuple, topology);
}
 
Example 14
Project: SQLonStorm   File: SingleJoinBolt.java   View source code 6 votes vote down vote up
@Override
public void execute(Tuple tuple) {
  // Group key: the values of the join-id fields for this tuple.
  List<Object> id = tuple.select(_idFields);
  GlobalStreamId streamId = new GlobalStreamId(tuple.getSourceComponent(), tuple.getSourceStreamId());
  if (!_pending.containsKey(id)) {
    _pending.put(id, new HashMap<GlobalStreamId, Tuple>());
  }
  Map<GlobalStreamId, Tuple> parts = _pending.get(id);
  if (parts.containsKey(streamId)) {
    throw new RuntimeException("Received same side of single join twice");
  }
  parts.put(streamId, tuple);
  // Not all sides of the join have arrived yet; keep waiting.
  if (parts.size() != _numSources) {
    return;
  }
  _pending.remove(id);
  // Assemble the output in declared field order, pulling each value from
  // whichever side of the join owns that field.
  List<Object> joined = new ArrayList<Object>();
  for (String outField : _outFields) {
    joined.add(parts.get(_fieldLocations.get(outField)).getValueByField(outField));
  }
  // Anchor on every contributing tuple, then ack them all.
  _collector.emit(new ArrayList<Tuple>(parts.values()), joined);
  for (Tuple part : parts.values()) {
    _collector.ack(part);
  }
}
 
Example 15
Project: open-kilda   File: OpenTSDBFilterBolt.java   View source code 6 votes vote down vote up
@Override
public void execute(Tuple tuple) {
    final String data = tuple.getString(0);
    // Bug fix: the original call had no "{}" placeholder, so the datapoint
    // text was silently dropped from the log message.
    LOGGER.debug("Processing datapoint {}", data);
    try {
        Datapoint datapoint = MAPPER.readValue(data, Datapoint.class);
        if (isUpdateRequired(datapoint)) {
            addDatapoint(datapoint);

            List<Object> stream = Stream.of(datapoint.getMetric(), datapoint.getTime(), datapoint.getValue(),
                    datapoint.getTags())
                    .collect(Collectors.toList());

            LOGGER.debug("emit: " + stream);
            collector.emit(stream);
        }
    } catch (IOException e) {
        LOGGER.error("Failed read datapoint", e);
    } finally {
        // Ack unconditionally: malformed datapoints are logged, not replayed.
        collector.ack(tuple);
    }
}
 
Example 16
Project: Mastering-Apache-Storm   File: SequenceFileBolt.java   View source code 6 votes vote down vote up
public void execute(Tuple tuple) {
    try {
        long offset;
        // Writer access is serialized; rotation synchronizes on the same lock.
        synchronized (this.writeLock) {
            this.writer.append(this.format.key(tuple), this.format.value(tuple));
            offset = this.writer.getLength();

            // Sync the file to HDFS whenever the sync policy triggers.
            if (this.syncPolicy.mark(tuple, offset)) {
                this.writer.hsync();
                this.syncPolicy.reset();
            }
        }

        // Ack only after the append (and any sync) succeeded.
        this.collector.ack(tuple);
        if (this.rotationPolicy.mark(tuple, offset)) {
            rotateOutputFile(); // synchronized
            this.rotationPolicy.reset();
        }
    } catch (IOException e) {
        // Fail the tuple so Storm replays it after a write/sync error.
        LOG.warn("write/sync failed.", e);
        this.collector.fail(tuple);
    }

}
 
Example 17
Project: DStream   File: SplitterBolt.java   View source code 6 votes vote down vote up
public void execute(Tuple tuple) {
    if(tuple.getSourceComponent().equals(UPSTREAM_COMPONENT_ID)){
        String word = tuple.getStringByField(UPSTREAM_FIEDLS);
        // isEmpty() is the idiomatic form of length() <= 0 (length is never negative).
        if(word.isEmpty()) {
            collector.ack(tuple);
            return;
        }
        collector.emit(Constraints.coinFileds, new Values(word));
        // Route by hotness: words present in the Bloom filter go to the hot stream.
        Key ky = new Key(word.getBytes());
        if(bf.membershipTest(ky))
            collector.emit(Constraints.hotFileds, tuple, new Values(word));
        else
            collector.emit(Constraints.nohotFileds, tuple, new Values(word));

    }else {
        // Control stream: add (type=1) or remove (type=0) words from the Bloom filter.
        String key = tuple.getStringByField(Constraints.wordFileds);
        Integer type = tuple.getIntegerByField(Constraints.typeFileds);
        Key hk = new Key(key.getBytes());
        if(!bf.membershipTest(hk) && type.equals(1))
            bf.add(hk);
        if(bf.membershipTest(hk) && type.equals(0))
            bf.delete(hk);
    }
    collector.ack(tuple);
}
 
Example 18
Project: open-kilda   File: CacheBolt.java   View source code 6 votes vote down vote up
/**
 * {@inheritDoc}
 */
@Override
protected void doTick(Tuple tuple) {
    // FIXME(dbogun): tick only once, because timePassed never reset
    if (timePassed == discoveryInterval) {
        Values payload = getNetworkRequest();
        if (payload == null) {
            logger.error("Could not send network cache request");
        } else {
            outputCollector.emit(StreamType.TPE.toString(), tuple, payload);
        }
    }
    // The counter saturates just past the interval instead of growing forever.
    if (timePassed <= discoveryInterval) {
        timePassed += 1;
    }
}
 
Example 19
Project: storm_spring_boot_demo   File: SingleJoinBolt.java   View source code 6 votes vote down vote up
@Override
public void execute(Tuple tuple) {
  // Tuples are grouped by the values of the join-id fields.
  List<Object> groupKey = tuple.select(_idFields);
  GlobalStreamId origin = new GlobalStreamId(tuple.getSourceComponent(), tuple.getSourceStreamId());
  if (!_pending.containsKey(groupKey)) {
    _pending.put(groupKey, new HashMap<GlobalStreamId, Tuple>());
  }
  Map<GlobalStreamId, Tuple> sides = _pending.get(groupKey);
  if (sides.containsKey(origin)) {
    throw new RuntimeException("Received same side of single join twice");
  }
  sides.put(origin, tuple);
  // Once every source stream has contributed, build and emit the joined tuple.
  if (sides.size() == _numSources) {
    _pending.remove(groupKey);
    List<Object> result = new ArrayList<Object>();
    for (String field : _outFields) {
      GlobalStreamId owner = _fieldLocations.get(field);
      result.add(sides.get(owner).getValueByField(field));
    }
    _collector.emit(new ArrayList<Tuple>(sides.values()), result);
    // Ack every tuple that participated in the join.
    for (Tuple side : sides.values()) {
      _collector.ack(side);
    }
  }
}
 
Example 20
Project: open-kilda   File: CacheBolt.java   View source code 6 votes vote down vote up
private void emitRerouteCommands(Set<ImmutablePair<Flow, Flow>> flows, Tuple tuple,
                                 String correlationId, FlowOperation operation) {
    String rerouteCorrelationId = String.format("%s-%s", correlationId, REROUTE.toString());

    for (ImmutablePair<Flow, Flow> flowPair : flows) {
        try {
            // Mark both directions of the flow DOWN before requesting a reroute.
            flowPair.getLeft().setState(FlowState.DOWN);
            flowPair.getRight().setState(FlowState.DOWN);

            CommandMessage command = new CommandMessage(
                    new FlowRerouteRequest(flowPair.getLeft(), operation),
                    System.currentTimeMillis(), rerouteCorrelationId, Destination.WFM);
            outputCollector.emit(StreamType.WFM_DUMP.toString(), tuple,
                    new Values(Utils.MAPPER.writeValueAsString(command)));

            logger.info("Flow {} reroute command message sent", flowPair.getLeft().getFlowId());
        } catch (JsonProcessingException exception) {
            logger.error("Could not format flow reroute request by flow={}", flowPair, exception);
        }
    }
}
 
Example 21
Project: SQLonStorm   File: GroupByBolt.java   View source code 6 votes vote down vote up
/**
 * Computes the aggregate function count over one group of tuples.
 *
 * @param isJoin        whether the query contains a join (affects field naming)
 * @param tupleList     the tuples belonging to one group
 * @param parameterList the count(...) arguments, e.g. count(tab1.A, tab1.B); a
 *                      tuple is excluded when any argument field is empty for it
 * @return the number of tuples whose argument fields are all non-empty
 */
private int getResultOfCount(boolean isJoin, List<Tuple> tupleList, List<TCItem> parameterList) {
    int counted = 0;
    tupleLoop:
    for (Tuple tuple : tupleList) {
        for (TCItem item : parameterList) {
            // Field key is qualified "table.col" for joins, bare column otherwise.
            String fieldKey = isJoin
                    ? item.getTableName() + "." + item.getColName()
                    : item.getColName();
            if (tuple.getStringByField(fieldKey).isEmpty()) {
                // A missing value disqualifies the whole tuple.
                continue tupleLoop;
            }
        }
        counted++;
    }
    return counted;
}
 
Example 22
Project: Mastering-Apache-Storm   File: ApacheLogSplitterBolt.java   View source code 6 votes vote down vote up
public void execute(Tuple input, BasicOutputCollector collector) {
	// Raw Apache access-log line produced upstream.
	String log = input.getString(0);
	if (StringUtils.isBlank(log)) {
		// ignore blank lines
		return;
	}

	// Split the raw line into its named elements (ip, referrer, user-agent, ...).
	Map<String, Object> logMap = apacheLogSplitter.logSplitter(log);

	// Emit the elements in the fixed LOG_ELEMENTS order expected downstream.
	List<Object> fields = new ArrayList<Object>();
	for (String element : LOG_ELEMENTS) {
		fields.add(logMap.get(element));
	}
	collector.emit(fields);
}
 
Example 23
Project: DBus   File: DbusKafkaWriterBolt.java   View source code 6 votes vote down vote up
@Override
public void execute(Tuple input) {
    // Tick tuples carry no data; acknowledge and skip them.
    if (TupleUtils.isTick(input)) {
        collector.ack(input);
        return;
    }

    try {
        // Route the tuple to the handler registered for its command type.
        Command cmd = (Command) input.getValueByField(EmitFields.COMMAND);
        BoltCommandHandler handler = handlerManager.getHandler(cmd);
        handler.handle(input);
        // NOTE(review): unlike the other DBus bolts, there is no ack on the
        // success path here — presumably the handler (or an async Kafka send
        // callback) acks the tuple later; confirm before adding one.
    } catch (Exception e) {
        logger.error("Process data error", e);
        this.collector.reportError(e);
        this.collector.fail(input);
    }

}
 
Example 24
Project: open-kilda   File: OFELinkBolt.java   View source code 6 votes vote down vote up
private void handleSwitchEvent(Tuple tuple, SwitchInfoData switchData) {
    String switchId = switchData.getSwitchId();
    String state = "" + switchData.getState();
    logger.info("DISCO: Switch Event: switch={} state={}", switchId, state);

    if (SwitchState.DEACTIVATED.getType().equals(state)) {
        // current logic: switch down means stop checking associated ports/links.
        // - possible extra steps of validation of switch down should occur elsewhere
        // - possible extra steps of generating link down messages aren't important since
        //      the TPE will drop the switch node from its graph.
        discovery.handleSwitchDown(switchId);
    } else if (SwitchState.ACTIVATED.getType().equals(state)) {
        discovery.handleSwitchUp(switchId);
    } else {
        // TODO: Should this be a warning? Evaluate whether any other state needs to be handled
        logger.warn("SWITCH Event: ignoring state: {}", state);
    }

    // Pass the original message along, to the Topology Engine topic.
    collector.emit(topoEngTopic, tuple, new Values(PAYLOAD, tuple.getString(0)));
}
 
Example 25
Project: open-kilda   File: OFELinkBolt.java   View source code 6 votes vote down vote up
private void handleIslEvent(Tuple tuple, IslInfoData discoveredIsl) {
    // The first path node identifies the reporting switch/port.
    PathNode node = discoveredIsl.getPath().get(0);
    String switchId = node.getSwitchId();
    String portId = "" + node.getPortNo();
    IslChangeType state = discoveredIsl.getState();
    logger.info("DISCO: ISL Event: switch={} port={} state={}", switchId, portId, state);

    if (IslChangeType.DISCOVERED.equals(state)) {
        discovery.handleDiscovered(switchId, portId);
    } else if (IslChangeType.FAILED.equals(state)) {
        discovery.handleFailed(switchId, portId);
    } else {
        // TODO: Should this be a warning? Evaluate whether any other state needs to be handled
        logger.warn("ISL Event: ignoring state: {}", state);
    }

    // Forward the original JSON payload to the Topology Engine topic.
    collector.emit(topoEngTopic, tuple, new Values(PAYLOAD, tuple.getString(0)));
}
 
Example 26
Project: DBus   File: DbusAppenderBolt.java   View source code 6 votes vote down vote up
@Override
public void execute(Tuple input) {
    // Skip tick tuples entirely — just acknowledge them.
    if (TupleUtils.isTick(input)) {
        collector.ack(input);
        return;
    }
    try {
        // Look up and run the handler for this tuple's command.
        Command cmd = (Command) input.getValueByField(EmitFields.COMMAND);
        BoltCommandHandler commandHandler = handlerManager.getHandler(cmd);
        commandHandler.handle(input);
        this.collector.ack(input);
    } catch (Exception e) {
        // Replay the tuple on failure and report the error to the topology.
        this.collector.fail(input);
        this.collector.reportError(e);
        logger.error("Process Error!", e);
    }

}
 
Example 27
Project: open-kilda   File: SpeakerBolt.java   View source code 6 votes vote down vote up
protected void discoverIslPartTwo(Tuple tuple, IslInfoData data) throws Exception {
    /*
     * Second part of the discover process.
     *
     * 1.  Grabs a message that has been sent from the first part and thus we know that the source port is
     *     and active ISL.
     * 2.  Check the status of the destination port, in Path[1], and if activeISL then emit to Kafka
     */
    // The destination switch must be active for the ISL to be reported.
    ISwitchImpl sw = getSwitch(data.getPath().get(1).getSwitchId());
    if (!sw.isActive()) {
        return;
    }
    IPortImpl port = sw.getPort(data.getPath().get(1).getPortNo());

    if (port.isActiveIsl()) {
        // Both endpoints are active ISL ports: publish the discovery to Kafka.
        long now = Instant.now().toEpochMilli();
        InfoMessage infoMessage = new InfoMessage(data, now, "system", null);
        logger.debug("checking isl on: {}", data.toString());
        collector.emit(SimulatorTopology.KAFKA_BOLT_STREAM, tuple,
                new Values("INFO", Utils.MAPPER.writeValueAsString(infoMessage)));
    }
}
 
Example 28
Project: Practical-Real-time-Processing-and-Analytics   File: WordCountBolt.java   View source code 5 votes vote down vote up
public void execute(Tuple tuple) {
	String word = tuple.getStringByField("word");
	// Map.merge replaces the get/null-check/increment/put running-count idiom.
	Long count = this.counts.merge(word, 1L, Long::sum);
	this.collector.emit(new Values(word, count));
}
 
Example 29
Project: Mastering-Apache-Storm   File: HdfsBolt.java   View source code 5 votes vote down vote up
public void execute(Tuple tuple) {
	try {
		// Serialize the tuple using the configured record format.
		byte[] bytes = this.format.format(tuple);
		synchronized (this.writeLock) {
			out.write(bytes);
			this.offset += bytes.length;

			// Flush to HDFS whenever the sync policy triggers.
			if (this.syncPolicy.mark(tuple, this.offset)) {
				// HdfsDataOutputStream needs UPDATE_LENGTH so readers see the new size.
				if (this.out instanceof HdfsDataOutputStream) {
					((HdfsDataOutputStream) this.out).hsync(EnumSet
							.of(SyncFlag.UPDATE_LENGTH));
				} else {
					this.out.hsync();
				}
				this.syncPolicy.reset();
			}
		}

		// Ack only after the write (and any sync) succeeded.
		this.collector.ack(tuple);

		if (this.rotationPolicy.mark(tuple, this.offset)) {
			rotateOutputFile(); // synchronized
			this.offset = 0;
			this.rotationPolicy.reset();
		}
	} catch (IOException e) {
		// Fail the tuple so Storm replays it after a write/sync error.
		LOG.warn("write/sync failed.", e);
		this.collector.fail(tuple);
	}
}
 
Example 30
Project: Practical-Real-time-Processing-and-Analytics   File: ElasticSearchBolt.java   View source code 5 votes vote down vote up
@Override
public void execute(Tuple input, BasicOutputCollector collector) {
	// Raw JSON payload taken from field 0 of the incoming tuple.
	String valueByField = input.getString(0);
	// NOTE(review): System.out and printStackTrace() should be replaced with a
	// proper logger; left as-is to keep this change documentation-only.
	System.out.println(valueByField);
	try {
		// Index the document into the "pub-nub" index under type "sensor-data".
		IndexResponse response = client.prepareIndex("pub-nub", "sensor-data")
				.setSource(convertStringtoMap(valueByField)).get();
		System.out.println(response.status());
	} catch (IOException e) {
		// NOTE(review): the failure is swallowed after printing — confirm the
		// tuple is intentionally still acknowledged by the basic-bolt contract.
		e.printStackTrace();
	}
}
 
Example 31
Project: DBus   File: DispatcherResumeHandler.java   View source code 5 votes vote down vote up
@Override
public void handle(Tuple tuple) {
    // Unpack the payload and command carried by the tuple.
    EmitData emitData = (EmitData) tuple.getValueByField(Constants.EmitFields.DATA);
    Command cmd = (Command) tuple.getValueByField(Constants.EmitFields.COMMAND);
    TopicResumeCmd ctrlCmd = emitData.get(EmitData.CTRL_CMD);

    // Re-emit keyed by "schema.table" so the resume reaches the right group.
    String schemaDotTable = Joiner.on(".").join(ctrlCmd.getSchema(), ctrlCmd.getTable());
    this.emit(listener.getOutputCollector(), tuple, schemaDotTable, emitData, cmd);
    logger.info("Resume table[{}]", groupField(ctrlCmd.getSchema(), ctrlCmd.getTable()));
}
 
Example 32
Project: patent-crawler   File: StatusUpdaterBolt.java   View source code 5 votes vote down vote up
public void onRemoval(RemovalNotification<String, List<Tuple>> removal) {
    // Explicit removals are expected; only evictions indicate lost confirmations.
    if (!removal.wasEvicted()) {
        return;
    }
    List<Tuple> evicted = removal.getValue();
    LOG.error("Purged from waitAck {} with {} values", removal.getKey(),
            evicted.size());
    // Fail every parked tuple so Storm replays them.
    for (Tuple tuple : evicted) {
        _collector.fail(tuple);
    }
}
 
Example 33
Project: Mastering-Apache-Storm   File: KeyWordIdentifierBolt.java   View source code 5 votes vote down vote up
public void execute(Tuple input) {

		// getStringByField already returns a String; the original chained a
		// redundant toString() onto it.
		String referrer = input.getStringByField("referrer");
		// call the getKeyword(String referrer) method KeywordGenerator class to
		// generate the search keyword.
		Object keyword = keywordGenerator.getKeyword(referrer);
		// emits all the field emitted by previous bolt + keyword
		collector.emit(new Values(input.getString(0), input.getString(1), input
				.getString(2), input.getString(3), input.getString(4), input
				.getString(5), input.getString(6), input.getString(7), input
				.getString(8), input.getString(9), keyword));

	}
 
Example 34
Project: DBus   File: DispatcherInitialLoadHandler.java   View source code 5 votes vote down vote up
@Override
public void handle(Tuple tuple) {
    // Unpack the payload and command carried by the tuple.
    EmitData emitData = (EmitData) tuple.getValueByField(Constants.EmitFields.DATA);
    Command cmd = (Command) tuple.getValueByField(Constants.EmitFields.COMMAND);

    String dbschema = emitData.get(EmitData.DB_SCHEMA);
    String table = emitData.get(EmitData.DATA_TABLE);

    // Forward downstream, grouped by schema/table.
    this.emit(listener.getOutputCollector(), tuple, groupField(dbschema, table), emitData, cmd);
    logger.info("Full pull request[{}]", groupField(dbschema, table));
}
 
Example 35
Project: DBus   File: PagedBatchDataFetchingBolt.java   View source code 5 votes vote down vote up
private void emitMonitorState(Tuple input, String dataSourceInfo, String dbNameSpace, long dealRows, String startSecs, String totalRows, String totalPartitions, int finishedShardCount) {
    // Assemble the ZK monitoring snapshot for this namespace.
    JSONObject state = new JSONObject();
    state.put(DataPullConstants.DATA_SOURCE_INFO, dataSourceInfo);
    state.put(DataPullConstants.DATA_SOURCE_NAME_SPACE, dbNameSpace);
    state.put(DataPullConstants.ZkMonitoringJson.DB_NAMESPACE_NODE_FINISHED_COUNT, finishedShardCount);
    state.put(DataPullConstants.ZkMonitoringJson.DB_NAMESPACE_NODE_FINISHED_ROWS, dealRows);
    state.put(DataPullConstants.ZkMonitoringJson.DB_NAMESPACE_NODE_START_SECS, startSecs);
    state.put(DataPullConstants.ZkMonitoringJson.DB_NAMESPACE_NODE_TOTAL_ROWS, totalRows);
    state.put(DataPullConstants.DATA_CHUNK_COUNT, totalPartitions);
    // Anchored emit so the monitor tuple participates in the input's ack chain.
    collector.emit(input, new Values(state));
}
 
Example 36
Project: Building-Data-Streaming-Applications-with-Apache-Kafka   File: StringToWordsSpliterBolt.java   View source code 5 votes vote down vote up
public void execute(Tuple input) {
    // Split the incoming line on single spaces and emit each non-empty word.
    String line = input.getString(0);
    for (String word : line.split(" ")) {
        if (word.isEmpty()) {
            continue;
        }
        collector.emit(new Values(word));
    }
    collector.ack(input);
}
 
Example 37
Project: DBus   File: KafkaBoltDefaultHandler.java   View source code 5 votes vote down vote up
@Override
public void handle(Tuple tuple) {
    EmitData emitData = (EmitData) tuple.getValueByField(Constants.EmitFields.DATA);
    MetaVersion version = emitData.get(EmitData.VERSION);
    DbusMessage message = emitData.get(EmitData.MESSAGE);
    String key = Joiner.on(".").join(version.getSchema(), version.getTable());
    reporter.reportStat(key, message.payloadSizeWithoutBefore());
    logger.debug("report stat on:{},report count:{}", key, message.getPayload().size());
    listener.writeData(version.getSchema(), version.getTable(), message, tuple);
}
 
Example 38
Project: open-kilda   File: FieldNameTopicSelector.java   View source code 5 votes vote down vote up
@Override
public String getTopic(Tuple tuple) {
    // Fall back to the default topic when the tuple lacks the topic field.
    if (!tuple.contains(fieldName)) {
        LOG.warn("Field {} Not Found. Returning default topic {}", fieldName, defaultTopicName);
        return defaultTopicName;
    }
    return tuple.getStringByField(fieldName);
}
 
Example 39
Project: DBus   File: AppenderInitialLoadHandler.java   View source code 5 votes vote down vote up
@Override
public void handle(Tuple tuple) {
    EmitData emitData = (EmitData) tuple.getValueByField(Constants.EmitFields.DATA);
    String dbschema = emitData.get(EmitData.DB_SCHEMA);
    String table = emitData.get(EmitData.DATA_TABLE);

    logger.info("Full data request was received [{}.{}]", dbschema, table);

    // The table must have a known meta version before a full pull can start.
    MetaVersion ver = MetaVerController.getVersionFromCache(dbschema, table);
    if (ver == null) {
        throw new RuntimeException("The version of table " + dbschema + "." + table + " was not found!");
    }

    // Flip the data table's status to WAITING while the full pull runs.
    BoltCommandHandlerHelper.changeDataTableStatus(ver.getSchema(), ver.getTable(), DataTable.STATUS_WAITING);

    logger.info("Full data request was processed");
}
 
Example 40
Project: Mastering-Apache-Storm   File: DelimitedRecordFormat.java   View source code 5 votes vote down vote up
/**
 * Serializes a tuple as delimiter-separated field values followed by the
 * record delimiter.
 *
 * @param tuple the tuple to format
 * @return the encoded record bytes (UTF-8)
 */
public byte[] format(Tuple tuple) {
    StringBuilder sb = new StringBuilder();
    // Use the configured field subset when present, otherwise all tuple fields.
    Fields fields = this.fields == null ? tuple.getFields() : this.fields;
    int size = fields.size();
    for(int i = 0; i < size; i++){
        sb.append(tuple.getValueByField(fields.get(i)));
        if(i != size - 1){
            sb.append(this.fieldDelimiter);
        }
    }
    sb.append(this.recordDelimiter);
    // Fix: encode with an explicit charset — the no-arg getBytes() uses the
    // JVM's platform default, making the HDFS output machine-dependent.
    return sb.toString().getBytes(java.nio.charset.StandardCharsets.UTF_8);
}