Java Code Examples for org.apache.storm.tuple.Tuple

The following are top-voted examples showing how to use org.apache.storm.tuple.Tuple. These examples are extracted from open-source projects. You can vote up the examples you like; your votes are used by our system to surface more high-quality examples.
Example 1
Project: DBus   File: DispatcherBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Routes an incoming tuple to the handler registered for its embedded command.
 * Tick tuples are acknowledged immediately; any handler failure fails the
 * tuple and reports the error to Storm.
 */
@Override
public void execute(Tuple input) {
    if (TupleUtils.isTick(input)) {
        collector.ack(input);
        return;
    }
    try {
        // The upstream component attaches the command object under EmitFields.COMMAND.
        Command command = (Command) input.getValueByField(EmitFields.COMMAND);
        handlerManager.getHandler(command).handle(input);
        this.collector.ack(input);
    } catch (Exception e) {
        this.collector.fail(input);
        this.collector.reportError(e);
        logger.error("Process Error!", e);
    }

}
 
Example 2
Project: DStream   File: PredictorBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Feeds each word/count pair into the hot-key predictor; emits the word when
 * it is a hot key, re-emits it for each synopsis dump removal, and always
 * acks the input tuple.
 */
public void execute(Tuple tuple) {
    final String word = tuple.getStringByField(Constraints.wordFileds);
    Integer frequency = tuple.getIntegerByField(Constraints.coinCountFileds);

    predictorHotKeyUtil.PredictorHotKey(word, frequency);

    if (predictorHotKeyUtil.isHotKey(word)) {
        collector.emit(new Values(word, 1));
    }

    // Randomly dump synopsis entries; each removal re-emits the current word.
    predictorHotKeyUtil.SynopsisHashMapRandomDump(new DumpRemoveHandler() {
        @Override
        public void dumpRemove(String key) {
            collector.emit(new Values(word, 1));
        }
    });

    collector.ack(tuple);
}
 
Example 3
Project: SQLonStorm   File: GroupByBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Computes the aggregate function max over one group of tuples.
 *
 * @param isJoin    whether the query contains a join (affects the field-key format)
 * @param tupleList tuples belonging to a single group
 * @param parameter the max(...) argument, e.g. max(tab1.A); only one parameter is supported
 * @return the maximum integer value of the field over the group, or
 *         Integer.MIN_VALUE when no tuple carries a non-empty value
 */
private int getResultofMax(boolean isJoin, List<Tuple> tupleList, TCItem parameter) {
    // The field key depends only on the parameter, not the tuple, so build it
    // once instead of on every loop iteration (as the original did).
    final String fieldKey = isJoin
            ? parameter.getTableName() + "." + parameter.getColName()
            : parameter.getColName();
    int maxRes = Integer.MIN_VALUE;
    for (Tuple tuple : tupleList) {
        String tValStr = tuple.getStringByField(fieldKey);
        // Null guard added: a missing value used to NPE on isEmpty().
        // Empty/missing values do not participate in the aggregate.
        if (tValStr != null && !tValStr.isEmpty()) {
            maxRes = Math.max(maxRes, Integer.parseInt(tValStr));
        }
    }
    return maxRes;
}
 
Example 4
Project: storm_spring_boot_demo   File: RollingCountAggBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Accumulates partial counts per (object, source task) and emits the total
 * count for the object summed across all known source tasks.
 */
@Override
public void execute(Tuple tuple) {
  Object obj = tuple.getValue(0);
  long count = tuple.getLong(1);
  int source = tuple.getSourceTask();
  // Track the latest partial count reported by each upstream task.
  Map<Integer, Long> subCounts = counts.computeIfAbsent(obj, k -> new HashMap<Integer, Long>());
  subCounts.put(source, count);
  // Emit the sum of every task's latest partial count for this key.
  long sum = 0;
  for (Long val : subCounts.values()) {
    sum += val;
  }
  collector.emit(new Values(obj, sum));
}
 
Example 5
Project: open-kilda   File: OFELinkBoltTest.java   Source Code and License 6 votes vote down vote up
/**
 * Verifies that OFELinkBolt acks (rather than fails) a tuple whose payload is
 * not valid JSON, so corrupted discovery messages are dropped instead of
 * being replayed indefinitely.
 */
@Test
public void invalidJsonForDiscoveryFilter() throws CmdLineException, ConfigurationException {
    OFEventWFMTopology manager = new OFEventWFMTopology(makeLaunchEnvironment());
    TopologyConfig config = manager.getConfig();
    OFELinkBolt bolt = new OFELinkBolt(config);

    TopologyContext context = Mockito.mock(TopologyContext.class);

    // Wire the mocked context so the bolt can resolve the input stream's
    // source component and its declared fields.
    Mockito.when(context.getComponentId(TASK_ID_BOLT))
            .thenReturn(COMPONENT_ID_SOURCE);
    Mockito.when(context.getComponentOutputFields(COMPONENT_ID_SOURCE, STREAM_ID_INPUT))
            .thenReturn(KafkaMessage.FORMAT);

    OutputCollectorMock outputDelegate = Mockito.spy(new OutputCollectorMock());
    OutputCollector output = new OutputCollector(outputDelegate);

    bolt.prepare(stormConfig(), context, output);
    bolt.initState(new InMemoryKeyValueState<>());

    // Deliberately truncated JSON payload.
    Tuple tuple = new TupleImpl(context, new Values("{\"corrupted-json"), TASK_ID_BOLT, STREAM_ID_INPUT);
    bolt.doWork(tuple);

    Mockito.verify(outputDelegate).ack(tuple);
}
 
Example 6
Project: patent-crawler   File: StatusUpdaterBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Do not ack the tuple straight away! wait to get the confirmation that it
 * worked
 **/
public void ack(Tuple t, String url) {
    synchronized (waitAck) {
        // Tuples pending confirmation are grouped under the sha256 of their URL.
        String sha256hex = org.apache.commons.codec.digest.DigestUtils
                .sha256Hex(url);
        List<Tuple> tt = waitAck.getIfPresent(sha256hex);
        if (tt == null) {
            // check that there has been no removal of the entry since
            Metadata metadata = (Metadata) t.getValueByField("metadata");
            if (metadata.getFirstValue("es.status.skipped.sending") != null) {
                LOG.debug(
                        "Indexing skipped for {} with ID {} but key removed since",
                        url, sha256hex);
                // ack straight away!
                super.ack(t, url);
                return;
            }
            tt = new LinkedList<>();
        }
        // Queue the tuple until the indexing confirmation arrives.
        tt.add(t);
        waitAck.put(sha256hex, tt);
        LOG.debug("Added to waitAck {} with ID {} total {}", url,
                sha256hex, tt.size());
    }
}
 
Example 7
Project: Mastering-Apache-Storm   File: StormHBaseBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Builds a two-column-family record (personal, company) from the input tuple
 * and inserts it into HBase under a randomly generated UUID row key.
 */
public void execute(Tuple input, BasicOutputCollector collector) {
	Map<String, Object> personal = new HashMap<String, Object>();
	personal.put("firstName", input.getValueByField("firstName"));
	personal.put("lastName", input.getValueByField("lastName"));

	Map<String, Object> company = new HashMap<String, Object>();
	company.put("companyName", input.getValueByField("companyName"));

	Map<String, Map<String, Object>> record = new HashMap<String, Map<String, Object>>();
	record.put("personal", personal);
	record.put("company", company);

	// Persist the record; the row key is a freshly generated UUID.
	hbaseOperations.insert(record, UUID.randomUUID().toString());
}
 
Example 8
Project: storm-clickhouse   File: ClickhouseInsertBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Maps the tuple to a row of columns and inserts it into Clickhouse — via the
 * configured table name when present, otherwise via the explicit insert
 * query. Acks on success; reports the error and fails the tuple otherwise.
 */
@SuppressWarnings("rawtypes")
@Override
protected void process(Tuple tuple) {
    try {
        List<List<Column>> rows = new ArrayList<List<Column>>();
        rows.add(jdbcMapper.getColumns(tuple));
        if (StringUtils.isBlank(tableName)) {
            this.jdbcClient.executeInsertQuery(this.insertQuery, rows);
        } else {
            this.jdbcClient.insert(this.tableName, rows);
        }
        this.collector.ack(tuple);
    } catch (Exception e) {
        this.collector.reportError(e);
        this.collector.fail(tuple);
    }
}
 
Example 9
Project: patent-crawler   File: RedirectionBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Forwards the tuple on the default stream when text has already been
 * extracted, otherwise routes it to the "tika" stream for parsing; the input
 * tuple is always acked.
 */
@Override
public void execute(Tuple tuple) {
    String url = tuple.getStringByField("url");
    byte[] content = tuple.getBinaryByField("content");
    Metadata metadata = (Metadata) tuple.getValueByField("metadata");
    String text = tuple.getStringByField("text");

    Values values = new Values(url, content, metadata, text);

    if (StringUtils.isBlank(text)) {
        // No text yet - hand off to Tika for parsing.
        collector.emit("tika", tuple, values);
    } else {
        // Text already present - no need to parse again.
        collector.emit(tuple, values);
    }

    collector.ack(tuple);
}
 
Example 10
Project: open-kilda   File: ConfirmationBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Emits the switch id when the switch can be confirmed; logs a warning when
 * confirmation fails and an error when the tuple lacks the switch-id field.
 */
@Override
public void execute(Tuple input, BasicOutputCollector collector) {
    // Guard clause: nothing to do without the switch-id field.
    if (!input.contains(FIELD_SWITCH_ID)) {
        logger.error(FIELD_SWITCH_ID + " not found in tuple " + input);
        return;
    }
    String switchId = (String) input.getValueByField(FIELD_SWITCH_ID);
    logger.debug("switchId=" + input.getValueByField(FIELD_SWITCH_ID));

    if (isSwitchConfirmed(switchId)) {
        collector.emit(new Values(switchId));
    } else {
        logger.warn("could not confirm switch with id " + switchId);
        // TODO - any action here?
    }
}
 
Example 11
Project: DBus   File: HeartbeatHandler.java   Source Code and License 6 votes vote down vote up
/**
 * Processes a heartbeat tuple: unwraps the pulse list from the emitted data,
 * reports each pulse to the stat reporter, then delegates to the wrapped
 * handler and acks the tuple.
 */
@Override
public void handle(Tuple tuple) {
    EmitData data = (EmitData) tuple.getValueByField(Constants.EmitFields.DATA);
    List<PairWrapper<String, Object>> wrapperList = data.get(EmitData.MESSAGE);
    if (wrapperList != null && !wrapperList.isEmpty()) {
        for (PairWrapper<String, Object> wrapper : wrapperList) {
            HeartbeatPulse pulse = HeartbeatPulse.build(wrapper.pairs2map());
            if (logger.isDebugEnabled()) {
                // Debug-only: decode the packet to log group, offset and send time.
                Object offset = data.get(EmitData.OFFSET);
                HeartBeatPacket packet = HeartBeatPacket.parse(pulse.getPacket());
                SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
                String groupId = tuple.getStringByField(Constants.EmitFields.GROUP_FIELD);
                logger.debug("[heartbeat] {} offset:{} ts:{}, time:{}", groupId, offset == null ? -1 : offset, packet.getTxtime(), format.format(new Date(packet.getTxtime())));
            }
            reporter.mark(pulse);
        }
    }

    // Delegate to the wrapped handler before acknowledging the tuple.
    handler.handle(tuple);
    this.listener.getOutputCollector().ack(tuple);
}
 
Example 12
Project: DStream   File: WordCounterBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Maintains a running per-word count; for every non-empty word the updated
 * count is emitted anchored to the input tuple, and the tuple is always acked.
 */
@Override
public void execute(Tuple tuple) {
    boltstatus++;

    String word = tuple.getStringByField(Constraints.wordFileds);
    if (!word.isEmpty()) {
        // Absent entry means this is the word's first occurrence.
        Long count = counts.getOrDefault(word, 0L) + 1;
        counts.put(word, count);
        outputCollector.emit(tuple, new Values(word, count));
    }
    outputCollector.ack(tuple);
}
 
Example 13
Project: open-kilda   File: CrudBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Restores a flow: recomputes its path, then updates the cached flow if it
 * already exists or creates it otherwise, and emits the resulting flow info
 * on the UPDATE stream.
 *
 * @throws MessageException when no path exists for a multi-switch flow
 * @throws IOException      when the outgoing flow info cannot be serialized
 */
private void handleRestoreRequest(CommandMessage message, Tuple tuple) throws IOException {
    ImmutablePair<Flow, Flow> requestedFlow = ((FlowRestoreRequest) message.getData()).getPayload();

    ImmutablePair<PathInfoData, PathInfoData> path = pathComputer.getPath(requestedFlow.getLeft());
    logger.info("Restored flow path: {}", path);

    // A single-switch flow needs no path; for all others an empty path is fatal.
    if (!flowCache.isOneSwitchFlow(requestedFlow) && pathComputer.isEmpty(path)) {
        throw new MessageException(message.getCorrelationId(), System.currentTimeMillis(),
                ErrorType.CREATION_FAILURE, "Could not restore flow", "Path was not found");
    }

    // Update-or-create depending on whether the cache already knows the flow.
    ImmutablePair<Flow, Flow> flow;
    if (flowCache.cacheContainsFlow(requestedFlow.getLeft().getFlowId())) {
        flow = flowCache.updateFlow(requestedFlow, path);
    } else {
        flow = flowCache.createFlow(requestedFlow, path);
    }
    logger.info("Restored flow: {}", flow);

    Values topology = new Values(Utils.MAPPER.writeValueAsString(
            new FlowInfoData(requestedFlow.getLeft().getFlowId(), flow,
                    FlowOperation.UPDATE, message.getCorrelationId())));
    outputCollector.emit(StreamType.UPDATE.toString(), tuple, topology);
}
 
Example 14
Project: SQLonStorm   File: SingleJoinBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Single-join: buffers one tuple per input stream keyed by the join id
 * fields; once every source has contributed, emits the joined output fields
 * anchored to all buffered tuples and acks each of them.
 */
@Override
public void execute(Tuple tuple) {
  List<Object> id = tuple.select(_idFields);
  GlobalStreamId streamId = new GlobalStreamId(tuple.getSourceComponent(), tuple.getSourceStreamId());
  // First tuple for this id: create the per-stream buffer.
  if (!_pending.containsKey(id)) {
    _pending.put(id, new HashMap<GlobalStreamId, Tuple>());
  }
  Map<GlobalStreamId, Tuple> parts = _pending.get(id);
  if (parts.containsKey(streamId)) {
    throw new RuntimeException("Received same side of single join twice");
  }
  parts.put(streamId, tuple);
  // Not all sides have arrived yet - keep waiting.
  if (parts.size() != _numSources) {
    return;
  }
  _pending.remove(id);
  List<Object> joinResult = new ArrayList<Object>();
  for (String outField : _outFields) {
    joinResult.add(parts.get(_fieldLocations.get(outField)).getValueByField(outField));
  }
  // Anchor the result to every contributing tuple, then ack them all.
  _collector.emit(new ArrayList<Tuple>(parts.values()), joinResult);
  for (Tuple part : parts.values()) {
    _collector.ack(part);
  }
}
 
Example 15
Project: open-kilda   File: OpenTSDBFilterBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Parses a datapoint from the raw tuple string, emits it downstream when an
 * update is required, and always acks the tuple — even on parse failure, so
 * malformed input is dropped rather than replayed.
 */
@Override
public void execute(Tuple tuple) {
    final String data = tuple.getString(0);
    // Bug fix: the original call had no "{}" placeholder, so the datapoint
    // payload was never actually written to the log.
    LOGGER.debug("Processing datapoint {}", data);
    try {
        Datapoint datapoint = MAPPER.readValue(data, Datapoint.class);
        if (isUpdateRequired(datapoint)) {
            addDatapoint(datapoint);

            List<Object> stream = Stream.of(datapoint.getMetric(), datapoint.getTime(), datapoint.getValue(),
                    datapoint.getTags())
                    .collect(Collectors.toList());

            LOGGER.debug("emit: " + stream);
            collector.emit(stream);
        }
    } catch (IOException e) {
        LOGGER.error("Failed read datapoint", e);
    } finally {
        // Ack unconditionally: bad datapoints must not be replayed.
        collector.ack(tuple);
    }
}
 
Example 16
Project: Mastering-Apache-Storm   File: SequenceFileBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Appends the tuple to the sequence file under the write lock, hsync-ing when
 * the sync policy fires; acks only after a successful append, then rotates
 * the output file when the rotation policy fires. I/O errors fail the tuple.
 */
public void execute(Tuple tuple) {
    try {
        long offset;
        synchronized (this.writeLock) {
            this.writer.append(this.format.key(tuple), this.format.value(tuple));
            // Capture post-append length for the sync and rotation policies.
            offset = this.writer.getLength();

            if (this.syncPolicy.mark(tuple, offset)) {
                this.writer.hsync();
                this.syncPolicy.reset();
            }
        }

        // Ack after the append (and any sync) succeeded.
        this.collector.ack(tuple);
        if (this.rotationPolicy.mark(tuple, offset)) {
            rotateOutputFile(); // synchronized
            this.rotationPolicy.reset();
        }
    } catch (IOException e) {
        LOG.warn("write/sync failed.", e);
        this.collector.fail(tuple);
    }

}
 
Example 17
Project: DStream   File: SplitterBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Splits traffic by origin: words from the upstream component are routed to
 * the hot or non-hot stream based on bloom-filter membership, while control
 * tuples add or remove keys from the bloom filter. Every tuple is acked.
 */
public void execute(Tuple tuple) {
    boolean fromUpstream = tuple.getSourceComponent().equals(UPSTREAM_COMPONENT_ID);
    if (fromUpstream) {
        String word = tuple.getStringByField(UPSTREAM_FIEDLS);
        if (word.length() <= 0) {
            // Nothing to route for an empty word.
            collector.ack(tuple);
            return;
        }
        collector.emit(Constraints.coinFileds, new Values(word));
        Key wordKey = new Key(word.getBytes());
        if (bf.membershipTest(wordKey)) {
            collector.emit(Constraints.hotFileds, tuple, new Values(word));
        } else {
            collector.emit(Constraints.nohotFileds, tuple, new Values(word));
        }
    } else {
        // Control path: type 1 adds the key, type 0 removes it.
        String key = tuple.getStringByField(Constraints.wordFileds);
        Integer type = tuple.getIntegerByField(Constraints.typeFileds);
        Key controlKey = new Key(key.getBytes());
        if (!bf.membershipTest(controlKey) && type.equals(1)) {
            bf.add(controlKey);
        }
        if (bf.membershipTest(controlKey) && type.equals(0)) {
            bf.delete(controlKey);
        }
    }
    collector.ack(tuple);
}
 
Example 18
Project: open-kilda   File: CacheBolt.java   Source Code and License 6 votes vote down vote up
/**
 * {@inheritDoc}
 */
@Override
protected void doTick(Tuple tuple) {
    // FIXME(dbogun): tick only once, because timePassed never reset
    if (timePassed == discoveryInterval) {
        // Exactly once, when the discovery interval elapses, request the
        // network cache over the TPE stream.
        Values values = getNetworkRequest();
        if (values != null) {
            outputCollector.emit(StreamType.TPE.toString(), tuple, values);
        } else {
            logger.error("Could not send network cache request");
        }
    }
    // Counter saturates at discoveryInterval + 1 (see FIXME above).
    if (timePassed <= discoveryInterval) {
        timePassed += 1;
    }
}
 
Example 19
Project: storm_spring_boot_demo   File: SingleJoinBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Single-join: buffers one tuple per input stream keyed by the join id
 * fields; once every source has contributed, emits the joined output fields
 * anchored to all buffered tuples and acks each of them.
 */
@Override
public void execute(Tuple tuple) {
  List<Object> id = tuple.select(_idFields);
  GlobalStreamId streamId = new GlobalStreamId(tuple.getSourceComponent(), tuple.getSourceStreamId());
  // First tuple for this id: create the per-stream buffer.
  if (!_pending.containsKey(id)) {
    _pending.put(id, new HashMap<GlobalStreamId, Tuple>());
  }
  Map<GlobalStreamId, Tuple> parts = _pending.get(id);
  if (parts.containsKey(streamId))
    throw new RuntimeException("Received same side of single join twice");
  parts.put(streamId, tuple);
  // All sides present: assemble the join result in output-field order.
  if (parts.size() == _numSources) {
    _pending.remove(id);
    List<Object> joinResult = new ArrayList<Object>();
    for (String outField : _outFields) {
      GlobalStreamId loc = _fieldLocations.get(outField);
      joinResult.add(parts.get(loc).getValueByField(outField));
    }
    // Anchor the emit to every contributing tuple, then ack them all.
    _collector.emit(new ArrayList<Tuple>(parts.values()), joinResult);

    for (Tuple part : parts.values()) {
      _collector.ack(part);
    }
  }
}
 
Example 20
Project: open-kilda   File: CacheBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Marks every affected flow pair as DOWN and emits one reroute command per
 * flow on the WFM dump stream, tagged with a correlation id derived from the
 * original one. Serialization failures are logged and skipped so remaining
 * flows are still rerouted.
 */
private void emitRerouteCommands(Set<ImmutablePair<Flow, Flow>> flows, Tuple tuple,
                                 String correlationId, FlowOperation operation) {
    String rerouteCorrelationId = String.format("%s-%s", correlationId, REROUTE.toString());

    for (ImmutablePair<Flow, Flow> flow : flows) {
        try {
            // Both directions of the flow go DOWN before the reroute request.
            flow.getLeft().setState(FlowState.DOWN);
            flow.getRight().setState(FlowState.DOWN);
            FlowRerouteRequest request = new FlowRerouteRequest(flow.getLeft(), operation);

            Values values = new Values(Utils.MAPPER.writeValueAsString(new CommandMessage(
                    request, System.currentTimeMillis(), rerouteCorrelationId, Destination.WFM)));
            outputCollector.emit(StreamType.WFM_DUMP.toString(), tuple, values);

            logger.info("Flow {} reroute command message sent", flow.getLeft().getFlowId());
        } catch (JsonProcessingException exception) {
            logger.error("Could not format flow reroute request by flow={}", flow, exception);
        }
    }
}
 
Example 21
Project: SQLonStorm   File: GroupByBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Computes the aggregate function count over one group of tuples.
 *
 * @param isJoin        whether the query contains a join (affects the field-key format)
 * @param tupleList     tuples belonging to a single group
 * @param parameterList the count(...) arguments, e.g. count(tab1.A, tab1.B); multiple
 *                      parameters are supported. A tuple whose value is empty for any
 *                      parameter does not participate in the count.
 * @return the number of tuples in the group with non-empty values for every parameter
 */
private int getResultOfCount(boolean isJoin, List<Tuple> tupleList, List<TCItem> parameterList) {
    // Field keys depend only on the parameters, so resolve them once up front
    // instead of rebuilding them for every tuple (as the original did).
    String[] fieldKeys = new String[parameterList.size()];
    for (int i = 0; i < fieldKeys.length; i++) {
        TCItem tcItem = parameterList.get(i);
        fieldKeys[i] = isJoin
                ? tcItem.getTableName() + "." + tcItem.getColName()
                : tcItem.getColName();
    }
    int ans = 0;
    for (Tuple tuple : tupleList) {
        boolean countable = true;
        for (String fieldKey : fieldKeys) {
            String value = tuple.getStringByField(fieldKey);
            // Null guard added: a missing value used to NPE on isEmpty().
            if (value == null || value.isEmpty()) {
                countable = false;
                break;
            }
        }
        if (countable) {
            ans++;
        }
    }
    return ans;
}
 
Example 22
Project: Mastering-Apache-Storm   File: ApacheLogSplitterBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Splits a raw Apache access-log line into its individual elements and emits
 * them as a single tuple; blank lines are silently skipped.
 */
public void execute(Tuple input, BasicOutputCollector collector) {
	String log = input.getString(0);
	if (StringUtils.isBlank(log)) {
		// ignore blank lines
		return;
	}
	// Split the raw line into its named elements.
	Map<String, Object> logMap = apacheLogSplitter.logSplitter(log);
	// Emit the elements (ip, referrer, user-agent, bytesSent, ...) in the
	// declared field order.
	List<Object> logdata = new ArrayList<Object>();
	for (String element : LOG_ELEMENTS) {
		logdata.add(logMap.get(element));
	}
	collector.emit(logdata);
}
 
Example 23
Project: DBus   File: DbusKafkaWriterBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Routes a tuple to the handler registered for its embedded command; tick
 * tuples are acked immediately, failures are logged, reported and fail the
 * tuple.
 */
@Override
public void execute(Tuple input) {
    if (TupleUtils.isTick(input)) {
        collector.ack(input);
        return;
    }

    try {
        Command cmd = (Command) input.getValueByField(EmitFields.COMMAND);
        BoltCommandHandler handler = handlerManager.getHandler(cmd);
        handler.handle(input);
        // NOTE(review): unlike the sibling Dispatcher/Appender bolts, the
        // success path never acks `input` here — presumably the handler (or a
        // downstream write callback) acks it; confirm, otherwise tuples will
        // time out and be replayed.
    } catch (Exception e) {
        logger.error("Process data error", e);
        this.collector.reportError(e);
        this.collector.fail(input);
    }

}
 
Example 24
Project: open-kilda   File: OFELinkBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Reacts to a switch lifecycle event: activation starts discovery for the
 * switch, deactivation stops it, and any other state is ignored with a
 * warning. The original JSON message is then forwarded to the Topology
 * Engine topic.
 */
private void handleSwitchEvent(Tuple tuple, SwitchInfoData switchData) {
    final String switchID = switchData.getSwitchId();
    final String state = "" + switchData.getState();
    logger.info("DISCO: Switch Event: switch={} state={}", switchID, state);

    if (SwitchState.ACTIVATED.getType().equals(state)) {
        discovery.handleSwitchUp(switchID);
    } else if (SwitchState.DEACTIVATED.getType().equals(state)) {
        // current logic: switch down means stop checking associated ports/links.
        // - possible extra steps of validation of switch down should occur elsewhere
        // - possible extra steps of generating link down messages aren't important since
        //      the TPE will drop the switch node from its graph.
        discovery.handleSwitchDown(switchID);
    } else {
        // TODO: Should this be a warning? Evaluate whether any other state needs to be handled
        logger.warn("SWITCH Event: ignoring state: {}", state);
    }

    // Pass the original message along, to the Topology Engine topic.
    collector.emit(topoEngTopic, tuple, new Values(PAYLOAD, tuple.getString(0)));
}
 
Example 25
Project: open-kilda   File: OFELinkBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Reacts to an ISL event: discovered links are registered with discovery,
 * failed links are reported, and any other state is ignored with a warning.
 * The original JSON message is then forwarded to the Topology Engine topic.
 */
private void handleIslEvent(Tuple tuple, IslInfoData discoveredIsl) {
    final PathNode source = discoveredIsl.getPath().get(0);
    final String switchID = source.getSwitchId();
    final String portID = "" + source.getPortNo();
    final IslChangeType state = discoveredIsl.getState();
    logger.info("DISCO: ISL Event: switch={} port={} state={}", switchID, portID, state);

    if (IslChangeType.FAILED.equals(state)) {
        discovery.handleFailed(switchID, portID);
    } else if (IslChangeType.DISCOVERED.equals(state)) {
        discovery.handleDiscovered(switchID, portID);
    } else {
        // TODO: Should this be a warning? Evaluate whether any other state needs to be handled
        logger.warn("ISL Event: ignoring state: {}", state);
    }

    // Pass the original message along, to the Topology Engine topic.
    collector.emit(topoEngTopic, tuple, new Values(PAYLOAD, tuple.getString(0)));
}
 
Example 26
Project: DBus   File: DbusAppenderBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Routes a tuple to the handler registered for its embedded command; tick
 * tuples are acked immediately, any handler failure fails the tuple and
 * reports the error to Storm.
 */
@Override
public void execute(Tuple input) {
    if (TupleUtils.isTick(input)) {
        collector.ack(input);
        return;
    }
    try {
        // The upstream component attaches the command under EmitFields.COMMAND.
        Command cmd = (Command) input.getValueByField(EmitFields.COMMAND);
        BoltCommandHandler handler = handlerManager.getHandler(cmd);
        handler.handle(input);
        this.collector.ack(input);
    } catch (Exception e) {
        this.collector.fail(input);
        this.collector.reportError(e);
        logger.error("Process Error!", e);
    }

}
 
Example 27
Project: open-kilda   File: SpeakerBolt.java   Source Code and License 6 votes vote down vote up
/**
 * Second half of ISL discovery: checks the destination switch/port
 * (Path[1]); when the port is an active ISL, wraps the data in an
 * InfoMessage and emits it to the Kafka bolt stream.
 */
protected void discoverIslPartTwo(Tuple tuple, IslInfoData data) throws Exception {
    /*
     * Second part of the discover process.
     *
     * 1.  Grabs a message that has been sent from the first part and thus we know that the source port is
     *     and active ISL.
     * 2.  Check the status of the destination port, in Path[1], and if activeISL then emit to Kafka
     */
    ISwitchImpl sw = getSwitch(data.getPath().get(1).getSwitchId());
    if (!sw.isActive()) {
        return;
    }
    IPortImpl port = sw.getPort(data.getPath().get(1).getPortNo());

    if (port.isActiveIsl()) {
        long now = Instant.now().toEpochMilli();
        InfoMessage infoMessage = new InfoMessage(data, now, "system", null);
        logger.debug("checking isl on: {}", data.toString());
        collector.emit(SimulatorTopology.KAFKA_BOLT_STREAM, tuple,
                new Values("INFO", Utils.MAPPER.writeValueAsString(infoMessage)));
    }
}
 
Example 28
Project: Practical-Real-time-Processing-and-Analytics   File: WordCountBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Maintains a running per-word count and emits (word, updatedCount) for every
 * input tuple.
 */
public void execute(Tuple tuple) {
	String word = tuple.getStringByField("word");
	// An absent entry means this is the word's first occurrence.
	Long count = this.counts.get(word);
	count = (count == null) ? 1L : count + 1;
	this.counts.put(word, count);
	this.collector.emit(new Values(word, count));
}
 
Example 29
Project: Mastering-Apache-Storm   File: HdfsBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Writes the formatted tuple to HDFS under the write lock, hsync-ing when
 * the sync policy fires (using the UPDATE_LENGTH flag on HDFS streams); acks
 * only after a successful write, rotates the file when the rotation policy
 * fires, and fails the tuple on I/O errors.
 */
public void execute(Tuple tuple) {
	try {
		byte[] bytes = this.format.format(tuple);
		synchronized (this.writeLock) {
			out.write(bytes);
			this.offset += bytes.length;

			if (this.syncPolicy.mark(tuple, this.offset)) {
				// HdfsDataOutputStream can also update the length visible
				// to the namenode; plain streams just hsync.
				if (this.out instanceof HdfsDataOutputStream) {
					((HdfsDataOutputStream) this.out).hsync(EnumSet
							.of(SyncFlag.UPDATE_LENGTH));
				} else {
					this.out.hsync();
				}
				this.syncPolicy.reset();
			}
		}

		// Ack after the write (and any sync) succeeded.
		this.collector.ack(tuple);

		if (this.rotationPolicy.mark(tuple, this.offset)) {
			rotateOutputFile(); // synchronized
			this.offset = 0;
			this.rotationPolicy.reset();
		}
	} catch (IOException e) {
		LOG.warn("write/sync failed.", e);
		this.collector.fail(tuple);
	}
}
 
Example 30
Project: Practical-Real-time-Processing-and-Analytics   File: ElasticSearchBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Indexes the raw payload (tuple field 0) into the "pub-nub" index under
 * type "sensor-data".
 */
@Override
public void execute(Tuple input, BasicOutputCollector collector) {
	String valueByField = input.getString(0);
	System.out.println(valueByField);
	try {
		IndexResponse response = client.prepareIndex("pub-nub", "sensor-data")
				.setSource(convertStringtoMap(valueByField)).get();
		System.out.println(response.status());
	} catch (IOException e) {
		// NOTE(review): the failure is only printed and the document is
		// silently dropped; consider using a logger and reporting the error.
		e.printStackTrace();
	}
}
 
Example 31
Project: DBus   File: DispatcherResumeHandler.java   Source Code and License 5 votes vote down vote up
/**
 * Handles a topic-resume control command: re-emits the wrapped data keyed by
 * "schema.table" so dispatching for that table continues.
 */
@Override
public void handle(Tuple tuple) {
    EmitData emitData = (EmitData) tuple.getValueByField(Constants.EmitFields.DATA);
    Command cmd = (Command)tuple.getValueByField(Constants.EmitFields.COMMAND);

    TopicResumeCmd ctrlCmd = emitData.get(EmitData.CTRL_CMD);
    // NOTE(review): the emit key is built with Joiner while the log line uses
    // groupField(...) — presumably both yield "schema.table"; verify they
    // agree with the sibling handlers, which use groupField for the emit too.
    this.emit(listener.getOutputCollector(), tuple, Joiner.on(".").join(ctrlCmd.getSchema(), ctrlCmd.getTable()), emitData, cmd);
    logger.info("Resume table[{}]", groupField(ctrlCmd.getSchema(), ctrlCmd.getTable()));
}
 
Example 32
Project: patent-crawler   File: StatusUpdaterBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Cache removal listener: an evicted entry means its tuples never received
 * indexing confirmation, so every tuple in the evicted list is failed.
 * Explicit (non-eviction) removals are ignored.
 */
public void onRemoval(RemovalNotification<String, List<Tuple>> removal) {
    if (!removal.wasEvicted())
        return;
    List<Tuple> unacked = removal.getValue();
    LOG.error("Purged from waitAck {} with {} values", removal.getKey(),
            unacked.size());
    for (Tuple tuple : unacked) {
        _collector.fail(tuple);
    }
}
 
Example 33
Project: Mastering-Apache-Storm   File: KeyWordIdentifierBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Derives a search keyword from the tuple's referrer field and re-emits all
 * ten upstream fields with the keyword appended.
 */
public void execute(Tuple input) {
    // getStringByField already returns a String; the original redundant
    // toString() call added nothing (and would still NPE on a null referrer).
    String referrer = input.getStringByField("referrer");
    // Generate the search keyword from the referrer URL.
    Object keyword = keywordGenerator.getKeyword(referrer);
    // Emit every field produced by the previous bolt plus the derived keyword.
    collector.emit(new Values(input.getString(0), input.getString(1),
            input.getString(2), input.getString(3), input.getString(4),
            input.getString(5), input.getString(6), input.getString(7),
            input.getString(8), input.getString(9), keyword));
}
 
Example 34
Project: DBus   File: DispatcherInitialLoadHandler.java   Source Code and License 5 votes vote down vote up
/**
 * Handles a full-pull (initial load) request: unwraps the emitted data and
 * command and forwards them downstream grouped by "schema.table".
 */
@Override
public void handle(Tuple tuple) {
    EmitData emitData = (EmitData) tuple.getValueByField(Constants.EmitFields.DATA);
    Command cmd = (Command) tuple.getValueByField(Constants.EmitFields.COMMAND);

    String dbschema = emitData.get(EmitData.DB_SCHEMA);
    String table = emitData.get(EmitData.DATA_TABLE);
    String group = groupField(dbschema, table);
    this.emit(listener.getOutputCollector(), tuple, group, emitData, cmd);
    logger.info("Full pull request[{}]", group);
}
 
Example 35
Project: DBus   File: PagedBatchDataFetchingBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Publishes a progress snapshot of a paged full pull as a single JSON object
 * anchored to the input tuple.
 */
private void emitMonitorState(Tuple input, String dataSourceInfo, String dbNameSpace, long dealRows, String startSecs, String totalRows, String totalPartitions, int finishedShardCount) {
    JSONObject state = new JSONObject();
    state.put(DataPullConstants.DATA_SOURCE_INFO, dataSourceInfo);
    state.put(DataPullConstants.DATA_SOURCE_NAME_SPACE, dbNameSpace);
    // Progress counters for the ZK monitoring node.
    state.put(DataPullConstants.ZkMonitoringJson.DB_NAMESPACE_NODE_FINISHED_COUNT, finishedShardCount);
    state.put(DataPullConstants.ZkMonitoringJson.DB_NAMESPACE_NODE_FINISHED_ROWS, dealRows);
    state.put(DataPullConstants.ZkMonitoringJson.DB_NAMESPACE_NODE_START_SECS, startSecs);
    state.put(DataPullConstants.ZkMonitoringJson.DB_NAMESPACE_NODE_TOTAL_ROWS, totalRows);
    state.put(DataPullConstants.DATA_CHUNK_COUNT, totalPartitions);
    collector.emit(input, new Values(state));
}
 
Example 36
Project: Building-Data-Streaming-Applications-with-Apache-Kafka   File: StringToWordsSpliterBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Splits the incoming line on single spaces and emits one tuple per
 * non-empty word, then acks the input.
 */
public void execute(Tuple input) {
    String line = input.getString(0);
    for (String word : line.split(" ")) {
        if (!word.isEmpty()) {
            collector.emit(new Values(word));
        }
    }
    collector.ack(input);
}
 
Example 37
Project: DBus   File: KafkaBoltDefaultHandler.java   Source Code and License 5 votes vote down vote up
/**
 * Default kafka-write handling: unwraps the DbusMessage and its meta version
 * from the tuple, records per-"schema.table" payload-size stats, then hands
 * the message to the listener for the actual Kafka write.
 */
@Override
public void handle(Tuple tuple) {
    EmitData emitData = (EmitData) tuple.getValueByField(Constants.EmitFields.DATA);
    MetaVersion version = emitData.get(EmitData.VERSION);
    DbusMessage message = emitData.get(EmitData.MESSAGE);
    // Stat key is "schema.table".
    String key = Joiner.on(".").join(version.getSchema(), version.getTable());
    reporter.reportStat(key, message.payloadSizeWithoutBefore());
    logger.debug("report stat on:{},report count:{}", key, message.getPayload().size());
    listener.writeData(version.getSchema(), version.getTable(), message, tuple);
}
 
Example 38
Project: open-kilda   File: FieldNameTopicSelector.java   Source Code and License 5 votes vote down vote up
/**
 * Resolves the destination Kafka topic from the configured tuple field,
 * falling back to the default topic (with a warning) when the field is
 * absent.
 */
@Override
public String getTopic(Tuple tuple) {
    if (!tuple.contains(fieldName)) {
        LOG.warn("Field {} Not Found. Returning default topic {}", fieldName, defaultTopicName);
        return defaultTopicName;
    }
    return tuple.getStringByField(fieldName);
}
 
Example 39
Project: DBus   File: AppenderInitialLoadHandler.java   Source Code and License 5 votes vote down vote up
/**
 * Handles a full-data (initial load) request for a table: verifies the
 * table's version exists in the meta cache, then flips the data-table status
 * to WAITING so the full pull can start.
 */
@Override
public void handle(Tuple tuple) {
    EmitData emitData = (EmitData) tuple.getValueByField(Constants.EmitFields.DATA);
    String dbschema = emitData.get(EmitData.DB_SCHEMA);
    String table = emitData.get(EmitData.DATA_TABLE);

    logger.info("Full data request was received [{}.{}]", dbschema, table);

    MetaVersion ver = MetaVerController.getVersionFromCache(dbschema, table);

    // A missing version means the table is unknown - abort loudly.
    if (ver == null) {
        throw new RuntimeException("The version of table " + dbschema + "." + table + " was not found!");
    }

    //EmitData data = new EmitData();
    //data.add(EmitData.AVRO_SCHEMA, EmitData.NO_VALUE);
    //data.add(EmitData.VERSION, ver);
    //DbusMessage message = BoltCommandHandlerHelper.buildTerminationMessage(dbschema, table, ver.getVersion());
    //data.add(EmitData.MESSAGE, message);

    // Update the data-table status to WAITING (ready for the full pull).
    BoltCommandHandlerHelper.changeDataTableStatus(ver.getSchema(), ver.getTable(), DataTable.STATUS_WAITING);

    //List<Object> values = new Values(EmitData.NO_VALUE, data, Command.DATA_INCREMENT_TERMINATION);
    //this.listener.getOutputCollector().emit(tuple, values);
    //this.emit(listener.getOutputCollector(),tuple, groupField(dbschema, table), data, Command.DATA_INCREMENT_TERMINATION);


    logger.info("Full data request was processed")
}
 
Example 40
Project: Mastering-Apache-Storm   File: DelimitedRecordFormat.java   Source Code and License 5 votes vote down vote up
/**
 * Serializes a tuple as delimiter-separated field values followed by the
 * record delimiter, encoded as UTF-8 bytes.
 *
 * Uses the configured field list when present, otherwise all of the
 * tuple's fields.
 *
 * @param tuple the tuple to serialize
 * @return the encoded record bytes
 */
public byte[] format(Tuple tuple) {
    StringBuilder sb = new StringBuilder();
    Fields fields = this.fields == null ? tuple.getFields() : this.fields;
    int size = fields.size();
    for(int i = 0; i < size; i++){
        sb.append(tuple.getValueByField(fields.get(i)));
        // Delimiter between fields only, not after the last one.
        if(i != size - 1){
            sb.append(this.fieldDelimiter);
        }
    }
    sb.append(this.recordDelimiter);
    // Explicit charset: the no-arg getBytes() silently depends on the
    // platform default encoding, which differs across hosts.
    return sb.toString().getBytes(java.nio.charset.StandardCharsets.UTF_8);
}
 
Example 41
Project: open-kilda   File: JsonMessage.java   Source Code and License 5 votes vote down vote up
/**
 * Builds the message by reading the JSON payload field from a raw tuple
 * and unpacking it.
 *
 * @param raw source tuple; the JSON string is read at the index that
 *            getFormat() assigns to FIELD_ID_JSON
 * @throws MessageFormatException if the JSON payload cannot be parsed
 */
public JsonMessage(Tuple raw) throws MessageFormatException {
    super();

    String json = raw.getString(getFormat().fieldIndex(FIELD_ID_JSON));
    try {
        payload = unpackJson(json);
    } catch (IOException e) {
        // Wrap the parse failure, preserving both the offending tuple and the cause.
        throw new MessageFormatException(raw, e);
    }
}
 
Example 42
Project: Building-Data-Streaming-Applications-with-Apache-Kafka   File: IPFraudHiveBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Splits a comma-separated IP record from the tuple's first value and
 * re-emits the selected columns (0, 3, 4, 5, 6).
 *
 * Malformed records (fewer than 7 columns) are reported and failed
 * instead of crashing the bolt with an ArrayIndexOutOfBoundsException.
 */
public void execute(Tuple input) {
    String ipRecord = (String) input.getValue(0);
    String[] columns = ipRecord.split(",");
    if (columns.length < 7) {
        // Surface the bad record and let Storm replay/route it per topology policy.
        _collector.reportError(new IllegalArgumentException("Malformed IP record: " + ipRecord));
        _collector.fail(input);
        return;
    }
    Values value = new Values(columns[0], columns[3], columns[4], columns[5], columns[6]);
    _collector.emit(value);
    _collector.ack(input);
}
 
Example 43
Project: Mastering-Apache-Storm   File: DefaultSequenceFormat.java   Source Code and License 5 votes vote down vote up
/**
 * Maps a tuple to the Hadoop Writable value for sequence-file output.
 * A single Text instance is lazily created and reused across calls.
 *
 * @param tuple tuple whose configured value field is extracted
 * @return the shared Text holding the field's string value
 */
public Writable value(Tuple tuple) {
    Text holder = this.value;
    if (holder == null) {
        holder = new Text();
        this.value = holder;
    }
    holder.set(tuple.getStringByField(this.valueField));
    return holder;
}
 
Example 44
Project: open-kilda   File: ErrorBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Forwards error messages from known flow-topology components to the
 * northbound response stream; messages from other components are logged
 * and dropped. The tuple is always acked in the finally block, so error
 * messages are never replayed once field extraction succeeds.
 */
@Override
public void execute(Tuple tuple) {
    // NOTE(review): these extractions run before the try/finally — an
    // unexpected tuple shape would throw here and the tuple would not be
    // acked. Confirm upstream guarantees the expected fields.
    ComponentType componentId = ComponentType.valueOf(tuple.getSourceComponent());
    StreamType streamId = StreamType.valueOf(tuple.getSourceStreamId());
    ErrorType errorType = (ErrorType) tuple.getValueByField(FlowTopology.ERROR_TYPE_FIELD);
    ErrorMessage error = (ErrorMessage) tuple.getValueByField(AbstractTopology.MESSAGE_FIELD);
    // Re-address the error to the northbound consumer before emitting.
    error.setDestination(Destination.NORTHBOUND);
    Values values = new Values(error);

    try {
        logger.debug("Request tuple={}", tuple);

        switch (componentId) {
            // CRUD and splitter errors take the same forwarding path.
            case CRUD_BOLT:
            case SPLITTER_BOLT:
                logger.debug("Error message: data={}", error.getData());
                outputCollector.emit(StreamType.RESPONSE.toString(), tuple, values);
                break;
            default:
                logger.debug("Skip message from unknown component: component={}, stream={}, error-type={}",
                        componentId, streamId, errorType);
                break;
        }
    } catch (Exception exception) {
        logger.error("Could not process message: {}", tuple, exception);
    } finally {
        logger.debug("Error message ack: component={}, stream={}, tuple={}, values={}",
                tuple.getSourceComponent(), tuple.getSourceStreamId(), tuple, values);

        // Ack unconditionally — error delivery is best-effort.
        outputCollector.ack(tuple);
    }
}
 
Example 45
Project: open-kilda   File: CrudBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Removes the flow from the cache, broadcasts the deletion on the delete
 * stream, and answers the caller on the northbound response stream.
 *
 * @param flowId  id of the flow to delete
 * @param message originating command (supplies timestamp / correlation id)
 * @param tuple   anchor tuple for the emits
 * @throws IOException if the deletion payload cannot be serialized
 */
private void handleDeleteRequest(String flowId, CommandMessage message, Tuple tuple) throws IOException {
    ImmutablePair<Flow, Flow> flow = flowCache.deleteFlow(flowId);

    logger.info("Deleted flow: {}", flow);

    FlowInfoData payload = new FlowInfoData(flowId, flow, FlowOperation.DELETE, message.getCorrelationId());
    outputCollector.emit(StreamType.DELETE.toString(), tuple, new Values(MAPPER.writeValueAsString(payload)));

    InfoMessage response = new InfoMessage(new FlowResponse(buildFlowResponse(flow)),
            message.getTimestamp(), message.getCorrelationId(), Destination.NORTHBOUND);
    outputCollector.emit(StreamType.RESPONSE.toString(), tuple, new Values(response));
}
 
Example 46
Project: DBus   File: DbusKafkaWriterBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Reloads bolt configuration and recreates the Kafka producer in response
 * to a control message.
 *
 * Whether the reload succeeds or fails, a CtlMessageResult carrying msg is
 * sent back over ZooKeeper in the finally block (when a control tuple was
 * supplied), so the issuer always learns the outcome.
 */
@Override
public void reloadBolt(Tuple tuple) {
    String msg = null;
    try {
        // Refresh configuration caches before rebuilding the producer.
        PropertiesHolder.reload();
        ThreadLocalCache.reload();
        // Close the old producer first so its resources are released even
        // if createProducer() below fails.
        if (producer != null) {
            producer.close();
        }

        producer = createProducer();
        msg = "kafka write bolt reload successful!";
        logger.info("Kafka writer bolt was reloaded at:{}", System.currentTimeMillis());
    } catch (Exception e) {
        // Report the failure text to the issuer (via finally), then rethrow.
        msg = e.getMessage();
        throw new RuntimeException(e);
    } finally {
        // tuple may be null when reload is triggered without a control
        // message — NOTE(review): confirm with callers.
        if (tuple != null) {
            EmitData data = (EmitData) tuple.getValueByField(Constants.EmitFields.DATA);
            ControlMessage message = data.get(EmitData.MESSAGE);
            CtlMessageResult result = new CtlMessageResult("kafka-write-bolt", msg);
            result.setOriginalMessage(message);
            CtlMessageResultSender sender = new CtlMessageResultSender(message.getType(), zkconnect);
            sender.send("kafka-write-bolt-" + context.getThisTaskId(), result, false, true);
        }
    }
}
 
Example 47
Project: open-kilda   File: CrudBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Answers a dump request with every flow currently held in the cache,
 * emitted on the northbound response stream.
 *
 * @param message originating command (supplies timestamp / correlation id)
 * @param tuple   anchor tuple for the emit
 */
private void handleDumpRequest(CommandMessage message, Tuple tuple) {
    List<Flow> flows = flowCache.dumpFlows().stream()
            .map(this::buildFlowResponse)
            .collect(Collectors.toList());

    logger.info("Dump flows: {}", flows);

    InfoMessage response = new InfoMessage(new FlowsResponse(flows),
            message.getTimestamp(), message.getCorrelationId(), Destination.NORTHBOUND);
    outputCollector.emit(StreamType.RESPONSE.toString(), tuple, new Values(response));
}
 
Example 48
Project: open-kilda   File: CrudBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Looks up a flow by id and sends it to the northbound response stream.
 *
 * @param flowId  id of the flow to read
 * @param message originating command (supplies timestamp / correlation id)
 * @param tuple   anchor tuple for the emit
 */
private void handleReadRequest(String flowId, CommandMessage message, Tuple tuple) {
    ImmutablePair<Flow, Flow> flow = flowCache.getFlow(flowId);

    logger.info("Got flow: {}", flow);

    InfoMessage response = new InfoMessage(new FlowResponse(buildFlowResponse(flow)),
            message.getTimestamp(), message.getCorrelationId(), Destination.NORTHBOUND);
    outputCollector.emit(StreamType.RESPONSE.toString(), tuple, new Values(response));
}
 
Example 49
Project: open-kilda   File: CrudBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Reports the current state of a flow on the northbound response stream.
 *
 * @param flowId  id of the flow whose status is requested
 * @param message originating command (supplies timestamp / correlation id)
 * @param tuple   anchor tuple for the emit
 * @throws IOException declared for parity with the request-handler family
 */
private void handleStatusRequest(String flowId, CommandMessage message, Tuple tuple) throws IOException {
    ImmutablePair<Flow, Flow> flow = flowCache.getFlow(flowId);
    FlowState status = flow.getLeft().getState();

    logger.info("Status flow: {}={}", flowId, status);

    FlowStatusResponse payload = new FlowStatusResponse(new FlowIdStatusPayload(flowId, status));
    InfoMessage response = new InfoMessage(payload,
            message.getTimestamp(), message.getCorrelationId(), Destination.NORTHBOUND);
    outputCollector.emit(StreamType.RESPONSE.toString(), tuple, new Values(response));
}
 
Example 50
Project: DBus   File: DbusKafkaWriterBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Serializes a DbusMessage and writes it to the first Kafka topic resolved
 * for the given schema/table, acking or failing the Storm tuple from the
 * producer's asynchronous completion callback.
 */
@Override
public void writeData(String dbSchema, String table, DbusMessage dbusMessage, Tuple input) {
    EmitData data = (EmitData) input.getValueByField(Constants.EmitFields.DATA);
    List<String> topics = topicProvider.provideTopics(dbSchema, table);
    if (topics == null || topics.isEmpty()) {
        // No destination topic: ack anyway so the tuple is not replayed
        // forever — the message is dropped with an error log.
        logger.error("Can't find a topic to write the message!");
        this.collector.ack(input);
        return;
    }

    String message = dbusMessage.toString();
    // Only the first resolved topic is used — NOTE(review): confirm that
    // additional topics are intentionally ignored.
    ProducerRecord<String, String> record = new ProducerRecord<>(topics.get(0), buildKey(dbusMessage), message);
    reporter.report(message.getBytes().length, dbusMessage.getPayload().size());
    Object offsetObj = data.get(EmitData.OFFSET);
    String offset = offsetObj != null ? offsetObj.toString() : "0";
    producer.send(record, (metadata, exception) -> {
        // The callback runs on the producer's I/O thread; the collector is
        // synchronized because it is shared with the executor thread.
        if (exception != null) {
            synchronized (this.collector) {
                this.collector.fail(input);
            }
            logger.error("Write data to kafka error, original data:[schema:{}, table:{}, offset:{}]!", dbSchema, table, offset, exception);
        } else {
            synchronized (this.collector) {
                this.collector.ack(input);
            }
            logger.info("kafka-message,original-offset:{}, key:{}", offset, record.key());
        }
    });
}
 
Example 51
Project: open-kilda   File: AbstractTickRichBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Routes each incoming tuple: tick tuples trigger the periodic work path,
 * everything else goes to the regular processing path.
 */
@Override
public void execute(Tuple tuple) {
    if (!isTickTuple(tuple)) {
        doWork(tuple);
        return;
    }
    // Tick tuple: run the periodic (emit-all) work.
    doTick(tuple);
}
 
Example 52
Project: Mastering-Apache-Storm   File: HdfsBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Formats the tuple, appends it to the current HDFS file under the write
 * lock, then applies the sync and rotation policies.
 *
 * The tuple is acked only after a successful write (and any required
 * sync); an IOException fails the tuple so Storm can replay it.
 */
public void execute(Tuple tuple) {
	try {
		byte[] bytes = this.format.format(tuple);
		// Serialize writers: the output stream and offset are shared state.
		synchronized (this.writeLock) {
			out.write(bytes);
			this.offset += bytes.length;

			if (this.syncPolicy.mark(tuple, this.offset)) {
				// HdfsDataOutputStream can also persist the updated file length.
				if (this.out instanceof HdfsDataOutputStream) {
					((HdfsDataOutputStream) this.out).hsync(EnumSet
							.of(SyncFlag.UPDATE_LENGTH));
				} else {
					this.out.hsync();
				}
				this.syncPolicy.reset();
			}
		}

		this.collector.ack(tuple);

		// Rotation happens after the ack: the record is already durable.
		if (this.rotationPolicy.mark(tuple, this.offset)) {
			rotateOutputFile(); // synchronized
			this.offset = 0;
			this.rotationPolicy.reset();
		}
	} catch (IOException e) {
		LOG.warn("write/sync failed.", e);
		this.collector.fail(tuple);
	}
}
 
Example 53
Project: open-kilda   File: LoggerBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Logs the tuple's fields and values at the configured level, then acks.
 *
 * The previous bare System.out.println debug print is routed through the
 * bolt's log helper so it honors the configured level instead of writing
 * unconditionally to stdout.
 */
@Override
public void execute(Tuple tuple) {
    log(level, "this = {}", new Object[] {this});

    // No way to do dynamic log level with SLF4J
    // https://stackoverflow.com/questions/2621701/setting-log-level-of-message-at-runtime-in-slf4j
    // https://jira.qos.ch/browse/SLF4J-124

    log(level, "\n{}: fields: {} :: values: {}",
            new Object[] {watermark, tuple.getFields(), tuple.getValues()});

    _collector.ack(tuple);
}
 
Example 54
Project: open-kilda   File: FilerBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Appends the tuple's values (one line each) to the target file and acks.
 *
 * Write failures are logged with the full exception (stack trace included)
 * but the tuple is still acked — file output here is best-effort.
 */
@Override
public void execute(Tuple tuple) {
    File file = getFile();
    logger.debug("FILER: Writing tuple to disk: File = {}, tuple={}", file.getAbsolutePath(), tuple);

    try {
        // Start with just the values; determine later if the fields are needed.
        //Files.append(tuple.getFields().toString(), file, Charsets.UTF_8);
        Files.append(tuple.getValues().toString() + "\n", file, Charsets.UTF_8);
    } catch (IOException e) {
        // Pass the exception itself as the last argument so SLF4J logs the
        // full stack trace, not just getMessage()/getCause() strings.
        logger.error("FILER: couldn't append to file: {}", file.getAbsolutePath(), e);
    }
    _collector.ack(tuple);
}
 
Example 55
Project: Mastering-Apache-Storm   File: MySQLDump.java   Source Code and License 5 votes vote down vote up
/**
 * Persists one parsed Apache-log tuple into the {@code apachelog} MySQL
 * table via a prepared statement.
 *
 * @param tuple tuple carrying the log fields by name (ip, dateTime, …)
 * @throws RuntimeException wrapping the underlying SQL failure
 */
public void persistRecord(Tuple tuple) {
	try {

		// preparedStatements can use variables and are more efficient
		preparedStatement = connect
				.prepareStatement("insert into  apachelog values (default,?, ?, ?, ?, ? , ?, ?, ?,?,?,?)");

		preparedStatement.setString(1, tuple.getStringByField("ip"));
		preparedStatement.setString(2, tuple.getStringByField("dateTime"));
		preparedStatement.setString(3, tuple.getStringByField("request"));
		preparedStatement.setString(4, tuple.getStringByField("response"));
		preparedStatement.setString(5, tuple.getStringByField("bytesSent"));
		preparedStatement.setString(6, tuple.getStringByField("referrer"));
		preparedStatement.setString(7, tuple.getStringByField("useragent"));
		preparedStatement.setString(8, tuple.getStringByField("country"));
		preparedStatement.setString(9, tuple.getStringByField("browser"));
		preparedStatement.setString(10, tuple.getStringByField("os"));
		preparedStatement.setString(11, tuple.getStringByField("keyword"));

		// Insert record
		preparedStatement.executeUpdate();

	} catch (Exception e) {
		// Preserve the cause: the original code dropped the exception,
		// making persistence failures impossible to diagnose.
		throw new RuntimeException(
				"Error occurred while persisting records in mysql", e);
	} finally {
		// Always release the prepared statement; a close failure is logged
		// but must not mask the original outcome.
		if (preparedStatement != null) {
			try {
				preparedStatement.close();
			} catch (Exception exception) {
				System.out
						.println("Error occurred while closing PreparedStatement : " + exception);
			}
		}
	}

}
 
Example 56
Project: open-kilda   File: CacheBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Applies a switch state-change event to the network cache and, when a
 * switch goes away, triggers reroutes for all flows whose path crossed it.
 */
private void handleSwitchEvent(SwitchInfoData sw, Tuple tuple) {
    logger.info("State update switch {} message {}", sw.getSwitchId(), sw.getState());
    Set<ImmutablePair<Flow, Flow>> affectedFlows;

    switch (sw.getState()) {

        // Both events mean the switch is usable again.
        case ADDED:
        case ACTIVATED:
            onSwitchUp(sw);
            break;

        case REMOVED:
        case DEACTIVATED:
            // Update the cached entry only if this switch is already known.
            if (networkCache.cacheContainsSwitch(sw.getSwitchId())) {
                networkCache.updateSwitch(sw);
            }

            // Every flow routed through the lost switch must be rerouted.
            affectedFlows = flowCache.getFlowsWithAffectedPath(sw.getSwitchId());
            emitRerouteCommands(affectedFlows, tuple, "SWITCH", FlowOperation.UPDATE);

            break;
        // CACHED and CHANGED carry no action here, by design.
        case CACHED:
            break;
        case CHANGED:
            break;

        default:
            logger.warn("Unknown state update switch info message");
            break;
    }
}
 
Example 57
Project: open-kilda   File: CacheBolt.java   Source Code and License 5 votes vote down vote up
/**
 * Applies an ISL state-change event to the network cache and emits reroute
 * commands for the flows affected by the change.
 */
private void handleIslEvent(IslInfoData isl, Tuple tuple) {
    logger.info("State update isl {} message cached {}", isl.getId(), isl.getState());
    Set<ImmutablePair<Flow, Flow>> affectedFlows;

    switch (isl.getState()) {
        case DISCOVERED:
            // Upsert the link, then retry every flow currently DOWN — the
            // new link may provide a usable path.
            if (networkCache.cacheContainsIsl(isl.getId())) {
                networkCache.updateIsl(isl);
            } else {
                networkCache.createIsl(isl);
            }
            affectedFlows = flowCache.dumpFlows().stream()
                    .filter(flow -> FlowState.DOWN.equals(flow.getLeft().getState()))
                    .collect(Collectors.toSet());
            emitRerouteCommands(affectedFlows, tuple, "ISL", FlowOperation.UPDATE);
            break;

        case FAILED:
            // Remove the dead link (tolerating it already being absent from
            // the cache) and reroute the flows whose path used it.
            try {
                networkCache.deleteIsl(isl.getId());
            } catch (CacheException exception) {
                logger.warn("{}:{}", exception.getErrorMessage(), exception.getErrorDescription());
            }
            affectedFlows = flowCache.getFlowsWithAffectedPath(isl);
            emitRerouteCommands(affectedFlows, tuple, "ISL", FlowOperation.UPDATE);
            break;

        // These states carry no cache-relevant change, by design.
        case OTHER_UPDATE:
            break;

        case CACHED:
            break;

        default:
            logger.warn("Unknown state update isl info message");
            break;
    }
}
 
Example 58
Project: storm-rabbitmq   File: RabbitMqBoltTest.java   Source Code and License 5 votes vote down vote up
/**
 * Verifies that a failure to obtain a RabbitMQ channel is handled by
 * reporting the error and failing the tuple, rather than propagating the
 * exception out of execute().
 */
@Test
public void unableToGetChannel() throws Exception {
    // Make the provider fail for every channel request.
    doThrow(Exception.class).when(rabbitMqChannelProvider).getChannel();
    RabbitMqBolt rabbitMqBolt = new RabbitMqBolt(rabbitMqChannelProvider, new EmptyTupleToRabbitMqMessageConverter());
    rabbitMqBolt.prepare(null, null, mockOutputCollector);
    Tuple tuple = mock(Tuple.class);
    rabbitMqBolt.execute(tuple);
    // The bolt must surface the error exactly once and fail the tuple.
    verify(mockOutputCollector, times(1)).reportError(any(Exception.class));
    verify(mockOutputCollector, times(1)).fail(tuple);
}
 
Example 59
Project: DBus   File: MaDefaultHandler.java   Source Code and License 5 votes vote down vote up
/**
 * Builds a termination message for the given version's schema/table and
 * emits it downstream as a DATA_INCREMENT_TERMINATION command.
 */
private void sendTermination(MetaVersion ver, Tuple input) {
    DbusMessage message = BoltCommandHandlerHelper.buildTerminationMessage(ver.getSchema(), ver.getTable(), ver.getVersion());

    EmitData data = new EmitData();
    data.add(EmitData.AVRO_SCHEMA, EmitData.NO_VALUE);
    data.add(EmitData.VERSION, ver);
    data.add(EmitData.MESSAGE, message);

    this.emit(listener.getOutputCollector(), input, groupField(ver.getSchema(), ver.getTable()), data, Command.DATA_INCREMENT_TERMINATION);

    // Updating the data table status to ABORT is intentionally disabled —
    // NOTE(review): confirm this transition should stay off.
    //BoltCommandHandlerHelper.changeDataTableStatus(ver.getSchema(), ver.getTable(), DataTable.STATUS_ABORT);
}
 
Example 60
Project: open-kilda   File: SpeakerBolt.java   Source Code and License 5 votes vote down vote up
/**
 * First phase of simulated ISL discovery: if the source switch and port
 * are active, fabricate an IslInfoData as if both endpoints were up and
 * emit it keyed on the peer switch, so the second phase runs on the
 * instance that owns that switch.
 *
 * @param tuple the triggering tuple, used as the emit anchor
 * @param data  discovery command naming the source switch and port
 * @throws Exception if the switch cannot be resolved
 */
protected void discoverIsl(Tuple tuple, DiscoverIslCommandData data) throws Exception {
    /*
     * This process is a bit screwy and does put a loop in the topology:
     *
     * 1.  Determine if the source switch is up and the source port is an Active ISL port
     * 2.  Create the IslInfoData package as if it is a working ISL (both ports are active)
     * 3.  Emit tha IslInfoData back to SpeakerBolt with fields grouping but keyed on the second switch to
     *     ensure that the tuple goes to the instance which has that switch in it's switches Map and set command
     *     to DiscoverIslP2
     */

    ISwitchImpl sw = getSwitch(data.getSwitchId());
    // Inactive switches never report ISLs.
    if (!sw.isActive()) {
        return;
    }
    IPortImpl localPort = sw.getPort(data.getPortNo());

    if (localPort.isActiveIsl()) {
        List<PathNode> path = new ArrayList<>();
        PathNode path1 = new PathNode(sw.getDpid().toString(), localPort.getNumber(), 0);
        path1.setSegLatency(localPort.getLatency());
        PathNode path2 = new PathNode(localPort.getPeerSwitch(), localPort.getPeerPortNum(), 1);
        path.add(path1);
        path.add(path2);
        // 100000 is a fixed simulator constant — NOTE(review): confirm its
        // units (bandwidth/speed) against the IslInfoData constructor.
        IslInfoData islInfoData = new IslInfoData(
                localPort.getLatency(),
                path,
                100000,
                IslChangeType.DISCOVERED,
                100000);
        // Key on the lower-cased peer switch id for fields grouping.
        collector.emit(SimulatorTopology.SWITCH_BOLT_STREAM, tuple,
                new Values(localPort.getPeerSwitch().toLowerCase(), Commands.DO_DISCOVER_ISL_P2_COMMAND.name(), islInfoData));
    }
}