backtype.storm.topology.IRichSpout Java Examples

The following examples show how to use backtype.storm.topology.IRichSpout. They are drawn from open source projects; each example lists its source file, originating project, and license.
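Before the project examples, here is a minimal sketch of the interface contract itself: a trivial spout that cycles through a fixed word list. This class is illustrative only and is not taken from any project below. It extends BaseRichSpout, which implements IRichSpout and provides no-op defaults for ack, fail, activate, deactivate, and close.

import java.util.Map;

import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;

// Illustrative only: emits one word per nextTuple() call.
public class RandomWordSpout extends BaseRichSpout {
    private SpoutOutputCollector collector;
    private final String[] words = {"alpha", "beta", "gamma"};
    private int index = 0;

    @Override
    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
        this.collector = collector;
    }

    @Override
    public void nextTuple() {
        // Storm calls nextTuple() in a loop on the spout's executor thread.
        collector.emit(new Values(words[index]));
        index = (index + 1) % words.length;
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("word"));
    }
}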
Example #1
Source File: HBaseAuditLogApplication.java    From eagle with Apache License 2.0
@Override
public StormTopology execute(Config config, StormEnvironment environment) {
    TopologyBuilder builder = new TopologyBuilder();
    KafkaSpoutProvider provider = new KafkaSpoutProvider();
    IRichSpout spout = provider.getSpout(config);

    HBaseAuditLogParserBolt bolt = new HBaseAuditLogParserBolt();

    int numOfSpoutTasks = config.getInt(SPOUT_TASK_NUM);
    int numOfParserTasks = config.getInt(PARSER_TASK_NUM);
    int numOfJoinTasks = config.getInt(JOIN_TASK_NUM);
    int numOfSinkTasks = config.getInt(SINK_TASK_NUM);

    builder.setSpout("ingest", spout, numOfSpoutTasks);
    BoltDeclarer boltDeclarer = builder.setBolt("parserBolt", bolt, numOfParserTasks);
    boltDeclarer.fieldsGrouping("ingest", new Fields(StringScheme.STRING_SCHEME_KEY));

    HBaseResourceSensitivityDataJoinBolt joinBolt = new HBaseResourceSensitivityDataJoinBolt(config);
    BoltDeclarer joinBoltDeclarer = builder.setBolt("joinBolt", joinBolt, numOfJoinTasks);
    joinBoltDeclarer.fieldsGrouping("parserBolt", new Fields("f1"));

    StormStreamSink sinkBolt = environment.getStreamSink("hbase_audit_log_stream", config);
    BoltDeclarer kafkaBoltDeclarer = builder.setBolt("kafkaSink", sinkBolt, numOfSinkTasks);
    kafkaBoltDeclarer.fieldsGrouping("joinBolt", new Fields("user"));
    return builder.createTopology();
}
 
Example #2
Source File: OozieAuditLogApplication.java    From eagle with Apache License 2.0
@Override
public StormTopology execute(Config config, StormEnvironment environment) {
    TopologyBuilder builder = new TopologyBuilder();
    KafkaSpoutProvider provider = new KafkaSpoutProvider();
    IRichSpout spout = provider.getSpout(config);

    int numOfSpoutTasks = config.getInt(SPOUT_TASK_NUM);
    int numOfParserTasks = config.getInt(PARSER_TASK_NUM);
    int numOfJoinTasks = config.getInt(JOIN_TASK_NUM);

    builder.setSpout("ingest", spout, numOfSpoutTasks);

    OozieAuditLogParserBolt parserBolt = new OozieAuditLogParserBolt();
    BoltDeclarer parserBoltDeclarer = builder.setBolt("parserBolt", parserBolt, numOfParserTasks);
    parserBoltDeclarer.fieldsGrouping("ingest", new Fields(StringScheme.STRING_SCHEME_KEY));

    OozieResourceSensitivityDataJoinBolt joinBolt = new OozieResourceSensitivityDataJoinBolt(config);
    BoltDeclarer boltDeclarer = builder.setBolt("joinBolt", joinBolt, numOfJoinTasks);
    boltDeclarer.fieldsGrouping("parserBolt", new Fields("f1"));

    StormStreamSink sinkBolt = environment.getStreamSink("oozie_audit_log_stream", config);
    int numOfSinkTasks = config.getInt(SINK_TASK_NUM);
    BoltDeclarer kafkaBoltDeclarer = builder.setBolt("kafkaSink", sinkBolt, numOfSinkTasks);
    kafkaBoltDeclarer.fieldsGrouping("joinBolt", new Fields("user"));
    return builder.createTopology();
}
 
Example #3
Source File: HdfsAuthLogMonitoringMain.java    From eagle with Apache License 2.0
public static void main(String[] args) throws Exception {
    System.setProperty("config.resource", "/application.conf");
    Config config = ConfigFactory.load();
    KafkaSpoutProvider provider = new KafkaSpoutProvider();
    IRichSpout spout = provider.getSpout(config);

    SecurityLogParserBolt bolt = new SecurityLogParserBolt();
    TopologyBuilder builder = new TopologyBuilder();

    int numOfSpoutTasks = config.getInt(SPOUT_TASK_NUM);
    int numOfParserTasks = config.getInt(PARSER_TASK_NUM);
    int numOfSinkTasks = config.getInt(SINK_TASK_NUM);

    builder.setSpout("ingest", spout, numOfSpoutTasks);
    BoltDeclarer boltDeclarer = builder.setBolt("parserBolt", bolt, numOfParserTasks);
    boltDeclarer.shuffleGrouping("ingest");

    KafkaBolt kafkaBolt = new KafkaBolt();
    BoltDeclarer kafkaBoltDeclarer = builder.setBolt("kafkaSink", kafkaBolt, numOfSinkTasks);
    kafkaBoltDeclarer.shuffleGrouping("parserBolt");

    StormTopology topology = builder.createTopology();

    TopologySubmitter.submit(topology, config);
}
 
Example #4
Source File: GCLogApplication.java    From eagle with Apache License 2.0
@Override
public StormTopology execute(Config config, StormEnvironment environment) {
    TopologyBuilder builder = new TopologyBuilder();
    KafkaSpoutProvider provider = new KafkaSpoutProvider();
    IRichSpout spout = provider.getSpout(config);

    int numOfSpoutTasks = config.getInt(SPOUT_TASK_NUM);
    int numOfAnalyzerTasks = config.getInt(ANALYZER_TASK_NUM);
    int numOfGeneratorTasks = config.getInt(GENERATOR_TASK_NUM);
    int numOfSinkTasks = config.getInt(SINK_TASK_NUM);

    builder.setSpout("ingest", spout, numOfSpoutTasks);

    GCLogAnalyzerBolt bolt = new GCLogAnalyzerBolt();
    BoltDeclarer boltDeclarer = builder.setBolt("analyzerBolt", bolt, numOfAnalyzerTasks);
    boltDeclarer.fieldsGrouping("ingest", new Fields(StringScheme.STRING_SCHEME_KEY));

    GCMetricGeneratorBolt generatorBolt = new GCMetricGeneratorBolt(config);
    BoltDeclarer joinBoltDeclarer = builder.setBolt("generatorBolt", generatorBolt, numOfGeneratorTasks);
    joinBoltDeclarer.fieldsGrouping("analyzerBolt", new Fields("f1"));

    StormStreamSink sinkBolt = environment.getStreamSink("gc_log_stream", config);
    BoltDeclarer kafkaBoltDeclarer = builder.setBolt("kafkaSink", sinkBolt, numOfSinkTasks);
    kafkaBoltDeclarer.fieldsGrouping("generatorBolt", new Fields("f1"));
    return builder.createTopology();
}
 
Example #5
Source File: CommonSpoutCreator.java    From PoseidonX with Apache License 2.0
/**
 * Create a Storm spout that wraps the given operator.
 */
public static IRichSpout create(IRichOperator operator)
{
    StormSpout spout = new StormSpout();
    spout.setOperator(operator);
    return spout;
}
 
Example #6
Source File: AbstractDRPCTopology.java    From jea with Apache License 2.0
@Override
protected IRichSpout initSpout() {
	if (this.localDRPC != null) {
		// Local deployment: bind the spout to the in-process DRPC server
		return new DRPCSpout(this.getTopologyName(), this.localDRPC);
	} else {
		// Remote deployment: the spout discovers DRPC servers from the cluster config
		return new DRPCSpout(this.getTopologyName());
	}
	}
}
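For context, the local-deployment branch above can be exercised end to end with LocalDRPC. The sketch below is hypothetical: the topology name "drpc-demo" and function name "exclaim" are illustrative, and wiring DRPCSpout straight into ReturnResults yields an identity function, since the spout emits the call arguments and ReturnResults hands its first input field back to the caller.

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.LocalDRPC;
import backtype.storm.drpc.DRPCSpout;
import backtype.storm.drpc.ReturnResults;
import backtype.storm.topology.TopologyBuilder;

public class LocalDrpcSketch {
    public static void main(String[] args) throws Exception {
        LocalDRPC drpc = new LocalDRPC();
        TopologyBuilder builder = new TopologyBuilder();
        // DRPCSpout emits ("args", "return-info") for each DRPC call
        builder.setSpout("drpc", new DRPCSpout("exclaim", drpc));
        // A real topology would put processing bolts in between;
        // ReturnResults routes the final tuple back to the waiting client
        builder.setBolt("return", new ReturnResults()).shuffleGrouping("drpc");

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("drpc-demo", new Config(), builder.createTopology());
        System.out.println(drpc.execute("exclaim", "hello")); // prints "hello"
        cluster.shutdown();
        drpc.shutdown();
    }
}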
 
Example #7
Source File: TransactionTopologyBuilder.java    From jstorm with Apache License 2.0
private SpoutDeclarer setSpout(String id, IRichSpout spout, Number parallelismHint, boolean isSchedule) {
    upToDownstreamComponentsMap.put(id, new HashSet<String>());
    IRichSpout spoutExecutor;
    if (spout instanceof IBasicTransactionSpoutExecutor) {
        spoutExecutor = new BasicTransactionSpout((IBasicTransactionSpoutExecutor) spout);
    } else if (!isSchedule) {
        spoutExecutor = new BasicTransactionSpout((ITransactionSpoutExecutor) spout);
    } else {
        spoutExecutor = new ScheduleTransactionSpout((ITransactionSpoutExecutor) spout);
    }
    return super.setSpout(id, spoutExecutor, parallelismHint);
}
 
Example #8
Source File: AckTransactionSpout.java    From jstorm with Apache License 2.0
public AckTransactionSpout(IRichSpout spout) {
    this.spoutExecutor = spout;
    // Cache tuples only when the wrapped spout wants acked/failed values back
    this.isCacheTuple = spoutExecutor instanceof IAckValueSpout || spoutExecutor instanceof IFailValueSpout;
}
 
Example #9
Source File: FlowmixBuilder.java    From flowmix with Apache License 2.0
/**
 * @return A topology builder that can further be customized.
 */
public TopologyBuilder create() {

    TopologyBuilder builder = new TopologyBuilder();

    if(eventsComponent instanceof IRichSpout)
      builder.setSpout(EVENT, (IRichSpout) eventsComponent, eventLoaderParallelism == -1 ? parallelismHint : eventLoaderParallelism);
    else if(eventsComponent instanceof IRichBolt)
      builder.setBolt(EVENT, (IRichBolt) eventsComponent, eventLoaderParallelism == -1 ? parallelismHint : eventLoaderParallelism);
    else
      throw new RuntimeException("The component for events is not valid. Must be IRichSpout or IRichBolt");


    if(flowLoaderSpout instanceof IRichSpout)
      builder.setSpout(FLOW_LOADER_STREAM, (IRichSpout) flowLoaderSpout, 1);
    else if(flowLoaderSpout instanceof IRichBolt)
      builder.setBolt(FLOW_LOADER_STREAM, (IRichBolt) flowLoaderSpout, 1);
    else
      throw new RuntimeException("The component for rules is not valid. Must be IRichSpout or IRichBolt");

    builder.setSpout("tick", new TickSpout(1000), 1);
    builder.setBolt(INITIALIZER, new FlowInitializerBolt(), parallelismHint)  // kicks off a flow determining where to start
            .localOrShuffleGrouping(EVENT)
            .allGrouping(FLOW_LOADER_STREAM, FLOW_LOADER_STREAM);

    declarebolt(builder, FILTER, new FilterBolt(), parallelismHint, true);
    declarebolt(builder, SELECT, new SelectorBolt(), parallelismHint, true);
    declarebolt(builder, PARTITION, new PartitionBolt(), parallelismHint, true);
    declarebolt(builder, SWITCH, new SwitchBolt(), parallelismHint, true);
    declarebolt(builder, AGGREGATE, new AggregatorBolt(), parallelismHint, true);
    declarebolt(builder, JOIN, new JoinBolt(), parallelismHint, true);
    declarebolt(builder, EACH, new EachBolt(), parallelismHint, true);
    declarebolt(builder, SORT, new SortBolt(), parallelismHint, true);
    declarebolt(builder, SPLIT, new SplitBolt(), parallelismHint, true);
    declarebolt(builder, OUTPUT, outputBolt, parallelismHint, false);

    return builder;
}
 
Example #10
Source File: TridentTopology.java    From jstorm with Apache License 2.0
private static Map getSpoutComponentConfig(Object spout) {
    if(spout instanceof IRichSpout) {
        return ((IRichSpout) spout).getComponentConfiguration();
    } else if (spout instanceof IBatchSpout) {
        return ((IBatchSpout) spout).getComponentConfiguration();
    } else {
        return ((ITridentSpout) spout).getComponentConfiguration();
    }
}
 
Example #11
Source File: HiveQueryMonitoringApplication.java    From eagle with Apache License 2.0
@Override
public StormTopology execute(Config config, StormEnvironment environment) {
    TopologyBuilder builder = new TopologyBuilder();
    HiveJobRunningSourcedStormSpoutProvider provider = new HiveJobRunningSourcedStormSpoutProvider();
    int numOfSpoutTasks = config.getInt(SPOUT_TASK_NUM);
    IRichSpout spout = provider.getSpout(config, numOfSpoutTasks);

    int numOfFilterTasks = config.getInt(FILTER_TASK_NUM);
    int numOfParserTasks = config.getInt(PARSER_TASK_NUM);
    int numOfJoinTasks = config.getInt(JOIN_TASK_NUM);
    int numOfSinkTasks = config.getInt(SINK_TASK_NUM);

    builder.setSpout("ingest", spout, numOfSpoutTasks);
    JobFilterBolt bolt = new JobFilterBolt();
    BoltDeclarer boltDeclarer = builder.setBolt("filterBolt", bolt, numOfFilterTasks);
    boltDeclarer.fieldsGrouping("ingest", new Fields("jobId"));

    HiveQueryParserBolt parserBolt = new HiveQueryParserBolt();
    BoltDeclarer parserBoltDeclarer = builder.setBolt("parserBolt", parserBolt, numOfParserTasks);
    parserBoltDeclarer.fieldsGrouping("filterBolt", new Fields("user"));

    HiveSensitivityDataEnrichBolt joinBolt = new HiveSensitivityDataEnrichBolt(config);
    BoltDeclarer joinBoltDeclarer = builder.setBolt("joinBolt", joinBolt, numOfJoinTasks);
    joinBoltDeclarer.fieldsGrouping("parserBolt", new Fields("user"));

    StormStreamSink sinkBolt = environment.getStreamSink("hive_query_stream", config);
    BoltDeclarer kafkaBoltDeclarer = builder.setBolt("kafkaSink", sinkBolt, numOfSinkTasks);
    kafkaBoltDeclarer.fieldsGrouping("joinBolt", new Fields("user"));
    return builder.createTopology();
}
 
Example #12
Source File: HadoopQueueRunningApp.java    From eagle with Apache License 2.0
public StormTopology execute(Config config, StormEnvironment environment) {
    HadoopQueueRunningAppConfig appConfig = new HadoopQueueRunningAppConfig(config);

    String spoutName = "runningQueueSpout";
    String persistBoltName = "persistBolt";

    IRichSpout spout = new HadoopQueueRunningSpout(appConfig);

    //String acceptedAppStreamId = persistBoltName + "-to-" + DataSource.RUNNING_APPS.toString();
    //String schedulerStreamId = persistBoltName + "-to-" + DataSource.SCHEDULER.toString();
    //streamMaps.put(DataSource.RUNNING_APPS, acceptedAppStreamId);
    //streamMaps.put(DataSource.SCHEDULER, schedulerStreamId);

    int numOfPersistTasks = appConfig.topology.numPersistTasks;
    int numOfSinkTasks = appConfig.topology.numSinkTasks;
    int numOfSpoutTasks = 1;

    HadoopQueueMetricPersistBolt bolt = new HadoopQueueMetricPersistBolt(appConfig);
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout(spoutName, spout, numOfSpoutTasks).setNumTasks(numOfSpoutTasks);
    builder.setBolt(persistBoltName, bolt, numOfPersistTasks).setNumTasks(numOfPersistTasks).shuffleGrouping(spoutName);

    StormStreamSink queueSinkBolt = environment.getStreamSink("HADOOP_QUEUE_STREAM", config);
    builder.setBolt("queueKafkaSink", queueSinkBolt, numOfSinkTasks)
            .setNumTasks(numOfSinkTasks).shuffleGrouping(persistBoltName);

    //StormStreamSink appSinkBolt = environment.getStreamSink("ACCEPTED_APP_STREAM", config);
    //builder.setBolt("appKafkaSink", appSinkBolt, numOfSinkTasks)
    //        .setNumTasks(numOfSinkTasks).shuffleGrouping(persistBoltName, acceptedAppStreamId);

    return builder.createTopology();
}
 
Example #13
Source File: TridentTopology.java    From jstorm with Apache License 2.0
public Stream newStream(String txId, IRichSpout spout) {
    return newStream(txId, new RichSpoutBatchExecutor(spout));
}
 
Example #14
Source File: ComponentCreator.java    From PoseidonX with Apache License 2.0
/**
 * Create a Storm spout instance from a Streaming operator.
 */
public static IRichSpout createSpout(IRichOperator operator) throws StreamingException
{
    return CommonSpoutCreator.create(operator);
}
 
Example #15
Source File: RichSpoutBatchTriggerer.java    From jstorm with Apache License 2.0
public RichSpoutBatchTriggerer(IRichSpout delegate, String streamName, String batchGroup) {
    _delegate = delegate;
    _stream = streamName;
    _coordStream = TridentBoltExecutor.COORD_STREAM(batchGroup);
}
 
Example #16
Source File: RichSpoutBatchExecutor.java    From jstorm with Apache License 2.0
public RichSpoutBatchExecutor(IRichSpout spout) {
    _spout = spout;
}
 
Example #17
Source File: SpoutTracker.java    From jstorm with Apache License 2.0
public SpoutTracker(IRichSpout delegate, String trackId) {
    _delegate = delegate;
    _trackId = trackId;
}
 
Example #18
Source File: TransactionTopologyBuilder.java    From jstorm with Apache License 2.0
/********************** build spout declarer ***********************/
@Override
public SpoutDeclarer setSpout(String id, IRichSpout spout, Number parallelismHint) throws IllegalArgumentException {
    return setSpout(id, spout, parallelismHint, true);
}
 
Example #19
Source File: ExecutionContext.java    From jstorm with Apache License 2.0
public void addSpout(String id, IRichSpout spout){
    this.spoutMap.put(id, spout);
}
 
Example #20
Source File: AbstractTopology.java    From jea with Apache License 2.0
protected TopologyBuilder initBuilder(){
	TopologyBuilder builder = new TopologyBuilder();
	IRichSpout spout = initSpout();
	builder.setSpout(spoutId, spout);
	return builder;
}
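A hypothetical concrete subclass shows the template-method pattern at work: the base class wires the topology, the subclass only supplies the spout. RandomWordSpout is the illustrative spout from the introduction; any other abstract members AbstractTopology may declare are omitted here.

public class WordTopology extends AbstractTopology {
    @Override
    protected IRichSpout initSpout() {
        // Illustrative spout; any IRichSpout implementation works here
        return new RandomWordSpout();
    }
}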
 
Example #21
Source File: KafkaProducer.java    From storm-benchmark with Apache License 2.0
public IRichSpout getSpout() {
  return spout;
}
 
Example #22
Source File: WordCount.java    From storm-benchmark with Apache License 2.0
public IRichSpout getSpout() {
  return spout;
}
 
Example #23
Source File: ExecutionContext.java    From flux with Apache License 2.0
public void addSpout(String id, IRichSpout spout){
    this.spoutMap.put(id, spout);
}
 
Example #24
Source File: AbstractHdfsAuditLogApplication.java    From eagle with Apache License 2.0
@Override
public StormTopology execute(Config config, StormEnvironment environment) {
    TopologyBuilder builder = new TopologyBuilder();
    KafkaSpoutProvider provider = new KafkaSpoutProvider();
    IRichSpout spout = provider.getSpout(config);

    int numOfSpoutTasks = config.getInt(SPOUT_TASK_NUM);
    int numOfParserTasks = config.getInt(PARSER_TASK_NUM);
    int numOfSensitivityJoinTasks = config.getInt(SENSITIVITY_JOIN_TASK_NUM);
    int numOfIPZoneJoinTasks = config.getInt(IPZONE_JOIN_TASK_NUM);
    int numOfSinkTasks = config.getInt(SINK_TASK_NUM);
    int numOfTrafficMonitorTasks = config.hasPath(TRAFFIC_MONITOR_TASK_NUM) ? config.getInt(TRAFFIC_MONITOR_TASK_NUM) : numOfParserTasks;

    builder.setSpout("ingest", spout, numOfSpoutTasks).setNumTasks(numOfSpoutTasks);

    // ---------------------
    // ingest -> parserBolt
    // ---------------------

    BaseRichBolt parserBolt = getParserBolt(config);
    BoltDeclarer boltDeclarer = builder.setBolt("parserBolt", parserBolt, numOfParserTasks).setNumTasks(numOfParserTasks);
    boltDeclarer.shuffleGrouping("ingest");

    // Boolean useDefaultPartition = !config.hasPath("eagleProps.useDefaultPartition") || config.getBoolean("eagleProps.useDefaultPartition");
    // if (useDefaultPartition) {
    //    boltDeclarer.fieldsGrouping("ingest", new Fields(StringScheme.STRING_SCHEME_KEY));
    // } else {
    //    boltDeclarer.customGrouping("ingest", new CustomPartitionGrouping(createStrategy(config)));
    // }

    // ------------------------------
    // parserBolt -> sensitivityJoin
    // ------------------------------

    HdfsSensitivityDataEnrichBolt sensitivityDataJoinBolt = new HdfsSensitivityDataEnrichBolt(config);
    BoltDeclarer sensitivityDataJoinBoltDeclarer = builder.setBolt("sensitivityJoin", sensitivityDataJoinBolt, numOfSensitivityJoinTasks).setNumTasks(numOfSensitivityJoinTasks);
    // sensitivityDataJoinBoltDeclarer.fieldsGrouping("parserBolt", new Fields("f1"));
    sensitivityDataJoinBoltDeclarer.shuffleGrouping("parserBolt");

    if (config.hasPath(TRAFFIC_MONITOR_ENABLED) && config.getBoolean(TRAFFIC_MONITOR_ENABLED)) {
        HadoopLogAccumulatorBolt auditLogAccumulator = new HadoopLogAccumulatorBolt(config);
        BoltDeclarer auditLogAccumulatorDeclarer = builder.setBolt("logAccumulator", auditLogAccumulator, numOfTrafficMonitorTasks);
        auditLogAccumulatorDeclarer.setNumTasks(numOfTrafficMonitorTasks).shuffleGrouping("parserBolt");
    }

    // ------------------------------
    // sensitivityJoin -> ipZoneJoin
    // ------------------------------
    IPZoneDataEnrichBolt ipZoneDataJoinBolt = new IPZoneDataEnrichBolt(config);
    BoltDeclarer ipZoneDataJoinBoltDeclarer = builder.setBolt("ipZoneJoin", ipZoneDataJoinBolt, numOfIPZoneJoinTasks).setNumTasks(numOfIPZoneJoinTasks);
    // ipZoneDataJoinBoltDeclarer.fieldsGrouping("sensitivityJoin", new Fields("user"));
    ipZoneDataJoinBoltDeclarer.shuffleGrouping("sensitivityJoin");

    // ------------------------
    // ipZoneJoin -> kafkaSink
    // ------------------------

    StormStreamSink sinkBolt = environment.getStreamSink("hdfs_audit_log_stream", config);
    BoltDeclarer kafkaBoltDeclarer = builder.setBolt("kafkaSink", sinkBolt, numOfSinkTasks).setNumTasks(numOfSinkTasks);
    kafkaBoltDeclarer.shuffleGrouping("ipZoneJoin");
    return builder.createTopology();
}
 
Example #25
Source File: HdfsAuditLogApplication.java    From eagle with Apache License 2.0
@Override
public StormTopology execute(Config config, StormEnvironment environment) {
    TopologyBuilder builder = new TopologyBuilder();
    KafkaSpoutProvider provider = new KafkaSpoutProvider();
    IRichSpout spout = provider.getSpout(config);

    int numOfSpoutTasks = config.getInt(SPOUT_TASK_NUM);
    int numOfParserTasks = config.getInt(PARSER_TASK_NUM);
    int numOfSensitivityJoinTasks = config.getInt(SENSITIVITY_JOIN_TASK_NUM);
    int numOfIPZoneJoinTasks = config.getInt(IPZONE_JOIN_TASK_NUM);
    int numOfSinkTasks = config.getInt(SINK_TASK_NUM);

    builder.setSpout("ingest", spout, numOfSpoutTasks).setNumTasks(numOfSpoutTasks);

    BaseRichBolt parserBolt = getParserBolt(config);
    BoltDeclarer boltDeclarer = builder.setBolt("parserBolt", parserBolt, numOfParserTasks).setNumTasks(numOfParserTasks);
    boltDeclarer.shuffleGrouping("ingest");

    HdfsSensitivityDataEnrichBolt sensitivityDataJoinBolt = new HdfsSensitivityDataEnrichBolt(config);
    BoltDeclarer sensitivityDataJoinBoltDeclarer = builder.setBolt("sensitivityJoin", sensitivityDataJoinBolt, numOfSensitivityJoinTasks).setNumTasks(numOfSensitivityJoinTasks);
    // sensitivityDataJoinBoltDeclarer.fieldsGrouping("parserBolt", new Fields("f1"));
    sensitivityDataJoinBoltDeclarer.shuffleGrouping("parserBolt");

    // ------------------------------
    // sensitivityJoin -> ipZoneJoin
    // ------------------------------
    IPZoneDataEnrichBolt ipZoneDataJoinBolt = new IPZoneDataEnrichBolt(config);
    BoltDeclarer ipZoneDataJoinBoltDeclarer = builder.setBolt("ipZoneJoin", ipZoneDataJoinBolt, numOfIPZoneJoinTasks).setNumTasks(numOfIPZoneJoinTasks);
    // ipZoneDataJoinBoltDeclarer.fieldsGrouping("sensitivityJoin", new Fields("user"));
    ipZoneDataJoinBoltDeclarer.shuffleGrouping("sensitivityJoin");

    // ------------------------
    // ipZoneJoin -> kafkaSink
    // ------------------------

    StormStreamSink sinkBolt = environment.getStreamSink("HDFS_AUDIT_LOG_ENRICHED_STREAM", config);
    BoltDeclarer kafkaBoltDeclarer = builder.setBolt("kafkaSink", sinkBolt, numOfSinkTasks).setNumTasks(numOfSinkTasks);
    kafkaBoltDeclarer.shuffleGrouping("ipZoneJoin");

    if (config.hasPath(TRAFFIC_MONITOR_ENABLED) && config.getBoolean(TRAFFIC_MONITOR_ENABLED)) {
        builder.setSpout("trafficSpout", environment.getStreamSource("HADOOP_JMX_RESOURCE_STREAM", config), 1)
                .setNumTasks(1);

        builder.setBolt("trafficParserBolt", new TrafficParserBolt(config), 1)
                .setNumTasks(1)
                .shuffleGrouping("trafficSpout");
        builder.setBolt("trafficSinkBolt", environment.getStreamSink("HDFS_AUDIT_LOG_TRAFFIC_STREAM", config), 1)
                .setNumTasks(1)
                .shuffleGrouping("trafficParserBolt");
    }

    return builder.createTopology();
}
 
Example #26
Source File: TransactionTopologyBuilder.java    From jstorm with Apache License 2.0
/**
 * Builds a spout that remains compatible with Storm's ack mechanism.
 *
 * @param id spout id
 * @param spout the spout to wrap with ack support
 * @param parallelismHint the spout's parallelism
 * @return the spout declarer for further configuration
 */
public SpoutDeclarer setSpoutWithAck(String id, IRichSpout spout, Number parallelismHint) {
    return setSpout(id, new AckTransactionSpout(spout), parallelismHint);
}
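Tying Examples #8 and #26 together, a hedged usage sketch: the wrapper lets an ordinary IRichSpout join a transactional topology while keeping Storm-style ack/fail semantics. The component id "ingest", the parallelism of 2, and the RandomWordSpout from the introduction are all illustrative.

TransactionTopologyBuilder builder = new TransactionTopologyBuilder();
// "ingest" and the parallelism of 2 are arbitrary illustration values
builder.setSpoutWithAck("ingest", new RandomWordSpout(), 2);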
 
Example #27
Source File: AbstractTopology.java    From jea with Apache License 2.0
protected abstract IRichSpout initSpout();