Java Code Examples for backtype.storm.topology.TopologyBuilder

The following examples show how to use backtype.storm.topology.TopologyBuilder. They are extracted from open-source projects. You can vote up the examples you find helpful or vote down the ones you don't, and follow the links above each example to reach the original project or source file. The right sidebar shows related API usage.
Example 1
Source Project: storm-solr   Source File: EventsimTopology.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds the eventsim topology: one spout feeding a Solr indexing bolt whose
 * tasks are partitioned by document hash range, so every document in a given
 * range is handled by the same bolt instance.
 *
 * @param app streaming application supplying config and parallelism hints
 * @return the assembled topology
 */
public StormTopology build(StreamingApp app) throws Exception {
  SpringSpout spout = new SpringSpout("eventsimSpout", spoutFields);
  SpringBolt solrBolt = new SpringBolt("collectionPerTimeFrameSolrBoltAction",
      app.tickRate("collectionPerTimeFrameSolrBoltAction"));

  // Route all docs for the same hash range to one bolt instance, which lets
  // that bolt stream docs straight to the shard leader.
  int numShards = Integer.parseInt(String.valueOf(app.getStormConfig().get("spring.eventsimNumShards")));
  HashRangeGrouping grouping = new HashRangeGrouping(app.getStormConfig(), numShards);
  int tasksPerShard = 2 * grouping.getNumShards();

  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("eventsimSpout", spout, app.parallelism("eventsimSpout"));
  builder.setBolt("collectionPerTimeFrameSolrBolt", solrBolt, tasksPerShard)
         .customGrouping("eventsimSpout", grouping);

  return builder.createTopology();
}
 
Example 2
Source Project: eagle   Source File: SparkHistoryJobApp.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds the Spark history-job topology: a fetch spout wired to a parse bolt
 * with shuffle grouping, using executor/task counts from the app config.
 *
 * @param config      raw application config
 * @param environment Storm environment (unused here)
 * @return the assembled topology
 */
@Override
public StormTopology execute(Config config, StormEnvironment environment) {
    // Initialize application configuration from the raw config.
    SparkHistoryJobAppConfig appConfig = SparkHistoryJobAppConfig.newInstance(config);

    String spoutName = SparkHistoryJobAppConfig.SPARK_HISTORY_JOB_FETCH_SPOUT_NAME;
    String boltName = SparkHistoryJobAppConfig.SPARK_HISTORY_JOB_PARSE_BOLT_NAME;

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(spoutName,
            new SparkHistoryJobSpout(appConfig),
            appConfig.stormConfig.numOfSpoutExecutors)
            .setNumTasks(appConfig.stormConfig.numOfSpoutTasks);
    builder.setBolt(boltName,
            new SparkHistoryJobParseBolt(appConfig),
            appConfig.stormConfig.numOfParserBoltExecutors)
            .setNumTasks(appConfig.stormConfig.numOfParserBoltTasks)
            .shuffleGrouping(spoutName);

    return builder.createTopology();
}
 
Example 3
/**
 * Runs a session-time-window word count for 60 seconds and checks (via
 * JStormHelper.CheckAckedFail) that no tuples failed.
 */
public static void test() {
    int spoutParallelism = JStormUtils.parseInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
    int countParallelism = JStormUtils.parseInt(conf.get(TOPOLOGY_COUNT_PARALLELISM_HINT), 1);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new FastRandomSentenceSpout(), spoutParallelism);

    // One-second session window; the bolt itself merges window state.
    WordCount counter = new WordCount();
    builder.setBolt("count",
            counter.sessionTimeWindow(Time.seconds(1L)).withWindowStateMerger(counter),
            countParallelism)
            .fieldsGrouping("spout", new Fields("word"));

    // Topology name is derived from the simple name of the calling class.
    String[] nameParts = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = nameParts[nameParts.length - 1];

    try {
        JStormHelper.runTopology(builder.createTopology(), topologyName, conf, 60,
                new JStormHelper.CheckAckedFail(conf), true);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Example 4
Source Project: eagle   Source File: OozieAuditLogApplication.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds the Oozie audit-log topology: Kafka ingest -> parser -> resource
 * sensitivity join -> Kafka sink, with fields grouping at every hop.
 *
 * @param config      application config supplying task counts
 * @param environment provides the stream sink bolt
 * @return the assembled topology
 */
@Override
public StormTopology execute(Config config, StormEnvironment environment) {
    TopologyBuilder builder = new TopologyBuilder();
    IRichSpout kafkaSpout = new KafkaSpoutProvider().getSpout(config);

    int spoutTasks = config.getInt(SPOUT_TASK_NUM);
    int parserTasks = config.getInt(PARSER_TASK_NUM);
    int joinTasks = config.getInt(JOIN_TASK_NUM);
    int sinkTasks = config.getInt(SINK_TASK_NUM);

    builder.setSpout("ingest", kafkaSpout, spoutTasks);

    // Parse raw audit-log lines, keyed by the Kafka string-scheme field.
    builder.setBolt("parserBolt", new OozieAuditLogParserBolt(), parserTasks)
            .fieldsGrouping("ingest", new Fields(StringScheme.STRING_SCHEME_KEY));

    // Join parsed events with resource sensitivity data, keyed by "f1".
    builder.setBolt("joinBolt", new OozieResourceSensitivityDataJoinBolt(config), joinTasks)
            .fieldsGrouping("parserBolt", new Fields("f1"));

    // Sink the joined stream, keyed by user.
    StormStreamSink sinkBolt = environment.getStreamSink("oozie_audit_log_stream", config);
    builder.setBolt("kafkaSink", sinkBolt, sinkTasks)
            .fieldsGrouping("joinBolt", new Fields("user"));

    return builder.createTopology();
}
 
Example 5
Source Project: jstorm   Source File: SlidingTupleTsTopology.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Runs a tuple-timestamp sliding-window sum demo for 60 seconds and asserts
 * (via JStormHelper.CheckAckedFail) that no tuples failed.
 */
public static void test() {
    // Topology name = simple name of the calling class.
    String[] nameParts = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = nameParts[nameParts.length - 1];

    try {
        TopologyBuilder builder = new TopologyBuilder();
        // 5s window advancing every 3s, ordered by the "ts" tuple field,
        // tolerating up to 5s of out-of-order lag.
        BaseWindowedBolt sumBolt = new SlidingWindowSumBolt()
                .withWindow(new Duration(5, TimeUnit.SECONDS), new Duration(3, TimeUnit.SECONDS))
                .withTimestampField("ts")
                .withLag(new Duration(5, TimeUnit.SECONDS));
        builder.setSpout("integer", new RandomIntegerSpout(), 1);
        builder.setBolt("slidingsum", sumBolt, 1).shuffleGrouping("integer");
        builder.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("slidingsum");

        conf.setDebug(true);

        JStormHelper.runTopology(builder.createTopology(), topologyName, conf, 60,
                new JStormHelper.CheckAckedFail(conf), isLocal);
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail("Failed");
    }
}
 
Example 6
Source Project: jstorm   Source File: TridentMinMaxOfDevicesTopology.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Runs the devices trident topology for 60 seconds and asserts, via
 * JStormHelper.CheckAckedFail, that no tuples failed.
 */
public static void test() {
    // NOTE(review): this builder — and the spout/bolt wired onto it — is
    // never submitted; buildDevicesTopology() below is what actually runs.
    // Looks like dead setup code; confirm before removing.
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new InOrderSpout(), 8);
    builder.setBolt("count", new Check(), 8).fieldsGrouping("spout", new Fields("c1"));

    conf.setMaxSpoutPending(20);

    // Topology name = simple name of the calling class.
    String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = className[className.length - 1];

    // Use an in-process DRPC server when running locally.
    if (isLocal) {
        drpc = new LocalDRPC();
    }

    try {
        JStormHelper.runTopology(buildDevicesTopology(), topologyName, conf, 60,
                new JStormHelper.CheckAckedFail(conf), isLocal);
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        Assert.fail("Failed");
    }
}
 
Example 7
/**
 * Runs a session event-time-window word count: tuples carry their own "ts"
 * field (extracted by the WordCount bolt) with a 3 ms session gap. Runs for
 * 60 seconds under JStormHelper's acked/failed check.
 */
public static void test() throws Exception {
    int spoutParallelism = JStormUtils.parseInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
    int countParallelism = JStormUtils.parseInt(conf.get(TOPOLOGY_COUNT_PARALLELISM_HINT), 1);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new FastRandomSentenceSpout(), spoutParallelism);

    // The bolt acts as its own timestamp extractor and window-state merger.
    WordCount counter = new WordCount();
    builder.setBolt("count",
            counter.sessionEventTimeWindow(Time.milliseconds(3L))
                    .withTimestampExtractor(counter)
                    .withWindowStateMerger(counter),
            countParallelism)
            .fieldsGrouping("spout", new Fields("word", "ts"));

    // Topology name = simple name of the calling class.
    String[] nameParts = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = nameParts[nameParts.length - 1];

    JStormHelper.runTopology(builder.createTopology(), topologyName, conf, 60,
            new JStormHelper.CheckAckedFail(conf), true);
}
 
Example 8
Source Project: jstorm   Source File: TestTopology.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Submits the topology under the name configured at "topology.name":
 * in local mode it runs on an in-process cluster for ~200s and then shuts
 * down; otherwise it is handed to the remote cluster.
 */
private static void submitTopology(TopologyBuilder builder) {
    try {
        String topologyName = String.valueOf(conf.get("topology.name"));
        if (local_mode(conf)) {
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology(topologyName, conf, builder.createTopology());
            Thread.sleep(200000);
            localCluster.shutdown();
        } else {
            StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Example 9
Source Project: eagle   Source File: TestStormStreamIdRouting.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Demonstrates routing by stream id: two green bolts each subscribe to a
 * distinct stream of the same spout. Runs forever on a local cluster and is
 * therefore {@code @Ignore}d for CI.
 */
@Ignore
@Test
public void testRoutingByStreamId() throws Exception {
    Config conf = new Config();
    conf.setNumWorkers(2); // use two worker processes
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout("blue-spout", new BlueSpout()); // default parallelism hint

    // Each bolt subscribes to a different stream id emitted by the spout.
    topologyBuilder.setBolt("green-bolt-1", new GreenBolt(1))
        .shuffleGrouping("blue-spout", "green-bolt-stream-1");
    topologyBuilder.setBolt("green-bolt-2", new GreenBolt(2))
        .shuffleGrouping("blue-spout", "green-bolt-stream-2");

    LocalCluster cluster = new LocalCluster();
    // BUG FIX: the Config built above was previously discarded — an empty
    // raw HashMap was submitted instead. Submit the real configuration.
    cluster.submitTopology("mytopology", conf, topologyBuilder.createTopology());

    // Keep the local cluster alive until the test is killed externally.
    while (true) {
        try {
            Thread.sleep(1000);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
 
Example 10
Source Project: jstorm   Source File: TransactionTopologyBuilder.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Registers a bolt in the transactional topology, wrapping the user bolt in
 * the appropriate transactional executor before delegating to the common
 * wiring.
 *
 * @param id              component id; must not already be in use
 * @param bolt            user bolt; treated as stateful iff it implements
 *                        ITransactionStatefulBoltExecutor
 * @param parallelismHint number of executors for this bolt
 * @return declarer for configuring groupings on the wrapped bolt
 * @throws IllegalArgumentException if the id is already used
 */
@Override
public BoltDeclarer setBolt(String id, IRichBolt bolt, Number parallelismHint) throws IllegalArgumentException{
    // Track downstream components of this bolt (populated as groupings are declared).
    upToDownstreamComponentsMap.put(id, new HashSet<String>());
    validateUnusedId(id);
    // Wrap the user bolt in a stateful or stateless transactional executor.
    IRichBolt boltExecutor;
    boolean isStatefulBolt = false;
    if (bolt instanceof ITransactionStatefulBoltExecutor) {
        isStatefulBolt = true;
        boltExecutor = new TransactionStatefulBolt((ITransactionStatefulBoltExecutor) bolt);
    } else {
        boltExecutor = new TransactionBolt((ITransactionBoltExecutor) bolt);
    }
    initCommon(id, boltExecutor, parallelismHint);
    _bolts.put(id, boltExecutor);
    BoltDeclarer ret = new TransactionBoltDeclarer(id);
    // Record statefulness so the runtime knows whether this bolt carries state.
    ret.addConfiguration(TransactionCommon.TRANSACTION_STATEFUL_BOLT, isStatefulBolt);

    // If using KvState bolt, the corresponding init operater would be registered here.
    if (bolt instanceof KvStatefulBoltExecutor) {
        ConfigExtension.registerTransactionTaskStateInitOp(TopologyBuilder.getStormConf(), id, KeyRangeStateTaskInit.class);
    }

    return ret;
}
 
Example 11
Source Project: jstorm   Source File: FastWordCountTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Fast word-count unit test: spout -> split -> count, run for 60 seconds
 * through JStormUnitTestRunner with no result validator.
 */
@Test
public void testFastWordCount()
{
    int spoutParallelism = 1;
    int splitParallelism = 1;
    int countParallelism = 2;

    TopologyBuilder builder = new TopologyBuilder();

    // Toggle to exercise JStorm's localFirstGrouping instead of shuffle.
    boolean isLocalShuffle = false;

    builder.setSpout("spout", new FastWordCountTopology.FastRandomSentenceSpout(), spoutParallelism);
    if (isLocalShuffle) {
        builder.setBolt("split", new FastWordCountTopology.SplitSentence(), splitParallelism)
                .localFirstGrouping("spout");
    } else {
        builder.setBolt("split", new FastWordCountTopology.SplitSentence(), splitParallelism)
                .shuffleGrouping("spout");
    }
    builder.setBolt("count", new FastWordCountTopology.WordCount(), countParallelism)
            .fieldsGrouping("split", new Fields("word"));

    // Typed map instead of a raw HashMap (avoids unchecked-conversion warnings).
    Map<String, Object> config = new HashMap<>();
    config.put(Config.TOPOLOGY_NAME, "FastWordCountTest");

    JStormUnitTestRunner.submitTopology(builder.createTopology(), config, 60, null);
}
 
Example 12
Source Project: jstorm   Source File: TestTopology.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Submits the topology under the name configured at "topology.name":
 * in local mode it runs on an in-process cluster for ~200s and shuts down;
 * otherwise it is handed to the remote cluster via StormSubmitter.
 */
private static void submitTopology(TopologyBuilder builder) {
    try {
        if (local_mode(conf)) {
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology(
                    String.valueOf(conf.get("topology.name")), conf,
                    builder.createTopology());
            Thread.sleep(200000);
            cluster.shutdown();
        } else {
            StormSubmitter.submitTopology(
                    String.valueOf(conf.get("topology.name")), conf,
                    builder.createTopology());
        }
    } catch (Exception e) {
        // BUG FIX: previously logged e.getCause(), which drops the stack
        // trace of e itself and passes null for exceptions without a cause.
        LOG.error(e.getMessage(), e);
    }
}
 
Example 13
Source Project: jstorm   Source File: SlidingWindowTopology.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Count-window demo: sums a random-integer stream over a sliding window of
 * 30 tuples (sliding by 10), averages over tumbling windows of 3, and prints
 * the result. Runs 60s and asserts no tuples failed.
 */
public static void test() {

    // Topology name = simple name of the calling class.
    String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = className[className.length - 1];

    try {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("integer", new RandomIntegerSpout(), 1);
        builder.setBolt("slidingsum", new SlidingWindowSumBolt().withWindow(new Count(30), new Count(10)), 1)
                .shuffleGrouping("integer");
        builder.setBolt("tumblingavg", new TumblingWindowAvgBolt().withTumblingWindow(new Count(3)), 1)
                .shuffleGrouping("slidingsum");
        builder.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("tumblingavg");

        conf.setDebug(true);

        JStormHelper.runTopology(builder.createTopology(), topologyName, conf, 60,
                new JStormHelper.CheckAckedFail(conf), isLocal);
    } catch (Exception e) {
        // BUG FIX: was e.fillInStackTrace(), which only rewrites the stack
        // trace and prints nothing — actually report the failure cause.
        e.printStackTrace();
        Assert.fail("Failed to submit topology");
    }
}
 
Example 14
Source Project: jstorm   Source File: PerformanceTestTopology.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Submits the performance-test topology (spout -> shuffle -> bolt) to a
 * remote cluster in distributed mode. The topology name comes from
 * Config.TOPOLOGY_NAME, falling back to the caller's simple class name.
 */
public static void SetRemoteTopology()
        throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException {
    String topologyName = (String) conf.get(Config.TOPOLOGY_NAME);
    if (topologyName == null) {
        String[] nameParts = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
        topologyName = nameParts[nameParts.length - 1];
    }

    int spoutParallelism = JStormUtils.parseInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
    int boltParallelism = JStormUtils.parseInt(conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new TestSpout(), spoutParallelism);
    // localFirstGrouping is a JStorm-only alternative to shuffle here.
    builder.setBolt("bolt", new TestBolt(), boltParallelism).shuffleGrouping("spout");

    conf.put(Config.STORM_CLUSTER_MODE, "distributed");

    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
}
 
Example 15
Source Project: jstorm   Source File: TridentMapExample.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Runs the trident map-example topology (built by buildTopology) for 60
 * seconds and validates results through the DRPC validator.
 */
public static void test() {
    // NOTE(review): this builder — and the spout/bolt wired onto it — is
    // never submitted; buildTopology(drpc) below is what actually runs.
    // Looks like dead setup code; confirm before removing.
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new InOrderSpout(), 8);
    builder.setBolt("count", new Check(), 8).fieldsGrouping("spout", new Fields("c1"));

    Config conf = new Config();
    conf.setMaxSpoutPending(20);

    // Topology name = simple name of the calling class.
    String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = className[className.length - 1];

    // Use an in-process DRPC server when running locally.
    if (isLocal) {
        drpc = new LocalDRPC();
    }

    try {
        JStormHelper.runTopology(buildTopology(drpc), topologyName, conf, 60, new DrpcValidator(), isLocal);
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        Assert.fail("Failed");
    }
}
 
Example 16
Source Project: jstorm   Source File: RollingTopWordsTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Rolling top-words pipeline: word spout -> rolling count over a 9s window
 * emitted every 3s (tick-tuple driven) -> per-task intermediate rankings ->
 * a single global total-rankings bolt. Runs 90s with no validator.
 */
@Test
public void testRollingTopWords()
{
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout("windowTestWordSpout", new WindowTestWordSpout(), 5);
    topologyBuilder.setBolt("windowTestRollingCountBolt", new WindowTestRollingCountBolt(9, 3), 4)
            .fieldsGrouping("windowTestWordSpout", new Fields("word")).addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 3);
    topologyBuilder.setBolt("windowTestIntermediateRankingBolt", new WindowTestIntermediateRankingBolt(DEFAULT_COUNT), 4)
            .fieldsGrouping("windowTestRollingCountBolt", new Fields("obj"));
    topologyBuilder.setBolt("windowTestTotalRankingsBolt", new WindowTestTotalRankingsBolt(DEFAULT_COUNT))
            .globalGrouping("windowTestIntermediateRankingBolt");

    // Typed map instead of a raw HashMap (avoids unchecked-conversion warnings).
    Map<String, Object> config = new HashMap<>();
    config.put(Config.TOPOLOGY_NAME, "RollingTopWordsTest");

    // Tick timing is imprecise, so the windowed output is not deterministic;
    // no validator is supplied — this is a run-to-completion smoke test.
    JStormUnitTestRunner.submitTopology(topologyBuilder.createTopology(), config, 90, null);
}
 
Example 17
Source Project: storm-camel-example   Source File: Runner.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Word-count topology that publishes (word, count) pairs as JSON text
 * messages to a JMS queue obtained from a Spring application context.
 * Runs on an in-process local cluster.
 */
public static final void main(final String[] args) throws Exception {
    final ApplicationContext applicationContext = new ClassPathXmlApplicationContext("applicationContext.xml");
    final JmsProvider jmsQueueProvider = new SpringJmsProvider(applicationContext, "jmsConnectionFactory",
            "notificationQueue");

    // Bolt that serializes each (word, count) tuple to a JSON text message.
    final JmsBolt jmsBolt = new JmsBolt();
    jmsBolt.setJmsProvider(jmsQueueProvider);
    jmsBolt.setJmsMessageProducer((session, input) -> {
        final String json = "{\"word\":\"" + input.getString(0) + "\", \"count\":" + String.valueOf(input.getInteger(1)) + "}";
        return session.createTextMessage(json);
    });

    final TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("wordGenerator", new RandomWordFeeder());
    builder.setBolt("counter", new WordCounterBolt()).shuffleGrouping("wordGenerator");
    builder.setBolt("jmsBolt", jmsBolt).shuffleGrouping("counter");

    final Config config = new Config();
    config.setDebug(false);

    new LocalCluster().submitTopology("word-count", config, builder.createTopology());
}
 
Example 18
Source Project: jstorm   Source File: TridentFastWordCount.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Runs the trident fast word-count topology (built by buildTopology) for
 * 60 seconds and asserts, via JStormHelper.CheckAckedFail, that no tuples
 * failed.
 */
public static void test() {
    // NOTE(review): this builder is created but never used —
    // buildTopology(drpc) below supplies the submitted topology.
    // Confirm before removing.
    TopologyBuilder builder = new TopologyBuilder();


    // Topology name = simple name of the calling class.
    String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = className[className.length - 1];

    // Use an in-process DRPC server when running locally.
    if (isLocal) {
        drpc = new LocalDRPC();
    }

    try {
        JStormHelper.runTopology(buildTopology(drpc), topologyName, conf, 60, new JStormHelper.CheckAckedFail(conf),
                isLocal);
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        Assert.fail("Failed");
    }
}
 
Example 19
Source Project: storm-benchmark   Source File: Grep.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Grep benchmark topology: Kafka spout -> pattern-matching bolt ->
 * match-counting bolt, with counts keyed by the matcher's output field.
 *
 * @param config benchmark config supplying parallelism and the pattern
 * @return the assembled topology
 */
@Override
public StormTopology getTopology(Config config) {

  final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
  final int matcherNum = BenchmarkUtils.getInt(config, FM_NUM, DEFAULT_MAT_BOLT_NUM);
  final int counterNum = BenchmarkUtils.getInt(config, CM_NUM, DEFAULT_CNT_BOLT_NUM);
  final String pattern = (String) Utils.get(config, PATTERN_STRING, DEFAULT_PATTERN_STR);

  spout = new KafkaSpout(KafkaUtils.getSpoutConfig(config, new SchemeAsMultiScheme(new StringScheme())));

  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout(SPOUT_ID, spout, spoutNum);
  // Prefer in-worker delivery from spout to matcher where possible.
  builder.setBolt(FM_ID, new FindMatchingSentence(pattern), matcherNum)
          .localOrShuffleGrouping(SPOUT_ID);
  builder.setBolt(CM_ID, new CountMatchingSentence(), counterNum)
          .fieldsGrouping(FM_ID, new Fields(FindMatchingSentence.FIELDS));

  return builder.createTopology();
}
 
Example 20
Source Project: storm-benchmark   Source File: RollingSort.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * RollingSort benchmark topology: a random-message spout feeding sort bolts
 * that accumulate fixed-size chunks and emit on a timer.
 *
 * @param config benchmark config supplying parallelism, message size,
 *               chunk size and emit frequency
 * @return the assembled topology
 */
@Override
public StormTopology getTopology(Config config) {
  final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
  final int sortBoltNum = BenchmarkUtils.getInt(config, SORT_BOLT_NUM, DEFAULT_SORT_BOLT_NUM);
  final int msgSize = BenchmarkUtils.getInt(config, RandomMessageSpout.MESSAGE_SIZE, RandomMessageSpout.DEFAULT_MESSAGE_SIZE);
  final int chunkSize = BenchmarkUtils.getInt(config, SortBolt.CHUNK_SIZE, SortBolt.DEFAULT_CHUNK_SIZE);
  final int emitFreq = BenchmarkUtils.getInt(config, SortBolt.EMIT_FREQ, SortBolt.DEFAULT_EMIT_FREQ);

  spout = new RandomMessageSpout(msgSize, BenchmarkUtils.ifAckEnabled(config));

  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout(SPOUT_ID, spout, spoutNum);
  builder.setBolt(SORT_BOLT_ID, new SortBolt(emitFreq, chunkSize), sortBoltNum)
          .localOrShuffleGrouping(SPOUT_ID);
  return builder.createTopology();
}
 
Example 21
Source Project: PoseidonX   Source File: JStormApplication.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Default constructor: creates the topology builder, derives the Storm
 * configuration from the streaming config plus the CQL client's customized
 * configuration map, and initializes the security context.
 *
 * @param config        streaming configuration
 * @param appName       application (topology) name
 * @param driverContext driver context supplying the CQL client
 * @throws StreamingException presumably if security creation fails —
 *         TODO confirm against SecurityFactory.createSecurity
 */
public JStormApplication(StreamingConfig config, String appName, DriverContext driverContext) throws StreamingException {


	super(appName, config);

       this.driverContext = driverContext;
	builder = new TopologyBuilder();
	stormConf = new StormConf(config,this.driverContext.getCqlClient().getCustomizedConfigurationMap());
	streamingSecurity = SecurityFactory.createSecurity(config);
}
 
Example 22
/**
 * Streams traffic-log tuples from a file spout through a speed-limit bolt
 * into an Apache Ignite cache via StormStreamer. Runs on an in-process
 * cluster for ten seconds, then shuts down.
 */
public static void main(String[] args) throws Exception {
    if (getProperties() == null || getProperties().isEmpty()) {
        System.out.println("Property file <ignite-storm.property> is not found or empty");
        return;
    }

    // Bolt that streams tuples into the configured Ignite cache.
    final StormStreamer<String, String> streamer = new StormStreamer<>();
    streamer.setAutoFlushFrequency(10L);
    streamer.setAllowOverwrite(true);
    streamer.setCacheName(getProperties().getProperty("cache.name"));
    streamer.setIgniteTupleField(getProperties().getProperty("tuple.name"));
    streamer.setIgniteConfigFile(getProperties().getProperty("ignite.spring.xml"));

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new FileSourceSpout(), 1);
    builder.setBolt("limit", new SpeedLimitBolt(), 1).fieldsGrouping("spout", new Fields("trafficLog"));
    builder.setBolt("ignite-bolt", streamer, STORM_EXECUTORS).shuffleGrouping("limit");

    Config conf = new Config();
    conf.setDebug(false);
    conf.setMaxTaskParallelism(1);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("speed-violation", conf, builder.createTopology());
    Thread.sleep(10000);
    cluster.shutdown();
}
 
Example 23
Source Project: jstorm   Source File: SequenceTopology.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Runs the SplitMerge sequence topology on an in-process cluster for one
 * minute, then kills the topology and shuts the cluster down.
 */
public static void SetLocalTopology() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    conf.put(TOPOLOGY_BOLT_PARALLELISM_HINT, 1);
    SetBuilder(builder, conf);

    LOG.debug("test");
    LOG.info("Submit log");

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("SplitMerge", conf, builder.createTopology());

    Thread.sleep(60000);
    cluster.killTopology("SplitMerge");
    cluster.shutdown();
}
 
Example 24
Source Project: storm-kafka-examples   Source File: CounterTopology.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Reads orders from a Kafka topic via KafkaSpout, checks and counts them.
 * With an argument it submits remotely under args[0]; otherwise it runs on
 * an in-process cluster for ~500 seconds.
 *
 * @param args optional: args[0] is the remote topology name
 * @see <a href="http://www.programcreek.com/java-api-examples/index.php?api=storm.kafka.KafkaSpout">KafkaSpout examples</a>
 */
public static void main(String[] args) {
    try {
        // Spout setup: 3 executors reading topic "order" from the ZooKeeper
        // ensemble backing Kafka; offsets live under /order with group "id".
        String zkhost = "wxb-1:2181,wxb-2:2181,wxb-3:2181";
        String topic = "order";
        String groupId = "id";
        int spoutNum = 3;
        int boltNum = 1;

        ZkHosts zkHosts = new ZkHosts(zkhost);
        SpoutConfig spoutConfig = new SpoutConfig(zkHosts, topic, "/order", groupId);
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", kafkaSpout, spoutNum);
        builder.setBolt("check", new CheckOrderBolt(), boltNum).shuffleGrouping("spout");
        builder.setBolt("counter", new CounterBolt(), boltNum).shuffleGrouping("check");

        Config config = new Config();
        config.setDebug(true);

        if (args != null && args.length > 0) {
            // Remote submission under the supplied topology name.
            config.setNumWorkers(2);
            StormSubmitter.submitTopology(args[0], config, builder.createTopology());
        } else {
            // Local run: in-process cluster for ~500s, then shut down.
            config.setMaxTaskParallelism(2);
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("Wordcount-Topology", config, builder.createTopology());
            Thread.sleep(500000);
            cluster.shutdown();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Example 25
Source Project: incubator-heron   Source File: TaskHookTopology.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Acking word-count topology demonstrating Heron task hooks plus component
 * and container resource configuration. Requires exactly one argument: the
 * topology name.
 */
public static void main(String[] args) throws Exception {
    if (args.length != 1) {
        throw new RuntimeException("Specify topology name");
    }

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word", new AckingTestWordSpout(), 2);
    builder.setBolt("count", new CountBolt(), 2).shuffleGrouping("word");

    Config conf = new Config();
    conf.setDebug(true);
    // Effectively unbounded pending tuples so the spout is never throttled.
    conf.setMaxSpoutPending(1000 * 1000 * 1000);
    // Acking requires at least one acker.
    conf.setNumAckers(1);
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");

    // Register the custom task hook on every task.
    List<String> taskHooks = new LinkedList<>();
    taskHooks.add("org.apache.heron.examples.TaskHookTopology$TestTaskHook");
    org.apache.heron.api.Config.setAutoTaskHooks(conf, taskHooks);

    // Per-component RAM requests.
    org.apache.heron.api.Config.setComponentRam(conf, "word", ByteAmount.fromMegabytes(512));
    org.apache.heron.api.Config.setComponentRam(conf, "count", ByteAmount.fromMegabytes(512));

    // Per-container disk/RAM/CPU requests.
    org.apache.heron.api.Config.setContainerDiskRequested(conf, ByteAmount.fromGigabytes(2));
    org.apache.heron.api.Config.setContainerRamRequested(conf, ByteAmount.fromGigabytes(2));
    org.apache.heron.api.Config.setContainerCpuRequested(conf, 2);

    conf.setNumWorkers(2);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
 
Example 26
Source Project: eagle   Source File: AggregationApplication.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds the MR-history aggregation topology: parses metric names and
 * group-by column sets from config, then wires an aggregation spout into an
 * aggregation bolt with shuffle grouping.
 *
 * @param config      config supplying "aggregate.counters.metrics" (comma-
 *                    separated) and "aggregate.counters.groupBys"
 *                    (semicolon-separated lists of comma-separated columns)
 * @param environment Storm environment (unused here)
 * @return the assembled topology
 */
@Override
public StormTopology execute(Config config, StormEnvironment environment) {
    // Metric names: comma-separated, trimmed, lower-cased.
    List<String> metricNames = new ArrayList<>();
    for (String name : config.getString("aggregate.counters.metrics").split(",")) {
        metricNames.add(name.trim().toLowerCase());
    }

    // Group-by sets: semicolon-separated lists of comma-separated columns.
    List<String> groupByColumns = new ArrayList<>();
    for (String cols : config.getString("aggregate.counters.groupBys").split(";")) {
        groupByColumns.add(cols.trim());
    }

    // metric -> list of column groups to aggregate on.
    Map<String, List<List<String>>> metrics = new HashMap<>();
    for (String metric : metricNames) {
        List<List<String>> groups = new ArrayList<>();
        for (String cols : groupByColumns) {
            groups.add(Arrays.asList(cols.replaceAll(" ", "").split(",")));
        }
        metrics.put(metric, groups);
    }

    TopologyBuilder topologyBuilder = new TopologyBuilder();
    String spoutName = "mrHistoryAggregationSpout";
    String boltName = "mrHistoryAggregationBolt";
    AggregationConfig aggConfig = AggregationConfig.newInstance(config);

    int spoutTasks = aggConfig.getConfig().getInt("stormConfig." + spoutName + "Tasks");
    topologyBuilder.setSpout(
        spoutName,
        new AggregationSpout(aggConfig, new MRMetricsAggregateContainer(metrics, aggConfig)),
        spoutTasks
    ).setNumTasks(spoutTasks);

    int boltTasks = aggConfig.getConfig().getInt("stormConfig." + boltName + "Tasks");
    topologyBuilder.setBolt(boltName,
        new AggregationBolt(aggConfig.getStormConfig(), new MRMetricsAggregateContainer(metrics, aggConfig)),
        boltTasks).setNumTasks(boltTasks).shuffleGrouping(spoutName);

    return topologyBuilder.createTopology();
}
 
Example 27
Source Project: eagle   Source File: HadoopQueueRunningApp.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds the running-queue monitoring topology: a single-task queue spout,
 * a metric persist bolt, and a Kafka sink, each hop shuffle-grouped.
 *
 * @param config      application config
 * @param environment provides the stream sink bolt
 * @return the assembled topology
 */
public StormTopology execute(Config config, StormEnvironment environment) {
    HadoopQueueRunningAppConfig appConfig = new HadoopQueueRunningAppConfig(config);

    final String spoutName = "runningQueueSpout";
    final String persistBoltName = "persistBolt";

    final int numOfSpoutTasks = 1;
    final int numOfPersistTasks = appConfig.topology.numPersistTasks;
    final int numOfSinkTasks = appConfig.topology.numSinkTasks;

    TopologyBuilder builder = new TopologyBuilder();

    IRichSpout spout = new HadoopQueueRunningSpout(appConfig);
    builder.setSpout(spoutName, spout, numOfSpoutTasks).setNumTasks(numOfSpoutTasks);

    HadoopQueueMetricPersistBolt persistBolt = new HadoopQueueMetricPersistBolt(appConfig);
    builder.setBolt(persistBoltName, persistBolt, numOfPersistTasks)
            .setNumTasks(numOfPersistTasks).shuffleGrouping(spoutName);

    StormStreamSink queueSinkBolt = environment.getStreamSink("HADOOP_QUEUE_STREAM", config);
    builder.setBolt("queueKafkaSink", queueSinkBolt, numOfSinkTasks)
            .setNumTasks(numOfSinkTasks).shuffleGrouping(persistBoltName);

    return builder.createTopology();
}
 
Example 28
Source Project: jstorm   Source File: SkewedRollingTopWords.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Creates the skewed rolling-top-words runner: prepares the topology
 * builder and default run configuration, then wires the topology.
 *
 * @param topologyName name under which the topology will be submitted
 * @throws InterruptedException presumably propagated from wireTopology or
 *         configuration setup — TODO confirm which callee throws it
 */
public SkewedRollingTopWords(String topologyName) throws InterruptedException {
    builder = new TopologyBuilder();
    this.topologyName = topologyName;
    topologyConfig = createTopologyConfiguration();
    runtimeInSeconds = DEFAULT_RUNTIME_IN_SECONDS;

    wireTopology();
}
 
Example 29
Source Project: flink-perf   Source File: ThroughputHostsTracking.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Throughput benchmark: a generator spout feeding a chain of pass-through
 * repartition bolts (one per extra repartition) into a sink, fields-grouped
 * on "id" at every hop. Submits remotely unless --local is given.
 */
public static void main(String[] args) throws Exception {
    ParameterTool pt = ParameterTool.fromArgs(args);
    int parallelism = pt.getInt("para");

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("source0", new Generator(pt), pt.getInt("sourceParallelism"));

    // Chain "repartitions"-1 pass-through bolts: source0 -> source1 -> ...
    int hop = 0;
    for (; hop < pt.getInt("repartitions", 1) - 1; hop++) {
        System.out.println("adding source" + hop + " --> source" + (hop + 1));
        builder.setBolt("source" + (hop + 1), new RepartPassThroughBolt(pt), pt.getInt("sinkParallelism"))
                .fieldsGrouping("source" + hop, new Fields("id"));
    }
    System.out.println("adding final source" + hop + " --> sink");

    builder.setBolt("sink", new Sink(pt), pt.getInt("sinkParallelism"))
            .fieldsGrouping("source" + hop, new Fields("id"));

    Config conf = new Config();
    conf.setDebug(false);

    if (!pt.has("local")) {
        conf.setNumWorkers(parallelism);
        StormSubmitter.submitTopologyWithProgressBar("throughput-" + pt.get("name", "no_name"), conf,
                builder.createTopology());
    } else {
        conf.setMaxTaskParallelism(parallelism);

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("throughput", conf, builder.createTopology());

        Thread.sleep(300000);

        cluster.shutdown();
    }
}
 
Example 30
Source Project: jstorm   Source File: FastWordCountTopNWindowTopology.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Top-N word count over 10-second time windows: sentences are split into
 * words, counted per window (retaining 120s of state), and each task's
 * partial top-10 is merged by a single MergeTopN bolt. Runs 60s under
 * JStormHelper's acked/failed check.
 */
public static void test() {
    int spoutParallelism = JStormUtils.parseInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
    int splitParallelism = JStormUtils.parseInt(conf.get(TOPOLOGY_SPLIT_PARALLELISM_HINT), 1);
    int countParallelism = JStormUtils.parseInt(conf.get(TOPOLOGY_COUNT_PARALLELISM_HINT), 1);

    final int topN = 10;
    final Time window = Time.seconds(10L);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new FastRandomSentenceSpout(), spoutParallelism);
    builder.setBolt("split", new SplitSentence(), splitParallelism).shuffleGrouping("spout");
    builder.setBolt("count",
            new WordCount(topN).timeWindow(window).withStateSize(Time.seconds(120L)),
            countParallelism)
            .fieldsGrouping("split", new Fields("word"));
    // The single merger sees every count task's partial top-N via allGrouping.
    builder.setBolt("merge", new MergeTopN(topN).timeWindow(window), 1).allGrouping("count");

    // Topology name = simple name of the calling class.
    String[] nameParts = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = nameParts[nameParts.length - 1];

    try {
        JStormHelper.runTopology(builder.createTopology(), topologyName, conf, 60,
                new JStormHelper.CheckAckedFail(conf), true);
    } catch (Exception e) {
        e.printStackTrace();
    }
}