Java Code Examples for org.apache.storm.topology.TopologyBuilder

The following examples show how to use org.apache.storm.topology.TopologyBuilder. They are extracted from open source projects; the source project, file, and license are noted above each example where available.
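Most of the examples below follow the same pattern: create a TopologyBuilder, register spouts and bolts by id with a parallelism hint, wire them together with groupings, then submit the resulting topology either to a cluster via StormSubmitter or to a LocalCluster for local testing. The minimal sketch below illustrates that flow; it assumes the usual imports (org.apache.storm.Config, LocalCluster, StormSubmitter, org.apache.storm.tuple.Fields, org.apache.storm.utils.Utils), and WordSpout and WordCountBolt are illustrative stand-ins for any spout and bolt implementations, not classes from a specific project.

public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    // Register components by id; the last argument is the parallelism hint.
    // WordSpout and WordCountBolt are hypothetical placeholder components.
    builder.setSpout("words", new WordSpout(), 1);
    builder.setBolt("counter", new WordCountBolt(), 2)
            .fieldsGrouping("words", new Fields("word"));

    Config conf = new Config();
    conf.setDebug(false);

    if (args != null && args.length > 0) {
        // Submit to a cluster, using the first argument as the topology name.
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        // Run in local mode for testing.
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("example", conf, builder.createTopology());
        Utils.sleep(10000);
        cluster.killTopology("example");
        cluster.shutdown();
    }
}
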
Example 1
Source Project: storm-nginx-log   Source File: NginxStorm.java    License: MIT License
public static void main(String[] argv) throws InterruptedException {

        Config config = new Config();
        config.setDebug(true);

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("LogSpout", new LogSpout(), 1);
        builder.setBolt("SpliteBolt", new SpliteBolt(), 1).shuffleGrouping("LogSpout");
        builder.setBolt("CounterBolt", new CounterBolt(), 1)
                .fieldsGrouping("SpliteBolt", new Fields("item"));

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("NginxLog", config, builder.createTopology());
//        Thread.sleep(10000);
//
//        cluster.killTopology("NginxLog");
//        cluster.shutdown();
    }
 
Example 2
Source Project: elasticsearch-hadoop   Source File: StreamToEs.java    License: Apache License 2.0
public static void submitJob(String principal, String keytab, String esNodes) throws Exception {
    List doc1 = Collections.singletonList("{\"reason\" : \"business\",\"airport\" : \"SFO\"}");
    List doc2 = Collections.singletonList("{\"participants\" : 5,\"airport\" : \"OTP\"}");

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("Input", new TestSpout(ImmutableList.of(doc1, doc2), new Fields("json"), true));
    builder.setBolt("ES", new EsBolt("storm-test"))
            .shuffleGrouping("Input")
            .addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 2);

    // Nimbus needs to be started with the cred renewer and credentials plugins set in its config file

    Config conf = new Config();
    List<Object> plugins = new ArrayList<Object>();
    plugins.add(AutoElasticsearch.class.getName());
    conf.put(Config.TOPOLOGY_AUTO_CREDENTIALS, plugins);
    conf.put(ConfigurationOptions.ES_NODES, esNodes);
    conf.put(ConfigurationOptions.ES_SECURITY_AUTHENTICATION, "kerberos");
    conf.put(ConfigurationOptions.ES_NET_SPNEGO_AUTH_ELASTICSEARCH_PRINCIPAL, "HTTP/[email protected]");
    conf.put(ConfigurationOptions.ES_INPUT_JSON, "true");
    StormSubmitter.submitTopology("test-run", conf, builder.createTopology());
}
 
Example 3
Source Project: storm-net-adapter   Source File: MultipleLoggerTopology.java    License: Apache License 2.0
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("word", new TestWordSpout(), 10);
    builder.setBolt("exclaim1", new ExclamationLoggingBolt(), 3).shuffleGrouping("word");
    builder.setBolt("exclaim2", new ExclamationLoggingBolt(), 2).shuffleGrouping("exclaim1");

    Config conf = new Config();
    conf.setDebug(true);
    String topoName = MultipleLoggerTopology.class.getName();
    if (args != null && args.length > 0) {
        topoName = args[0];
    }
    conf.setNumWorkers(2);
    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology());
}
 
Example 4
@Test
public void testMultiIndexRead() throws Exception {
    testRuns++;

    RestUtils.postData(index + "/foo",
            "{\"message\" : \"Hello World\",\"message_date\" : \"2014-05-25\"}".getBytes());
    RestUtils.postData(index + "/bar",
            "{\"message\" : \"Goodbye World\",\"message_date\" : \"2014-05-25\"}".getBytes());
    RestUtils.refresh(index);

    String target = "_all/foo";
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("es-spout", new TestSpout(new EsSpout(target)));
    builder.setBolt("test-bolt", new CapturingBolt()).shuffleGrouping("es-spout");

    MultiIndexSpoutStormSuite.run(index + "multi", builder.createTopology(), COMPONENT_HAS_COMPLETED);

    assumeTrue(COMPONENT_HAS_COMPLETED.is(2));
    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(10));

    String results = RestUtils.get(target + "/_search?");
    assertThat(results, containsString("Hello"));

    assertThat(CapturingBolt.CAPTURED.size(), greaterThanOrEqualTo(testRuns));
}
 
Example 5
public static void main(String[] args) throws Exception {
    LocalCluster cluster = new LocalCluster();
    TopologyBuilder builder = new TopologyBuilder();

    Config conf = new Config();
    conf.setDebug(false);
    conf.setNumWorkers(1);
    conf.setMaxTaskParallelism(1);
    //Disable Acking
    conf.setNumAckers(0);

    KeyedScottyWindowOperator scottyBolt = new KeyedScottyWindowOperator<Integer, Integer>(new Sum(), 0);
    scottyBolt.addWindow(new TumblingWindow(WindowMeasure.Time, 1000));
    scottyBolt.addWindow(new SlidingWindow(WindowMeasure.Time, 1000, 250));
    scottyBolt.addWindow(new SessionWindow(WindowMeasure.Time, 1000));

    builder.setSpout("spout", new DataGeneratorSpout());
    builder.setBolt("scottyWindow", scottyBolt).fieldsGrouping("spout", new Fields("key"));
    builder.setBolt("printer", new PrinterBolt()).shuffleGrouping("scottyWindow");

    cluster.submitTopology("testTopology", conf, builder.createTopology());
    //cluster.killTopology("testTopology");
    //cluster.shutdown();
}
 
Example 6
private static TopologyBuilder buildTopology() throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        String topicName = Configuration.getConfig().getString("rtc.mq.spout.topic");
        String groupName = Configuration.getConfig().getString("rtc.mq.spout.group");
        BrokerHosts hosts = new ZkHosts(Configuration.getConfig().getString("rtc.zk.hosts"));
        SpoutConfig spoutConfig = new SpoutConfig(hosts, topicName, "/consumers", groupName);

        spoutConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
        spoutConfig.zkServers = Arrays.asList(Configuration.getConfig().getString("rtc.storm.zkServers").split(","));
        spoutConfig.zkPort = Configuration.getConfig().getInt("rtc.storm.zkPort");
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
        builder.setSpout("MQSpout", kafkaSpout, Configuration.getConfig().getInt("rtc.storm.spout.parallelismHint")).setNumTasks(Configuration.getConfig().getInt("rtc.storm.spout.task"));
        builder.setBolt("ExtractBolt", new ExtractBolt(), Configuration.getConfig().getInt("rtc.storm.extract.bolt.parallelismHint")).setNumTasks(Configuration.getConfig().getInt("rtc.storm.extract.bolt.task")).shuffleGrouping("MQSpout");
        builder.setBolt("Statistic", new StatisticBolt(), Configuration.getConfig().getInt("rtc.storm.statistic.bolt.parallelismHint")).setNumTasks(Configuration.getConfig().getInt("rtc.storm.statistic.bolt.task")).fieldsGrouping("ExtractBolt", new Fields(new String[]{"hashKeys"}));
//        builder.setBolt("Alarm", new AlarmBolt(), Configuration.getConfig().getInt("rtc.storm.alarm.bolt.parallelismHint")).setNumTasks(Configuration.getConfig().getInt("rtc.storm.alarm.bolt.task")).fieldsGrouping("Statistic", new Fields(new String[]{"EventName"}));
        return builder;
    }
 
Example 7
Source Project: storm-net-adapter   Source File: WordCountTopologyNode.java    License: Apache License 2.0
public static void main(String[] args) throws Exception {

        TopologyBuilder builder = new TopologyBuilder();

        builder.setSpout("spout", new RandomSentence(), 5);

        builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
        builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));

        Config conf = new Config();
        conf.setDebug(true);
        String topoName = "word-count";
        if (args != null && args.length > 0) {
            topoName = args[0];
        }
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology());
    }
 
Example 8
Source Project: breeze   Source File: TopologyFactoryBean.java    License: Apache License 2.0
private StormTopology build() {
	run();
	verify();

	Map<String,BoltDeclarer> declaredBolts = new HashMap<>();

	TopologyBuilder builder = new TopologyBuilder();
	for (Map.Entry<ConfiguredSpout,List<ConfiguredBolt>> line : entrySet()) {
		ConfiguredSpout spout = line.getKey();
		String lastId = spout.getId();
		String streamId = spout.getOutputStreamId();
		builder.setSpout(lastId, spout, spout.getParallelism());
		for (ConfiguredBolt bolt : line.getValue()) {
			String id = bolt.getId();
			BoltDeclarer declarer = declaredBolts.get(id);
			if (declarer == null)
				declarer = builder.setBolt(id, bolt, bolt.getParallelism());
			declarer.noneGrouping(lastId, streamId);
			if (declaredBolts.put(id, declarer) != null) break;
			lastId = id;
			streamId = bolt.getOutputStreamId();
		}
	}

	return builder.createTopology();
}
 
Example 9
Source Project: elasticsearch-hadoop   Source File: StreamFromEs.java    License: Apache License 2.0
public static void submitJob(String principal, String keytab, String esNodes) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("ES", new EsSpout("storm-test"));
    builder.setBolt("Output", new CapturingBolt()).shuffleGrouping("ES");

    // Nimbus needs to be started with the cred renewer and credentials plugins set in its config file

    Config conf = new Config();
    List<Object> plugins = new ArrayList<Object>();
    plugins.add(AutoElasticsearch.class.getName());
    conf.put(Config.TOPOLOGY_AUTO_CREDENTIALS, plugins);
    conf.put(ConfigurationOptions.ES_NODES, esNodes);
    conf.put(ConfigurationOptions.ES_SECURITY_AUTHENTICATION, "kerberos");
    conf.put(ConfigurationOptions.ES_NET_SPNEGO_AUTH_ELASTICSEARCH_PRINCIPAL, "HTTP/[email protected]");
    conf.put(ConfigurationOptions.ES_INPUT_JSON, "true");
    StormSubmitter.submitTopology("test-read", conf, builder.createTopology());
}
 
Example 10
@Test
public void testSimpleWriteTopology() throws Exception {
    List doc1 = Collections.singletonList("{\"reason\" : \"business\",\"airport\" : \"SFO\"}");
    List doc2 = Collections.singletonList("{\"participants\" : 5,\"airport\" : \"OTP\"}");

    String target = index + "/json-simple-write";
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("test-spout-1", new TestSpout(ImmutableList.of(doc1, doc2), new Fields("json")));
    builder.setBolt("es-bolt-1", new TestBolt(new EsBolt(target, conf))).shuffleGrouping("test-spout-1");

    assumeTrue(COMPONENT_HAS_COMPLETED.is(2));

    MultiIndexSpoutStormSuite.run(index + "json-simple", builder.createTopology(), COMPONENT_HAS_COMPLETED);

    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(10));

    RestUtils.refresh(index);
    assertTrue(RestUtils.exists(target));
    String results = RestUtils.get(target + "/_search?");
    assertThat(results, containsString("SFO"));
}
 
Example 11
Source Project: ranger   Source File: StormRangerAuthorizerTest.java    License: Apache License 2.0
@Test
public void testCreateTopologyBob() throws Exception {
    final Config conf = new Config();
    conf.setDebug(true);

    final TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("words", new WordSpout());
    builder.setBolt("counter", new WordCounterBolt()).shuffleGrouping("words");

    final Subject subject = new Subject();
    subject.getPrincipals().add(new SimplePrincipal("bob"));
    Subject.doAs(subject, new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            try {
                cluster.submitTopology("word-count2", conf, builder.createTopology());
                Assert.fail("Authorization failure expected");
            } catch (Exception ex) {
                // expected
            }

            return null;
        }
    });
}
 
Example 12
Source Project: storm-crawler   Source File: ConfigurableTopology.java    License: Apache License 2.0
/** Submits the topology under a specific name. */
protected int submit(String name, Config conf, TopologyBuilder builder) {

    // register Metadata for serialization with FieldsSerializer
    Config.registerSerialization(conf, Metadata.class);

    if (isLocal) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(name, conf, builder.createTopology());
        if (ttl != -1) {
            Utils.sleep(ttl * 1000);
            cluster.shutdown();
        }
    } else {
        try {
            StormSubmitter.submitTopology(name, conf,
                    builder.createTopology());
        } catch (Exception e) {
            e.printStackTrace();
            return -1;
        }
    }
    return 0;
}
 
Example 13
Source Project: ranger   Source File: StormRangerAuthorizerTest.java    License: Apache License 2.0
@org.junit.BeforeClass
public static void setup() throws Exception {
    cluster = new LocalCluster();

    final Config conf = new Config();
    conf.setDebug(true);

    final TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("words", new WordSpout());
    builder.setBolt("counter", new WordCounterBolt()).shuffleGrouping("words");

    // bob can create a new topology
    final Subject subject = new Subject();
    subject.getPrincipals().add(new SimplePrincipal("bob"));
    Subject.doAs(subject, new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            cluster.submitTopology("word-count", conf, builder.createTopology());
            return null;
        }
    });

}
 
Example 14
Source Project: incubator-heron   Source File: SentenceWordCountTopology.java    License: Apache License 2.0
public static void main(String[] args) throws Exception {
  String name = "fast-word-count-topology";
  if (args != null && args.length > 0) {
    name = args[0];
  }

  TopologyBuilder builder = new TopologyBuilder();

  builder.setSpout("spout", new FastRandomSentenceSpout(), 1);
  builder.setBolt("split", new SplitSentence(), 2).shuffleGrouping("spout");
  builder.setBolt("count", new WordCount(), 2).fieldsGrouping("split", new Fields("word"));

  Config conf = new Config();

  StormSubmitter.submitTopology(name, conf, builder.createTopology());
}
 
Example 15
Source Project: incubator-heron   Source File: SlidingWindowTopology.java    License: Apache License 2.0
public static void main(String[] args) throws Exception {
  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("integer", new RandomIntegerSpout(), 1);
  builder.setBolt("slidingsum", new SlidingWindowSumBolt()
      .withWindow(BaseWindowedBolt.Count.of(30), BaseWindowedBolt.Count.of(10)), 1)
      .shuffleGrouping("integer");
  builder.setBolt("tumblingavg", new TumblingWindowAvgBolt()
      .withTumblingWindow(BaseWindowedBolt.Count.of(3)), 1)
      .shuffleGrouping("slidingsum");
  builder.setBolt("printer", new PrinterBolt(), 1)
      .shuffleGrouping("tumblingavg");
  Config conf = new Config();
  conf.setDebug(true);
  String topoName = "test";

  if (args != null && args.length > 0) {
    topoName = args[0];
  }
  StormSubmitter.submitTopology(topoName, conf, builder.createTopology());
}
 
Example 16
Source Project: incubator-heron   Source File: AckingTopology.java    License: Apache License 2.0
public static void main(String[] args) throws Exception {
  if (args.length != 1) {
    throw new RuntimeException("Specify topology name");
  }
  TopologyBuilder builder = new TopologyBuilder();

  int spouts = 2;
  int bolts = 2;
  builder.setSpout("word", new AckingTestWordSpout(), spouts);
  builder.setBolt("exclaim1", new ExclamationBolt(), bolts)
      .shuffleGrouping("word");

  Config conf = new Config();
  conf.setDebug(true);

  // Put an arbitrary large number here if you don't want to slow the topology down
  conf.setMaxSpoutPending(1000 * 1000 * 1000);

  // To enable acking, we need to setEnableAcking true
  conf.setNumAckers(1);
  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");

  // Set the number of workers or stream managers
  conf.setNumWorkers(2);
  StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
 
Example 17
public static void main(String[] args) throws Exception {
  TopologyBuilder builder = new TopologyBuilder();

  builder.setSpout("word0", new TestWordSpout(), 2);
  builder.setSpout("word1", new TestWordSpout(), 2);
  builder.setSpout("word2", new TestWordSpout(), 2);
  builder.setBolt("exclaim1", new ExclamationBolt(), 2)
      .shuffleGrouping("word0")
      .shuffleGrouping("word1")
      .shuffleGrouping("word2");

  Config conf = new Config();
  conf.setDebug(true);
  conf.setMaxSpoutPending(10);
  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");
  if (args != null && args.length > 0) {
    conf.setNumWorkers(3);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
  } else {
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.killTopology("test");
    cluster.shutdown();
  }
}
 
Example 18
Source Project: storm-net-adapter   Source File: SlidingWindowTopology.java    License: Apache License 2.0
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("integer", new RandomIntegerSpout(), 1);
    builder.setBolt("slidingsum", new SlidingWindowSumBolt().withWindow(Count.of(30), Count.of(10)), 1)
           .shuffleGrouping("integer");
    builder.setBolt("tumblingavg", new TumblingWindowAvgBolt().withTumblingWindow(Count.of(3)), 1)
           .shuffleGrouping("slidingsum");
    builder.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("tumblingavg");
    Config conf = new Config();
    conf.setDebug(true);
    String topoName = "test";
    if (args != null && args.length > 0) {
        topoName = args[0];
    }
    conf.setNumWorkers(1);
    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology());
}
 
Example 19
Source Project: incubator-heron   Source File: StormEcoBuilderTest.java    License: Apache License 2.0
@Test
public void testBuildTopologyBuilder_BuildsAsExpected()
    throws IllegalAccessException, ClassNotFoundException,
    InstantiationException, NoSuchFieldException, InvocationTargetException {
  Config config = new Config();
  EcoExecutionContext context = new EcoExecutionContext(ecoTopologyDefinition, config);
  ObjectBuilder objectBuilder = new ObjectBuilder();
  subject.buildTopologyBuilder(context, objectBuilder);

  verify(mockSpoutBuilder).buildSpouts(same(context),
      any(TopologyBuilder.class), same(objectBuilder));
  verify(mockBoltBuilder).buildBolts(same(context), same(objectBuilder));
  verify(mockStreamBuilder).buildStreams(same(context), any(TopologyBuilder.class),
      same(objectBuilder));
  verify(mockComponentBuilder).buildComponents(same(context), same(objectBuilder));
}
 
Example 20
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new RandomIntegerSpout());
    builder.setBolt("sumbolt", new WindowSumBolt().withWindow(new Count(5), new Count(3))
                                                  .withMessageIdField("msgid"), 1).shuffleGrouping("spout");
    builder.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("sumbolt");
    Config conf = new Config();
    conf.setDebug(false);
    //conf.put(Config.TOPOLOGY_STATE_PROVIDER, "org.apache.storm.redis.state.RedisKeyValueStateProvider");

    String topoName = "test";
    if (args != null && args.length > 0) {
        topoName = args[0];
    }
    conf.setNumWorkers(1);
    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology());
}
 
Example 21
Source Project: storm-net-adapter   Source File: SlidingTupleTsTopology.java    License: Apache License 2.0
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    BaseWindowedBolt bolt = new SlidingWindowSumBolt()
        .withWindow(new Duration(5, TimeUnit.SECONDS), new Duration(3, TimeUnit.SECONDS))
        .withTimestampField("ts")
        .withLag(new Duration(5, TimeUnit.SECONDS));
    builder.setSpout("integer", new RandomIntegerSpout(), 1);
    builder.setBolt("slidingsum", bolt, 1).shuffleGrouping("integer");
    builder.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("slidingsum");
    Config conf = new Config();
    conf.setDebug(true);
    String topoName = "test";

    if (args != null && args.length > 0) {
        topoName = args[0];
    }

    conf.setNumWorkers(1);
    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology());
}
 
Example 22
@Test
public void testSimpleRead() throws Exception {
    String target = index + "/basic-read";

    RestUtils.touch(index);
    RestUtils.postData(target, "{\"message\" : \"Hello World\",\"message_date\" : \"2014-05-25\"}".getBytes());
    RestUtils.postData(target, "{\"message\" : \"Goodbye World\",\"message_date\" : \"2014-05-25\"}".getBytes());
    RestUtils.refresh(index);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("es-spout", new TestSpout(new EsSpout(target)));
    builder.setBolt("test-bolt", new CapturingBolt()).shuffleGrouping("es-spout");

    MultiIndexSpoutStormSuite.run(index + "simple", builder.createTopology(), COMPONENT_HAS_COMPLETED);

    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(10));

    assertTrue(RestUtils.exists(target));
    String results = RestUtils.get(target + "/_search?");
    assertThat(results, containsString("Hello"));
    assertThat(results, containsString("Goodbye"));

    System.out.println(CapturingBolt.CAPTURED);
    assertThat(CapturingBolt.CAPTURED.size(), is(2));
}
 
Example 23
Source Project: storm-net-adapter   Source File: AnchoredWordCount.java    License: Apache License 2.0
protected int run(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new RandomSentenceSpout(), 4);

    builder.setBolt("split", new SplitSentence(), 4).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 4).fieldsGrouping("split", new Fields("word"));

    Config conf = new Config();
    conf.setMaxTaskParallelism(3);

    String topologyName = "word-count";

    conf.setNumWorkers(3);

    if (args != null && args.length > 0) {
        topologyName = args[0];
    }
    return submit(topologyName, conf, builder);
}
 
Example 24
Source Project: springBoot-study   Source File: App.java    License: Apache License 2.0
public static void main(String[] args) {
    // Define a topology
    TopologyBuilder builder = new TopologyBuilder();
    // One executor (thread), the default
    builder.setSpout(test_spout, new TestSpout(), 1);
    // shuffleGrouping: tuples are distributed randomly
    // One executor (thread) and one task
    builder.setBolt(test_bolt, new TestBolt(), 1).setNumTasks(1).shuffleGrouping(test_spout);
    // fieldsGrouping: tuples are grouped by field
    // One executor (thread) and one task
    builder.setBolt(test2_bolt, new Test2Bolt(), 1).setNumTasks(1).fieldsGrouping(test_bolt, new Fields("count"));
    Config conf = new Config();
    conf.put("test", "test");
    try {
        // Run the topology
        if (args != null && args.length > 0) {
            // With arguments, submit to the cluster using the first argument as the topology name
            System.out.println("Running in remote mode");
            StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
        } else {
            // Without arguments, run in local mode
            System.out.println("Running in local mode");
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("Word-counts", conf, builder.createTopology());
            Thread.sleep(20000);
            // Shut down the local cluster
            cluster.shutdown();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Example 25
Source Project: springBoot-study   Source File: App.java    License: Apache License 2.0
public static void main(String[] args) {
    // Define a topology
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(str1, new TestSpout());
    builder.setBolt(str2, new TestBolt()).shuffleGrouping(str1);
    Config conf = new Config();
    conf.put("test", "test");
    try {
        // Run the topology
        if (args != null && args.length > 0) {
            // With arguments, submit to the cluster using the first argument as the topology name
            System.out.println("Remote mode");
            StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
        } else {
            // Without arguments, run in local mode
            System.out.println("Local mode");
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("111", conf, builder.createTopology());
//          Thread.sleep(2000);
//          // Shut down the local cluster
//          cluster.shutdown();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Example 26
Source Project: storm-crawler   Source File: ConfigurableTopology.java    License: Apache License 2.0
/** Submits the topology with the name taken from the configuration. */
protected int submit(Config conf, TopologyBuilder builder) {
    String name = ConfUtils.getString(conf, Config.TOPOLOGY_NAME);
    if (StringUtils.isBlank(name))
        throw new RuntimeException("No value found for "
                + Config.TOPOLOGY_NAME);
    return submit(name, conf, builder);
}
 
Example 27
Source Project: monasca-thresh   Source File: TopologyModule.java    License: Apache License 2.0
@Provides
StormTopology topology() {
  TopologyBuilder builder = new TopologyBuilder();

  // Receives metrics
  builder.setSpout("metrics-spout", Injector.getInstance(IRichSpout.class, "metrics"),
      config.metricSpoutThreads).setNumTasks(config.metricSpoutTasks);

  // Receives events
  builder.setSpout("event-spout", Injector.getInstance(IRichSpout.class, "event"),
      config.eventSpoutThreads).setNumTasks(config.eventSpoutTasks);

  // Event -> Events
  builder
      .setBolt("event-bolt", new EventProcessingBolt(config.database), config.eventBoltThreads)
      .shuffleGrouping("event-spout").setNumTasks(config.eventBoltTasks);

  // Metrics / Event -> Filtering
  builder
      .setBolt("filtering-bolt", new MetricFilteringBolt(config.database),
          config.filteringBoltThreads)
      .fieldsGrouping("metrics-spout", new Fields(MetricSpout.FIELDS[0]))
      .allGrouping("event-bolt", EventProcessingBolt.METRIC_ALARM_EVENT_STREAM_ID)
      .allGrouping("event-bolt", EventProcessingBolt.ALARM_DEFINITION_EVENT_STREAM_ID)
      .setNumTasks(config.filteringBoltTasks);

  // Filtering /Event -> Alarm Creation
  builder
      .setBolt("alarm-creation-bolt", new AlarmCreationBolt(config.database),
          config.alarmCreationBoltThreads)
      .fieldsGrouping("filtering-bolt",
          MetricFilteringBolt.NEW_METRIC_FOR_ALARM_DEFINITION_STREAM,
          new Fields(AlarmCreationBolt.ALARM_CREATION_FIELDS[3]))
      .allGrouping("event-bolt", EventProcessingBolt.METRIC_SUB_ALARM_EVENT_STREAM_ID)
      .allGrouping("event-bolt", EventProcessingBolt.ALARM_EVENT_STREAM_ID)
      .allGrouping("event-bolt", EventProcessingBolt.ALARM_DEFINITION_EVENT_STREAM_ID)
      .setNumTasks(config.alarmCreationBoltTasks);

  // Filtering / Event / Alarm Creation -> Aggregation
  builder
      .setBolt("aggregation-bolt",
          new MetricAggregationBolt(config, config.database), config.aggregationBoltThreads)
      .fieldsGrouping("filtering-bolt", new Fields(MetricFilteringBolt.FIELDS[0]))
      .allGrouping("filtering-bolt", MetricAggregationBolt.METRIC_AGGREGATION_CONTROL_STREAM)
      .fieldsGrouping("filtering-bolt", AlarmCreationBolt.ALARM_CREATION_STREAM,
          new Fields(AlarmCreationBolt.ALARM_CREATION_FIELDS[1]))
      .allGrouping("event-bolt", EventProcessingBolt.METRIC_SUB_ALARM_EVENT_STREAM_ID)
      .fieldsGrouping("event-bolt", EventProcessingBolt.METRIC_ALARM_EVENT_STREAM_ID,
          new Fields(EventProcessingBolt.METRIC_ALARM_EVENT_STREAM_FIELDS[1]))
      .fieldsGrouping("alarm-creation-bolt", AlarmCreationBolt.ALARM_CREATION_STREAM,
          new Fields(AlarmCreationBolt.ALARM_CREATION_FIELDS[1]))
      .setNumTasks(config.aggregationBoltTasks);

  // Alarm Creation / Event
  // Aggregation / Event -> Thresholding
  builder
      .setBolt("thresholding-bolt",
          new AlarmThresholdingBolt(config.database, config.kafkaProducerConfig),
          config.thresholdingBoltThreads)
      .fieldsGrouping("aggregation-bolt", new Fields(MetricAggregationBolt.FIELDS[0]))
      .fieldsGrouping("event-bolt", EventProcessingBolt.ALARM_EVENT_STREAM_ID,
          new Fields(EventProcessingBolt.ALARM_EVENT_STREAM_FIELDS[1]))
      .allGrouping("event-bolt", EventProcessingBolt.ALARM_DEFINITION_EVENT_STREAM_ID)
      .allGrouping("event-bolt", EventProcessingBolt.METRIC_SUB_ALARM_EVENT_STREAM_ID)
      .setNumTasks(config.thresholdingBoltTasks);

  return builder.createTopology();
}
 
Example 28
public static void main(String[] args) {
    // Build the TopologyBuilder from the spout and bolts
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("DataSourceSpout", new DataSourceSpout());
    builder.setBolt("SplitBolt", new SplitBolt()).shuffleGrouping("DataSourceSpout");
    builder.setBolt("CountBolt", new CountBolt()).shuffleGrouping("SplitBolt");

    Map hikariConfigMap = Maps.newHashMap();
    hikariConfigMap.put("dataSourceClassName","com.mysql.jdbc.jdbc2.optional.MysqlDataSource");
    hikariConfigMap.put("dataSource.url", "jdbc:mysql://192.168.60.11/storm");
    hikariConfigMap.put("dataSource.user","root");
    hikariConfigMap.put("dataSource.password","123");
    ConnectionProvider connectionProvider = new HikariCPConnectionProvider(hikariConfigMap);

    String tableName = "wc";
    JdbcMapper simpleJdbcMapper = new SimpleJdbcMapper(tableName, connectionProvider);

    JdbcInsertBolt userPersistanceBolt = new JdbcInsertBolt(connectionProvider, simpleJdbcMapper)
            .withTableName(tableName)
            .withQueryTimeoutSecs(30);
    builder.setBolt("JdbcInsertBolt", userPersistanceBolt).shuffleGrouping("CountBolt");

    // Create a local cluster
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("LocalWordCountRedisStormTopology", new Config(), builder.createTopology());

}
 
Example 29
public static void main(String[] args) {
    // Build the TopologyBuilder from the spout and bolts
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("DataSourceSpout", new DataSourceSpout());
    builder.setBolt("SplitBolt", new SplitBolt()).shuffleGrouping("DataSourceSpout");
    builder.setBolt("CountBolt", new CountBolt()).shuffleGrouping("SplitBolt");

    // Create a local cluster
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("LocalWordCountStormTopology", new Config(), builder.createTopology());

}
 
Example 30
Source Project: java-study   Source File: WordCountApp.java    License: Apache License 2.0
public static void main(String[] args) throws InterruptedException, AlreadyAliveException, InvalidTopologyException {
    // Define the topology
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word-reader", new WordReader());
    builder.setBolt("word-normalizer", new WordNormalizer()).shuffleGrouping("word-reader");
    builder.setBolt("word-counter", new WordCounter()).fieldsGrouping("word-normalizer", new Fields("word"));
    StormTopology topology = builder.createTopology();

    // Configuration
    Config conf = new Config();
    String fileName = "words.txt";
    conf.put("fileName", fileName);
    conf.setDebug(false);

    // Run the topology
    System.out.println("Starting...");
    if (args != null && args.length > 0) {
        // With arguments, submit to the cluster using the first argument as the topology name
        System.out.println("Remote mode");
        try {
            StormSubmitter.submitTopology(args[0], conf, topology);
        } catch (AuthorizationException e) {
            e.printStackTrace();
        }
    } else {
        // Without arguments, run in local mode
        System.out.println("Local mode");
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("Getting-Started-Topologie", conf, topology);
        Thread.sleep(5000);
        // Shut down the local cluster
        cluster.shutdown();
    }
    System.out.println("Done");
}