backtype.storm.LocalCluster Java Examples

The following examples show how to use backtype.storm.LocalCluster. The examples are taken from open-source projects; each one lists its original source file, project, and license.
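
Most of the examples below follow the same local-mode lifecycle: create a LocalCluster, submit a topology together with a Config, let it run for a fixed time, then kill the topology and shut the cluster down. The helper below is a minimal sketch of that pattern, assuming the pre-1.0 backtype.storm API used throughout this page; the class and method names (LocalClusterRunner, runLocally) are illustrative and not taken from any of the projects listed here.

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.generated.StormTopology;
import backtype.storm.utils.Utils;

public class LocalClusterRunner {

    // Minimal local-mode lifecycle shared by most examples on this page:
    // submit, run for a fixed time, kill the topology, shut the cluster down.
    // Illustrative sketch only; names are not from the projects below.
    public static void runLocally(String topologyName, Config conf,
                                  StormTopology topology, long runtimeMs) {
        LocalCluster cluster = new LocalCluster();
        try {
            cluster.submitTopology(topologyName, conf, topology);
            Utils.sleep(runtimeMs);              // let the topology process tuples
            cluster.killTopology(topologyName);  // stop the topology cleanly
        } finally {
            cluster.shutdown();                  // release local-cluster resources
        }
    }
}
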
Example #1
Source File: SubmitTopologyHelper.java    From galaxy-sdk-java with Apache License 2.0
public static void submitTopology(StormTopology stormTopology, Map topologyConfig) throws Exception {
    // setup StormTopology

    Config submitConfig = new Config();

    // set the configuration for topology
    submitConfig.put(Config.TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS, 5000);
    submitConfig.put(Config.TOPOLOGY_ACKER_EXECUTORS, 100);
    submitConfig.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 20);

    // set the worker process number
    submitConfig.setNumWorkers(ConfigHelper.getInt(topologyConfig, ConfigKeys.STORM_WORKER_NUMBER));

    // get topologyName and clusterMode
    String topologyName = ConfigHelper.getString(topologyConfig, ConfigKeys.STORM_TOPOLOGY_NAME);
    String clusterMode = ConfigHelper.getString(topologyConfig, ConfigKeys.STORM_CLUSTER_MODE);

    if (clusterMode.equals("local")) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("storm-emq", submitConfig, stormTopology);
    } else {
        submitConfig.put(Config.NIMBUS_HOST, ConfigHelper.getString(topologyConfig, ConfigKeys.STORM_NIMBUS_HOSTNAME));
        StormSubmitter.submitTopology(topologyName, submitConfig, stormTopology);
    }

}
 
Example #2
Source File: TestStormStreamIdRouting.java    From eagle with Apache License 2.0
@Ignore
@Test
public void testRoutingByStreamId() throws Exception {
    Config conf = new Config();
    conf.setNumWorkers(2); // use two worker processes
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout("blue-spout", new BlueSpout()); // parallelism hint

    topologyBuilder.setBolt("green-bolt-1", new GreenBolt(1))
        .shuffleGrouping("blue-spout", "green-bolt-stream-1");
    topologyBuilder.setBolt("green-bolt-2", new GreenBolt(2))
        .shuffleGrouping("blue-spout", "green-bolt-stream-2");

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("mytopology", new HashMap(), topologyBuilder.createTopology());

    while (true) {
        try {
            Thread.sleep(1000);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
 
Example #3
Source File: MovingAvgLocalTopologyRunner.java    From hadoop-arch-book with Apache License 2.0
public static void main(String[] args) {

    Config config = new Config();
    config.setDebug(true);

    StormTopology topology = buildTopology();
    // Run locally:
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("local-moving-avg", config, topology);
    // Un-comment to run as part of a Storm cluster instead:
    // try {
    //   StormSubmitter.submitTopology("cluster-moving-average", config, topology);
    // } catch (AlreadyAliveException e) {
    //   e.printStackTrace();
    // } catch (InvalidTopologyException e) {
    //   e.printStackTrace();
    // }
  }
 
Example #4
Source File: ExampleRunner.java    From flowmix with Apache License 2.0
public void run() {

    StormTopology topology = new FlowmixBuilder()
        .setFlowLoader(new SimpleFlowLoaderSpout(provider.getFlows(), 60000))
        .setEventsLoader(new MockEventGeneratorSpout(getMockEvents(), 10))
        .setOutputBolt(new PrinterBolt())
        .setParallelismHint(6)
        .create()
        .createTopology();

    Config conf = new Config();
    conf.setNumWorkers(20);
    conf.setMaxSpoutPending(5000);
    conf.setDebug(false);
    conf.registerSerialization(BaseEvent.class, EventSerializer.class);
    conf.setSkipMissingKryoRegistrations(false);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("example-topology", conf, topology);
  }
 
Example #5
Source File: SwitchBoltIT.java    From flowmix with Apache License 2.0
@Test
public void test_timeDiffActivated_countEviction() throws InterruptedException {
    Flow flow = new FlowBuilder()
            .id("flow")
            .flowDefs()
            .stream("stream1")
            .stopGate().open(Policy.TIME_DELTA_LT, 1000).close(Policy.TIME, 5).evict(Policy.COUNT, 5).end()
            .endStream()   // send ALL results to stream2 and not to standard output
            .endDefs()
            .createFlow();

    StormTopology topology = buildTopology(flow, 50);
    Config conf = new Config();
    conf.setNumWorkers(20);
    conf.registerSerialization(BaseEvent.class, EventSerializer.class);
    conf.setSkipMissingKryoRegistrations(false);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, topology);

    Thread.sleep(5000);

    assertEquals(5, MockSinkBolt.getEvents().size());
}
 
Example #6
Source File: Part05_AdvancedStateAndDRPC.java    From trident-tutorial with Apache License 2.0
public static void main(String[] args) throws Exception {
    FakeTweetGenerator fakeTweets = new FakeTweetGenerator();
    FeederBatchSpout testSpout = new FeederBatchSpout(ImmutableList.of("id", "text", "actor", "location", "date"));

    Config conf = new Config();
    LocalCluster cluster = new LocalCluster();
    LocalDRPC drpc = new LocalDRPC();
    cluster.submitTopology("external_state_drpc", conf, externalState(drpc, testSpout));

    // You can use FeederBatchSpout to feed known values to the topology. Very useful for tests.
    testSpout.feed(fakeTweets.getNextTweetTuples("ted"));
    testSpout.feed(fakeTweets.getNextTweetTuples("ted"));
    testSpout.feed(fakeTweets.getNextTweetTuples("mary"));
    testSpout.feed(fakeTweets.getNextTweetTuples("jason"));

    System.out.println(drpc.execute("age_stats", ""));
    System.out.println("OK");
}
 
Example #7
Source File: ManualDRPC.java    From jstorm with Apache License 2.0
public static void testDrpc() {
    TopologyBuilder builder = new TopologyBuilder();
    LocalDRPC drpc = new LocalDRPC();
    
    DRPCSpout spout = new DRPCSpout("exclamation", drpc);
    builder.setSpout("drpc", spout);
    builder.setBolt("exclaim", new ExclamationBolt(), 3).shuffleGrouping("drpc");
    builder.setBolt("return", new ReturnResults(), 3).shuffleGrouping("exclaim");
    
    LocalCluster cluster = new LocalCluster();
    Config conf = new Config();
    cluster.submitTopology("exclaim", conf, builder.createTopology());
    
    JStormUtils.sleepMs(30 * 1000);
    
    try {
        System.out.println(drpc.execute("exclamation", "aaa"));
        System.out.println(drpc.execute("exclamation", "bbb"));
    } catch (Exception e) {
        Assert.fail("Failed to test drpc");
    }
    
    drpc.shutdown();
    cluster.shutdown();
}
 
Example #8
Source File: Part04_BasicStateAndDRPC.java    From trident-tutorial with Apache License 2.0
public static void main(String[] args) throws Exception{
        FakeTweetGenerator fakeTweets = new FakeTweetGenerator();
        FeederBatchSpout testSpout = new FeederBatchSpout(ImmutableList.of("id", "text", "actor", "location", "date"));

        Config conf = new Config();
        LocalCluster cluster = new LocalCluster();
        LocalDRPC drpc = new LocalDRPC();
        cluster.submitTopology("state_drpc", conf, basicStateAndDRPC(drpc, testSpout));

        // You can use FeederBatchSpout to feed known values to the topology. Very useful for tests.
        testSpout.feed(fakeTweets.getNextTweetTuples("ted"));
        testSpout.feed(fakeTweets.getNextTweetTuples("ted"));
        testSpout.feed(fakeTweets.getNextTweetTuples("mary"));
        testSpout.feed(fakeTweets.getNextTweetTuples("jason"));

        // This is how you make DRPC calls. First argument must match the function name
        // System.out.println(drpc.execute("ping", "ping pang pong"));
        // System.out.println(drpc.execute("count", "america america ace ace ace item"));
        System.out.println(drpc.execute("count_per_actor", "ted"));
        // System.out.println(drpc.execute("count_per_actors", "ted mary pere jason"));

        // You can use a client library to make calls remotely
//        DRPCClient client = new DRPCClient("drpc.server.location", 3772);
//        System.out.println(client.execute("ping", "ping pang pong"));
    }
 
Example #9
Source File: DrpcTopology.java    From storm-example with Apache License 2.0
public static void main(String[] args) throws Exception {
    final LocalCluster cluster = new LocalCluster();
    final Config conf = new Config();

    LocalDRPC client = new LocalDRPC();
    TridentTopology drpcTopology = new TridentTopology();

    drpcTopology.newDRPCStream("drpc", client)
            .each(new Fields("args"), new ArgsFunction(), new Fields("gamestate"))
            .each(new Fields("gamestate"), new GenerateBoards(), new Fields("children"))
            .each(new Fields("children"), new ScoreFunction(), new Fields("board", "score", "player"))
            .groupBy(new Fields("gamestate"))
            .aggregate(new Fields("board", "score"), new FindBestMove(), new Fields("bestMove"))
            .project(new Fields("bestMove"));

    cluster.submitTopology("drpcTopology", conf, drpcTopology.build());

    Board board = new Board();
    board.board[1][1] = "O";
    board.board[2][2] = "X";
    board.board[0][1] = "O";
    board.board[0][0] = "X";
    LOG.info("Determing best move for O on:" + board.toString());
    LOG.info("RECEIVED RESPONSE [" + client.execute("drpc", board.toKey()) + "]");
}
 
Example #10
Source File: JStormHelper.java    From jstorm with Apache License 2.0
public static void runTopologyLocally(StormTopology topology, String topologyName, Config conf,
                                      int runtimeInSeconds, Callback callback) throws Exception {
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(topologyName, conf, topology);

    if (runtimeInSeconds < 120) {
        JStormUtils.sleepMs(120 * 1000);
    } else {
        JStormUtils.sleepMs(runtimeInSeconds * 1000);
    }

    if (callback != null) {
        callback.execute(topologyName);
    }

    cluster.killTopology(topologyName);
    cluster.shutdown();
}
 
Example #11
Source File: LocalRunner.java    From storm-benchmark with Apache License 2.0
private static void run(String name)
        throws ClassNotFoundException, IllegalAccessException,
        InstantiationException, AlreadyAliveException, InvalidTopologyException {
  LOG.info("running benchmark " + name);
  IBenchmark benchmark =  (IBenchmark) Runner.getApplicationFromName(PACKAGE + "." + name);
  Config config = new Config();
  config.putAll(Utils.readStormConfig());
  config.setDebug(true);
  StormTopology topology = benchmark.getTopology(config);
  LocalCluster localCluster = new LocalCluster();
  localCluster.submitTopology(name, config, topology);
  final int runtime = BenchmarkUtils.getInt(config, MetricsCollectorConfig.METRICS_TOTAL_TIME,
          MetricsCollectorConfig.DEFAULT_TOTAL_TIME);
  IMetricsCollector collector = benchmark.getMetricsCollector(config, topology);
  collector.run();
  try {
    Thread.sleep(runtime);
  } catch (InterruptedException e) {
    LOG.error("benchmark interrupted", e);
  }
  localCluster.shutdown();
}
 
Example #12
Source File: TridentWordCount.java    From flink-perf with Apache License 2.0
public static void main(String[] args) throws Exception {
  Config conf = new Config();
  conf.setMaxSpoutPending(20);
  if (args.length == 0) {
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("wordCounter", conf, buildTopology(drpc));
    for (int i = 0; i < 100; i++) {
      System.out.println("DRPC RESULT: " + drpc.execute("words", "cat the dog jumped"));
      Thread.sleep(1000);
    }
  }
  else {
    conf.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(null));
  }
}
 
Example #13
Source File: TestTopology.java    From jstorm with Apache License 2.0
private static void submitTopology(TopologyBuilder builder) {
	try {
		if (local_mode(conf)) {

			LocalCluster cluster = new LocalCluster();

			cluster.submitTopology(
					String.valueOf(conf.get("topology.name")), conf,
					builder.createTopology());

			Thread.sleep(200000);

			cluster.shutdown();
		} else {
			StormSubmitter.submitTopology(
					String.valueOf(conf.get("topology.name")), conf,
					builder.createTopology());
		}

	} catch (Exception e) {
		LOG.error(e.getMessage(), e.getCause());
	}
}
 
Example #14
Source File: ThroughputHostsTracking.java    From flink-perf with Apache License 2.0
public static void main(String[] args) throws Exception {
	ParameterTool pt = ParameterTool.fromArgs(args);

	int par = pt.getInt("para");

	TopologyBuilder builder = new TopologyBuilder();

	builder.setSpout("source0", new Generator(pt), pt.getInt("sourceParallelism"));
	int i = 0;
	for(; i < pt.getInt("repartitions", 1) - 1;i++) {
		System.out.println("adding source"+i+" --> source"+(i+1));
		builder.setBolt("source"+(i+1), new RepartPassThroughBolt(pt), pt.getInt("sinkParallelism")).fieldsGrouping("source" + i, new Fields("id"));
	}
	System.out.println("adding final source"+i+" --> sink");

	builder.setBolt("sink", new Sink(pt), pt.getInt("sinkParallelism")).fieldsGrouping("source"+i, new Fields("id"));


	Config conf = new Config();
	conf.setDebug(false);
	//System.exit(1);

	if (!pt.has("local")) {
		conf.setNumWorkers(par);

		StormSubmitter.submitTopologyWithProgressBar("throughput-"+pt.get("name", "no_name"), conf, builder.createTopology());
	}
	else {
		conf.setMaxTaskParallelism(par);

		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("throughput", conf, builder.createTopology());

		Thread.sleep(300000);

		cluster.shutdown();
	}

}
 
Example #15
Source File: TridentThroughput.java    From flink-perf with Apache License 2.0
public static void main(String[] args) throws Exception {
	ParameterTool pt = ParameterTool.fromArgs(args);

	int par = pt.getInt("para");


	TridentTopology topology = new TridentTopology();
	Stream sourceStream = topology.newStream("source", new Generator(pt)).parallelismHint(pt.getInt("sourceParallelism"));

	Stream repart = sourceStream.partitionBy(new Fields("id"));
	for(int i = 0; i < pt.getInt("repartitions", 1) - 1; i++) {
		repart = repart.each(new Fields("id"), new IdentityEach(), new Fields("id"+i)).partitionBy(new Fields("id"+i));
	}
	repart.each(new Fields("id", "host", "time", "payload"), new Sink(pt), new Fields("dontcare")).parallelismHint(pt.getInt("sinkParallelism"));

	Config conf = new Config();
	conf.setDebug(false);

	if (!pt.has("local")) {
		conf.setNumWorkers(par);

		StormSubmitter.submitTopologyWithProgressBar("throughput-"+pt.get("name", "no_name"), conf, topology.build());
	}
	else {
		conf.setMaxTaskParallelism(par);

		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("throughput", conf, topology.build());

		Thread.sleep(30000);

		cluster.shutdown();
	}

}
 
Example #16
Source File: SequenceTopologyTool.java    From jstorm with Apache License 2.0
public void SetLocalTopology() throws Exception {
    Config conf = getConf();
    
    StormTopology topology = buildTopology();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("SplitMerge", conf, topology);
    Thread.sleep(60000);
    cluster.shutdown();
}
 
Example #17
Source File: StreamingApp.java    From storm-solr with Apache License 2.0
/**
 * Builds and runs a StormTopology in the configured environment (development|staging|production)
 */
public void run() throws Exception {
  log.info(String.format("Running %s in %s mode.", topo.getName(), env));

  String topologyName = topo.getName();
  if (ENV.development == env) {
    int localRunSecs = Integer.parseInt(cli.getOptionValue("localRunSecs", "30"));
    try {
      LocalCluster cluster = new LocalCluster();
      stormConf.put("topology.tick.tuple.freq.secs", 5);
      cluster.submitTopology(topologyName, stormConf, topo.build(this));

      log.info("Submitted " + topologyName + " to LocalCluster at " + timestamp() + " ... sleeping for " +
        localRunSecs + " seconds before terminating.");
      try {
        Thread.sleep(localRunSecs * 1000);
      } catch (InterruptedException ie) {
        Thread.interrupted();
      }

      log.info("Killing " + topologyName);
      cluster.killTopology(topologyName);

      cluster.shutdown();
      log.info("Shut down LocalCluster at " + timestamp());
    } catch (Exception exc) {
      Throwable rootCause = getRootCause(exc);
      log.error("Storm topology " + topologyName + " failed due to: " + rootCause, rootCause);
      throw exc;
    } finally {
      cleanup();
    }
    System.exit(0);
  } else {
    StormSubmitter.submitTopology(topologyName, stormConf, topo.build(this));
  }
}
 
Example #18
Source File: Latency.java    From flink-perf with Apache License 2.0
public static void main(String[] args) throws Exception {
	ParameterTool pt = ParameterTool.fromArgs(args);

	int par = pt.getInt("para");

	TopologyBuilder builder = new TopologyBuilder();

	builder.setSpout("source0", new Generator(pt), pt.getInt("sourceParallelism"));
	int i = 0;
	for(; i < pt.getInt("repartitions", 1) - 1;i++) {
		System.out.println("adding source"+i+" --> source"+(i+1));
		builder.setBolt("source"+(i+1), new PassThroughBolt(pt), pt.getInt("sinkParallelism")).fieldsGrouping("source" + i, new Fields("id"));
	}
	System.out.println("adding final source"+i+" --> sink");

	builder.setBolt("sink", new Sink(pt), pt.getInt("sinkParallelism")).fieldsGrouping("source"+i, new Fields("id"));


	Config conf = new Config();
	conf.setDebug(false);
	//System.exit(1);

	if (!pt.has("local")) {
		conf.setNumWorkers(par);

		StormSubmitter.submitTopologyWithProgressBar("throughput-"+pt.get("name", "no_name"), conf, builder.createTopology());
	}
	else {
		conf.setMaxTaskParallelism(par);

		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("throughput", conf, builder.createTopology());

		Thread.sleep(300000);

		cluster.shutdown();
	}

}
 
Example #19
Source File: WordCountTopologyBroken.java    From storm-example with Apache License 2.0
public static void main(String[] args) throws Exception {

        SentenceSpout spout = new SentenceSpout();
        SplitSentenceBolt splitBolt = new SplitSentenceBolt();
        WordCountBolt countBolt = new WordCountBolt();
        ReportBolt reportBolt = new ReportBolt();


        TopologyBuilder builder = new TopologyBuilder();

        builder.setSpout(SENTENCE_SPOUT_ID, spout, 2);
        // SentenceSpout --> SplitSentenceBolt
        builder.setBolt(SPLIT_BOLT_ID, splitBolt, 2)
                .setNumTasks(4)
                .shuffleGrouping(SENTENCE_SPOUT_ID);
        // SplitSentenceBolt --> WordCountBolt
        builder.setBolt(COUNT_BOLT_ID, countBolt, 4)
                .shuffleGrouping(SPLIT_BOLT_ID);
        // WordCountBolt --> ReportBolt
        builder.setBolt(REPORT_BOLT_ID, reportBolt)
                .globalGrouping(COUNT_BOLT_ID);

        Config config = new Config();
        config.setNumWorkers(2);

        LocalCluster cluster = new LocalCluster();

        cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
        waitForSeconds(10);
        cluster.killTopology(TOPOLOGY_NAME);
        cluster.shutdown();
    }
 
Example #20
Source File: WordCountTopology.java    From storm-example with Apache License 2.0
public static void main(String[] args) throws Exception {

        SentenceSpout spout = new SentenceSpout();
        SplitSentenceBolt splitBolt = new SplitSentenceBolt();
        WordCountBolt countBolt = new WordCountBolt();
        ReportBolt reportBolt = new ReportBolt();


        TopologyBuilder builder = new TopologyBuilder();

        builder.setSpout(SENTENCE_SPOUT_ID, spout, 2);
        // SentenceSpout --> SplitSentenceBolt
        builder.setBolt(SPLIT_BOLT_ID, splitBolt, 2)
                .setNumTasks(4)
                .shuffleGrouping(SENTENCE_SPOUT_ID);
        // SplitSentenceBolt --> WordCountBolt
        builder.setBolt(COUNT_BOLT_ID, countBolt, 4)
                .fieldsGrouping(SPLIT_BOLT_ID, new Fields("word"));
        // WordCountBolt --> ReportBolt
        builder.setBolt(REPORT_BOLT_ID, reportBolt)
                .globalGrouping(COUNT_BOLT_ID);

        Config config = new Config();
        config.setNumWorkers(2);

        LocalCluster cluster = new LocalCluster();

        cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
        waitForSeconds(10);
        cluster.killTopology(TOPOLOGY_NAME);
        cluster.shutdown();
    }
 
Example #21
Source File: WordCountTopology.java    From storm-example with Apache License 2.0
public static void main(String[] args) throws Exception {

        SentenceSpout spout = new SentenceSpout();
        SplitSentenceBolt splitBolt = new SplitSentenceBolt();
        WordCountBolt countBolt = new WordCountBolt();
        ReportBolt reportBolt = new ReportBolt();


        TopologyBuilder builder = new TopologyBuilder();

        builder.setSpout(SENTENCE_SPOUT_ID, spout);
        // SentenceSpout --> SplitSentenceBolt
        builder.setBolt(SPLIT_BOLT_ID, splitBolt)
                .shuffleGrouping(SENTENCE_SPOUT_ID);
        // SplitSentenceBolt --> WordCountBolt
        builder.setBolt(COUNT_BOLT_ID, countBolt)
                .fieldsGrouping(SPLIT_BOLT_ID, new Fields("word"));
        // WordCountBolt --> ReportBolt
        builder.setBolt(REPORT_BOLT_ID, reportBolt)
                .globalGrouping(COUNT_BOLT_ID);

        Config config = new Config();

        LocalCluster cluster = new LocalCluster();

        cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
        waitForSeconds(10);
        cluster.killTopology(TOPOLOGY_NAME);
        cluster.shutdown();
    }
 
Example #22
Source File: NlpTopology.java    From storm-example with Apache License 2.0
public static void main(String[] args) throws Exception {
        //https://api.hmsonline.com/v1/search/masterfile?timestamp=1381505612735&key=cQUroC22o0OFvyHVt2cTYg&signature=0r7Zm++/ve+/vTIk77+9eXkh77+9al/vv70h77+9
        String time = "1381505612735";
        String BaseUrl = "https://api.hmsonline.com";
//        String Signature= ExecutionUtil.getDynamicProcessProperty("Signature");    
        String str_Key = "F8EnkY4tINSfinyXgioFV1bpq9HIHZ4XhjmkXkYSqoA=";

        System.out.println("Secret_Decoded" + str_Key);
        String contentTosign = "/v1/search/masterfile?timestamp=" + time + "&key=cQUroC22o0OFvyHVt2cTYg";

        System.out.println("" + Base64.isArrayByteBase64(str_Key.getBytes()));

        byte[] key = Base64.decodeBase64(str_Key);
//             System.out.println("Secret_Decoded" +key );
        SecretKeySpec sha1Key = new SecretKeySpec(key, "HmacSHA1");
//              System.out.println("SecretKeySpec" +sha1Key );
        Mac mac = Mac.getInstance("HmacSHA1");

//              System.out.println("mac");   
        mac.init(sha1Key);
        byte[] bytes = mac.doFinal(contentTosign.getBytes("UTF-8"));

        System.out.println("SystemMiili = https://api.hmsonline.com/" + contentTosign + "&signature=" + new String(bytes, "UTF-8"));
        System.out.println("BaseUrl = " + contentTosign + "&signature=");
        System.out.println("Signature = " + new String(Base64.encodeBase64String(bytes)));

        final Config conf = new Config();
        final LocalCluster cluster = new LocalCluster();

        LOG.info("Submitting topology.");

        cluster.submitTopology("nlp", conf, buildTopology());
        LOG.info("Topology submitted.");
        Thread.sleep(600000);
    }
 
Example #23
Source File: UnitTopologyRunner.java    From eagle with Apache License 2.0
private void run(String topologyId,
                 int numOfTotalWorkers,
                 int numOfSpoutTasks,
                 int numOfRouterBolts,
                 int numOfAlertBolts,
                 int numOfPublishExecutors,
                 int numOfPublishTasks,
                 Config config,
                 boolean localMode) {

    backtype.storm.Config stormConfig = givenStormConfig == null ? new backtype.storm.Config() : givenStormConfig;
    // TODO: Configurable metric consumer instance number

    int messageTimeoutSecs = config.hasPath(MESSAGE_TIMEOUT_SECS) ? config.getInt(MESSAGE_TIMEOUT_SECS) : DEFAULT_MESSAGE_TIMEOUT_SECS;
    LOG.info("Set topology.message.timeout.secs as {}", messageTimeoutSecs);
    stormConfig.setMessageTimeoutSecs(messageTimeoutSecs);

    if (config.hasPath("metric")) {
        stormConfig.registerMetricsConsumer(StormMetricTaggedConsumer.class, config.root().render(ConfigRenderOptions.concise()), 1);
    }

    stormConfig.setNumWorkers(numOfTotalWorkers);
    StormTopology topology = buildTopology(topologyId, numOfSpoutTasks, numOfRouterBolts, numOfAlertBolts, numOfPublishExecutors, numOfPublishTasks, config).createTopology();

    if (localMode) {
        LOG.info("Submitting as local mode");
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyId, stormConfig, topology);
        Utils.sleep(Long.MAX_VALUE);
    } else {
        LOG.info("Submitting as cluster mode");
        try {
            StormSubmitter.submitTopologyWithProgressBar(topologyId, stormConfig, topology);
        } catch (Exception ex) {
            LOG.error("fail submitting topology {}", topology, ex);
            throw new IllegalStateException(ex);
        }
    }
}
 
Example #24
Source File: RecursiveTopology.java    From storm-example with Apache License 2.0
public static void main(String[] args) throws Exception {
    final Config conf = new Config();
    final LocalCluster cluster = new LocalCluster();

    LOG.info("Submitting topology.");
    cluster.submitTopology("recursiveTopology", conf, RecursiveTopology.buildTopology());
    LOG.info("Topology submitted.");
    Thread.sleep(600000);
}
 
Example #25
Source File: ScoringTopology.java    From storm-example with Apache License 2.0
public static void main(String[] args) throws Exception {
    final Config conf = new Config();
    final LocalCluster cluster = new LocalCluster();

    LOG.info("Submitting topology.");
    cluster.submitTopology("scoringTopology", conf, ScoringTopology.buildTopology());
    LOG.info("Topology submitted.");
    Thread.sleep(600000);
}
 
Example #26
Source File: BaseLocalClusterTest.java    From storm-trident-elasticsearch with Apache License 2.0
@Before
public void setUp() {
    esSetup = new EsSetup(settings);
    esSetup.execute(createIndex(index));

    drpc = new LocalDRPC();
    StormTopology topology = buildTopology();

    cluster = new LocalCluster();
    cluster.submitTopology("elastic-storm", new Config(), topology);

    Utils.sleep(10000); // let's do some work
}
 
Example #27
Source File: SpeedViolationTopology.java    From ignite-book-code-samples with GNU General Public License v3.0
public static void main(String[] args) throws Exception {
    if (getProperties() == null || getProperties().isEmpty()) {
        System.out.println("Property file <ignite-storm.property> is not found or empty");
        return;
    }
    // Ignite streamer used as a Storm bolt
    final StormStreamer<String, String> stormStreamer = new StormStreamer<>();

    stormStreamer.setAutoFlushFrequency(10L);
    stormStreamer.setAllowOverwrite(true);
    stormStreamer.setCacheName(getProperties().getProperty("cache.name"));

    stormStreamer.setIgniteTupleField(getProperties().getProperty("tuple.name"));
    stormStreamer.setIgniteConfigFile(getProperties().getProperty("ignite.spring.xml"));


    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new FileSourceSpout(), 1);
    builder.setBolt("limit", new SpeedLimitBolt(), 1).fieldsGrouping("spout", new Fields("trafficLog"));
    // set ignite bolt
    builder.setBolt("ignite-bolt", stormStreamer, STORM_EXECUTORS).shuffleGrouping("limit");

    Config conf = new Config();
    conf.setDebug(false);

    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("speed-violation", conf, builder.createTopology());
    Thread.sleep(10000);
    cluster.shutdown();

}
 
Example #28
Source File: SalesTopology.java    From storm-cassandra-cql with Apache License 2.0
public static void main(String[] args) throws Exception {
    final Config configuration = new Config();
    configuration.put(MapConfiguredCqlClientFactory.TRIDENT_CASSANDRA_CQL_HOSTS, "localhost");
    final LocalCluster cluster = new LocalCluster();
    LOG.info("Submitting topology.");
    cluster.submitTopology("cqlexample", configuration, buildTopology());
    LOG.info("Topology submitted.");
    Thread.sleep(600000);
}