Java Code Examples for backtype.storm.LocalCluster#shutdown()

The following examples show how to use backtype.storm.LocalCluster#shutdown(). Each example is taken from an open-source project; the source file and license are noted above it.
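All of the examples share the same local-mode life cycle: submit a topology to an in-process LocalCluster, let it run for a fixed period, then call shutdown() to release the cluster's resources (optionally killing the topology first). Below is a minimal sketch of that pattern, assuming a TopologyBuilder named builder has already been configured; the class and topology names are placeholders.

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.topology.TopologyBuilder;

public class LocalClusterShutdownSketch {
    public static void runLocally(TopologyBuilder builder) throws InterruptedException {
        Config conf = new Config();
        LocalCluster cluster = new LocalCluster();                         // in-process "cluster"
        cluster.submitTopology("example", conf, builder.createTopology()); // deploy locally
        Thread.sleep(60 * 1000);                                           // let the topology run for a while
        cluster.killTopology("example");                                   // optional: stop the topology first
        cluster.shutdown();                                                // release all local-cluster resources
    }
}
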
Example 1
Source File: TestTopology.java    From jstorm with Apache License 2.0
private static void submitTopology(TopologyBuilder builder) {
	try {
		if (local_mode(conf)) {

			LocalCluster cluster = new LocalCluster();

			cluster.submitTopology(
					String.valueOf(conf.get("topology.name")), conf,
					builder.createTopology());

			Thread.sleep(200000); // let the topology run locally for 200 seconds before shutting down

			cluster.shutdown();
		} else {
			StormSubmitter.submitTopology(
					String.valueOf(conf.get("topology.name")), conf,
					builder.createTopology());
		}

	} catch (Exception e) {
		LOG.error(e.getMessage(), e); // log the exception itself; its cause may be null
	}
}
 
Example 2
Source File: TestTopology.java    From jstorm with Apache License 2.0
private static void submitTopology(TopologyBuilder builder) {
	try {
		if (local_mode(conf)) {

			LocalCluster cluster = new LocalCluster();

			cluster.submitTopology(String.valueOf(conf.get("topology.name")), conf, builder.createTopology());

			Thread.sleep(200000);

			cluster.shutdown();
		} else {
			StormSubmitter.submitTopology(String.valueOf(conf.get("topology.name")), conf,
					builder.createTopology());
		}

	} catch (Exception e) {
		e.printStackTrace();
	}
}
 
Example 3
Source File: ManualDRPC.java    From jstorm with Apache License 2.0
public static void testDrpc() {
    TopologyBuilder builder = new TopologyBuilder();
    LocalDRPC drpc = new LocalDRPC();
    
    DRPCSpout spout = new DRPCSpout("exclamation", drpc);
    builder.setSpout("drpc", spout);
    builder.setBolt("exclaim", new ExclamationBolt(), 3).shuffleGrouping("drpc");
    builder.setBolt("return", new ReturnResults(), 3).shuffleGrouping("exclaim");
    
    LocalCluster cluster = new LocalCluster();
    Config conf = new Config();
    cluster.submitTopology("exclaim", conf, builder.createTopology());
    
    JStormUtils.sleepMs(30 * 1000);
    
    try {
        System.out.println(drpc.execute("exclamation", "aaa"));
        System.out.println(drpc.execute("exclamation", "bbb"));
    } catch (Exception e) {
        Assert.fail("Failed to test drpc");
    }
    
    drpc.shutdown();
    cluster.shutdown();
}
 
Example 4
Source File: JStormHelper.java    From jstorm with Apache License 2.0
public static void runTopologyLocally(StormTopology topology, String topologyName, Config conf,
                                      int runtimeInSeconds, Callback callback) throws Exception {
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(topologyName, conf, topology);

    if (runtimeInSeconds < 120) {   // enforce a minimum local runtime of two minutes
        JStormUtils.sleepMs(120 * 1000);
    } else {
        JStormUtils.sleepMs(runtimeInSeconds * 1000);
    }

    if (callback != null) {
        callback.execute(topologyName);
    }

    cluster.killTopology(topologyName);
    cluster.shutdown();
}
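A hypothetical invocation of this helper might look like the following; the topology, name, and runtime are placeholders, and passing null skips the callback (which the null check above permits).

StormTopology topology = builder.createTopology();   // builder assumed to be configured elsewhere
Config conf = new Config();
// Runs locally for at least 120 seconds (the helper's minimum), then kills the topology and shuts down.
JStormHelper.runTopologyLocally(topology, "demo-topology", conf, 180, null);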
 
Example 5
Source File: WordCountTopology.java    From storm-example with Apache License 2.0
public static void main(String[] args) throws Exception {

        SentenceSpout spout = new SentenceSpout();
        SplitSentenceBolt splitBolt = new SplitSentenceBolt();
        WordCountBolt countBolt = new WordCountBolt();
        ReportBolt reportBolt = new ReportBolt();


        TopologyBuilder builder = new TopologyBuilder();

        builder.setSpout(SENTENCE_SPOUT_ID, spout, 2);
        // SentenceSpout --> SplitSentenceBolt
        builder.setBolt(SPLIT_BOLT_ID, splitBolt, 2)
                .setNumTasks(4)
                .shuffleGrouping(SENTENCE_SPOUT_ID);
        // SplitSentenceBolt --> WordCountBolt
        builder.setBolt(COUNT_BOLT_ID, countBolt, 4)
                .fieldsGrouping(SPLIT_BOLT_ID, new Fields("word"));
        // WordCountBolt --> ReportBolt
        builder.setBolt(REPORT_BOLT_ID, reportBolt)
                .globalGrouping(COUNT_BOLT_ID);

        Config config = new Config();
        config.setNumWorkers(2);

        LocalCluster cluster = new LocalCluster();

        cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
        waitForSeconds(10);
        cluster.killTopology(TOPOLOGY_NAME);
        cluster.shutdown();
    }
 
Example 6
Source File: ThroughputHostsTracking.java    From flink-perf with Apache License 2.0
public static void main(String[] args) throws Exception {
	ParameterTool pt = ParameterTool.fromArgs(args);

	int par = pt.getInt("para");

	TopologyBuilder builder = new TopologyBuilder();

	builder.setSpout("source0", new Generator(pt), pt.getInt("sourceParallelism"));
	int i = 0;
	for(; i < pt.getInt("repartitions", 1) - 1;i++) {
		System.out.println("adding source"+i+" --> source"+(i+1));
		builder.setBolt("source"+(i+1), new RepartPassThroughBolt(pt), pt.getInt("sinkParallelism")).fieldsGrouping("source" + i, new Fields("id"));
	}
	System.out.println("adding final source"+i+" --> sink");

	builder.setBolt("sink", new Sink(pt), pt.getInt("sinkParallelism")).fieldsGrouping("source"+i, new Fields("id"));


	Config conf = new Config();
	conf.setDebug(false);
	//System.exit(1);

	if (!pt.has("local")) {
		conf.setNumWorkers(par);

		StormSubmitter.submitTopologyWithProgressBar("throughput-"+pt.get("name", "no_name"), conf, builder.createTopology());
	}
	else {
		conf.setMaxTaskParallelism(par);

		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("throughput", conf, builder.createTopology());

		Thread.sleep(300000);

		cluster.shutdown();
	}

}
 
Example 7
Source File: Latency.java    From flink-perf with Apache License 2.0
public static void main(String[] args) throws Exception {
	ParameterTool pt = ParameterTool.fromArgs(args);

	int par = pt.getInt("para");

	TopologyBuilder builder = new TopologyBuilder();

	builder.setSpout("source0", new Generator(pt), pt.getInt("sourceParallelism"));
	int i = 0;
	for(; i < pt.getInt("repartitions", 1) - 1;i++) {
		System.out.println("adding source"+i+" --> source"+(i+1));
		builder.setBolt("source"+(i+1), new PassThroughBolt(pt), pt.getInt("sinkParallelism")).fieldsGrouping("source" + i, new Fields("id"));
	}
	System.out.println("adding final source"+i+" --> sink");

	builder.setBolt("sink", new Sink(pt), pt.getInt("sinkParallelism")).fieldsGrouping("source"+i, new Fields("id"));


	Config conf = new Config();
	conf.setDebug(false);
	//System.exit(1);

	if (!pt.has("local")) {
		conf.setNumWorkers(par);

		StormSubmitter.submitTopologyWithProgressBar("throughput-"+pt.get("name", "no_name"), conf, builder.createTopology());
	}
	else {
		conf.setMaxTaskParallelism(par);

		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("throughput", conf, builder.createTopology());

		Thread.sleep(300000);

		cluster.shutdown();
	}

}
 
Example 8
Source File: StormRunner.java    From flink-perf with Apache License 2.0
public static void runTopologyLocally(StormTopology topology, String topologyName, Config conf, int runtimeInSeconds)
    throws InterruptedException {
  LocalCluster cluster = new LocalCluster();
  cluster.submitTopology(topologyName, conf, topology);
  Thread.sleep((long) runtimeInSeconds * MILLIS_IN_SEC); // MILLIS_IN_SEC: class constant for seconds-to-milliseconds conversion
  cluster.killTopology(topologyName);
  cluster.shutdown();
}
 
Example 9
Source File: WordCountTopology.java    From storm-example with Apache License 2.0
public static void main(String[] args) throws Exception {

        SentenceSpout spout = new SentenceSpout();
        SplitSentenceBolt splitBolt = new SplitSentenceBolt();
        WordCountBolt countBolt = new WordCountBolt();
        ReportBolt reportBolt = new ReportBolt();


        TopologyBuilder builder = new TopologyBuilder();

        builder.setSpout(SENTENCE_SPOUT_ID, spout);
        // SentenceSpout --> SplitSentenceBolt
        builder.setBolt(SPLIT_BOLT_ID, splitBolt)
                .shuffleGrouping(SENTENCE_SPOUT_ID);
        // SplitSentenceBolt --> WordCountBolt
        builder.setBolt(COUNT_BOLT_ID, countBolt)
                .fieldsGrouping(SPLIT_BOLT_ID, new Fields("word"));
        // WordCountBolt --> ReportBolt
        builder.setBolt(REPORT_BOLT_ID, reportBolt)
                .globalGrouping(COUNT_BOLT_ID);

        Config config = new Config();

        LocalCluster cluster = new LocalCluster();

        cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
        waitForSeconds(10);
        cluster.killTopology(TOPOLOGY_NAME);
        cluster.shutdown();
    }
 
Example 10
Source File: TridentThroughput.java    From flink-perf with Apache License 2.0
public static void main(String[] args) throws Exception {
	ParameterTool pt = ParameterTool.fromArgs(args);

	int par = pt.getInt("para");


	TridentTopology topology = new TridentTopology();
	Stream sourceStream = topology.newStream("source", new Generator(pt)).parallelismHint(pt.getInt("sourceParallelism"));

	Stream repart = sourceStream.partitionBy(new Fields("id"));
	for(int i = 0; i < pt.getInt("repartitions", 1) - 1; i++) {
		repart = repart.each(new Fields("id"), new IdentityEach(), new Fields("id"+i)).partitionBy(new Fields("id"+i));
	}
	repart.each(new Fields("id", "host", "time", "payload"), new Sink(pt), new Fields("dontcare")).parallelismHint(pt.getInt("sinkParallelism"));

	Config conf = new Config();
	conf.setDebug(false);

	if (!pt.has("local")) {
		conf.setNumWorkers(par);

		StormSubmitter.submitTopologyWithProgressBar("throughput-"+pt.get("name", "no_name"), conf, topology.build());
	}
	else {
		conf.setMaxTaskParallelism(par);

		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("throughput", conf, topology.build());

		Thread.sleep(30000);

		cluster.shutdown();
	}

}
 
Example 11
Source File: SelectorBoltIT.java    From flowmix with Apache License 2.0
@Test
public void testSelection_fieldsDontExistDontReturn() {

  Flow flow = new FlowBuilder()
          .id("myflow")
          .flowDefs()
          .stream("stream1")
          .select().fields("key7").end()
          .endStream()
          .endDefs()
          .createFlow();

  StormTopology topology = buildTopology(flow, 10);
  Config conf = new Config();
  conf.registerSerialization(Event.class, EventSerializer.class);
  conf.setSkipMissingKryoRegistrations(false);
  conf.setNumWorkers(20);

  LocalCluster cluster = new LocalCluster();
  cluster.submitTopology("test", conf, topology);

  try {
    Thread.sleep(3000);
  } catch (InterruptedException e) {
    e.printStackTrace();
  }

  cluster.shutdown();
  assertEquals(0, MockSinkBolt.getEvents().size());
}
 
Example 12
Source File: SelectorBoltIT.java    From flowmix with Apache License 2.0
@Test
public void testSelection_basic() {

  Flow flow = new FlowBuilder()
    .id("myflow")
    .flowDefs()
      .stream("stream1")
        .select().fields("key1", "key2").end()
      .endStream()
    .endDefs()
  .createFlow();

  StormTopology topology = buildTopology(flow, 10);
  Config conf = new Config();
  conf.registerSerialization(Event.class, EventSerializer.class);
  conf.setSkipMissingKryoRegistrations(false);
  conf.setNumWorkers(20);

  LocalCluster cluster = new LocalCluster();
  cluster.submitTopology("test", conf, topology);

  try {
    Thread.sleep(3000);
  } catch (InterruptedException e) {
    e.printStackTrace();
  }

  cluster.shutdown();
  System.out.println(MockSinkBolt.getEvents());
  assertTrue(MockSinkBolt.getEvents().size() > 0);

  for(Event event : MockSinkBolt.getEvents()) {
    assertNotNull(event.get("key1"));
    assertNotNull(event.get("key2"));
    assertNull(event.get("key3"));
    assertNull(event.get("key4"));
    assertNull(event.get("key5"));
  }
}
 
Example 13
Source File: LocalTopologyRunner.java    From yuzhouwan with Apache License 2.0
public static void main(String[] args) {

        StormTopology topology = CreditCardTopologyBuilder.build();
        Config config = new Config();
        config.setDebug(true);

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("local-topology", config, topology);
        Utils.sleep(30000);

        cluster.killTopology("local-topology");
        cluster.shutdown();
    }
 
Example 14
Source File: SequenceTopologyTool.java    From jstorm with Apache License 2.0
public void SetLocalTopology() throws Exception {
    Config conf = getConf();
    
    StormTopology topology = buildTopology();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("SplitMerge", conf, topology);
    Thread.sleep(60000);
    cluster.shutdown();
}
 
Example 15
Source File: E9_ContrastEnhancementTopology.java    From StormCV with Apache License 2.0
/**
 * @param args
 */
public static void main(String[] args) 
{
	// first some global (topology configuration)
	StormCVConfig conf = new StormCVConfig();

	/**
	 * Sets the OpenCV library to be used which depends on the system the topology is being executed on
	 */
	//conf.put( StormCVConfig.STORMCV_OPENCV_LIB, "mac64_opencv_java248.dylib" );

	conf.setNumWorkers( 4 );                                           // number of workers in the topology
	conf.setMaxSpoutPending( 32 );                                     // maximum un-acked/un-failed frames per spout (spout blocks if this number is reached)
	conf.put( StormCVConfig.STORMCV_FRAME_ENCODING, Frame.JPG_IMAGE ); // indicates frames will be encoded as JPG throughout the topology (JPG is the default when not explicitly set)
	conf.put( Config.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS, true );         // True if Storm should timeout messages or not.
	conf.put( Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS , 10 );             // The maximum amount of time given to the topology to fully process a message emitted by a spout (default = 30)
	conf.put( StormCVConfig.STORMCV_SPOUT_FAULTTOLERANT, false );      // indicates if the spout must be fault tolerant; i.e. spouts do NOT! replay tuples on fail
	conf.put( StormCVConfig.STORMCV_CACHES_TIMEOUT_SEC, 30 );          // TTL (seconds) for all elements in all caches throughout the topology (avoids memory overload)

	// some live camera feeds from http://webcam.prvgld.nl/
	List<String> urls = new ArrayList<String>();
	urls.add( "rtsp://streaming3.webcam.nl:1935/n224/n224.stream" );
	urls.add( "rtsp://streaming3.webcam.nl:1935/n233/n233.stream" );

	int frameSkip = 13;

	// now create the topology itself (spout -> contrast enhancement --> streamer)
	TopologyBuilder builder = new TopologyBuilder();

	// just one spout reading streams; i.e. this spout reads two streams in parallel
	builder.setSpout( "spout", new CVParticleSpout( new StreamFrameFetcher( urls ).frameSkip( frameSkip ) ), 1 );

	// add bolt that does contrast enhancement (choose HSV_EQUALIZE_HIST or GRAY_EQUALIZE_HIST as algorithm)
	builder.setBolt( "contrastenhancement", new SingleInputBolt( new GlobalContrastEnhancementOp().setAlgorithm( CEAlgorithm.HSV_EQUALIZE_HIST ) ), 1 )
	.shuffleGrouping( "spout" );

	// add bolt that creates a web service on port 8558 enabling users to view the result
	builder.setBolt( "streamer", new BatchInputBolt(
			new SlidingWindowBatcher( 2, frameSkip).maxSize( 6 ), // note the required batcher used as a buffer and maintains the order of the frames
			new MjpegStreamingOp().port( 8558 ).framerate( 5 ) ).groupBy( new Fields( FrameSerializer.STREAMID ) )
			, 1)
			.shuffleGrouping( "contrastenhancement" );

	// NOTE: if the topology is started (locally) go to http://localhost:8558/streaming/tiles and click the image to see the stream!

	try 
	{	
		// run in local mode
		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology( "BackgroundSubtraction", conf, builder.createTopology() );
		Utils.sleep( 120 * 1000 ); // run for two minutes and then kill the topology
		cluster.shutdown();
		System.exit( 1 );

		// run on a storm cluster
		// StormSubmitter.submitTopology("some_topology_name", conf, builder.createTopology());
	} 
	catch (Exception e)
	{
		e.printStackTrace();
	}
}
 
Example 16
Source File: SentimentAnalysisTopology.java    From StormTweetsSentimentD3Viz with Apache License 2.0
public static final void main(final String[] args) throws Exception {
	final ApplicationContext applicationContext = new ClassPathXmlApplicationContext("applicationContext.xml");

	final JmsProvider jmsProvider = new SpringJmsProvider(applicationContext, "jmsConnectionFactory",
			                                                          "notificationQueue");

	final TopologyBuilder topologyBuilder = new TopologyBuilder();

	final JmsBolt jmsBolt = new JmsBolt();
	jmsBolt.setJmsProvider(jmsProvider);
	jmsBolt.setJmsMessageProducer((session, input) -> {
           final String json = "{\"stateCode\":\"" + input.getString(0) + "\", \"sentiment\":" + input.getInteger(1) + "}";
           return session.createTextMessage(json);
       });

	try {
		final Config config = new Config();
		config.setMessageTimeoutSecs(120);
		config.setDebug(true);

		topologyBuilder.setSpout("twitterspout", new TwitterSpout());
		topologyBuilder.setBolt("statelocatorbolt", new StateLocatorBolt())
				.shuffleGrouping("twitterspout");
		topologyBuilder.setBolt("sentimentcalculatorbolt", new SentimentCalculatorBolt())
				.fieldsGrouping("statelocatorbolt", new Fields("state"));
		topologyBuilder.setBolt("jmsBolt", jmsBolt).fieldsGrouping("sentimentcalculatorbolt", new Fields("stateCode"));

		//Submit it to the cluster, or submit it locally
		if (null != args && 0 < args.length) {
			config.setNumWorkers(3);
			StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
		} else {
			config.setMaxTaskParallelism(10);
			final LocalCluster localCluster = new LocalCluster();
			localCluster.submitTopology(Constants.TOPOLOGY_NAME, config, topologyBuilder.createTopology());
			//Run this topology for 600 seconds so that we can complete processing of decent # of tweets.
			Utils.sleep(600 * 1000);

			LOGGER.info("Shutting down the cluster...");
			localCluster.killTopology(Constants.TOPOLOGY_NAME);
			localCluster.shutdown();

			Runtime.getRuntime().addShutdownHook(new Thread()	{
				@Override
				public void run()	{
					LOGGER.info("Shutting down the cluster...");
					localCluster.killTopology(Constants.TOPOLOGY_NAME);
					localCluster.shutdown();
				}
			});
		}
	} catch (final Exception exception) {
		//Deliberate no op;
		exception.printStackTrace();
	}
	LOGGER.info("\n\n\n\t\t*****Please clean your temp folder \"{}\" now!!!*****", System.getProperty("java.io.tmpdir"));
}
 
Example 17
Source File: E2_FacedetectionTopology.java    From StormCV with Apache License 2.0
public static void main(String[] args){
	// first some global (topology configuration)
	StormCVConfig conf = new StormCVConfig();

	/**
	 * Sets the OpenCV library to be used which depends on the system the topology is being executed on
	 */
	conf.put(StormCVConfig.STORMCV_OPENCV_LIB, "mac64_opencv_java248.dylib");
	//conf.put(StormCVConfig.STORMCV_OPENCV_LIB, "win64_opencv_java248.dll");
	
	conf.setNumWorkers(3); // number of workers in the topology
	conf.setMaxSpoutPending(32); // maximum un-acked/un-failed frames per spout (spout blocks if this number is reached)
	conf.put(StormCVConfig.STORMCV_FRAME_ENCODING, Frame.JPG_IMAGE); // indicates frames will be encoded as JPG throughout the topology (JPG is the default when not explicitly set)
	conf.put(Config.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS, true); // True if Storm should timeout messages or not.
	conf.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS , 10); // The maximum amount of time given to the topology to fully process a message emitted by a spout (default = 30)
	conf.put(StormCVConfig.STORMCV_SPOUT_FAULTTOLERANT, false); // indicates if the spout must be fault tolerant; i.e. spouts do NOT! replay tuples on fail
	conf.put(StormCVConfig.STORMCV_CACHES_TIMEOUT_SEC, 30); // TTL (seconds) for all elements in all caches throughout the topology (avoids memory overload)

	String userDir = System.getProperty("user.dir").replaceAll("\\\\", "/");
	
	// create a list with files to be processed, in this case just one. Multiple files will be spread over the available spouts
	List<String> files = new ArrayList<String>();
	files.add( "file://"+ userDir +"/resources/data/" ); // actual use of this directory depends on the fetcher. The ImageFetcher will read all image files present within the directory.

	// now create the topology itself (spout -> facedetection --> drawer)
	TopologyBuilder builder = new TopologyBuilder();
	 // just one spout reading images from a directory, sleeping 100ms after each file was read
	builder.setSpout("spout", new CVParticleSpout( new ImageFetcher(files).sleepTime(100) ), 1 );
	
	// one bolt with a HaarCascade classifier with the lbpcascade_frontalface model to detecting faces. This operation outputs a Frame including the Features with detected faces
	builder.setBolt("face_detect", new SingleInputBolt(
		new HaarCascadeOp("face", "lbpcascade_frontalface.xml")
				.outputFrame(true)
		), 1)
		.shuffleGrouping("spout");
			
	// The bounding boxes of the Face Feature are extracted from the Frame and emitted as separate frames
	builder.setBolt("face_extraction", new SingleInputBolt(
			new ROIExtractionOp("face").spacing(25)
		), 1)
		.shuffleGrouping("face_detect");
			
	// simple bolt that draws Features (i.e. locations of features) into the frame and writes the frame to the local file system at /output/facedetections
	builder.setBolt("drawer", new SingleInputBolt(new DrawFeaturesOp().destination("file://"+userDir+ "/output/facedetections/")), 1)
		.shuffleGrouping("face_extraction");
	
	try {
		// run in local mode
		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology( "facedetection", conf, builder.createTopology() );
		Utils.sleep(300*1000); // run for five minutes and then kill the topology
		cluster.shutdown();
		System.exit(1);
		
		// run on a storm cluster
		// StormSubmitter.submitTopology("some_topology_name", conf, builder.createTopology());
	} catch (Exception e){
		e.printStackTrace();
	}
}
 
Example 18
Source File: WordCountTopologyNode.java    From flink-perf with Apache License 2.0
public static void main(String[] args) throws Exception {

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new RandomSentence(), 5);

    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));

    Config conf = new Config();
    conf.setDebug(true);


    if (args != null && args.length > 0) {
      conf.setNumWorkers(3);

      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    }
    else {
      conf.setMaxTaskParallelism(3);

      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("word-count", conf, builder.createTopology());

      Thread.sleep(10000);

      cluster.shutdown();
    }
  }
 
Example 19
Source File: BatchMetaTopology.java    From jstorm with Apache License 2.0
public static void SetLocalTopology() throws Exception {
    TopologyBuilder builder = SetBuilder();

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(topologyName, conf, builder.createTopology());

    Thread.sleep(600000);

    cluster.shutdown();
}