Java Code Examples for backtype.storm.Config.setMaxSpoutPending()

The following are Java code examples showing how to use the setMaxSpoutPending() method of the backtype.storm.Config class. You can vote up the examples you find useful; your votes help our system surface better examples.
Example 1
Project: storm-hbase-1.0.x   File: WordCountTrident.java   Source Code and License Vote up 6 votes
/**
 * Entry point: with one argument the word-count topology runs for a minute on
 * an in-process LocalCluster; with two it is submitted to a real cluster.
 */
public static void main(String[] args) throws Exception {
    Config stormConf = new Config();
    stormConf.setMaxSpoutPending(5);
    switch (args.length) {
        case 1: {
            // Local mode: args[0] is the HDFS url. Run for one minute, then
            // tear everything down and exit.
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology("wordCounter", stormConf, buildTopology(args[0]));
            Thread.sleep(60 * 1000);
            localCluster.killTopology("wordCounter");
            localCluster.shutdown();
            System.exit(0);
            break;
        }
        case 2: {
            // Remote mode: args[0] is the HDFS url, args[1] the topology name.
            stormConf.setNumWorkers(3);
            StormSubmitter.submitTopology(args[1], stormConf, buildTopology(args[0]));
            break;
        }
        default:
            System.out.println("Usage: TridentFileTopology <hdfs url> [topology name]");
    }
}
 
Example 2
Project: jstorm-0.9.6.3-   File: TransactionalGlobalCount.java   Source Code and License Vote up 6 votes
/**
 * Builds the transactional global-count topology and submits it to a cluster
 * via StormSubmitter.
 */
public static void main(String[] args) throws Exception {
    // Transactional spout that replays batches of words from the in-memory
    // DATA fixture, PARTITION_TAKE_PER_BATCH tuples per partition per batch.
    MemoryTransactionalSpout wordSpout =
            new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);

    TransactionalTopologyBuilder topologyBuilder =
            new TransactionalTopologyBuilder("global-count", "spout", wordSpout, 2);
    topologyBuilder.setBolt("partial-count", new BatchCount(), 3).noneGrouping("spout");
    topologyBuilder.setBolt("sum", new UpdateGlobalCount(), 1).globalGrouping("partial-count");

    Config conf = new Config();
    conf.setDebug(true);
    conf.setMaxSpoutPending(3);
    conf.put(Config.TOPOLOGY_WORKERS, 9);
    // Acking disabled for this demo.
    Config.setNumAckers(conf, 0);

    StormSubmitter.submitTopology("global-count-topology", conf, topologyBuilder.buildTopology());
}
 
Example 3
Project: learn_jstorm   File: TransactionalGlobalCount.java   Source Code and License Vote up 6 votes
/**
 * Builds the transactional global-count topology and submits it to a remote
 * cluster via StormSubmitter (the LocalCluster variant is left commented out).
 */
public static void main(String[] args) throws Exception {
	    // Transactional spout that replays batches of words from the in-memory DATA fixture.
	    MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
	    TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("global-count", "spout", spout, 2);
	    // partial-count bolts each count a slice of the batch; sum aggregates them globally.
	    builder.setBolt("partial-count", new BatchCount(), 3).noneGrouping("spout");
	    builder.setBolt("sum", new UpdateGlobalCount(), 1).globalGrouping("partial-count");

//	    LocalCluster cluster = new LocalCluster();

	    Config config = new Config();
	    config.setDebug(true);
	    config.setMaxSpoutPending(3);
	    config.put(Config.TOPOLOGY_WORKERS, 9);
	    // Acking disabled for this demo.
	    Config.setNumAckers(config, 0);
	    
	    StormSubmitter.submitTopology("global-count-topology", config, builder.buildTopology());

//	    Thread.sleep(3000);
//	    cluster.shutdown();
	  }
 
Example 4
Project: big-data-system   File: TransactionalGlobalCount.java   Source Code and License Vote up 6 votes
/**
 * Demo entry point: runs the transactional global-count topology on an
 * in-process LocalCluster for three seconds, then shuts it down.
 */
public static void main(String[] args) throws Exception {
  // Transactional spout that replays batches of words from the in-memory DATA fixture.
  MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
  TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("global-count", "spout", spout, 3);
  // partial-count bolts each count a slice of the batch; sum aggregates them globally.
  builder.setBolt("partial-count", new BatchCount(), 5).noneGrouping("spout");
  builder.setBolt("sum", new UpdateGlobalCount()).globalGrouping("partial-count");

  LocalCluster cluster = new LocalCluster();

  Config config = new Config();
  config.setDebug(true);
  config.setMaxSpoutPending(3);

  cluster.submitTopology("global-count-topology", config, builder.buildTopology());

  Thread.sleep(3000);
  cluster.shutdown();
}
 
Example 5
Project: big-data-system   File: TridentWordCount.java   Source Code and License Vote up 6 votes
/**
 * Entry point: with no arguments the word-count topology runs locally against
 * an in-process DRPC server; otherwise it is submitted to a real cluster
 * under the name given in args[0].
 */
public static void main(String[] args) throws Exception {
  Config topologyConf = new Config();
  topologyConf.setMaxSpoutPending(20);
  if (args.length > 0) {
    // Remote submission: args[0] is the topology name.
    topologyConf.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar(args[0], topologyConf, buildTopology(null));
    return;
  }
  // Local mode: poll the in-process DRPC server once a second.
  LocalDRPC drpc = new LocalDRPC();
  LocalCluster localCluster = new LocalCluster();
  localCluster.submitTopology("wordCounter", topologyConf, buildTopology(drpc));
  for (int attempt = 0; attempt < 100; attempt++) {
    System.out.println("DRPC RESULT: " + drpc.execute("words", "cat the dog jumped"));
    Thread.sleep(1000);
  }
}
 
Example 6
Project: big-data-system   File: TransactionalWords.java   Source Code and License Vote up 6 votes
/**
 * Demo entry point: runs the transactional top-n-words topology on an
 * in-process LocalCluster for three seconds, then shuts it down.
 */
public static void main(String[] args) throws Exception {
  // Transactional spout that replays batches of words from the in-memory DATA fixture.
  MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
  TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("top-n-words", "spout", spout, 2);
  // Per-word counting, then re-bucketing of counts, then per-bucket totals.
  builder.setBolt("count", new KeyedCountUpdater(), 5).fieldsGrouping("spout", new Fields("word"));
  builder.setBolt("bucketize", new Bucketize()).noneGrouping("count");
  builder.setBolt("buckets", new BucketCountUpdater(), 5).fieldsGrouping("bucketize", new Fields("bucket"));


  LocalCluster cluster = new LocalCluster();

  Config config = new Config();
  config.setDebug(true);
  config.setMaxSpoutPending(3);

  cluster.submitTopology("top-n-topology", config, builder.buildTopology());

  Thread.sleep(3000);
  cluster.shutdown();
}
 
Example 7
Project: jstrom   File: TransactionalWordsTest.java   Source Code and License Vote up 6 votes
/**
 * Smoke test: runs the transactional top-n-words topology on an in-process
 * LocalCluster for 60 seconds and fails if anything throws.
 */
@Test
public void test_transaction_word() {
    LocalCluster cluster = null;
    try {
        MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
        TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("top-n-words", "spout", spout, 2);
        builder.setBolt("count", new KeyedCountUpdater(), 5).fieldsGrouping("spout", new Fields("word"));
        builder.setBolt("bucketize", new Bucketize()).shuffleGrouping("count");
        builder.setBolt("buckets", new BucketCountUpdater(), 5).fieldsGrouping("bucketize", new Fields("bucket"));

        cluster = new LocalCluster();

        Config config = new Config();
        config.setDebug(true);
        config.setMaxSpoutPending(3);

        cluster.submitTopology("top-n-topology", config, builder.buildTopology());

        JStormUtils.sleepMs(60 * 1000);
    } catch (Exception e) {
        // FIX: the original discarded the exception entirely; include it so
        // the failure message says what actually went wrong.
        Assert.fail("Failed to run simple transaction: " + e);
    } finally {
        // FIX: shut the local cluster down even when the test fails, so it
        // does not leak into subsequent tests.
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
 
Example 8
Project: cdh-storm   File: TransactionalGlobalCount.java   Source Code and License Vote up 6 votes
/**
 * Demo entry point: runs the transactional global-count topology on an
 * in-process LocalCluster for a few seconds, then tears it down.
 */
public static void main(String[] args) throws Exception {
  // Spout that replays batches of words from the in-memory DATA fixture.
  MemoryTransactionalSpout wordSpout =
      new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);

  TransactionalTopologyBuilder topology =
      new TransactionalTopologyBuilder("global-count", "spout", wordSpout, 3);
  topology.setBolt("partial-count", new BatchCount(), 5).noneGrouping("spout");
  topology.setBolt("sum", new UpdateGlobalCount()).globalGrouping("partial-count");

  Config stormConf = new Config();
  stormConf.setDebug(true);
  stormConf.setMaxSpoutPending(3);

  LocalCluster localCluster = new LocalCluster();
  localCluster.submitTopology("global-count-topology", stormConf, topology.buildTopology());

  Thread.sleep(3000);
  localCluster.shutdown();
}
 
Example 9
Project: cdh-storm   File: TridentWordCount.java   Source Code and License Vote up 6 votes
/**
 * Entry point: with no arguments the word-count topology runs locally against
 * an in-process DRPC server; otherwise it is submitted to a real cluster
 * under the name given in args[0].
 */
public static void main(String[] args) throws Exception {
  Config conf = new Config();
  conf.setMaxSpoutPending(20);
  if (args.length == 0) {
    // Local mode: poll the in-process DRPC server once a second.
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("wordCounter", conf, buildTopology(drpc));
    for (int i = 0; i < 100; i++) {
      System.out.println("DRPC RESULT: " + drpc.execute("words", "cat the dog jumped"));
      Thread.sleep(1000);
    }
  }
  else {
    // Remote submission: args[0] is the topology name.
    conf.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(null));
  }
}
 
Example 10
Project: cdh-storm   File: TransactionalWords.java   Source Code and License Vote up 6 votes
/**
 * Demo entry point: runs the transactional top-n-words topology on an
 * in-process LocalCluster for a few seconds, then tears it down.
 */
public static void main(String[] args) throws Exception {
  // Spout that replays batches of words from the in-memory DATA fixture.
  MemoryTransactionalSpout wordSpout =
      new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);

  TransactionalTopologyBuilder topology =
      new TransactionalTopologyBuilder("top-n-words", "spout", wordSpout, 2);
  // Per-word counting, then re-bucketing of counts, then per-bucket totals.
  topology.setBolt("count", new KeyedCountUpdater(), 5).fieldsGrouping("spout", new Fields("word"));
  topology.setBolt("bucketize", new Bucketize()).noneGrouping("count");
  topology.setBolt("buckets", new BucketCountUpdater(), 5).fieldsGrouping("bucketize", new Fields("bucket"));

  Config stormConf = new Config();
  stormConf.setDebug(true);
  stormConf.setMaxSpoutPending(3);

  LocalCluster localCluster = new LocalCluster();
  localCluster.submitTopology("top-n-topology", stormConf, topology.buildTopology());

  Thread.sleep(3000);
  localCluster.shutdown();
}
 
Example 11
Project: LearnStorm   File: LogAnalyzer.java   Source Code and License Vote up 6 votes
/**
 * Configures the LogAnalyzerV1 topology (Kafka spout feeding Elasticsearch
 * bolts) and submits it to the cluster.
 *
 * @throws AlreadyAliveException    if a topology with the same name is already running
 * @throws InvalidTopologyException if the assembled topology is malformed
 * @throws AuthorizationException   if submission is not permitted
 */
private void buildAndSubmit() throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
		// FIX: parseInt avoids the needless Integer boxing of Integer.valueOf().
		final int numWorkers = Integer.parseInt(topologyConfig.getProperty("num.workers"));
		Config config = new Config();
		config.setDebug(DEBUG);
		config.setNumWorkers(numWorkers);
		config.setMaxSpoutPending(1000000);
		// https://github.com/apache/storm/tree/v0.10.0/external/storm-kafka
		// Must be larger than the KafkaSpout retryDelayMaxMs (60 secs by
		// default); the Storm default of 30 secs would let tuples time out
		// while a Kafka retry is still pending.
		config.setMessageTimeoutSecs(600);

		TopologyBuilder builder = new TopologyBuilder();
		configureKafkaSpout(builder, config);
		configureESBolts(builder, config);

		StormSubmitter.submitTopology("LogAnalyzerV1", config, builder.createTopology());
	}
 
Example 12
Project: LearnStorm   File: ApLogGenerator.java   Source Code and License Vote up 6 votes
/**
 * Wires a KafkaBolt into the topology that publishes tuples to the topic
 * named by the "kafka.topic" property, mapping the "key"/"log" tuple fields
 * to the Kafka message key/value.
 */
private void configureKafkaBolt(TopologyBuilder builder, Config config) {
	String topic = topologyConfig.getProperty("kafka.topic");
	Properties props = new Properties();
	props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerUrl);
	props.put(ProducerConfig.CLIENT_ID_CONFIG, "storm-kafka-producer");
	props.put("metadata.broker.list", brokerUrl);
	props.put("serializer.class", "kafka.serializer.StringEncoder");
	props.put("request.required.acks", "1");
	// NOTE(review): setting max spout pending inside a *bolt* configuration
	// method looks misplaced — confirm this is intentional for the topology.
	config.setMaxSpoutPending(20);
	config.put(KafkaBolt.KAFKA_BROKER_PROPERTIES, props);
	KafkaBolt<String, String> kafkaBolt = new KafkaBolt<String, String>().withTopicSelector(new DefaultTopicSelector(topic))
									.withTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper<String, String>("key", "log"));
	builder.setBolt("KafkaBolt", kafkaBolt, 3).shuffleGrouping(SPOUT_ID).setDebug(DEBUG);
}
 
Example 13
Project: openimaj   File: StormPlayground.java   Source Code and License Vote up 6 votes
/**
 * Demo entry point: wires two random-field spouts into a join bolt, runs the
 * topology locally for ten seconds, then kills it and shuts the cluster down.
 */
public static void main(String[] args) {
	final String topologyName = "playTopology";

	final Config stormConf = new Config();
	stormConf.setDebug(false);
	stormConf.setNumWorkers(2);
	stormConf.setMaxSpoutPending(1);
	stormConf.setFallBackOnJavaSerialization(false);
	stormConf.setSkipMissingKryoRegistrations(false);

	final TopologyBuilder topologyBuilder = new TopologyBuilder();
	// RandomFieldSpout arguments: (nfields, seed, min, max)
	topologyBuilder.setSpout("randomSpout1", new RandomFieldSpout(2, 0, 0, 1));
	topologyBuilder.setSpout("randomSpout2", new RandomFieldSpout(2, 10, 0, 1));
	JoinBolt.connectNewBolt(topologyBuilder);

	final LocalCluster cluster = new LocalCluster();
	cluster.submitTopology(topologyName, stormConf, topologyBuilder.createTopology());
	Utils.sleep(10000);
	cluster.killTopology(topologyName);
	cluster.shutdown();
}
 
Example 14
Project: LearnStorm   File: TridentWordCount.java   Source Code and License Vote up 6 votes
/**
 * Entry point: with no arguments the word-count topology runs locally against
 * an in-process DRPC server; otherwise it is submitted to a real cluster
 * under the name given in args[0].
 */
public static void main(String[] args) throws Exception {
	Config conf = new Config();
	conf.setMaxSpoutPending(20);
	if (args.length == 0) {
		// Local mode: poll the in-process DRPC server once a second.
		LocalDRPC drpc = new LocalDRPC();
		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("wordCounter", conf, buildTopology(drpc));
		for (int i = 0; i < 100; i++) {
			// The query takes as input a whitespace separated list of words
			// and return the sum of the counts for those words.
			System.out.println("DRPC RESULT: " + drpc.execute("words", "cat$$dog$$the$$man"));
			Thread.sleep(1000);
		}
	} else {
		// Remote submission: args[0] is the topology name.
		conf.setNumWorkers(3);
		StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(null));
	}
}
 
Example 15
Project: java   File: DeliveryTopology.java   Source Code and License Vote up 5 votes
/**
 * Entry point for the delivery-check topology. With a CLI argument the
 * topology is submitted to a real cluster under that name; otherwise it runs
 * on an in-process LocalCluster until killed.
 */
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
  LOGGER.info("Starting..");

  TopologyBuilder topology = new TopologyBuilder();
  topology.setSpout("trade", new DeliveryCheckSpout(), 1);
  topology.setBolt("eligibility", new DeliveryCheckBolt(), 10).shuffleGrouping("trade");
  // Odd and even trades fan out on dedicated streams from the eligibility bolt.
  topology.setBolt("odd", new DeliveryCheckOddBolt(), 10)
      .shuffleGrouping("eligibility", "oddstream");
  topology.setBolt("even", new DeliveryCheckEvenBolt(), 10)
      .shuffleGrouping("eligibility", "evenstream");

  Config stormConf = new Config();
  stormConf.setDebug(false);
  stormConf.setMaxSpoutPending(5);

  boolean remote = args != null && args.length > 0;
  if (remote) {
    stormConf.setNumWorkers(1);
    LOGGER.info("Submitting DeliveryTopology");
    StormSubmitter.submitTopologyWithProgressBar(args[0], stormConf, topology.createTopology());
  } else {
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("DeliveryTopology", stormConf, topology.createTopology());
    Utils.sleep(100000000);
    cluster.killTopology("DeliveryTopology");
    cluster.shutdown();
  }
}
 
Example 16
Project: jstorm-0.9.6.3-   File: TransactionalWordsTest.java   Source Code and License Vote up 5 votes
/**
 * Smoke test: runs the transactional top-n-words topology on an in-process
 * LocalCluster for 60 seconds and fails if anything throws.
 */
@Test
public void test_transaction_word(){
	LocalCluster cluster = null;
	try {
		MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA,
				new Fields("word"), PARTITION_TAKE_PER_BATCH);
		TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder(
				"top-n-words", "spout", spout, 2);
		builder.setBolt("count", new KeyedCountUpdater(), 5).fieldsGrouping(
				"spout", new Fields("word"));
		builder.setBolt("bucketize", new Bucketize()).shuffleGrouping("count");
		builder.setBolt("buckets", new BucketCountUpdater(), 5).fieldsGrouping(
				"bucketize", new Fields("bucket"));

		cluster = new LocalCluster();

		Config config = new Config();
		config.setDebug(true);
		config.setMaxSpoutPending(3);

		cluster.submitTopology("top-n-topology", config,
				builder.buildTopology());

		JStormUtils.sleepMs(60 * 1000);
	} catch (Exception e) {
		// FIX: the original discarded the exception entirely; include it so
		// the failure message says what actually went wrong.
		Assert.fail("Failed to run simple transaction: " + e);
	} finally {
		// FIX: shut the local cluster down even when the test fails, so it
		// does not leak into subsequent tests.
		if (cluster != null) {
			cluster.shutdown();
		}
	}
}
 
Example 17
Project: learn_jstorm   File: TransactionalWordsTest.java   Source Code and License Vote up 5 votes
/**
 * Smoke test: runs the transactional top-n-words topology on an in-process
 * LocalCluster for 60 seconds and fails if anything throws.
 */
@Test
public void test_transaction_word(){
	LocalCluster cluster = null;
	try {
		MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA,
				new Fields("word"), PARTITION_TAKE_PER_BATCH);
		TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder(
				"top-n-words", "spout", spout, 2);
		builder.setBolt("count", new KeyedCountUpdater(), 5).fieldsGrouping(
				"spout", new Fields("word"));
		builder.setBolt("bucketize", new Bucketize()).shuffleGrouping("count");
		builder.setBolt("buckets", new BucketCountUpdater(), 5).fieldsGrouping(
				"bucketize", new Fields("bucket"));

		cluster = new LocalCluster();

		Config config = new Config();
		config.setDebug(true);
		config.setMaxSpoutPending(3);

		cluster.submitTopology("top-n-topology", config,
				builder.buildTopology());

		JStormUtils.sleepMs(60 * 1000);
	} catch (Exception e) {
		// FIX: the original discarded the exception entirely; include it so
		// the failure message says what actually went wrong.
		Assert.fail("Failed to run simple transaction: " + e);
	} finally {
		// FIX: shut the local cluster down even when the test fails, so it
		// does not leak into subsequent tests.
		if (cluster != null) {
			cluster.shutdown();
		}
	}
}
 
Example 18
Project: jstrom   File: TransactionalGlobalCount.java   Source Code and License Vote up 5 votes
/**
 * Entry point: with no arguments the transactional global-count topology runs
 * briefly on an in-process LocalCluster; with an argument it is submitted to
 * a real cluster, optionally merging extra config from a YAML file at args[0].
 */
public static void main(String[] args) throws Exception {
  // Transactional spout that replays batches of words from the in-memory DATA fixture.
  MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
  TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("global-count", "spout", spout, 3);
  builder.setBolt("partial-count", new BatchCount(), 5).noneGrouping("spout");
  builder.setBolt("sum", new UpdateGlobalCount()).globalGrouping("partial-count");

  Config config = new Config();
  config.setDebug(true);
  config.setMaxSpoutPending(3);
  
  if (args.length == 0){
      LocalCluster cluster = new LocalCluster();    
  
      cluster.submitTopology("global-count-topology", config, builder.buildTopology());
  
      Thread.sleep(100000);
      cluster.shutdown();
  }else {
  	
      config.setNumWorkers(3);
  	try {
      	// NOTE(review): presumably LoadYaml returns the parsed file as a Map
      	// (raw type here) — a parse failure is treated as best-effort below.
      	Map yamlConf = LoadConf.LoadYaml(args[0]);
      	if (yamlConf != null) {
      		config.putAll(yamlConf);
      	}
  	}catch (Exception e) {
  		System.out.println("Input " + args[0] + " isn't one yaml ");
  	}

      StormSubmitter.submitTopology("global", config, builder.buildTopology());
  }
}
 
Example 19
Project: trident-aerospike   File: StormTridentAerospikeTopology.java   Source Code and License Vote up 5 votes
/**
 * Demo entry point: runs the word-counter and fruit-counter Trident topologies
 * on an in-process LocalCluster, polls the local DRPC server 100 times, then
 * releases the DRPC server and the cluster.
 */
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(4);
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("wordCounter", conf, buildTopology1(drpc));
    cluster.submitTopology("fruitCounter", conf, buildTopology2(drpc));
    for (int i = 0; i < 100; i++) {
        System.out.println("DRPC RESULT 1: " + drpc.execute("words", "cat the dog jumped"));
        System.out.println("DRPC RESULT 2: " + drpc.execute("fruits", "2 orange"));
        Thread.sleep(1000);
    }
    // FIX: the original leaked both the DRPC server and the local cluster;
    // release them explicitly once the demo loop completes.
    drpc.shutdown();
    cluster.shutdown();
}
 
Example 20
Project: LearnStorm   File: TridentKafkaWordCount.java   Source Code and License Vote up 5 votes
/**
 * Return the consumer topology config.
 *
 * @return the topology config
 */
/**
 * Builds the Storm configuration used by the consumer topology.
 *
 * @return the topology config
 */
public Config getConsumerConfig() {
    final Config consumerConf = new Config();
    consumerConf.setMaxSpoutPending(20);
    return consumerConf;
}
 
Example 21
Project: LearnStorm   File: TridentKafkaWordCount.java   Source Code and License Vote up 5 votes
/**
     * Returns the storm config for the topology that publishes sentences to kafka "test" topic using a kafka bolt.
     * The KAFKA_BROKER_PROPERTIES is needed for the KafkaBolt.
     *
     * @return the topology config
     */
    /**
     * Builds the Storm config for the topology that publishes sentences to the
     * kafka "test" topic using a kafka bolt. The KAFKA_BROKER_PROPERTIES entry
     * carries the producer settings the KafkaBolt needs.
     *
     * @return the topology config
     */
    public Config getProducerConfig() {
        final Config producerConf = new Config();
        producerConf.setMaxSpoutPending(20);

        final Properties producerProps = new Properties();
        producerProps.put("metadata.broker.list", brokerUrl);
        producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerUrl);
        producerProps.put("serializer.class", "kafka.serializer.StringEncoder");
        producerProps.put(ProducerConfig.CLIENT_ID_CONFIG, "storm-kafka-producer");

        producerConf.put(KafkaBolt.KAFKA_BROKER_PROPERTIES, producerProps);
        return producerConf;
    }
 
Example 22
Project: heron   File: MultiStageAckingTopology.java   Source Code and License Vote up 5 votes
/**
 * Entry point: submits a three-stage acking topology (spout -> exclaim1 ->
 * exclaim2) to the cluster under the name given as the single CLI argument.
 */
public static void main(String[] args) throws Exception {
  if (args.length != 1) {
    throw new RuntimeException("Please specify the name of the topology");
  }
  TopologyBuilder builder = new TopologyBuilder();

  int parallelism = 2;
  builder.setSpout("word", new AckingTestWordSpout(), parallelism);
  // exclaim1 acks its input (true); exclaim2 does not (false).
  builder.setBolt("exclaim1", new ExclamationBolt(true), parallelism)
      .shuffleGrouping("word");
  builder.setBolt("exclaim2", new ExclamationBolt(false), parallelism)
      .shuffleGrouping("exclaim1");

  Config conf = new Config();
  conf.setDebug(true);

  // Put an arbitrary large number here if you don't want to slow the topology down
  conf.setMaxSpoutPending(1000 * 1000 * 1000);

  // To enable acking, we need to setEnableAcking true
  conf.setNumAckers(1);

  // Capture a heap dump if a worker dies from OOM.
  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");

  conf.setNumWorkers(parallelism);
  StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
 
Example 23
Project: java   File: TradeProcessingTopology.java   Source Code and License Vote up 4 votes
/**
 * Entry point for the trade-processing topology. All parallelism and tuning
 * values come from the external CONFIG source. With a CLI argument the
 * topology is submitted to the configured local Nimbus under that name;
 * otherwise it runs on an in-process LocalCluster for a configured duration.
 */
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
  TopologyBuilder builder = new TopologyBuilder();

  LOGGER.info("Building Trade Processing Topology..");

  // Spout collects trades; eligibility splits them onto report/exclude streams.
  builder.setSpout(TRD_COLLECTOR_SPOUT, new TradeCollectorSpout(),
      CONFIG.getNumber("TRD_COLLECTOR_SPOUT_PARALLELISM"));

  builder
      .setBolt(TRD_ELIGIBILITY_BOLT, new TradeEligibilityBolt(),
          CONFIG.getNumber("TRD_ELIGIBILITY_BOLT_PARALLELISM"))
      .shuffleGrouping(TRD_COLLECTOR_SPOUT);

  builder
      .setBolt(TRD_REPORTING_BOLT, new TradeReportPersistenceBolt(),
          CONFIG.getNumber("TRD_REPORTING_BOLT_PARALLELISM"))
      .shuffleGrouping(TRD_ELIGIBILITY_BOLT, REPORT_STREAM);

  builder
      .setBolt(TRD_EXCLUSION_BOLT, new TradeExclusionPersistenceBolt(),
          CONFIG.getNumber("TRD_EXCLUSION_BOLT_PARALLELISM"))
      .shuffleGrouping(TRD_ELIGIBILITY_BOLT, EXCLUDE_STREAM);

  Config conf = new Config();
  conf.setDebug(CONFIG.is("DEBUG_FLAG"));
  conf.setNumWorkers(CONFIG.getInt("NUMBER_OF_WORKERS"));
  conf.setMaxTaskParallelism(CONFIG.getInt("MAX_TASK_PARALLELISM"));
  conf.setMaxSpoutPending(CONFIG.getInt("MAX_SPOUT_PENDING"));
  conf.put(Config.TOPOLOGY_SLEEP_SPOUT_WAIT_STRATEGY_TIME_MS,
      CONFIG.getInt("MAX_SPOUT_PENDING_WAIT_MS"));
  conf.put(Config.TOPOLOGY_SPOUT_WAIT_STRATEGY, CONFIG.get("TOPOLOGY_WAIT_STRATEGY"));
  conf.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, CONFIG.getInt("TOPOLOGY_MESSAGE_TIMEOUT_SECS"));
  conf.put(Config.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS,
      CONFIG.is("TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS"));
  LOGGER.info("Submitting Trade Processing Topology..");
  if (args != null && args.length > 0) {
    // Remote mode: point at the locally-configured Nimbus/ZooKeeper endpoints.
    conf.put(Config.NIMBUS_HOST, CONFIG.get("LOCAL_NIMBUS_HOST"));
    conf.put(Config.NIMBUS_THRIFT_PORT, CONFIG.getInt("LOCAL_NIMBUS_PORT"));
    conf.put(Config.STORM_ZOOKEEPER_PORT, CONFIG.getInt("LOCAL_ZOOKEEPER_PORT"));
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
  } else {
    // Local mode: run for the configured duration, then kill and shut down.
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(TRD_PROCESSING_TOPOLOGY, conf, builder.createTopology());
    Utils.sleep(CONFIG.getLong("LOCAL_CLUSTER_RUNTIME"));
    cluster.killTopology(TRD_PROCESSING_TOPOLOGY);
    cluster.shutdown();
  }
}