Java Code Examples for backtype.storm.Config.setDebug()

The following are Java code examples showing how to use setDebug() of the backtype.storm.Config class. You can vote up the examples you like; your votes help us surface more useful examples.
Example 1
Project: RealEstate-Streaming   File: PhoenixTest.java   Source Code and License Vote up 7 votes
/**
 * Builds the Phoenix test topology (Kafka spout -> Phoenix test bolt) and
 * submits it as "simple-topology" to the Nimbus host at "localhost".
 * Submission failures are logged rather than propagated.
 *
 * @throws Exception if spout/bolt configuration fails before submission
 */
public void buildAndSubmit() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    Config config = new Config();
    config.setDebug(true);
    // String nimbusHost = topologyConfig.getProperty("nimbus.host");
    config.put(Config.NIMBUS_HOST, "localhost");

    configureKafkaSpout(builder);
    //configureRouteBolt(builder);
    configurePhoenixTest(builder);

    /*
    builder.setBolt("submitter", new SubmitBolt())
       .shuffleGrouping(ROUTE_BOLT);
    */

    try {
        StormSubmitter.submitTopology("simple-topology", config, builder.createTopology());
    } catch (Exception e) {
        // Fixed typo in the original message ("submiting").
        LOG.error("Error submitting Topology", e);
    }
}
 
Example 2
Project: splice-community-sample-code   File: MySqlToSpliceTopology.java   Source Code and License Vote up 6 votes
/**
 * Local-mode demo: streams rows read from MySQL into the Splice Machine
 * table "students", runs for three seconds on an in-process cluster,
 * then shuts down.
 *
 * @param args unused command-line arguments
 * @throws SQLException declared for the JDBC work done by the bolt
 */
public static void main(String[] args) throws SQLException {
        // Target table in Splice and the host running the Splice instance.
        final String tableName = "students";
        final String server = "localhost";

        final TopologyBuilder topologyBuilder = new TopologyBuilder();

        // Spout feeding rows out of MySQL.
        topologyBuilder.setSpout("seedDataFromMySql", new MySqlSpout());

        // Bolt that writes each row into Splice.
        topologyBuilder.setBolt("dbRowProcessing", new MySqlSpliceBolt(server, tableName), 1)
                .shuffleGrouping("seedDataFromMySql");

        final Config topologyConfig = new Config();
        topologyConfig.setDebug(true);

        final LocalCluster localCluster = new LocalCluster();
        localCluster.submitTopology("mysql-splice-topology", topologyConfig, topologyBuilder.createTopology());
        Utils.sleep(3000);
        localCluster.shutdown();
    }
 
Example 3
Project: hadooparchitecturebook   File: MovingAvgLocalTopologyRunner.java   Source Code and License Vote up 6 votes
/**
 * Runs the moving-average topology on an in-process LocalCluster with
 * debug logging. The commented-out section shows how the same topology
 * would be submitted to a remote Storm cluster instead.
 */
public static void main(String[] args) {
    final Config config = new Config();
    config.setDebug(true);

    final StormTopology topology = buildTopology();

    // Local-mode execution:
    final LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("local-moving-avg", config, topology);

    // Cluster-mode alternative:
    // try {
    //   StormSubmitter.submitTopology("cluster-moving-average", config, topology);
    // } catch (AlreadyAliveException e) {
    //   e.printStackTrace();
    // } catch (InvalidTopologyException e) {
    //   e.printStackTrace();
    // }
  }
 
Example 4
Project: RealEstate-Streaming   File: KafkaPhoenixTopology.java   Source Code and License Vote up 6 votes
/**
 * Builds the real-estate topology (Kafka spout -> route bolt -> insert
 * bolt) and submits it as "realestate-topology" to the Nimbus host at
 * "localhost". Submission failures are logged rather than propagated.
 *
 * @throws Exception if spout/bolt configuration fails before submission
 */
public void buildAndSubmit() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    Config config = new Config();
    config.setDebug(true);
    // String nimbusHost = topologyConfig.getProperty("nimbus.host");
    config.put(Config.NIMBUS_HOST, "localhost");

    configureKafkaSpout(builder);
    configureRouteBolt(builder);
    configureInsertBolt(builder);

    //builder.setBolt("submitter", new SubmitBolt())
    //   .shuffleGrouping(ROUTE_BOLT);

    try {
        StormSubmitter.submitTopology("realestate-topology", config, builder.createTopology());
    } catch (Exception e) {
        // Fixed typo in the original message ("submiting").
        LOG.error("Error submitting Topology", e);
    }
}
 
Example 5
Project: storm-demo   File: LogStatisticsTopology.java   Source Code and License Vote up 6 votes
/**
 * Wires the Kafka -> crop -> split -> HDFS log-statistics topology and
 * submits it to a remote cluster under the name given as the first
 * command-line argument. With no arguments nothing is submitted.
 */
public static void main(String[] args) {
    Config config = new Config();

    HdfsBolt hdfsBolt = makeHdfsBolt();
    KafkaSpout kafkaSpout = makeKafkaSpout(TOPIC, TOPOLOGY_NAME);

    LOG.info("Topology name is {}", TOPOLOGY_NAME);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(KAFKA_SPOUT_ID, kafkaSpout, 10);
    builder.setBolt(CROP_BOLT_ID, new CropBolt(), 10)
            .shuffleGrouping(KAFKA_SPOUT_ID);
    builder.setBolt(SPLIT_FIELDS_BOLT_ID, new SplitFieldsBolt(), 10)
            .shuffleGrouping(CROP_BOLT_ID);
    builder.setBolt(STORM_HDFS_BOLT_ID, hdfsBolt, 4)
            .fieldsGrouping(SPLIT_FIELDS_BOLT_ID, new Fields("timestamp", "fieldvalues"));

    boolean remote = args != null && args.length > 0;
    if (remote) {
        config.setDebug(false);
        config.setNumWorkers(3);
        try {
            StormSubmitter.submitTopology(args[0], config, builder.createTopology());
        } catch (InvalidTopologyException | AlreadyAliveException | AuthorizationException e) {
            e.printStackTrace();
        }
    }
}
 
Example 6
Project: cdh-storm   File: ExclamationTopology.java   Source Code and License Vote up 6 votes
/**
 * Classic exclamation demo: a word spout feeding two chained exclamation
 * bolts. With a command-line argument the topology is submitted to a real
 * cluster under that name; otherwise it runs locally for ten seconds as
 * "test", then is killed.
 */
public static void main(String[] args) throws Exception {
  TopologyBuilder topologyBuilder = new TopologyBuilder();

  topologyBuilder.setSpout("word", new TestWordSpout(), 10);
  topologyBuilder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word");
  topologyBuilder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1");

  Config config = new Config();
  config.setDebug(true);

  boolean remote = args != null && args.length > 0;
  if (remote) {
    config.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar(args[0], config, topologyBuilder.createTopology());
  } else {
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("test", config, topologyBuilder.createTopology());
    Utils.sleep(10000);
    localCluster.killTopology("test");
    localCluster.shutdown();
  }
}
 
Example 7
Project: jstrom   File: TransactionalWordsTest.java   Source Code and License Vote up 6 votes
/**
 * End-to-end check of the transactional top-N-words topology: counts
 * words per batch, bucketizes the counts, and aggregates bucket totals
 * on a local cluster for 60 seconds.
 */
@Test
public void test_transaction_word() {
    try {
        MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
        TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("top-n-words", "spout", spout, 2);
        builder.setBolt("count", new KeyedCountUpdater(), 5).fieldsGrouping("spout", new Fields("word"));
        builder.setBolt("bucketize", new Bucketize()).shuffleGrouping("count");
        builder.setBolt("buckets", new BucketCountUpdater(), 5).fieldsGrouping("bucketize", new Fields("bucket"));

        LocalCluster cluster = new LocalCluster();

        Config config = new Config();
        config.setDebug(true);
        config.setMaxSpoutPending(3);

        cluster.submitTopology("top-n-topology", config, builder.buildTopology());

        JStormUtils.sleepMs(60 * 1000);
        cluster.shutdown();
    } catch (Exception e) {
        // Surface the root cause instead of swallowing it — the original
        // failure message gave no hint of what actually went wrong.
        e.printStackTrace();
        Assert.fail("Failed to run simple transaction: " + e);
    }

}
 
Example 8
Project: StreamBench   File: IterativeTest.java   Source Code and License Vote up 6 votes
/**
 * Iterative-loop demo: "minusone" consumes the number spout plus a
 * feedback stream ("GreaterThanZero") from the "DoNothing" filter bolt,
 * which in turn consumes "minusone". Runs indefinitely on a LocalCluster.
 */
public static void main(String[] args) throws WorkloadException {
    TopologyBuilder topology = new TopologyBuilder();

    topology.setSpout("spout", new NumberSpout());
    topology.setBolt("minusone", new MinusOne())
            .shuffleGrouping("spout")
            .shuffleGrouping("DoNothing", "GreaterThanZero");
    topology.setBolt("DoNothing", new Filter())
            .shuffleGrouping("minusone");

    Config config = new Config();
    config.setDebug(true);
    config.setNumWorkers(3);

    new LocalCluster().submitTopology("kafka-spout", config, topology.createTopology());
}
 
Example 9
Project: StreamBench   File: KMeansTest.java   Source Code and License Vote up 6 votes
/**
 * Streaming k-means skeleton: the "assign" bolt pairs each point with a
 * centroid (centroids broadcast back from "aggregator" on its "centroids"
 * stream); the aggregator partitions on "centroid_index". Runs
 * indefinitely on a LocalCluster.
 */
public static void main(String[] args) throws WorkloadException {
    TopologyBuilder topology = new TopologyBuilder();

    topology.setSpout("points", new PointSpout());
    topology.setBolt("assign", new Assign())
            .shuffleGrouping("points")
            .allGrouping("aggregator", "centroids");
    topology.setBolt("aggregator", new Aggregator())
            .fieldsGrouping("assign", new Fields("centroid_index"));

    Config config = new Config();
    config.setDebug(true);
    config.setNumWorkers(3);

    new LocalCluster().submitTopology("kafka-spout", config, topology.createTopology());
}
 
Example 10
Project: StreamBench   File: TickTest.java   Source Code and License Vote up 6 votes
/**
 * Word-count with a tick stream: a Kafka spout on topic "WordCount" feeds
 * a sentence splitter and three counter bolts; a single aggregator
 * partitions counts by word and also listens to the counters' "tick"
 * stream. Runs indefinitely on a LocalCluster.
 */
public static void main(String[] args) throws WorkloadException {
    BrokerHosts zk = new ZkHosts("localhost:2181");
    SpoutConfig kafkaConf = new SpoutConfig(zk, "WordCount", "/" + "WordCount", UUID.randomUUID().toString());
    kafkaConf.scheme = new SchemeAsMultiScheme(new StringScheme());
    kafkaConf.ignoreZkOffsets = true;

    TopologyBuilder topology = new TopologyBuilder();
    topology.setSpout("spout", new KafkaSpout(kafkaConf));
    topology.setBolt("split", new SplitSentence()).shuffleGrouping("spout");
    topology.setBolt("counter", new CounterBolt(), 3).shuffleGrouping("split");
    topology.setBolt("aggregator", new AggregatorBolt(), 1)
            .fieldsGrouping("counter", Utils.DEFAULT_STREAM_ID, new Fields("word"))
            .allGrouping("counter", "tick");

    Config config = new Config();
    config.setDebug(true);
    config.setNumWorkers(3);

    new LocalCluster().submitTopology("kafka-spout", config, topology.createTopology());
}
 
Example 11
Project: StreamBench   File: AppTest.java   Source Code and License Vote up 6 votes
/**
 * Kafka word-count: a spout reads the "WordCount" topic, sentences are
 * split and counted, with counters partitioned by the "wordCountPair"
 * field. Runs indefinitely on a LocalCluster.
 */
public static void main(String[] args) throws WorkloadException {
    BrokerHosts zk = new ZkHosts("localhost:2181");
    SpoutConfig kafkaConf = new SpoutConfig(zk, "WordCount", "/" + "WordCount", UUID.randomUUID().toString());
    kafkaConf.scheme = new SchemeAsMultiScheme(new StringScheme());
    kafkaConf.ignoreZkOffsets = true;

    TopologyBuilder topology = new TopologyBuilder();
    topology.setSpout("spout", new KafkaSpout(kafkaConf));
    topology.setBolt("split", new SplitSentence()).shuffleGrouping("spout");
    topology.setBolt("counter", new CounterBolt(), 3).fieldsGrouping("split", new Fields("wordCountPair"));

    Config config = new Config();
    config.setDebug(true);
    config.setNumWorkers(3);

    new LocalCluster().submitTopology("kafka-spout", config, topology.createTopology());
}
 
Example 12
Project: LearnStorm   File: WordCountTopology.java   Source Code and License Vote up 6 votes
/**
 * Standard word-count: random sentences are split into words and counted.
 * With an argument the topology is submitted to a cluster under that
 * name; otherwise it runs locally as "word-count" for ten seconds and
 * shuts down.
 */
public static void main(String[] args) throws Exception {
    TopologyBuilder topology = new TopologyBuilder();
    topology.setSpout("spout", new RandomSentenceSpout(), 5);
    topology.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    topology.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));

    Config config = new Config();
    config.setDebug(true);

    if (args == null || args.length == 0) {
        config.setMaxTaskParallelism(3);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("word-count", config, topology.createTopology());
        Thread.sleep(10000);
        cluster.shutdown();
    } else {
        config.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], config, topology.createTopology());
    }
}
 
Example 13
Project: LearnStorm   File: LogAnalyzer.java   Source Code and License Vote up 6 votes
/**
 * Builds the log-analyzer topology (Kafka spout -> Elasticsearch bolts)
 * and submits it to the cluster as "LogAnalyzerV1". Worker count comes
 * from the "num.workers" property; debug logging follows the DEBUG flag.
 *
 * @throws AlreadyAliveException    if a topology of that name is running
 * @throws InvalidTopologyException if the wiring is invalid
 * @throws AuthorizationException   if submission is not permitted
 */
private void buildAndSubmit() throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    final int workers = Integer.valueOf(topologyConfig.getProperty("num.workers"));
    Config config = new Config();
    config.setDebug(DEBUG);
    config.setNumWorkers(workers);
    config.setMaxSpoutPending(1000000);
    // https://github.com/apache/storm/tree/v0.10.0/external/storm-kafka
    // The message timeout (30 s by default) must exceed the KafkaSpout's
    // retryDelayMaxMs (60 s by default), hence 600 s here.
    config.setMessageTimeoutSecs(600);

    TopologyBuilder builder = new TopologyBuilder();
    configureKafkaSpout(builder, config);
    configureESBolts(builder, config);

    // LocalCluster cluster = new LocalCluster();
    StormSubmitter.submitTopology("LogAnalyzerV1", config, builder.createTopology());
}
 
Example 14
Project: Mastering-Apache-Storm   File: Topology.java   Source Code and License Vote up 5 votes
/**
 * Builds a two-stage topology (SampleSpout -> StormRedisBolt) and runs it
 * for ten seconds on an in-process LocalCluster before killing the
 * topology and shutting the cluster down.
 *
 * @param args unused
 * @throws AlreadyAliveException    declared for topology submission
 * @throws InvalidTopologyException declared for topology submission
 */
public static void main(String[] args) throws AlreadyAliveException,
		InvalidTopologyException {
	TopologyBuilder builder = new TopologyBuilder();

	// NOTE(review): the original built local lists of ZooKeeper hosts
	// ("192.168.41.122") and column families ("personal", "company")
	// that were never used; that dead code has been removed.

	// set the spout class
	builder.setSpout("spout", new SampleSpout(), 2);
	// set the bolt class; the Redis endpoint is hard-coded
	builder.setBolt("bolt", new StormRedisBolt("192.168.41.122",2181), 2).shuffleGrouping("spout");

	Config conf = new Config();
	conf.setDebug(true);
	// create an instance of LocalCluster class for
	// executing topology in local mode.
	LocalCluster cluster = new LocalCluster();

	// "StormRedisTopology" is the name of the submitted topology.
	cluster.submitTopology("StormRedisTopology", conf,
			builder.createTopology());
	try {
		Thread.sleep(10000);
	} catch (Exception exception) {
		System.out.println("Thread interrupted exception : " + exception);
	}
	// kill the StormRedisTopology
	cluster.killTopology("StormRedisTopology");
	// shutdown the storm test cluster
	cluster.shutdown();
}
 
Example 15
Project: fiware-sinfonier   File: DynamicTopology.java   Source Code and License Vote up 5 votes
/**
 * Entry point: builds a topology dynamically from /topology.json (spouts,
 * bolts, drains), then either submits it under the configured "name" or,
 * when no name is configured, runs it on a LocalCluster as "test".
 */
public static void main(String[] args) throws Exception {

        LOG.info("Reading JSON file configuration...");
        JSONProperties config = new JSONProperties("/topology.json");
        TopologyBuilder builder = new TopologyBuilder();

        /* Spout Configuration */
        JSONArray spouts = config.getSpouts();
        configureSpouts(builder, spouts);

        /* Bolt Configuration */
        JSONArray bolts = config.getBolts();
        configureBolts(builder, bolts);

        /* Drain Configuration */
        JSONArray drains = config.getDrains();
        configureDrains(builder, drains);

        /* Configure more Storm options */
        Config conf = setTopologyStormConfig(config.getProperties());


        if(config.getProperty("name") != null){
            StormSubmitter.submitTopology((String)config.getProperty("name"), conf, builder.createTopology());
        } else {
            conf.setDebug(true);
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("test", conf, builder.createTopology());
            Utils.sleep(1000000); // Alive for 1000 seconds (1000000 ms) — the old comment claiming 100 s was wrong
            cluster.killTopology("test");
            cluster.shutdown();
        }

    }
 
Example 16
Project: splice-community-sample-code   File: SpliceDumperTopology.java   Source Code and License Vote up 5 votes
/**
 * Local-mode demo: streams integers from SpliceIntegerSpout into the
 * pre-existing Splice table "testTable" for ten seconds, then shuts down.
 *
 * Prerequisite: create table testTable (word varchar(100), number int);
 *
 * @param args unused
 * @throws SQLException declared for the JDBC work done by the bolt
 */
public static void main(String[] args) throws SQLException {
        // NOTE(review): the original populated columnNames/columnTypes
        // lists ("word"/"varchar (100)", "number"/"int") that were never
        // passed to the bolt; that dead code has been removed.

        // this table must exist in splice
        // create table testTable (word varchar(100), number int);
        String tableName = "testTable";
        String server = "localhost";

        TopologyBuilder builder = new TopologyBuilder();

        // set the spout for the topology
        builder.setSpout("spout", new SpliceIntegerSpout(), 10);

        // dump the stream data into splice
        SpliceDumperBolt dumperBolt = new SpliceDumperBolt(server, tableName);
        builder.setBolt("dumperBolt", dumperBolt, 1).shuffleGrouping("spout");

        Config conf = new Config();
        conf.setDebug(true);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("splice-topology", conf, builder.createTopology());
        Utils.sleep(10000);
        cluster.shutdown();
    }
 
Example 17
Project: storm-kafka-examples   File: CounterTopology.java   Source Code and License Vote up 5 votes
/**
 * Kafka order-counting demo. Reads the "order" topic via a Kafka spout,
 * validates orders in CheckOrderBolt, and tallies them in CounterBolt.
 * With an argument the topology is submitted remotely under that name;
 * otherwise it runs on a LocalCluster for 500 seconds.
 *
 * @param args optional: [0] = topology name for remote submission
 * http://www.programcreek.com/java-api-examples/index.php?api=storm.kafka.KafkaSpout
 */
public static void main(String[] args) {
	try {
		// Spout parallelism (3) controls the number of spout threads in the cluster.
		String zkhost = "wxb-1:2181,wxb-2:2181,wxb-3:2181";
		String topic = "order";
		String groupId = "id";
		int spoutParallelism = 3;
		int boltParallelism = 1;
		ZkHosts zkHosts = new ZkHosts(zkhost); // ZooKeeper ensemble backing Kafka
		SpoutConfig spoutConfig = new SpoutConfig(zkHosts, topic, "/order", groupId); // creates /order /id
		spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
		KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

		TopologyBuilder builder = new TopologyBuilder();
		builder.setSpout("spout", kafkaSpout, spoutParallelism);
		builder.setBolt("check", new CheckOrderBolt(), boltParallelism).shuffleGrouping("spout");
		builder.setBolt("counter", new CounterBolt(), boltParallelism).shuffleGrouping("check");

		Config config = new Config();
		config.setDebug(true);

		if (args != null && args.length > 0) {
			config.setNumWorkers(2);
			StormSubmitter.submitTopology(args[0], config, builder.createTopology());
		} else {
			config.setMaxTaskParallelism(2);
			LocalCluster cluster = new LocalCluster();
			cluster.submitTopology("Wordcount-Topology", config, builder.createTopology());
			Thread.sleep(500000);
			cluster.shutdown();
		}
	} catch (Exception e) {
		e.printStackTrace();
	}
}
 
Example 18
Project: miner   File: TopologyMain.java   Source Code and License Vote up 5 votes
/**
 * Three-stage pipeline: EmitMessageSpout ("Spout") -> ParseLoopBolt
 * ("generate") -> PrintBolt ("Store"). With an argument the topology is
 * submitted remotely under that name; otherwise it runs on a LocalCluster
 * named "test".
 */
public static void main(String[] args) {
	try {
		TopologyBuilder builder = new TopologyBuilder();
		builder.setSpout("Spout", new EmitMessageSpout(), 1);

		builder.setBolt("generate", new ParseLoopBolt(), 1)
				.shuffleGrouping("Spout");
		builder.setBolt("Store", new PrintBolt(), 1)
				.shuffleGrouping("generate");

		Config conf = new Config();
		conf.setDebug(false);

		boolean remote = args != null && args.length > 0;
		if (remote) {
			conf.setNumWorkers(4);
			StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
		} else {
			conf.setMaxTaskParallelism(2);
			new LocalCluster().submitTopology("test", conf, builder.createTopology());
		}
	} catch (Exception e) {
		e.printStackTrace();
	}
}
 
Example 19
Project: miner   File: TopologyMain.java   Source Code and License Vote up 5 votes
/**
 * Mixed-record pipeline: three typed spouts (number/string/sign) feed a
 * splitter, a type-based distributor, and one saver bolt per record type.
 * With an argument the topology is submitted remotely under that name;
 * otherwise it runs locally as "test".
 */
public static void main(String[] args) {
	try {
		TopologyBuilder builder = new TopologyBuilder();

		// One spout per record type.
		builder.setSpout("spout-number", new ProduceRecordSpout(Type.NUMBER, new String[]{"111 222 333", "80966 31"}), 1);
		builder.setSpout("spout-string", new ProduceRecordSpout(Type.STRING, new String[]{"abc ddd fasko", "hello the world"}), 1);
		builder.setSpout("spout-sign", new ProduceRecordSpout(Type.SIGN, new String[]{"++ -*% *** @@", "{+-} ^#######"}), 1);

		// Split every record, then route words by their "type" field.
		builder.setBolt("bolt-splitter", new SplitRecordBolt(), 2)
				.shuffleGrouping("spout-number")
				.shuffleGrouping("spout-string")
				.shuffleGrouping("spout-sign");
		builder.setBolt("bolt-distributor", new DistributeWordByTypeBolt(), 1)
				.fieldsGrouping("bolt-splitter", new Fields("type"));

		// One saver per type, each reading its dedicated stream.
		builder.setBolt("bolt-number-saver", new SaveDataBolt(Type.NUMBER), 1)
				.shuffleGrouping("bolt-distributor", "stream-number-saver");
		builder.setBolt("bolt-string-saver", new SaveDataBolt(Type.STRING), 1)
				.shuffleGrouping("bolt-distributor", "stream-string-saver");
		builder.setBolt("bolt-sign-saver", new SaveDataBolt(Type.SIGN), 1)
				.shuffleGrouping("bolt-distributor", "stream-sign-saver");

		Config conf = new Config();
		conf.setDebug(false);

		boolean remote = args != null && args.length > 0;
		if (remote) {
			conf.setNumWorkers(4);
			StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
		} else {
			conf.setMaxTaskParallelism(2);
			new LocalCluster().submitTopology("test", conf, builder.createTopology());
		}
	} catch (Exception e) {
		e.printStackTrace();
	}
}
 
Example 20
Project: miner   File: ExclaimBasicTopo.java   Source Code and License Vote up 5 votes
/**
 * Spout -> proxy -> print pipeline; Redis connection settings are carried
 * inside the Storm Config. With an argument the topology goes to a remote
 * cluster under that name; otherwise it runs locally as "test" for ten
 * seconds, then is killed.
 */
public static void main(String[] args) throws Exception {
    TopologyBuilder topology = new TopologyBuilder();

    topology.setSpout("spout", new RandomSpout());
    topology.setBolt("exclaim", new ProxyBolt()).shuffleGrouping("spout");
    topology.setBolt("print", new PrintBolt()).shuffleGrouping("exclaim");

    Config config = new Config();
    config.setDebug(false);

    // Redis connection parameters, tunnelled through the Config.
    config.put("ip","127.0.0.1");
    config.put("port","6379");
    config.put("password","password");

    if (args == null || args.length == 0) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", config, topology.createTopology());
        Utils.sleep(10*1000);
        cluster.killTopology("test");
        cluster.shutdown();
    } else {
        config.setNumWorkers(1);
        StormSubmitter.submitTopology(args[0], config, topology.createTopology());
    }
}
 
Example 21
Project: miner   File: TopologyMain.java   Source Code and License Vote up 5 votes
/**
 * Looping crawl pipeline: "generate" and "generate_loop" produce URLs
 * (the loop bolt re-consumes Parse's "loop" stream), ParseLoopBolt
 * ("Parse") parses them, and StoreTestBolt ("Store") persists Parse's
 * "store" stream. Remote submission with an argument; otherwise a local
 * run named "test".
 */
public static void main(String[] args) {
	try {
		TopologyBuilder builder = new TopologyBuilder();
		builder.setSpout("Spout", new EmitMessageSpout(), 1);

		builder.setBolt("generate", new GenerateUrlBolt(), 1)
				.shuffleGrouping("Spout");
		builder.setBolt("generate_loop", new GenerateUrlBolt(), 1)
				.shuffleGrouping("Parse", "loop");

		builder.setBolt("Parse", new ParseLoopBolt(), 1)
				.shuffleGrouping("generate")
				.shuffleGrouping("generate_loop");

		builder.setBolt("Store", new StoreTestBolt(), 1)
				.shuffleGrouping("Parse", "store");

		Config conf = new Config();
		conf.setDebug(false);

		boolean remote = args != null && args.length > 0;
		if (remote) {
			conf.setNumWorkers(4);
			StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
		} else {
			conf.setMaxTaskParallelism(2);
			new LocalCluster().submitTopology("test", conf, builder.createTopology());
		}
	} catch (Exception e) {
		e.printStackTrace();
	}
}
 
Example 22
Project: erad2016-streamprocessing   File: SentimentAnalysisTopology.java   Source Code and License Vote up 5 votes
/**
 * Creates the Storm configuration for the sentiment-analysis topology.
 *
 * @param local true when running on a LocalCluster: caps task parallelism
 *              instead of requesting worker processes
 * @return a debug-enabled Config sized from the "sa.storm.workers" property
 */
private static Config createConfig(boolean local) {
    final int workers = Properties.getInt("sa.storm.workers");
    final Config conf = new Config();
    conf.setDebug(true);
    if (local) {
        conf.setMaxTaskParallelism(workers);
    } else {
        conf.setNumWorkers(workers);
    }
    return conf;
}
 
Example 23
Project: java   File: DeliveryTopology.java   Source Code and License Vote up 5 votes
/**
 * Delivery-check topology: a trade spout feeds an eligibility bolt whose
 * "oddstream"/"evenstream" outputs go to dedicated odd/even bolts.
 * Remote submission when an argument is given; otherwise a long-running
 * local run named "DeliveryTopology".
 */
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
  TopologyBuilder topology = new TopologyBuilder();

  LOGGER.info("Starting..");
  topology.setSpout("trade", new DeliveryCheckSpout(), 1);
  topology.setBolt("eligibility", new DeliveryCheckBolt(), 10).shuffleGrouping("trade");
  topology.setBolt("odd", new DeliveryCheckOddBolt(), 10).shuffleGrouping("eligibility", "oddstream");
  topology.setBolt("even", new DeliveryCheckEvenBolt(), 10).shuffleGrouping("eligibility", "evenstream");

  Config config = new Config();
  config.setDebug(false);
  config.setMaxSpoutPending(5);

  if (args == null || args.length == 0) {
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("DeliveryTopology", config, topology.createTopology());
    Utils.sleep(100000000);
    cluster.killTopology("DeliveryTopology");
    cluster.shutdown();
  } else {
    config.setNumWorkers(1);
    LOGGER.info("Submitting DeliveryTopology");
    StormSubmitter.submitTopologyWithProgressBar(args[0], config, topology.createTopology());
  }
}
 
Example 24
Project: incubator-samoa   File: LocalStormDoTask.java   Source Code and License Vote up 5 votes
/**
 * Converts the command-line arguments into a SAMOA Storm topology and
 * runs it on a LocalCluster for the duration configured in the SAMOA
 * Storm property file, then kills the topology and shuts the cluster
 * down.
 *
 * @param args
 *          the arguments; a leading worker-count option is consumed by
 *          StormSamoaUtils.numWorkers
 */
public static void main(String[] args) {

  List<String> argList = new ArrayList<String>(Arrays.asList(args));
  int parallelism = StormSamoaUtils.numWorkers(argList);
  args = argList.toArray(new String[0]);

  // Build the Storm topology from the remaining arguments.
  StormTopology stormTopo = StormSamoaUtils.argsToTopology(args);
  String topologyName = stormTopo.getTopologyName();

  Config conf = new Config();
  // conf.putAll(Utils.readStormConfig());
  conf.setDebug(false);

  // Local mode: cap task parallelism rather than requesting workers.
  conf.setMaxTaskParallelism(parallelism);

  backtype.storm.LocalCluster cluster = new backtype.storm.LocalCluster();
  cluster.submitTopology(topologyName, conf, stormTopo.getStormBuilder().createTopology());

  // Sleep for the execution duration configured in the property file.
  Configuration stormConfig = StormSamoaUtils.getPropertyConfig(LocalStormDoTask.SAMOA_STORM_PROPERTY_FILE_LOC);
  long executionDuration = stormConfig.getLong(LocalStormDoTask.EXECUTION_DURATION_KEY);
  backtype.storm.utils.Utils.sleep(executionDuration * 1000);

  cluster.killTopology(topologyName);
  cluster.shutdown();

}
 
Example 25
Project: netty-storm   File: NettyTopology.java   Source Code and License Vote up 5 votes
/**
 * Runs the Netty spout test topology locally via StormRunner with debug
 * logging enabled; an interrupt during the run is reported and swallowed.
 */
public static void main(String[] args) throws Exception {
	TopologyBuilder topologyBuilder = createTopology();

	Config stormConfig = new Config();
	stormConfig.setDebug(true);
	//stormConfig.setMaxSpoutPending(1);

	try {
		StormRunner.runTopologyLocally(topologyBuilder.createTopology(), "NettySpoutTest", stormConfig, 0);
	} catch (InterruptedException e) {
		System.out.println("\n\n Execution interrupted. \n\n");
	}
}
 
Example 26
Project: jstrom   File: TransactionalGlobalCount.java   Source Code and License Vote up 5 votes
/**
 * Transactional global word count: per-batch partial counts are summed by
 * a global updater bolt. With no arguments the topology runs locally for
 * 100 seconds; otherwise it is submitted as "global", optionally merging
 * a YAML config file given as args[0] into the Storm config.
 */
public static void main(String[] args) throws Exception {
  MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
  TransactionalTopologyBuilder topology = new TransactionalTopologyBuilder("global-count", "spout", spout, 3);
  topology.setBolt("partial-count", new BatchCount(), 5).noneGrouping("spout");
  topology.setBolt("sum", new UpdateGlobalCount()).globalGrouping("partial-count");

  Config conf = new Config();
  conf.setDebug(true);
  conf.setMaxSpoutPending(3);

  if (args.length == 0) {
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("global-count-topology", conf, topology.buildTopology());
    Thread.sleep(100000);
    cluster.shutdown();
  } else {
    conf.setNumWorkers(3);
    try {
      Map yamlConf = LoadConf.LoadYaml(args[0]);
      if (yamlConf != null) {
        conf.putAll(yamlConf);
      }
    } catch (Exception e) {
      System.out.println("Input " + args[0] + " isn't one yaml ");
    }
    StormSubmitter.submitTopology("global", conf, topology.buildTopology());
  }
}
 
Example 27
Project: cloud-computing-specialization   File: TopWordFinderTopologyPartB.java   Source Code and License Vote up 5 votes
/**
 * Word-count assignment topology (part B): FileReaderSpout ("spout") ->
 * SplitSentenceBolt ("split") -> WordCountBolt ("count"). Runs locally
 * as "word-count" for two minutes against the file named in args[0],
 * then shuts down.
 */
public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();

        Config config = new Config();
        config.setDebug(true);

        // Component ids required by the assignment: spout / split / count.
        String spoutId = "spout";
        String splitId = "split";
        String countId = "count";

        builder.setSpout(spoutId, new FileReaderSpout(args[0]), 5);
        builder.setBolt(splitId, new SplitSentenceBolt(), 8).shuffleGrouping(spoutId);
        builder.setBolt(countId, new WordCountBolt(), 12).fieldsGrouping(splitId, new Fields("word"));

        config.setMaxTaskParallelism(3);

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("word-count", config, builder.createTopology());

        // Let it run for two minutes, then stop the cluster.
        Thread.sleep(2 * 60 * 1000);
        cluster.shutdown();
    }
 
Example 28
Project: LearnStorm   File: ApLogGenerator.java   Source Code and License Vote up 5 votes
/**
 * Builds the AP-log generator topology (random-log spout -> Kafka bolt)
 * with a single worker and submits it as "ApLogGeneratorV1". Debug
 * logging follows the DEBUG flag.
 *
 * @throws AlreadyAliveException    if a topology of that name is running
 * @throws InvalidTopologyException if the wiring is invalid
 * @throws AuthorizationException   if submission is not permitted
 */
private void buildAndSubmit() throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
	Config conf = new Config();
	conf.setDebug(DEBUG);
	conf.setNumWorkers(1);

	TopologyBuilder builder = new TopologyBuilder();
	configureRandomLogSpout(builder, conf);
	configureKafkaBolt(builder, conf);

	// LocalCluster cluster = new LocalCluster();
	StormSubmitter.submitTopology("ApLogGeneratorV1", conf, builder.createTopology());
}
 
Example 29
Project: yuzhouwan   File: LocalTopologyRunner.java   Source Code and License Vote up 5 votes
/**
 * Runs the credit-card topology on a LocalCluster for 30 seconds, then
 * kills "local-topology" and shuts the cluster down.
 */
public static void main(String[] args) {
        StormTopology creditCardTopology = CreditCardTopologyBuilder.build();

        Config conf = new Config();
        conf.setDebug(true);

        LocalCluster localCluster = new LocalCluster();
        localCluster.submitTopology("local-topology", conf, creditCardTopology);
        Utils.sleep(30000);

        localCluster.killTopology("local-topology");
        localCluster.shutdown();
    }
 
Example 30
Project: cloud-computing-specialization   File: TopWordFinderTopologyPartC.java   Source Code and License Vote up 5 votes
/**
 * Word-count assignment topology (part C): FileReaderSpout ("spout") ->
 * SplitSentenceBolt ("split") -> NormalizerBolt ("normalize") ->
 * WordCountBolt ("count"). Runs locally as "word-count" for two minutes
 * against the file named in args[0], then shuts down.
 */
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    Config config = new Config();
    config.setDebug(true);

    // Component ids required by the assignment.
    String spoutId = "spout";
    String splitId = "split";
    String countId = "count";
    String normalizeId = "normalize";

    builder.setSpout(spoutId, new FileReaderSpout(args[0]), 5);
    builder.setBolt(splitId, new SplitSentenceBolt(), 8).shuffleGrouping(spoutId);
    builder.setBolt(normalizeId, new NormalizerBolt(), 12).fieldsGrouping(splitId, new Fields("word"));
    builder.setBolt(countId, new WordCountBolt(), 12).fieldsGrouping(normalizeId, new Fields("word"));

    config.setMaxTaskParallelism(3);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("word-count", config, builder.createTopology());

    // Let it run for two minutes, then stop the cluster.
    Thread.sleep(2 * 60 * 1000);

    cluster.shutdown();
}
 
Example 31
Project: storm-kafka-examples   File: HdfsTopology.java   Source Code and License Vote up 4 votes
/**
 * Kafka -> HDFS order pipeline: a KafkaSpout reads the "order" topic, a check
 * bolt validates orders, a counter bolt aggregates, and an HdfsBolt writes
 * pipe-delimited records to HDFS with size-based file rotation.
 * <p>
 * With CLI arguments the topology is submitted to a remote cluster (args[0] is
 * the topology name); otherwise it runs on a LocalCluster for ~500 seconds.
 */
public static void main(String[] args) {
    try{
        // Kafka / ZooKeeper connection settings consumed by the spout.
        String zkhost = "wxb-1:2181,wxb-2:2181,wxb-3:2181";
        String topic = "order";
        String groupId = "id";
        int spoutNum = 3;
        int boltNum = 1;
        ZkHosts zkHosts = new ZkHosts(zkhost);// the ZooKeeper ensemble Kafka registers with
        SpoutConfig spoutConfig = new SpoutConfig(zkHosts, topic, "/order", groupId);  // create /order /id
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

        // HDFS bolt
        // use "|" instead of "," for field delimiter
        RecordFormat format = new DelimitedRecordFormat()
                .withFieldDelimiter("|");

        // sync the filesystem after every 1k tuples
        SyncPolicy syncPolicy = new CountSyncPolicy(1000);

        // rotate files when they reach 5MB
        FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);
        // Alternative left by the author: rotate on a timer instead of on size.
        // FileRotationPolicy rotationPolicy = new TimedRotationPolicy(1.0f, TimedRotationPolicy.TimeUnit.MINUTES);

        // Output files land as /tmp/order_*.log
        FileNameFormat fileNameFormat = new DefaultFileNameFormat()
                .withPath("/tmp/").withPrefix("order_").withExtension(".log");

        HdfsBolt hdfsBolt = new HdfsBolt()
                .withFsUrl("hdfs://wxb-1:8020")
                .withFileNameFormat(fileNameFormat)
                .withRecordFormat(format)
                .withRotationPolicy(rotationPolicy)
                .withSyncPolicy(syncPolicy);

        // Wiring: kafka spout -> order check -> counter -> hdfs writer.
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", kafkaSpout, spoutNum);
        builder.setBolt("check", new CheckOrderBolt(), boltNum).shuffleGrouping("spout");
        builder.setBolt("counter", new CounterBolt(),boltNum).shuffleGrouping("check");
        builder.setBolt("hdfs", hdfsBolt,boltNum).shuffleGrouping("counter");

        Config config = new Config();
        config.setDebug(true);

        if(args!=null && args.length > 0) {
            // Remote submission; args[0] is the topology name.
            config.setNumWorkers(2);
            StormSubmitter.submitTopology(args[0], config, builder.createTopology());
        } else {
            config.setMaxTaskParallelism(2);

            // NOTE(review): local topology name says "Wordcount" although this
            // is the order/HDFS pipeline — looks like copy-paste; confirm intent.
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("Wordcount-Topology", config, builder.createTopology());

            Thread.sleep(500000);

            cluster.shutdown();
        }
    }catch (Exception e) {
        e.printStackTrace();
    }
}
 
Example 32
Project: miner   File: TopologyMain.java   Source Code and License Vote up 4 votes
/**
 * Crawler pipeline: begin/loop spouts emit seed work, which flows through url
 * generation, proxy selection, fetching, parsing and storage bolts. The parser
 * feeds newly discovered urls back via its "generate-loop" stream and persists
 * results via its "store" stream. Submits remotely when CLI args are present,
 * otherwise runs on a LocalCluster.
 */
public static void main(String[] args) {
	try{
		TopologyBuilder topologyBuilder = new TopologyBuilder();

		// Two sources of work; setMaxSpoutPending throttles in-flight tuples.
		topologyBuilder.setSpout("beginspout", new BeginSpout(), PlatformParas.begin_spout_num).setMaxSpoutPending(200);//1,500
		topologyBuilder.setSpout("loopspout", new LoopSpout(), PlatformParas.loop_spout_num).setMaxSpoutPending(200);

		// Url generation from both seed spouts.
		topologyBuilder.setBolt("generateurl", new GenerateUrlBolt(), PlatformParas.generateurl_bolt_num)//2
				.shuffleGrouping("beginspout")
				.shuffleGrouping("loopspout");
		// Feedback path: urls emitted by the parser on its "generate-loop" stream.
		topologyBuilder.setBolt("generateurl-loop-bolt", new GenerateUrlBolt(), PlatformParas.generateurl_bolt_num)
				.shuffleGrouping("parse", "generate-loop");

		// Proxy selection consumes urls from both generator bolts.
		topologyBuilder.setBolt("proxy", new ProxyBolt(), PlatformParas.proxy_bolt_num)
				.shuffleGrouping("generateurl")
				.shuffleGrouping("generateurl-loop-bolt");

		topologyBuilder.setBolt("fetch", new FetchBolt(), PlatformParas.fetch_bolt_num)
				.shuffleGrouping("proxy");

		topologyBuilder.setBolt("parse", new ParseBolt(), PlatformParas.parse_bolt_num)
				.shuffleGrouping("fetch");

		// Only tuples on the parser's "store" stream are persisted.
		topologyBuilder.setBolt("store", new StoreBolt(), PlatformParas.store_bolt_num)
				.shuffleGrouping("parse", "store");
		
		Config config = new Config();
		config.setDebug(false);
		//default:30s
		config.setMessageTimeoutSecs(PlatformParas.message_timeout_secs);
		//config.setMaxSpoutPending(2000);
		
		if(args != null && args.length>0){
			// Remote submission; args[0] is the topology name.
			config.setNumWorkers(PlatformParas.work_num);
			StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
		}else{
			// Local run; note no sleep/shutdown here — the cluster keeps running.
			config.setMaxTaskParallelism(2);
			LocalCluster cluster = new LocalCluster();
			cluster.submitTopology("test", config, topologyBuilder.createTopology());
		}

	}catch(Exception e){
		e.printStackTrace();
	}
}
 
Example 33
Project: Get-ENVS   File: WordCountTopology.java   Source Code and License Vote up 4 votes
/**
 * Word-count topology: a random-sentence spout feeds a sentence-splitting bolt,
 * whose words are counted by a field-grouped counting bolt. With CLI arguments
 * the topology is submitted to a remote cluster; without, it runs locally for
 * about ten seconds.
 */
public static void main(String[] args) throws Exception {
  TopologyBuilder topology = new TopologyBuilder();

  // 5 spout executors emitting random sentences.
  topology.setSpout("spout", new RandomSentenceSpout(), 5);
  // 8 splitter executors; shuffle grouping spreads sentences evenly.
  topology.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
  // 12 counter executors; fields grouping routes equal words to the same task.
  topology.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));

  Config conf = new Config();
  conf.setDebug(true);

  boolean runOnCluster = args != null && args.length > 0;
  if (runOnCluster) {
    // Cluster mode: three workers, topology name taken from the first argument.
    conf.setNumWorkers(3);
    StormSubmitter.submitTopology(args[0], conf, topology.createTopology());
  } else {
    // Local mode: cap executors per component at 3, run briefly, shut down.
    conf.setMaxTaskParallelism(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("word-count", conf, topology.createTopology());
    Thread.sleep(10000);
    cluster.shutdown();
  }
}
 
Example 34
Project: ignite-book-code-samples   File: SpeedViolationTopology.java   Source Code and License Vote up 4 votes
/**
 * Builds and locally runs the speed-violation topology: a file spout feeds a
 * speed-limit bolt whose output is streamed into an Ignite cache through a
 * StormStreamer bolt. Runs on a LocalCluster for ~10 seconds.
 *
 * @param args unused
 * @throws Exception if the local run is interrupted
 */
public static void main(String[] args) throws Exception {
    // Read the property source once instead of calling getProperties() five times.
    final Properties properties = getProperties();
    if (properties == null || properties.isEmpty()) {
        System.out.println("Property file <ignite-storm.property> is not found or empty");
        return;
    }
    // Ignite Stream Ibolt
    final StormStreamer<String, String> stormStreamer = new StormStreamer<>();

    stormStreamer.setAutoFlushFrequency(10L);
    stormStreamer.setAllowOverwrite(true);
    stormStreamer.setCacheName(properties.getProperty("cache.name"));

    stormStreamer.setIgniteTupleField(properties.getProperty("tuple.name"));
    stormStreamer.setIgniteConfigFile(properties.getProperty("ignite.spring.xml"));


    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new FileSourceSpout(), 1);
    // Fields grouping on "trafficLog" keeps related log lines on the same task.
    builder.setBolt("limit", new SpeedLimitBolt(), 1).fieldsGrouping("spout", new Fields("trafficLog"));
    // set ignite bolt
    builder.setBolt("ignite-bolt", stormStreamer, STORM_EXECUTORS).shuffleGrouping("limit");

    Config conf = new Config();
    conf.setDebug(false);

    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("speed-violation", conf, builder.createTopology());
    Thread.sleep(10000);
    cluster.shutdown();

}
 
Example 35
Project: ignite-book-code-samples   File: WordCountTopology.java   Source Code and License Vote up 4 votes
/**
 * Word-count topology wired into Apache Ignite: counted words are streamed
 * into an Ignite cache through a StormStreamer bolt. Submits to a remote
 * cluster when CLI arguments are present, otherwise runs locally for ~10s.
 */
public static void main(String[] args) throws Exception {
    // Configure the Ignite streaming bolt (flush rate, cache, tuple field, spring xml).
    final StormStreamer<String, String> igniteBolt = new StormStreamer<>();
    igniteBolt.setAutoFlushFrequency(10L);
    igniteBolt.setAllowOverwrite(true);
    igniteBolt.setCacheName("testCache");
    igniteBolt.setIgniteTupleField("ignite");
    igniteBolt.setIgniteConfigFile("/Users/shamim/Development/workshop/assembla/ignite-book/chapters/chapter-cep-storm/src/main/resources/example-ignite.xml");

    // Pipeline: spout -> split -> count -> ignite-bolt.
    TopologyBuilder topology = new TopologyBuilder();
    // 5 executors emitting random sentences.
    topology.setSpout("spout", new RandomSentenceSpout(), 5);
    // Shuffle grouping spreads sentences evenly over 8 splitters.
    topology.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    // Fields grouping keeps each distinct word on the same counter task.
    topology.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));
    // Counted words flow into Ignite.
    topology.setBolt("ignite-bolt", igniteBolt, STORM_EXECUTORS).shuffleGrouping("count");

    Config conf = new Config();
    // Debug logging disabled for production-style runs.
    conf.setDebug(false);

    if (args != null && args.length > 0) {
        // Remote submission: topology name comes from the first argument.
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[0], conf, topology.createTopology());
    } else {
        // Local run: cap per-component executors at 3, sleep, then shut down.
        conf.setMaxTaskParallelism(3);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("word-count", conf, topology.createTopology());
        Thread.sleep(10000);
        cluster.shutdown();
    }
}
 
Example 36
Project: java   File: TradeProcessingTopology.java   Source Code and License Vote up 4 votes
/**
 * Trade processing topology: a collector spout feeds an eligibility bolt that
 * routes trades either to a report-persistence bolt (REPORT_STREAM) or to an
 * exclusion-persistence bolt (EXCLUDE_STREAM). All parallelism and tuning
 * values come from the external CONFIG source.
 *
 * @param args when non-empty, args[0] is the topology name and the topology is
 *             submitted to the configured (local) Nimbus; otherwise it runs
 *             in-process for LOCAL_CLUSTER_RUNTIME milliseconds
 * @throws AlreadyAliveException if a topology with this name is already running
 * @throws InvalidTopologyException if Storm rejects the wiring
 */
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
  TopologyBuilder builder = new TopologyBuilder();

  LOGGER.info("Building Trade Processing Topology..");

  builder.setSpout(TRD_COLLECTOR_SPOUT, new TradeCollectorSpout(),
      CONFIG.getNumber("TRD_COLLECTOR_SPOUT_PARALLELISM"));

  // Eligibility bolt decides which output stream each trade goes to.
  builder
      .setBolt(TRD_ELIGIBILITY_BOLT, new TradeEligibilityBolt(),
          CONFIG.getNumber("TRD_ELIGIBILITY_BOLT_PARALLELISM"))
      .shuffleGrouping(TRD_COLLECTOR_SPOUT);

  // Reportable trades are persisted from the REPORT_STREAM.
  builder
      .setBolt(TRD_REPORTING_BOLT, new TradeReportPersistenceBolt(),
          CONFIG.getNumber("TRD_REPORTING_BOLT_PARALLELISM"))
      .shuffleGrouping(TRD_ELIGIBILITY_BOLT, REPORT_STREAM);

  // Excluded trades are persisted from the EXCLUDE_STREAM.
  builder
      .setBolt(TRD_EXCLUSION_BOLT, new TradeExclusionPersistenceBolt(),
          CONFIG.getNumber("TRD_EXCLUSION_BOLT_PARALLELISM"))
      .shuffleGrouping(TRD_ELIGIBILITY_BOLT, EXCLUDE_STREAM);

  // Runtime tuning — every value sourced from the external CONFIG.
  Config conf = new Config();
  conf.setDebug(CONFIG.is("DEBUG_FLAG"));
  conf.setNumWorkers(CONFIG.getInt("NUMBER_OF_WORKERS"));
  conf.setMaxTaskParallelism(CONFIG.getInt("MAX_TASK_PARALLELISM"));
  conf.setMaxSpoutPending(CONFIG.getInt("MAX_SPOUT_PENDING"));
  conf.put(Config.TOPOLOGY_SLEEP_SPOUT_WAIT_STRATEGY_TIME_MS,
      CONFIG.getInt("MAX_SPOUT_PENDING_WAIT_MS"));
  conf.put(Config.TOPOLOGY_SPOUT_WAIT_STRATEGY, CONFIG.get("TOPOLOGY_WAIT_STRATEGY"));
  conf.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, CONFIG.getInt("TOPOLOGY_MESSAGE_TIMEOUT_SECS"));
  conf.put(Config.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS,
      CONFIG.is("TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS"));
  LOGGER.info("Submitting Trade Processing Topology..");
  if (args != null && args.length > 0) {
    // Remote-style submission pointed at a locally running Nimbus/ZooKeeper.
    conf.put(Config.NIMBUS_HOST, CONFIG.get("LOCAL_NIMBUS_HOST"));
    conf.put(Config.NIMBUS_THRIFT_PORT, CONFIG.getInt("LOCAL_NIMBUS_PORT"));
    conf.put(Config.STORM_ZOOKEEPER_PORT, CONFIG.getInt("LOCAL_ZOOKEEPER_PORT"));
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
  } else {
    // In-process run for LOCAL_CLUSTER_RUNTIME ms, then kill and shut down.
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(TRD_PROCESSING_TOPOLOGY, conf, builder.createTopology());
    Utils.sleep(CONFIG.getLong("LOCAL_CLUSTER_RUNTIME"));
    cluster.killTopology(TRD_PROCESSING_TOPOLOGY);
    cluster.shutdown();
  }
}
 
Example 37
Project: big-data-system   File: RollingTopWords.java   Source Code and License Vote up 4 votes
/** Creates the topology {@link Config} with per-tuple debug logging enabled. */
private static Config createTopologyConfiguration() {
  Config topologyConfig = new Config();
  topologyConfig.setDebug(true);
  return topologyConfig;
}
 
Example 38
Project: docker-kafka-storm   File: WordCountTopology.java   Source Code and License Vote up 4 votes
/**
 * Kafka word-count launcher.
 * <p>
 * Remote mode (six args): {@code [topologyName, zkHost, zkPort, topic,
 * nimbusHost, nimbusThriftPort]} — submits to a remote Storm cluster as part
 * of a Docker Compose multi-application setup. Local mode (no args): runs on a
 * LocalCluster for ~10 seconds (testing only).
 *
 * @param args see above; null/empty selects local mode
 * @throws Exception if submission fails or the local sleep is interrupted
 */
public static void main(String[] args) throws Exception {

        Config conf = new Config();
        String TOPOLOGY_NAME;

        if (args != null && args.length > 0) {
            // Remote mode needs all six positional arguments; fail fast with a
            // clear usage message instead of an ArrayIndexOutOfBoundsException.
            if (args.length < 6) {
                throw new IllegalArgumentException(
                        "Usage: <topologyName> <zkHost> <zkPort> <topic> <nimbusHost> <nimbusThriftPort>");
            }
            TOPOLOGY_NAME = args[0];
            LOG.info("Submitting topology " + TOPOLOGY_NAME + " to remote cluster.");
            String ZK_HOST = args[1];
            int ZK_PORT = Integer.parseInt(args[2]);
            String TOPIC = args[3];
            String NIMBUS_HOST = args[4];
            int NIMBUS_THRIFT_PORT = Integer.parseInt(args[5]);

            conf.setDebug(false);
            conf.setNumWorkers(2);
            conf.setMaxTaskParallelism(5);
            conf.put(Config.NIMBUS_HOST, NIMBUS_HOST);
            conf.put(Config.NIMBUS_THRIFT_PORT, NIMBUS_THRIFT_PORT);
            conf.put(Config.STORM_ZOOKEEPER_PORT, ZK_PORT);
            conf.put(Config.STORM_ZOOKEEPER_SERVERS, Arrays.asList(ZK_HOST));

            WordCountTopology wordCountTopology = new WordCountTopology(ZK_HOST, String.valueOf(ZK_PORT));
            StormSubmitter.submitTopology(TOPOLOGY_NAME, conf, wordCountTopology.buildTopology(TOPIC));

        }
        else {
            TOPOLOGY_NAME = "wordcount-topology";
            // Local mode (only for testing purposes).
            LOG.info("Starting topology " + TOPOLOGY_NAME + " in LocalMode.");

            conf.setDebug(false);
            conf.setNumWorkers(2);
            conf.setMaxTaskParallelism(2);

            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology(TOPOLOGY_NAME, conf, new WordCountTopology().buildTopology());

            Thread.sleep(10000);
            cluster.shutdown();
        }
    }
 
Example 39
Project: LearnStorm   File: TestTridentTopology.java   Source Code and License Vote up 4 votes
/**
 * Trident word-count with a DRPC query stream: a cycling fixed-batch spout of
 * sentences is split into words, grouped, and aggregated into an in-memory
 * count state; the "words" DRPC stream looks up and sums the counts of the
 * queried words.
 */
public static void main(String args[]) throws Exception {

		TridentTopology topology = new TridentTopology();
		Config conf = new Config();

		// Batch size 3; setCycle(true) makes the spout replay these sentences forever.
		@SuppressWarnings("unchecked")
		FixedBatchSpout spout = new FixedBatchSpout(
				new Fields("sentence"), 3,
				new Values("the cow jumped over the moon"),
				new Values("the man went to the store and bought some candy"),
				new Values("four score and seven years ago"),
				new Values("how many apples can you eat"));
		spout.setCycle(true);

		// Persistent per-word counts held in a MemoryMapState.
		TridentState wordCounts = topology.newStream("spout1", spout)
				.each(new Fields("sentence"), new Split(), new Fields("word"))
				.groupBy(new Fields("word"))
				.persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
				.parallelismHint(6);

		// MapGet() : gets the count for each word
		topology.newDRPCStream("words")
				.each(new Fields("args"), new Split(), new Fields("word"))
				.groupBy(new Fields("word"))
				.stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count"))
				.each(new Fields("count"), new FilterNull())
				.aggregate(new Fields("count"), new Sum(), new Fields("sum"));

		conf.setDebug(true);

		// Thrift transport, Nimbus retry, and DRPC buffer settings for the client.
		conf.put("storm.thrift.transport", "backtype.storm.security.auth.SimpleTransportPlugin");
		conf.put(Config.STORM_NIMBUS_RETRY_TIMES, 3);
		conf.put(Config.STORM_NIMBUS_RETRY_INTERVAL, 10);
		conf.put(Config.STORM_NIMBUS_RETRY_INTERVAL_CEILING, 20);
		conf.put(Config.DRPC_MAX_BUFFER_SIZE, 1048576);

		// NOTE(review): this queries a remote DRPC server BEFORE the topology is
		// submitted to the local cluster below; it can only succeed if the same
		// topology is already running on that remote cluster — confirm intent.
		DRPCClient client = new DRPCClient(conf, "hdp02.localdomain", 3772);

		System.out.println(client.execute("words", "cat dog the man"));

		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("test", conf, topology.build());

		Utils.sleep(1000);

		cluster.killTopology("test");
		cluster.shutdown();

	}
 
Example 40
Project: learn_jstorm   File: SingleJoinTest.java   Source Code and License Vote up 4 votes
/**
 * Feeds gender and age tuples keyed by id into a SingleJoinBolt on a local
 * cluster and lets the joined topology run for 60 seconds; any exception
 * fails the test.
 */
@Test
public void test_single_join() {
	try {
		FeederSpout genderSpout = new FeederSpout(
				new Fields("id", "gender"));
		FeederSpout ageSpout = new FeederSpout(new Fields("id", "age"));

		// Join the two streams on "id": equal ids land on the same join task.
		TopologyBuilder builder = new TopologyBuilder();
		builder.setSpout("gender", genderSpout);
		builder.setSpout("age", ageSpout);
		builder.setBolt("join",
				new SingleJoinBolt(new Fields("gender", "age")))
				.fieldsGrouping("gender", new Fields("id"))
				.fieldsGrouping("age", new Fields("id"));

		Config conf = new Config();
		conf.setDebug(true);

		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("join-example", conf,
				builder.createTopology());

		// Ten records per stream: ids 0..9, alternating genders, ages 20..29.
		for (int i = 0; i < 10; i++) {
			String gender;
			if (i % 2 == 0) {
				gender = "male";
			} else {
				gender = "female";
			}
			genderSpout.feed(new Values(i, gender));
		}

		for (int i = 9; i >= 0; i--) {
			ageSpout.feed(new Values(i, i + 20));
		}

		JStormUtils.sleepMs(60 * 1000);
		cluster.shutdown();
	} catch (Exception e) {
		// Include the cause in the failure message so a broken run is
		// diagnosable instead of silently discarding the exception.
		Assert.fail("Failed to run SingleJoinExample: " + e);
	}
}
 
Example 41
Project: Infrastructure   File: CoordinationConfigurationTests.java   Source Code and License Vote up 4 votes
/**
 * Tests the pipeline options: configures CoordinationConfiguration from a
 * property set, pushes the values through the submission-side option setter,
 * round-trips them via command-line args into a topology Config, and asserts
 * that the configured values survive the round trip.
 */
@Test
public void pipelineOptionsTest() {
    PipelineOptions opts = new PipelineOptions();
    opts.setNumberOfWorkers(5);
    // Input properties picked up by CoordinationConfiguration.configure below.
    Properties prop = new Properties();
    prop.put(CoordinationConfiguration.PIPELINE_START_SOURCE_AUTOCONNECT, "true");
    prop.put(CoordinationConfiguration.INIT_MODE, InitializationMode.DYNAMIC.name());
    prop.put(Configuration.HOST_EVENT, "local");
    prop.put(Configuration.PORT_EVENT, 1234);
    prop.put(Configuration.EVENT_DISABLE_LOGGING, "aaa,bbb");
    prop.put(Configuration.PIPELINE_INTERCONN_PORTS, "10-20");
    CoordinationConfiguration.configure(prop, false);
    System.out.println("Configured " + prop);
    
    // during submission
    @SuppressWarnings("rawtypes")
    Map stormConf = Utils.readStormConfig();
    StormPipelineOptionsSetter optSetter = new StormPipelineOptionsSetter(stormConf, opts);
    StormUtils.doCommonConfiguration(optSetter);
    System.out.println("Conf " + stormConf);
    System.out.println("OPTS " + opts);
    // Serialize the options into CLI args, as a topology would receive them.
    String[] args = opts.toArgs("pip");
    System.out.println("ARGS " + java.util.Arrays.toString(args));
    
    // in topology
    PipelineOptions options = new PipelineOptions(args);
    Config config = new Config();
    config.setMessageTimeoutSecs(100);
    config.setDebug(false);
    config.put("windowSize", 1 * 30);  // Window size (in secs)
    config.put("windowAdvance", 1);  // Advance of the window (in secs)
    config.put("SUBPIPELINE.NAME", "pip"); //sub-pipeline namespace
    //The settings to optimize the storm performance.
    config.put(Config.TOPOLOGY_RECEIVER_BUFFER_SIZE, 8);
    config.put(Config.TOPOLOGY_TRANSFER_BUFFER_SIZE, 32);
    config.put(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE, 16384);
    config.put(Config.TOPOLOGY_EXECUTOR_SEND_BUFFER_SIZE, 16384);
    config.put(Configuration.HOST_EVENT, Configuration.getEventHost());
    config.put(Configuration.PORT_EVENT, Configuration.getEventPort());
    config.put(Configuration.EVENT_DISABLE_LOGGING, Configuration.getEventDisableLogging());
    config.put(Configuration.PIPELINE_INTERCONN_PORTS, Configuration.getPipelinePorts());
    options.toConf(config);
    System.out.println("Pip Config " + config);
    
    // Values set via properties must be visible in the final topology config.
    Assert.assertEquals("true", config.get(Constants.CONFIG_KEY_SOURCE_AUTOCONNECT));
    Assert.assertEquals(InitializationMode.DYNAMIC.name(), config.get(Constants.CONFIG_KEY_INIT_MODE));
    Assert.assertEquals("local", config.get(Configuration.HOST_EVENT));
    // The int 1234 put in above is expected back as the String "1234".
    Assert.assertEquals("1234", config.get(Configuration.PORT_EVENT));
    Assert.assertEquals("aaa,bbb", config.get(Configuration.EVENT_DISABLE_LOGGING));
    Assert.assertEquals("10-20", config.get(Configuration.PIPELINE_INTERCONN_PORTS));
    
    // Reset the global configuration so later tests start clean.
    CoordinationConfiguration.clear();
}