Java Code Examples for backtype.storm.Config.setNumWorkers()

The following are Java code examples that show how to use the setNumWorkers() method of the backtype.storm.Config class. You can vote up the examples you find useful; votes help surface better examples.
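Before the individual examples, here is a minimal sketch of the pattern most of them follow. setNumWorkers() tells Storm how many worker JVMs to request when the topology is submitted to a cluster through StormSubmitter; a LocalCluster run executes in a single JVM, where the worker count is not meaningful and setMaxTaskParallelism() is the usual knob instead. The class name NumWorkersExample and the topology name "num-workers-demo" below are illustrative placeholders, not taken from any of the listed projects; TestWordSpout is the test spout that ships with storm-core.

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.testing.TestWordSpout;
import backtype.storm.topology.TopologyBuilder;

public class NumWorkersExample {
    public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        // TestWordSpout comes with storm-core, so no project-specific classes are needed.
        builder.setSpout("word", new TestWordSpout(), 2);

        Config conf = new Config();

        if (args != null && args.length > 0) {
            // Cluster mode: spread the topology's executors across 3 worker processes.
            conf.setNumWorkers(3);
            StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
        } else {
            // Local mode: everything runs in one JVM, so cap per-component parallelism instead.
            conf.setMaxTaskParallelism(3);
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("num-workers-demo", conf, builder.createTopology());
            Thread.sleep(10000);
            cluster.shutdown();
        }
    }
}
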
Example 1
Project: storm-hbase-1.0.x   File: WordCountTrident.java   Vote up 6 votes
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(5);
    if (args.length == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordCounter", conf, buildTopology(args[0]));
        Thread.sleep(60 * 1000);
        cluster.killTopology("wordCounter");
        cluster.shutdown();
        System.exit(0);
    }
    else if(args.length == 2) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[1], conf, buildTopology(args[0]));
    } else{
        System.out.println("Usage: TridentFileTopology <hdfs url> [topology name]");
    }
}
 
Example 2
Project: storm-demo   File: LogStatisticsTopology.java   Vote up 6 votes
public static void main(String[] args) {
    Config config = new Config();

    HdfsBolt hdfsBolt = makeHdfsBolt();
    KafkaSpout kafkaSpout = makeKafkaSpout(TOPIC, TOPOLOGY_NAME);

    LOG.info("Topology name is {}", TOPOLOGY_NAME);

    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout(KAFKA_SPOUT_ID, kafkaSpout, 10);
    topologyBuilder.setBolt(CROP_BOLT_ID, new CropBolt(), 10).shuffleGrouping(KAFKA_SPOUT_ID);
    topologyBuilder.setBolt(SPLIT_FIELDS_BOLT_ID, new SplitFieldsBolt(), 10).shuffleGrouping(CROP_BOLT_ID);
    topologyBuilder.setBolt(STORM_HDFS_BOLT_ID, hdfsBolt, 4).fieldsGrouping(SPLIT_FIELDS_BOLT_ID, new Fields("timestamp", "fieldvalues"));

    if (args != null && args.length > 0) {
        config.setDebug(false);
        config.setNumWorkers(3);

        try {
            StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
        } catch (InvalidTopologyException | AlreadyAliveException | AuthorizationException e) {
            e.printStackTrace();
        }
    }
}
 
Example 3
Project: LearnStorm   File: ApLogAnalyzer.java   Vote up 6 votes
private void buildAndSubmit() throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
		final int numWorkers = Integer.valueOf(topologyConfig.getProperty("num.workers"));
		Config config = new Config();
		config.setDebug(DEBUG);
		config.setNumWorkers(numWorkers);
		config.setMaxSpoutPending(1000000);
		// https://github.com/apache/storm/tree/v0.10.0/external/storm-kafka
		// Note: this value (30 secs by default) must be larger than
		// retryDelayMaxMs (60 secs by default) in KafkaSpout.
		config.setMessageTimeoutSecs(600);
		TopologyBuilder builder = new TopologyBuilder();
		configureKafkaSpout(builder, config);
		configureESBolts(builder, config);
//		configureHBaseBolts(builder, config);

//		conf.put(Config.NIMBUS_HOST, "hdp01.localdomain");
//		System.setProperty("storm.jar", "/root/workspace//LearnStorm/target/LearnStorm-0.0.1-SNAPSHOT.jar");
//		System.setProperty("hadoop.home.dir", "/tmp");
//		LocalCluster cluster = new LocalCluster();
		StormSubmitter.submitTopology("ApLogAnalyzerV1", config, builder.createTopology());
	}
 
Example 4
Project: LogRTA   File: ExclamationTopology.java   Vote up 6 votes
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("word", new TestWordSpout(), 10);
    builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word");
    builder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1");

    Config conf = new Config();
    conf.setDebug(true);

    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);

        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    }
    else {

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("www_nginx_accesslog_stat", conf, builder.createTopology());
        Utils.sleep(10000);
        cluster.killTopology("www_nginx_accesslog_stat");
        cluster.shutdown();
    }
}
 
Example 5
Project: StreamBench   File: KMeansTest.java   Vote up 6 votes
public static void main(String[] args) throws WorkloadException {
    TopologyBuilder builder = new TopologyBuilder();


    builder.setSpout("points", new PointSpout());
    builder.setBolt("assign", new Assign())
            .shuffleGrouping("points")
            .allGrouping("aggregator", "centroids");

    builder.setBolt("aggregator", new Aggregator())
            .fieldsGrouping("assign", new Fields("centroid_index"));

    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(3);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("kafka-spout", conf, builder.createTopology());
}
 
Example 6
Project: Infrastructure   File: Topology.java   Vote up 6 votes
/**
 * Creates a standalone topology.
 * 
 * @param args the topology arguments
 * @throws Exception in case of creation problems
 */
public static void main(String[] args) throws Exception {
    Config config = new Config();
    config.setMessageTimeoutSecs(100);
    PipelineOptions options = new PipelineOptions(args);
    RecordingTopologyBuilder b = new RecordingTopologyBuilder(options);
    createTopology(b);
    b.close(args[0], config);
    
    // main topology: int numWorkers = options.getNumberOfWorkers(2);
    options.toConf(config);
    
    if (args != null && args.length > 0) {
        config.setNumWorkers(2);
        StormSubmitter.submitTopology(args[0], config, b.createTopology());
    } else {
        config.setMaxTaskParallelism(2);
        final LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(Naming.PIPELINE_NAME, config, b.createTopology());
    }
}
 
Example 7
Project: heron   File: WordCountTopology.java   Vote up 6 votes
/**
 * Main method
 */
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
  if (args.length < 1) {
    throw new RuntimeException("Specify topology name");
  }

  int parallelism = 1;
  if (args.length > 1) {
    parallelism = Integer.parseInt(args[1]);
  }
  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("word", new WordSpout(), parallelism);
  builder.setBolt("consumer", new ConsumerBolt(), parallelism)
      .fieldsGrouping("word", new Fields("word"));
  Config conf = new Config();
  conf.setNumWorkers(parallelism);

  StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
 
Example 8
Project: Infrastructure   File: AbstractTopology.java   Vote up 6 votes
/**
 * Creates a standalone topology.
 * 
 * @param args the topology arguments
 * @param topo the topology instance
 * @throws Exception in case of creation problems
 */
public static void main(String[] args, AbstractTopology topo) throws Exception {
    Config config = new Config();
    config.setMessageTimeoutSecs(100);
    PipelineOptions options = new PipelineOptions(args);
    RecordingTopologyBuilder b = new RecordingTopologyBuilder(options);
    topo.createTopology(config, b);
    
    // main topology: int numWorkers = options.getNumberOfWorkers(2);
    options.toConf(config);
    
    if (args != null && args.length > 0) {
        config.setNumWorkers(2);
        StormSubmitter.submitTopology(args[0], config, b.createTopology());
    } else {
        config.setMaxTaskParallelism(2);
        final LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("testGenPip", config, b.createTopology());
    }
}
 
Example 9
Project: LearnStorm   File: WordCountTopology.java   Vote up 6 votes
public static void main(String[] args) throws Exception {

		TopologyBuilder builder = new TopologyBuilder();
		builder.setSpout("spout", new RandomSentenceSpout(), 5);
		builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
		builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));

		Config conf = new Config();
		conf.setDebug(true);

		if (args != null && args.length > 0) {
			conf.setNumWorkers(3);
			StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
		} else {
			conf.setMaxTaskParallelism(3);
			LocalCluster cluster = new LocalCluster();
			cluster.submitTopology("word-count", conf, builder.createTopology());
			Thread.sleep(10000);
			cluster.shutdown();
		}
	}
 
Example 10
Project: big-data-system   File: ReachTopology.java   Vote up 6 votes
public static void main(String[] args) throws Exception {
  LinearDRPCTopologyBuilder builder = construct();


  Config conf = new Config();

  if (args == null || args.length == 0) {
    conf.setMaxTaskParallelism(3);
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("reach-drpc", conf, builder.createLocalTopology(drpc));

    String[] urlsToTry = new String[]{ "foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com" };
    for (String url : urlsToTry) {
      System.out.println("Reach of " + url + ": " + drpc.execute("reach", url));
    }

    cluster.shutdown();
    drpc.shutdown();
  }
  else {
    conf.setNumWorkers(6);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology());
  }
}
 
Example 11
Project: Infrastructure   File: TestTopology.java   Vote up 6 votes
/**
 * Creates a standalone topology.
 * 
 * @param args the topology arguments
 * @throws Exception in case of creation problems
 */
public static void main(String[] args) throws Exception {
    Config config = new Config();
    Naming.setDefaultInitializeAlgorithms(config, defaultInitAlgorithms);
    config.setMessageTimeoutSecs(100);
    PipelineOptions options = new PipelineOptions(args);
    RecordingTopologyBuilder b = new RecordingTopologyBuilder(options);
    createTopology(b);
    b.close(args[0], config);
    
    // main topology: int numWorkers = options.getNumberOfWorkers(2);
    options.toConf(config);
    
    if (args != null && args.length > 0) {
        config.setNumWorkers(2);
        StormSubmitter.submitTopology(args[0], config, b.createTopology());
    } else {
        config.setMaxTaskParallelism(2);
        final LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(PIP_NAME, config, b.createTopology());
    }
}
 
Example 12
Project: LearnStorm   File: DRPCTest.java   Vote up 6 votes
public static void main(String args[]) throws Exception {
	LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("exclamation");
	builder.addBolt(new ExclaimBolt(), 3);

	Config conf = new Config();

	if (args == null || args.length == 0) {
		LocalDRPC drpc = new LocalDRPC();
		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("drpc-demo", conf, builder.createLocalTopology(drpc));
		for (String word : new String[] { "hello", "goodbye" }) {
			System.out.println("Result for \"" + word + "\": " + drpc.execute("exclamation", word));
		}

		Thread.sleep(10000);
		drpc.shutdown();
		cluster.shutdown();
	} else {
		conf.setNumWorkers(3);
		StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology());
	}
}
 
Example 13
Project: cdh-storm   File: ReachTopology.java   Vote up 6 votes
public static void main(String[] args) throws Exception {
  LinearDRPCTopologyBuilder builder = construct();


  Config conf = new Config();

  if (args == null || args.length == 0) {
    conf.setMaxTaskParallelism(3);
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("reach-drpc", conf, builder.createLocalTopology(drpc));

    String[] urlsToTry = new String[]{ "foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com" };
    for (String url : urlsToTry) {
      System.out.println("Reach of " + url + ": " + drpc.execute("reach", url));
    }

    cluster.shutdown();
    drpc.shutdown();
  }
  else {
    conf.setNumWorkers(6);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology());
  }
}
 
Example 14
Project: storm-kafka-examples   File: CounterTopology.java   Vote up 5 votes
/**
 * @param args
 * http://www.programcreek.com/java-api-examples/index.php?api=storm.kafka.KafkaSpout
 */
public static void main(String[] args) {
	try{
		// Set up the spout node and assign its parallelism; this parallelism controls the number of threads the component runs across the cluster (6)
		String zkhost = "wxb-1:2181,wxb-2:2181,wxb-3:2181";
		String topic = "order";
		String groupId = "id";
		int spoutNum = 3;
		int boltNum = 1;
		ZkHosts zkHosts = new ZkHosts(zkhost); // the ZooKeeper ensemble that Kafka is registered with
		SpoutConfig spoutConfig = new SpoutConfig(zkHosts, topic, "/order", groupId);  // create /order /id
		spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
		KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", kafkaSpout, spoutNum);
		builder.setBolt("check", new CheckOrderBolt(), boltNum).shuffleGrouping("spout");
        builder.setBolt("counter", new CounterBolt(),boltNum).shuffleGrouping("check");

        Config config = new Config();
        config.setDebug(true);
        
        if(args!=null && args.length > 0) {
            config.setNumWorkers(2);
            StormSubmitter.submitTopology(args[0], config, builder.createTopology());
        } else {        
            config.setMaxTaskParallelism(2);

            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("Wordcount-Topology", config, builder.createTopology());

            Thread.sleep(500000);

            cluster.shutdown();
        }
	}catch (Exception e) {
		e.printStackTrace();
	}
}
 
Example 15
Project: miner   File: TopologyMain.java   Vote up 5 votes
public static void main(String[] args) {
	try{
		TopologyBuilder topologyBuilder = new TopologyBuilder();
		topologyBuilder.setSpout("Spout", new EmitMessageSpout(), 1);

		topologyBuilder.setBolt("generate", new ParseLoopBolt(), 1)
				.shuffleGrouping("Spout");



		topologyBuilder.setBolt("Store", new PrintBolt(), 1)
				.shuffleGrouping("generate");
		
		Config config = new Config();
		config.setDebug(false);
		
		if(args != null && args.length>0){
			config.setNumWorkers(4);
			StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
		}else{
			config.setMaxTaskParallelism(2);
			LocalCluster cluster = new LocalCluster();
			cluster.submitTopology("test", config, topologyBuilder.createTopology());
		}
		
	}catch(Exception e){
		e.printStackTrace();
	}
}
 
Example 16
Project: miner   File: TopologyMain.java   Vote up 5 votes
public static void main(String[] args) {
	try{
		TopologyBuilder topologyBuilder = new TopologyBuilder();
		topologyBuilder.setSpout("spout-number", new ProduceRecordSpout(Type.NUMBER, new String[]{"111 222 333", "80966 31"}), 1);
		topologyBuilder.setSpout("spout-string", new ProduceRecordSpout(Type.STRING, new String[]{"abc ddd fasko", "hello the world"}), 1);
		topologyBuilder.setSpout("spout-sign", new ProduceRecordSpout(Type.SIGN, new String[]{"++ -*% *** @@", "{+-} ^#######"}), 1);

		topologyBuilder.setBolt("bolt-splitter", new SplitRecordBolt(), 2)
				.shuffleGrouping("spout-number")
				.shuffleGrouping("spout-string")
				.shuffleGrouping("spout-sign");

		topologyBuilder.setBolt("bolt-distributor", new DistributeWordByTypeBolt(), 1)
				.fieldsGrouping("bolt-splitter", new Fields("type"));

		topologyBuilder.setBolt("bolt-number-saver", new SaveDataBolt(Type.NUMBER), 1)
				.shuffleGrouping("bolt-distributor", "stream-number-saver");
		topologyBuilder.setBolt("bolt-string-saver", new SaveDataBolt(Type.STRING), 1)
				.shuffleGrouping("bolt-distributor", "stream-string-saver");
		topologyBuilder.setBolt("bolt-sign-saver", new SaveDataBolt(Type.SIGN), 1)
				.shuffleGrouping("bolt-distributor", "stream-sign-saver");

		Config config = new Config();
		config.setDebug(false);
		
		if(args != null && args.length>0){
			config.setNumWorkers(4);
			StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
		}else{
			config.setMaxTaskParallelism(2);
			LocalCluster cluster = new LocalCluster();
			cluster.submitTopology("test", config, topologyBuilder.createTopology());
		}
		
	}catch(Exception e){
		e.printStackTrace();
	}
}
 
Example 17
Project: miner   File: ExclaimBasicTopo.java   Vote up 5 votes
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new RandomSpout());
    builder.setBolt("exclaim", new ProxyBolt()).shuffleGrouping("spout");
    builder.setBolt("print", new PrintBolt()).shuffleGrouping("exclaim");

    Config conf = new Config();
    conf.setDebug(false);

    /* The Redis connection settings are packed into the Config */
    conf.put("ip","127.0.0.1");
    conf.put("port","6379");
    conf.put("password","password");

    if (args != null && args.length > 0) {
        conf.setNumWorkers(1);

        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", conf, builder.createTopology());
        Utils.sleep(10*1000);
        cluster.killTopology("test");
        cluster.shutdown();
    }
}
 
Example 18
Project: miner   File: TopologyMain.java   Vote up 5 votes
public static void main(String[] args) {
		try{
			TopologyBuilder topologyBuilder = new TopologyBuilder();
			topologyBuilder.setSpout("Spout", new EmitMessageSpout(), 1);

			topologyBuilder.setBolt("generate", new GenerateUrlBolt(), 1)
					.shuffleGrouping("Spout");
			topologyBuilder.setBolt("generate_loop", new GenerateUrlBolt(), 1)
					.shuffleGrouping("Parse", "loop");

//			topologyBuilder.setBolt("Parse", new ParseTestBolt(), 1).shuffleGrouping("Spout");
			topologyBuilder.setBolt("Parse", new ParseLoopBolt(), 1)
					.shuffleGrouping("generate")
					.shuffleGrouping("generate_loop");

			topologyBuilder.setBolt("Store", new StoreTestBolt(), 1)
					.shuffleGrouping("Parse", "store");
			
			Config config = new Config();
			config.setDebug(false);
			
			if(args != null && args.length>0){
				config.setNumWorkers(4);
				StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
			}else{
				config.setMaxTaskParallelism(2);
				LocalCluster cluster = new LocalCluster();
				cluster.submitTopology("test", config, topologyBuilder.createTopology());
			}
			
		}catch(Exception e){
			e.printStackTrace();
		}
	}
 
Example 19
Project: erad2016-streamprocessing   File: SentimentAnalysisTopology.java   Vote up 5 votes
private static Config createConfig(boolean local) {
    int workers = Properties.getInt("sa.storm.workers");
    Config conf = new Config();
    conf.setDebug(true);
    if (local)
        conf.setMaxTaskParallelism(workers);
    else
        conf.setNumWorkers(workers);
    return conf;
}
 
Example 20
Project: java   File: DeliveryTopology.java   Vote up 5 votes
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
  TopologyBuilder builder = new TopologyBuilder();

  LOGGER.info("Starting..");
  builder.setSpout("trade", new DeliveryCheckSpout(), 1);
  builder.setBolt("eligibility", new DeliveryCheckBolt(), 10).shuffleGrouping("trade");
  builder.setBolt("odd", new DeliveryCheckOddBolt(), 10).shuffleGrouping("eligibility",
      "oddstream");
  builder.setBolt("even", new DeliveryCheckEvenBolt(), 10).shuffleGrouping("eligibility",
      "evenstream");

  Config conf = new Config();
  conf.setDebug(false);
  conf.setMaxSpoutPending(5);

  if (args != null && args.length > 0) {
    conf.setNumWorkers(1);
    LOGGER.info("Submitting DeliveryTopology");
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
  } else {

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("DeliveryTopology", conf, builder.createTopology());
    Utils.sleep(100000000);
    cluster.killTopology("DeliveryTopology");
    cluster.shutdown();
  }
}
 
Example 21
Project: jstorm-0.9.6.3-   File: ReachTopology.java   Vote up 5 votes
public static void main(String[] args) throws Exception {
    LinearDRPCTopologyBuilder builder = construct();

    Config conf = new Config();
    conf.setNumWorkers(6);
    if (args.length == 0) {
        StormSubmitter.submitTopology(TOPOLOGY_NAME, conf, builder.createRemoteTopology());
    } else {
        conf.setMaxTaskParallelism(3);
        LocalDRPC drpc = new LocalDRPC();
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(TOPOLOGY_NAME, conf, builder.createLocalTopology(drpc));
        
        JStormUtils.sleepMs(50000);
        
        String[] urlsToTry = new String[] { "foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com"};
        for(String url: urlsToTry) {
            System.out.println("Reach of " + url + ": " + drpc.execute(TOPOLOGY_NAME, url));
        }
        
        cluster.shutdown();
        drpc.shutdown();
    }
}
 
Example 22
Project: heron   File: MultiStageAckingTopology.java   Vote up 5 votes
public static void main(String[] args) throws Exception {
  if (args.length != 1) {
    throw new RuntimeException("Please specify the name of the topology");
  }
  TopologyBuilder builder = new TopologyBuilder();

  int parallelism = 2;
  builder.setSpout("word", new AckingTestWordSpout(), parallelism);
  builder.setBolt("exclaim1", new ExclamationBolt(true), parallelism)
      .shuffleGrouping("word");
  builder.setBolt("exclaim2", new ExclamationBolt(false), parallelism)
      .shuffleGrouping("exclaim1");

  Config conf = new Config();
  conf.setDebug(true);

  // Put an arbitrary large number here if you don't want to slow the topology down
  conf.setMaxSpoutPending(1000 * 1000 * 1000);

  // To enable acking, we need to setEnableAcking true
  conf.setNumAckers(1);

  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");

  conf.setNumWorkers(parallelism);
  StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
 
Example 23
Project: LearnStorm   File: ApLogGenerator.java   Vote up 5 votes
private void buildAndSubmit() throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
		Config config = new Config();
		config.setDebug(DEBUG);
		config.setNumWorkers(1);

		TopologyBuilder builder = new TopologyBuilder();
		configureRandomLogSpout(builder, config);
		configureKafkaBolt(builder, config);

//		LocalCluster cluster = new LocalCluster();
		StormSubmitter.submitTopology("ApLogGeneratorV1", config, builder.createTopology());
	}
 
Example 24
Project: jstrom   File: TransactionalGlobalCount.java   Vote up 5 votes
public static void main(String[] args) throws Exception {
  MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
  TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("global-count", "spout", spout, 3);
  builder.setBolt("partial-count", new BatchCount(), 5).noneGrouping("spout");
  builder.setBolt("sum", new UpdateGlobalCount()).globalGrouping("partial-count");

  Config config = new Config();
  config.setDebug(true);
  config.setMaxSpoutPending(3);
  
  if (args.length == 0) {
      LocalCluster cluster = new LocalCluster();

      cluster.submitTopology("global-count-topology", config, builder.buildTopology());

      Thread.sleep(100000);
      cluster.shutdown();
  } else {
      config.setNumWorkers(3);
      try {
          Map yamlConf = LoadConf.LoadYaml(args[0]);
          if (yamlConf != null) {
              config.putAll(yamlConf);
          }
      } catch (Exception e) {
          System.out.println("Input " + args[0] + " isn't a valid YAML file");
      }

      StormSubmitter.submitTopology("global", config, builder.buildTopology());
  }
}
 
Example 25
Project: storm-kafka-examples   File: HdfsTopology.java   Vote up 4 votes
public static void main(String[] args) {
    try{
        String zkhost = "wxb-1:2181,wxb-2:2181,wxb-3:2181";
        String topic = "order";
        String groupId = "id";
        int spoutNum = 3;
        int boltNum = 1;
        ZkHosts zkHosts = new ZkHosts(zkhost); // the ZooKeeper ensemble that Kafka is registered with
        SpoutConfig spoutConfig = new SpoutConfig(zkHosts, topic, "/order", groupId);  // create /order /id
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

        // HDFS bolt
        // use "|" instead of "," for field delimiter
        RecordFormat format = new DelimitedRecordFormat()
                .withFieldDelimiter("|");

        // sync the filesystem after every 1k tuples
        SyncPolicy syncPolicy = new CountSyncPolicy(1000);

        // rotate files when they reach 5MB
        FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);
        // FileRotationPolicy rotationPolicy = new TimedRotationPolicy(1.0f, TimedRotationPolicy.TimeUnit.MINUTES);

        FileNameFormat fileNameFormat = new DefaultFileNameFormat()
                .withPath("/tmp/").withPrefix("order_").withExtension(".log");

        HdfsBolt hdfsBolt = new HdfsBolt()
                .withFsUrl("hdfs://wxb-1:8020")
                .withFileNameFormat(fileNameFormat)
                .withRecordFormat(format)
                .withRotationPolicy(rotationPolicy)
                .withSyncPolicy(syncPolicy);

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", kafkaSpout, spoutNum);
        builder.setBolt("check", new CheckOrderBolt(), boltNum).shuffleGrouping("spout");
        builder.setBolt("counter", new CounterBolt(),boltNum).shuffleGrouping("check");
        builder.setBolt("hdfs", hdfsBolt,boltNum).shuffleGrouping("counter");

        Config config = new Config();
        config.setDebug(true);

        if(args!=null && args.length > 0) {
            config.setNumWorkers(2);
            StormSubmitter.submitTopology(args[0], config, builder.createTopology());
        } else {
            config.setMaxTaskParallelism(2);

            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("Wordcount-Topology", config, builder.createTopology());

            Thread.sleep(500000);

            cluster.shutdown();
        }
    }catch (Exception e) {
        e.printStackTrace();
    }
}
 
Example 26
Project: miner   File: TopologyMain.java   Vote up 4 votes
public static void main(String[] args) {
	try{
		TopologyBuilder topologyBuilder = new TopologyBuilder();

		topologyBuilder.setSpout("beginspout", new BeginSpout(), PlatformParas.begin_spout_num).setMaxSpoutPending(200);//1,500
		topologyBuilder.setSpout("loopspout", new LoopSpout(), PlatformParas.loop_spout_num).setMaxSpoutPending(200);

		topologyBuilder.setBolt("generateurl", new GenerateUrlBolt(), PlatformParas.generateurl_bolt_num)//2
				.shuffleGrouping("beginspout")
				.shuffleGrouping("loopspout");
		topologyBuilder.setBolt("generateurl-loop-bolt", new GenerateUrlBolt(), PlatformParas.generateurl_bolt_num)
				.shuffleGrouping("parse", "generate-loop");

		topologyBuilder.setBolt("proxy", new ProxyBolt(), PlatformParas.proxy_bolt_num)
				.shuffleGrouping("generateurl")
				.shuffleGrouping("generateurl-loop-bolt");

		topologyBuilder.setBolt("fetch", new FetchBolt(), PlatformParas.fetch_bolt_num)
				.shuffleGrouping("proxy");

		topologyBuilder.setBolt("parse", new ParseBolt(), PlatformParas.parse_bolt_num)
				.shuffleGrouping("fetch");

		topologyBuilder.setBolt("store", new StoreBolt(), PlatformParas.store_bolt_num)
				.shuffleGrouping("parse", "store");
		
		Config config = new Config();
		config.setDebug(false);
		//default:30s
		config.setMessageTimeoutSecs(PlatformParas.message_timeout_secs);
		//config.setMaxSpoutPending(2000);
		
		if(args != null && args.length>0){
			config.setNumWorkers(PlatformParas.work_num);
			StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
		}else{
			config.setMaxTaskParallelism(2);
			LocalCluster cluster = new LocalCluster();
			cluster.submitTopology("test", config, topologyBuilder.createTopology());
		}

	}catch(Exception e){
		e.printStackTrace();
	}
}
 
Example 27
Project: Get-ENVS   File: WordCountTopology.java   Vote up 4 votes
public static void main(String[] args) throws Exception {
//Used to build the topology
  TopologyBuilder builder = new TopologyBuilder();
  //Add the spout, with a name of 'spout'
  //and parallelism hint of 5 executors
  builder.setSpout("spout", new RandomSentenceSpout(), 5);
  //Add the SplitSentence bolt, with a name of 'split'
  //and parallelism hint of 8 executors
  //shufflegrouping subscribes to the spout, and equally distributes
  //tuples (sentences) across instances of the SplitSentence bolt
  builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
  //Add the counter, with a name of 'count'
  //and parallelism hint of 12 executors
  //fieldsgrouping subscribes to the split bolt, and
  //ensures that the same word is sent to the same instance (group by field 'word')
  builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));

  //new configuration
  Config conf = new Config();
  conf.setDebug(true);

  //If there are arguments, we are running on a cluster
  if (args != null && args.length > 0) {
    //parallelism hint to set the number of workers
    conf.setNumWorkers(3);
    //submit the topology
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
  }
  //Otherwise, we are running locally
  else {
    //Cap the maximum number of executors that can be spawned
    //for a component to 3
    conf.setMaxTaskParallelism(3);
    //LocalCluster is used to run locally
    LocalCluster cluster = new LocalCluster();
    //submit the topology
    cluster.submitTopology("word-count", conf, builder.createTopology());
    //sleep
    Thread.sleep(10000);
    //shut down the cluster
    cluster.shutdown();
  }
}
 
Example 28
Project: ignite-book-code-samples   File: WordCountTopology.java   Vote up 4 votes
public static void main(String[] args) throws Exception {
    // Ignite Stream Ibolt
    final StormStreamer<String, String> stormStreamer = new StormStreamer<>();

    stormStreamer.setAutoFlushFrequency(10L);
    stormStreamer.setAllowOverwrite(true);
    stormStreamer.setCacheName("testCache");
    stormStreamer.setIgniteTupleField("ignite");
    stormStreamer.setIgniteConfigFile("/Users/shamim/Development/workshop/assembla/ignite-book/chapters/chapter-cep-storm/src/main/resources/example-ignite.xml");

    //Used to build the topology
    TopologyBuilder builder = new TopologyBuilder();
    //Add the spout, with a name of 'spout'
    //and parallelism hint of 5 executors
    builder.setSpout("spout", new RandomSentenceSpout(), 5);
    //Add the SplitSentence bolt, with a name of 'split'
    //and parallelism hint of 8 executors
    //shufflegrouping subscribes to the spout, and equally distributes
    //tuples (sentences) across instances of the SplitSentence bolt
    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    //Add the counter, with a name of 'count'
    //and parallelism hint of 12 executors
    //fieldsgrouping subscribes to the split bolt, and
    //ensures that the same word is sent to the same instance (group by field 'word')
    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));
    // set ignite bolt
    builder.setBolt("ignite-bolt", stormStreamer,STORM_EXECUTORS).shuffleGrouping("count");

    //new configuration
    Config conf = new Config();
    //Set to false to disable debug information
    // when running in production mode.
    conf.setDebug(false);

    //If there are arguments, we are running on a cluster
    if (args != null && args.length > 0) {
        //parallelism hint to set the number of workers
        conf.setNumWorkers(3);
        //submit the topology
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    }
    //Otherwise, we are running locally
    else {
        //Cap the maximum number of executors that can be spawned
        //for a component to 3
        conf.setMaxTaskParallelism(3);
        //LocalCluster is used to run locally
        LocalCluster cluster = new LocalCluster();
        //submit the topology
        cluster.submitTopology("word-count", conf, builder.createTopology());
        //sleep
        Thread.sleep(10000);
        //shut down the cluster
        cluster.shutdown();
    }
}
 
Example 29
Project: java   File: TradeProcessingTopology.java   Vote up 4 votes
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
  TopologyBuilder builder = new TopologyBuilder();

  LOGGER.info("Building Trade Processing Topology..");

  builder.setSpout(TRD_COLLECTOR_SPOUT, new TradeCollectorSpout(),
      CONFIG.getNumber("TRD_COLLECTOR_SPOUT_PARALLELISM"));

  builder
      .setBolt(TRD_ELIGIBILITY_BOLT, new TradeEligibilityBolt(),
          CONFIG.getNumber("TRD_ELIGIBILITY_BOLT_PARALLELISM"))
      .shuffleGrouping(TRD_COLLECTOR_SPOUT);

  builder
      .setBolt(TRD_REPORTING_BOLT, new TradeReportPersistenceBolt(),
          CONFIG.getNumber("TRD_REPORTING_BOLT_PARALLELISM"))
      .shuffleGrouping(TRD_ELIGIBILITY_BOLT, REPORT_STREAM);

  builder
      .setBolt(TRD_EXCLUSION_BOLT, new TradeExclusionPersistenceBolt(),
          CONFIG.getNumber("TRD_EXCLUSION_BOLT_PARALLELISM"))
      .shuffleGrouping(TRD_ELIGIBILITY_BOLT, EXCLUDE_STREAM);

  Config conf = new Config();
  conf.setDebug(CONFIG.is("DEBUG_FLAG"));
  conf.setNumWorkers(CONFIG.getInt("NUMBER_OF_WORKERS"));
  conf.setMaxTaskParallelism(CONFIG.getInt("MAX_TASK_PARALLELISM"));
  conf.setMaxSpoutPending(CONFIG.getInt("MAX_SPOUT_PENDING"));
  conf.put(Config.TOPOLOGY_SLEEP_SPOUT_WAIT_STRATEGY_TIME_MS,
      CONFIG.getInt("MAX_SPOUT_PENDING_WAIT_MS"));
  conf.put(Config.TOPOLOGY_SPOUT_WAIT_STRATEGY, CONFIG.get("TOPOLOGY_WAIT_STRATEGY"));
  conf.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, CONFIG.getInt("TOPOLOGY_MESSAGE_TIMEOUT_SECS"));
  conf.put(Config.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS,
      CONFIG.is("TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS"));
  LOGGER.info("Submitting Trade Processing Topology..");
  if (args != null && args.length > 0) {
    conf.put(Config.NIMBUS_HOST, CONFIG.get("LOCAL_NIMBUS_HOST"));
    conf.put(Config.NIMBUS_THRIFT_PORT, CONFIG.getInt("LOCAL_NIMBUS_PORT"));
    conf.put(Config.STORM_ZOOKEEPER_PORT, CONFIG.getInt("LOCAL_ZOOKEEPER_PORT"));
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
  } else {
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(TRD_PROCESSING_TOPOLOGY, conf, builder.createTopology());
    Utils.sleep(CONFIG.getLong("LOCAL_CLUSTER_RUNTIME"));
    cluster.killTopology(TRD_PROCESSING_TOPOLOGY);
    cluster.shutdown();
  }
}
 
Example 30
Project: streaming_outliers   File: Topology.java   Vote up 4 votes
public static void main(String... argv) throws Exception {
    CommandLine cli = OutlierOptions.parse(new PosixParser(), argv);
    DataPointExtractorConfig extractorConfig = JSONUtil.INSTANCE.load(new FileInputStream(new File(OutlierOptions.EXTRACTOR_CONFIG.get(cli)))
                                                                     , DataPointExtractorConfig.class
                                                                     );
    com.caseystella.analytics.outlier.streaming.OutlierConfig streamingOutlierConfig = JSONUtil.INSTANCE.load(new FileInputStream(new File(OutlierOptions.STREAM_OUTLIER_CONFIG.get(cli)))
                                                                     , com.caseystella.analytics.outlier.streaming.OutlierConfig.class
                                                                     );

    PersistenceConfig persistenceConfig = JSONUtil.INSTANCE.load(new FileInputStream(new File(OutlierOptions.TIMESERIES_DB_CONFIG.get(cli)))
                                                                     , PersistenceConfig.class
                                                                     );
    int numSpouts = 1;
    int numWorkers = 10;
    if(OutlierOptions.NUM_WORKERS.has(cli)) {
        numWorkers = Integer.parseInt(OutlierOptions.NUM_WORKERS.get(cli));
    }
    if(OutlierOptions.NUM_SPOUTS.has(cli)) {
        numSpouts = Integer.parseInt(OutlierOptions.NUM_SPOUTS.get(cli));
    }
    Map clusterConf = Utils.readStormConfig();
    clusterConf.put("topology.max.spout.pending", 100);
    Config config = new Config();
    config.put("topology.max.spout.pending", 100);
    config.setNumWorkers(numWorkers);
    config.registerMetricsConsumer(LoggingMetricsConsumer.class);

    String topicName = OutlierOptions.TOPIC.get(cli);
    String topologyName = "streaming_outliers_" + topicName;
    String zkConnectString = OutlierOptions.ZK_QUORUM.get(cli);
    /*DataPointExtractorConfig extractorConfig
                                            , com.caseystella.analytics.outlier.streaming.OutlierConfig streamingOutlierConfig
                                            , com.caseystella.analytics.outlier.batch.OutlierConfig batchOutlierConfig
                                            , PersistenceConfig persistenceConfig
                                            , String kafkaTopic
                                            , String zkQuorum
                                            , int numWorkers*/
    boolean startAtBeginning = OutlierOptions.FROM_BEGINNING.has(cli);
    TopologyBuilder topology = createTopology( extractorConfig
                                             , streamingOutlierConfig
                                             , persistenceConfig
                                             , topicName
                                             , zkConnectString
                                             , OutlierOptions.ES_NODE.get(cli)
                                             , numWorkers
                                             , numSpouts
                                             , OutlierOptions.NUM_INDEXING_WORKERS.has(cli)?
                                               Integer.parseInt(OutlierOptions.NUM_INDEXING_WORKERS.get(cli)):
                                               5
                                             , OutlierOptions.INDEX.has(cli)?
                                               OutlierOptions.INDEX.get(cli):
                                               "{source}/outlier"
                                             , startAtBeginning
                                             );
    StormSubmitter.submitTopologyWithProgressBar( topologyName, clusterConf, topology.createTopology());
    //Nimbus.Client client = NimbusClient.getConfiguredClient(clusterConf).getClient();
}
 
Example 31
Project: storm-kafka-hdfs-example   File: StormConfig.java   Vote up 4 votes
public static Config createStormConfig(boolean enableDebug, int numOfWorkers) {
    Config conf = new Config();
    conf.setDebug(enableDebug);
    conf.setNumWorkers(numOfWorkers);
    return conf;
}
 
Example 32
Project: incubator-samoa   File: StormDoTask.java   Vote up 4 votes
/**
 * The main method.
 * 
 * @param args
 *          the arguments
 */
public static void main(String[] args) {

  List<String> tmpArgs = new ArrayList<String>(Arrays.asList(args));

  boolean isLocal = isLocal(tmpArgs);
  int numWorker = StormSamoaUtils.numWorkers(tmpArgs);

  args = tmpArgs.toArray(new String[0]);

  // convert the arguments into Storm topology
  StormTopology stormTopo = StormSamoaUtils.argsToTopology(args);
  String topologyName = stormTopo.getTopologyName();

  Config conf = new Config();
  conf.putAll(Utils.readStormConfig());
  conf.setDebug(false);

  if (isLocal) {
    // local mode
    conf.setMaxTaskParallelism(numWorker);

    backtype.storm.LocalCluster cluster = new backtype.storm.LocalCluster();
    cluster.submitTopology(topologyName, conf, stormTopo.getStormBuilder().createTopology());

    backtype.storm.utils.Utils.sleep(600 * 1000);

    cluster.killTopology(topologyName);
    cluster.shutdown();

  } else {
    // cluster mode
    conf.setNumWorkers(numWorker);
    try {
      backtype.storm.StormSubmitter.submitTopology(topologyName, conf,
          stormTopo.getStormBuilder().createTopology());
    } catch (backtype.storm.generated.AlreadyAliveException ale) {
      ale.printStackTrace();
    } catch (backtype.storm.generated.InvalidTopologyException ite) {
      ite.printStackTrace();
    }
  }
}
 
Example 33
Project: cdh-storm   File: WordCountTopology.java   Vote up 3 votes
public static void main(String[] args) throws Exception {

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new RandomSentenceSpout(), 5);

    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));

    Config conf = new Config();
    conf.setDebug(true);


    if (args != null && args.length > 0) {
      conf.setNumWorkers(3);

      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    }
    else {
      conf.setMaxTaskParallelism(3);

      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("word-count", conf, builder.createTopology());

      Thread.sleep(10000);

      cluster.shutdown();
    }
  }
 
Example 34
Project: jstrom   File: ClusterInfoTopology.java   Vote up 3 votes
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    
    builder.setBolt("ClusterInfo", new ClusterInfoBolt(), 1);
    Config conf = new Config();
    conf.setNumWorkers(1);
    
    StormSubmitter.submitTopology("ClusterMonitor", conf, builder.createTopology());

}
 
Example 35
Project: big-data-system   File: WordCountTopology.java   Vote up 3 votes
public static void main(String[] args) throws Exception {

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new RandomSentenceSpout(), 5);

    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));

    Config conf = new Config();
    conf.setDebug(true);


    if (args != null && args.length > 0) {
      conf.setNumWorkers(3);

      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    }
    else {
      conf.setMaxTaskParallelism(3);

      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("word-count", conf, builder.createTopology());

      Thread.sleep(10000);

      cluster.shutdown();
    }
  }