Java Code Examples for backtype.storm.Config.setNumAckers()

The following are Java code examples showing how to use the setNumAckers() method of the backtype.storm.Config class, collected from several open-source projects. setNumAckers() sets Config.TOPOLOGY_ACKER_EXECUTORS, the number of acker executors that track tuple trees for guaranteed message processing; passing 0 disables acking, so tuples are considered processed as soon as the spout emits them. The method exists both as an instance method (conf.setNumAckers(n)) and as a static helper that operates on a plain configuration map (Config.setNumAckers(conf, n)); both forms appear in the examples below.
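Before the project examples, here is a minimal, self-contained sketch of the two call forms. It is not taken from any of the projects below: it assumes only storm-core on the classpath and uses its TestWordSpout test spout as a placeholder topology.

import backtype.storm.Config;
import backtype.storm.StormSubmitter;
import backtype.storm.testing.TestWordSpout;
import backtype.storm.topology.TopologyBuilder;

public class NumAckersExample {
  public static void main(String[] args) throws Exception {
    // Placeholder topology: a single test spout, no bolts.
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word", new TestWordSpout(), 1);

    Config conf = new Config();
    conf.setNumWorkers(2);

    // Instance form: run two acker executors to track tuple trees.
    conf.setNumAckers(2);

    // Static form, equivalent to the call above; it works on any config map.
    // Passing 0 instead would disable acking entirely.
    Config.setNumAckers(conf, 2);

    StormSubmitter.submitTopology("num-ackers-example", conf, builder.createTopology());
  }
}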
Example 1
Project: jstorm-0.9.6.3-   File: TransactionalGlobalCount.java
public static void main(String[] args) throws Exception {
    MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
    TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("global-count", "spout", spout, 2);
    builder.setBolt("partial-count", new BatchCount(), 3).noneGrouping("spout");
    builder.setBolt("sum", new UpdateGlobalCount(), 1).globalGrouping("partial-count");

//    LocalCluster cluster = new LocalCluster();

    Config config = new Config();
    config.setDebug(true);
    config.setMaxSpoutPending(3);
    config.put(Config.TOPOLOGY_WORKERS, 9);
    // 0 ackers: acking is disabled, so tuples are acked as soon as the spout emits them
    Config.setNumAckers(config, 0);

    StormSubmitter.submitTopology("global-count-topology", config, builder.buildTopology());

//    Thread.sleep(3000);
//    cluster.shutdown();
}
 
Example 2
Project: learn_jstorm   File: TransactionalGlobalCount.java
public static void main(String[] args) throws Exception {
    MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
    TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("global-count", "spout", spout, 2);
    builder.setBolt("partial-count", new BatchCount(), 3).noneGrouping("spout");
    builder.setBolt("sum", new UpdateGlobalCount(), 1).globalGrouping("partial-count");

//    LocalCluster cluster = new LocalCluster();

    Config config = new Config();
    config.setDebug(true);
    config.setMaxSpoutPending(3);
    config.put(Config.TOPOLOGY_WORKERS, 9);
    // 0 ackers: acking is disabled, so tuples are acked as soon as the spout emits them
    Config.setNumAckers(config, 0);

    StormSubmitter.submitTopology("global-count-topology", config, builder.buildTopology());

//    Thread.sleep(3000);
//    cluster.shutdown();
}
 
Example 3
Project: aeolus   File: LRBTopologyMain.java
/**
 * Encapsulates creation of the cluster after parameter parsing.
 * 
 * @param offset
 * @param executors
 * @param xways
 * @param host
 * @param port
 * @param histFile
 * @param tasks
 * @param submit
 * @param stormConfigDebug
 * @param workers
 * @param nameext
 * @param runtimeMillis
 * @throws AlreadyAliveException
 * @throws InvalidTopologyException
 * @throws java.io.FileNotFoundException
 */
public static void main0(int offset, int executors, int xways, String host, int port, String histFile, int tasks, boolean submit, boolean stormConfigDebug, int workers, String nameext, int runtimeMillis)
	throws AlreadyAliveException, InvalidTopologyException, FileNotFoundException {
	StopWatch stormTimer = new StopWatch(offset);
	String topologyNamePrefix = nameext + "_lrbNormal_" + "_L" + xways + "_" + workers + "W_T" + tasks + "_"
		+ executors + "E_O" + offset;
	Config conf = new Config();
	LRBTopology lRBTopology = new LRBTopology(nameext, xways, workers, tasks, executors, offset,
		new FileReaderSpout(), // add AbstractOrderedFileInputSpout.INPUT_FILE_NAME
								// and
								// AbstractOrderedFileInputSpout.INPUT_FILE_SUFFIXES
								// in Config below
		stormTimer, submit, histFile, topologyNamePrefix, conf);
	StormTopology topology = lRBTopology.getStormTopology();
	conf.setDebug(stormConfigDebug);
	conf.put(AbstractOrderedFileInputSpout.INPUT_FILE_NAME,
		LRBTopologyMain.class.getResource("/datafile20seconds.dat").getFile());
	
	Locale newLocale = new Locale("en", "US");
	LOGGER.debug(String.format("setting locale to %s", newLocale));
	Locale.setDefault(newLocale); // why??
	
	LOGGER.debug("starting cluster: " + "stormlrb" + topologyNamePrefix);
	if(submit) {
		
		conf.setNumWorkers(workers);
		conf.setNumAckers(workers);
		
		StormSubmitter.submitTopology(topologyNamePrefix, conf, topology);
		
	} else {
		
		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology(TopologyControl.TOPOLOGY_NAME, conf, topology);
		
		Utils.sleep(runtimeMillis);
		cluster.killTopology(TopologyControl.TOPOLOGY_NAME);
		cluster.shutdown();
	}
}
 
Example 4
Project: heron   File: MultiStageAckingTopology.java
public static void main(String[] args) throws Exception {
  if (args.length != 1) {
    throw new RuntimeException("Please specify the name of the topology");
  }
  TopologyBuilder builder = new TopologyBuilder();

  int parallelism = 2;
  builder.setSpout("word", new AckingTestWordSpout(), parallelism);
  builder.setBolt("exclaim1", new ExclamationBolt(true), parallelism)
      .shuffleGrouping("word");
  builder.setBolt("exclaim2", new ExclamationBolt(false), parallelism)
      .shuffleGrouping("exclaim1");

  Config conf = new Config();
  conf.setDebug(true);

  // Put an arbitrary large number here if you don't want to slow the topology down
  conf.setMaxSpoutPending(1000 * 1000 * 1000);

  // To enable acking, set the number of acker executors to at least one
  conf.setNumAckers(1);

  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");

  conf.setNumWorkers(parallelism);
  StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
 
Example 5
Project: heron   File: TaskHookTopology.java
public static void main(String[] args) throws Exception {
  if (args.length != 1) {
    throw new RuntimeException("Specify topology name");
  }
  TopologyBuilder builder = new TopologyBuilder();

  builder.setSpout("word", new AckingTestWordSpout(), 2);
  builder.setBolt("count", new CountBolt(), 2)
      .shuffleGrouping("word");

  Config conf = new Config();
  conf.setDebug(true);
  // Put an arbitrary large number here if you don't want to slow the topology down
  conf.setMaxSpoutPending(1000 * 1000 * 1000);
  // To enable acking, set the number of acker executors to at least one
  conf.setNumAckers(1);
  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");

  // Set the task hook
  List<String> taskHooks = new LinkedList<>();
  taskHooks.add("com.twitter.heron.examples.TaskHookTopology$TestTaskHook");
  com.twitter.heron.api.Config.setAutoTaskHooks(conf, taskHooks);

  // component resource configuration
  com.twitter.heron.api.Config.setComponentRam(conf, "word", ByteAmount.fromMegabytes(512));
  com.twitter.heron.api.Config.setComponentRam(conf, "count", ByteAmount.fromMegabytes(512));

  // container resource configuration
  com.twitter.heron.api.Config.setContainerDiskRequested(conf, ByteAmount.fromGigabytes(2));
  com.twitter.heron.api.Config.setContainerRamRequested(conf, ByteAmount.fromGigabytes(2));
  com.twitter.heron.api.Config.setContainerCpuRequested(conf, 2);


  conf.setNumWorkers(2);
  StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
 
Example 6
Project: jstorm-0.9.6.3-   File: SequenceTopologyTool.java
public StormTopology buildTopology()
{
	Config conf = getConf();
	TopologyBuilder builder = new TopologyBuilder();

	int spout_Parallelism_hint = JStormUtils.parseInt(
			conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
	int bolt_Parallelism_hint = JStormUtils.parseInt(
			conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2);

	builder.setSpout(SequenceTopologyDef.SEQUENCE_SPOUT_NAME,
			new SequenceSpout(), spout_Parallelism_hint);

	boolean isEnableSplit = JStormUtils.parseBoolean(
			conf.get("enable.split"), false);

	if (isEnableSplit == false) {
		builder.setBolt(SequenceTopologyDef.TOTAL_BOLT_NAME,
				new TotalCount(), bolt_Parallelism_hint).localFirstGrouping(
				SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
	} else {

		builder.setBolt(SequenceTopologyDef.SPLIT_BOLT_NAME,
				new SplitRecord(), bolt_Parallelism_hint)
				.localOrShuffleGrouping(
						SequenceTopologyDef.SEQUENCE_SPOUT_NAME);

		builder.setBolt(SequenceTopologyDef.TRADE_BOLT_NAME,
				new PairCount(), bolt_Parallelism_hint).shuffleGrouping(
				SequenceTopologyDef.SPLIT_BOLT_NAME,
				SequenceTopologyDef.TRADE_STREAM_ID);
		builder.setBolt(SequenceTopologyDef.CUSTOMER_BOLT_NAME,
				new PairCount(), bolt_Parallelism_hint).shuffleGrouping(
				SequenceTopologyDef.SPLIT_BOLT_NAME,
				SequenceTopologyDef.CUSTOMER_STREAM_ID);

		builder.setBolt(SequenceTopologyDef.MERGE_BOLT_NAME,
				new MergeRecord(), bolt_Parallelism_hint)
				.fieldsGrouping(SequenceTopologyDef.TRADE_BOLT_NAME,
						new Fields("ID"))
				.fieldsGrouping(SequenceTopologyDef.CUSTOMER_BOLT_NAME,
						new Fields("ID"));

		builder.setBolt(SequenceTopologyDef.TOTAL_BOLT_NAME,
				new TotalCount(), bolt_Parallelism_hint).noneGrouping(
				SequenceTopologyDef.MERGE_BOLT_NAME);
	}

	boolean kryoEnable = JStormUtils.parseBoolean(conf.get("kryo.enable"),
			false);
	if (kryoEnable == true) {
		System.out.println("Use Kryo ");
		boolean useJavaSer = JStormUtils.parseBoolean(
				conf.get("fall.back.on.java.serialization"), true);

		Config.setFallBackOnJavaSerialization(conf, useJavaSer);

		Config.registerSerialization(conf, TradeCustomer.class);
		Config.registerSerialization(conf, Pair.class);
	}
	int ackerNum = JStormUtils.parseInt(
			conf.get(Config.TOPOLOGY_ACKER_EXECUTORS), 1);
	Config.setNumAckers(conf, ackerNum);

	int workerNum = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_WORKERS),
			20);
	conf.put(Config.TOPOLOGY_WORKERS, workerNum);

	return builder.createTopology();
}
 
Example 7
Project: jstorm-0.9.6.3-   File: SequenceTopology.java
@SuppressWarnings("unchecked")
public static void SetBuilder(TopologyBuilder builder, Map conf) {

	int spout_Parallelism_hint = JStormUtils.parseInt(
			conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
	int bolt_Parallelism_hint = JStormUtils.parseInt(
			conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2);

	builder.setSpout(SequenceTopologyDef.SEQUENCE_SPOUT_NAME,
			new SequenceSpout(), spout_Parallelism_hint);

	boolean isEnableSplit = JStormUtils.parseBoolean(
			conf.get("enable.split"), false);

	if (isEnableSplit == false) {
		BoltDeclarer boltDeclarer = builder.setBolt(
				SequenceTopologyDef.TOTAL_BOLT_NAME, new TotalCount(),
				bolt_Parallelism_hint);

		// localFirstGrouping is only for jstorm
		// boltDeclarer.localFirstGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
		boltDeclarer
				.localOrShuffleGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME)
				.addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 3);
	} else {

		builder.setBolt(SequenceTopologyDef.SPLIT_BOLT_NAME,
				new SplitRecord(), bolt_Parallelism_hint)
				.localOrShuffleGrouping(
						SequenceTopologyDef.SEQUENCE_SPOUT_NAME);

		builder.setBolt(SequenceTopologyDef.TRADE_BOLT_NAME,
				new PairCount(), bolt_Parallelism_hint).shuffleGrouping(
				SequenceTopologyDef.SPLIT_BOLT_NAME,
				SequenceTopologyDef.TRADE_STREAM_ID);
		builder.setBolt(SequenceTopologyDef.CUSTOMER_BOLT_NAME,
				new PairCount(), bolt_Parallelism_hint).shuffleGrouping(
				SequenceTopologyDef.SPLIT_BOLT_NAME,
				SequenceTopologyDef.CUSTOMER_STREAM_ID);

		builder.setBolt(SequenceTopologyDef.MERGE_BOLT_NAME,
				new MergeRecord(), bolt_Parallelism_hint)
				.fieldsGrouping(SequenceTopologyDef.TRADE_BOLT_NAME,
						new Fields("ID"))
				.fieldsGrouping(SequenceTopologyDef.CUSTOMER_BOLT_NAME,
						new Fields("ID"));

		builder.setBolt(SequenceTopologyDef.TOTAL_BOLT_NAME,
				new TotalCount(), bolt_Parallelism_hint).noneGrouping(
				SequenceTopologyDef.MERGE_BOLT_NAME);
	}

	boolean kryoEnable = JStormUtils.parseBoolean(conf.get("kryo.enable"),
			false);
	if (kryoEnable == true) {
		System.out.println("Use Kryo ");
		boolean useJavaSer = JStormUtils.parseBoolean(
				conf.get("fall.back.on.java.serialization"), true);

		Config.setFallBackOnJavaSerialization(conf, useJavaSer);

		Config.registerSerialization(conf, TradeCustomer.class);
		Config.registerSerialization(conf, Pair.class);
	}

	// conf.put(Config.TOPOLOGY_DEBUG, false);
	// conf.put(ConfigExtension.TOPOLOGY_DEBUG_RECV_TUPLE, false);
	// conf.put(Config.STORM_LOCAL_MODE_ZMQ, false);

	int ackerNum = JStormUtils.parseInt(
			conf.get(Config.TOPOLOGY_ACKER_EXECUTORS), 1);
	Config.setNumAckers(conf, ackerNum);
	// conf.put(Config.TOPOLOGY_MAX_TASK_PARALLELISM, 6);
	// conf.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 20);
	// conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 1);

	int workerNum = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_WORKERS),
			20);
	conf.put(Config.TOPOLOGY_WORKERS, workerNum);

}
 
Example 8
Project: learn_jstorm   File: SequenceTopologyTool.java
public StormTopology buildTopology()
{
	Config conf = getConf();
	TopologyBuilder builder = new TopologyBuilder();

	int spout_Parallelism_hint = JStormUtils.parseInt(
			conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
	int bolt_Parallelism_hint = JStormUtils.parseInt(
			conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2);

	builder.setSpout(SequenceTopologyDef.SEQUENCE_SPOUT_NAME,
			new SequenceSpout(), spout_Parallelism_hint);

	boolean isEnableSplit = JStormUtils.parseBoolean(
			conf.get("enable.split"), false);

	if (isEnableSplit == false) {
		builder.setBolt(SequenceTopologyDef.TOTAL_BOLT_NAME,
				new TotalCount(), bolt_Parallelism_hint).localFirstGrouping(
				SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
	} else {

		builder.setBolt(SequenceTopologyDef.SPLIT_BOLT_NAME,
				new SplitRecord(), bolt_Parallelism_hint)
				.localOrShuffleGrouping(
						SequenceTopologyDef.SEQUENCE_SPOUT_NAME);

		builder.setBolt(SequenceTopologyDef.TRADE_BOLT_NAME,
				new PairCount(), bolt_Parallelism_hint).shuffleGrouping(
				SequenceTopologyDef.SPLIT_BOLT_NAME,
				SequenceTopologyDef.TRADE_STREAM_ID);
		builder.setBolt(SequenceTopologyDef.CUSTOMER_BOLT_NAME,
				new PairCount(), bolt_Parallelism_hint).shuffleGrouping(
				SequenceTopologyDef.SPLIT_BOLT_NAME,
				SequenceTopologyDef.CUSTOMER_STREAM_ID);

		builder.setBolt(SequenceTopologyDef.MERGE_BOLT_NAME,
				new MergeRecord(), bolt_Parallelism_hint)
				.fieldsGrouping(SequenceTopologyDef.TRADE_BOLT_NAME,
						new Fields("ID"))
				.fieldsGrouping(SequenceTopologyDef.CUSTOMER_BOLT_NAME,
						new Fields("ID"));

		builder.setBolt(SequenceTopologyDef.TOTAL_BOLT_NAME,
				new TotalCount(), bolt_Parallelism_hint).noneGrouping(
				SequenceTopologyDef.MERGE_BOLT_NAME);
	}

	boolean kryoEnable = JStormUtils.parseBoolean(conf.get("kryo.enable"),
			false);
	if (kryoEnable == true) {
		System.out.println("Use Kryo ");
		boolean useJavaSer = JStormUtils.parseBoolean(
				conf.get("fall.back.on.java.serialization"), true);

		Config.setFallBackOnJavaSerialization(conf, useJavaSer);

		Config.registerSerialization(conf, TradeCustomer.class);
		Config.registerSerialization(conf, Pair.class);
	}
	int ackerNum = JStormUtils.parseInt(
			conf.get(Config.TOPOLOGY_ACKER_EXECUTORS), 1);
	Config.setNumAckers(conf, ackerNum);

	int workerNum = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_WORKERS),
			20);
	conf.put(Config.TOPOLOGY_WORKERS, workerNum);

	return builder.createTopology();
}
 
Example 9
Project: learn_jstorm   File: SequenceTopology.java
public static void SetBuilder(TopologyBuilder builder, Map conf) {

		int spout_Parallelism_hint = JStormUtils.parseInt(
				conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
		int bolt_Parallelism_hint = JStormUtils.parseInt(
				conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2);

		builder.setSpout(SequenceTopologyDef.SEQUENCE_SPOUT_NAME,
				new SequenceSpout(), spout_Parallelism_hint);

		boolean isEnableSplit = JStormUtils.parseBoolean(
				conf.get("enable.split"), false);

		if (isEnableSplit == false) {
			BoltDeclarer boltDeclarer = builder.setBolt(
					SequenceTopologyDef.TOTAL_BOLT_NAME, new TotalCount(),
					bolt_Parallelism_hint);

			// localFirstGrouping is only for jstorm
			// boltDeclarer.localFirstGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
			boltDeclarer
					.localOrShuffleGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME)
					.addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 3);
		} else {

			builder.setBolt(SequenceTopologyDef.SPLIT_BOLT_NAME,
					new SplitRecord(), bolt_Parallelism_hint)
					.localOrShuffleGrouping(
							SequenceTopologyDef.SEQUENCE_SPOUT_NAME);

			builder.setBolt(SequenceTopologyDef.TRADE_BOLT_NAME,
					new PairCount(), bolt_Parallelism_hint).shuffleGrouping(
					SequenceTopologyDef.SPLIT_BOLT_NAME,
					SequenceTopologyDef.TRADE_STREAM_ID);
			builder.setBolt(SequenceTopologyDef.CUSTOMER_BOLT_NAME,
					new PairCount(), bolt_Parallelism_hint).shuffleGrouping(
					SequenceTopologyDef.SPLIT_BOLT_NAME,
					SequenceTopologyDef.CUSTOMER_STREAM_ID);

			builder.setBolt(SequenceTopologyDef.MERGE_BOLT_NAME,
					new MergeRecord(), bolt_Parallelism_hint)
					.fieldsGrouping(SequenceTopologyDef.TRADE_BOLT_NAME,
							new Fields("ID"))
					.fieldsGrouping(SequenceTopologyDef.CUSTOMER_BOLT_NAME,
							new Fields("ID"));

			builder.setBolt(SequenceTopologyDef.TOTAL_BOLT_NAME,
					new TotalCount(), bolt_Parallelism_hint).noneGrouping(
					SequenceTopologyDef.MERGE_BOLT_NAME);
		}

		boolean kryoEnable = JStormUtils.parseBoolean(conf.get("kryo.enable"),
				false);
		if (kryoEnable == true) {
			System.out.println("Use Kryo ");
			boolean useJavaSer = JStormUtils.parseBoolean(
					conf.get("fall.back.on.java.serialization"), true);

			Config.setFallBackOnJavaSerialization(conf, useJavaSer);

			Config.registerSerialization(conf, TradeCustomer.class);
			Config.registerSerialization(conf, Pair.class);
		}

		// conf.put(Config.TOPOLOGY_DEBUG, false);
		// conf.put(ConfigExtension.TOPOLOGY_DEBUG_RECV_TUPLE, false);
		// conf.put(Config.STORM_LOCAL_MODE_ZMQ, false);

		int ackerNum = JStormUtils.parseInt(
				conf.get(Config.TOPOLOGY_ACKER_EXECUTORS), 1);
		Config.setNumAckers(conf, ackerNum);
		// conf.put(Config.TOPOLOGY_MAX_TASK_PARALLELISM, 6);
		// conf.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 20);
		// conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 1);

		int workerNum = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_WORKERS),
				20);
		conf.put(Config.TOPOLOGY_WORKERS, workerNum);

	}
 
Example 10
Project: streaming-benchmarks   File: AdvertisingTopology.java
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    Options opts = new Options();
    opts.addOption("conf", true, "Path to the config file.");

    CommandLineParser parser = new DefaultParser();
    CommandLine cmd = parser.parse(opts, args);
    String configPath = cmd.getOptionValue("conf");
    Map commonConfig = Utils.findAndReadConfigFile(configPath, true);

    String zkServerHosts = joinHosts((List<String>)commonConfig.get("zookeeper.servers"),
                                     Integer.toString((Integer)commonConfig.get("zookeeper.port")));
    String redisServerHost = (String)commonConfig.get("redis.host");
    String kafkaTopic = (String)commonConfig.get("kafka.topic");
    int kafkaPartitions = ((Number)commonConfig.get("kafka.partitions")).intValue();
    int workers = ((Number)commonConfig.get("storm.workers")).intValue();
    int ackers = ((Number)commonConfig.get("storm.ackers")).intValue();
    int cores = ((Number)commonConfig.get("process.cores")).intValue();
    int parallel = Math.max(1, cores/7);

    ZkHosts hosts = new ZkHosts(zkServerHosts);



    SpoutConfig spoutConfig = new SpoutConfig(hosts, kafkaTopic, "/" + kafkaTopic, UUID.randomUUID().toString());
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

    builder.setSpout("ads", kafkaSpout, kafkaPartitions);
    builder.setBolt("event_deserializer", new DeserializeBolt(), parallel).shuffleGrouping("ads");
    builder.setBolt("event_filter", new EventFilterBolt(), parallel).shuffleGrouping("event_deserializer");
    builder.setBolt("event_projection", new EventProjectionBolt(), parallel).shuffleGrouping("event_filter");
    builder.setBolt("redis_join", new RedisJoinBolt(redisServerHost), parallel).shuffleGrouping("event_projection");
    builder.setBolt("campaign_processor", new CampaignProcessor(redisServerHost), parallel*2)
        .fieldsGrouping("redis_join", new Fields("campaign_id"));

    Config conf = new Config();

    if (args != null && args.length > 0) {
        conf.setNumWorkers(workers);
        conf.setNumAckers(ackers);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    }
    else {

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", conf, builder.createTopology());
        backtype.storm.utils.Utils.sleep(10000);
        cluster.killTopology("test");
        cluster.shutdown();
    }
}
 
Example 11
Project: jstrom   File: SequenceTopologyTool.java
public StormTopology buildTopology()
{
	Config conf = getConf();
	TopologyBuilder builder = new TopologyBuilder();

	int spout_Parallelism_hint = JStormUtils.parseInt(
			conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
	int bolt_Parallelism_hint = JStormUtils.parseInt(
			conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2);

	builder.setSpout(SequenceTopologyDef.SEQUENCE_SPOUT_NAME,
			new SequenceSpout(), spout_Parallelism_hint);

	boolean isEnableSplit = JStormUtils.parseBoolean(
			conf.get("enable.split"), false);

	if (isEnableSplit == false) {
		builder.setBolt(SequenceTopologyDef.TOTAL_BOLT_NAME,
				new TotalCount(), bolt_Parallelism_hint).localFirstGrouping(
				SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
	} else {

		builder.setBolt(SequenceTopologyDef.SPLIT_BOLT_NAME,
				new SplitRecord(), bolt_Parallelism_hint)
				.localOrShuffleGrouping(
						SequenceTopologyDef.SEQUENCE_SPOUT_NAME);

		builder.setBolt(SequenceTopologyDef.TRADE_BOLT_NAME,
				new PairCount(), bolt_Parallelism_hint).shuffleGrouping(
				SequenceTopologyDef.SPLIT_BOLT_NAME,
				SequenceTopologyDef.TRADE_STREAM_ID);
		builder.setBolt(SequenceTopologyDef.CUSTOMER_BOLT_NAME,
				new PairCount(), bolt_Parallelism_hint).shuffleGrouping(
				SequenceTopologyDef.SPLIT_BOLT_NAME,
				SequenceTopologyDef.CUSTOMER_STREAM_ID);

		builder.setBolt(SequenceTopologyDef.MERGE_BOLT_NAME,
				new MergeRecord(), bolt_Parallelism_hint)
				.fieldsGrouping(SequenceTopologyDef.TRADE_BOLT_NAME,
						new Fields("ID"))
				.fieldsGrouping(SequenceTopologyDef.CUSTOMER_BOLT_NAME,
						new Fields("ID"));

		builder.setBolt(SequenceTopologyDef.TOTAL_BOLT_NAME,
				new TotalCount(), bolt_Parallelism_hint).noneGrouping(
				SequenceTopologyDef.MERGE_BOLT_NAME);
	}

	boolean kryoEnable = JStormUtils.parseBoolean(conf.get("kryo.enable"),
			false);
	if (kryoEnable == true) {
		System.out.println("Use Kryo ");
		boolean useJavaSer = JStormUtils.parseBoolean(
				conf.get("fall.back.on.java.serialization"), true);

		Config.setFallBackOnJavaSerialization(conf, useJavaSer);

		Config.registerSerialization(conf, TradeCustomer.class);
		Config.registerSerialization(conf, Pair.class);
	}
	int ackerNum = JStormUtils.parseInt(
			conf.get(Config.TOPOLOGY_ACKER_EXECUTORS), 1);
	Config.setNumAckers(conf, ackerNum);

	int workerNum = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_WORKERS),
			20);
	conf.put(Config.TOPOLOGY_WORKERS, workerNum);

	return builder.createTopology();
}
 
Example 12
Project: jstrom   File: SequenceTopology.java
public static void SetBuilder(TopologyBuilder builder, Map conf) {

		int spout_Parallelism_hint = JStormUtils.parseInt(
				conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
		int bolt_Parallelism_hint = JStormUtils.parseInt(
				conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2);

		builder.setSpout(SequenceTopologyDef.SEQUENCE_SPOUT_NAME,
				new SequenceSpout(), spout_Parallelism_hint);

		boolean isEnableSplit = JStormUtils.parseBoolean(
				conf.get("enable.split"), false);

		if (isEnableSplit == false) {
			BoltDeclarer boltDeclarer = builder.setBolt(
					SequenceTopologyDef.TOTAL_BOLT_NAME, new TotalCount(),
					bolt_Parallelism_hint);

			// localFirstGrouping is only for jstorm
			// boltDeclarer.localFirstGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
			boltDeclarer
					.shuffleGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME)
					.addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 3);
		} else {

			builder.setBolt(SequenceTopologyDef.SPLIT_BOLT_NAME,
					new SplitRecord(), bolt_Parallelism_hint)
					.localOrShuffleGrouping(
							SequenceTopologyDef.SEQUENCE_SPOUT_NAME);

			builder.setBolt(SequenceTopologyDef.TRADE_BOLT_NAME,
					new PairCount(), bolt_Parallelism_hint).shuffleGrouping(
					SequenceTopologyDef.SPLIT_BOLT_NAME,
					SequenceTopologyDef.TRADE_STREAM_ID);
			builder.setBolt(SequenceTopologyDef.CUSTOMER_BOLT_NAME,
					new PairCount(), bolt_Parallelism_hint).shuffleGrouping(
					SequenceTopologyDef.SPLIT_BOLT_NAME,
					SequenceTopologyDef.CUSTOMER_STREAM_ID);

			builder.setBolt(SequenceTopologyDef.MERGE_BOLT_NAME,
					new MergeRecord(), bolt_Parallelism_hint)
					.fieldsGrouping(SequenceTopologyDef.TRADE_BOLT_NAME,
							new Fields("ID"))
					.fieldsGrouping(SequenceTopologyDef.CUSTOMER_BOLT_NAME,
							new Fields("ID"));

			builder.setBolt(SequenceTopologyDef.TOTAL_BOLT_NAME,
					new TotalCount(), bolt_Parallelism_hint).noneGrouping(
					SequenceTopologyDef.MERGE_BOLT_NAME);
		}

		boolean kryoEnable = JStormUtils.parseBoolean(conf.get("kryo.enable"),
				false);
		if (kryoEnable == true) {
			System.out.println("Use Kryo ");
			boolean useJavaSer = JStormUtils.parseBoolean(
					conf.get("fall.back.on.java.serialization"), true);

			Config.setFallBackOnJavaSerialization(conf, useJavaSer);

			Config.registerSerialization(conf, TradeCustomer.class);
			Config.registerSerialization(conf, Pair.class);
		}

		// conf.put(Config.TOPOLOGY_DEBUG, false);
		// conf.put(ConfigExtension.TOPOLOGY_DEBUG_RECV_TUPLE, false);
		// conf.put(Config.STORM_LOCAL_MODE_ZMQ, false);

		int ackerNum = JStormUtils.parseInt(
				conf.get(Config.TOPOLOGY_ACKER_EXECUTORS), 1);
		Config.setNumAckers(conf, ackerNum);
		// conf.put(Config.TOPOLOGY_MAX_TASK_PARALLELISM, 6);
		// conf.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 20);
		// conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 1);

		int workerNum = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_WORKERS),
				20);
		conf.put(Config.TOPOLOGY_WORKERS, workerNum);

	}