Java Code Examples for backtype.storm.StormSubmitter#submitTopologyWithProgressBar()

The following examples show how to use backtype.storm.StormSubmitter#submitTopologyWithProgressBar(). You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
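
Before the project-specific examples, here is a minimal sketch of the pattern they all follow: build a topology, then either submit it to a real cluster with StormSubmitter#submitTopologyWithProgressBar() (which uploads the topology jar and prints upload progress) or run it in an in-process LocalCluster for testing. MySpout and MyBolt are hypothetical placeholders for your own components; only the configuration and submission calls mirror the examples below.

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.topology.TopologyBuilder;

public class SubmitWithProgressBarSketch {
  public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    // MySpout and MyBolt are hypothetical placeholders for your own spout/bolt classes.
    builder.setSpout("spout", new MySpout(), 1);
    builder.setBolt("bolt", new MyBolt(), 1).shuffleGrouping("spout");

    Config conf = new Config();

    if (args.length > 0) {
      // Cluster mode: args[0] is the topology name; the call uploads the
      // topology jar and reports its progress before submitting.
      conf.setNumWorkers(2);
      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } else {
      // Local mode: run in-process for a short while, then shut down.
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("sketch", conf, builder.createTopology());
      Thread.sleep(10000);
      cluster.shutdown();
    }
  }
}
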
Example 1
Source File: TridentWordCount.java    From flink-perf with Apache License 2.0    6 votes
public static void main(String[] args) throws Exception {
  Config conf = new Config();
  conf.setMaxSpoutPending(20);
  if (args.length == 0) {
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("wordCounter", conf, buildTopology(drpc));
    for (int i = 0; i < 100; i++) {
      System.out.println("DRPC RESULT: " + drpc.execute("words", "cat the dog jumped"));
      Thread.sleep(1000);
    }
  }
  else {
    conf.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(null));
  }
}
 
Example 2
Source File: UnitTopologyRunner.java    From eagle with Apache License 2.0    5 votes
private void run(String topologyId,
                 int numOfTotalWorkers,
                 int numOfSpoutTasks,
                 int numOfRouterBolts,
                 int numOfAlertBolts,
                 int numOfPublishExecutors,
                 int numOfPublishTasks,
                 Config config,
                 boolean localMode) {

    backtype.storm.Config stormConfig = givenStormConfig == null ? new backtype.storm.Config() : givenStormConfig;
    // TODO: Configurable metric consumer instance number

    int messageTimeoutSecs = config.hasPath(MESSAGE_TIMEOUT_SECS) ? config.getInt(MESSAGE_TIMEOUT_SECS) : DEFAULT_MESSAGE_TIMEOUT_SECS;
    LOG.info("Set topology.message.timeout.secs as {}", messageTimeoutSecs);
    stormConfig.setMessageTimeoutSecs(messageTimeoutSecs);

    if (config.hasPath("metric")) {
        stormConfig.registerMetricsConsumer(StormMetricTaggedConsumer.class, config.root().render(ConfigRenderOptions.concise()), 1);
    }

    stormConfig.setNumWorkers(numOfTotalWorkers);
    StormTopology topology = buildTopology(topologyId, numOfSpoutTasks, numOfRouterBolts, numOfAlertBolts, numOfPublishExecutors, numOfPublishTasks, config).createTopology();

    if (localMode) {
        LOG.info("Submitting as local mode");
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyId, stormConfig, topology);
        Utils.sleep(Long.MAX_VALUE);
    } else {
        LOG.info("Submitting as cluster mode");
        try {
            StormSubmitter.submitTopologyWithProgressBar(topologyId, stormConfig, topology);
        } catch (Exception ex) {
            LOG.error("fail submitting topology {}", topology, ex);
            throw new IllegalStateException(ex);
        }
    }
}
 
Example 3
Source File: KafkaThroughput.java    From flink-perf with Apache License 2.0    5 votes
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException, UnknownHostException, InterruptedException {
	final ParameterTool pt = ParameterTool.fromArgs(args);

	TopologyBuilder builder = new TopologyBuilder();
	BrokerHosts hosts = new ZkHosts(pt.getRequired("zookeeper"));
	SpoutConfig spoutConfig = new SpoutConfig(hosts, pt.getRequired("topic"), "/" + pt.getRequired("topic"), UUID.randomUUID().toString());
	spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
	KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
	builder.setSpout("source", kafkaSpout, pt.getInt("sourceParallelism"));

	builder.setBolt("sink", new Throughput.Sink(pt), pt.getInt("sinkParallelism")).noneGrouping("source");

	Config conf = new Config();
	conf.setDebug(false);

	if (!pt.has("local")) {
		conf.setNumWorkers(pt.getInt("par", 2));

		StormSubmitter.submitTopologyWithProgressBar("kafka-spout-"+pt.get("name", "no_name"), conf, builder.createTopology());
	} else {
		conf.setMaxTaskParallelism(pt.getInt("par", 2));

		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("kafka-spout", conf, builder.createTopology());

		Thread.sleep(300000);

		cluster.shutdown();
	}
}
 
Example 4
Source File: TridentThroughput.java    From flink-perf with Apache License 2.0    5 votes
public static void main(String[] args) throws Exception {
	ParameterTool pt = ParameterTool.fromArgs(args);

	int par = pt.getInt("para");


	TridentTopology topology = new TridentTopology();
	Stream sourceStream = topology.newStream("source", new Generator(pt)).parallelismHint(pt.getInt("sourceParallelism"));

	Stream repart = sourceStream.partitionBy(new Fields("id"));
	for(int i = 0; i < pt.getInt("repartitions", 1) - 1; i++) {
		repart = repart.each(new Fields("id"), new IdentityEach(), new Fields("id"+i)).partitionBy(new Fields("id"+i));
	}
	repart.each(new Fields("id", "host", "time", "payload"), new Sink(pt), new Fields("dontcare")).parallelismHint(pt.getInt("sinkParallelism"));

	Config conf = new Config();
	conf.setDebug(false);

	if (!pt.has("local")) {
		conf.setNumWorkers(par);

		StormSubmitter.submitTopologyWithProgressBar("throughput-"+pt.get("name", "no_name"), conf, topology.build());
	}
	else {
		conf.setMaxTaskParallelism(par);

		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("throughput", conf, topology.build());

		Thread.sleep(30000);

		cluster.shutdown();
	}

}
 
Example 5
Source File: ThroughputHostsTracking.java    From flink-perf with Apache License 2.0    5 votes
public static void main(String[] args) throws Exception {
	ParameterTool pt = ParameterTool.fromArgs(args);

	int par = pt.getInt("para");

	TopologyBuilder builder = new TopologyBuilder();

	builder.setSpout("source0", new Generator(pt), pt.getInt("sourceParallelism"));
	int i = 0;
	for(; i < pt.getInt("repartitions", 1) - 1;i++) {
		System.out.println("adding source"+i+" --> source"+(i+1));
		builder.setBolt("source"+(i+1), new RepartPassThroughBolt(pt), pt.getInt("sinkParallelism")).fieldsGrouping("source" + i, new Fields("id"));
	}
	System.out.println("adding final source"+i+" --> sink");

	builder.setBolt("sink", new Sink(pt), pt.getInt("sinkParallelism")).fieldsGrouping("source"+i, new Fields("id"));


	Config conf = new Config();
	conf.setDebug(false);
	//System.exit(1);

	if (!pt.has("local")) {
		conf.setNumWorkers(par);

		StormSubmitter.submitTopologyWithProgressBar("throughput-"+pt.get("name", "no_name"), conf, builder.createTopology());
	}
	else {
		conf.setMaxTaskParallelism(par);

		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("throughput", conf, builder.createTopology());

		Thread.sleep(300000);

		cluster.shutdown();
	}

}
 
Example 6
Source File: Latency.java    From flink-perf with Apache License 2.0    5 votes
public static void main(String[] args) throws Exception {
	ParameterTool pt = ParameterTool.fromArgs(args);

	int par = pt.getInt("para");

	TopologyBuilder builder = new TopologyBuilder();

	builder.setSpout("source0", new Generator(pt), pt.getInt("sourceParallelism"));
	int i = 0;
	for(; i < pt.getInt("repartitions", 1) - 1;i++) {
		System.out.println("adding source"+i+" --> source"+(i+1));
		builder.setBolt("source"+(i+1), new PassThroughBolt(pt), pt.getInt("sinkParallelism")).fieldsGrouping("source" + i, new Fields("id"));
	}
	System.out.println("adding final source"+i+" --> sink");

	builder.setBolt("sink", new Sink(pt), pt.getInt("sinkParallelism")).fieldsGrouping("source"+i, new Fields("id"));


	Config conf = new Config();
	conf.setDebug(false);
	//System.exit(1);

	if (!pt.has("local")) {
		conf.setNumWorkers(par);

		StormSubmitter.submitTopologyWithProgressBar("throughput-"+pt.get("name", "no_name"), conf, builder.createTopology());
	}
	else {
		conf.setMaxTaskParallelism(par);

		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("throughput", conf, builder.createTopology());

		Thread.sleep(300000);

		cluster.shutdown();
	}

}
 
Example 7
Source File: TopologySubmitter.java    From eagle with Apache License 2.0    4 votes
public static void submit(StormTopology topology, Config config){
    backtype.storm.Config stormConfig = new backtype.storm.Config();
    int messageTimeoutSecs = config.hasPath(MESSAGE_TIMEOUT_SECS) ? config.getInt(MESSAGE_TIMEOUT_SECS) : DEFAULT_MESSAGE_TIMEOUT_SECS;
    LOG.info("Set topology.message.timeout.secs as {}", messageTimeoutSecs);
    stormConfig.setMessageTimeoutSecs(messageTimeoutSecs);

    // set kafka sink
    if(config.hasPath("dataSinkConfig.brokerList")){
        Map props = new HashMap<>();
        props.put("metadata.broker.list", config.getString("dataSinkConfig.brokerList"));
        props.put("serializer.class", config.getString("dataSinkConfig.serializerClass"));
        props.put("key.serializer.class", config.getString("dataSinkConfig.keySerializerClass"));
        stormConfig.put(KafkaBolt.KAFKA_BROKER_PROPERTIES, props);
    }

    if(config.hasPath("dataSinkConfig.serializerClass")){
        stormConfig.put(KafkaBolt.TOPIC, config.getString("dataSinkConfig.topic"));
    }

    if(config.hasPath("dataSinkConfig.topic")){
        stormConfig.put(KafkaBolt.TOPIC, config.getString("dataSinkConfig.topic"));
    }

    boolean localMode = config.getBoolean(LOCAL_MODE);
    int numOfTotalWorkers = config.getInt(TOTAL_WORKER_NUM);
    stormConfig.setNumWorkers(numOfTotalWorkers);
    String topologyId = config.getString("topology.name");
    if(localMode) {
        LOG.info("Submitting as local mode");
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyId, stormConfig, topology);
        Utils.sleep(Long.MAX_VALUE);
    }else{
        LOG.info("Submitting as cluster mode");
        try {
            StormSubmitter.submitTopologyWithProgressBar(topologyId, stormConfig, topology);
        } catch(Exception ex) {
            LOG.error("fail submitting topology {}", topology, ex);
            throw new IllegalStateException(ex);
        }
    }
}
 
Example 8
Source File: Throughput.java    From flink-perf with Apache License 2.0    4 votes
public static void main(String[] args) throws Exception {
	ParameterTool pt = ParameterTool.fromArgs(args);

	int par = pt.getInt("para");

	TopologyBuilder builder = new TopologyBuilder();

	builder.setSpout("source0", new Generator(pt), pt.getInt("sourceParallelism"));
	int i = 0;
	for(; i < pt.getInt("repartitions", 1) - 1;i++) {
		System.out.println("adding source"+i+" --> source"+(i+1));
		builder.setBolt("source"+(i+1), new RepartPassThroughBolt(pt), pt.getInt("sinkParallelism")).fieldsGrouping("source" + i, new Fields("id"));
	}
	System.out.println("adding final source"+i+" --> sink");

	builder.setBolt("sink", new Sink(pt), pt.getInt("sinkParallelism")).fieldsGrouping("source"+i, new Fields("id"));


	Config conf = new Config();
	conf.setDebug(false);
	//System.exit(1);
	if(pt.has("ft") || pt.has("maxPending")) {
		conf.setMaxSpoutPending(pt.getInt("maxPending", 1000));
	}

	if (!pt.has("local")) {
		conf.setNumWorkers(par);

		StormSubmitter.submitTopologyWithProgressBar("throughput-"+pt.get("name", "no_name"), conf, builder.createTopology());
	}
	else {
		conf.setMaxTaskParallelism(par);

		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("throughput", conf, builder.createTopology());

		Thread.sleep(300000);

		cluster.shutdown();
	}

}
 
Example 9
Source File: ForwardThroughput.java    From flink-perf with Apache License 2.0    3 votes
public static void main(String[] args) throws Exception {
	ParameterTool pt = ParameterTool.fromArgs(args);

	int par = pt.getInt("para");

	TopologyBuilder builder = new TopologyBuilder();

	builder.setSpout("source0", new Generator(pt), pt.getInt("sourceParallelism"));

	//builder.setBolt("sink", new Sink(pt), pt.getInt("sinkParallelism")).noneGrouping("source0");
	builder.setBolt("sink", new Sink(pt), pt.getInt("sinkParallelism")).localOrShuffleGrouping("source0");


	Config conf = new Config();
	conf.setDebug(false);

	conf.setMaxSpoutPending(pt.getInt("maxPending", 1000));
	//System.exit(1);

	if (!pt.has("local")) {
		conf.setNumWorkers(par);

		StormSubmitter.submitTopologyWithProgressBar("forward-throughput-"+pt.get("name", "no_name"), conf, builder.createTopology());
	}
	else {
		conf.setMaxTaskParallelism(par);

		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("forward-throughput", conf, builder.createTopology());

		Thread.sleep(300000);

		cluster.shutdown();
	}

}
 
Example 10
Source File: TridentForwardThroughput.java    From flink-perf with Apache License 2.0    3 votes
public static void main(String[] args) throws Exception {
	ParameterTool pt = ParameterTool.fromArgs(args);

	int par = pt.getInt("para");

	TridentTopology topology = new TridentTopology();
	Stream sourceStream = topology.newStream("source", new Generator(pt)).parallelismHint(pt.getInt("sourceParallelism"));
	sourceStream.localOrShuffle().each(FIELDS, new Sink(pt), new Fields("dontcare"));

	Config conf = new Config();
	conf.setDebug(false);

//	conf.setMaxSpoutPending(pt.getInt("maxPending", 1000));

	//System.exit(1);

	if (!pt.has("local")) {
		conf.setNumWorkers(par);

		StormSubmitter.submitTopologyWithProgressBar("forward-throughput-"+pt.get("name", "no_name"), conf, topology.build());
	}
	else {
		conf.setMaxTaskParallelism(par);

		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("forward-throughput", conf, topology.build());

		Thread.sleep(300000);

		cluster.shutdown();
	}

}
 
Example 11
Source File: WordCountTopology.java    From flink-perf with Apache License 2.0    3 votes
public static void main(String[] args) throws Exception {

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new RandomSentenceSpout(), 5);

    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));

    Config conf = new Config();
    conf.setDebug(true);


    if (args != null && args.length > 0) {
      conf.setNumWorkers(3);

      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    }
    else {
      conf.setMaxTaskParallelism(3);

      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("word-count", conf, builder.createTopology());

      Thread.sleep(10000);

      cluster.shutdown();
    }
}
 
Example 12
Source File: WordCountTopologyNode.java    From flink-perf with Apache License 2.0    3 votes
public static void main(String[] args) throws Exception {

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new RandomSentence(), 5);

    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));

    Config conf = new Config();
    conf.setDebug(true);


    if (args != null && args.length > 0) {
      conf.setNumWorkers(3);

      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    }
    else {
      conf.setMaxTaskParallelism(3);

      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("word-count", conf, builder.createTopology());

      Thread.sleep(10000);

      cluster.shutdown();
    }
}