Java Code Examples for backtype.storm.StormSubmitter#submitTopology()

The following examples show how to use backtype.storm.StormSubmitter#submitTopology(). They are taken from open source projects; the source file, project, and license are listed above each example.
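All of the examples below follow the same basic pattern: assemble a StormTopology with a TopologyBuilder, prepare a Config, and pass both to StormSubmitter.submitTopology(). As a minimal sketch of that pattern (not taken from any of the projects below, and assuming the TestWordSpout and TestWordCounter components shipped in Storm's backtype.storm.testing package are on the classpath), the class below submits a trivial word-count topology under a made-up name; substitute your own spout, bolt, and topology name.

import backtype.storm.Config;
import backtype.storm.StormSubmitter;
import backtype.storm.testing.TestWordCounter;
import backtype.storm.testing.TestWordSpout;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.tuple.Fields;

public class MinimalSubmitExample {
    public static void main(String[] args) throws Exception {
        // Wire a trivial word -> counter topology using Storm's bundled test components.
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("word", new TestWordSpout(), 1);
        builder.setBolt("count", new TestWordCounter(), 2)
                .fieldsGrouping("word", new Fields("word"));

        // Topology-level configuration; the cluster location comes from storm.yaml / -c overrides.
        Config conf = new Config();
        conf.setNumWorkers(2);

        // Uploads the jar (set via the storm jar command or the storm.jar system property)
        // and submits the topology; throws AlreadyAliveException if the name is already taken.
        // "minimal-submit-example" is a placeholder topology name.
        StormSubmitter.submitTopology("minimal-submit-example", conf, builder.createTopology());
    }
}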
Example 1
Source File: RealTimeTextSearch.java    From trident-tutorial with Apache License 2.0
public static void main(String[] args) throws Exception {
    Config conf = new Config();

    if (args.length == 2) {
        // Ready & submit the topology
        String name = args[0];
        BrokerHosts hosts = new ZkHosts(args[1]);
        TransactionalTridentKafkaSpout kafkaSpout = TestUtils.testTweetSpout(hosts);

        StormSubmitter.submitTopology(name, conf, buildTopology(kafkaSpout));
    } else {
        System.err.println("Usage: <topologyName> <zookeeperHost>");
    }
}
 
Example 2
Source File: BatchAckerTest.java    From jstorm with Apache License 2.0
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    Config conf = JStormHelper.getConfig(args);
    int spoutParallelism = JStormUtils.parseInt(conf.get(SPOUT_PARALLELISM_HINT), 1);
    int splitParallelism = JStormUtils.parseInt(conf.get(SPLIT_PARALLELISM_HINT), 2);
    int countParallelism = JStormUtils.parseInt(conf.get(COUNT_PARALLELISM_HINT), 2);
    boolean isValueSpout = JStormUtils.parseBoolean(conf.get("is.value.spout"), false);

    TransactionTopologyBuilder builder = new TransactionTopologyBuilder();
    if (isValueSpout)
        builder.setSpoutWithAck("spout", new BatchAckerValueSpout(), spoutParallelism);
    else
        builder.setSpoutWithAck("spout", new BatchAckerSpout(), spoutParallelism);
    builder.setBoltWithAck("split", new BatchAckerSplit(), splitParallelism).localOrShuffleGrouping("spout");;
    builder.setBoltWithAck("count", new BatchAckerCount(), countParallelism).fieldsGrouping("split", new Fields("word"));
    
    String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = className[className.length - 1];
    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
}
 
Example 3
Source File: WordCountTopology.java    From incubator-heron with Apache License 2.0
/**
 * Main method: builds the word-count topology and submits it under the given name,
 * with an optional parallelism argument for the spout and bolt.
 */
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
  if (args.length < 1) {
    throw new RuntimeException("Specify topology name");
  }

  int parallelism = 1;
  if (args.length > 1) {
    parallelism = Integer.parseInt(args[1]);
  }
  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("word", new WordSpout(), parallelism);
  builder.setBolt("consumer", new ConsumerBolt(), parallelism)
      .fieldsGrouping("word", new Fields("word"));
  Config conf = new Config();
  conf.setNumWorkers(parallelism);

  StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
 
Example 4
Source File: JoinExample.java    From trident-tutorial with Apache License 2.0
public static void main(String[] args) throws Exception {
    Config conf = new Config();

    if (args.length == 2) {
        // Ready & submit the topology
        String name = args[0];
        BrokerHosts hosts = new ZkHosts(args[1]);
        TransactionalTridentKafkaSpout kafkaSpout = TestUtils.testTweetSpout(hosts);

        StormSubmitter.submitTopology(name, conf, buildTopology(kafkaSpout));
    } else {
        System.err.println("Usage: <topologyName> <zookeeperHost>");
    }
}
 
Example 5
Source File: SequenceTopologyTool.java    From jstorm with Apache License 2.0
public void SetRemoteTopology() throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException {
    Config conf = getConf();
    StormTopology topology = buildTopology();
    
    conf.put(Config.STORM_CLUSTER_MODE, "distributed");
    String streamName = (String) conf.get(Config.TOPOLOGY_NAME);
    if (streamName == null) {
        streamName = "SequenceTest";
    }
    
    if (streamName.contains("zeromq")) {
        conf.put(Config.STORM_MESSAGING_TRANSPORT, "com.alibaba.jstorm.message.zeroMq.MQContext");
        
    } else {
        conf.put(Config.STORM_MESSAGING_TRANSPORT, "com.alibaba.jstorm.message.netty.NettyContext");
    }
    
    StormSubmitter.submitTopology(streamName, conf, topology);
    
}
 
Example 6
Source File: PerformanceTestTopology.java    From jstorm with Apache License 2.0
public static void SetRemoteTopology()
        throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException {
    String streamName = (String) conf.get(Config.TOPOLOGY_NAME);
    if (streamName == null) {
        String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
        streamName = className[className.length - 1];
    }
    
    TopologyBuilder builder = new TopologyBuilder();
    
    int spout_Parallelism_hint = JStormUtils.parseInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
    int bolt_Parallelism_hint = JStormUtils.parseInt(conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2);
    builder.setSpout("spout", new TestSpout(), spout_Parallelism_hint);
    
    BoltDeclarer boltDeclarer = builder.setBolt("bolt", new TestBolt(), bolt_Parallelism_hint);
    // localFirstGrouping is only for jstorm
    // boltDeclarer.localFirstGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
    boltDeclarer.shuffleGrouping("spout");
    // .addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 60);
    
    conf.put(Config.STORM_CLUSTER_MODE, "distributed");
    
    StormSubmitter.submitTopology(streamName, conf, builder.createTopology());
    
}
 
Example 7
Source File: PerformanceTestTopology.java    From jstorm with Apache License 2.0
public static void SetRemoteTopology()
        throws Exception {
    String streamName = (String) conf.get(Config.TOPOLOGY_NAME);
    if (streamName == null) {
        String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
        streamName = className[className.length - 1];
    }
    
    TopologyBuilder builder = new TopologyBuilder();
    
    int spout_Parallelism_hint = Utils.getInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
    int bolt_Parallelism_hint = Utils.getInt(conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2);
    builder.setSpout("spout", new TestSpout(), spout_Parallelism_hint);
    
    BoltDeclarer boltDeclarer = builder.setBolt("bolt", new TestBolt(), bolt_Parallelism_hint);
    // localFirstGrouping is only for jstorm
    // boltDeclarer.localFirstGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
    boltDeclarer.shuffleGrouping("spout");
    // .addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 60);
    
    StormSubmitter.submitTopology(streamName, conf, builder.createTopology());
    
}
 
Example 8
Source File: TransactionTestTopology.java    From jstorm with Apache License 2.0
public static void test() throws Exception {
    TransactionTopologyBuilder builder = new TransactionTopologyBuilder();
    if (isLocal) {
        conf.put("tuple.num.per.batch", 5);
        conf.put("transaction.scheduler.spout", false);
        conf.put("transaction.exactly.cache.type", "default");
    }

    int spoutParallelism = JStormUtils.parseInt(conf.get(SPOUT_PARALLELISM_HINT), 1);
    int splitParallelism = JStormUtils.parseInt(conf.get(SPLIT_PARALLELISM_HINT), 2);
    int countParallelism = JStormUtils.parseInt(conf.get(COUNT_PARALLELISM_HINT), 2);

    boolean isScheduleSpout = JStormUtils.parseBoolean(conf.get("transaction.scheduler.spout"), true);
    if (isScheduleSpout)
        // Generate batch by configured time. "transaction.schedule.batch.delay.ms: 1000 # 1sec"
        builder.setSpout("spout", new ScheduleTxSpout(), spoutParallelism);
    else
        // Generate batch by user when calling emitBarrier
        builder.setSpout("spout", new BasicTxSpout(), spoutParallelism, false);
    builder.setBolt("split", new TxSplitSentence(), splitParallelism).localOrShuffleGrouping("spout");
    builder.setBolt("count", new TxWordCount(), countParallelism).fieldsGrouping("split", new Fields("word"));

    String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = className[className.length - 1];
    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
}
 
Example 9
Source File: TridentSequenceTopology.java    From storm-hdfs with Apache License 2.0
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(5);
    if (args.length == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordCounter", conf, buildTopology(args[0]));
        Thread.sleep(120 * 1000);
    }
    else if (args.length == 2) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[1], conf, buildTopology(args[0]));
    } else {
        System.out.println("Usage: TridentSequenceTopology <hdfs url> [topology name]");
    }
}
 
Example 10
Source File: TridentFileTopology.java    From storm-hdfs with Apache License 2.0
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(5);
    if (args.length == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordCounter", conf, buildTopology(args[0]));
        Thread.sleep(120 * 1000);
    }
    else if (args.length == 2) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[1], conf, buildTopology(args[0]));
    } else {
        System.out.println("Usage: TridentFileTopology <hdfs url> [topology name]");
    }
}
 
Example 11
Source File: Runner.java    From storm-benchmark with Apache License 2.0
private static void runApplication(IApplication app)
        throws AlreadyAliveException, InvalidTopologyException {
  config.putAll(Utils.readStormConfig());
  String name = (String) config.get(Config.TOPOLOGY_NAME);
  topology = app.getTopology(config);
  StormSubmitter.submitTopology(name, config, topology);
}
 
Example 12
Source File: DeployTopology.java    From jstorm with Apache License 2.0
public void realMain(String[] args) throws Exception {

    String _name = "MetricTest";
    /* if (args.length > 0) {
        conf = Utils.loadConf(args[0]);
    } */

    int _killTopologyTimeout = JStormUtils.parseInt(conf.get(ConfigExtension.TASK_CLEANUP_TIMEOUT_SEC), 180);
    conf.put(ConfigExtension.TASK_CLEANUP_TIMEOUT_SEC, _killTopologyTimeout);

    int _numWorkers = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_WORKERS), 6);

    int _numTopologies = JStormUtils.parseInt(conf.get(TOPOLOGY_NUMS), 1);
    int _spoutParallel = JStormUtils.parseInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 2);
    int _boltParallel = JStormUtils.parseInt(conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 4);
    int _messageSize = JStormUtils.parseInt(conf.get(TOPOLOGY_MESSAGE_SIZES), 10);
    int _numAcker = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_ACKER_EXECUTORS), 2);
    int _boltNum = JStormUtils.parseInt(conf.get(TOPOLOGY_BOLTS_NUMS), 3);
    boolean _ackEnabled = false;
    if (_numAcker > 0)
        _ackEnabled = true;

    for (int topoNum = 0; topoNum < _numTopologies; topoNum++) {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("messageSpout",
                new DeploySpoult(_messageSize, _ackEnabled), _spoutParallel);
        builder.setBolt("messageBolt1", new DeployBolt(), _boltParallel)
                .shuffleGrouping("messageSpout");
        for (int levelNum = 2; levelNum <= _boltNum; levelNum++) {
            builder.setBolt("messageBolt" + levelNum, new DeployBolt(), _boltParallel)
                    .shuffleGrouping("messageBolt" + (levelNum - 1));
        }

        conf.put(Config.TOPOLOGY_WORKERS, _numWorkers);
        conf.put(Config.TOPOLOGY_ACKER_EXECUTORS, _numAcker);
        StormSubmitter.submitTopology(_name + "_" + topoNum, conf, builder.createTopology());
    }
}
 
Example 13
Source File: StormAbstractCloudLiveTest.java    From brooklyn-library with Apache License 2.0
public boolean submitTopology(StormTopology stormTopology, String topologyName, int numOfWorkers, boolean debug, long timeoutMs) {
    if (log.isDebugEnabled()) log.debug("Connecting to NimbusClient: {}", nimbus.getConfig(Storm.NIMBUS_HOSTNAME));
    Config conf = new Config();
    conf.setDebug(debug);
    conf.setNumWorkers(numOfWorkers);

    // TODO - confirm this creates the JAR correctly
    String jar = createJar(
        new File(Os.mergePaths(ResourceUtils.create(this).getClassLoaderDir(), "org/apache/brooklyn/entity/messaging/storm/topologies")),
        "org/apache/brooklyn/entity/messaging/storm/");
    System.setProperty("storm.jar", jar);
    long startMs = System.currentTimeMillis();
    long endMs = (timeoutMs == -1) ? Long.MAX_VALUE : (startMs + timeoutMs);
    long currentTime = startMs;
    Throwable lastError = null;
    int attempt = 0;
    while (currentTime <= endMs) {
        currentTime = System.currentTimeMillis();
        if (attempt != 0) Time.sleep(Duration.ONE_SECOND);
        if (log.isTraceEnabled()) log.trace("trying connection to {} at time {}", nimbus.getConfig(Storm.NIMBUS_HOSTNAME), currentTime);

        try {
            StormSubmitter.submitTopology(topologyName, conf, stormTopology);
            return true;
        } catch (Exception e) {
            if (shouldRetryOn(e)) {
                if (log.isDebugEnabled()) log.debug("Attempt {} failed connecting to {} ({})", new Object[] {attempt + 1, nimbus.getConfig(Storm.NIMBUS_HOSTNAME), e.getMessage()});
                lastError = e;
            } else {
                throw Throwables.propagate(e);
            }
        }
        attempt++;
    }
    log.warn("unable to connect to Nimbus client: ", lastError);
    Assert.fail();
    return false;
}
 
Example 14
Source File: TMUdfStreamTopology.java    From jstorm with Apache License 2.0
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    Map config = new Config();
    config.put(ConfigExtension.TOPOLOGY_MASTER_USER_DEFINED_STREAM_CLASS, "com.alipay.dw.jstorm.example.tm.TMUdfHandler");
    config.put(Config.TOPOLOGY_WORKERS, 2);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("TMUdfSpout", new TMUdfSpout(), 2);
    builder.setBolt("TMUdfBolt", new TMUdfBolt(), 4);
    StormTopology topology = builder.createTopology();

    StormSubmitter.submitTopology("TMUdfTopology", config, topology);
}
 
Example 15
Source File: DeploymentTopology.java    From StormCV with Apache License 2.0
public static void main(String[] args){
	
	// first some global (topology) configuration
	StormCVConfig conf = new StormCVConfig();
	
	/**
	 * Sets the OpenCV library to be used which depends on the system the topology is being executed on
	 */
	//conf.put(StormCVConfig.STORMCV_OPENCV_LIB, "mac64_opencv_java248.dylib");
	
	conf.setNumWorkers(4); // number of workers in the topology
	conf.setMaxSpoutPending(20); // maximum un-acked/un-failed frames per spout (spout blocks if this number is reached)
	conf.put(StormCVConfig.STORMCV_FRAME_ENCODING, Frame.JPG_IMAGE); // indicates frames will be encoded as JPG throughout the topology (JPG is the default when not explicitly set)
	conf.put(Config.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS, true); // True if Storm should timeout messages or not.
	conf.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS , 10); // The maximum amount of time given to the topology to fully process a message emitted by a spout (default = 30)
	conf.put(StormCVConfig.STORMCV_SPOUT_FAULTTOLERANT, false); // indicates if the spout must be fault tolerant; i.e. spouts do NOT! replay tuples on fail
	conf.put(StormCVConfig.STORMCV_CACHES_TIMEOUT_SEC, 30); // TTL (seconds) for all elements in all caches throughout the topology (avoids memory overload)
	
	List<String> urls = new ArrayList<String>();
	urls.add( "rtsp://streaming3.webcam.nl:1935/n224/n224.stream" );
	urls.add("rtsp://streaming3.webcam.nl:1935/n233/n233.stream");
	urls.add("rtsp://streaming3.webcam.nl:1935/n302/n302.stream"); 
	urls.add("rtsp://streaming3.webcam.nl:1935/n346/n346.stream");
	urls.add("rtsp://streaming3.webcam.nl:1935/n319/n319.stream"); 
	urls.add("rtsp://streaming3.webcam.nl:1935/n794b/n794b.stream"); 

	int frameSkip = 13;
	
	// specify the list with SingleInputOperations to be executed sequentially by the 'fat' bolt
	@SuppressWarnings("rawtypes")
	List<ISingleInputOperation> operations = new ArrayList<ISingleInputOperation>();
	operations.add(new ScaleImageOp(0.5f) );
	operations.add(new FeatureExtractionOp("sift", FeatureDetector.SIFT, DescriptorExtractor.SIFT));
	operations.add(new FeatureExtractionOp("surf", FeatureDetector.SURF, DescriptorExtractor.SURF));
	operations.add(new DrawFeaturesOp());
	
	// now create the topology itself (spout -> background subtraction --> streamer)
	TopologyBuilder builder = new TopologyBuilder();
			
	// number of tasks must match the number of urls!
	builder.setSpout("spout", new CVParticleSpout( new StreamFrameFetcher(urls).frameSkip(frameSkip) ), 1 ).setNumTasks(6);
	
	// two 'fat' bolts containing a SequentialFrameOp will emit a Frame object containing the detected features
	builder.setBolt("features", new SingleInputBolt( new SequentialFrameOp(operations).outputFrame(true).retainImage(true)), 2)
		.shuffleGrouping("spout");
	
	// add bolt that creates a webservice on port 8558 enabling users to view the result
	builder.setBolt("streamer", new BatchInputBolt(
			new SlidingWindowBatcher(2, frameSkip).maxSize(6), // note the required batcher used as a buffer and maintains the order of the frames
			new MjpegStreamingOp().port(8558).framerate(5)).groupBy(new Fields(FrameSerializer.STREAMID))
		, 1)
		.shuffleGrouping("features");
	
	try {
		
		// run in local mode
		/*
		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology( "deployment_Test", conf, builder.createTopology() );
		Utils.sleep(120*1000); // run for two minutes and then kill the topology
		cluster.shutdown();
		System.exit(1);
		*/
		// run on a storm cluster
		StormSubmitter.submitTopology("Your_topology_name", conf, builder.createTopology());
	} catch (Exception e){
		e.printStackTrace();
	}
}
 
Example 16
Source File: ZkTopology.java    From yuzhouwan with Apache License 2.0
public static void main(String[] args) {

    // This is the zookeeper.connect value from the Kafka configuration file; copy it from there
    String brokerZkStr = "10.100.90.201:2181/kafka_online_sample";
    String brokerZkPath = "/brokers";
    ZkHosts zkHosts = new ZkHosts(brokerZkStr, brokerZkPath);

    String topic = "mars-wap";
    // Below: the ZooKeeper cluster to which consumer offsets are reported, and its settings
    String offsetZkServers = "10.199.203.169";
    String offsetZkPort = "2181";
    List<String> zkServersList = new ArrayList<>();
    zkServersList.add(offsetZkServers);
    // Root path under which offset information is stored
    String offsetZkRoot = "/stormExample";
    // Id under which this spout's consumer offsets are stored, e.g. named after the topology
    String offsetZkId = "storm-example";

    SpoutConfig kafkaConfig = new SpoutConfig(zkHosts, topic, offsetZkRoot, offsetZkId);
    kafkaConfig.zkPort = Integer.parseInt(offsetZkPort);
    kafkaConfig.zkServers = zkServersList;
    kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());

    KafkaSpout spout = new KafkaSpout(kafkaConfig);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", spout, 1);
    builder.setBolt("bolt", new EsBolt("storm/docs"), 1).shuffleGrouping("spout");

    Config config = new Config();
    config.put("es.index.auto.create", "true");

    if (args.length > 0) {
        try {
            StormSubmitter.submitTopology("storm-kafka-example", config, builder.createTopology());
        } catch (Exception e) {
            LOG.error("", e);
        }
    } else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", config, builder.createTopology());
    }
}
 
Example 17
Source File: PersistentWordCount.java    From storm-hbase with Apache License 2.0
public static void main(String[] args) throws Exception {
    Config config = new Config();

    Map<String, Object> hbConf = new HashMap<String, Object>();
    if(args.length > 0){
        hbConf.put("hbase.rootdir", args[0]);
    }
    config.put("hbase.conf", hbConf);

    WordSpout spout = new WordSpout();
    WordCounter bolt = new WordCounter();

    SimpleHBaseMapper mapper = new SimpleHBaseMapper()
            .withRowKeyField("word")
            .withColumnFields(new Fields("word"))
            .withCounterFields(new Fields("count"))
            .withColumnFamily("cf");

    HBaseBolt hbase = new HBaseBolt("WordCount", mapper)
            .withConfigKey("hbase.conf");


    // wordSpout ==> countBolt ==> HBaseBolt
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout(WORD_SPOUT, spout, 1);
    builder.setBolt(COUNT_BOLT, bolt, 1).shuffleGrouping(WORD_SPOUT);
    builder.setBolt(HBASE_BOLT, hbase, 1).fieldsGrouping(COUNT_BOLT, new Fields("word"));


    if (args.length == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", config, builder.createTopology());
        Thread.sleep(30000);
        cluster.killTopology("test");
        cluster.shutdown();
        System.exit(0);
    } else if (args.length == 2) {
        StormSubmitter.submitTopology(args[1], config, builder.createTopology());
    } else {
        System.out.println("Usage: PersistentWordCount <hbase.rootdir> [topology name]");
    }
}
 
Example 18
Source File: FastWordCountTopology.java    From jstorm with Apache License 2.0
public static void test() throws Exception {

    int spout_Parallelism_hint = Utils.getInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
    int split_Parallelism_hint = Utils.getInt(conf.get(TOPOLOGY_SPLIT_PARALLELISM_HINT), 1);
    int count_Parallelism_hint = Utils.getInt(conf.get(TOPOLOGY_COUNT_PARALLELISM_HINT), 2);

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new FastRandomSentenceSpout(), spout_Parallelism_hint);
    builder.setBolt("split", new SplitSentence(), split_Parallelism_hint).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), count_Parallelism_hint).fieldsGrouping("split", new Fields("word"));

    String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = className[className.length - 1];

    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
}
 
Example 19
Source File: BatchMetaTopology.java    From jstorm with Apache License 2.0
public static void SetRemoteTopology() throws AlreadyAliveException,
        InvalidTopologyException, TopologyAssignException {

    TopologyBuilder builder = SetBuilder();

    StormSubmitter.submitTopology(topologyName, conf,
            builder.createTopology());

}