Java Code Examples for org.apache.storm.LocalCluster#killTopology()

The following examples show how to use org.apache.storm.LocalCluster#killTopology(). You can go to the original project or source file by following the links above each example.
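
All of the examples below follow the same local-mode lifecycle: build a topology, submit it to an in-process LocalCluster, let it run for a while, then kill it by name and shut the cluster down. A minimal sketch of that shared pattern, assuming an ordinary Storm setup (the topology name "example" and the 10-second run time are arbitrary choices, not API requirements):

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.utils.Utils;

public class KillTopologySketch {
    public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        // ... register spouts and bolts on the builder here ...

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("example", new Config(), builder.createTopology());
        Utils.sleep(10000);              // let the topology run for 10 seconds
        cluster.killTopology("example"); // kill the running topology by name
        cluster.shutdown();              // then tear down the local cluster
    }
}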
Example 1
Source File: MultiSpoutExclamationTopology.java    From incubator-heron with Apache License 2.0
public static void main(String[] args) throws Exception {
  TopologyBuilder builder = new TopologyBuilder();

  builder.setSpout("word0", new TestWordSpout(), 2);
  builder.setSpout("word1", new TestWordSpout(), 2);
  builder.setSpout("word2", new TestWordSpout(), 2);
  builder.setBolt("exclaim1", new ExclamationBolt(), 2)
      .shuffleGrouping("word0")
      .shuffleGrouping("word1")
      .shuffleGrouping("word2");

  Config conf = new Config();
  conf.setDebug(true);
  conf.setMaxSpoutPending(10);
  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");
  if (args != null && args.length > 0) {
    conf.setNumWorkers(3);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
  } else {
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.killTopology("test");
    cluster.shutdown();
  }
}
 
Example 2
Source File: StormMain.java    From chuidiang-ejemplos with GNU Lesser General Public License v3.0
public static void main(String[] args) throws InterruptedException, AlreadyAliveException, InvalidTopologyException, AuthorizationException {
   TopologyBuilder builder = new TopologyBuilder();
   builder.setSpout(DATA_GENERATOR, new ASpout());
   builder.setBolt(DATA_CALCULATOR, new ABolt()).shuffleGrouping(DATA_GENERATOR);
   builder.setBolt(DATA_PRINTER, new DataPrinter()).shuffleGrouping(DATA_CALCULATOR).shuffleGrouping(DATA_GENERATOR);

   Config config = new Config();

   LocalCluster cluster = new LocalCluster();
   cluster.submitTopology(TOPOLOGY_NAME, config,
         builder.createTopology());

   Thread.sleep(100000);
   cluster.killTopology(TOPOLOGY_NAME);
   cluster.shutdown();
}
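
If the default kill delay is too slow for quick local runs like this, LocalCluster also exposes killTopologyWithOpts(), which takes a KillOptions carrying an explicit wait time. A small sketch of that variant; the zero-second wait is a deliberate choice for fast teardown, not a requirement:

import org.apache.storm.generated.KillOptions;

KillOptions killOpts = new KillOptions();
killOpts.set_wait_secs(0); // skip the usual wait before the workers are destroyed
cluster.killTopologyWithOpts(TOPOLOGY_NAME, killOpts);
cluster.shutdown();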
 
Example 3
Source File: ParserBoltTest.java    From logparser with Apache License 2.0
@Test
public void runRest() throws InterruptedException, NoSuchMethodException {
    TopologyBuilder builder = new TopologyBuilder();

    // ----------
    builder.setSpout("Spout", new TestApacheLogsSpout());
    // ----------
    HttpdLoglineParserBolt parserBolt = new HttpdLoglineParserBolt(TestCase.getLogFormat(), INPUT_FIELD_NAME, OUTPUT_FIELD_NAME);

    builder.setBolt("Parser", parserBolt, 1).shuffleGrouping("Spout");
    // ----------
    builder.setBolt("Printer", new ValidateOutput(), 1).shuffleGrouping("Parser");
    // ----------

    StormTopology topology = builder.createTopology();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("Unit test", new HashMap<String, String>(), topology);
    Thread.sleep(10000L); // Run for 10 seconds
    cluster.killTopology("Unit test");
    cluster.shutdown();

}
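
For unit tests like this one, Storm's testing helpers can manage the cluster lifecycle so the test does not need explicit killTopology()/shutdown() calls. A sketch, assuming the testing utilities (org.apache.storm.Testing and org.apache.storm.testing.TestJob) are available in your Storm version:

import java.util.HashMap;
import org.apache.storm.ILocalCluster;
import org.apache.storm.Testing;
import org.apache.storm.testing.TestJob;

Testing.withLocalCluster(new TestJob() {
    @Override
    public void run(ILocalCluster cluster) throws Exception {
        // submit the same topology as above, then just wait; the cluster
        // (and any topology still running on it) is torn down when run() returns
        cluster.submitTopology("Unit test", new HashMap<String, String>(), topology);
        Thread.sleep(10000L);
    }
});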
 
Example 4
Source File: KafkaStormWordCountTopology.java    From Building-Data-Streaming-Applications-with-Apache-Kafka with MIT License
public static void main(String[] args) throws Exception {

        String zkConnString = "localhost:2181";
        String topic = "words";
        BrokerHosts hosts = new ZkHosts(zkConnString);

        SpoutConfig kafkaSpoutConfig = new SpoutConfig(hosts, topic, "/" + topic,
                "wordcountID");
        kafkaSpoutConfig.startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
        kafkaSpoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());

        TopologyBuilder topologyBuilder = new TopologyBuilder();
        topologyBuilder.setSpout("kafkaspout", new KafkaSpout(kafkaSpoutConfig));
        topologyBuilder.setBolt("stringsplit", new StringToWordsSpliterBolt()).shuffleGrouping("kafkaspout");
        topologyBuilder.setBolt("counter", new WordCountCalculatorBolt()).shuffleGrouping("stringsplit");

        Config config = new Config();
        config.setDebug(true);
        if (args != null && args.length > 1) {
            config.setNumWorkers(3);
            StormSubmitter.submitTopology(args[1], config, topologyBuilder.createTopology());
        } else {
            // Cap the maximum number of executors that can be spawned
            // for a component to 3
            config.setMaxTaskParallelism(3);
            // LocalCluster is used to run locally
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("KafkaLocal", config, topologyBuilder.createTopology());
            // Run locally for 10 seconds, then kill the topology and shut down.
            // (The original killed "KafkaToplogy", a misspelled name that was
            // never submitted, and only did so when the sleep was interrupted.)
            try {
                Thread.sleep(10000);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            cluster.killTopology("KafkaLocal");
            cluster.shutdown();
        }
    }
 
Example 5
Source File: StormKafkaProcess.java    From BigData with GNU General Public License v3.0
public static void main(String[] args)
		throws InterruptedException, InvalidTopologyException, AuthorizationException, AlreadyAliveException {

	String topologyName = "TSAS"; // topology name
	// ZooKeeper host addresses; one of them is picked automatically
	ZkHosts zkHosts = new ZkHosts("192.168.230.128:2181,192.168.230.129:2181,192.168.230.131:2181");
	String topic = "trademx";
	String zkRoot = "/storm"; // Storm's root path in ZooKeeper
	String id = "tsaPro";

	// Create the SpoutConfig object
	SpoutConfig spoutConfig = new SpoutConfig(zkHosts, topic, zkRoot, id);

	TopologyBuilder builder = new TopologyBuilder();
	builder.setSpout("kafka", new KafkaSpout(spoutConfig), 2);
	builder.setBolt("AccBolt", new AccBolt()).shuffleGrouping("kafka");
	builder.setBolt("ToDbBolt", new ToDbBolt()).shuffleGrouping("AccBolt");

	Config config = new Config();
	config.setDebug(false);

	if (args.length == 0) { // run locally, for testing
		LocalCluster localCluster = new LocalCluster();
		localCluster.submitTopology(topologyName, config, builder.createTopology());
		Thread.sleep(1000 * 3600);
		localCluster.killTopology(topologyName);
		localCluster.shutdown();
	} else { // submit to the cluster
		StormSubmitter.submitTopology(topologyName, config, builder.createTopology());
	}

}
 
Example 6
Source File: ExclamationTopology.java    From incubator-heron with Apache License 2.0
public static void main(String[] args) throws Exception {
  TopologyBuilder builder = new TopologyBuilder();
  int parallelism = 2;

  int spouts = parallelism;
  builder.setSpout("word", new TestWordSpout(Duration.ofMillis(50)), spouts);
  int bolts = 2 * parallelism;
  builder.setBolt("exclaim1", new ExclamationBolt(), bolts)
      .shuffleGrouping("word");

  Config conf = new Config();
  conf.setDebug(true);
  conf.setMaxSpoutPending(10);
  conf.setMessageTimeoutSecs(600);
  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");

  if (args != null && args.length > 0) {
    conf.setNumWorkers(parallelism);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
  } else {
    System.out.println("Topology name not provided as an argument, running in simulator mode.");
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.killTopology("test");
    cluster.shutdown();
  }
}
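
One interaction worth noting in this example: in Apache Storm, killing a topology first deactivates its spouts and then waits before destroying the workers, and the default wait is derived from the topology's message timeout. With conf.setMessageTimeoutSecs(600) as above, a default kill can therefore linger, so the KillOptions override shown after Example 2 is useful here too (Heron's simulator, which this example targets, may behave differently):

KillOptions opts = new KillOptions();
opts.set_wait_secs(0); // don't wait out the 600-second message timeout
cluster.killTopologyWithOpts("test", opts);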
 
Example 7
Source File: IPFraudDetectionTopology.java    From Building-Data-Streaming-Applications-with-Apache-Kafka with MIT License
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    Intialize(args[0]);
    logger.info("Successfully loaded configuration");

    BrokerHosts hosts = new ZkHosts(zkhost);
    SpoutConfig spoutConfig = new SpoutConfig(hosts, inputTopic, "/" + KafkaBroker, consumerGroup);
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    spoutConfig.startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
    String[] partNames = {"status_code"};
    String[] colNames = {"date", "request_url", "protocol_type", "status_code"};

    DelimitedRecordHiveMapper mapper = new DelimitedRecordHiveMapper().withColumnFields(new Fields(colNames))
            .withPartitionFields(new Fields(partNames));


    // make sure you adjust the batch size and the other parameters to your requirements
    HiveOptions hiveOptions = new HiveOptions(metaStoreURI, dbName, tblName, mapper).withTxnsPerBatch(250)
            .withBatchSize(2).withIdleTimeout(10).withCallTimeout(10000000);

    logger.info("Creating Storm Topology");
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("KafkaSpout", kafkaSpout, 1);

    builder.setBolt("frauddetect", new FraudDetectorBolt()).shuffleGrouping("KafkaSpout");
    builder.setBolt("KafkaOutputBolt",
            new IPFraudKafkaBolt(zkhost, "kafka.serializer.StringEncoder", KafkaBroker, outputTopic), 1)
            .shuffleGrouping("frauddetect");

    builder.setBolt("HiveOutputBolt", new IPFraudHiveBolt(), 1).shuffleGrouping("frauddetect");
    builder.setBolt("HiveBolt", new HiveBolt(hiveOptions)).shuffleGrouping("HiveOutputBolt");

    Config conf = new Config();
    if (args != null && args.length > 1) {
        conf.setNumWorkers(3);
        logger.info("Submiting  topology to storm cluster");

        StormSubmitter.submitTopology(args[1], conf, builder.createTopology());
    } else {
        // Cap the maximum number of executors that can be spawned
        // for a component to 3
        conf.setMaxTaskParallelism(3);
        // LocalCluster is used to run locally
        LocalCluster cluster = new LocalCluster();
        logger.info("Submitting  topology to local cluster");
        cluster.submitTopology("KafkaLocal", conf, builder.createTopology());
        // Run locally for 10 seconds, then kill the topology and shut down.
        // (The original killed "KafkaToplogy", a misspelled name that was
        // never submitted, and only did so when the sleep was interrupted.)
        try {
            Thread.sleep(10000);
        } catch (InterruptedException e) {
            logger.error("Interrupted while sleeping", e);
            Thread.currentThread().interrupt();
        }
        cluster.killTopology("KafkaLocal");
        logger.info("Shutting down cluster");
        cluster.shutdown();

    }

}
 
Example 8
Source File: LogLevelCountTopology.java    From nifi-streaming-examples with Apache License 2.0
public static void main( String[] args ) throws Exception {
    String propertiesFile = DEFAULT_PROPERTIES_FILE;
    if (args != null && args.length == 1 && args[0] != null) {
        propertiesFile = args[0];
    }

    LogLevelCountProperties props = new LogLevelCountProperties(propertiesFile);

    int windowMillis = props.getStormWindowMillis();
    double rateThreshold = props.getStormRateThreshold();

    // Build the spout for pulling data from NiFi and pull out the log level into a tuple field
    NiFiSpout niFiSpout = new NiFiSpout(getSourceConfig(props), Collections.singletonList(props.getLogLevelAttribute()));

    // Build the bolt for counting log levels over a tumbling window
    BaseWindowedBolt logLevelWindowBolt = new LogLevelWindowBolt(props.getLogLevelAttribute())
            .withTumblingWindow(new BaseWindowedBolt.Duration(windowMillis, TimeUnit.MILLISECONDS));

    // Build the bolt for pushing results back to NiFi
    NiFiDataPacketBuilder dictionaryBuilder = new DictionaryBuilder(windowMillis, rateThreshold);
    NiFiBolt niFiBolt = new NiFiBolt(getSinkConfig(props), dictionaryBuilder, 10).withBatchSize(1);

    // Build the topology of NiFiSpout -> LogLevelWindowBolt -> NiFiBolt
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("nifiInput", niFiSpout);
    builder.setBolt("logLevels", logLevelWindowBolt).shuffleGrouping("nifiInput");
    builder.setBolt("nifiOutput", niFiBolt).shuffleGrouping("logLevels");

    // Submit the topology
    Config conf = new Config();
    conf.setDebug(true);

    // Need to set the message timeout to twice the window size in seconds
    conf.setMessageTimeoutSecs((props.getStormWindowMillis()/1000) * 2);

    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    }
    else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("log-levels", conf, builder.createTopology());
        Utils.sleep(130000);
        cluster.killTopology("log-levels");
        cluster.shutdown();
    }
}
 
Example 9
Source File: AdvertisingTopology.java    From streaming-benchmarks with Apache License 2.0
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    Options opts = new Options();
    opts.addOption("conf", true, "Path to the config file.");

    CommandLineParser parser = new DefaultParser();
    CommandLine cmd = parser.parse(opts, args);
    String configPath = cmd.getOptionValue("conf");
    Map commonConfig = Utils.findAndReadConfigFile(configPath, true);

    String zkServerHosts = joinHosts((List<String>)commonConfig.get("zookeeper.servers"),
                                     Integer.toString((Integer)commonConfig.get("zookeeper.port")));
    String redisServerHost = (String)commonConfig.get("redis.host");
    String kafkaTopic = (String)commonConfig.get("kafka.topic");
    int kafkaPartitions = ((Number)commonConfig.get("kafka.partitions")).intValue();
    int workers = ((Number)commonConfig.get("storm.workers")).intValue();
    int ackers = ((Number)commonConfig.get("storm.ackers")).intValue();
    int cores = ((Number)commonConfig.get("process.cores")).intValue();
    int parallel = Math.max(1, cores/7);

    ZkHosts hosts = new ZkHosts(zkServerHosts);

    SpoutConfig spoutConfig = new SpoutConfig(hosts, kafkaTopic, "/" + kafkaTopic, UUID.randomUUID().toString());
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

    builder.setSpout("ads", kafkaSpout, kafkaPartitions);
    builder.setBolt("event_deserializer", new DeserializeBolt(), parallel).shuffleGrouping("ads");
    builder.setBolt("event_filter", new EventFilterBolt(), parallel).shuffleGrouping("event_deserializer");
    builder.setBolt("event_projection", new EventProjectionBolt(), parallel).shuffleGrouping("event_filter");
    builder.setBolt("redis_join", new RedisJoinBolt(redisServerHost), parallel).shuffleGrouping("event_projection");
    builder.setBolt("campaign_processor", new CampaignProcessor(redisServerHost), parallel*2)
        .fieldsGrouping("redis_join", new Fields("campaign_id"));

    Config conf = new Config();

    if (args != null && args.length > 0) {
        conf.setNumWorkers(workers);
        conf.setNumAckers(ackers);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    }
    else {

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", conf, builder.createTopology());
        org.apache.storm.utils.Utils.sleep(10000);
        cluster.killTopology("test");
        cluster.shutdown();
    }
}
 
Example 10
Source File: WordCountApp.java    From java-study with Apache License 2.0
public static void main( String[] args ) //throws Exception
{
    //System.out.println( "Hello World!" );
    // Instantiate the spouts and bolts

    SentenceSpout spout = new SentenceSpout();
    SplitSentenceBolt splitBolt = new SplitSentenceBolt();
    WordCountBolt countBolt = new WordCountBolt();
    ReportBolt reportBolt = new ReportBolt();

    TopologyBuilder builder = new TopologyBuilder(); // create a TopologyBuilder instance

    // TopologyBuilder provides a fluent-style API for defining the data flow
    // between topology components

    //builder.setSpout(SENTENCE_SPOUT_ID, spout); // register a sentence spout

    // Use two executors (threads); the default is one
    builder.setSpout(SENTENCE_SPOUT_ID, spout, 2);

    // SentenceSpout --> SplitSentenceBolt

    // Register a bolt that subscribes to the sentence stream; shuffleGrouping tells Storm
    // to distribute the tuples emitted by SentenceSpout randomly and evenly across the
    // SplitSentenceBolt instances
    //builder.setBolt(SPLIT_BOLT_ID, splitBolt).shuffleGrouping(SENTENCE_SPOUT_ID);

    // Give the SplitSentenceBolt sentence splitter 4 tasks and 2 executors (threads)
    builder.setBolt(SPLIT_BOLT_ID, splitBolt, 2).setNumTasks(4).shuffleGrouping(SENTENCE_SPOUT_ID);

    // SplitSentenceBolt --> WordCountBolt

    // fieldsGrouping routes tuples containing particular field values to specific bolt
    // instances; here it guarantees that all tuples with the same "word" field are routed
    // to the same WordCountBolt instance
    //builder.setBolt(COUNT_BOLT_ID, countBolt).fieldsGrouping(SPLIT_BOLT_ID, new Fields("word"));

    // Give the WordCountBolt word counter 4 executors (threads)
    builder.setBolt(COUNT_BOLT_ID, countBolt, 4).fieldsGrouping(SPLIT_BOLT_ID, new Fields("word"));

    // WordCountBolt --> ReportBolt

    // globalGrouping routes every tuple emitted by WordCountBolt to the single ReportBolt
    builder.setBolt(REPORT_BOLT_ID, reportBolt).globalGrouping(COUNT_BOLT_ID);


    // Config is a subclass of HashMap<String,Object> used to configure the topology's runtime behavior
    Config config = new Config();
    // set the number of workers
    //config.setNumWorkers(2);
    // submit locally
    LocalCluster cluster = new LocalCluster();

    cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());

    Utils.sleep(10000);
    cluster.killTopology(TOPOLOGY_NAME);
    cluster.shutdown();

}
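
Finally, newer Storm releases (assuming 1.1+ semantics; check your version) make LocalCluster and the LocalCluster.LocalTopology handle returned by submitTopology() AutoCloseable, so try-with-resources can replace the explicit killTopology()/shutdown() pair used throughout the examples above:

try (LocalCluster cluster = new LocalCluster();
     LocalCluster.LocalTopology topo =
             cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology())) {
    Utils.sleep(10000);
    // closing topo kills the topology; closing cluster shuts the local cluster down
}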