Java Code Examples for org.apache.storm.LocalCluster#submitTopology()

The following examples show how to use org.apache.storm.LocalCluster#submitTopology(). You can go to the original project or source file by following the links above each example.
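All of these examples follow the same basic pattern: build a topology with TopologyBuilder, create an in-process LocalCluster, and hand submitTopology() a name, a Config, and the built topology. The minimal sketch below distills that pattern; MySpout and MyBolt are hypothetical placeholders for your own components, while everything else is the standard org.apache.storm API.

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.utils.Utils;

public class MinimalLocalTopology {
    public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", new MySpout());                       // MySpout: hypothetical placeholder
        builder.setBolt("bolt", new MyBolt()).shuffleGrouping("spout"); // MyBolt: hypothetical placeholder

        Config conf = new Config();
        conf.setDebug(false);

        // LocalCluster runs an in-process cluster, intended for development and testing
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("minimal-topology", conf, builder.createTopology());

        Utils.sleep(10000);                    // let the topology run for a while
        cluster.killTopology("minimal-topology");
        cluster.shutdown();                    // stop the in-process cluster
    }
}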
Example 1
Source File: ScottyDemoTopology.java    From scotty-window-processor with Apache License 2.0
public static void main(String[] args) throws Exception {
    LocalCluster cluster = new LocalCluster();
    TopologyBuilder builder = new TopologyBuilder();

    Config conf = new Config();
    conf.setDebug(false);
    conf.setNumWorkers(1);
    conf.setMaxTaskParallelism(1);
    //Disable Acking
    conf.setNumAckers(0);

    KeyedScottyWindowOperator<Integer, Integer> scottyBolt = new KeyedScottyWindowOperator<>(new Sum(), 0);
    scottyBolt.addWindow(new TumblingWindow(WindowMeasure.Time, 1000));
    scottyBolt.addWindow(new SlidingWindow(WindowMeasure.Time, 1000, 250));
    scottyBolt.addWindow(new SessionWindow(WindowMeasure.Time, 1000));

    builder.setSpout("spout", new DataGeneratorSpout());
    builder.setBolt("scottyWindow", scottyBolt).fieldsGrouping("spout", new Fields("key"));
    builder.setBolt("printer", new PrinterBolt()).shuffleGrouping("scottyWindow");

    cluster.submitTopology("testTopology", conf, builder.createTopology());
    //cluster.killTopology("testTopology");
    //cluster.shutdown();
}
 
Example 2
Source File: StatisticTopology.java    From storm-statistic with Apache License 2.0
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    /**
     * Define the DAG (directed acyclic graph) of spouts and bolts
     */
    KafkaSpout kafkaSpout = createKafkaSpout();
    builder.setSpout("id_kafka_spout", kafkaSpout);
    builder.setBolt("id_convertIp_bolt", new ConvertIPBolt()).shuffleGrouping("id_kafka_spout"); // the grouping strategy determines how tuples flow from the upstream component
    builder.setBolt("id_statistic_bolt", new StatisticBolt()).shuffleGrouping("id_convertIp_bolt"); // the grouping strategy determines how tuples flow from the upstream component
    // Build the topology from the builder
    StormTopology topology = builder.createTopology();
    String topologyName = KafkaStormTopology.class.getSimpleName();  // topology name
    Config config = new Config();   // Config extends HashMap but adds convenience setters for common settings

    // Launch the topology: LocalCluster for local mode, StormSubmitter for cluster mode
    if (args == null || args.length < 1) {  // no arguments: local mode; with arguments: cluster mode
        LocalCluster localCluster = new LocalCluster(); // local development mode uses a LocalCluster
        localCluster.submitTopology(topologyName, config, topology);
    } else {
        StormSubmitter.submitTopology(topologyName, config, topology);
    }
}
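A note on cleanup: most of the local-mode examples on this page sleep for a while and then call shutdown() explicitly. In Storm 2.x, LocalCluster also implements AutoCloseable, so the local branch above could instead use try-with-resources; the sketch below is a variant of this example under that assumption, reusing its topologyName, config, and topology variables.

    if (args == null || args.length < 1) {
        // close() is called automatically at the end of the block and shuts the local cluster down
        try (LocalCluster localCluster = new LocalCluster()) {
            localCluster.submitTopology(topologyName, config, topology);
            Thread.sleep(60000); // keep the in-process cluster alive while the topology runs
        }
    } else {
        StormSubmitter.submitTopology(topologyName, config, topology);
    }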
 
Example 3
Source File: FullPullerTopology.java    From DBus with Apache License 2.0
private void start(StormTopology topology, boolean runAsLocal) throws Exception {
    Config conf = new Config();
    conf.put(FullPullConstants.FULL_SPLITTER_TOPOLOGY_ID, fullSplitterTopologyId);
    conf.put(FullPullConstants.FULL_PULLER_TOPOLOGY_ID, fullPullerTopologyId);
    conf.put(FullPullConstants.DS_NAME, topologyId);
    conf.put(FullPullConstants.ZKCONNECT, zkConnect);
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, commonConfSplit.getProperty(FullPullConstants.TOPOLOGY_WORKER_CHILDOPTS));
    // Set the message timeout so that each data shard can be pulled completely within it
    conf.setMessageTimeoutSecs(Integer.parseInt(commonConfSplit.getProperty(FullPullConstants.STORM_MESSAGE_TIMEOUT)));
    conf.setMaxSpoutPending(Integer.parseInt(commonConfSplit.getProperty(FullPullConstants.STORM_MAX_SPOUT_PENDING)));
    conf.setNumWorkers(Integer.parseInt(commonConfSplit.getProperty(FullPullConstants.STORM_NUM_WORKERS)));
    conf.setDebug(true);

    if (runAsLocal) {
        conf.setMaxTaskParallelism(3);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyName, conf, topology);
    } else {
        StormSubmitter.submitTopology(topologyName, conf, topology);
    }
}
 
Example 4
Source File: LocalWordCountRedisStormTopology.java    From 163-bigdate-note with GNU General Public License v3.0
public static void main(String[] args) {
    // Build the TopologyBuilder from the spout and bolts
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("DataSourceSpout", new DataSourceSpout());
    builder.setBolt("SplitBolt", new SplitBolt()).shuffleGrouping("DataSourceSpout");
    builder.setBolt("CountBolt", new CountBolt()).shuffleGrouping("SplitBolt");

    JedisPoolConfig poolConfig = new JedisPoolConfig.Builder()
            .setHost("192.168.60.11").setPort(6379).build();
    RedisStoreMapper storeMapper = new WordCountStoreMapper();
    RedisStoreBolt storeBolt = new RedisStoreBolt(poolConfig, storeMapper);

    builder.setBolt("RedisStoreBolt", storeBolt).shuffleGrouping("CountBolt");

    // Create a local cluster
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("LocalWordCountRedisStormTopology", new Config(), builder.createTopology());

}
 
Example 5
Source File: StormMain.java    From chuidiang-ejemplos with GNU Lesser General Public License v3.0
public static void main(String[] args) throws InterruptedException, AlreadyAliveException, InvalidTopologyException, AuthorizationException {
   TopologyBuilder builder = new TopologyBuilder();
   builder.setSpout(DATA_GENERATOR, new ASpout());
   builder.setBolt(DATA_CALCULATOR, new ABolt()).shuffleGrouping(DATA_GENERATOR);
   builder.setBolt(DATA_PRINTER, new DataPrinter()).shuffleGrouping(DATA_CALCULATOR).shuffleGrouping(DATA_GENERATOR);

   Config config = new Config();

   LocalCluster cluster = new LocalCluster();
   cluster.submitTopology(TOPOLOGY_NAME, config,
         builder.createTopology());

   Thread.sleep(100000);
   cluster.killTopology(TOPOLOGY_NAME);
   cluster.shutdown();
}
 
Example 6
Source File: NiFiStormTopology.java    From localization_nifi with Apache License 2.0
public static void main( String[] args ) {
    // Build a Site-To-Site client config for pulling data
    final SiteToSiteClientConfig inputConfig = new SiteToSiteClient.Builder()
            .url("http://localhost:8080/nifi")
            .portName("Data for Storm")
            .buildConfig();

    // Build a Site-To-Site client config for pushing data
    final SiteToSiteClientConfig outputConfig = new SiteToSiteClient.Builder()
            .url("http://localhost:8080/nifi")
            .portName("Data from Storm")
            .buildConfig();

    final int tickFrequencySeconds = 5;
    final NiFiDataPacketBuilder niFiDataPacketBuilder = new SimpleNiFiDataPacketBuilder();
    final NiFiBolt niFiBolt = new NiFiBolt(outputConfig, niFiDataPacketBuilder, tickFrequencySeconds);
    // .withBatchSize(1) can be chained on the constructor call above to control batching

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("nifiInput", new NiFiSpout(inputConfig));
    builder.setBolt("nifiOutput", niFiBolt).shuffleGrouping("nifiInput");

    // Submit the topology running in local mode
    Config conf = new Config();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());

    Utils.sleep(90000);
    cluster.shutdown();
}
 
Example 7
Source File: StormKafkaProcess.java    From BigData with GNU General Public License v3.0
public static void main(String[] args)
		throws InterruptedException, InvalidTopologyException, AuthorizationException, AlreadyAliveException {

	String topologyName = "TSAS"; // topology name
	// ZooKeeper host addresses; one of them is selected automatically
	ZkHosts zkHosts = new ZkHosts("192.168.230.128:2181,192.168.230.129:2181,192.168.230.131:2181");
	String topic = "trademx";
	String zkRoot = "/storm"; // Storm's root path in ZooKeeper
	String id = "tsaPro";

	// Create the SpoutConfig
	SpoutConfig spoutConfig = new SpoutConfig(zkHosts, topic, zkRoot, id);

	TopologyBuilder builder = new TopologyBuilder();
	builder.setSpout("kafka", new KafkaSpout(spoutConfig), 2);
	builder.setBolt("AccBolt", new AccBolt()).shuffleGrouping("kafka");
	builder.setBolt("ToDbBolt", new ToDbBolt()).shuffleGrouping("AccBolt");

	Config config = new Config();
	config.setDebug(false);

	if (args.length == 0) { // run locally, for testing
		LocalCluster localCluster = new LocalCluster();
		localCluster.submitTopology(topologyName, config, builder.createTopology());
		Thread.sleep(1000 * 3600);
		localCluster.killTopology(topologyName);
		localCluster.shutdown();
	} else { // submit to the cluster
		StormSubmitter.submitTopology(topologyName, config, builder.createTopology());
	}

}
 
Example 8
Source File: SingleTopology.java    From nightwatch with GNU Lesser General Public License v3.0
private static void submitTopology(TopologyBuilder builder) {
    try {
        LocalCluster cluster = new LocalCluster();
        config.put(Config.STORM_CLUSTER_MODE, "local");
        cluster.submitTopology(Configuration.getConfig().getString("topology.name"), config, builder.createTopology());
    } catch (Exception e) {
        LOG.error(e.getMessage(), e);
    }
}
 
Example 9
Source File: App.java    From java-study with Apache License 2.0
public static void main(String[] args) {
    // Define a topology
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(str1, new TestSpout());
    builder.setBolt(str2, new TestBolt()).shuffleGrouping(str1);
    Config conf = new Config();
    conf.put("test", "test");
    try {
        // Run the topology
        if (args != null && args.length > 0) { // with arguments: submit to the cluster, using the first argument as the topology name
            System.out.println("Remote mode");
            StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
        } else { // no arguments: submit locally
            // Start local mode
            System.out.println("Local mode");
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("111", conf, builder.createTopology());
//          Thread.sleep(2000);
//          // Shut down the local cluster
//          cluster.shutdown();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Example 10
Source File: AppMain.java    From storm_spring_boot_demo with MIT License
/**
 * For local debugging
 * @param name topology name
 * @param builder topology builder
 * @param conf topology configuration
 * @throws InterruptedException
 */
private static void localSubmit(String name, TopologyBuilder builder, Config conf)
        throws InterruptedException {
    conf.setDebug(true);
    conf.setMaxTaskParallelism(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(name, conf, builder.createTopology());
    Thread.sleep(100000);
    cluster.shutdown();
}
 
Example 11
Source File: WordCountApp.java    From java-study with Apache License 2.0
public static void main(String[] args) throws InterruptedException, AlreadyAliveException, InvalidTopologyException {
    // Define the topology
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word-reader", new WordReader());
    builder.setBolt("word-normalizer", new WordNormalizer()).shuffleGrouping("word-reader");
    builder.setBolt("word-counter", new WordCounter()).fieldsGrouping("word-normalizer", new Fields("word"));
    StormTopology topology = builder.createTopology();

    // Configuration
    Config conf = new Config();
    String fileName = "words.txt";
    conf.put("fileName", fileName);
    conf.setDebug(false);

    // Run the topology
    System.out.println("Starting...");
    if (args != null && args.length > 0) { // with arguments: submit to the cluster, using the first argument as the topology name
        System.out.println("Remote mode");
        try {
            StormSubmitter.submitTopology(args[0], conf, topology);
        } catch (AuthorizationException e) {
            e.printStackTrace();
        }
    } else { // no arguments: submit locally
        // Start local mode
        System.out.println("Local mode");
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("Getting-Started-Topologie", conf, topology);
        Thread.sleep(5000);
        // Shut down the local cluster
        cluster.shutdown();
    }
    System.out.println("Finished");
}
 
Example 12
Source File: ParserTopologyComponent.java    From metron with Apache License 2.0
@Override
public void start() throws UnableToStartException {
  try {
    final Map<String, Object> stormConf = new HashMap<>();
    stormConf.put(Config.TOPOLOGY_DEBUG, true);
    ParserTopologyBuilder.ParserTopology topologyBuilder = ParserTopologyBuilder.build(
            topologyProperties.getProperty(ZKServerComponent.ZOOKEEPER_PROPERTY),
            Optional.ofNullable(brokerUrl),
            sensorTypes,
            (x,y) -> Collections.nCopies(sensorTypes.size(), 1),
            (x,y) -> Collections.nCopies(sensorTypes.size(), 1),
            (x,y) -> 1,
            (x,y) -> 1,
            (x,y) -> 1,
            (x,y) -> 1,
            (x,y) -> Collections.nCopies(sensorTypes.size(), new HashMap<>()),
            (x,y) -> null,
            (x,y) -> outputTopic,
            (x,y) -> errorTopic,
            (x,y) -> {
              Config c = new Config();
              c.putAll(stormConf);
              return c;
            }
    );

    stormCluster = new LocalCluster();
    stormCluster.submitTopology(getTopologyName(), stormConf, topologyBuilder.getBuilder().createTopology());
  } catch (Exception e) {
    throw new UnableToStartException("Unable to start parser topology for sensorTypes: " + sensorTypes, e);
  }
}
 
Example 13
Source File: DBusRouterTopology.java    From DBus with Apache License 2.0
private void start(StormTopology topology, boolean runAsLocal) throws Exception {
    Config conf = new Config();
    conf.put(com.creditease.dbus.commons.Constants.ZOOKEEPER_SERVERS, zkConnect);
    conf.put(Constants.TOPOLOGY_ID, topologyId);
    conf.put(Constants.TOPOLOGY_ALIAS, alias);
    conf.put(Constants.ROUTER_PROJECT_NAME, projectName);

    String workerChildOpts = routerConf.getProperty(DBusRouterConstants.STORM_TOPOLOGY_WORKER_CHILDOPTS, "-Xmx2g");
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, workerChildOpts);

    int msgTimeout = Integer.valueOf(routerConf.getProperty(DBusRouterConstants.STORM_MESSAGE_TIMEOUT, "10"));
    conf.setMessageTimeoutSecs(msgTimeout);

    int maxSpoutPending = Integer.valueOf(routerConf.getProperty(DBusRouterConstants.STORM_MAX_SPOUT_PENDING, "100"));
    conf.setMaxSpoutPending(maxSpoutPending);
    conf.setDebug(true);

    int numWorks = Integer.valueOf(routerConf.getProperty(DBusRouterConstants.STORM_NUM_WORKS, "1"));
    conf.setNumWorkers(numWorks);

    if (runAsLocal) {
        conf.setMaxTaskParallelism(10);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyName, conf, topology);
    } else {
        StormSubmitter.submitTopology(topologyName, conf, topology);
    }
}
 
Example 14
Source File: LocalDRPCTopology.java    From 163-bigdate-note with GNU General Public License v3.0
public static void main(String[] args) {
    LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("addUser");
    builder.addBolt(new MyBolt());

    LocalCluster localCluster = new LocalCluster();
    LocalDRPC drpc = new LocalDRPC();
    localCluster.submitTopology("local-drpc", new Config(), builder.createLocalTopology(drpc));

    String result = drpc.execute("addUser", "zhangsan");
    System.out.println("From client: " + result);

    localCluster.shutdown();
    drpc.shutdown();
}
 
Example 15
Source File: KafkaStormWordCountTopology.java    From Building-Data-Streaming-Applications-with-Apache-Kafka with MIT License
public static void main(String[] args) throws Exception {

        String zkConnString = "localhost:2181";
        String topic = "words";
        BrokerHosts hosts = new ZkHosts(zkConnString);

        SpoutConfig kafkaSpoutConfig = new SpoutConfig(hosts, topic, "/" + topic,
                "wordcountID");
        kafkaSpoutConfig.startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
        kafkaSpoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());

        TopologyBuilder topologyBuilder = new TopologyBuilder();
        topologyBuilder.setSpout("kafkaspout", new KafkaSpout(kafkaSpoutConfig));
        topologyBuilder.setBolt("stringsplit", new StringToWordsSpliterBolt()).shuffleGrouping("kafkaspout");
        topologyBuilder.setBolt("counter", new WordCountCalculatorBolt()).shuffleGrouping("stringsplit");

        Config config = new Config();
        config.setDebug(true);
        if (args != null && args.length > 1) {
            config.setNumWorkers(3);
            StormSubmitter.submitTopology(args[1], config, topologyBuilder.createTopology());
        } else {
            // Cap the maximum number of executors that can be spawned
            // for a component to 3
            config.setMaxTaskParallelism(3);
            // LocalCluster is used to run locally
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("KafkaLocal", config, topologyBuilder.createTopology());
            // sleep
            try {
                Thread.sleep(10000);
            } catch (InterruptedException e) {
                // if interrupted, kill the topology and shut down early
                cluster.killTopology("KafkaLocal");
                cluster.shutdown();
            }

            cluster.shutdown();
        }
    }
 
Example 16
Source File: NiFiStormTopology.java    From nifi with Apache License 2.0
public static void main( String[] args ) {
    // Build a Site-To-Site client config for pulling data
    final SiteToSiteClientConfig inputConfig = new SiteToSiteClient.Builder()
            .url("http://localhost:8080/nifi")
            .portName("Data for Storm")
            .buildConfig();

    // Build a Site-To-Site client config for pushing data
    final SiteToSiteClientConfig outputConfig = new SiteToSiteClient.Builder()
            .url("http://localhost:8080/nifi")
            .portName("Data from Storm")
            .buildConfig();

    final int tickFrequencySeconds = 5;
    final NiFiDataPacketBuilder niFiDataPacketBuilder = new SimpleNiFiDataPacketBuilder();
    final NiFiBolt niFiBolt = new NiFiBolt(outputConfig, niFiDataPacketBuilder, tickFrequencySeconds);
    // .withBatchSize(1) can be chained on the constructor call above to control batching

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("nifiInput", new NiFiSpout(inputConfig));
    builder.setBolt("nifiOutput", niFiBolt).shuffleGrouping("nifiInput");

    // Submit the topology running in local mode
    Config conf = new Config();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());

    Utils.sleep(90000);
    cluster.shutdown();
}
 
Example 17
Source File: App.java    From springBoot-study with Apache License 2.0
public static void main(String[] args) {
    // Define a topology
    TopologyBuilder builder = new TopologyBuilder();
    // One executor (thread); the default is one
    builder.setSpout(test_spout, new TestSpout(), 1);
    // shuffleGrouping: tuples are distributed randomly
    // One executor (thread) and one task
    builder.setBolt(test_bolt, new TestBolt(), 1).setNumTasks(1).shuffleGrouping(test_spout);
    // fieldsGrouping: tuples are grouped by field
    // One executor (thread) and one task
    builder.setBolt(test2_bolt, new Test2Bolt(), 1).setNumTasks(1).fieldsGrouping(test_bolt, new Fields("count"));
    Config conf = new Config();
    conf.put("test", "test");
    try {
        // Run the topology
        if (args != null && args.length > 0) { // with arguments: submit to the cluster, using the first argument as the topology name
            System.out.println("Running in remote mode");
            StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
        } else { // no arguments: submit locally
            // Start local mode
            System.out.println("Running in local mode");
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("Word-counts", conf, builder.createTopology());
            Thread.sleep(20000);
            // Shut down the local cluster
            cluster.shutdown();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Example 18
Source File: IPFraudDetectionTopology.java    From Building-Data-Streaming-Applications-with-Apache-Kafka with MIT License
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    Intialize(args[0]);
    logger.info("Successfully loaded configuration");


    BrokerHosts hosts = new ZkHosts(zkhost);
    SpoutConfig spoutConfig = new SpoutConfig(hosts, inputTopic, "/" + KafkaBroker, consumerGroup);
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    spoutConfig.startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
    String[] partNames = {"status_code"};
    String[] colNames = {"date", "request_url", "protocol_type", "status_code"};

    DelimitedRecordHiveMapper mapper = new DelimitedRecordHiveMapper().withColumnFields(new Fields(colNames))
            .withPartitionFields(new Fields(partNames));


    HiveOptions hiveOptions;
    // make sure to adjust the batch size and other parameters to your requirements
    hiveOptions = new HiveOptions(metaStoreURI, dbName, tblName, mapper).withTxnsPerBatch(250).withBatchSize(2)
            .withIdleTimeout(10).withCallTimeout(10000000);

    logger.info("Creating Storm Topology");
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("KafkaSpout", kafkaSpout, 1);

    builder.setBolt("frauddetect", new FraudDetectorBolt()).shuffleGrouping("KafkaSpout");
    builder.setBolt("KafkaOutputBolt",
            new IPFraudKafkaBolt(zkhost, "kafka.serializer.StringEncoder", KafkaBroker, outputTopic), 1)
            .shuffleGrouping("frauddetect");

    builder.setBolt("HiveOutputBolt", new IPFraudHiveBolt(), 1).shuffleGrouping("frauddetect");
    builder.setBolt("HiveBolt", new HiveBolt(hiveOptions)).shuffleGrouping("HiveOutputBolt");

    Config conf = new Config();
    if (args != null && args.length > 1) {
        conf.setNumWorkers(3);
        logger.info("Submitting topology to storm cluster");

        StormSubmitter.submitTopology(args[1], conf, builder.createTopology());
    } else {
        // Cap the maximum number of executors that can be spawned
        // for a component to 3
        conf.setMaxTaskParallelism(3);
        // LocalCluster is used to run locally
        LocalCluster cluster = new LocalCluster();
        logger.info("Submitting  topology to local cluster");
        cluster.submitTopology("KafkaLocal", conf, builder.createTopology());
        // sleep
        try {
            Thread.sleep(10000);
        } catch (InterruptedException e) {
            // if interrupted, kill the topology and shut down early
            logger.error("Exception occurred", e);
            cluster.killTopology("KafkaLocal");
            logger.info("Shutting down cluster");
            cluster.shutdown();
        }
        cluster.shutdown();

    }

}
 
Example 19
Source File: DispatcherAppenderTopology.java    From DBus with Apache License 2.0
private void start(StormTopology topology, boolean runAsLocal) throws Exception {

        Config conf = new Config();

        // Start type is "all" or "dispatcher"
        if (topologyType.equals(Constants.TopologyType.ALL) || topologyType.equals(Constants.TopologyType.DISPATCHER)) {
            /**
             * Dispatcher configuration
             */

            conf.put(com.creditease.dbus.commons.Constants.ZOOKEEPER_SERVERS, zookeeper);
            conf.put(com.creditease.dbus.commons.Constants.TOPOLOGY_ID, dispatcherTopologyId);
            logger.info(com.creditease.dbus.commons.Constants.ZOOKEEPER_SERVERS + "=" + zookeeper);
            logger.info(com.creditease.dbus.commons.Constants.TOPOLOGY_ID + "=" + dispatcherTopologyId);
        }

        // Start type is "all" or "appender"
        if (topologyType.equals(Constants.TopologyType.ALL) || topologyType.equals(Constants.TopologyType.APPENDER)) {

            /**
             * Appender configuration
             */
            conf.put(Constants.StormConfigKey.TOPOLOGY_ID, appenderTopologyId);
            conf.put(Constants.StormConfigKey.ZKCONNECT, zookeeper);
            conf.put(Constants.StormConfigKey.DATASOURCE, datasource);
        }

//        conf.put(Config.TOPOLOGY_TRANSFER_BUFFER_SIZE, 4096);
//        conf.put(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE, 4096);
//        conf.put(Config.TOPOLOGY_EXECUTOR_SEND_BUFFER_SIZE, 4096);

        conf.setDebug(true);

        conf.setNumAckers(1);
        // Set the number of workers
        conf.setNumWorkers(1);
        // Cap the number of in-flight tuples (emitted but not yet fully processed); defaults to 50 if not configured
        int maxSpoutPending = getConfigureValueWithDefault(Constants.ConfigureKey.MAX_SPOUT_PENDING, 50);
        conf.setMaxSpoutPending(maxSpoutPending);
        // Tuples not fully processed within this timeout are considered failed
        conf.setMessageTimeoutSecs(120);

        String opts = getWorkerChildopts();
        if (opts != null && opts.trim().length() > 0) {
            conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, opts);
        }

//        conf.put(Config.TOPOLOGY_SKIP_MISSING_KRYO_REGISTRATIONS, true);
//        conf.registerSerialization(org.apache.avro.util.Utf8.class);
//        conf.registerSerialization(com.creditease.dbus.commons.DBusConsumerRecord.class);
//        conf.registerSerialization(org.apache.kafka.common.record.TimestampType.class);
//        conf.registerSerialization(com.creditease.dbus.stream.common.appender.bean.EmitData.class);
//        conf.registerSerialization(com.creditease.dbus.stream.common.appender.enums.Command.class);
//        conf.registerSerialization(org.apache.avro.generic.GenericData.class);
//        conf.registerSerialization(com.creditease.dbus.stream.oracle.appender.avro.GenericData.class);
//        conf.registerSerialization(com.creditease.dbus.commons.DbusMessage12.class);
//        conf.registerSerialization(com.creditease.dbus.commons.DbusMessage12.Schema12.class);
//        conf.registerSerialization(com.creditease.dbus.commons.DbusMessage13.Schema13.class);
//        conf.registerSerialization(com.creditease.dbus.commons.DbusMessage13.class);
//        conf.registerSerialization(com.creditease.dbus.commons.DbusMessage.Field.class);
//        conf.registerSerialization(com.creditease.dbus.commons.DbusMessage.Payload.class);
//        conf.registerSerialization(com.creditease.dbus.commons.DbusMessage.Protocol.class);
//        conf.registerSerialization(com.creditease.dbus.commons.DbusMessage.ProtocolType.class);
//        conf.registerSerialization(com.creditease.dbus.stream.oracle.appender.bolt.processor.appender.OraWrapperData.class);
//        conf.registerSerialization(com.creditease.dbus.stream.common.appender.spout.cmds.TopicResumeCmd.class);

        if (runAsLocal) {
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology(topologyId, conf, topology);
            /*String cmd;
            do {
                cmd = System.console().readLine();
            } while (!cmd.equals("exit"));
            cluster.shutdown();*/
        } else {
            StormSubmitter.submitTopology(topologyId, conf, topology);
        }
    }
 
Example 20
Source File: WordCountApp.java    From java-study with Apache License 2.0
public static void main( String[] args ) //throws Exception
{
    // Instantiate the spout and bolts
    SentenceSpout spout = new SentenceSpout();
    SplitSentenceBolt splitBolt = new SplitSentenceBolt();
    WordCountBolt countBolt = new WordCountBolt();
    ReportBolt reportBolt = new ReportBolt();

    TopologyBuilder builder = new TopologyBuilder(); // create a TopologyBuilder instance

    // TopologyBuilder exposes a fluent API for defining the data flows between topology components

    // builder.setSpout(SENTENCE_SPOUT_ID, spout); // register the sentence spout

    // Two executors (threads); the default is one
    builder.setSpout(SENTENCE_SPOUT_ID, spout, 2);

    // SentenceSpout --> SplitSentenceBolt

    // Register a bolt subscribed to the sentence stream; shuffleGrouping tells Storm to distribute
    // SentenceSpout's tuples randomly and evenly across the SplitSentenceBolt instances
    // builder.setBolt(SPLIT_BOLT_ID, splitBolt).shuffleGrouping(SENTENCE_SPOUT_ID);

    // SplitSentenceBolt (word splitter): 4 tasks, 2 executors (threads)
    builder.setBolt(SPLIT_BOLT_ID, splitBolt, 2).setNumTasks(4).shuffleGrouping(SENTENCE_SPOUT_ID);

    // SplitSentenceBolt --> WordCountBolt

    // fieldsGrouping routes tuples containing particular field values to the same bolt instance;
    // here it guarantees that all tuples with the same "word" field go to the same WordCountBolt instance
    // builder.setBolt(COUNT_BOLT_ID, countBolt).fieldsGrouping(SPLIT_BOLT_ID, new Fields("word"));

    // WordCountBolt (word counter): 4 executors (threads)
    builder.setBolt(COUNT_BOLT_ID, countBolt, 4).fieldsGrouping(SPLIT_BOLT_ID, new Fields("word"));

    // WordCountBolt --> ReportBolt

    // globalGrouping routes every tuple emitted by WordCountBolt to the single ReportBolt instance
    builder.setBolt(REPORT_BOLT_ID, reportBolt).globalGrouping(COUNT_BOLT_ID);


    Config config = new Config(); // Config is a HashMap<String, Object> subclass used to configure the topology's runtime behavior
    // Set the number of workers
    // config.setNumWorkers(2);

    // Submit locally
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());

    Utils.sleep(10000);
    cluster.killTopology(TOPOLOGY_NAME);        
    cluster.shutdown();

}