Java Code Examples for org.apache.storm.Config#setMaxTaskParallelism()

The following examples show how to use org.apache.storm.Config#setMaxTaskParallelism(). They are taken from open source projects; you can go to the original project or source file by following the link above each example.
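Config#setMaxTaskParallelism(int) sets topology.max.task.parallelism, which caps the number of executors Storm will spawn for any single component of the topology. That is why most of the examples below apply it only when running inside a LocalCluster: it keeps the topology small enough to run comfortably in a single local process. Here is a minimal sketch of that pattern, assuming hypothetical DemoSpout and DemoBolt components and a made-up topology name; it is not code from any of the examples on this page.

// Minimal sketch (DemoSpout and DemoBolt are hypothetical placeholder components)
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("spout", new DemoSpout(), 8);                         // request 8 executors
builder.setBolt("bolt", new DemoBolt(), 8).shuffleGrouping("spout");   // request 8 executors

Config conf = new Config();
conf.setMaxTaskParallelism(2);   // at runtime, every component is capped at 2 executors

// Typical local-mode submission, as in the examples below
LocalCluster cluster = new LocalCluster();
cluster.submitTopology("parallelism-demo", conf, builder.createTopology());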
Example 1
Source File: ScottyDemoTopology.java    From scotty-window-processor with Apache License 2.0
public static void main(String[] args) throws Exception {
    LocalCluster cluster = new LocalCluster();
    TopologyBuilder builder = new TopologyBuilder();

    Config conf = new Config();
    conf.setDebug(false);
    conf.setNumWorkers(1);
    conf.setMaxTaskParallelism(1);
    //Disable Acking
    conf.setNumAckers(0);

    KeyedScottyWindowOperator<Integer, Integer> scottyBolt = new KeyedScottyWindowOperator<>(new Sum(), 0);
    scottyBolt.addWindow(new TumblingWindow(WindowMeasure.Time, 1000));
    scottyBolt.addWindow(new SlidingWindow(WindowMeasure.Time, 1000, 250));
    scottyBolt.addWindow(new SessionWindow(WindowMeasure.Time, 1000));

    builder.setSpout("spout", new DataGeneratorSpout());
    builder.setBolt("scottyWindow", scottyBolt).fieldsGrouping("spout", new Fields("key"));
    builder.setBolt("printer", new PrinterBolt()).shuffleGrouping("scottyWindow");

    cluster.submitTopology("testTopology", conf, builder.createTopology());
    //cluster.killTopology("testTopology");
    //cluster.shutdown();
}
 
Example 2
Source File: FullPullerTopology.java    From DBus with Apache License 2.0
private void start(StormTopology topology, boolean runAsLocal) throws Exception {
    Config conf = new Config();
    conf.put(FullPullConstants.FULL_SPLITTER_TOPOLOGY_ID, fullSplitterTopologyId);
    conf.put(FullPullConstants.FULL_PULLER_TOPOLOGY_ID, fullPullerTopologyId);
    conf.put(FullPullConstants.DS_NAME, topologyId);
    conf.put(FullPullConstants.ZKCONNECT, zkConnect);
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, commonConfSplit.getProperty(FullPullConstants.TOPOLOGY_WORKER_CHILDOPTS));
    // Set the message timeout so that every shard can finish pulling its data within that time
    conf.setMessageTimeoutSecs(Integer.parseInt(commonConfSplit.getProperty(FullPullConstants.STORM_MESSAGE_TIMEOUT)));
    conf.setMaxSpoutPending(Integer.parseInt(commonConfSplit.getProperty(FullPullConstants.STORM_MAX_SPOUT_PENDING)));
    conf.setNumWorkers(Integer.parseInt(commonConfSplit.getProperty(FullPullConstants.STORM_NUM_WORKERS)));
    conf.setDebug(true);

    if (runAsLocal) {
        conf.setMaxTaskParallelism(3);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyName, conf, topology);
    } else {
        StormSubmitter.submitTopology(topologyName, conf, topology);
    }
}
 
Example 3
Source File: DBusLogProcessorTopology.java    From DBus with Apache License 2.0
private void start(StormTopology topology, boolean runAsLocal) throws Exception {
    Config conf = new Config();
    conf.put(com.creditease.dbus.commons.Constants.ZOOKEEPER_SERVERS, zkConnect);
    conf.put(Constants.TOPOLOGY_ID, topologyId);
    conf.setMessageTimeoutSecs(Integer.parseInt(properties.getProperty(Constants.LOG_MESSAGE_TIMEOUT)));
    //conf.setMaxSpoutPending(30);
    conf.setDebug(true);
    conf.setNumWorkers(Integer.parseInt(properties.getProperty(Constants.LOG_NUMWORKERS)));

    if (runAsLocal) {
        conf.setMaxTaskParallelism(10);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyName, conf, topology);
    } else {
        StormSubmitter.submitTopology(topologyName, conf, topology);
    }
}
 
Example 4
Source File: SinkTopology.java    From DBus with Apache License 2.0
private void start(StormTopology topology, boolean runAsLocal) throws Exception {
    Config conf = new Config();
    conf.put(Constants.ZOOKEEPER_SERVERS, zkConnect);
    conf.put(Constants.TOPOLOGY_ID, topologyId);
    conf.put(Constants.SINK_TYPE, sinkType);
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, sinkerConf.getProperty(SinkerConstants.TOPOLOGY_WORKER_CHILDOPTS));

    conf.setMessageTimeoutSecs(Integer.parseInt(sinkerConf.getProperty(SinkerConstants.STORM_MESSAGE_TIMEOUT)));
    conf.setMaxSpoutPending(Integer.parseInt(sinkerConf.getProperty(SinkerConstants.STORM_MAX_SPOUT_PENDING)));
    conf.setDebug(true);
    conf.setNumWorkers(Integer.parseInt(sinkerConf.getProperty(SinkerConstants.STORM_NUM_WORKERS)));
    if (runAsLocal) {
        conf.setMaxTaskParallelism(10);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyName, conf, topology);
    } else {
        StormSubmitter.submitTopology(topologyName, conf, topology);
    }
}
 
Example 5
Source File: DrpcTestTopologyCsharp.java    From storm-net-adapter with Apache License 2.0
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    DRPCSpout drpcSpout = new DRPCSpout("simpledrpc");
    builder.setSpout("drpc-input", drpcSpout, 1);

    builder.setBolt("simple", new SimpleDRPC(), 2)
            .noneGrouping("drpc-input");

    builder.setBolt("return", new ReturnResults(), 1)
            .noneGrouping("simple");

    Config conf = new Config();
    conf.setDebug(true);
    conf.setMaxTaskParallelism(1);

    try {
        StormSubmitter.submitTopology("drpc-q", conf, builder.createTopology());
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Example 6
Source File: AnchoredWordCount.java    From storm-net-adapter with Apache License 2.0
protected int run(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new RandomSentenceSpout(), 4);

    builder.setBolt("split", new SplitSentence(), 4).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 4).fieldsGrouping("split", new Fields("word"));

    Config conf = new Config();
    conf.setMaxTaskParallelism(3);

    String topologyName = "word-count";

    conf.setNumWorkers(3);

    if (args != null && args.length > 0) {
        topologyName = args[0];
    }
    return submit(topologyName, conf, builder);
}
 
Example 7
Source File: StormTestUtil.java    From atlas with Apache License 2.0
public static Config submitTopology(ILocalCluster stormCluster, String topologyName,
                                    StormTopology stormTopology) throws Exception {
    Config stormConf = new Config();
    stormConf.putAll(Utils.readDefaultConfig());
    stormConf.put("storm.cluster.mode", "local");
    stormConf.setDebug(true);
    stormConf.setMaxTaskParallelism(3);
    stormConf.put(Config.STORM_TOPOLOGY_SUBMISSION_NOTIFIER_PLUGIN,
            org.apache.atlas.storm.hook.StormAtlasHook.class.getName());

    stormCluster.submitTopology(topologyName, stormConf, stormTopology);

    Thread.sleep(10000);
    return stormConf;
}
 
Example 8
Source File: DBusRouterTopology.java    From DBus with Apache License 2.0
private void start(StormTopology topology, boolean runAsLocal) throws Exception {
    Config conf = new Config();
    conf.put(com.creditease.dbus.commons.Constants.ZOOKEEPER_SERVERS, zkConnect);
    conf.put(Constants.TOPOLOGY_ID, topologyId);
    conf.put(Constants.TOPOLOGY_ALIAS, alias);
    conf.put(Constants.ROUTER_PROJECT_NAME, projectName);

    String workerChildOpts = routerConf.getProperty(DBusRouterConstants.STORM_TOPOLOGY_WORKER_CHILDOPTS, "-Xmx2g");
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, workerChildOpts);

    int msgTimeout = Integer.valueOf(routerConf.getProperty(DBusRouterConstants.STORM_MESSAGE_TIMEOUT, "10"));
    conf.setMessageTimeoutSecs(msgTimeout);

    int maxSpoutPending = Integer.valueOf(routerConf.getProperty(DBusRouterConstants.STORM_MAX_SPOUT_PENDING, "100"));
    conf.setMaxSpoutPending(maxSpoutPending);
    conf.setDebug(true);

    int numWorks = Integer.valueOf(routerConf.getProperty(DBusRouterConstants.STORM_NUM_WORKS, "1"));
    conf.setNumWorkers(numWorks);

    if (runAsLocal) {
        conf.setMaxTaskParallelism(10);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyName, conf, topology);
    } else {
        StormSubmitter.submitTopology(topologyName, conf, topology);
    }
}
 
Example 9
Source File: KafkaStormWordCountTopology.java    From Building-Data-Streaming-Applications-with-Apache-Kafka with MIT License
public static void main(String[] args) throws Exception {

    String zkConnString = "localhost:2181";
    String topic = "words";
    BrokerHosts hosts = new ZkHosts(zkConnString);

    SpoutConfig kafkaSpoutConfig = new SpoutConfig(hosts, topic, "/" + topic,
            "wordcountID");
    kafkaSpoutConfig.startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
    kafkaSpoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());

    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout("kafkaspout", new KafkaSpout(kafkaSpoutConfig));
    topologyBuilder.setBolt("stringsplit", new StringToWordsSpliterBolt()).shuffleGrouping("kafkaspout");
    topologyBuilder.setBolt("counter", new WordCountCalculatorBolt()).shuffleGrouping("stringsplit");

    Config config = new Config();
    config.setDebug(true);
    if (args != null && args.length > 1) {
        config.setNumWorkers(3);
        StormSubmitter.submitTopology(args[1], config, topologyBuilder.createTopology());
    } else {
        // Cap the maximum number of executors that can be spawned
        // for a component to 3
        config.setMaxTaskParallelism(3);
        // LocalCluster is used to run locally
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("KafkaLocal", config, topologyBuilder.createTopology());
        // Let the topology run for a while before shutting down
        try {
            Thread.sleep(10000);
        } catch (InterruptedException e) {
            // If the sleep is interrupted, kill the locally submitted topology first
            cluster.killTopology("KafkaLocal");
            cluster.shutdown();
        }

        cluster.shutdown();
    }
}
 
Example 10
Source File: AppMain.java    From storm_spring_boot_demo with MIT License
/**
 * Used for debugging: submits the topology to a local cluster.
 * @param name topology name
 * @param builder topology builder
 * @param conf topology configuration
 * @throws InterruptedException
 */
private static void localSubmit(String name,TopologyBuilder builder, Config conf)
        throws InterruptedException {
    conf.setDebug(true);
    conf.setMaxTaskParallelism(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(name, conf, builder.createTopology());
    Thread.sleep(100000);
    cluster.shutdown();
}
 
Example 11
Source File: StormTestUtil.java    From incubator-atlas with Apache License 2.0
public static Config submitTopology(ILocalCluster stormCluster, String topologyName,
                                    StormTopology stormTopology) throws Exception {
    Config stormConf = new Config();
    stormConf.putAll(Utils.readDefaultConfig());
    stormConf.put("storm.cluster.mode", "local");
    stormConf.setDebug(true);
    stormConf.setMaxTaskParallelism(3);
    stormConf.put(Config.STORM_TOPOLOGY_SUBMISSION_NOTIFIER_PLUGIN,
            org.apache.atlas.storm.hook.StormAtlasHook.class.getName());

    stormCluster.submitTopology(topologyName, stormConf, stormTopology);

    Thread.sleep(10000);
    return stormConf;
}
 
Example 12
Source File: ThresholdingEngineAlarmTest.java    From monasca-thresh with Apache License 2.0
@BeforeMethod
public void beforeMethod() throws Exception {
  // Fixtures
  alarmDefinitionDAO = mock(AlarmDefinitionDAO.class);

  // Mocks
  alarmDAO = new MockAlarmDAO();

  // Bindings
  Injector.reset();
  Injector.registerModules(new AbstractModule() {
    protected void configure() {
      bind(AlarmDAO.class).toInstance(alarmDAO);
      bind(AlarmDefinitionDAO.class).toInstance(alarmDefinitionDAO);
    }
  });

  // Config
  ThresholdingConfiguration threshConfig = new ThresholdingConfiguration();
  threshConfig.alarmDelay = 1;
  threshConfig.sporadicMetricNamespaces = new HashSet<String>();
  Serialization.registerTarget(KafkaProducerConfiguration.class);

  threshConfig.kafkaProducerConfig =
      Serialization
          .fromJson("{\"KafkaProducerConfiguration\":{\"topic\":\"alarm-state-transitions\",\"metadataBrokerList\":\"192.168.10.10:9092\",\"requestRequiredAcks\":1,\"requestTimeoutMs\":10000,\"producerType\":\"sync\",\"serializerClass\":\"kafka.serializer.StringEncoder\",\"keySerializerClass\":\"\",\"partitionerClass\":\"\",\"compressionCodec\":\"none\",\"compressedTopics\":\"\",\"messageSendMaxRetries\":3,\"retryBackoffMs\":100,\"topicMetadataRefreshIntervalMs\":600000,\"queueBufferingMaxMs\":5000,\"queueBufferingMaxMessages\":10000,\"queueEnqueueTimeoutMs\":-1,\"batchNumMessages\":200,\"sendBufferBytes\":102400,\"clientId\":\"Threshold_Engine\"}}");
  Config stormConfig = new Config();
  stormConfig.setMaxTaskParallelism(1);
  metricSpout = new FeederSpout(new Fields(MetricSpout.FIELDS));
  eventSpout = new FeederSpout(new Fields("event"));
  alarmEventForwarder = mock(AlarmEventForwarder.class);
  Injector
      .registerModules(new TopologyModule(threshConfig, stormConfig, metricSpout, eventSpout));
  Injector.registerModules(new ProducerModule(alarmEventForwarder));

  // Evaluate alarm stats every 5 seconds
  System.setProperty(MetricAggregationBolt.TICK_TUPLE_SECONDS_KEY, "5");

  startTopology();
}
 
Example 13
Source File: IPFraudDetectionTopology.java    From Building-Data-Streaming-Applications-with-Apache-Kafka with MIT License
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    Intialize(args[0]);
    logger.info("Successfully loaded Configuration ");


    BrokerHosts hosts = new ZkHosts(zkhost);
    SpoutConfig spoutConfig = new SpoutConfig(hosts, inputTopic, "/" + KafkaBroker, consumerGroup);
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    spoutConfig.startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
    String[] partNames = {"status_code"};
    String[] colNames = {"date", "request_url", "protocol_type", "status_code"};

    DelimitedRecordHiveMapper mapper = new DelimitedRecordHiveMapper().withColumnFields(new Fields(colNames))
            .withPartitionFields(new Fields(partNames));


    HiveOptions hiveOptions;
    // Make sure you change the batch size and other parameters according to your requirements
    hiveOptions = new HiveOptions(metaStoreURI, dbName, tblName, mapper).withTxnsPerBatch(250).withBatchSize(2)
            .withIdleTimeout(10).withCallTimeout(10000000);

    logger.info("Creating Storm Topology");
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("KafkaSpout", kafkaSpout, 1);

    builder.setBolt("frauddetect", new FraudDetectorBolt()).shuffleGrouping("KafkaSpout");
    builder.setBolt("KafkaOutputBolt",
            new IPFraudKafkaBolt(zkhost, "kafka.serializer.StringEncoder", KafkaBroker, outputTopic), 1)
            .shuffleGrouping("frauddetect");

    builder.setBolt("HiveOutputBolt", new IPFraudHiveBolt(), 1).shuffleGrouping("frauddetect");
    builder.setBolt("HiveBolt", new HiveBolt(hiveOptions)).shuffleGrouping("HiveOutputBolt");

    Config conf = new Config();
    if (args != null && args.length > 1) {
        conf.setNumWorkers(3);
        logger.info("Submiting  topology to storm cluster");

        StormSubmitter.submitTopology(args[1], conf, builder.createTopology());
    } else {
        // Cap the maximum number of executors that can be spawned
        // for a component to 3
        conf.setMaxTaskParallelism(3);
        // LocalCluster is used to run locally
        LocalCluster cluster = new LocalCluster();
        logger.info("Submitting  topology to local cluster");
        cluster.submitTopology("KafkaLocal", conf, builder.createTopology());
        // sleep
        try {
            Thread.sleep(10000);
        } catch (InterruptedException e) {
            // If the sleep is interrupted, log the error and kill the locally submitted topology
            logger.error("Exception occurred" + e);
            cluster.killTopology("KafkaLocal");
            logger.info("Shutting down cluster");
            cluster.shutdown();
        }
        cluster.shutdown();

    }

}
 
Example 14
Source File: RandomNumberGeneratorSpout.java    From storm-net-adapter with Apache License 2.0
@Override
public Map<String, Object> getComponentConfiguration() {
    Config conf = new Config();
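    // topology.max.task.parallelism is one of the few settings a component may override for
    // itself via getComponentConfiguration(); returning it here caps this spout at one executor.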
    conf.setMaxTaskParallelism(1);
    return conf;
}
 
Example 15
Source File: ThresholdingEngineTest.java    From monasca-thresh with Apache License 2.0
@BeforeMethod
public void beforeMethod() throws Exception {
  // Fixtures
  final AlarmExpression expression =
      new AlarmExpression("max(cpu{id=5}) >= 3 or max(mem{id=5}) >= 5");
  final AlarmExpression expression2 = AlarmExpression.of(
    "count(log.error{id=5},deterministic) >= 1 OR count(log.warning{id=5},deterministic) >= 1"
  );
  final AlarmExpression expression3 = AlarmExpression.of(
    "max(cpu{id=5}) >= 3 AND count(log.warning{id=5},deterministic) >= 1"
  );

  cpuMetricDef = expression.getSubExpressions().get(0).getMetricDefinition();
  memMetricDef = expression.getSubExpressions().get(1).getMetricDefinition();
  logErrorMetricDef = expression2.getSubExpressions().get(0).getMetricDefinition();
  logWarningMetricDef = expression2.getSubExpressions().get(1).getMetricDefinition();

  extraMemMetricDefDimensions = new HashMap<>(memMetricDef.dimensions);
  extraMemMetricDefDimensions.put("Group", "group A");

  alarmDefinition =
      new AlarmDefinition(TEST_ALARM_TENANT_ID, TEST_ALARM_NAME,
          TEST_ALARM_DESCRIPTION, expression, "LOW", true, new ArrayList<String>());
  this.deterministicAlarmDefinition = new AlarmDefinition(
    DET_TEST_ALARM_TENANT_ID,
    DET_TEST_ALARM_NAME,
    DET_TEST_ALARM_DESCRIPTION,
    expression2,
    "LOW",
    true,
    new ArrayList<String>()
  );
  this.mixedAlarmDefinition = new AlarmDefinition(
    MIXED_TEST_ALARM_TENANT_ID,
    MIXED_TEST_ALARM_NAME,
    MIXED_TEST_ALARM_DESCRIPTION,
    expression3,
    "LOW",
    true,
    new ArrayList<String>()
  );

  // Mocks
  alarmDAO = mock(AlarmDAO.class);
  alarmDefinitionDAO = mock(AlarmDefinitionDAO.class);

  // Bindings
  Injector.reset();
  Injector.registerModules(new AbstractModule() {
    protected void configure() {
      bind(AlarmDAO.class).toInstance(alarmDAO);
      bind(AlarmDefinitionDAO.class).toInstance(alarmDefinitionDAO);
    }
  });

  // Config
  ThresholdingConfiguration threshConfig = new ThresholdingConfiguration();
  threshConfig.alarmDelay = 1;
  threshConfig.sporadicMetricNamespaces = new HashSet<String>();
  Serialization.registerTarget(KafkaProducerConfiguration.class);

  threshConfig.kafkaProducerConfig =
      Serialization
          .fromJson("{\"KafkaProducerConfiguration\":{\"topic\":\"alarm-state-transitions\",\"metadataBrokerList\":\"192.168.10.10:9092\",\"requestRequiredAcks\":1,\"requestTimeoutMs\":10000,\"producerType\":\"sync\",\"serializerClass\":\"kafka.serializer.StringEncoder\",\"keySerializerClass\":\"\",\"partitionerClass\":\"\",\"compressionCodec\":\"none\",\"compressedTopics\":\"\",\"messageSendMaxRetries\":3,\"retryBackoffMs\":100,\"topicMetadataRefreshIntervalMs\":600000,\"queueBufferingMaxMs\":5000,\"queueBufferingMaxMessages\":10000,\"queueEnqueueTimeoutMs\":-1,\"batchNumMessages\":200,\"sendBufferBytes\":102400,\"clientId\":\"Threshold_Engine\"}}");
  Config stormConfig = new Config();
  stormConfig.setMaxTaskParallelism(1);
  metricSpout = new FeederSpout(new Fields(MetricSpout.FIELDS));
  eventSpout = new FeederSpout(new Fields("event"));
  alarmEventForwarder = mock(AlarmEventForwarder.class);
  Injector
      .registerModules(new TopologyModule(threshConfig, stormConfig, metricSpout, eventSpout));
  Injector.registerModules(new ProducerModule(alarmEventForwarder));

}
 
Example 16
Source File: WordCountTopologyCsharp.java    From storm-net-adapter with Apache License 2.0
public static void main(String[] args) throws Exception {

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("generator", new Generator(), 1);

    builder.setBolt("splitter", new Splitter(), 5).fieldsGrouping("generator",
            new Fields("word"));

    builder.setBolt("counter", new Counter(), 8).fieldsGrouping("splitter",
            new Fields("word", "count"));

    Config conf = new Config();
    //conf.setDebug(true);

    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);

        StormSubmitter.submitTopologyWithProgressBar(args[0], conf,
                builder.createTopology());
    } else {
        conf.setMaxTaskParallelism(3);

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("WordCount", conf, builder.createTopology());

        Thread.sleep(10000);

        cluster.shutdown();
    }
}