Java Code Examples for backtype.storm.Config.setMessageTimeoutSecs()

The following are Java code examples showing how to use setMessageTimeoutSecs() of the backtype.storm.Config class. You can vote up the examples you like; your votes will be used in our system to surface more good examples.
Example 1
Project: Infrastructure   File: AbstractTopology.java   Source Code and License Vote up 6 votes
/**
 * Creates a standalone topology.
 * 
 * @param args the topology arguments
 * @param topo the topology instance
 * @throws Exception in case of creation problems
 */
/**
 * Creates a standalone topology and submits it either to a remote cluster
 * (when a topology name is passed as the first argument) or to an in-process
 * {@link LocalCluster}.
 * 
 * @param args the topology arguments; args[0], if present, is the topology name
 * @param topo the topology instance
 * @throws Exception in case of creation problems
 */
public static void main(String[] args, AbstractTopology topo) throws Exception {
    Config config = new Config();
    config.setMessageTimeoutSecs(100);
    PipelineOptions options = new PipelineOptions(args);
    RecordingTopologyBuilder builder = new RecordingTopologyBuilder(options);
    topo.createTopology(config, builder);

    // main topology: int numWorkers = options.getNumberOfWorkers(2);
    options.toConf(config);

    boolean remote = args != null && args.length > 0;
    if (remote) {
        // remote submission under the name given on the command line
        config.setNumWorkers(2);
        StormSubmitter.submitTopology(args[0], config, builder.createTopology());
    } else {
        // no name given: run locally under a fixed test name
        config.setMaxTaskParallelism(2);
        final LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("testGenPip", config, builder.createTopology());
    }
}
 
Example 2
Project: Infrastructure   File: Topology.java   Source Code and License Vote up 6 votes
/**
 * Creates a standalone topology.
 * 
 * @param args the topology arguments
 * @throws Exception in case of creation problems
 */
/**
 * Creates a standalone topology.
 * 
 * @param args the topology arguments; if present, args[0] names the remotely
 *     submitted topology, otherwise the pipeline runs on a local cluster
 * @throws Exception in case of creation problems
 */
public static void main(String[] args) throws Exception {
    Config config = new Config();
    config.setMessageTimeoutSecs(100);
    PipelineOptions options = new PipelineOptions(args);
    RecordingTopologyBuilder b = new RecordingTopologyBuilder(options);
    createTopology(b);
    // BUG FIX: the original read args[0] here unconditionally, so running with
    // no arguments crashed before ever reaching the local-cluster branch below.
    boolean remote = args != null && args.length > 0;
    String name = remote ? args[0] : Naming.PIPELINE_NAME;
    b.close(name, config);
    
    // main topology: int numWorkers = options.getNumberOfWorkers(2);
    options.toConf(config);
    
    if (remote) {
        config.setNumWorkers(2);
        StormSubmitter.submitTopology(name, config, b.createTopology());
    } else {
        config.setMaxTaskParallelism(2);
        final LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(Naming.PIPELINE_NAME, config, b.createTopology());
    }
}
 
Example 3
Project: Infrastructure   File: TestTopology.java   Source Code and License Vote up 6 votes
/**
 * Creates a standalone topology.
 * 
 * @param args the topology arguments
 * @throws Exception in case of creation problems
 */
/**
 * Creates a standalone topology.
 * 
 * @param args the topology arguments; if present, args[0] names the remotely
 *     submitted topology, otherwise the pipeline runs on a local cluster
 * @throws Exception in case of creation problems
 */
public static void main(String[] args) throws Exception {
    Config config = new Config();
    Naming.setDefaultInitializeAlgorithms(config, defaultInitAlgorithms);
    config.setMessageTimeoutSecs(100);
    PipelineOptions options = new PipelineOptions(args);
    RecordingTopologyBuilder b = new RecordingTopologyBuilder(options);
    createTopology(b);
    // BUG FIX: args[0] was dereferenced before the emptiness check below, so
    // the local-cluster branch was unreachable with empty args; guard first.
    boolean remote = args != null && args.length > 0;
    String name = remote ? args[0] : PIP_NAME;
    b.close(name, config);
    
    // main topology: int numWorkers = options.getNumberOfWorkers(2);
    options.toConf(config);
    
    if (remote) {
        config.setNumWorkers(2);
        StormSubmitter.submitTopology(name, config, b.createTopology());
    } else {
        config.setMaxTaskParallelism(2);
        final LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(PIP_NAME, config, b.createTopology());
    }
}
 
Example 4
Project: LearnStorm   File: LogAnalyzer.java   Source Code and License Vote up 6 votes
/**
 * Builds the log analyzer topology (Kafka spout feeding Elasticsearch bolts)
 * and submits it to the cluster as "LogAnalyzerV1".
 *
 * @throws AlreadyAliveException if a topology of the same name is already running
 * @throws InvalidTopologyException if the assembled topology is invalid
 * @throws AuthorizationException if the submitter lacks permission
 */
private void buildAndSubmit() throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
	// parseInt avoids the pointless boxing of Integer.valueOf; still throws
	// NumberFormatException if "num.workers" is missing or malformed.
	final int numWorkers = Integer.parseInt(topologyConfig.getProperty("num.workers"));
	Config config = new Config();
	config.setDebug(DEBUG);
	config.setNumWorkers(numWorkers);
	config.setMaxSpoutPending(1000000);
	// https://github.com/apache/storm/tree/v0.10.0/external/storm-kafka
	// The message timeout (30 secs by default) must be larger than
	// retryDelayMaxMs (60 secs by default) in KafkaSpout.
	config.setMessageTimeoutSecs(600);

	TopologyBuilder builder = new TopologyBuilder();
	configureKafkaSpout(builder, config);
	configureESBolts(builder, config);

//	LocalCluster cluster = new LocalCluster();
	StormSubmitter.submitTopology("LogAnalyzerV1", config, builder.createTopology());
}
 
Example 5
Project: LearnStorm   File: ApLogAnalyzer.java   Source Code and License Vote up 6 votes
/**
 * Builds the AP log analyzer topology (Kafka spout feeding Elasticsearch
 * bolts) and submits it to the cluster as "ApLogAnalyzerV1".
 *
 * @throws AlreadyAliveException if a topology of the same name is already running
 * @throws InvalidTopologyException if the assembled topology is invalid
 * @throws AuthorizationException if the submitter lacks permission
 */
private void buildAndSubmit() throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
	final int workerCount = Integer.parseInt(topologyConfig.getProperty("num.workers"));

	Config conf = new Config();
	conf.setDebug(DEBUG);
	conf.setNumWorkers(workerCount);
	conf.setMaxSpoutPending(1000000);
	// https://github.com/apache/storm/tree/v0.10.0/external/storm-kafka
	// The message timeout (30 secs by default) must be larger than
	// retryDelayMaxMs (60 secs by default) in KafkaSpout.
	conf.setMessageTimeoutSecs(600);

	TopologyBuilder builder = new TopologyBuilder();
	configureKafkaSpout(builder, conf);
	configureESBolts(builder, conf);
//	configureHBaseBolts(builder, conf);

//	conf.put(Config.NIMBUS_HOST, "hdp01.localdomain");
//	System.setProperty("storm.jar", "/root/workspace//LearnStorm/target/LearnStorm-0.0.1-SNAPSHOT.jar");
//	System.setProperty("hadoop.home.dir", "/tmp");
//	LocalCluster cluster = new LocalCluster();
	StormSubmitter.submitTopology("ApLogAnalyzerV1", conf, builder.createTopology());
}
 
Example 6
Project: Infrastructure   File: TestTopology.java   Source Code and License Vote up 5 votes
/**
 * Creates the main test topology: a single intermediary spout shuffle-grouped
 * into an end bolt, with custom serialization for {@code DataItem}.
 *
 * @return the topology output holding the config, the builder and one worker
 */
@Override
public TopologyOutput createMainTopology() {
    // Configuration: generous message timeout plus DataItem serialization.
    Config cfg = new Config();
    cfg.setMessageTimeoutSecs(100);
    Config.registerSerialization(cfg, DataItem.class, DataItemSerializer.class);

    RecordingTopologyBuilder b = new RecordingTopologyBuilder(options);
    String spoutId = "IntermediarySpout";
    String spoutStream = "IntermediarySpoutStreamId";
    b.setSpout(spoutId, new TestIntermediarySpout(spoutId, TOPOLOGY_NAME, spoutStream), 1).setNumTasks(1);
    BoltDeclarer end = b.setBolt("EndBolt", new TestEndBolt("EndBolt", TOPOLOGY_NAME, "EndBoltStreamId"), 1);
    end.shuffleGrouping(spoutId, spoutStream);
    return new TopologyOutput(cfg, b, 1);
}
 
Example 7
Project: miner   File: TopologyMain.java   Source Code and License Vote up 4 votes
/**
 * Wires up and submits the crawler topology: begin/loop spouts feed URL
 * generation, which flows through proxy selection, fetching, parsing and
 * finally storage.
 *
 * @param args optional; args[0], if present, is used as the remote topology name
 */
public static void main(String[] args) {
	try {
		TopologyBuilder builder = new TopologyBuilder();

		// Entry spouts; the pending cap throttles in-flight tuples per spout.
		builder.setSpout("beginspout", new BeginSpout(), PlatformParas.begin_spout_num).setMaxSpoutPending(200);//1,500
		builder.setSpout("loopspout", new LoopSpout(), PlatformParas.loop_spout_num).setMaxSpoutPending(200);

		// URL generation: one bolt fed by both spouts, one fed back from parse.
		builder.setBolt("generateurl", new GenerateUrlBolt(), PlatformParas.generateurl_bolt_num)//2
				.shuffleGrouping("beginspout")
				.shuffleGrouping("loopspout");
		builder.setBolt("generateurl-loop-bolt", new GenerateUrlBolt(), PlatformParas.generateurl_bolt_num)
				.shuffleGrouping("parse", "generate-loop");

		// Download pipeline: proxy selection -> fetch -> parse -> store.
		builder.setBolt("proxy", new ProxyBolt(), PlatformParas.proxy_bolt_num)
				.shuffleGrouping("generateurl")
				.shuffleGrouping("generateurl-loop-bolt");
		builder.setBolt("fetch", new FetchBolt(), PlatformParas.fetch_bolt_num)
				.shuffleGrouping("proxy");
		builder.setBolt("parse", new ParseBolt(), PlatformParas.parse_bolt_num)
				.shuffleGrouping("fetch");
		builder.setBolt("store", new StoreBolt(), PlatformParas.store_bolt_num)
				.shuffleGrouping("parse", "store");

		Config conf = new Config();
		conf.setDebug(false);
		//default:30s
		conf.setMessageTimeoutSecs(PlatformParas.message_timeout_secs);
		//conf.setMaxSpoutPending(2000);

		if (args != null && args.length > 0) {
			// remote submission under the name given on the command line
			conf.setNumWorkers(PlatformParas.work_num);
			StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
		} else {
			// local test run
			conf.setMaxTaskParallelism(2);
			LocalCluster cluster = new LocalCluster();
			cluster.submitTopology("test", conf, builder.createTopology());
		}
	} catch (Exception e) {
		e.printStackTrace();
	}
}
 
Example 8
Project: Infrastructure   File: CoordinationConfigurationTests.java   Source Code and License Vote up 4 votes
/**
 * Tests the pipeline options end-to-end: configures the coordination layer
 * from properties, mimics submission-time option handling, round-trips the
 * options through command-line args as a topology would receive them, and
 * asserts that the resulting Storm config carries all configured values.
 */
@Test
public void pipelineOptionsTest() {
    PipelineOptions opts = new PipelineOptions();
    opts.setNumberOfWorkers(5);
    // Properties driving the coordination configuration under test.
    Properties prop = new Properties();
    prop.put(CoordinationConfiguration.PIPELINE_START_SOURCE_AUTOCONNECT, "true");
    prop.put(CoordinationConfiguration.INIT_MODE, InitializationMode.DYNAMIC.name());
    prop.put(Configuration.HOST_EVENT, "local");
    prop.put(Configuration.PORT_EVENT, 1234);
    prop.put(Configuration.EVENT_DISABLE_LOGGING, "aaa,bbb");
    prop.put(Configuration.PIPELINE_INTERCONN_PORTS, "10-20");
    CoordinationConfiguration.configure(prop, false);
    System.out.println("Configured " + prop);
    
    // during submission
    @SuppressWarnings("rawtypes")
    Map stormConf = Utils.readStormConfig();
    StormPipelineOptionsSetter optSetter = new StormPipelineOptionsSetter(stormConf, opts);
    StormUtils.doCommonConfiguration(optSetter);
    System.out.println("Conf " + stormConf);
    System.out.println("OPTS " + opts);
    // serialize options to args, as passed to a topology main method
    String[] args = opts.toArgs("pip");
    System.out.println("ARGS " + java.util.Arrays.toString(args));
    
    // in topology: re-parse the args and build the Storm config from them
    PipelineOptions options = new PipelineOptions(args);
    Config config = new Config();
    config.setMessageTimeoutSecs(100);
    config.setDebug(false);
    config.put("windowSize", 1 * 30);  // Window size (in secs)
    config.put("windowAdvance", 1);  // Advance of the window (in secs)
    config.put("SUBPIPELINE.NAME", "pip"); //sub-pipeline namespace
    //The settings to optimize the storm performance.
    config.put(Config.TOPOLOGY_RECEIVER_BUFFER_SIZE, 8);
    config.put(Config.TOPOLOGY_TRANSFER_BUFFER_SIZE, 32);
    config.put(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE, 16384);
    config.put(Config.TOPOLOGY_EXECUTOR_SEND_BUFFER_SIZE, 16384);
    config.put(Configuration.HOST_EVENT, Configuration.getEventHost());
    config.put(Configuration.PORT_EVENT, Configuration.getEventPort());
    config.put(Configuration.EVENT_DISABLE_LOGGING, Configuration.getEventDisableLogging());
    config.put(Configuration.PIPELINE_INTERCONN_PORTS, Configuration.getPipelinePorts());
    options.toConf(config);
    System.out.println("Pip Config " + config);
    
    // the configured property values must survive the full round trip
    // (note: the int 1234 comes back as the string "1234")
    Assert.assertEquals("true", config.get(Constants.CONFIG_KEY_SOURCE_AUTOCONNECT));
    Assert.assertEquals(InitializationMode.DYNAMIC.name(), config.get(Constants.CONFIG_KEY_INIT_MODE));
    Assert.assertEquals("local", config.get(Configuration.HOST_EVENT));
    Assert.assertEquals("1234", config.get(Configuration.PORT_EVENT));
    Assert.assertEquals("aaa,bbb", config.get(Configuration.EVENT_DISABLE_LOGGING));
    Assert.assertEquals("10-20", config.get(Configuration.PIPELINE_INTERCONN_PORTS));
    
    // reset global state so later tests start from a clean configuration
    CoordinationConfiguration.clear();
}
 
Example 9
Project: storm-hive-streaming-example   File: Topology.java   Source Code and License Vote up 4 votes
/**
 * Builds and submits the Kafka -&gt; stock-processing -&gt; Hive streaming
 * topology under the name "StormHiveStreamingTopo".
 *
 * @param args unused command-line arguments
 */
public void run(String... args) {
    final String kafkaTopic = "stock_topic";

    // Kafka spout: start consuming from "now" rather than the earliest offset.
    SpoutConfig spoutConfig = new SpoutConfig(new ZkHosts("127.0.0.1"),
            kafkaTopic, "/kafka_storm", "StormSpout");
    spoutConfig.useStartOffsetTimeIfOffsetOutOfRange = true;
    spoutConfig.startOffsetTime = System.currentTimeMillis();
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

    // Hive connection configuration
    final String metaStoreURI = "thrift://one.hdp:9083";
    final String dbName = "default";
    final String tblName = "stock_prices";
    // Fields for possible partition / column data
    final String[] partNames = {"name"};
    final String[] colNames = {"day", "open", "high", "low", "close", "volume","adj_close"};
    // Record writer configuration
    DelimitedRecordHiveMapper mapper = new DelimitedRecordHiveMapper()
            .withColumnFields(new Fields(colNames))
            .withPartitionFields(new Fields(partNames));
    HiveOptions hiveOptions = new HiveOptions(metaStoreURI, dbName, tblName, mapper)
            .withTxnsPerBatch(2)
            .withBatchSize(100)
            .withIdleTimeout(10)
            .withCallTimeout(10000000);
            //.withKerberosKeytab(path_to_keytab)
            //.withKerberosPrincipal(krb_principal);

    // Wiring: Kafka spout -> stock processing bolt -> Hive bolt.
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(KAFKA_SPOUT_ID, kafkaSpout);
    builder.setBolt(STOCK_PROCESS_BOLT_ID, new StockDataBolt()).shuffleGrouping(KAFKA_SPOUT_ID);
    builder.setBolt(HIVE_BOLT_ID, new HiveBolt(hiveOptions)).shuffleGrouping(STOCK_PROCESS_BOLT_ID);

    String topologyName = "StormHiveStreamingTopo";
    Config config = new Config();
    config.setNumWorkers(1);
    config.setMessageTimeoutSecs(60);
    try {
        StormSubmitter.submitTopology(topologyName, config, builder.createTopology());
    } catch (AlreadyAliveException | InvalidTopologyException | AuthorizationException ex) {
        Logger.getLogger(Topology.class.getName()).log(Level.SEVERE, null, ex);
    }
}