Java Code Examples for org.apache.storm.Config#setMessageTimeoutSecs()

The following examples show how to use org.apache.storm.Config#setMessageTimeoutSecs(). They are drawn from open source projects; the source file and license are noted above each example.
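Before the project examples, here is a minimal, self-contained sketch (class name and values are hypothetical) showing the call in isolation. setMessageTimeoutSecs(int) sets Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, the maximum number of seconds Storm waits for a tuple tree to be fully acked before the originating spout's fail() is called.

import org.apache.storm.Config;

public class MessageTimeoutSketch {
    public static void main(String[] args) {
        Config conf = new Config();
        // Fail any tuple tree that is not fully acked within 60 seconds (hypothetical value)
        conf.setMessageTimeoutSecs(60);
        // Cap in-flight tuples so replays after a timeout cannot flood the topology (hypothetical value)
        conf.setMaxSpoutPending(1000);
        System.out.println(conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS)); // 60
    }
}

The examples below follow the same pattern, typically reading the timeout from a properties file and pairing it with setMaxSpoutPending so that replays of timed-out tuples cannot overload the topology.
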
Example 1
Source File: FullPullerTopology.java    From DBus with Apache License 2.0
private void start(StormTopology topology, boolean runAsLocal) throws Exception {
    Config conf = new Config();
    conf.put(FullPullConstants.FULL_SPLITTER_TOPOLOGY_ID, fullSplitterTopologyId);
    conf.put(FullPullConstants.FULL_PULLER_TOPOLOGY_ID, fullPullerTopologyId);
    conf.put(FullPullConstants.DS_NAME, topologyId);
    conf.put(FullPullConstants.ZKCONNECT, zkConnect);
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, commonConfSplit.getProperty(FullPullConstants.TOPOLOGY_WORKER_CHILDOPTS));
    // Set the message timeout so that each shard can finish pulling its data within that window
    conf.setMessageTimeoutSecs(Integer.parseInt(commonConfSplit.getProperty(FullPullConstants.STORM_MESSAGE_TIMEOUT)));
    conf.setMaxSpoutPending(Integer.parseInt(commonConfSplit.getProperty(FullPullConstants.STORM_MAX_SPOUT_PENDING)));
    conf.setNumWorkers(Integer.parseInt(commonConfSplit.getProperty(FullPullConstants.STORM_NUM_WORKERS)));
    conf.setDebug(true);

    if (runAsLocal) {
        conf.setMaxTaskParallelism(3);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyName, conf, topology);
    } else {
        StormSubmitter.submitTopology(topologyName, conf, topology);
    }
}
 
Example 2
Source File: DBusLogProcessorTopology.java    From DBus with Apache License 2.0
private void start(StormTopology topology, boolean runAsLocal) throws Exception {
    Config conf = new Config();
    conf.put(com.creditease.dbus.commons.Constants.ZOOKEEPER_SERVERS, zkConnect);
    conf.put(Constants.TOPOLOGY_ID, topologyId);
    conf.setMessageTimeoutSecs(Integer.parseInt(properties.getProperty(Constants.LOG_MESSAGE_TIMEOUT)));
    //conf.setMaxSpoutPending(30);
    conf.setDebug(true);
    conf.setNumWorkers(Integer.parseInt(properties.getProperty(Constants.LOG_NUMWORKERS)));

    if (runAsLocal) {
        conf.setMaxTaskParallelism(10);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyName, conf, topology);
    } else {
        StormSubmitter.submitTopology(topologyName, conf, topology);
    }
}
 
Example 3
Source File: SinkTopology.java    From DBus with Apache License 2.0
private void start(StormTopology topology, boolean runAsLocal) throws Exception {
    Config conf = new Config();
    conf.put(Constants.ZOOKEEPER_SERVERS, zkConnect);
    conf.put(Constants.TOPOLOGY_ID, topologyId);
    conf.put(Constants.SINK_TYPE, sinkType);
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, sinkerConf.getProperty(SinkerConstants.TOPOLOGY_WORKER_CHILDOPTS));

    conf.setMessageTimeoutSecs(Integer.parseInt(sinkerConf.getProperty(SinkerConstants.STORM_MESSAGE_TIMEOUT)));
    conf.setMaxSpoutPending(Integer.parseInt(sinkerConf.getProperty(SinkerConstants.STORM_MAX_SPOUT_PENDING)));
    conf.setDebug(true);
    conf.setNumWorkers(Integer.parseInt(sinkerConf.getProperty(SinkerConstants.STORM_NUM_WORKERS)));
    if (runAsLocal) {
        conf.setMaxTaskParallelism(10);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyName, conf, topology);
    } else {
        StormSubmitter.submitTopology(topologyName, conf, topology);
    }
}
 
Example 4
Source File: MysqlExtractorTopology.java    From DBus with Apache License 2.0
public void buildTopology(String[] args) {
    //TODO
    if (parseCommandArgs(args) != 0) {
        return;
    }
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("CanalClientSpout", new CanalClientSpout(), 1);
    builder.setBolt("KafkaProducerBolt", new KafkaProducerBolt(), 1).shuffleGrouping("CanalClientSpout");

    Config conf = new Config();
    conf.put(Constants.ZOOKEEPER_SERVERS, zkServers);
    conf.put(Constants.EXTRACTOR_TOPOLOGY_ID, extractorTopologyId);
    logger.info(Constants.ZOOKEEPER_SERVERS + "=" + zkServers);
    logger.info(Constants.EXTRACTOR_TOPOLOGY_ID + "=" + extractorTopologyId);
    conf.setNumWorkers(1);
    conf.setMaxSpoutPending(50);
    conf.setMessageTimeoutSecs(120);
    if (!runAsLocal) {
        conf.setDebug(false);
        try {
            //StormSubmitter.submitTopology("extractorTopologyId", conf, builder.createTopology());
            StormSubmitter.submitTopology(extractorTopologyId, conf, builder.createTopology());
        } catch (Exception e) {
            e.printStackTrace();
        }
    } else {
        conf.setDebug(false);
        LocalCluster cluster = new LocalCluster();
        //cluster.submitTopology("extractorTopologyId", conf, builder.createTopology());
        cluster.submitTopology(extractorTopologyId, conf, builder.createTopology());
    }
}
 
Example 5
Source File: DBusRouterTopology.java    From DBus with Apache License 2.0
private void start(StormTopology topology, boolean runAsLocal) throws Exception {
    Config conf = new Config();
    conf.put(com.creditease.dbus.commons.Constants.ZOOKEEPER_SERVERS, zkConnect);
    conf.put(Constants.TOPOLOGY_ID, topologyId);
    conf.put(Constants.TOPOLOGY_ALIAS, alias);
    conf.put(Constants.ROUTER_PROJECT_NAME, projectName);

    String workerChildOpts = routerConf.getProperty(DBusRouterConstants.STORM_TOPOLOGY_WORKER_CHILDOPTS, "-Xmx2g");
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, workerChildOpts);

    int msgTimeout = Integer.valueOf(routerConf.getProperty(DBusRouterConstants.STORM_MESSAGE_TIMEOUT, "10"));
    conf.setMessageTimeoutSecs(msgTimeout);

    int maxSpoutPending = Integer.valueOf(routerConf.getProperty(DBusRouterConstants.STORM_MAX_SPOUT_PENDING, "100"));
    conf.setMaxSpoutPending(maxSpoutPending);
    conf.setDebug(true);

    int numWorks = Integer.valueOf(routerConf.getProperty(DBusRouterConstants.STORM_NUM_WORKS, "1"));
    conf.setNumWorkers(numWorks);

    if (runAsLocal) {
        conf.setMaxTaskParallelism(10);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyName, conf, topology);
    } else {
        StormSubmitter.submitTopology(topologyName, conf, topology);
    }
}
 
Example 6
Source File: ExclamationTopology.java    From incubator-heron with Apache License 2.0
public static void main(String[] args) throws Exception {
  TopologyBuilder builder = new TopologyBuilder();
  int parallelism = 2;

  int spouts = parallelism;
  builder.setSpout("word", new TestWordSpout(Duration.ofMillis(50)), spouts);
  int bolts = 2 * parallelism;
  builder.setBolt("exclaim1", new ExclamationBolt(), bolts)
      .shuffleGrouping("word");

  Config conf = new Config();
  conf.setDebug(true);
  conf.setMaxSpoutPending(10);
  conf.setMessageTimeoutSecs(600);
  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");

  if (args != null && args.length > 0) {
    conf.setNumWorkers(parallelism);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
  } else {
    System.out.println("Topology name not provided as an argument, running in simulator mode.");
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.killTopology("test");
    cluster.shutdown();
  }
}
 
Example 7
Source File: DispatcherAppenderTopology.java    From DBus with Apache License 2.0
private void start(StormTopology topology, boolean runAsLocal) throws Exception {

        Config conf = new Config();

        // Startup type is all or dispatcher
        if (topologyType.equals(Constants.TopologyType.ALL) || topologyType.equals(Constants.TopologyType.DISPATCHER)) {
            /**
             * dispatcher configuration
             */

            conf.put(com.creditease.dbus.commons.Constants.ZOOKEEPER_SERVERS, zookeeper);
            conf.put(com.creditease.dbus.commons.Constants.TOPOLOGY_ID, dispatcherTopologyId);
            logger.info(com.creditease.dbus.commons.Constants.ZOOKEEPER_SERVERS + "=" + zookeeper);
            logger.info(com.creditease.dbus.commons.Constants.TOPOLOGY_ID + "=" + dispatcherTopologyId);
        }

        // Startup type is all or appender
        if (topologyType.equals(Constants.TopologyType.ALL) || topologyType.equals(Constants.TopologyType.APPENDER)) {

            /**
             * appender configuration
             */
            conf.put(Constants.StormConfigKey.TOPOLOGY_ID, appenderTopologyId);
            conf.put(Constants.StormConfigKey.ZKCONNECT, zookeeper);
            conf.put(Constants.StormConfigKey.DATASOURCE, datasource);
        }

//        conf.put(Config.TOPOLOGY_TRANSFER_BUFFER_SIZE, 4096);
//        conf.put(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE, 4096);
//        conf.put(Config.TOPOLOGY_EXECUTOR_SEND_BUFFER_SIZE, 4096);

        conf.setDebug(true);

        conf.setNumAckers(1);
        // Set the number of workers
        conf.setNumWorkers(1);
        // Maximum number of pending tuples (emitted but not yet fully processed); 50 if not configured
        int maxSpoutPending = getConfigureValueWithDefault(Constants.ConfigureKey.MAX_SPOUT_PENDING, 50);
        conf.setMaxSpoutPending(maxSpoutPending);
        // Fail a tuple if it has not been fully processed within this many seconds
        conf.setMessageTimeoutSecs(120);

        String opts = getWorkerChildopts();
        if (opts != null && opts.trim().length() > 0) {
            conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, opts);
        }

//        conf.put(Config.TOPOLOGY_SKIP_MISSING_KRYO_REGISTRATIONS, true);
//        conf.registerSerialization(org.apache.avro.util.Utf8.class);
//        conf.registerSerialization(com.creditease.dbus.commons.DBusConsumerRecord.class);
//        conf.registerSerialization(org.apache.kafka.common.record.TimestampType.class);
//        conf.registerSerialization(com.creditease.dbus.stream.common.appender.bean.EmitData.class);
//        conf.registerSerialization(com.creditease.dbus.stream.common.appender.enums.Command.class);
//        conf.registerSerialization(org.apache.avro.generic.GenericData.class);
//        conf.registerSerialization(com.creditease.dbus.stream.oracle.appender.avro.GenericData.class);
//        conf.registerSerialization(com.creditease.dbus.commons.DbusMessage12.class);
//        conf.registerSerialization(com.creditease.dbus.commons.DbusMessage12.Schema12.class);
//        conf.registerSerialization(com.creditease.dbus.commons.DbusMessage13.Schema13.class);
//        conf.registerSerialization(com.creditease.dbus.commons.DbusMessage13.class);
//        conf.registerSerialization(com.creditease.dbus.commons.DbusMessage.Field.class);
//        conf.registerSerialization(com.creditease.dbus.commons.DbusMessage.Payload.class);
//        conf.registerSerialization(com.creditease.dbus.commons.DbusMessage.Protocol.class);
//        conf.registerSerialization(com.creditease.dbus.commons.DbusMessage.ProtocolType.class);
//        conf.registerSerialization(com.creditease.dbus.stream.oracle.appender.bolt.processor.appender.OraWrapperData.class);
//        conf.registerSerialization(com.creditease.dbus.stream.common.appender.spout.cmds.TopicResumeCmd.class);

        if (runAsLocal) {
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology(topologyId, conf, topology);
            /*String cmd;
            do {
                cmd = System.console().readLine();
            } while (!cmd.equals("exit"));
            cluster.shutdown();*/
        } else {
            StormSubmitter.submitTopology(topologyId, conf, topology);
        }
    }
 
Example 8
Source File: LogLevelCountTopology.java    From nifi-streaming-examples with Apache License 2.0
public static void main( String[] args ) throws Exception {
    String propertiesFile = DEFAULT_PROPERTIES_FILE;
    if (args != null && args.length == 1 && args[0] != null) {
        propertiesFile = args[0];
    }

    LogLevelCountProperties props = new LogLevelCountProperties(propertiesFile);

    int windowMillis = props.getStormWindowMillis();
    double rateThreshold = props.getStormRateThreshold();

    // Build the spout for pulling data from NiFi and pull out the log level into a tuple field
    NiFiSpout niFiSpout = new NiFiSpout(getSourceConfig(props), Collections.singletonList(props.getLogLevelAttribute()));

    // Build the bolt for counting log levels over a tumbling window
    BaseWindowedBolt logLevelWindowBolt = new LogLevelWindowBolt(props.getLogLevelAttribute())
            .withTumblingWindow(new BaseWindowedBolt.Duration(windowMillis, TimeUnit.MILLISECONDS));

    // Build the bolt for pushing results back to NiFi
    NiFiDataPacketBuilder dictionaryBuilder = new DictionaryBuilder(windowMillis, rateThreshold);
    NiFiBolt niFiBolt = new NiFiBolt(getSinkConfig(props), dictionaryBuilder, 10).withBatchSize(1);

    // Build the topology of NiFiSpout -> LogLevelWindowBolt -> NiFiBolt
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("nifiInput", niFiSpout);
    builder.setBolt("logLevels", logLevelWindowBolt).shuffleGrouping("nifiInput");
    builder.setBolt("nifiOutput", niFiBolt).shuffleGrouping("logLevels");

    // Submit the topology
    Config conf = new Config();
    conf.setDebug(true);

    // Need to set the message timeout to twice the window size in seconds
    conf.setMessageTimeoutSecs((props.getStormWindowMillis()/1000) * 2);

    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    }
    else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("log-levels", conf, builder.createTopology());
        Utils.sleep(130000);
        cluster.killTopology("log-levels");
        cluster.shutdown();
    }
}