Java Code Examples for backtype.storm.Config#setNumAckers()

The following examples show how to use backtype.storm.Config#setNumAckers(). They are extracted from open source projects; the source file, project, and license are noted above each example.
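
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the class name, topology name, and component wiring are placeholders) showing the two equivalent ways the API exposes this setting: the instance method conf.setNumAckers(int) and the static helper Config.setNumAckers(Map, int), both of which appear in the examples that follow.

import backtype.storm.Config;
import backtype.storm.StormSubmitter;
import backtype.storm.topology.TopologyBuilder;

public class NumAckersSketch {
  public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    // ... declare spouts and bolts on the builder here ...

    Config conf = new Config();

    // Instance method: run one acker executor so tuple trees are tracked
    // (at-least-once processing); setting this to 0 disables acking.
    conf.setNumAckers(1);

    // Equivalent static helper, as used in some of the examples below:
    // Config.setNumAckers(conf, 1);

    conf.setNumWorkers(2);
    StormSubmitter.submitTopology("num-ackers-sketch", conf, builder.createTopology());
  }
}
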
Example 1
Source File: MultiStageAckingTopology.java    From incubator-heron with Apache License 2.0
public static void main(String[] args) throws Exception {
  if (args.length != 1) {
    throw new RuntimeException("Please specify the name of the topology");
  }
  TopologyBuilder builder = new TopologyBuilder();

  int parallelism = 2;
  builder.setSpout("word", new AckingTestWordSpout(), parallelism);
  builder.setBolt("exclaim1", new ExclamationBolt(true), parallelism)
      .shuffleGrouping("word");
  builder.setBolt("exclaim2", new ExclamationBolt(false), parallelism)
      .shuffleGrouping("exclaim1");

  Config conf = new Config();
  conf.setDebug(true);

  // Put an arbitrarily large number here if you don't want to slow the topology down
  conf.setMaxSpoutPending(1000 * 1000 * 1000);

  // To enable acking, set the number of acker executors to at least one
  conf.setNumAckers(1);

  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");

  conf.setNumWorkers(parallelism);
  StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
 
Example 2
Source File: TaskHookTopology.java    From incubator-heron with Apache License 2.0
public static void main(String[] args) throws Exception {
  if (args.length != 1) {
    throw new RuntimeException("Specify topology name");
  }
  TopologyBuilder builder = new TopologyBuilder();

  builder.setSpout("word", new AckingTestWordSpout(), 2);
  builder.setBolt("count", new CountBolt(), 2)
      .shuffleGrouping("word");

  Config conf = new Config();
  conf.setDebug(true);
  // Put an arbitrarily large number here if you don't want to slow the topology down
  conf.setMaxSpoutPending(1000 * 1000 * 1000);
  // To enable acking, set the number of acker executors to at least one
  conf.setNumAckers(1);
  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");

  // Set the task hook
  List<String> taskHooks = new LinkedList<>();
  taskHooks.add("org.apache.heron.examples.TaskHookTopology$TestTaskHook");
  org.apache.heron.api.Config.setAutoTaskHooks(conf, taskHooks);

  // component resource configuration
  org.apache.heron.api.Config.setComponentRam(conf, "word", ByteAmount.fromMegabytes(512));
  org.apache.heron.api.Config.setComponentRam(conf, "count", ByteAmount.fromMegabytes(512));

  // container resource configuration
  org.apache.heron.api.Config.setContainerDiskRequested(conf, ByteAmount.fromGigabytes(2));
  org.apache.heron.api.Config.setContainerRamRequested(conf, ByteAmount.fromGigabytes(2));
  org.apache.heron.api.Config.setContainerCpuRequested(conf, 2);

  conf.setNumWorkers(2);
  StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
 
Example 3
Source File: SequenceTopologyTool.java    From jstorm with Apache License 2.0
public StormTopology buildTopology() {
    Config conf = getConf();
    TopologyBuilder builder = new TopologyBuilder();
    
    int spout_Parallelism_hint = JStormUtils.parseInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
    int bolt_Parallelism_hint = JStormUtils.parseInt(conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2);
    
    builder.setSpout(SequenceTopologyDef.SEQUENCE_SPOUT_NAME, new SequenceSpout(), spout_Parallelism_hint);
    
    boolean isEnableSplit = JStormUtils.parseBoolean(conf.get("enable.split"), false);
    
    if (!isEnableSplit) {
        builder.setBolt(SequenceTopologyDef.TOTAL_BOLT_NAME, new TotalCount(), bolt_Parallelism_hint)
                .localFirstGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
    } else {
        
        builder.setBolt(SequenceTopologyDef.SPLIT_BOLT_NAME, new SplitRecord(), bolt_Parallelism_hint)
                .localOrShuffleGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
                
        builder.setBolt(SequenceTopologyDef.TRADE_BOLT_NAME, new PairCount(), bolt_Parallelism_hint)
                .shuffleGrouping(SequenceTopologyDef.SPLIT_BOLT_NAME, SequenceTopologyDef.TRADE_STREAM_ID);
        builder.setBolt(SequenceTopologyDef.CUSTOMER_BOLT_NAME, new PairCount(), bolt_Parallelism_hint)
                .shuffleGrouping(SequenceTopologyDef.SPLIT_BOLT_NAME, SequenceTopologyDef.CUSTOMER_STREAM_ID);
                
        builder.setBolt(SequenceTopologyDef.MERGE_BOLT_NAME, new MergeRecord(), bolt_Parallelism_hint)
                .fieldsGrouping(SequenceTopologyDef.TRADE_BOLT_NAME, new Fields("ID"))
                .fieldsGrouping(SequenceTopologyDef.CUSTOMER_BOLT_NAME, new Fields("ID"));
                
        builder.setBolt(SequenceTopologyDef.TOTAL_BOLT_NAME, new TotalCount(), bolt_Parallelism_hint)
                .noneGrouping(SequenceTopologyDef.MERGE_BOLT_NAME);
    }
    
    boolean kryoEnable = JStormUtils.parseBoolean(conf.get("kryo.enable"), false);
    if (kryoEnable) {
        System.out.println("Use Kryo ");
        boolean useJavaSer = JStormUtils.parseBoolean(conf.get("fall.back.on.java.serialization"), true);
        
        Config.setFallBackOnJavaSerialization(conf, useJavaSer);
        
        Config.registerSerialization(conf, TradeCustomer.class);
        Config.registerSerialization(conf, Pair.class);
    }
    int ackerNum = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_ACKER_EXECUTORS), 1);
    Config.setNumAckers(conf, ackerNum);
    
    int workerNum = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_WORKERS), 20);
    conf.put(Config.TOPOLOGY_WORKERS, workerNum);
    
    return builder.createTopology();
}
 
Example 4
Source File: SequenceTopology.java    From jstorm with Apache License 2.0
public static void SetBuilder(TopologyBuilder builder, Map conf) {
    
    int spout_Parallelism_hint = JStormUtils.parseInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
    int bolt_Parallelism_hint = JStormUtils.parseInt(conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2);
    
    builder.setSpout(SequenceTopologyDef.SEQUENCE_SPOUT_NAME, new SequenceSpout(), spout_Parallelism_hint);
    
    boolean isEnableSplit = JStormUtils.parseBoolean(conf.get("enable.split"), false);
    
    if (!isEnableSplit) {
        BoltDeclarer boltDeclarer = builder.setBolt(SequenceTopologyDef.TOTAL_BOLT_NAME, new TotalCount(),
                bolt_Parallelism_hint);
                
        // localFirstGrouping is only for jstorm
        // boltDeclarer.localFirstGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
        boltDeclarer.shuffleGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME)
                .allGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME, SequenceTopologyDef.CONTROL_STREAM_ID)
                .addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 3);
    } else {
        
        builder.setBolt(SequenceTopologyDef.SPLIT_BOLT_NAME, new SplitRecord(), bolt_Parallelism_hint)
                .localOrShuffleGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
                
        builder.setBolt(SequenceTopologyDef.TRADE_BOLT_NAME, new PairCount(), bolt_Parallelism_hint)
                .shuffleGrouping(SequenceTopologyDef.SPLIT_BOLT_NAME, SequenceTopologyDef.TRADE_STREAM_ID);
        builder.setBolt(SequenceTopologyDef.CUSTOMER_BOLT_NAME, new PairCount(), bolt_Parallelism_hint)
                .shuffleGrouping(SequenceTopologyDef.SPLIT_BOLT_NAME, SequenceTopologyDef.CUSTOMER_STREAM_ID);
                
        builder.setBolt(SequenceTopologyDef.MERGE_BOLT_NAME, new MergeRecord(), bolt_Parallelism_hint)
                .fieldsGrouping(SequenceTopologyDef.TRADE_BOLT_NAME, new Fields("ID"))
                .fieldsGrouping(SequenceTopologyDef.CUSTOMER_BOLT_NAME, new Fields("ID"));
                
        builder.setBolt(SequenceTopologyDef.TOTAL_BOLT_NAME, new TotalCount(), bolt_Parallelism_hint)
                .noneGrouping(SequenceTopologyDef.MERGE_BOLT_NAME);
    }
    
    boolean kryoEnable = JStormUtils.parseBoolean(conf.get("kryo.enable"), false);
    if (kryoEnable) {
        System.out.println("Use Kryo ");
        boolean useJavaSer = JStormUtils.parseBoolean(conf.get("fall.back.on.java.serialization"), true);
        
        Config.setFallBackOnJavaSerialization(conf, useJavaSer);
        
        Config.registerSerialization(conf, TradeCustomer.class, TradeCustomerSerializer.class);
        Config.registerSerialization(conf, Pair.class, PairSerializer.class);
    }
    
    // conf.put(Config.TOPOLOGY_DEBUG, false);
    // conf.put(ConfigExtension.TOPOLOGY_DEBUG_RECV_TUPLE, false);
    // conf.put(Config.STORM_LOCAL_MODE_ZMQ, false);
    
    int ackerNum = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_ACKER_EXECUTORS), 1);
    Config.setNumAckers(conf, ackerNum);
    // conf.put(Config.TOPOLOGY_MAX_TASK_PARALLELISM, 6);
    // conf.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 20);
    // conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 1);
    
    int workerNum = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_WORKERS), 20);
    conf.put(Config.TOPOLOGY_WORKERS, workerNum);
    
}
 
Example 5
Source File: SequenceTopologyTest.java    From jstorm with Apache License 2.0
    @Test
    public void testSequenceTopology()
    {
        TopologyBuilder topologyBuilder = new TopologyBuilder();
        topologyBuilder.setSpout(SequenceTopologyDef.SEQUENCE_SPOUT_NAME, new SequenceTestSpout(), SPOUT_PARALLELISM_HINT);

        topologyBuilder.setBolt(SequenceTopologyDef.SPLIT_BOLT_NAME, new SequenceTestSplitRecord(), BOLT_PARALLELISM_HINT)
                .localOrShuffleGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME);

        topologyBuilder.setBolt(SequenceTopologyDef.TRADE_BOLT_NAME, new SequenceTestPairCount(), BOLT_PARALLELISM_HINT)
                .shuffleGrouping(SequenceTopologyDef.SPLIT_BOLT_NAME, SequenceTopologyDef.TRADE_STREAM_ID);

        topologyBuilder.setBolt(SequenceTopologyDef.CUSTOMER_BOLT_NAME, new SequenceTestPairCount(), BOLT_PARALLELISM_HINT)
                .shuffleGrouping(SequenceTopologyDef.SPLIT_BOLT_NAME, SequenceTopologyDef.CUSTOMER_STREAM_ID);

        topologyBuilder.setBolt(SequenceTopologyDef.MERGE_BOLT_NAME, new SequenceTestMergeRecord(), BOLT_PARALLELISM_HINT)
                .fieldsGrouping(SequenceTopologyDef.TRADE_BOLT_NAME, new Fields("ID"))
                .fieldsGrouping(SequenceTopologyDef.CUSTOMER_BOLT_NAME, new Fields("ID"));

        topologyBuilder.setBolt(SequenceTopologyDef.TOTAL_BOLT_NAME, new SequenceTestTotalCount(), BOLT_PARALLELISM_HINT)
                .noneGrouping(SequenceTopologyDef.MERGE_BOLT_NAME);

        Map conf = new HashMap();                               //use config in detail.yaml
//        Config.setFallBackOnJavaSerialization(conf, true);      //fall.back.on.java.serialization: true
//                                                                //enable.split: true
//        Config.registerSerialization(conf, TradeCustomer.class, TradeCustomerSerializer.class);
//        Config.registerSerialization(conf, Pair.class, PairSerializer.class);
        Config.setNumAckers(conf, 1);
        Config.setNumWorkers(conf, 3);
        conf.put("spout.max.sending.num", SPOUT_MAX_SEND_NUM);  //set a limit for the spout to get a precise
                                                                //number to make sure the topology works well.
        conf.put(Config.TOPOLOGY_NAME, "SequenceTopologyTest");

        // The following set is only used by JStormUnitTestMetricValidator to pick the relevant
        // metric data out of all the metrics; if you are not using JStormUnitTestMetricValidator,
        // it is not needed. Each entry is a key registered for a metric in the topology, and the
        // validator passes the matching values to the validateMetrics() callback below.
        Set<String> userDefineMetrics = new HashSet<String>();
        userDefineMetrics.add(SequenceTestMetricsDef.METRIC_SPOUT_EMIT);
        userDefineMetrics.add(SequenceTestMetricsDef.METRIC_SPOUT_SUCCESS);
        userDefineMetrics.add(SequenceTestMetricsDef.METRIC_SPOUT_FAIL);
        userDefineMetrics.add(SequenceTestMetricsDef.METRIC_SPOUT_TRADE_SUM);
        userDefineMetrics.add(SequenceTestMetricsDef.METRIC_SPOUT_CUSTOMER_SUM);
        userDefineMetrics.add(SequenceTestMetricsDef.METRIC_SPLIT_EMIT);
        userDefineMetrics.add(SequenceTestMetricsDef.METRIC_PAIR_TRADE_EMIT);
        userDefineMetrics.add(SequenceTestMetricsDef.METRIC_PAIR_CUSTOMER_EMIT);
        userDefineMetrics.add(SequenceTestMetricsDef.METRIC_MERGE_EMIT);
        userDefineMetrics.add(SequenceTestMetricsDef.METRIC_TOTAL_EXECUTE);
        userDefineMetrics.add(SequenceTestMetricsDef.METRIC_TOTAL_TRADE_SUM);
        userDefineMetrics.add(SequenceTestMetricsDef.METRIC_TOTAL_CUSTOMER_SUM);

        JStormUnitTestMetricValidator validator =  new JStormUnitTestMetricValidator(userDefineMetrics) {
            @Override
            public boolean validateMetrics(Map<String, Double> metrics)
            {
                for(Map.Entry<String, Double> entry : metrics.entrySet())
                    LOG.info("user define metric Key = " + entry.getKey() + " Value = " + entry.getValue());

                int spoutEmit = (int)(metrics.get(SequenceTestMetricsDef.METRIC_SPOUT_EMIT)).doubleValue();
                int spoutSuccess = (int)(metrics.get(SequenceTestMetricsDef.METRIC_SPOUT_SUCCESS)).doubleValue();
                int spoutFail = (int)(metrics.get(SequenceTestMetricsDef.METRIC_SPOUT_FAIL)).doubleValue();
                long spoutTradeSum = (long)(metrics.get(SequenceTestMetricsDef.METRIC_SPOUT_TRADE_SUM)).doubleValue();
                long spoutCustomerSum = (long)(metrics.get(SequenceTestMetricsDef.METRIC_SPOUT_CUSTOMER_SUM)).doubleValue();

                int splitEmit = (int)(metrics.get(SequenceTestMetricsDef.METRIC_SPLIT_EMIT)).doubleValue();
                int pairTradeEmit = (int)(metrics.get(SequenceTestMetricsDef.METRIC_PAIR_TRADE_EMIT)).doubleValue();
                int pairCustomerEmit = (int)(metrics.get(SequenceTestMetricsDef.METRIC_PAIR_CUSTOMER_EMIT)).doubleValue();
                int mergeEmit = (int)(metrics.get(SequenceTestMetricsDef.METRIC_MERGE_EMIT)).doubleValue();

                int totalExecute = (int)(metrics.get(SequenceTestMetricsDef.METRIC_TOTAL_EXECUTE)).doubleValue();
                long totalTradeSum = (long)(metrics.get(SequenceTestMetricsDef.METRIC_TOTAL_TRADE_SUM)).doubleValue();
                long totalCustomerSum = (long)(metrics.get(SequenceTestMetricsDef.METRIC_TOTAL_CUSTOMER_SUM)).doubleValue();

                assertEquals(SPOUT_MAX_SEND_NUM, spoutEmit);
                assertEquals(spoutEmit, spoutSuccess);
                assertEquals(0, spoutFail);
                assertEquals(2*spoutEmit, splitEmit);
                assertEquals(splitEmit, pairTradeEmit*2);
                assertEquals(splitEmit, pairCustomerEmit*2);
                assertEquals(splitEmit, mergeEmit*2);
                assertEquals(mergeEmit, totalExecute);
                assertEquals(spoutTradeSum, totalTradeSum);
                assertEquals(spoutCustomerSum, totalCustomerSum);
                return true;
            }
        };

        // The 150-second timeout below is recommended; it should be at least 120 seconds, because
        // the metric data is only collected about every 60 seconds and the timing is not precise.
        boolean result = JStormUnitTestRunner.submitTopology(topologyBuilder.createTopology(), conf, 150, validator);
        assertTrue("Topology should pass the validator", result);
    }