Java Code Examples for org.apache.storm.topology.TopologyBuilder#createTopology()

The following examples show how to use org.apache.storm.topology.TopologyBuilder#createTopology(). They are taken from open source projects; the source file, project, and license are listed above each example.
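Before the project-specific examples, here is a minimal sketch of the pattern they all share: declare spouts and bolts on a TopologyBuilder, call createTopology() to obtain the immutable StormTopology, and submit it. The sketch assumes the Storm test helpers TestWordSpout, TestWordCounter, and TestGlobalCount (also used in Examples 8 and 17) are on the classpath; the component names and topology name are illustrative only.

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.testing.TestGlobalCount;
import org.apache.storm.testing.TestWordCounter;
import org.apache.storm.testing.TestWordSpout;
import org.apache.storm.topology.TopologyBuilder;

public class CreateTopologyExample {
    public static void main(String[] args) throws Exception {
        // Declare the DAG: one spout feeding two bolts in sequence.
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("words", new TestWordSpout(), 2);
        builder.setBolt("count", new TestWordCounter(), 2).shuffleGrouping("words");
        builder.setBolt("globalCount", new TestGlobalCount(), 1).shuffleGrouping("count");

        // createTopology() turns the declared components and groupings
        // into an immutable StormTopology structure.
        StormTopology topology = builder.createTopology();

        // Run it briefly on an in-process cluster, as several examples below do.
        Config conf = new Config();
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("create-topology-example", conf, topology);
        Thread.sleep(10000L);
        cluster.killTopology("create-topology-example");
        cluster.shutdown();
    }
}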
Example 1
Source File: TopologyFactoryBean.java    From breeze with Apache License 2.0
private StormTopology build() {
	run();
	verify();

	Map<String,BoltDeclarer> declaredBolts = new HashMap<>();

	TopologyBuilder builder = new TopologyBuilder();
	for (Map.Entry<ConfiguredSpout,List<ConfiguredBolt>> line : entrySet()) {
		ConfiguredSpout spout = line.getKey();
		String lastId = spout.getId();
		String streamId = spout.getOutputStreamId();
		builder.setSpout(lastId, spout, spout.getParallelism());
		for (ConfiguredBolt bolt : line.getValue()) {
			String id = bolt.getId();
			BoltDeclarer declarer = declaredBolts.get(id);
			if (declarer == null)
				declarer = builder.setBolt(id, bolt, bolt.getParallelism());
			declarer.noneGrouping(lastId, streamId);
			if (declaredBolts.put(id, declarer) != null) break;
			lastId = id;
			streamId = bolt.getOutputStreamId();
		}
	}

	return builder.createTopology();
}
 
Example 2
Source File: ParserBoltTest.java    From logparser with Apache License 2.0
@Test
public void runRest() throws InterruptedException, NoSuchMethodException {
    TopologyBuilder builder = new TopologyBuilder();

    // ----------
    builder.setSpout("Spout", new TestApacheLogsSpout());
    // ----------
    HttpdLoglineParserBolt parserBolt = new HttpdLoglineParserBolt(TestCase.getLogFormat(), INPUT_FIELD_NAME, OUTPUT_FIELD_NAME);

    builder.setBolt("Parser", parserBolt, 1).shuffleGrouping("Spout");
    // ----------
    builder.setBolt("Printer", new ValidateOutput(), 1).shuffleGrouping("Parser");
    // ----------

    StormTopology topology = builder.createTopology();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("Unit test", new HashMap<String, String>(), topology);
    Thread.sleep(10000L); // Run for 10 seconds
    cluster.killTopology("Unit test");
    cluster.shutdown();

}
 
Example 3
Source File: SinkTopology.java    From DBus with Apache License 2.0
private StormTopology buildTopology() throws Exception {
    loadSinkerConf();

    Integer spoutSize = Integer.parseInt(sinkerConf.getProperty(SinkerConstants.STORM_KAFKA_READ_SPOUT_PARALLEL));
    Integer boltSize = Integer.parseInt(sinkerConf.getProperty(SinkerConstants.STORM_WRITE_BOUT_PARALLEL));
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("SinkerKafkaReadSpout", new SinkerKafkaReadSpout(), spoutSize);
    builder.setBolt("SinkerWriteBolt", new SinkerWriteBolt(), boltSize)
            .fieldsGrouping("SinkerKafkaReadSpout", "dataStream", new Fields("ns"))
            .allGrouping("SinkerKafkaReadSpout", "ctrlStream");

    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(1);

    return builder.createTopology();
}
 
Example 4
Source File: StatisticTopology.java    From storm-statistic with Apache License 2.0
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    /**
     * Set up the DAG (directed acyclic graph) of spouts and bolts
     */
    KafkaSpout kafkaSpout = createKafkaSpout();
    builder.setSpout("id_kafka_spout", kafkaSpout);
    builder.setBolt("id_convertIp_bolt", new ConvertIPBolt()).shuffleGrouping("id_kafka_spout"); // the grouping determines how data flows from the upstream component
    builder.setBolt("id_statistic_bolt", new StatisticBolt()).shuffleGrouping("id_convertIp_bolt"); // the grouping determines how data flows from the upstream component
    // Build the topology from the builder
    StormTopology topology = builder.createTopology();
    String topologyName = KafkaStormTopology.class.getSimpleName();  // name of the topology
    Config config = new Config();   // Config extends HashMap and wraps some basic configuration options

    // Launch the topology: use LocalCluster for local mode, StormSubmitter for a cluster
    if (args == null || args.length < 1) {  // no arguments: local mode; otherwise: cluster mode
        LocalCluster localCluster = new LocalCluster(); // local development mode uses a LocalCluster instance
        localCluster.submitTopology(topologyName, config, topology);
    } else {
        StormSubmitter.submitTopology(topologyName, config, topology);
    }
}
 
Example 5
Source File: WordCountTopology.java    From twister2 with Apache License 2.0
@Override
public StormTopology buildTopology() {
  int parallelism = config.getIntegerValue("parallelism", 1);

  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("word", new WordSpout(), parallelism);
  builder.setBolt("consumer", new ConsumerBolt(), parallelism)
      .fieldsGrouping("word", new Fields("word"));

  return builder.createTopology();
}
 
Example 6
Source File: SplitJoinTopologyTest.java    From streamline with Apache License 2.0
protected StormTopology createTopology() {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(RULES_TEST_SPOUT, new RulesTestSpout(2000));
    builder.setBolt(SPLIT_BOLT, createSplitBolt()).shuffleGrouping(RULES_TEST_SPOUT);
    builder.setBolt(STAGE_BOLT, createStageBolt()).shuffleGrouping(SPLIT_BOLT, SPLIT_STREAM_ID.getId());
    builder.setBolt(JOIN_BOLT, createJoinBolt()).shuffleGrouping(STAGE_BOLT, STAGE_OUTPUT_STREAM.getId());
    builder.setBolt(SINK_BOLT, new RulesTestSinkBolt()).shuffleGrouping(JOIN_BOLT, JOIN_OUTPUT_STREAM.getId());
    return builder.createTopology();
}
 
Example 7
Source File: NormalizationTopologyTest.java    From streamline with Apache License 2.0
protected StormTopology createTopology(NormalizationProcessor normalizationProcessor) {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(RULES_TEST_SPOUT, new RulesTestSpout(2000));
    builder.setBolt(NORMALIZATION_BOLT, new NormalizationBolt(normalizationProcessor)).shuffleGrouping(RULES_TEST_SPOUT, NormalizationProcessor.DEFAULT_STREAM_ID);
    builder.setBolt(SINK_BOLT, new RulesTestSinkBolt()).shuffleGrouping(NORMALIZATION_BOLT, OUTPUT_STREAM_ID);
    return builder.createTopology();
}
 
Example 8
Source File: StormTestUtil.java    From incubator-atlas with Apache License 2.0
public static StormTopology createTestTopology() {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("words", new TestWordSpout(), 10);
    builder.setBolt("count", new TestWordCounter(), 3).shuffleGrouping("words");
    builder.setBolt("globalCount", new TestGlobalCount(), 2).shuffleGrouping("count");

    return builder.createTopology();
}
 
Example 9
Source File: TopologyModule.java    From monasca-thresh with Apache License 2.0
@Provides
StormTopology topology() {
  TopologyBuilder builder = new TopologyBuilder();

  // Receives metrics
  builder.setSpout("metrics-spout", Injector.getInstance(IRichSpout.class, "metrics"),
      config.metricSpoutThreads).setNumTasks(config.metricSpoutTasks);

  // Receives events
  builder.setSpout("event-spout", Injector.getInstance(IRichSpout.class, "event"),
      config.eventSpoutThreads).setNumTasks(config.eventSpoutTasks);

  // Event -> Events
  builder
      .setBolt("event-bolt", new EventProcessingBolt(config.database), config.eventBoltThreads)
      .shuffleGrouping("event-spout").setNumTasks(config.eventBoltTasks);

  // Metrics / Event -> Filtering
  builder
      .setBolt("filtering-bolt", new MetricFilteringBolt(config.database),
          config.filteringBoltThreads)
      .fieldsGrouping("metrics-spout", new Fields(MetricSpout.FIELDS[0]))
      .allGrouping("event-bolt", EventProcessingBolt.METRIC_ALARM_EVENT_STREAM_ID)
      .allGrouping("event-bolt", EventProcessingBolt.ALARM_DEFINITION_EVENT_STREAM_ID)
      .setNumTasks(config.filteringBoltTasks);

  // Filtering /Event -> Alarm Creation
  builder
      .setBolt("alarm-creation-bolt", new AlarmCreationBolt(config.database),
          config.alarmCreationBoltThreads)
      .fieldsGrouping("filtering-bolt",
          MetricFilteringBolt.NEW_METRIC_FOR_ALARM_DEFINITION_STREAM,
          new Fields(AlarmCreationBolt.ALARM_CREATION_FIELDS[3]))
      .allGrouping("event-bolt", EventProcessingBolt.METRIC_SUB_ALARM_EVENT_STREAM_ID)
      .allGrouping("event-bolt", EventProcessingBolt.ALARM_EVENT_STREAM_ID)
      .allGrouping("event-bolt", EventProcessingBolt.ALARM_DEFINITION_EVENT_STREAM_ID)
      .setNumTasks(config.alarmCreationBoltTasks);

  // Filtering / Event / Alarm Creation -> Aggregation
  builder
      .setBolt("aggregation-bolt",
          new MetricAggregationBolt(config, config.database), config.aggregationBoltThreads)
      .fieldsGrouping("filtering-bolt", new Fields(MetricFilteringBolt.FIELDS[0]))
      .allGrouping("filtering-bolt", MetricAggregationBolt.METRIC_AGGREGATION_CONTROL_STREAM)
      .fieldsGrouping("filtering-bolt", AlarmCreationBolt.ALARM_CREATION_STREAM,
          new Fields(AlarmCreationBolt.ALARM_CREATION_FIELDS[1]))
      .allGrouping("event-bolt", EventProcessingBolt.METRIC_SUB_ALARM_EVENT_STREAM_ID)
      .fieldsGrouping("event-bolt", EventProcessingBolt.METRIC_ALARM_EVENT_STREAM_ID,
          new Fields(EventProcessingBolt.METRIC_ALARM_EVENT_STREAM_FIELDS[1]))
      .fieldsGrouping("alarm-creation-bolt", AlarmCreationBolt.ALARM_CREATION_STREAM,
          new Fields(AlarmCreationBolt.ALARM_CREATION_FIELDS[1]))
      .setNumTasks(config.aggregationBoltTasks);

  // Alarm Creation / Event
  // Aggregation / Event -> Thresholding
  builder
      .setBolt("thresholding-bolt",
          new AlarmThresholdingBolt(config.database, config.kafkaProducerConfig),
          config.thresholdingBoltThreads)
      .fieldsGrouping("aggregation-bolt", new Fields(MetricAggregationBolt.FIELDS[0]))
      .fieldsGrouping("event-bolt", EventProcessingBolt.ALARM_EVENT_STREAM_ID,
          new Fields(EventProcessingBolt.ALARM_EVENT_STREAM_FIELDS[1]))
      .allGrouping("event-bolt", EventProcessingBolt.ALARM_DEFINITION_EVENT_STREAM_ID)
      .allGrouping("event-bolt", EventProcessingBolt.METRIC_SUB_ALARM_EVENT_STREAM_ID)
      .setNumTasks(config.thresholdingBoltTasks);

  return builder.createTopology();
}
 
Example 10
Source File: SlidingWindowTopology.java    From twister2 with Apache License 2.0
@Override
public StormTopology buildTopology() {
  TopologyBuilder builder = new TopologyBuilder();

  builder.setSpout("source", new TestWordSpout(), 1);
  builder.setBolt("windower", new SlidingWindowBolt()
      .withWindow(
          new BaseWindowedBolt.Count(30),
          new BaseWindowedBolt.Count(10)
      ), 1)
      .shuffleGrouping("source");
  return builder.createTopology();
}
 
Example 11
Source File: TumblingWindowTopology.java    From twister2 with Apache License 2.0
@Override
public StormTopology buildTopology() {
  TopologyBuilder builder = new TopologyBuilder();

  builder.setSpout("source", new TestWordSpout(), 1);
  builder.setBolt("windower", new TumblingWindowBolt()
      .withTumblingWindow(
          new BaseWindowedBolt.Count(10)
      ), 1)
      .shuffleGrouping("source");
  return builder.createTopology();
}
 
Example 12
Source File: IntervalWindowTopology.java    From twister2 with Apache License 2.0
@Override
public StormTopology buildTopology() {
  TopologyBuilder builder = new TopologyBuilder();

  builder.setSpout("source", new TestWordSpout(), 1);
  builder.setBolt("windower", new IntervalWindowBolt()
      .withTumblingWindow(
          new BaseWindowedBolt.Duration(2, TimeUnit.SECONDS)
      ), 1).shuffleGrouping("source");
  return builder.createTopology();
}
 
Example 13
Source File: MultiSpoutTopology.java    From twister2 with Apache License 2.0
@Override
public StormTopology buildTopology() {
  TopologyBuilder builder = new TopologyBuilder();

  builder.setSpout("word0", new TestWordSpout(), 2);
  builder.setSpout("word1", new TestWordSpout(), 2);
  builder.setSpout("word2", new TestWordSpout(), 2);
  builder.setBolt("exclaim1", new ExclamationBolt(), 2)
      .shuffleGrouping("word0")
      .shuffleGrouping("word1")
      .shuffleGrouping("word2");
  return builder.createTopology();
}
 
Example 14
Source File: SentenceWordCountTopology.java    From twister2 with Apache License 2.0
@Override
public StormTopology buildTopology() {
  TopologyBuilder builder = new TopologyBuilder();


  builder.setSpout("spout", new FastRandomSentenceSpout(), 1);
  builder.setBolt("split", new SplitSentence(), 1).shuffleGrouping("spout");
  builder.setBolt("count", new WordCount(), 1)
      .fieldsGrouping("split", new Fields("word"));
  return builder.createTopology();
}
 
Example 15
Source File: DBusRouterTopology.java    From DBus with Apache License 2.0
private StormTopology buildTopology() throws Exception {
    // Load the router configuration from ZooKeeper
    loadRouterConf();

    TopologyBuilder builder = new TopologyBuilder();

    int kafkaReadSpoutParallel =
            Integer.valueOf(routerConf.getProperty(DBusRouterConstants.STORM_KAFKA_READ_SPOUT_PARALLEL, "1"));
    builder.setSpout("RouterKafkaReadSpout",
            new DBusRouterKafkaReadSpout(),
            kafkaReadSpoutParallel);

    builder.setSpout("RouterMonitorSpout",
            new DBusRouterMonitorSpout(),
            1);

    int encodeBoltParallel =
            Integer.valueOf(routerConf.getProperty(DBusRouterConstants.STORM_ENCODE_BOLT_PARALLEL, "5"));
    builder.setBolt("RouterEncodeBolt",
            new DBusRouterEncodeBolt(), encodeBoltParallel)
            .fieldsGrouping("RouterKafkaReadSpout", "umsOrHbStream", new Fields("ns"))
            .allGrouping("RouterKafkaReadSpout", "ctrlStream");

    int statBoltParallel = 1;
    builder.setBolt("RouterStatBolt",
            new DBusRouterStatBolt(), statBoltParallel)
            .shuffleGrouping("RouterEncodeBolt", "statStream")
            .shuffleGrouping("RouterEncodeBolt", "ctrlStream");

    int kafkaWriteBoltParallel =
            Integer.valueOf(routerConf.getProperty(DBusRouterConstants.STORM_KAFKA_WREIT_BOLT_PARALLEL, "1"));
    builder.setBolt("RouterKafkaWriteBolt",
            new DBusRouterKafkaWriteBolt(),
            kafkaWriteBoltParallel).
            allGrouping("RouterEncodeBolt", "ctrlStream").
            shuffleGrouping("RouterEncodeBolt", "umsOrHbStream").
            shuffleGrouping("RouterStatBolt", "statStream");

    return builder.createTopology();
}
 
Example 16
Source File: DBusLogProcessorTopology.java    From DBus with Apache License 2.0
private StormTopology buildTopology() throws Exception {
    int kafkaReadSpoutParallel = Integer.valueOf(properties.getProperty(Constants.LOG_KAFKA_READ_SPOUT_PARALLEL));
    int transformBoltParallel = Integer.valueOf(properties.getProperty(Constants.LOG_TRANSFORM_BOLT_PARALLEL));
    int kafkaWriteBoltParallel = Integer.valueOf(properties.getProperty(Constants.LOG_KAFKA_WRITE_BOLT_PARALLEL));
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("LogProcessorKafkaReadSpout",
            new LogProcessorKafkaReadSpout(),
            kafkaReadSpoutParallel);

    builder.setBolt("LogProcessorTransformBolt",
            new LogProcessorTransformBolt(), transformBoltParallel)
            .shuffleGrouping("LogProcessorKafkaReadSpout", "umsStream")
            .allGrouping("LogProcessorKafkaReadSpout", "ctrlOrHbStream");

    builder.setBolt("LogProcessorStatBolt",
            new LogProcessorStatBolt(), 1).
            shuffleGrouping("LogProcessorTransformBolt", "statStream").
            allGrouping("LogProcessorTransformBolt", "ctrlStream");

    builder.setBolt("LogProcessorHeartbeatBolt",
            new LogProcessorHeartbeatBolt(), 1).
            shuffleGrouping("LogProcessorTransformBolt", "umsStream").
            shuffleGrouping("LogProcessorTransformBolt", "heartbeatStream").
            allGrouping("LogProcessorTransformBolt", "ctrlStream");

    builder.setBolt("LogProcessorKafkaWriteBolt",
            new LogProcessorKafkaWriteBolt(),
            kafkaWriteBoltParallel).
            allGrouping("LogProcessorTransformBolt", "ctrlStream").
            shuffleGrouping("LogProcessorHeartbeatBolt", "heartbeatStream").
            shuffleGrouping("LogProcessorHeartbeatBolt", "umsStream").
            shuffleGrouping("LogProcessorStatBolt");

    return builder.createTopology();
}
 
Example 17
Source File: StormTestUtil.java    From atlas with Apache License 2.0
public static StormTopology createTestTopology() {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("words", new TestWordSpout(), 10);
    builder.setBolt("count", new TestWordCounter(), 3).shuffleGrouping("words");
    builder.setBolt("globalCount", new TestGlobalCount(), 2).shuffleGrouping("count");

    return builder.createTopology();
}
 
Example 18
Source File: WordCountApp.java    From java-study with Apache License 2.0
public static void main(String[] args) throws InterruptedException, AlreadyAliveException, InvalidTopologyException {
    // Define the topology
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word-reader", new WordReader());
    builder.setBolt("word-normalizer", new WordNormalizer()).shuffleGrouping("word-reader");
    builder.setBolt("word-counter", new WordCounter()).fieldsGrouping("word-normalizer", new Fields("word"));
    StormTopology topology = builder.createTopology();

    // Configuration
    Config conf = new Config();
    String fileName = "words.txt";
    conf.put("fileName", fileName);
    conf.setDebug(false);

    // Run the topology
    System.out.println("Starting...");
    if (args != null && args.length > 0) { // with arguments, submit to the cluster and use the first argument as the topology name
        System.out.println("Remote mode");
        try {
            StormSubmitter.submitTopology(args[0], conf, topology);
        } catch (AuthorizationException e) {
            e.printStackTrace();
        }
    } else { // without arguments, submit locally
        // Start local mode
        System.out.println("Local mode");
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("Getting-Started-Topologie", conf, topology);
        Thread.sleep(5000);
        // Shut down the local cluster
        cluster.shutdown();
    }
    System.out.println("Done");
}
 
Example 19
Source File: DispatcherAppenderTopology.java    From DBus with Apache License 2.0
private StormTopology buildTopology() throws Exception {

    TopologyBuilder builder = new TopologyBuilder();

    // Start type is "all" or "dispatcher"
    if (topologyType.equals(Constants.TopologyType.ALL) || topologyType.equals(Constants.TopologyType.DISPATCHER)) {
        /**
         * dispatcher section
         */
        this.initializeDispatcher(zookeeper, Constants.ZKPath.ZK_TOPOLOGY_ROOT + "/" + dispatcherTopologyId);

        if (datasourceType != null && datasourceType.equals(com.creditease.dbus.commons.Constants.DB2_CONFIG.DB2)) {
            builder.setSpout("dispatcher-kafkaConsumerSpout", new Db2KafkaConsumerSpout(), 1);
            builder.setBolt("dispatcher-DispatcherBout", new Db2DispatcherBout(), 1)
                    .shuffleGrouping("dispatcher-kafkaConsumerSpout");
            builder.setBolt("dispatcher-kafkaProducerBout", new Db2KafkaProducerBout(), 1)
                    .shuffleGrouping("dispatcher-DispatcherBout");
        } else {
            builder.setSpout("dispatcher-kafkaConsumerSpout", new KafkaConsumerSpout(), 1);
            builder.setBolt("dispatcher-DispatcherBout", new DispatcherBout(), 1)
                    .shuffleGrouping("dispatcher-kafkaConsumerSpout");
            builder.setBolt("dispatcher-kafkaProducerBout", new KafkaProducerBout(), 1)
                    .shuffleGrouping("dispatcher-DispatcherBout");
        }
    }

    // Start type is "all" or "appender"
    if (topologyType.equals(Constants.TopologyType.ALL) || topologyType.equals(Constants.TopologyType.APPENDER)) {
        /**
         * appender section
         */
        // Initialize the configuration files
        this.initializeAppender(zookeeper, Constants.ZKPath.ZK_TOPOLOGY_ROOT + "/" + appenderTopologyId);
        builder.setSpout("appender-spout", new DbusKafkaSpout(), 1);
        builder.setBolt("appender-dispatcher", new DispatcherBolt(), 1).shuffleGrouping("appender-spout");
        builder.setBolt("appender-meta-fetcher", new DbusAppenderBolt(), getBoltParallelism(Constants.ConfigureKey.META_FETCHER_BOLT_PARALLELISM, 3))
                .customGrouping("appender-dispatcher", new DbusGrouping());
        builder.setBolt("appender-wrapper", new WrapperBolt(), getBoltParallelism(Constants.ConfigureKey.WRAPPER_BOLT_PARALLELISM, 3))
                .customGrouping("appender-meta-fetcher", new DbusGrouping());
        builder.setBolt("appender-kafka-writer", new DbusKafkaWriterBolt(), getBoltParallelism(Constants.ConfigureKey.KAFKA_WRITTER_BOLT_PARALLELISM, 3))
                .customGrouping("appender-wrapper", new DbusGrouping());

        // To keep dbus-router statistics consistent with the dispatcher and appender,
        // the logic that writes heartbeats to Kafka was moved into appender-kafka-writer
        //builder.setBolt("appender-heart-beat", new DbusHeartBeatBolt(), 1).shuffleGrouping("appender-kafka-writer");
    }

    return builder.createTopology();
}