Java Code Examples for org.apache.storm.Config

The following examples show how to use org.apache.storm.Config. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source Project: storm-nginx-log   Source File: NginxStorm.java    License: MIT License 6 votes vote down vote up
public static void main(String[] argv) throws InterruptedException {

        // Wiring: LogSpout -> SpliteBolt (shuffle) -> CounterBolt (grouped by "item").
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("LogSpout", new LogSpout(), 1);
        builder.setBolt("SpliteBolt", new SpliteBolt(), 1).shuffleGrouping("LogSpout");
        builder.setBolt("CounterBolt", new CounterBolt(), 1)
                .fieldsGrouping("SpliteBolt", new Fields("item"));

        Config stormConf = new Config();
        stormConf.setDebug(true);

        // Run inside an in-process cluster; it is intentionally left running (never killed).
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("NginxLog", stormConf, builder.createTopology());
    }
 
Example 2
Source Project: metron   Source File: ParserTopologyCLITest.java    License: Apache License 2.0 6 votes vote down vote up
public void testConfig_noExtra(boolean longOpt) throws ParseException {
  // Exercise the CLI with worker/acker/parallelism/timeout tuning and no extra JSON config.
  CommandLine cli = new CLIBuilder()
      .with(ParserTopologyCLI.ParserOptions.BROKER_URL, "mybroker")
      .with(ParserTopologyCLI.ParserOptions.ZK_QUORUM, "myzk")
      .with(ParserTopologyCLI.ParserOptions.SENSOR_TYPES, "mysensor")
      .with(ParserTopologyCLI.ParserOptions.NUM_WORKERS, "1")
      .with(ParserTopologyCLI.ParserOptions.NUM_ACKERS, "2")
      .with(ParserTopologyCLI.ParserOptions.NUM_MAX_TASK_PARALLELISM, "3")
      .with(ParserTopologyCLI.ParserOptions.MESSAGE_TIMEOUT, "4")
      .build(longOpt);
  // Each numeric option must land on the corresponding Storm Config key.
  Config stormConf = ParserTopologyCLI.ParserOptions.getConfig(cli).get();
  assertEquals(1, stormConf.get(Config.TOPOLOGY_WORKERS));
  assertEquals(2, stormConf.get(Config.TOPOLOGY_ACKER_EXECUTORS));
  assertEquals(3, stormConf.get(Config.TOPOLOGY_MAX_TASK_PARALLELISM));
  assertEquals(4, stormConf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS));
}
 
Example 3
Source Project: storm-net-adapter   Source File: AnchoredWordCount.java    License: Apache License 2.0 6 votes vote down vote up
protected int run(String[] args) throws Exception {
    // Wire spout -> split (shuffle) -> count (grouped by word), 4 executors each.
    TopologyBuilder topology = new TopologyBuilder();
    topology.setSpout("spout", new RandomSentenceSpout(), 4);
    topology.setBolt("split", new SplitSentence(), 4).shuffleGrouping("spout");
    topology.setBolt("count", new WordCount(), 4).fieldsGrouping("split", new Fields("word"));

    Config conf = new Config();
    conf.setMaxTaskParallelism(3);
    conf.setNumWorkers(3);

    // First CLI argument, when present, overrides the default topology name.
    String topologyName = (args != null && args.length > 0) ? args[0] : "word-count";
    return submit(topologyName, conf, topology);
}
 
Example 4
Source Project: tutorials   Source File: TopologyRunner.java    License: MIT License 6 votes vote down vote up
public static void runTopology() {
    // Pipeline: random numbers -> filter -> 5s tumbling-window aggregate (1s lag) -> file sink.
    String filePath = "./src/main/resources/operations.txt";
    TopologyBuilder topology = new TopologyBuilder();
    topology.setSpout("randomNumberSpout", new RandomNumberSpout());
    topology.setBolt("filteringBolt", new FilteringBolt()).shuffleGrouping("randomNumberSpout");
    topology.setBolt("aggregatingBolt", new AggregatingBolt()
      .withTimestampField("timestamp")
      .withLag(BaseWindowedBolt.Duration.seconds(1))
      .withWindow(BaseWindowedBolt.Duration.seconds(5))).shuffleGrouping("filteringBolt");
    topology.setBolt("fileBolt", new FileWritingBolt(filePath)).shuffleGrouping("aggregatingBolt");

    // Submit to an in-process cluster with debug logging disabled.
    Config config = new Config();
    config.setDebug(false);
    new LocalCluster().submitTopology("Test", config, topology.createTopology());
}
 
Example 5
Source Project: storm-net-adapter   Source File: AggregateExample.java    License: Apache License 2.0 6 votes vote down vote up
@SuppressWarnings("unchecked")
public static void main(String[] args) throws Exception {
    // Topology name defaults to AGG_EXAMPLE unless supplied on the command line.
    String topoName = args.length > 0 ? args[0] : "AGG_EXAMPLE";

    /*
     * Computes the average of the stream of numbers emitted by the spout. Internally the
     * per-partition sums and counts are accumulated and emitted to a downstream task where
     * the partially accumulated results are merged and the final result is emitted.
     */
    StreamBuilder builder = new StreamBuilder();
    builder.newStream(new RandomIntegerSpout(), new ValueMapper<Integer>(0), 2)
           .window(TumblingWindows.of(BaseWindowedBolt.Duration.seconds(5)))
           .filter(x -> x > 0 && x < 500)
           .aggregate(new Avg())
           .print();

    Config config = new Config();
    config.setNumWorkers(1);
    StormSubmitter.submitTopologyWithProgressBar(topoName, config, builder.build());
}
 
Example 6
Source Project: storm-net-adapter   Source File: SlidingWindowTopology.java    License: Apache License 2.0 6 votes vote down vote up
public static void main(String[] args) throws Exception {
    // integers -> sliding sum (window 30, slide 10) -> tumbling average (every 3) -> printer
    TopologyBuilder topology = new TopologyBuilder();
    topology.setSpout("integer", new RandomIntegerSpout(), 1);
    topology.setBolt("slidingsum", new SlidingWindowSumBolt().withWindow(Count.of(30), Count.of(10)), 1)
            .shuffleGrouping("integer");
    topology.setBolt("tumblingavg", new TumblingWindowAvgBolt().withTumblingWindow(Count.of(3)), 1)
            .shuffleGrouping("slidingsum");
    topology.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("tumblingavg");

    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(1);

    // First CLI argument, when present, overrides the default topology name.
    String topoName = (args != null && args.length > 0) ? args[0] : "test";
    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, topology.createTopology());
}
 
Example 7
Source Project: storm-net-adapter   Source File: ReachTopology.java    License: Apache License 2.0 6 votes vote down vote up
public static void main(String[] args) throws Exception {
    LinearDRPCTopologyBuilder builder = construct();

    Config conf = new Config();
    conf.setNumWorkers(6);
    // Optional first argument overrides the default topology name.
    String topoName = args.length > 0 ? args[0] : "reach-drpc";
    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createRemoteTopology());

    // Query the deployed "reach" DRPC function once per sample URL; the client closes automatically.
    try (DRPCClient drpc = DRPCClient.getConfiguredClient(conf)) {
        for (String url : new String[]{ "foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com" }) {
            System.out.println("Reach of " + url + ": " + drpc.execute("reach", url));
        }
    }
}
 
Example 8
public static void main(String[] args) throws Exception {
    // Windowed sum: window of 5 tuples sliding by 3, tracking message ids via the "msgid" field.
    TopologyBuilder topology = new TopologyBuilder();
    topology.setSpout("spout", new RandomIntegerSpout());
    topology.setBolt("sumbolt", new WindowSumBolt().withWindow(new Count(5), new Count(3))
                                                   .withMessageIdField("msgid"), 1).shuffleGrouping("spout");
    topology.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("sumbolt");

    Config conf = new Config();
    conf.setDebug(false);
    conf.setNumWorkers(1);
    // To persist bolt state externally, set Config.TOPOLOGY_STATE_PROVIDER
    // (e.g. org.apache.storm.redis.state.RedisKeyValueStateProvider).

    // First CLI argument, when present, overrides the default topology name.
    String topoName = (args != null && args.length > 0) ? args[0] : "test";
    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, topology.createTopology());
}
 
Example 9
Source Project: DBus   Source File: SinkTopology.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds the sinker topology: a Kafka-reading spout feeding a sink-writing bolt.
 * Parallelism for both components is read from the sinker configuration.
 *
 * @return the assembled StormTopology, ready to be submitted
 * @throws Exception if the sinker configuration cannot be loaded or parsed
 */
private StormTopology buildTopology() throws Exception {
    loadSinkerConf();

    int spoutSize = Integer.parseInt(sinkerConf.getProperty(SinkerConstants.STORM_KAFKA_READ_SPOUT_PARALLEL));
    int boltSize = Integer.parseInt(sinkerConf.getProperty(SinkerConstants.STORM_WRITE_BOUT_PARALLEL));

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("SinkerKafkaReadSpout", new SinkerKafkaReadSpout(), spoutSize);
    builder.setBolt("SinkerWriteBolt", new SinkerWriteBolt(), boltSize)
            // "dataStream" tuples are partitioned by namespace ("ns"); "ctrlStream" is broadcast.
            .fieldsGrouping("SinkerKafkaReadSpout", "dataStream", new Fields("ns"))
            .allGrouping("SinkerKafkaReadSpout", "ctrlStream");

    // NOTE(review): a local Config (debug=true, numWorkers=1) used to be built here but was
    // never used or returned — topology config must be passed at submit time — so it was dead
    // code and has been removed.
    return builder.createTopology();
}
 
Example 10
public static void main(String[] args) throws InterruptedException, AlreadyAliveException, InvalidTopologyException, AuthorizationException {
   // The generator feeds both the calculator and the printer; the printer also
   // receives the calculator's output.
   TopologyBuilder topology = new TopologyBuilder();
   topology.setSpout(DATA_GENERATOR, new ASpout());
   topology.setBolt(DATA_CALCULATOR, new ABolt()).shuffleGrouping(DATA_GENERATOR);
   topology.setBolt(DATA_PRINTER, new DataPrinter())
         .shuffleGrouping(DATA_CALCULATOR)
         .shuffleGrouping(DATA_GENERATOR);

   LocalCluster cluster = new LocalCluster();
   cluster.submitTopology(TOPOLOGY_NAME, new Config(), topology.createTopology());

   // Let the local topology run for 100 seconds, then tear everything down.
   Thread.sleep(100000);
   cluster.killTopology(TOPOLOGY_NAME);
   cluster.shutdown();
}
 
Example 11
Source Project: breeze   Source File: TopologyStarterTest.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void stormConfTypes() {
	// Seed a Properties object with heterogeneous value types: strings, booleans,
	// ints, lists, and one key the converter does not know about.
	Properties input = new Properties();
	input.put(TOPOLOGY_NAME, "name");
	input.put(TOPOLOGY_DEBUG, true);
	input.put(TOPOLOGY_WORKERS, 1);
	input.put(TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE, 2048);
	input.put(SUPERVISOR_SLOTS_PORTS, asList(7000, 7001));
	input.put(DRPC_SERVERS, asList("host1", "host2", "host3"));
	input.put("unknown", "2");

	// Conversion must keep the type of every known key and pass unknown keys through untouched.
	Config converted = TopologyStarter.stormConfig(input);
	assertEquals("name", converted.get(TOPOLOGY_NAME));
	assertEquals(true, converted.get(TOPOLOGY_DEBUG));
	assertEquals(1, converted.get(TOPOLOGY_WORKERS));
	assertEquals(2048, converted.get(TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE));
	assertEquals(asList("host1", "host2", "host3"), converted.get(DRPC_SERVERS));
	assertEquals(asList(7000, 7001), converted.get(SUPERVISOR_SLOTS_PORTS));
	assertEquals("2", converted.get("unknown"));
}
 
Example 12
@Test
public void testValidJMXObjectName() throws Exception {

  final String topologyName = "someTopology";

  // Minimal Storm config: only the topology name is needed by the processor.
  Map stormConf = new HashMap();
  stormConf.put(Config.TOPOLOGY_NAME, topologyName);
  processor = new SimpleJMXStormMetricProcessor(stormConf);

  // A metric name containing '{', '=' and ',' must still produce a valid, quoted ObjectName.
  Metric metric = new Metric("component", "kafkaPartition{host=kafka_9092, partition=0}", 1.9);
  IMetricsConsumer.TaskInfo taskInfo =
          new IMetricsConsumer.TaskInfo("localhost", 1010, "emitBot", 2, System.currentTimeMillis(), 100);

  ObjectName objName = new ObjectName(processor.mBeanName(metric, taskInfo));
  assertThat(objName.getCanonicalName(), is("storm:component=component,host-port-task=localhost-1010-2,operation=\"kafkaPartition{host=kafka_9092, partition=0}\",topology=someTopology"));
}
 
Example 13
Source Project: incubator-heron   Source File: SerializationFactory.java    License: Apache License 2.0 6 votes vote down vote up
@SuppressWarnings({"rawtypes", "unchecked"})
private static Map<String, String> normalizeKryoRegister(Map conf) {
  // TODO: de-duplicate this logic with the code in nimbus
  Object registrations = conf.get(Config.TOPOLOGY_KRYO_REGISTER);
  // A TreeMap guarantees the same registration order no matter how the config was written.
  TreeMap<String, String> normalized = new TreeMap<String, String>();
  if (registrations == null) {
    return normalized;
  }
  if (registrations instanceof Map) {
    // Already a class-name -> serializer-name map; just re-sort it.
    normalized.putAll((Map<String, String>) registrations);
  } else {
    // Otherwise it is a list mixing bare class names (no custom serializer, mapped to
    // null) with single-entry {class: serializer} maps.
    for (Object entry : (List) registrations) {
      if (entry instanceof Map) {
        normalized.putAll((Map) entry);
      } else {
        normalized.put((String) entry, null);
      }
    }
  }
  return normalized;
}
 
Example 14
Source Project: storm-net-adapter   Source File: MultipleLoggerTopology.java    License: Apache License 2.0 6 votes vote down vote up
public static void main(String[] args) throws Exception {
    // Word spout fans out to a chain of two logging bolts.
    TopologyBuilder topology = new TopologyBuilder();
    topology.setSpout("word", new TestWordSpout(), 10);
    topology.setBolt("exclaim1", new ExclamationLoggingBolt(), 3).shuffleGrouping("word");
    topology.setBolt("exclaim2", new ExclamationLoggingBolt(), 2).shuffleGrouping("exclaim1");

    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(2);

    // Default the topology name to this class's fully-qualified name.
    String topoName = (args != null && args.length > 0) ? args[0] : MultipleLoggerTopology.class.getName();
    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, topology.createTopology());
}
 
Example 15
@Test
public void testMetricsAdditionNotReplacement() {
    // Start from a config that already carries one user-supplied worker metric.
    Config config = new Config();
    Map<String, String> userMetrics = new HashMap<>();
    userMetrics.put("foo", "foo.bar.baz");
    config.put(Config.TOPOLOGY_WORKER_METRICS, userMetrics);

    SigarLoggingMetricsConsumer.register(config, null);

    // Registration must merge the Sigar metrics in without dropping the existing entry.
    Map<String, String> merged = (Map<String, String>) config.get(Config.TOPOLOGY_WORKER_METRICS);
    Assert.assertNotNull(merged);
    Assert.assertTrue(merged.keySet().containsAll(SigarLoggingMetricsConsumer.METRICS.keySet()));
    Assert.assertTrue(merged.values().containsAll(SigarLoggingMetricsConsumer.METRICS.values()));
    Assert.assertEquals(merged.get("foo"), "foo.bar.baz");
}
 
Example 16
/**
 * Loads the delegate Protocol implementation named by PROTOCOL_IMPL_CONFIG, verifies it
 * implements Protocol, instantiates it reflectively, and configures it.
 *
 * @param conf the Storm configuration passed through to the delegate protocol
 * @throws RuntimeException if the configured class is missing, not a Protocol, or fails
 *         to instantiate; the underlying failure is chained as the cause
 */
@Override
public void configure(Config conf) {
    super.configure(conf);
    String protocolImplementation = ConfUtils.getString(conf,
            PROTOCOL_IMPL_CONFIG);
    try {
        Class<?> protocolClass = Class.forName(protocolImplementation);
        if (!Protocol.class.isAssignableFrom(protocolClass)) {
            throw new RuntimeException("Class " + protocolImplementation
                    + " does not implement Protocol");
        }
        // getDeclaredConstructor().newInstance() replaces the deprecated Class.newInstance(),
        // which silently rethrows any checked exception from the constructor.
        directProtocol = (Protocol) protocolClass.getDeclaredConstructor().newInstance();
        directProtocol.configure(conf);
    } catch (Exception e) {
        // Chain the original exception so the root cause (missing class, bad constructor,
        // delegate configure failure) is not lost. Also fixed the spacing in the message.
        throw new RuntimeException(
                "DelegatorRemoteDriverProtocol needs a valid protocol class for the config "
                        + PROTOCOL_IMPL_CONFIG + " but has: "
                        + protocolImplementation, e);
    }
}
 
Example 17
/**
 * Builds a TestJob that submits the Pirk topology (with a Kafka spout) to a local cluster
 * and blocks until OutputBolt signals progress via its shared latch.
 */
private TestJob createPirkTestJob(final Config config)
{
  final SpoutConfig kafkaConfig = setUpTestKafkaSpout(config);
  return new TestJob()
  {
    StormTopology topology = PirkTopology.getPirkTopology(kafkaConfig);

    @Override
    public void run(ILocalCluster iLocalCluster) throws Exception
    {
      iLocalCluster.submitTopology("pirk_integration_test", config, topology);
      logger.info("Pausing for setup.");
      // Thread.sleep(4000);
      // KafkaProducer producer = new KafkaProducer<String,String>(createKafkaProducerConfig());
      // loadTestData(producer);
      // Thread.sleep(10000);
      // Busy-wait (1s polls) until the latch count moves off testCountDown — presumably
      // OutputBolt counts the latch down as results arrive; confirm against OutputBolt.
      while (OutputBolt.latch.getCount() == testCountDown)
      {
        Thread.sleep(1000);
      }
      // Track the new latch level so the next run of this job waits for the next decrement.
      testCountDown -= 1;

      logger.info("Finished...");
    }
  };
}
 
Example 18
Source Project: jstorm   Source File: PerformanceTestTopology.java    License: Apache License 2.0 6 votes vote down vote up
public static void SetRemoteTopology()
        throws Exception {
    // Topology name comes from the config; fall back to this class's simple name,
    // derived from the current stack frame.
    String topologyName = (String) conf.get(Config.TOPOLOGY_NAME);
    if (topologyName == null) {
        String[] qualifiedName = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
        topologyName = qualifiedName[qualifiedName.length - 1];
    }

    // Parallelism hints default to 1 spout / 2 bolt executors when not configured.
    int spoutParallelism = Utils.getInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
    int boltParallelism = Utils.getInt(conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new TestSpout(), spoutParallelism);
    // localFirstGrouping is only for jstorm
    // (e.g. boltDeclarer.localFirstGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME))
    builder.setBolt("bolt", new TestBolt(), boltParallelism).shuffleGrouping("spout");

    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
}
 
Example 19
Source Project: metron   Source File: BatchTimeoutHelper.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * @return the recommended TickInterval to request, in seconds.
 *         Guaranteed positive number.
 */
public int getRecommendedTickInterval() {
  if (!initialized) {this.init();}
  // Parameter settings in the CLI override parameter settings set by the Storm component.
  // We shouldn't have to deal with this in the Metron environment, but just in case, log
  // whenever our recommended value will be overridden by cliTickTupleFreqSecs: a slower
  // forced tick is a warning (delays flushing), a faster one just an informational note.
  if (cliTickTupleFreqSecs > 0 && cliTickTupleFreqSecs != recommendedTickIntervalSecs) {
    boolean forcedSlower = cliTickTupleFreqSecs > recommendedTickIntervalSecs;
    String message = "Parameter '" + Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS + "' has been forced to value '" +
            Integer.toString(cliTickTupleFreqSecs) + "' via CLI configuration.  This will override the desired " +
            "setting of '" + Integer.toString(recommendedTickIntervalSecs) + "' and may lead to " +
            (forcedSlower ? "delayed batch flushing." : "unexpected periodicity in batch flushing.");
    if (forcedSlower) {
      LOG.warn(message);
    } else {
      LOG.info(message);
    }
  }
  return recommendedTickIntervalSecs;
}
 
Example 20
Source Project: incubator-heron   Source File: AckingTopology.java    License: Apache License 2.0 6 votes vote down vote up
public static void main(String[] args) throws Exception {
  // The topology name is mandatory and is the only accepted argument.
  if (args.length != 1) {
    throw new RuntimeException("Specify topology name");
  }

  int spouts = 2;
  int bolts = 2;
  TopologyBuilder topology = new TopologyBuilder();
  topology.setSpout("word", new AckingTestWordSpout(), spouts);
  topology.setBolt("exclaim1", new ExclamationBolt(), bolts).shuffleGrouping("word");

  Config conf = new Config();
  conf.setDebug(true);
  // Put an arbitrary large number here if you don't want to slow the topology down.
  conf.setMaxSpoutPending(1000 * 1000 * 1000);
  // Acking is enabled by configuring at least one acker.
  conf.setNumAckers(1);
  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");
  // Set the number of workers or stream managers.
  conf.setNumWorkers(2);

  StormSubmitter.submitTopology(args[0], conf, topology.createTopology());
}
 
Example 21
Source Project: metron   Source File: ParserBolt.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * This method is called by TopologyBuilder.createTopology() to obtain topology and
 * bolt specific configuration parameters.  We use it primarily to configure how often
 * a tick tuple will be sent to our bolt.
 * @return conf topology and bolt specific configuration parameters, including the
 *         requested tick tuple frequency under Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS
 */
@Override
public Map<String, Object> getComponentConfiguration() {
  // This is called long before prepare(), so do some of the same stuff as prepare() does,
  // to get the valid WriterConfiguration.  But don't store any non-serializable objects,
  // else Storm will throw a runtime error.
  Function<WriterConfiguration, WriterConfiguration> configurationXform;
  // Use the first configured writer as representative for deriving the writer config.
  WriterHandler writer = sensorToWriterMap.entrySet().iterator().next().getValue();
  if (writer.isWriterToBulkWriter()) {
    configurationXform = WriterToBulkWriter.TRANSFORMATION;
  } else {
    configurationXform = x -> x;  // non-bulk writers need no transformation
  }
  WriterConfiguration writerconf = configurationXform
      .apply(getConfigurationStrategy()
          .createWriterConfig(writer.getBulkMessageWriter(), getConfigurations()));

  // Derive the tick interval from the configured batch timeouts (see BatchTimeoutHelper).
  BatchTimeoutHelper timeoutHelper = new BatchTimeoutHelper(writerconf::getAllConfiguredTimeouts, batchTimeoutDivisor);
  this.requestedTickFreqSecs = timeoutHelper.getRecommendedTickInterval();
  //And while we've got BatchTimeoutHelper handy, capture the maxBatchTimeout for writerComponent.
  this.maxBatchTimeout = timeoutHelper.getMaxBatchTimeout();

  // Merge our tick frequency into whatever configuration the superclass provides.
  Map<String, Object> conf = super.getComponentConfiguration();
  if (conf == null) {
    conf = new HashMap<>();
  }
  conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, requestedTickFreqSecs);
  LOG.info("Requesting " + Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS + " set to " + Integer.toString(requestedTickFreqSecs));
  return conf;
}
 
Example 22
Source Project: nifi   Source File: NiFiStormTopology.java    License: Apache License 2.0 5 votes vote down vote up
public static void main( String[] args ) {
    // Site-To-Site client configs: one input port pulling data from NiFi,
    // one output port pushing results back.
    final SiteToSiteClientConfig inputConfig = new SiteToSiteClient.Builder()
            .url("http://localhost:8080/nifi")
            .portName("Data for Storm")
            .buildConfig();
    final SiteToSiteClientConfig outputConfig = new SiteToSiteClient.Builder()
            .url("http://localhost:8080/nifi")
            .portName("Data from Storm")
            .buildConfig();

    // The output bolt flushes on a 5-second tick; batch size is left at its default.
    final int tickFrequencySeconds = 5;
    final NiFiDataPacketBuilder niFiDataPacketBuilder = new SimpleNiFiDataPacketBuilder();
    final NiFiBolt niFiBolt = new NiFiBolt(outputConfig, niFiDataPacketBuilder, tickFrequencySeconds);

    TopologyBuilder topology = new TopologyBuilder();
    topology.setSpout("nifiInput", new NiFiSpout(inputConfig));
    topology.setBolt("nifiOutput", niFiBolt).shuffleGrouping("nifiInput");

    // Run locally for 90 seconds, then shut the in-process cluster down.
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", new Config(), topology.createTopology());
    Utils.sleep(90000);
    cluster.shutdown();
}
 
Example 23
Source Project: springBoot-study   Source File: App.java    License: Apache License 2.0 5 votes vote down vote up
public static void main(String[] args)  {
		// Build the topology: spout -> TestBolt (shuffle) -> Test2Bolt (grouped by "count").
		TopologyBuilder builder = new TopologyBuilder();
		// One executor (thread) for the spout — the default.
		builder.setSpout(test_spout, new TestSpout(), 1);
		// shuffleGrouping distributes tuples randomly; one executor and one task.
		builder.setBolt(test_bolt, new TestBolt(), 1).setNumTasks(1).shuffleGrouping(test_spout);
		// fieldsGrouping partitions tuples by field; one executor and one task.
		builder.setBolt(test2_bolt, new Test2Bolt(), 1).setNumTasks(1).fieldsGrouping(test_bolt, new Fields("count"));

		Config conf = new Config();
		conf.put("test", "test");
		try {
			if (args != null && args.length > 0) {
				// With arguments: submit to the cluster, using the first argument as the topology name.
				System.out.println("运行远程模式");
				StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
			} else {
				// Without arguments: run in local mode for 20 seconds, then shut the cluster down.
				System.out.println("运行本地模式");
				LocalCluster cluster = new LocalCluster();
				cluster.submitTopology("Word-counts", conf, builder.createTopology());
				Thread.sleep(20000);
				cluster.shutdown();
			}
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
 
Example 24
Source Project: springBoot-study   Source File: App.java    License: Apache License 2.0 5 votes vote down vote up
public static void main(String[] args)  {
		// Minimal topology: a single spout feeding a single bolt via shuffle grouping.
		TopologyBuilder builder = new TopologyBuilder();
		builder.setSpout(str1, new TestSpout());
		builder.setBolt(str2, new TestBolt()).shuffleGrouping(str1);

		Config conf = new Config();
		conf.put("test", "test");
		try {
			if (args != null && args.length > 0) {
				// With arguments: submit to the cluster, using the first argument as the topology name.
				System.out.println("远程模式");
				StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
			} else {
				// Without arguments: run in local mode; the cluster is intentionally left running.
				System.out.println("本地模式");
				LocalCluster cluster = new LocalCluster();
				cluster.submitTopology("111", conf, builder.createTopology());
			}
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
 
Example 25
Source Project: elasticsearch-hadoop   Source File: AbstractStormSuite.java    License: Apache License 2.0 5 votes vote down vote up
@Override
protected void before() throws Throwable {

    copyPropertiesIntoCfg(cfg);

    // Run mode comes from the test properties; default to an in-process ("local") cluster.
    String stormMode = TestSettings.TESTING_PROPS.getProperty("storm", "local");
    isLocal = "local".equals(stormMode);

    int workers = Integer.parseInt(TestSettings.TESTING_PROPS.getProperty("storm.numworkers", "2"));
    cfg.setNumWorkers(workers);
    // Don't wait on topology code replication during tests.
    cfg.put(Config.TOPOLOGY_MIN_REPLICATION_COUNT, 0);
    cfg.put(Config.TOPOLOGY_MAX_REPLICATION_WAIT_TIME_SEC, 0);

    stormCluster = new LocalCluster();
}
 
Example 26
Source Project: storm-net-adapter   Source File: WordCountToBolt.java    License: Apache License 2.0 5 votes vote down vote up
public static void main(String[] args) throws Exception {
    // Redis connection parameters for the sink bolt.
    JedisPoolConfig poolConfig = new JedisPoolConfig.Builder()
        .setHost("127.0.0.1").setPort(6379).build();
    // Maps each aggregated (word, count) tuple onto a redis key-value pair.
    RedisStoreMapper storeMapper = new WordCountStoreMapper();
    IRichBolt redisStoreBolt = new RedisStoreBolt(poolConfig, storeMapper);

    // Stream: words -> (word, 1) pairs -> per-key counts -> redis sink. The tuple
    // forwarded to the bolt is a (word, count) pair with field names ("key", "value").
    StreamBuilder builder = new StreamBuilder();
    builder.newStream(new TestWordSpout(), new ValueMapper<String>(0))
           .mapToPair(w -> Pair.of(w, 1))
           .countByKey()
           .to(redisStoreBolt);

    Config config = new Config();
    config.setNumWorkers(1);
    // Optional first argument overrides the default topology name.
    String topoName = args.length > 0 ? args[0] : "test";
    StormSubmitter.submitTopologyWithProgressBar(topoName, config, builder.build());
}
 
Example 27
@SuppressWarnings("unchecked")
@Test
public void testStormMetricProcessorCreationViaReflection() throws Exception {

  final String host = "someHost";
  final int port = 1234;
  final int reportPeriod = 13;
  final String topologyName = "someTopology";

  // Assemble the minimal config the graphite processor reads at construction time.
  Map config = new HashMap();
  config.put(YammerFacadeMetric.FACADE_METRIC_TIME_BUCKET_IN_SEC, 60);
  config.put(SimpleGraphiteStormMetricProcessor.GRAPHITE_HOST, host);
  config.put(SimpleGraphiteStormMetricProcessor.GRAPHITE_PORT, port);
  config.put(SimpleGraphiteStormMetricProcessor.REPORT_PERIOD_IN_SEC, reportPeriod);
  config.put(Config.TOPOLOGY_NAME, topologyName);

  // Instantiate the processor reflectively from its class name, as production code would.
  MetricReporterConfig metricReporterConfig =
          new MetricReporterConfig(".*", SimpleGraphiteStormMetricProcessor.class.getCanonicalName());
  final SimpleGraphiteStormMetricProcessor graphiteProcessor =
          (SimpleGraphiteStormMetricProcessor) metricReporterConfig.getStormMetricProcessor(config);

  // Every configured value must survive the reflective round trip.
  assertThat(graphiteProcessor.getGraphiteServerHost(), is(host));
  assertThat(graphiteProcessor.getGraphiteServerPort(), is(port));
  assertThat(graphiteProcessor.topologyName, is(topologyName));
  assertThat(graphiteProcessor.config, is(config));
}
 
Example 28
public static void main(String[] args) {
    // Single-bolt DRPC topology exposed under the "addUser" function name.
    LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("addUser");
    builder.addBolt(new MyBolt());

    // Run against an in-process cluster with an in-process DRPC server.
    LocalCluster localCluster = new LocalCluster();
    LocalDRPC drpc = new LocalDRPC();
    localCluster.submitTopology("local-drpc", new Config(), builder.createLocalTopology(drpc));

    // Invoke the DRPC function once and print the result.
    System.out.println("From client: " + drpc.execute("addUser", "zhangsan"));

    localCluster.shutdown();
    drpc.shutdown();
}
 
Example 29
@Test
public void testMissingNumWorkers() {
    // Building without ever setting the worker count must be rejected.
    exception.expect(IllegalArgumentException.class);
    String zkHost = propertyParser.getProperty(ConfigVars.ZOOKEEPER_HOST_KEY);
    long zkPort = Long.parseLong(propertyParser.getProperty(ConfigVars.ZOOKEEPER_PORT_KEY));
    boolean debug = Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.STORM_ENABLE_DEBUG_KEY));
    stormLocalCluster = new StormLocalCluster.Builder()
            .setZookeeperHost(zkHost)
            .setZookeeperPort(zkPort)
            .setEnableDebug(debug)
            .setStormConfig(new Config())
            .build();
}
 
Example 30
@Test
public void shouldSetTickTupleFrequencyInComponentConfigurationToNonZeroValue() {
    // given: a freshly constructed bolt
    IntermediateRankingsBolt bolt = new IntermediateRankingsBolt();

    // when: asking Storm for its per-component configuration
    Map<String, Object> componentConfig = bolt.getComponentConfiguration();

    // then: a positive tick-tuple frequency must be declared
    assertThat(componentConfig).containsKey(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS);
    Integer tickFrequency = (Integer) componentConfig.get(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS);
    assertThat(tickFrequency).isGreaterThan(0);
}