Java Code Examples for backtype.storm.Config.put()

The following are Java code examples showing how to use the put() method of the backtype.storm.Config class, collected from open-source projects and ranked by user votes.
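Config extends java.util.HashMap&lt;String, Object&gt;, so put() accepts predefined keys such as Config.NIMBUS_HOST as well as arbitrary application keys with JSON-serializable values. Whatever you put in is shipped with the topology and handed back to every spout and bolt through its configuration map. Below is a minimal sketch of that round trip; the "redis.host" key and the bolt itself are hypothetical, not taken from the examples that follow.

import java.util.Map;

import backtype.storm.Config;
import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Tuple;

public class ConfiguredBolt extends BaseRichBolt {

    private String redisHost; // populated from the topology configuration

    @Override
    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
        // Everything stored via Config.put() arrives in stormConf on every component.
        this.redisHost = (String) stormConf.get("redis.host"); // hypothetical custom key
    }

    @Override
    public void execute(Tuple tuple) {
        // ... use this.redisHost ...
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // no output fields
    }
}

// At submission time:
// Config config = new Config();
// config.put("redis.host", "127.0.0.1");  // custom key
// config.put(Config.TOPOLOGY_WORKERS, 2); // predefined key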
Example 1
Project: RealEstate-Streaming   File: PhoenixTest.java   (7 votes)
public void buildAndSubmit() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    Config config = new Config();
    config.setDebug(true);
    // String nimbusHost = topologyConfig.getProperty("nimbus.host");
    config.put(Config.NIMBUS_HOST, "localhost");

    configureKafkaSpout(builder);
    //configureRouteBolt(builder);
    configurePhoenixTest(builder);

    /*
    builder.setBolt("submitter", new SubmitBolt())
       .shuffleGrouping(ROUTE_BOLT);
    */

    try {
        StormSubmitter.submitTopology("simple-topology", config, builder.createTopology());
    } catch (Exception e) {
        LOG.error("Error submitting topology", e);
    }
}
 
Example 2
Project: RealEstate-Streaming   File: KafkaPhoenixTopology.java   (6 votes)
public void buildAndSubmit() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    Config config = new Config();
    config.setDebug(true);
    // String nimbusHost = topologyConfig.getProperty("nimbus.host");
    config.put(Config.NIMBUS_HOST, "localhost");

    configureKafkaSpout(builder);
    configureRouteBolt(builder);
    configureInsertBolt(builder);

    //builder.setBolt("submitter", new SubmitBolt())
    //   .shuffleGrouping(ROUTE_BOLT);

    try {
        StormSubmitter.submitTopology("realestate-topology", config, builder.createTopology());
    } catch (Exception e) {
        LOG.error("Error submitting topology", e);
    }
}
 
Example 3
Project: aeolus   File: OrderedInputSpoutTest.java   (6 votes)
@Test
public void testSingleEmptyPartition() {
	@SuppressWarnings("unchecked")
	List<Deque<String>> data = new LinkedList<Deque<String>>(Arrays.asList(new LinkedList<String>()));
	TestOrderedInputSpout spout = new TestOrderedInputSpout(data, this.r);
	
	Config conf = new Config();
	conf.put(TestOrderedInputSpout.NUMBER_OF_PARTITIONS, new Integer(1));
	
	TestSpoutOutputCollector col = new TestSpoutOutputCollector();
	spout.open(conf, mock(TopologyContext.class), new SpoutOutputCollector(col));
	
	spout.nextTuple();
	spout.nextTuple();
	spout.nextTuple();
	
	Assert.assertEquals(0, col.output.size());
}
 
Example 4
Project: LearnStorm   File: ApLogGenerator.java   (6 votes)
private void configureKafkaBolt(TopologyBuilder builder, Config config) {
	String topic = topologyConfig.getProperty("kafka.topic");
	Properties props = new Properties();
	props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerUrl);
	props.put(ProducerConfig.CLIENT_ID_CONFIG, "storm-kafka-producer");
	props.put("metadata.broker.list", brokerUrl);
	props.put("serializer.class", "kafka.serializer.StringEncoder");
	props.put("request.required.acks", "1");
	config.setMaxSpoutPending(20);
	config.put(KafkaBolt.KAFKA_BROKER_PROPERTIES, props);
	KafkaBolt<String, String> kafkaBolt = new KafkaBolt<String, String>().withTopicSelector(new DefaultTopicSelector(topic))
									.withTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper<String, String>("key", "log"));
	builder.setBolt("KafkaBolt", kafkaBolt, 3).shuffleGrouping(SPOUT_ID).setDebug(DEBUG);
}
 
Example 5
Project: aeolus   File: OrderedInputSpoutTest.java   (6 votes)
@Test
public void testAllPartitionsEmpty() {
	@SuppressWarnings("unchecked")
	List<Deque<String>> data = new LinkedList<Deque<String>>(Arrays.asList(new LinkedList<String>(),
		new LinkedList<String>(), new LinkedList<String>()));
	TestOrderedInputSpout spout = new TestOrderedInputSpout(data, this.r);
	
	Config conf = new Config();
	conf.put(TestOrderedInputSpout.NUMBER_OF_PARTITIONS, new Integer(3));
	
	TestSpoutOutputCollector col = new TestSpoutOutputCollector();
	spout.open(conf, mock(TopologyContext.class), new SpoutOutputCollector(col));
	
	spout.nextTuple();
	spout.nextTuple();
	spout.nextTuple();
	spout.nextTuple();
	spout.nextTuple();
	
	Assert.assertEquals(0, col.output.size());
}
 
Example 6
Project: aeolus   File: OrderedFileInputSpoutTest.java   (6 votes)
@Test
public void testSingleEmptyPartition() {
	TestOrderedFileInputSpout spout = new TestOrderedFileInputSpout();
	
	Config conf = new Config();
	conf.put(TestOrderedFileInputSpout.NUMBER_OF_PARTITIONS, new Integer(1));
	
	TestSpoutOutputCollector col = new TestSpoutOutputCollector();
	spout.open(conf, mock(TopologyContext.class), new SpoutOutputCollector(col));
	
	spout.nextTuple();
	spout.nextTuple();
	spout.nextTuple();
	
	Assert.assertEquals(0, col.output.size());
}
 
Example 7
Project: rb-bi   File: TopologyFunctionTest.java   (5 votes)
@Test
public void macVendorTest() throws FileNotFoundException {

    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/flows.json").getPath());
    File checkFlow = new File(Thread.currentThread().getContextClassLoader().getResource("dataCheck/macVendorFlows.json").getPath());

    Scanner flows = new Scanner(fileFlow);
    Scanner checkFlows = new Scanner(checkFlow);

    MacVendorFunction._ouiFilePath = Thread.currentThread().getContextClassLoader().getResource("db/oui-vendors").getPath();

    List<String> fieldsFlow = new ArrayList<String>();

    fieldsFlow.add("flows");
    fieldsFlow.add("macVendor");

    LocalDRPC drpc = new LocalDRPC();

    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new MacVendorFunction(), new Fields("macVendor"))
            .each(new Fields(fieldsFlow), new MergeMapsFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        Assert.assertEquals(checkFlows.nextLine(), stormFlow);
    }
}
 
Example 8
Project: rb-bi   File: TopologyFunctionTest.java   (5 votes)
@Test
public void nonTimestampTest() throws FileNotFoundException {

    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/nonTimestampFlows.json").getPath());

    Scanner flows = new Scanner(fileFlow);


    List<String> fieldsFlow = new ArrayList<String>();

    fieldsFlow.add("flows");

    LocalDRPC drpc = new LocalDRPC();

    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new CheckTimestampFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        Assert.assertEquals(true, stormFlow.contains("timestamp"));
    }
}
 
Example 9
Project: rb-bi   File: TopologyFunctionTest.java   (5 votes)
@Test
public void analizeHttpUrlTest() throws FileNotFoundException {

    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/httpFlows.json").getPath());
    File checkFlow = new File(Thread.currentThread().getContextClassLoader().getResource("dataCheck/httpFlows.json").getPath());

    Scanner flows = new Scanner(fileFlow);
    Scanner checkFlows = new Scanner(checkFlow);

    List<String> fieldsFlow = new ArrayList<String>();

    fieldsFlow.add("flows");
    fieldsFlow.add("httpUrlMap");

    LocalDRPC drpc = new LocalDRPC();

    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new AnalizeHttpUrlFunction(), new Fields("httpUrlMap"))
            .each(new Fields(fieldsFlow), new MergeMapsFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        Assert.assertEquals(checkFlows.nextLine(), stormFlow);
    }
}
 
Example 10
Project: fiware-sinfonier   File: DynamicTopology.java   (5 votes)
private static Config setTopologyStormConfig(JSONObject topologyProperties) throws ConfigurationException {

        Config conf = new Config();

        Iterator<?> keys = topologyProperties.keys();
        while(keys.hasNext()){
            String stormProperty = (String) keys.next();
            conf.put(stormProperty, topologyProperties.get(stormProperty));
        }

        return conf;
    }
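Because the keys are copied verbatim, any Storm property name present in the JSON document lands directly in the topology config. A hypothetical call site (JSONObject is whichever JSON library the project uses):

JSONObject topologyProperties = new JSONObject();
topologyProperties.put("topology.workers", 4);
topologyProperties.put("topology.debug", true);
Config conf = setTopologyStormConfig(topologyProperties);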
 
Example 11
Project: preliminary.demo   File: RaceTopologyLocal.java   (5 votes)
public static void main(String[] args) {
    LocalCluster cluster = new LocalCluster();

    /* begin young-define */
    Config conf = new Config();
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new SpoutLocal(), 1);
    builder.setBolt("split", new SplitSentenceLocal(), 1).shuffleGrouping("spout");
    builder.setBolt("count", new WordCountLocal(), 1).fieldsGrouping("split", new Fields("word"));
    /* end young-define */

    // Recommended: set the parallelism of every bolt/spout to 1
    conf.put(Config.TOPOLOGY_MAX_TASK_PARALLELISM, 1);

    // Submit the topology
    cluster.submitTopology("SequenceTest", conf, builder.createTopology());

    // Wait one minute, then stop the topology and the cluster; increase this value as needed for debugging
    try {
        Thread.sleep(60000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    // Kill the topology
    cluster.killTopology("SequenceTest");

    cluster.shutdown();
}
 
Example 12
Project: miner   File: ExclaimBasicTopo.java   (5 votes)
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new RandomSpout());
    builder.setBolt("exclaim", new ProxyBolt()).shuffleGrouping("spout");
    builder.setBolt("print", new PrintBolt()).shuffleGrouping("exclaim");

    Config conf = new Config();
    conf.setDebug(false);

    /* The Config carries the Redis connection settings */
    conf.put("ip","127.0.0.1");
    conf.put("port","6379");
    conf.put("password","password");

    if (args != null && args.length > 0) {
        conf.setNumWorkers(1);

        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", conf, builder.createTopology());
        Utils.sleep(10*1000);
        cluster.killTopology("test");
        cluster.shutdown();
    }
}
 
Example 13
Project: es-hadoop-v2.2.0   File: AbstractStormSuite.java   (5 votes)
private static void copyPropertiesIntoCfg(Config cfg) {
    Properties props = TestSettings.TESTING_PROPS;

    for (String property : props.stringPropertyNames()) {
        cfg.put(property, props.get(property));
    }
}
 
Example 14
Project: storm-trident-example   File: ExampleTopology.java   (5 votes)
public static void main(String[] args) throws Exception {
	
	Config conf = new Config();
	conf.put("redisServerIP", args[0]);
	conf.put("redisServerPort", args[1]);
	conf.put("phraseCount", "4");

	StormSubmitter.submitTopology("trident-eg7", conf,
       		buildTopology());

}
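The three values stored here are plain strings taken from the command line; any Trident operation in the topology can read them back through the conf map passed to prepare(). A sketch of the consuming side, assuming a hypothetical function (PhraseCountFunction and its fields are illustrative, not part of the example's buildTopology()):

import java.util.Map;

import storm.trident.operation.BaseFunction;
import storm.trident.operation.TridentCollector;
import storm.trident.operation.TridentOperationContext;
import storm.trident.tuple.TridentTuple;

public class PhraseCountFunction extends BaseFunction {

    private String redisServerIP;
    private int phraseCount;

    @Override
    public void prepare(Map conf, TridentOperationContext context) {
        this.redisServerIP = (String) conf.get("redisServerIP");
        // "phraseCount" was stored as a String, so parse it here.
        this.phraseCount = Integer.parseInt((String) conf.get("phraseCount"));
    }

    @Override
    public void execute(TridentTuple tuple, TridentCollector collector) {
        // ... use redisServerIP and phraseCount ...
    }
}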
 
Example 15
Project: LearnStorm   File: TridentKafkaWordCount.java   (5 votes)
/**
 * Returns the storm config for the topology that publishes sentences to the kafka "test" topic using a kafka bolt.
 * The KAFKA_BROKER_PROPERTIES entry is needed by the KafkaBolt.
 *
 * @return the topology config
 */
public Config getProducerConfig() {
    Config conf = new Config();
    conf.setMaxSpoutPending(20);
    Properties props = new Properties();
    props.put("metadata.broker.list", brokerUrl);
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerUrl);
//    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
//    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
    props.put("serializer.class", "kafka.serializer.StringEncoder");
    props.put(ProducerConfig.CLIENT_ID_CONFIG, "storm-kafka-producer");
    conf.put(KafkaBolt.KAFKA_BROKER_PROPERTIES, props);
    return conf;
}
 
Example 16
Project: storm-cassandra-cql   File: SimpleUpdateTopology.java   (5 votes)
public static void main(String[] args) throws Exception {
    final Config configuration = new Config();
    configuration.put(MapConfiguredCqlClientFactory.TRIDENT_CASSANDRA_CQL_HOSTS, "localhost");
    final LocalCluster cluster = new LocalCluster();
    LOG.info("Submitting topology.");
    cluster.submitTopology("cqlexample", configuration, buildTopology());
    LOG.info("Topology submitted.");
    Thread.sleep(600000);
}
 
Example 17
Project: LearnStorm   File: ApLogAnalyzer.java   (5 votes)
private void configureESBolts(TopologyBuilder builder, Config config) {
	HashMap<String, Object> esConfig = new HashMap<String, Object>();
	esConfig.put(ESIndexerBolt.ES_CLUSTER_NAME, topologyConfig.getProperty(ESIndexerBolt.ES_CLUSTER_NAME));
	esConfig.put(ESIndexerBolt.ES_NODES, topologyConfig.getProperty(ESIndexerBolt.ES_NODES));
	esConfig.put(ESIndexerBolt.ES_SHIELD_ENABLED, topologyConfig.getProperty(ESIndexerBolt.ES_SHIELD_ENABLED));
	esConfig.put(ESIndexerBolt.ES_SHIELD_USER, topologyConfig.getProperty(ESIndexerBolt.ES_SHIELD_USER));
	esConfig.put(ESIndexerBolt.ES_SHIELD_PASS, topologyConfig.getProperty(ESIndexerBolt.ES_SHIELD_PASS));
	esConfig.put(ESIndexerBolt.ES_ASYNC_ENABLED, topologyConfig.getProperty(ESIndexerBolt.ES_ASYNC_ENABLED));
	config.put("es.conf", esConfig);
	ESIndexerBolt esBolt = new ESIndexerBolt().withConfigKey("es.conf");
	final int boltThreads = Integer.valueOf(topologyConfig.getProperty("bolt.ESIndexerBolt.threads"));

	builder.setBolt(ESINDEXER_BOLT_ID, esBolt, boltThreads).shuffleGrouping(KAFKA_SPOUT_ID).setDebug(DEBUG);
}
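Example 17 shows a common pattern: a whole HashMap is stored under a single key ("es.conf") and the bolt is told that key via withConfigKey(). The nested map survives submission because Config is serialized as a plain map. The sketch below is an assumption about how a bolt built this way recovers its settings in prepare(); it is not the actual ESIndexerBolt source.

import java.util.Map;

import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.base.BaseRichBolt;

public abstract class ConfigKeyBolt extends BaseRichBolt {

    private final String configKey;
    protected transient Map<String, Object> boltConfig;

    protected ConfigKeyBolt(String configKey) {
        this.configKey = configKey; // e.g. "es.conf", matching withConfigKey("es.conf")
    }

    @Override
    @SuppressWarnings("unchecked")
    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
        // Pull the nested map back out of the topology configuration.
        this.boltConfig = (Map<String, Object>) stormConf.get(this.configKey);
        // ... build the client (e.g. Elasticsearch) from boltConfig entries ...
    }
}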
 
Example 18
Project: aeolus   File: OrderedFileInputSpoutTest.java   (5 votes)
@Test
public void testOpenMultiplePartitions() throws Exception {
	TestOrderedFileInputSpout spout = new TestOrderedFileInputSpout();
	
	Config conf = new Config();
	conf.put(TestOrderedFileInputSpout.INPUT_FILE_NAME, "dummyFileName-");
	conf.put(TestOrderedFileInputSpout.INPUT_FILE_SUFFIXES, Arrays.asList(new String[] {"1", "2", "3"}));
	
	for(int i = 1; i <= 3; ++i) {
		FileReader fileReaderMock = PowerMockito.mock(FileReader.class);
		PowerMockito.whenNew(FileReader.class).withArguments("dummyFileName-" + i).thenReturn(fileReaderMock);
		
		BufferedReader bufferedReaderMock = PowerMockito.mock(BufferedReader.class);
		PowerMockito.whenNew(BufferedReader.class).withArguments(fileReaderMock).thenReturn(bufferedReaderMock);
	}
	List<Integer> taskMock = new LinkedList<Integer>();
	taskMock.add(new Integer(0));
	TopologyContext contextMock = mock(TopologyContext.class);
	when(contextMock.getComponentTasks(anyString())).thenReturn(taskMock);
	when(new Integer(contextMock.getThisTaskIndex())).thenReturn(new Integer(0));
	
	spout.open(conf, contextMock, mock(SpoutOutputCollector.class));
	Assert.assertTrue(spout.closePartition(new Integer(0)));
	Assert.assertTrue(spout.closePartition(new Integer(1)));
	Assert.assertTrue(spout.closePartition(new Integer(2)));
	try {
		spout.closePartition(new Integer(3));
		Assert.fail();
	} catch(RuntimeException e) {
		// expected
	}
}
 
Example 19
Project: storm-cassandra-cql   File: WordCountTopology.java   (5 votes)
public static void main(String[] args) throws Exception {
    final Config configuration = new Config();
    configuration.put(MapConfiguredCqlClientFactory.TRIDENT_CASSANDRA_CQL_HOSTS, "localhost");
    final LocalCluster cluster = new LocalCluster();
    LocalDRPC client = new LocalDRPC();

    LOG.info("Submitting topology.");
    cluster.submitTopology("cqlexample", configuration, buildWordCountAndSourceTopology(client));
    LOG.info("Topology submitted.");
    Thread.sleep(10000);
    LOG.info("DRPC Query: Word Count [cat, dog, the, man]: {}", client.execute("words", "cat dog the man"));
    cluster.shutdown();
    client.shutdown();
}
 
Example 20
Project: aeolus   File: OrderedInputSpoutTest.java   (5 votes)
@Test
public void testSinglePartition() {
	LinkedList<String> partition = new LinkedList<String>();
	partition.add("1");
	partition.add("2");
	partition.add("3");
	LinkedList<List<Object>> expectedResult = new LinkedList<List<Object>>();
	expectedResult.add(Arrays.asList(new Object[] {new Long(1), new String("1")}));
	expectedResult.add(Arrays.asList(new Object[] {new Long(2), new String("2")}));
	expectedResult.add(Arrays.asList(new Object[] {new Long(3), new String("3")}));
	@SuppressWarnings("unchecked")
	List<Deque<String>> data = new LinkedList<Deque<String>>(Arrays.asList(partition));
	
	TestOrderedInputSpout spout = new TestOrderedInputSpout(data, this.r);
	
	Config conf = new Config();
	conf.put(TestOrderedInputSpout.NUMBER_OF_PARTITIONS, new Integer(1));
	
	TestSpoutOutputCollector col = new TestSpoutOutputCollector();
	spout.open(conf, mock(TopologyContext.class), new SpoutOutputCollector(col));
	
	spout.nextTuple();
	spout.nextTuple();
	spout.nextTuple();
	spout.nextTuple();
	spout.nextTuple();
	
	Assert.assertEquals(1, col.output.size());
	Assert.assertNotEquals(null, col.output.get(Utils.DEFAULT_STREAM_ID));
	Assert.assertEquals(expectedResult, col.output.get(Utils.DEFAULT_STREAM_ID));
}
 
Example 21
Project: rb-bi   File: TopologyFunctionTest.java   (4 votes)
@Test
public void geoIpTest() throws FileNotFoundException {

    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/flows.json").getPath());
    File checkFlow = new File(Thread.currentThread().getContextClassLoader().getResource("dataCheck/geoIpFlows.json").getPath());

    Scanner flows = new Scanner(fileFlow);
    Scanner checkFlows = new Scanner(checkFlow);

    GeoIpFunction.CITY_DB_PATH = Thread.currentThread().getContextClassLoader().getResource("db/city.dat").getPath();
    GeoIpFunction.CITY_V6_DB_PATH = Thread.currentThread().getContextClassLoader().getResource("db/cityv6.dat").getPath();
    GeoIpFunction.ASN_DB_PATH = Thread.currentThread().getContextClassLoader().getResource("db/asn.dat").getPath();
    GeoIpFunction.ASN_V6_DB_PATH = Thread.currentThread().getContextClassLoader().getResource("db/asnv6.dat").getPath();

    List<String> fieldsFlow = new ArrayList<String>();

    fieldsFlow.add("flows");
    fieldsFlow.add("geoIPMap");

    LocalDRPC drpc = new LocalDRPC();

    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new GeoIpFunction(), new Fields("geoIPMap"))
            .each(new Fields(fieldsFlow), new MergeMapsFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        //System.out.println(stormFlow);
        Assert.assertEquals(checkFlows.nextLine(), stormFlow);
    }
}
 
Example 22
Project: storm-hbase-1.0.x   File: PersistentWordCount.java   (4 votes)
public static void main(String[] args) throws Exception {
    Config config = new Config();

    Map<String, Object> hbConf = new HashMap<String, Object>();
    if(args.length > 0){
        hbConf.put("hbase.rootdir", args[0]);
    }
    config.put("hbase.conf", hbConf);

    WordSpout spout = new WordSpout();
    WordCounter bolt = new WordCounter();

    SimpleHBaseMapper mapper = new SimpleHBaseMapper()
            .withRowKeyField("word")
            .withColumnFields(new Fields("word"))
            .withCounterFields(new Fields("count"))
            .withColumnFamily("cf");

    HBaseBolt hbase = new HBaseBolt("WordCount", mapper)
            .withConfigKey("hbase.conf");


    // wordSpout ==> countBolt ==> HBaseBolt
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout(WORD_SPOUT, spout, 1);
    builder.setBolt(COUNT_BOLT, bolt, 1).shuffleGrouping(WORD_SPOUT);
    builder.setBolt(HBASE_BOLT, hbase, 1).fieldsGrouping(COUNT_BOLT, new Fields("word"));


    if (args.length == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", config, builder.createTopology());
        Thread.sleep(30000);
        cluster.killTopology("test");
        cluster.shutdown();
        System.exit(0);
    } else if (args.length == 2) {
        StormSubmitter.submitTopology(args[1], config, builder.createTopology());
    } else{
        System.out.println("Usage: HdfsFileTopology <hdfs url> [topology name]");
    }
}
 
Example 23
Project: storm-hbase-1.0.x   File: LookupWordCount.java   (4 votes)
public static void main(String[] args) throws Exception {
    Config config = new Config();

    Map<String, Object> hbConf = new HashMap<String, Object>();
    if(args.length > 0){
        hbConf.put("hbase.rootdir", args[0]);
    }
    config.put("hbase.conf", hbConf);

    WordSpout spout = new WordSpout();
    TotalWordCounter totalBolt = new TotalWordCounter();

    SimpleHBaseMapper mapper = new SimpleHBaseMapper().withRowKeyField("word");
    HBaseProjectionCriteria projectionCriteria = new HBaseProjectionCriteria();
    projectionCriteria.addColumn(new HBaseProjectionCriteria.ColumnMetaData("cf", "count"));

    WordCountValueMapper rowToTupleMapper = new WordCountValueMapper();

    HBaseLookupBolt hBaseLookupBolt = new HBaseLookupBolt("WordCount", mapper, rowToTupleMapper)
            .withConfigKey("hbase.conf")
            .withProjectionCriteria(projectionCriteria);

    //wordspout -> lookupbolt -> totalCountBolt
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(WORD_SPOUT, spout, 1);
    builder.setBolt(LOOKUP_BOLT, hBaseLookupBolt, 1).shuffleGrouping(WORD_SPOUT);
    builder.setBolt(TOTAL_COUNT_BOLT, totalBolt, 1).fieldsGrouping(LOOKUP_BOLT, new Fields("columnName"));

    if (args.length == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", config, builder.createTopology());
        Thread.sleep(30000);
        cluster.killTopology("test");
        cluster.shutdown();
        System.exit(0);
    } else if (args.length == 2) {
        StormSubmitter.submitTopology(args[1], config, builder.createTopology());
    } else{
        System.out.println("Usage: LookupWordCount <hbase.rootdir>");
    }
}
 
Example 24
Project: java   File: TradeProcessingTopology.java   (4 votes)
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
  TopologyBuilder builder = new TopologyBuilder();

  LOGGER.info("Building Trade Processing Topology..");

  builder.setSpout(TRD_COLLECTOR_SPOUT, new TradeCollectorSpout(),
      CONFIG.getNumber("TRD_COLLECTOR_SPOUT_PARALLELISM"));

  builder
      .setBolt(TRD_ELIGIBILITY_BOLT, new TradeEligibilityBolt(),
          CONFIG.getNumber("TRD_ELIGIBILITY_BOLT_PARALLELISM"))
      .shuffleGrouping(TRD_COLLECTOR_SPOUT);

  builder
      .setBolt(TRD_REPORTING_BOLT, new TradeReportPersistenceBolt(),
          CONFIG.getNumber("TRD_REPORTING_BOLT_PARALLELISM"))
      .shuffleGrouping(TRD_ELIGIBILITY_BOLT, REPORT_STREAM);

  builder
      .setBolt(TRD_EXCLUSION_BOLT, new TradeExclusionPersistenceBolt(),
          CONFIG.getNumber("TRD_EXCLUSION_BOLT_PARALLELISM"))
      .shuffleGrouping(TRD_ELIGIBILITY_BOLT, EXCLUDE_STREAM);

  Config conf = new Config();
  conf.setDebug(CONFIG.is("DEBUG_FLAG"));
  conf.setNumWorkers(CONFIG.getInt("NUMBER_OF_WORKERS"));
  conf.setMaxTaskParallelism(CONFIG.getInt("MAX_TASK_PARALLELISM"));
  conf.setMaxSpoutPending(CONFIG.getInt("MAX_SPOUT_PENDING"));
  conf.put(Config.TOPOLOGY_SLEEP_SPOUT_WAIT_STRATEGY_TIME_MS,
      CONFIG.getInt("MAX_SPOUT_PENDING_WAIT_MS"));
  conf.put(Config.TOPOLOGY_SPOUT_WAIT_STRATEGY, CONFIG.get("TOPOLOGY_WAIT_STRATEGY"));
  conf.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, CONFIG.getInt("TOPOLOGY_MESSAGE_TIMEOUT_SECS"));
  conf.put(Config.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS,
      CONFIG.is("TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS"));
  LOGGER.info("Submitting Trade Processing Topology..");
  if (args != null && args.length > 0) {
    conf.put(Config.NIMBUS_HOST, CONFIG.get("LOCAL_NIMBUS_HOST"));
    conf.put(Config.NIMBUS_THRIFT_PORT, CONFIG.getInt("LOCAL_NIMBUS_PORT"));
    conf.put(Config.STORM_ZOOKEEPER_PORT, CONFIG.getInt("LOCAL_ZOOKEEPER_PORT"));
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
  } else {
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(TRD_PROCESSING_TOPOLOGY, conf, builder.createTopology());
    Utils.sleep(CONFIG.getLong("LOCAL_CLUSTER_RUNTIME"));
    cluster.killTopology(TRD_PROCESSING_TOPOLOGY);
    cluster.shutdown();
  }
}
 
Example 25
Project: streaming_outliers   File: Topology.java   (4 votes)
public static void main(String... argv) throws Exception {
    CommandLine cli = OutlierOptions.parse(new PosixParser(), argv);
    DataPointExtractorConfig extractorConfig = JSONUtil.INSTANCE.load(new FileInputStream(new File(OutlierOptions.EXTRACTOR_CONFIG.get(cli)))
                                                                     , DataPointExtractorConfig.class
                                                                     );
    com.caseystella.analytics.outlier.streaming.OutlierConfig streamingOutlierConfig = JSONUtil.INSTANCE.load(new FileInputStream(new File(OutlierOptions.STREAM_OUTLIER_CONFIG.get(cli)))
                                                                     , com.caseystella.analytics.outlier.streaming.OutlierConfig.class
                                                                     );

    PersistenceConfig persistenceConfig = JSONUtil.INSTANCE.load(new FileInputStream(new File(OutlierOptions.TIMESERIES_DB_CONFIG.get(cli)))
                                                                     , PersistenceConfig.class
                                                                     );
    int numSpouts = 1;
    int numWorkers = 10;
    if(OutlierOptions.NUM_WORKERS.has(cli)) {
        numWorkers = Integer.parseInt(OutlierOptions.NUM_WORKERS.get(cli));
    }
    if(OutlierOptions.NUM_SPOUTS.has(cli)) {
        numSpouts = Integer.parseInt(OutlierOptions.NUM_SPOUTS.get(cli));
    }
    Map clusterConf = Utils.readStormConfig();
    clusterConf.put("topology.max.spout.pending", 100);
    Config config = new Config();
    config.put("topology.max.spout.pending", 100);
    config.setNumWorkers(numWorkers);
    config.registerMetricsConsumer(LoggingMetricsConsumer.class);

    String topicName = OutlierOptions.TOPIC.get(cli);
    String topologyName = "streaming_outliers_" + topicName;
    String zkConnectString = OutlierOptions.ZK_QUORUM.get(cli);
    /*DataPointExtractorConfig extractorConfig
                                            , com.caseystella.analytics.outlier.streaming.OutlierConfig streamingOutlierConfig
                                            , com.caseystella.analytics.outlier.batch.OutlierConfig batchOutlierConfig
                                            , PersistenceConfig persistenceConfig
                                            , String kafkaTopic
                                            , String zkQuorum
                                            , int numWorkers*/
    boolean startAtBeginning = OutlierOptions.FROM_BEGINNING.has(cli);
    TopologyBuilder topology = createTopology( extractorConfig
                                             , streamingOutlierConfig
                                             , persistenceConfig
                                             , topicName
                                             , zkConnectString
                                             , OutlierOptions.ES_NODE.get(cli)
                                             , numWorkers
                                             , numSpouts
                                             , OutlierOptions.NUM_INDEXING_WORKERS.has(cli)?
                                               Integer.parseInt(OutlierOptions.NUM_INDEXING_WORKERS.get(cli)):
                                               5
                                             , OutlierOptions.INDEX.has(cli)?
                                               OutlierOptions.INDEX.get(cli):
                                               "{source}/outlier"
                                             , startAtBeginning
                                             );
    StormSubmitter.submitTopologyWithProgressBar( topologyName, clusterConf, topology.createTopology());
    //Nimbus.Client client = NimbusClient.getConfiguredClient(clusterConf).getClient();
}
 
Example 26
Project: jstorm-0.9.6.3-   File: SequenceTopologyTool.java   (4 votes)
public StormTopology buildTopology()
{
	Config conf = getConf();
	TopologyBuilder builder = new TopologyBuilder();

	int spout_Parallelism_hint = JStormUtils.parseInt(
			conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
	int bolt_Parallelism_hint = JStormUtils.parseInt(
			conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2);

	builder.setSpout(SequenceTopologyDef.SEQUENCE_SPOUT_NAME,
			new SequenceSpout(), spout_Parallelism_hint);

	boolean isEnableSplit = JStormUtils.parseBoolean(
			conf.get("enable.split"), false);

	if (isEnableSplit == false) {
		builder.setBolt(SequenceTopologyDef.TOTAL_BOLT_NAME,
				new TotalCount(), bolt_Parallelism_hint).localFirstGrouping(
				SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
	} else {

		builder.setBolt(SequenceTopologyDef.SPLIT_BOLT_NAME,
				new SplitRecord(), bolt_Parallelism_hint)
				.localOrShuffleGrouping(
						SequenceTopologyDef.SEQUENCE_SPOUT_NAME);

		builder.setBolt(SequenceTopologyDef.TRADE_BOLT_NAME,
				new PairCount(), bolt_Parallelism_hint).shuffleGrouping(
				SequenceTopologyDef.SPLIT_BOLT_NAME,
				SequenceTopologyDef.TRADE_STREAM_ID);
		builder.setBolt(SequenceTopologyDef.CUSTOMER_BOLT_NAME,
				new PairCount(), bolt_Parallelism_hint).shuffleGrouping(
				SequenceTopologyDef.SPLIT_BOLT_NAME,
				SequenceTopologyDef.CUSTOMER_STREAM_ID);

		builder.setBolt(SequenceTopologyDef.MERGE_BOLT_NAME,
				new MergeRecord(), bolt_Parallelism_hint)
				.fieldsGrouping(SequenceTopologyDef.TRADE_BOLT_NAME,
						new Fields("ID"))
				.fieldsGrouping(SequenceTopologyDef.CUSTOMER_BOLT_NAME,
						new Fields("ID"));

		builder.setBolt(SequenceTopologyDef.TOTAL_BOLT_NAME,
				new TotalCount(), bolt_Parallelism_hint).noneGrouping(
				SequenceTopologyDef.MERGE_BOLT_NAME);
	}

	boolean kryoEnable = JStormUtils.parseBoolean(conf.get("kryo.enable"),
			false);
	if (kryoEnable == true) {
		System.out.println("Use Kryo ");
		boolean useJavaSer = JStormUtils.parseBoolean(
				conf.get("fall.back.on.java.serialization"), true);

		Config.setFallBackOnJavaSerialization(conf, useJavaSer);

		Config.registerSerialization(conf, TradeCustomer.class);
		Config.registerSerialization(conf, Pair.class);
	}
	int ackerNum = JStormUtils.parseInt(
			conf.get(Config.TOPOLOGY_ACKER_EXECUTORS), 1);
	Config.setNumAckers(conf, ackerNum);

	int workerNum = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_WORKERS),
			20);
	conf.put(Config.TOPOLOGY_WORKERS, workerNum);

	return  builder.createTopology();	
}
 
Example 27
Project: cloudpelican-lsd   File: SupervisorStatsWriterBolt.java   (4 votes)
public Map<String, Object> getComponentConfiguration() {
    Config conf = new Config();
    int tickFrequencyInSeconds = 10;
    conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, tickFrequencyInSeconds);
    return conf;
}
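Setting Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS in getComponentConfiguration() makes Storm deliver a system "tick" tuple to this bolt at the requested interval, which is how the stats writer gets its periodic trigger. execute() then has to tell ticks apart from data tuples; below is a minimal self-contained sketch. The flushStats() work is hypothetical, but the tick check itself is the standard backtype.storm idiom.

import java.util.Map;

import backtype.storm.Config;
import backtype.storm.Constants;
import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Tuple;

public class PeriodicFlushBolt extends BaseRichBolt {

    private OutputCollector collector;

    @Override
    public Map<String, Object> getComponentConfiguration() {
        Config conf = new Config();
        conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 10); // one tick every 10 seconds
        return conf;
    }

    @Override
    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
    }

    @Override
    public void execute(Tuple tuple) {
        if (Constants.SYSTEM_COMPONENT_ID.equals(tuple.getSourceComponent())
                && Constants.SYSTEM_TICK_STREAM_ID.equals(tuple.getSourceStreamId())) {
            flushStats(); // tick tuple: run the periodic work
        } else {
            collector.ack(tuple); // regular data tuple
        }
    }

    private void flushStats() {
        // hypothetical periodic work, e.g. writing aggregated stats
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // no output fields
    }
}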
 
Example 28
Project: docker-kafka-storm   File: WordCountTopology.java   (4 votes)
public static void main(String[] args) throws Exception {

        Config conf = new Config();
        String TOPOLOGY_NAME;

        if (args != null && args.length > 0) {
            TOPOLOGY_NAME = args[0];
            /**
             * Remote deployment as part of Docker Compose multi-application setup
             *
             * @TOPOLOGY_NAME:       Name of Storm topology
             * @ZK_HOST:             Host IP address of ZooKeeper
             * @ZK_PORT:             Port of ZooKeeper
             * @TOPIC:               Kafka Topic which this Storm topology is consuming from
             */
            LOG.info("Submitting topology " + TOPOLOGY_NAME + " to remote cluster.");
            String ZK_HOST = args[1];
            int ZK_PORT = Integer.parseInt(args[2]);
            String TOPIC = args[3];
            String NIMBUS_HOST = args[4];
            int NIMBUS_THRIFT_PORT = Integer.parseInt(args[5]);

            conf.setDebug(false);
            conf.setNumWorkers(2);
            conf.setMaxTaskParallelism(5);
            conf.put(Config.NIMBUS_HOST, NIMBUS_HOST);
            conf.put(Config.NIMBUS_THRIFT_PORT, NIMBUS_THRIFT_PORT);
            conf.put(Config.STORM_ZOOKEEPER_PORT, ZK_PORT);
            conf.put(Config.STORM_ZOOKEEPER_SERVERS, Arrays.asList(ZK_HOST));

            WordCountTopology wordCountTopology = new WordCountTopology(ZK_HOST, String.valueOf(ZK_PORT));
            StormSubmitter.submitTopology(TOPOLOGY_NAME, conf, wordCountTopology.buildTopology(TOPIC));

        }
        else {
            TOPOLOGY_NAME = "wordcount-topology";
            /**
             * Local mode (only for testing purposes)
             */
            LOG.info("Starting topology " + TOPOLOGY_NAME + " in LocalMode.");

            conf.setDebug(false);
            conf.setNumWorkers(2);
            conf.setMaxTaskParallelism(2);

            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology(TOPOLOGY_NAME, conf, new WordCountTopology().buildTopology());

            Thread.sleep(10000);
            cluster.shutdown();
        }
    }
 
Example 29
Project: cloudpelican-lsd   File: MatchBolt.java   (4 votes)
public Map<String, Object> getComponentConfiguration() {
    Config conf = new Config();
    int tickFrequencyInSeconds = 1;
    conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, tickFrequencyInSeconds);
    return conf;
}
 
Example 30
Project: yuzhouwan   File: ZkTopology.java   (4 votes)
public static void main(String[] args) {

        // This is the zookeeper.connect value from the Kafka configuration file; copy it from there
        String brokerZkStr = "10.100.90.201:2181/kafka_online_sample";
        String brokerZkPath = "/brokers";
        ZkHosts zkHosts = new ZkHosts(brokerZkStr, brokerZkPath);

        String topic = "mars-wap";
        // Below: the ZooKeeper cluster (and settings) to which offsets are reported
        String offsetZkServers = "10.199.203.169";
        String offsetZkPort = "2181";
        List<String> zkServersList = new ArrayList<>();
        zkServersList.add(offsetZkServers);
        // Root path under which offset information is reported
        String offsetZkRoot = "/stormExample";
        // Stores the consumer offsets for this spout id, e.g. named after the topology
        String offsetZkId = "storm-example";

        SpoutConfig kafkaConfig = new SpoutConfig(zkHosts, topic, offsetZkRoot, offsetZkId);
        kafkaConfig.zkPort = Integer.parseInt(offsetZkPort);
        kafkaConfig.zkServers = zkServersList;
        kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());

        KafkaSpout spout = new KafkaSpout(kafkaConfig);

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", spout, 1);
        builder.setBolt("bolt", new EsBolt("storm/docs"), 1).shuffleGrouping("spout");

        Config config = new Config();
        config.put("es.index.auto.create", "true");

        if (args.length > 0) {
            try {
                StormSubmitter.submitTopology("storm-kafka-example", config, builder.createTopology());
            } catch (Exception e) {
                LOG.error("error: {}", e.getMessage());
            }
        } else {
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("test", config, builder.createTopology());
        }
    }
 
Example 31
Project: aeolus   File: OrderedFileInputSpoutTest.java   (4 votes)
@Test
public void testMultiplePartitionsRandom() throws Exception {
	LinkedList<List<Object>> expectedResult = new LinkedList<List<Object>>();
	
	int size, number, totalInputSize = 0;
	
	final int stepSizeRange = 1 + this.r.nextInt(6);
	
	for(int i = 1; i <= 3; ++i) {
		FileReader fileReaderMock = PowerMockito.mock(FileReader.class);
		PowerMockito.whenNew(FileReader.class).withArguments("dummyFileName-" + i).thenReturn(fileReaderMock);
		
		BufferedReader bufferedReaderMock = PowerMockito.mock(BufferedReader.class);
		PowerMockito.whenNew(BufferedReader.class).withArguments(fileReaderMock).thenReturn(bufferedReaderMock);
		
		OngoingStubbing<String> stub = when(bufferedReaderMock.readLine());
		
		size = 20 + this.r.nextInt(200);
		totalInputSize += size;
		number = 0;
		for(int j = 0; j < size; ++j) {
			number += this.r.nextInt(stepSizeRange);
			String line = "sid" + j + "," + number + ",dummy" + i;
			stub = stub.thenReturn(line);
			expectedResult.add(new Values(new Long(number), line));
		}
		stub = stub.thenReturn(null);
	}
	Collections.sort(expectedResult, new TimestampComperator());
	
	TestOrderedFileInputSpout spout = new TestOrderedFileInputSpout();
	
	Config conf = new Config();
	conf.put(TestOrderedFileInputSpout.INPUT_FILE_NAME, "dummyFileName-");
	conf.put(TestOrderedFileInputSpout.INPUT_FILE_SUFFIXES, Arrays.asList(new String[] {"1", "2", "3"}));
	
	List<Integer> taskMock = new LinkedList<Integer>();
	taskMock.add(new Integer(0));
	TopologyContext contextMock = mock(TopologyContext.class);
	when(contextMock.getComponentTasks(anyString())).thenReturn(taskMock);
	when(new Integer(contextMock.getThisTaskIndex())).thenReturn(new Integer(0));
	
	TestSpoutOutputCollector col = new TestSpoutOutputCollector();
	
	spout.open(conf, contextMock, new SpoutOutputCollector(col));
	
	for(int i = 0; i < totalInputSize + 5; ++i) {
		spout.nextTuple();
	}
	
	Assert.assertEquals(1, col.output.size());
	Assert.assertNotEquals(null, col.output.get(Utils.DEFAULT_STREAM_ID));
	Assert.assertEquals(totalInputSize, col.output.get(Utils.DEFAULT_STREAM_ID).size());
	
	while(expectedResult.size() > 0) {
		Set<List<Object>> expectedSubset = new HashSet<List<Object>>();
		Set<List<Object>> resultSubset = new HashSet<List<Object>>();
		long ts;
		do {
			ts = ((Long)expectedResult.getFirst().get(0)).longValue();
			expectedSubset.add(expectedResult.removeFirst());
			resultSubset.add(col.output.get(Utils.DEFAULT_STREAM_ID).removeFirst());
		} while(expectedResult.size() > 0 && ts == ((Long)expectedResult.getFirst().get(0)).longValue());
		
		Assert.assertEquals(expectedSubset, resultSubset);
	}
}
 
Example 32
Project: cloudpelican-lsd   File: OutlierDetectionBolt.java   (4 votes)
public Map<String, Object> getComponentConfiguration() {
    Config conf = new Config();
    int tickFrequencyInSeconds = 60;
    conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, tickFrequencyInSeconds);
    return conf;
}
 
Example 33
Project: aeolus   File: FileOutputBoltTest.java   (4 votes)
@Test
public void testExecute() throws Exception {
	final LinkedList<String> expectedResult = new LinkedList<String>();
	final LinkedList<String> result = new LinkedList<String>();
	final LinkedList<Tuple> input = new LinkedList<Tuple>();
	
	
	Config conf = new Config();
	String dummyDir = "dummyDir";
	String dummyFile = "dummyFile";
	
	String usedDir = ".";
	String usedFile = "result.dat";
	switch(this.r.nextInt(4)) {
	case 0:
		conf.put(TestFileOutputBolt.OUTPUT_DIR_NAME, dummyDir);
		usedDir = dummyDir;
		break;
	case 1:
		conf.put(TestFileOutputBolt.OUTPUT_FILE_NAME, dummyFile);
		usedFile = dummyFile;
		break;
	case 2:
		conf.put(TestFileOutputBolt.OUTPUT_DIR_NAME, dummyDir);
		conf.put(TestFileOutputBolt.OUTPUT_FILE_NAME, dummyFile);
		usedDir = dummyDir;
		usedFile = dummyFile;
		break;
	default:
	}
	
	FileWriter fileWriterMock = PowerMockito.mock(FileWriter.class);
	PowerMockito.whenNew(FileWriter.class).withArguments(usedDir + File.separator + usedFile)
		.thenReturn(fileWriterMock);
	
	BufferedWriter dummyWriter = new BufferedWriter(fileWriterMock) {
		@Override
		public void write(String s) {
			result.add(s);
		}
	};
	PowerMockito.whenNew(BufferedWriter.class).withArguments(fileWriterMock).thenReturn(dummyWriter);
	
	
	TestFileOutputBolt bolt = new TestFileOutputBolt();
	TestOutputCollector collector = new TestOutputCollector();
	bolt.prepare(conf, null, new OutputCollector(collector));
	
	GeneralTopologyContext context = mock(GeneralTopologyContext.class);
	when(context.getComponentOutputFields(anyString(), anyString())).thenReturn(new Fields("dummy"));
	when(context.getComponentId(anyInt())).thenReturn("componentID");
	
	final int numberOfLines = 20;
	for(int i = 0; i < numberOfLines; ++i) {
		TupleImpl t = new TupleImpl(context, new Values(new Integer(this.r.nextInt())), 0, null);
		input.add(t);
		expectedResult.add(t.toString());
		bolt.execute(t);
	}
	
	Assert.assertEquals(expectedResult, result);
	Assert.assertEquals(input, collector.acked);
}
 
Example 34
Project: aeolus   File: OrderedFileInputSpoutTest.java   (4 votes)
@Test
public void testMultiplePartitionsStrict() throws Exception {
	LinkedList<List<Object>> expectedResult = new LinkedList<List<Object>>();
	
	final int numberOfLines = 20;
	for(int i = 1; i <= 3; ++i) {
		FileReader fileReaderMock = PowerMockito.mock(FileReader.class);
		PowerMockito.whenNew(FileReader.class).withArguments("dummyFileName-" + i).thenReturn(fileReaderMock);
		
		BufferedReader bufferedReaderMock = PowerMockito.mock(BufferedReader.class);
		PowerMockito.whenNew(BufferedReader.class).withArguments(fileReaderMock).thenReturn(bufferedReaderMock);
		
		OngoingStubbing<String> stub = when(bufferedReaderMock.readLine());
		for(int j = 0; j < numberOfLines; ++j) {
			String line = "sid" + j + "," + j + ",dummy" + i;
			stub = stub.thenReturn(line);
			expectedResult.add(new Values(new Long(j), line));
		}
		stub = stub.thenReturn(null);
	}
	Collections.sort(expectedResult, new TimestampComperator());
	
	TestOrderedFileInputSpout spout = new TestOrderedFileInputSpout();
	
	Config conf = new Config();
	conf.put(TestOrderedFileInputSpout.INPUT_FILE_NAME, "dummyFileName-");
	conf.put(TestOrderedFileInputSpout.INPUT_FILE_SUFFIXES, Arrays.asList(new String[] {"1", "2", "3"}));
	
	List<Integer> taskMock = new LinkedList<Integer>();
	taskMock.add(new Integer(0));
	TopologyContext contextMock = mock(TopologyContext.class);
	when(contextMock.getComponentTasks(anyString())).thenReturn(taskMock);
	when(new Integer(contextMock.getThisTaskIndex())).thenReturn(new Integer(0));
	
	TestSpoutOutputCollector col = new TestSpoutOutputCollector();
	
	spout.open(conf, contextMock, new SpoutOutputCollector(col));
	
	for(int i = 0; i < 3 * numberOfLines + 5; ++i) {
		spout.nextTuple();
	}
	
	Assert.assertEquals(1, col.output.size());
	Assert.assertNotEquals(null, col.output.get(Utils.DEFAULT_STREAM_ID));
	Assert.assertEquals(3 * numberOfLines, col.output.get(Utils.DEFAULT_STREAM_ID).size());
	
	for(int i = 0; i < numberOfLines; ++i) {
		Set<List<Object>> expectedSubset = new HashSet<List<Object>>();
		Set<List<Object>> resultSubset = new HashSet<List<Object>>();
		for(int j = 0; j < 3; ++j) {
			expectedSubset.add(expectedResult.removeFirst());
			resultSubset.add(col.output.get(Utils.DEFAULT_STREAM_ID).removeFirst());
		}
		Assert.assertEquals(expectedSubset, resultSubset);
	}
}