Java Code Examples for backtype.storm.tuple.Fields

The following are top voted examples showing how to use backtype.storm.tuple.Fields. The examples are extracted from open source projects. You can vote up the examples you like; your votes help surface more useful examples.
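
A Fields instance is essentially an ordered, immutable list of tuple field names. As a quick orientation before the project examples, the following is a minimal standalone sketch (written for this page, not taken from any of the projects below) of the basic accessors size, get, fieldIndex, contains, and select:

import backtype.storm.tuple.Fields;

import java.util.Arrays;
import java.util.List;

public class FieldsDemo {
    public static void main(String[] args) {
        // An ordered, immutable list of field names.
        Fields fields = new Fields("word", "count");

        System.out.println(fields.size());              // 2
        System.out.println(fields.get(0));              // word
        System.out.println(fields.fieldIndex("count")); // 1
        System.out.println(fields.contains("word"));    // true

        // select() picks the values matching the selector out of a
        // tuple's values, in the selector's order.
        List<Object> values = Arrays.<Object>asList("storm", 42);
        System.out.println(fields.select(new Fields("count"), values)); // [42]
    }
}
 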
Example 1
Project: preliminary.demo   File: RaceTopology.java   (6 votes)
public static void main(String[] args) throws Exception {

    Config conf = new Config();
    int spout_Parallelism_hint = 1;
    int split_Parallelism_hint = 2;
    int count_Parallelism_hint = 2;

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new RaceSentenceSpout(), spout_Parallelism_hint);
    builder.setBolt("split", new SplitSentence(), split_Parallelism_hint).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), count_Parallelism_hint).fieldsGrouping("split", new Fields("word"));
    String topologyName = RaceConfig.JstormTopologyName;

    try {
        StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Example 2
Project: es-hadoop-v2.2.0   File: AbstractStormSimpleBoltTests.java   (6 votes)
@Test
public void testSimpleWriteTopology() throws Exception {
    List doc1 = Collections.singletonList(ImmutableMap.of("one", 1, "two", 2));
    List doc2 = Collections.singletonList(ImmutableMap.of("OTP", "Otopeni", "SFO", "San Fran"));

    String target = index + "/simple-write";
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("test-spout-1", new TestSpout(ImmutableList.of(doc2, doc1), new Fields("doc")));
    builder.setBolt("es-bolt-1", new TestBolt(new EsBolt(target, conf))).shuffleGrouping("test-spout-1");

    MultiIndexSpoutStormSuite.run(index + "simple", builder.createTopology(), COMPONENT_HAS_COMPLETED);

    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(10));

    RestUtils.refresh(index);
    assertTrue(RestUtils.exists(target));
    String results = RestUtils.get(target + "/_search?");
    assertThat(results, containsString("SFO"));
}
 
Example 3
Project: es-hadoop-v2.2.0   File: AbstractStormJsonSimpleBoltTests.java   (6 votes)
@Test
public void testSimpleWriteTopology() throws Exception {
    List doc1 = Collections.singletonList("{\"reason\" : \"business\",\"airport\" : \"SFO\"}");
    List doc2 = Collections.singletonList("{\"participants\" : 5,\"airport\" : \"OTP\"}");

    String target = index + "/json-simple-write";
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("test-spout-1", new TestSpout(ImmutableList.of(doc1, doc2), new Fields("json")));
    builder.setBolt("es-bolt-1", new TestBolt(new EsBolt(target, conf))).shuffleGrouping("test-spout-1");

    MultiIndexSpoutStormSuite.run(index + "json-simple", builder.createTopology(), COMPONENT_HAS_COMPLETED);

    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(10));

    RestUtils.refresh(index);
    assertTrue(RestUtils.exists(target));
    String results = RestUtils.get(target + "/_search?");
    assertThat(results, containsString("SFO"));
}
 
Example 4
Project: es-hadoop-v2.2.0   File: AbstractStormIndexPatternBoltTests.java   (6 votes)
@Test
public void test1WriteIndexPattern() throws Exception {
    List doc1 = ImmutableList.of("one", "1", "two", "2", "number", 1);
    List doc2 = ImmutableList.of("OTP", "Otopeni", "SFO", "San Fran", "number", 2);

    String target = index + "/write-{number}";
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("test-spout-3", new TestSpout(ImmutableList.of(doc2, doc1), new Fields("key1", "val1", "key2",
            "val2", "key3", "number")));
    builder.setBolt("es-bolt-3", new TestBolt(new EsBolt(target, conf))).shuffleGrouping("test-spout-3");

    MultiIndexSpoutStormSuite.run(index + "write-pattern", builder.createTopology(), COMPONENT_HAS_COMPLETED);

    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(20));

    Thread.sleep(1000);
    RestUtils.refresh(index);
    assertTrue(RestUtils.exists(index + "/write-1"));
    assertTrue(RestUtils.exists(index + "/write-2"));

    String results = RestUtils.get(index + "/write-1" + "/_search?");
    assertThat(results, containsString("two"));

    results = RestUtils.get(index + "/write-2" + "/_search?");
    assertThat(results, containsString("SFO"));
}
 
Example 5
Project: rb-bi   File: TopologyFunctionTest.java   (5 votes)
@Test
public void macVendorTest() throws FileNotFoundException {

    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/flows.json").getPath());
    File checkFlow = new File(Thread.currentThread().getContextClassLoader().getResource("dataCheck/macVendorFlows.json").getPath());

    Scanner flows = new Scanner(fileFlow);
    Scanner checkFlows = new Scanner(checkFlow);

    MacVendorFunction._ouiFilePath = Thread.currentThread().getContextClassLoader().getResource("db/oui-vendors").getPath();

    List<String> fieldsFlow = new ArrayList<String>();

    fieldsFlow.add("flows");
    fieldsFlow.add("macVendor");

    LocalDRPC drpc = new LocalDRPC();

    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new MacVendorFunction(), new Fields("macVendor"))
            .each(new Fields(fieldsFlow), new MergeMapsFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        Assert.assertEquals(checkFlows.nextLine(), stormFlow);
    }
}
 
Example 6
Project: rb-bi   File: TopologyFunctionTest.java   (5 votes)
@Test
public void nonTimestampTest() throws FileNotFoundException {

    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/nonTimestampFlows.json").getPath());

    Scanner flows = new Scanner(fileFlow);


    List<String> fieldsFlow = new ArrayList<String>();

    fieldsFlow.add("flows");

    LocalDRPC drpc = new LocalDRPC();

    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new CheckTimestampFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        Assert.assertEquals(true, stormFlow.contains("timestamp"));
    }
}
 
Example 7
Project: rb-bi   File: TopologyFunctionTest.java   (5 votes)
@Test
public void analizeHttpUrlTest() throws FileNotFoundException {

    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/httpFlows.json").getPath());
    File checkFlow = new File(Thread.currentThread().getContextClassLoader().getResource("dataCheck/httpFlows.json").getPath());

    Scanner flows = new Scanner(fileFlow);
    Scanner checkFlows = new Scanner(checkFlow);

    List<String> fieldsFlow = new ArrayList<String>();

    fieldsFlow.add("flows");
    fieldsFlow.add("httpUrlMap");

    LocalDRPC drpc = new LocalDRPC();

    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new AnalizeHttpUrlFunction(), new Fields("httpUrlMap"))
            .each(new Fields(fieldsFlow), new MergeMapsFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        Assert.assertEquals(checkFlows.nextLine(), stormFlow);
    }
}
 
Example 8
Project: sourcevirtues-samples   File: MorphlinesBolt.java   (5 votes)
@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
    LOG.info("START declareOutputFields");
    if (terminalBolt || EmptyUtils.nullOrEmpty(outputFieldNames)) {
        LOG.info("This is TERMINAL BOLT");
        terminalBolt = true;
        return;
    }

    declarer.declare(new Fields(outputFieldNames));
}
 
Example 9
Project: storm-demos   File: CalculateBolt.java   (5 votes)
public void declareOutputFields(OutputFieldsDeclarer declarer) {
	declarer.declareStream(ChannelTopology.TRANSFER_STREAM, 
			new Fields("channel","code","timestamp","num","ratio"));
	declarer.declareStream(ChannelTopology.OPENTSDB_STREAM, 
			new Fields("channel","code","timestamp","num","ratio"));	
	declarer.declareStream(ChannelTopology.HBASE_STREAM, 
			new Fields("rowkey","column","columnvalue"));
}
 
Example 10
Project: preliminary.demo   File: RaceTopologyLocal.java   (5 votes)
public static void main(String[] args) {
    LocalCluster cluster = new LocalCluster();

    /* begin young-define */
    Config conf = new Config();
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new SpoutLocal(), 1);
    builder.setBolt("split", new SplitSentenceLocal(), 1).shuffleGrouping("spout");
    builder.setBolt("count", new WordCountLocal(), 1).fieldsGrouping("split", new Fields("word"));
    /* end young-define */

    // Recommended: cap the parallelism so every bolt/spout runs with a parallelism of 1
    conf.put(Config.TOPOLOGY_MAX_TASK_PARALLELISM, 1);

    // Submit the topology
    cluster.submitTopology("SequenceTest", conf, builder.createTopology());

    // Wait one minute, then stop the topology and the cluster;
    // increase this value as needed while debugging
    try {
        Thread.sleep(60000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    // Kill the topology
    cluster.killTopology("SequenceTest");

    cluster.shutdown();
}
 
Example 11
Project: hadooparchitecturebook   File: MovingAvgLocalTopologyRunner.java   (5 votes)
public static void main(String[] args) 
    throws Exception {
  
  Config conf = new Config();
  LocalCluster cluster = new LocalCluster();
  
  TridentTopology topology = new TridentTopology();

  Stream movingAvgStream =
    topology.newStream("ticks-spout", buildSpout())
    .each(new Fields("stock-ticks"), new TickParser(), new Fields("price"))
    .aggregate(new Fields("price"), new CalculateAverage(), new Fields("count"));

  cluster.submitTopology("moving-avg", conf, topology.build());
}
 
Example 12
Project: miner   File: TopologyMain.java   (5 votes)
public static void main(String[] args) {
	try{
		TopologyBuilder topologyBuilder = new TopologyBuilder();
		topologyBuilder.setSpout("spout-number", new ProduceRecordSpout(Type.NUMBER, new String[]{"111 222 333", "80966 31"}), 1);
		topologyBuilder.setSpout("spout-string", new ProduceRecordSpout(Type.STRING, new String[]{"abc ddd fasko", "hello the world"}), 1);
		topologyBuilder.setSpout("spout-sign", new ProduceRecordSpout(Type.SIGN, new String[]{"++ -*% *** @@", "{+-} ^#######"}), 1);

		topologyBuilder.setBolt("bolt-splitter", new SplitRecordBolt(), 2)
				.shuffleGrouping("spout-number")
				.shuffleGrouping("spout-string")
				.shuffleGrouping("spout-sign");

		topologyBuilder.setBolt("bolt-distributor", new DistributeWordByTypeBolt(), 1)
				.fieldsGrouping("bolt-splitter", new Fields("type"));

		topologyBuilder.setBolt("bolt-number-saver", new SaveDataBolt(Type.NUMBER), 1)
				.shuffleGrouping("bolt-distributor", "stream-number-saver");
		topologyBuilder.setBolt("bolt-string-saver", new SaveDataBolt(Type.STRING), 1)
				.shuffleGrouping("bolt-distributor", "stream-string-saver");
		topologyBuilder.setBolt("bolt-sign-saver", new SaveDataBolt(Type.SIGN), 1)
				.shuffleGrouping("bolt-distributor", "stream-sign-saver");

		Config config = new Config();
		config.setDebug(false);
		
		if(args != null && args.length>0){
			config.setNumWorkers(4);
			StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
		}else{
			config.setMaxTaskParallelism(2);
			LocalCluster cluster = new LocalCluster();
			cluster.submitTopology("test", config, topologyBuilder.createTopology());
		}
		
	}catch(Exception e){
		e.printStackTrace();
	}
}
 
Example 13
Project: java   File: DeliveryCheckBolt.java   (5 votes)
@Override
public void declareOutputFields(OutputFieldsDeclarer arg0) {
  arg0.declare(new Fields("trade"));
  arg0.declareStream("oddstream", new Fields("trade"));
  arg0.declareStream("evenstream", new Fields("trade"));

}
 
Example 14
Project: es-hadoop-v2.2.0   File: TestSpout.java   (5 votes)
public TestSpout(List<List> tuples, Fields output) {
    this.tuples = tuples;
    this.fields = output;
    this.spout = null;
    DONE_TUPLE = new ArrayList(output.size());
    for (int i = 0; i < output.size(); i++) {
        DONE_TUPLE.add(DONE);
    }
}
 
Example 15
Project: storm-demo   File: DelimitedRecordFormat.java   (5 votes)
@Override
public byte[] format(Tuple tuple) {
    StringBuilder sb = new StringBuilder();
    Fields fields = this.fields == null ? tuple.getFields() : this.fields;
    int size = fields.size();
    for(int i = 0; i < size; i++){
        sb.append(tuple.getValueByField(fields.get(i)));
        if(i != size - 1){
            sb.append(this.fieldDelimiter);
        }
    }
    sb.append(this.recordDelimiter);
    return sb.toString().getBytes();
}
 
Example 16
Project: RealEstate-Streaming   File: RouteBolt.java   (5 votes)
@Override
public void declareOutputFields(OutputFieldsDeclarer arg0) {
	arg0.declare(new Fields("title", 
			                "link",
			                "description",
			                "pubDate",
			                "thumbnail"));
}
 
Example 17
Project: Practical-Real-time-Processing-and-Analytics   File: FileSpout.java   (4 votes)
@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
	Fields schema = new Fields("line");
	declarer.declare(schema);
}
 
Example 18
Project: Practical-Real-time-Processing-and-Analytics   File: ParserBolt.java   (4 votes)
@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
	declarer.declareStream("tdrstream", new Fields("tdrstream"));
}
 
Example 19
Project: Mastering-Apache-Storm   File: SampleSpout.java   (4 votes)
public void declareOutputFields(OutputFieldsDeclarer declarer) {
	// declare the output fields: firstName, lastName and companyName.
	declarer.declare(new Fields("firstName","lastName","companyName"));
}
 
Example 20
Project: rb-bi   File: TransactionalTridentKafkaSpout.java   (4 votes)
@Override
public Fields getOutputFields() {
    return _config.scheme.getOutputFields();
}
 
Example 21
Project: rb-bi   File: OpaqueTridentKafkaSpout.java   (4 votes)
@Override
public Fields getOutputFields() {
    return _config.scheme.getOutputFields();
}
 
Example 22
Project: rb-bi   File: StringScheme.java   (4 votes)
public Fields getOutputFields() {
    return new Fields(STRING_SCHEME_KEY);
}
 
Example 23
Project: rb-bi   File: RedBorderTopology.java   (4 votes)
private static TridentState persist(String topic, Stream s, String field) {
    String outputTopic = _config.getOutputTopic(topic);
    int partitions = _config.tranquilityPartitions(topic);
    int replication = _config.tranquilityReplication();
    TridentState ret;

    if (outputTopic != null) {
        int flowPrePartitions = _config.getKafkaPartitions(outputTopic);

        ret = s.each(new Fields(field), new MapToJSONFunction(), new Fields("jsonString"))
                .partitionPersist(KafkaState.nonTransactional(_config.getZkHost()),
                        new Fields("jsonString"), new KafkaStateUpdater("jsonString", outputTopic))
                .parallelismHint(flowPrePartitions);
    } else {
        BeamFactory bf;
        TridentBeamStateFactory druidState = null;
        String zkHost = _config.getZkHost();

        switch (topic) {
            case "traffics":
                bf = new BeamFlow(partitions, replication, zkHost, _config.getMaxRows());
                druidState = new TridentBeamStateFactory<>(bf);
                break;
            case "events":
                bf = new BeamEvent(partitions, replication, zkHost, _config.getMaxRows());
                druidState = new TridentBeamStateFactory<>(bf);
                break;
            case "monitor":

                bf = new BeamMonitor(partitions, replication, zkHost, _config.getMaxRows());
                druidState = new TridentBeamStateFactory<>(bf);
                break;
            default:
                System.out.println("Tranquility beams not defined!");
                break;
        }

        ret = s.partitionPersist(druidState, new Fields(field), new TridentBeamStateUpdater())
                .parallelismHint(partitions);
    }

    return ret;
}
 
Example 24
Project: rb-bi   File: TopologyFunctionTest.java   (4 votes)
@Test
public void geoIpTest() throws FileNotFoundException {

    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/flows.json").getPath());
    File checkFlow = new File(Thread.currentThread().getContextClassLoader().getResource("dataCheck/geoIpFlows.json").getPath());

    Scanner flows = new Scanner(fileFlow);
    Scanner checkFlows = new Scanner(checkFlow);

    GeoIpFunction.CITY_DB_PATH = Thread.currentThread().getContextClassLoader().getResource("db/city.dat").getPath();
    GeoIpFunction.CITY_V6_DB_PATH = Thread.currentThread().getContextClassLoader().getResource("db/cityv6.dat").getPath();
    GeoIpFunction.ASN_DB_PATH = Thread.currentThread().getContextClassLoader().getResource("db/asn.dat").getPath();
    GeoIpFunction.ASN_V6_DB_PATH = Thread.currentThread().getContextClassLoader().getResource("db/asnv6.dat").getPath();

    List<String> fieldsFlow = new ArrayList<String>();

    fieldsFlow.add("flows");
    fieldsFlow.add("geoIPMap");

    LocalDRPC drpc = new LocalDRPC();

    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new GeoIpFunction(), new Fields("geoIPMap"))
            .each(new Fields(fieldsFlow), new MergeMapsFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        //System.out.println(stormFlow);
        Assert.assertEquals(checkFlows.nextLine(), stormFlow);
    }
}
 
Example 25
Project: storm-scheduler   File: UuidSpout.java   (4 votes)
@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("keyfield"));
}
 
Example 26
Project: storm-scheduler   File: NothingPayloadBolt.java   (4 votes)
@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("keyfield", "payload"));
}
 
Example 27
Project: storm-scheduler   File: NothingBolt.java   (4 votes)
@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("keyfield"));
}
 
Example 28
Project: storm-scheduler   File: RandomSpout.java   (4 votes)
@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("keyfield"));
}
 
Example 29
Project: storm-scheduler   File: UuidPayloadSpout.java   (4 votes)
@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("keyfield", "payload"));
}
 
Example 30
Project: storm-hbase-1.0.x   File: SimpleTridentHBaseMapper.java   (4 votes)
public SimpleTridentHBaseMapper withColumnFields(Fields columnFields){
    this.columnFields = columnFields;
    return this;
}
 
Example 31
Project: storm-hbase-1.0.x   File: SimpleTridentHBaseMapper.java   (4 votes)
public SimpleTridentHBaseMapper withCounterFields(Fields counterFields){
    this.counterFields = counterFields;
    return this;
}
 
Example 32
Project: storm-hbase-1.0.x   File: SimpleHBaseMapper.java   (4 votes)
public SimpleHBaseMapper withColumnFields(Fields columnFields){
    this.columnFields = columnFields;
    return this;
}
 
Example 33
Project: storm-hbase-1.0.x   File: SimpleHBaseMapper.java   (4 votes)
public SimpleHBaseMapper withCounterFields(Fields counterFields){
    this.counterFields = counterFields;
    return this;
}
 
Example 34
Project: storm-hbase-1.0.x   File: WordCountTrident.java   (4 votes)
public static StormTopology buildTopology(String hbaseRoot){
    Fields fields = new Fields("word", "count");
    FixedBatchSpout spout = new FixedBatchSpout(fields, 4,
            new Values("storm", 1),
            new Values("trident", 1),
            new Values("needs", 1),
            new Values("javadoc", 1)
    );
    spout.setCycle(true);

    TridentHBaseMapper tridentHBaseMapper = new SimpleTridentHBaseMapper()
            .withColumnFamily("cf")
            .withColumnFields(new Fields("word"))
            .withCounterFields(new Fields("count"))
            .withRowKeyField("word");

    HBaseValueMapper rowToStormValueMapper = new WordCountValueMapper();

    HBaseProjectionCriteria projectionCriteria = new HBaseProjectionCriteria();
    projectionCriteria.addColumn(new HBaseProjectionCriteria.ColumnMetaData("cf", "count"));

    HBaseState.Options options = new HBaseState.Options()
            .withConfigKey(hbaseRoot)
            .withDurability(Durability.SYNC_WAL)
            .withMapper(tridentHBaseMapper)
            .withProjectionCriteria(projectionCriteria)
            .withRowToStormValueMapper(rowToStormValueMapper)
            .withTableName("WordCount");

    StateFactory factory = new HBaseStateFactory(options);

    TridentTopology topology = new TridentTopology();
    Stream stream = topology.newStream("spout1", spout);

    stream.partitionPersist(factory, fields,  new HBaseUpdater(), new Fields());

    TridentState state = topology.newStaticState(factory);
    stream = stream.stateQuery(state, new Fields("word"), new HBaseQuery(), new Fields("columnName","columnValue"));
    stream.each(new Fields("word","columnValue"), new PrintFunction(), new Fields());
    return topology.build();
}
 
Example 35
Project: java   File: TradeCollectorSpout.java   (4 votes)
@Override
public void declareOutputFields(OutputFieldsDeclarer arg0) {
  arg0.declare(new Fields(TRD_FIELDS));

}
 
Example 36
Project: storm-hbase-1.0.x   File: WordCountValueMapper.java   (4 votes)
public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("columnName","columnValue"));
}
 
Example 37
Project: storm-hbase-1.0.x   File: PersistentWordCount.java   (4 votes)
public static void main(String[] args) throws Exception {
    Config config = new Config();

    Map<String, Object> hbConf = new HashMap<String, Object>();
    if(args.length > 0){
        hbConf.put("hbase.rootdir", args[0]);
    }
    config.put("hbase.conf", hbConf);

    WordSpout spout = new WordSpout();
    WordCounter bolt = new WordCounter();

    SimpleHBaseMapper mapper = new SimpleHBaseMapper()
            .withRowKeyField("word")
            .withColumnFields(new Fields("word"))
            .withCounterFields(new Fields("count"))
            .withColumnFamily("cf");

    HBaseBolt hbase = new HBaseBolt("WordCount", mapper)
            .withConfigKey("hbase.conf");


    // wordSpout ==> countBolt ==> HBaseBolt
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout(WORD_SPOUT, spout, 1);
    builder.setBolt(COUNT_BOLT, bolt, 1).shuffleGrouping(WORD_SPOUT);
    builder.setBolt(HBASE_BOLT, hbase, 1).fieldsGrouping(COUNT_BOLT, new Fields("word"));


    if (args.length == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", config, builder.createTopology());
        Thread.sleep(30000);
        cluster.killTopology("test");
        cluster.shutdown();
        System.exit(0);
    } else if (args.length == 2) {
        StormSubmitter.submitTopology(args[1], config, builder.createTopology());
    } else{
        System.out.println("Usage: HdfsFileTopology <hdfs url> [topology name]");
    }
}
 
Example 38
Project: storm-hbase-1.0.x   File: LookupWordCount.java   (4 votes)
public static void main(String[] args) throws Exception {
    Config config = new Config();

    Map<String, Object> hbConf = new HashMap<String, Object>();
    if(args.length > 0){
        hbConf.put("hbase.rootdir", args[0]);
    }
    config.put("hbase.conf", hbConf);

    WordSpout spout = new WordSpout();
    TotalWordCounter totalBolt = new TotalWordCounter();

    SimpleHBaseMapper mapper = new SimpleHBaseMapper().withRowKeyField("word");
    HBaseProjectionCriteria projectionCriteria = new HBaseProjectionCriteria();
    projectionCriteria.addColumn(new HBaseProjectionCriteria.ColumnMetaData("cf", "count"));

    WordCountValueMapper rowToTupleMapper = new WordCountValueMapper();

    HBaseLookupBolt hBaseLookupBolt = new HBaseLookupBolt("WordCount", mapper, rowToTupleMapper)
            .withConfigKey("hbase.conf")
            .withProjectionCriteria(projectionCriteria);

    //wordspout -> lookupbolt -> totalCountBolt
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(WORD_SPOUT, spout, 1);
    builder.setBolt(LOOKUP_BOLT, hBaseLookupBolt, 1).shuffleGrouping(WORD_SPOUT);
    builder.setBolt(TOTAL_COUNT_BOLT, totalBolt, 1).fieldsGrouping(LOOKUP_BOLT, new Fields("columnName"));

    if (args.length == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", config, builder.createTopology());
        Thread.sleep(30000);
        cluster.killTopology("test");
        cluster.shutdown();
        System.exit(0);
    } else if (args.length == 2) {
        StormSubmitter.submitTopology(args[1], config, builder.createTopology());
    } else{
        System.out.println("Usage: LookupWordCount <hbase.rootdir>");
    }
}