org.elasticsearch.storm.EsBolt Java Examples

The following examples show how to use org.elasticsearch.storm.EsBolt. Each example is drawn from an open-source project; the originating source file and license are noted above the code.
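All of the examples follow the same basic pattern: an upstream spout (or bolt) emits tuples and an EsBolt indexes them into a configured Elasticsearch resource. The minimal sketch below illustrates that pattern; the class, spout, node address, and index names are placeholders rather than code from any of the projects that follow, and the org.apache.storm package names assume Storm 1.x or later (older releases use backtype.storm).

import java.util.HashMap;
import java.util.Map;

import org.apache.storm.generated.StormTopology;
import org.apache.storm.topology.IRichSpout;
import org.apache.storm.topology.TopologyBuilder;
import org.elasticsearch.storm.EsBolt;

public class MinimalEsBoltTopology {

    public static StormTopology build(IRichSpout jsonSpout) {
        // Connector settings; see the es-hadoop configuration reference for the full list.
        Map<String, Object> conf = new HashMap<>();
        conf.put("es.nodes", "localhost");   // Elasticsearch host(s) to connect to (placeholder)
        conf.put("es.input.json", "true");   // tuples already carry serialized JSON documents

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("json-spout", jsonSpout);
        // EsBolt(target, configuration): target names the index (and type) to write to.
        builder.setBolt("es-bolt", new EsBolt("my-index/my-type", conf))
                .shuffleGrouping("json-spout");
        return builder.createTopology();
    }
}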
Example #1
Source File: CreditCardTopologyBuilder.java    From yuzhouwan with Apache License 2.0
public static StormTopology build() {

    String json1 = "{\"reason\" : \"business\",\"airport\" : \"SFO\"}";
    String json2 = "{\"participants\" : 5,\"airport\" : \"OTP\"}";

    Map<String, Object> conf = new HashMap<>();
    /*
     * Configuration: https://www.elastic.co/guide/en/elasticsearch/hadoop/current/configuration.html
     */
    conf.put("es.nodes", "192.168.1.101");
    conf.put("es.port", 9200);
    conf.put("es.input.json", "true");
    conf.put("es.batch.size.entries", "100");

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("json-spout", new StringSpout(json1, json2));
    builder.setBolt("es-bolt", new EsBolt("storm/json-trips", conf)).shuffleGrouping("json-spout");

    return builder.createTopology();
}
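The StringSpout used above comes from the same yuzhouwan project and is not shown here. As a rough idea of what such a spout does, the hypothetical sketch below emits each supplied JSON string once on a single "json" field, which EsBolt then indexes as-is because es.input.json is set to true; it is not the actual yuzhouwan implementation, and the imports follow the same Storm 1.x package assumption as the sketch above.

import java.util.Map;

import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;

public class JsonStringSpout extends BaseRichSpout {

    private final String[] docs;
    private transient SpoutOutputCollector collector;
    private int next = 0;

    public JsonStringSpout(String... docs) {
        this.docs = docs;
    }

    @Override
    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
        this.collector = collector;
    }

    @Override
    public void nextTuple() {
        // Emit each JSON document exactly once; EsBolt reads the single "json" field.
        if (next < docs.length) {
            collector.emit(new Values(docs[next++]));
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("json"));
    }
}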
 
Example #2
Source File: AbstractStormSimpleBoltTests.java    From elasticsearch-hadoop with Apache License 2.0
@Test
public void testSimpleWriteTopology() throws Exception {
    List doc1 = Collections.singletonList(ImmutableMap.of("one", 1, "two", 2));
    List doc2 = Collections.singletonList(ImmutableMap.of("OTP", "Otopeni", "SFO", "San Fran"));

    String target = index + "/simple-write";
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("test-spout-1", new TestSpout(ImmutableList.of(doc2, doc1), new Fields("doc")));
    builder.setBolt("es-bolt-1", new TestBolt(new EsBolt(target, conf))).shuffleGrouping("test-spout-1");

    MultiIndexSpoutStormSuite.run(index + "simple", builder.createTopology(), COMPONENT_HAS_COMPLETED);

    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(10));

    RestUtils.refresh(index);
    assertTrue(RestUtils.exists(target));
    String results = RestUtils.get(target + "/_search?");
    assertThat(results, containsString("SFO"));
}
 
Example #3
Source File: AbstractStormJsonSimpleBoltTests.java    From elasticsearch-hadoop with Apache License 2.0
@Test
public void testSimpleWriteTopology() throws Exception {
    List doc1 = Collections.singletonList("{\"reason\" : \"business\",\"airport\" : \"SFO\"}");
    List doc2 = Collections.singletonList("{\"participants\" : 5,\"airport\" : \"OTP\"}");

    String target = index + "/json-simple-write";
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("test-spout-1", new TestSpout(ImmutableList.of(doc1, doc2), new Fields("json")));
    builder.setBolt("es-bolt-1", new TestBolt(new EsBolt(target, conf))).shuffleGrouping("test-spout-1");

    assumeTrue(COMPONENT_HAS_COMPLETED.is(2));

    MultiIndexSpoutStormSuite.run(index + "json-simple", builder.createTopology(), COMPONENT_HAS_COMPLETED);

    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(10));

    RestUtils.refresh(index);
    assertTrue(RestUtils.exists(target));
    String results = RestUtils.get(target + "/_search?");
    assertThat(results, containsString("SFO"));
}
 
Example #4
Source File: AbstractStormIdMappingBoltTests.java    From elasticsearch-hadoop with Apache License 2.0
@Test
public void test2WriteWithId() throws Exception {
    List doc1 = ImmutableList.of("one", "fo1", "two", "fo2", "number", 1);
    List doc2 = ImmutableList.of("OTP", "Otopeni", "SFO", "San Fran", "number", 2);

    Map localCfg = new LinkedHashMap(conf);
    localCfg.put(ConfigurationOptions.ES_MAPPING_ID, "number"); // use the tuple's "number" field as the Elasticsearch document id

    String target = index + "/id-write";
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("test-spout-2", new TestSpout(ImmutableList.of(doc2, doc1), new Fields("key1", "valo1", "key2",
            "valo2", "key3", "number")));
    builder.setBolt("es-bolt-2", new TestBolt(new EsBolt(target, localCfg))).shuffleGrouping("test-spout-2");

    MultiIndexSpoutStormSuite.run(index + "id-write", builder.createTopology(), COMPONENT_HAS_COMPLETED);

    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(10));

    RestUtils.refresh(index);
    Thread.sleep(1000);
    assertTrue(RestUtils.exists(target + "/1"));
    assertTrue(RestUtils.exists(target + "/2"));

    String results = RestUtils.get(target + "/_search?");
    assertThat(results, containsString("two"));
}
 
Example #5
Source File: AbstractStormIndexPatternBoltTests.java    From elasticsearch-hadoop with Apache License 2.0
@Test
public void test1WriteIndexPattern() throws Exception {
    List doc1 = ImmutableList.of("one", "1", "two", "2", "number", 1);
    List doc2 = ImmutableList.of("OTP", "Otopeni", "SFO", "San Fran", "number", 2);

    String target = index + "/write-{number}";
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("test-spout-3", new TestSpout(ImmutableList.of(doc2, doc1), new Fields("key1", "val1", "key2",
            "val2", "key3", "number")));
    builder.setBolt("es-bolt-3", new TestBolt(new EsBolt(target, conf))).shuffleGrouping("test-spout-3");

    MultiIndexSpoutStormSuite.run(index + "write-pattern", builder.createTopology(), COMPONENT_HAS_COMPLETED);

    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(20));

    Thread.sleep(1000);
    RestUtils.refresh(index);
    assertTrue(RestUtils.exists(index + "/write-1"));
    assertTrue(RestUtils.exists(index + "/write-2"));

    String results = RestUtils.get(index + "/write-1" + "/_search?");
    assertThat(results, containsString("two"));

    results = RestUtils.get(index + "/write-2" + "/_search?");
    assertThat(results, containsString("SFO"));
}
 
Example #6
Source File: StreamToEs.java    From elasticsearch-hadoop with Apache License 2.0
public static void submitJob(String principal, String keytab, String esNodes) throws Exception {
    List doc1 = Collections.singletonList("{\"reason\" : \"business\",\"airport\" : \"SFO\"}");
    List doc2 = Collections.singletonList("{\"participants\" : 5,\"airport\" : \"OTP\"}");

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("Input", new TestSpout(ImmutableList.of(doc1, doc2), new Fields("json"), true));
    builder.setBolt("ES", new EsBolt("storm-test"))
            .shuffleGrouping("Input")
            .addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 2);

    // Nimbus needs to be started with the cred renewer and credentials plugins set in its config file

    Config conf = new Config();
    List<Object> plugins = new ArrayList<Object>();
    plugins.add(AutoElasticsearch.class.getName());
    conf.put(Config.TOPOLOGY_AUTO_CREDENTIALS, plugins);
    conf.put(ConfigurationOptions.ES_NODES, esNodes);
    conf.put(ConfigurationOptions.ES_SECURITY_AUTHENTICATION, "kerberos");
    conf.put(ConfigurationOptions.ES_NET_SPNEGO_AUTH_ELASTICSEARCH_PRINCIPAL, "HTTP/<es-host>@<REALM>"); // placeholder: the SPNEGO service principal of the Elasticsearch server
    conf.put(ConfigurationOptions.ES_INPUT_JSON, "true");
    StormSubmitter.submitTopology("test-run", conf, builder.createTopology());
}
 
Example #7
Source File: EsIndexBolt.java    From cognition with Apache License 2.0
@Override
public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
  String target = boltConfig.get(ConfigurationOptions.ES_RESOURCE_WRITE);
  esBolt = new EsBolt(target, boltConfig);
  esBolt.prepare(conf, context, collector);
}
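Example #7 shows only the prepare method of a bolt that embeds an EsBolt; the surrounding class is not included in the listing. A minimal, hypothetical wrapper that forwards the remaining Storm lifecycle calls to the embedded EsBolt could look like the sketch below (this is not the actual cognition EsIndexBolt, and the package names again assume Storm 1.x or later).

import java.util.Map;

import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Tuple;
import org.elasticsearch.hadoop.cfg.ConfigurationOptions;
import org.elasticsearch.storm.EsBolt;

public class DelegatingEsBolt extends BaseRichBolt {

    private final Map<String, String> boltConfig;  // es-hadoop settings, including es.resource.write
    private transient EsBolt esBolt;

    public DelegatingEsBolt(Map<String, String> boltConfig) {
        this.boltConfig = boltConfig;
    }

    @Override
    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
        String target = boltConfig.get(ConfigurationOptions.ES_RESOURCE_WRITE);
        esBolt = new EsBolt(target, boltConfig);
        esBolt.prepare(conf, context, collector);
    }

    @Override
    public void execute(Tuple tuple) {
        // Any per-tuple enrichment or filtering would go here before delegating the write.
        esBolt.execute(tuple);
    }

    @Override
    public void cleanup() {
        esBolt.cleanup();
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // Nothing to declare: the wrapped EsBolt only writes to Elasticsearch.
    }
}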
 
Example #8
Source File: ZkTopology.java    From yuzhouwan with Apache License 2.0
public static void main(String[] args) {

    // This is simply the zookeeper.connect value from the Kafka configuration file; copy it from there.
    String brokerZkStr = "10.100.90.201:2181/kafka_online_sample";
    String brokerZkPath = "/brokers";
    ZkHosts zkHosts = new ZkHosts(brokerZkStr, brokerZkPath);

    String topic = "mars-wap";
    // Below: the ZooKeeper cluster to which consumer offsets are reported, and the related settings.
    String offsetZkServers = "10.199.203.169";
    String offsetZkPort = "2181";
    List<String> zkServersList = new ArrayList<>();
    zkServersList.add(offsetZkServers);
    // Root path under which offset information is stored.
    String offsetZkRoot = "/stormExample";
    // ID under which this spout's consumer offsets are stored, e.g. named after the topology.
    String offsetZkId = "storm-example";

    SpoutConfig kafkaConfig = new SpoutConfig(zkHosts, topic, offsetZkRoot, offsetZkId);
    kafkaConfig.zkPort = Integer.parseInt(offsetZkPort);
    kafkaConfig.zkServers = zkServersList;
    kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());

    KafkaSpout spout = new KafkaSpout(kafkaConfig);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", spout, 1);
    builder.setBolt("bolt", new EsBolt("storm/docs"), 1).shuffleGrouping("spout");

    Config config = new Config();
    config.put("es.index.auto.create", "true");

    if (args.length > 0) {
        try {
            StormSubmitter.submitTopology("storm-kafka-example", config, builder.createTopology());
        } catch (Exception e) {
            LOG.error("", e);
        }
    } else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", config, builder.createTopology());
    }
}