Java Code Examples for backtype.storm.Config.get()

The following are Java code examples showing how to use the get() method of the backtype.storm.Config class.
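Before the project examples, here is a minimal, hypothetical sketch (the class name and values are ours, not taken from any project below). Config extends java.util.HashMap&lt;String, Object&gt;, so get() returns an Object that callers typically cast to the expected type and null-check themselves:

import backtype.storm.Config;

public class ConfigGetExample {
    public static void main(String[] args) {
        Config conf = new Config();
        conf.put(Config.TOPOLOGY_NAME, "demo-topology");

        // get() is inherited from HashMap and returns Object;
        // cast to the expected type and guard against missing keys.
        String name = (String) conf.get(Config.TOPOLOGY_NAME);
        if (name == null) {
            name = "default-topology";
        }
        System.out.println("Topology name: " + name);
    }
}

This cast-and-fallback pattern is exactly what the examples below do with keys such as Config.TOPOLOGY_NAME and Config.TOPOLOGY_WORKER_CHILDOPTS.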
Example 1
Project: jstorm-0.9.6.3-   File: SequenceTopologyTool.java
public void SetRemoteTopology() throws AlreadyAliveException,
		InvalidTopologyException, TopologyAssignException {
	Config conf = getConf();
	StormTopology topology = buildTopology();

	conf.put(Config.STORM_CLUSTER_MODE, "distributed");
	// get() returns Object, so the topology name must be cast to String.
	String streamName = (String) conf.get(Config.TOPOLOGY_NAME);
	if (streamName == null) {
		streamName = "SequenceTest";
	}

	// Pick the messaging transport based on the topology name.
	if (streamName.contains("zeromq")) {
		conf.put(Config.STORM_MESSAGING_TRANSPORT,
				"com.alibaba.jstorm.message.zeroMq.MQContext");
	} else {
		conf.put(Config.STORM_MESSAGING_TRANSPORT,
				"com.alibaba.jstorm.message.netty.NettyContext");
	}

	StormSubmitter.submitTopology(streamName, conf, topology);
}
 
Example 2
Project: alfresco-apache-storm-demo   File: FetcherBolt.java
private void checkConfiguration(Config stormConf) {
    // ensure that a value has been set for the agent name and that that
    // agent name is the first value in the agents we advertise for robot
    // rules parsing
    String agentName = (String) stormConf.get("http.agent.name");
    if (agentName == null || agentName.trim().length() == 0) {
        String message = "Fetcher: No agents listed in 'http.agent.name'"
                + " property.";
        LOG.error(message);
        throw new IllegalArgumentException(message);
    }
}
 
Example 3
Project: realtime-event-processing   File: DocEventProcessingTopology.java
public static StormTopology buildTopology(Config conf, LocalDRPC drpc) {

        TridentTopology topology = new TridentTopology();

        //Kafka Spout
        BrokerHosts zk = new ZkHosts(conf.get(CrawlerConfig.KAFKA_CONSUMER_HOST_NAME) + ":" + conf.get(CrawlerConfig.KAFKA_CONSUMER_HOST_PORT));
        TridentKafkaConfig kafkaConfig = new TridentKafkaConfig(zk, (String) conf.get(CrawlerConfig.KAFKA_TOPIC_DOCUMENT_NAME));
        kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        OpaqueTridentKafkaSpout spout = new OpaqueTridentKafkaSpout(kafkaConfig);

        //ElasticSearch Persistent State
        Settings esSettings = ImmutableSettings.settingsBuilder()
                .put("storm.elasticsearch.cluster.name", conf.get(CrawlerConfig.ELASTICSEARCH_CLUSTER_NAME))
                .put("storm.elasticsearch.hosts", conf.get(CrawlerConfig.ELASTICSEARCH_HOST_NAME) + ":" + conf.get(CrawlerConfig.ELASTICSEARCH_HOST_PORT))
                .build();
        StateFactory esStateFactory = new ESIndexState.Factory<JSONObject>(new ClientFactory.NodeClient(esSettings.getAsMap()), JSONObject.class);
        TridentState esStaticState = topology.newStaticState(esStateFactory);

        String esIndex = (String) conf.get(CrawlerConfig.ELASTICSEARCH_INDEX_NAME);
        topology.newStream("docstream", spout)
                .each(new Fields("str"), new SplitDocStreamArgs(), new Fields("filename", "task", "user", "content"))
                .each(new Fields("filename", "task", "user"), new PrintFilter("Kafka"))
                .each(new Fields("filename", "task", "user", "content"), new PrepareDocForElasticSearch(), new Fields("index", "type", "id", "source"))
                .partitionPersist(esStateFactory, new Fields("index", "type", "id", "source"), new ESIndexUpdater<String>(new ESTridentTupleMapper()), new Fields());

        return topology.build();
    }
 
Example 4
Project: web-crawler   File: WebCrawlerTopology.java
public static StormTopology buildTopology(Config conf, LocalDRPC localDrpc) {
    TridentTopology topology = new TridentTopology();

    //Kafka Spout
    BrokerHosts zk = new ZkHosts(conf.get(CrawlerConfig.KAFKA_CONSUMER_HOST_NAME) + ":" + conf.get(CrawlerConfig.KAFKA_CONSUMER_HOST_PORT));
    TridentKafkaConfig kafkaConfig = new TridentKafkaConfig(zk, (String) conf.get(CrawlerConfig.KAFKA_TOPIC_NAME));
    kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    OpaqueTridentKafkaSpout spout = new OpaqueTridentKafkaSpout(kafkaConfig);

    //ElasticSearch Persistent State
    Settings esSettings = ImmutableSettings.settingsBuilder()
            .put("storm.elasticsearch.cluster.name", conf.get(CrawlerConfig.ELASTICSEARCH_CLUSTER_NAME))
            .put("storm.elasticsearch.hosts", conf.get(CrawlerConfig.ELASTICSEARCH_HOST_NAME) + ":" + conf.get(CrawlerConfig.ELASTICSEARCH_HOST_PORT))
            .build();
    StateFactory esStateFactory = new ESIndexState.Factory<String>(new ClientFactory.NodeClient(esSettings.getAsMap()), String.class);
    TridentState esStaticState = topology.newStaticState(esStateFactory);

    //Topology
    topology.newStream("crawlKafkaSpout", spout).parallelismHint(5)
             //Splits url and depth information on receiving from Kafka
            .each(new Fields("str"), new SplitKafkaInput(), new Fields("url", "depth"))
            //Bloom Filter. Filters already crawled URLs
            .each(new Fields("url"), new URLFilter())
            //Download and Parse Webpage
            .each(new Fields("url"), new GetAdFreeWebPage(), new Fields("content_html", "title", "href"))//TODO Optimize
            //Add Href URls to Kafka queue
            .each(new Fields("href", "depth"), new KafkaProducerFilter())//TODO Replace with kafka persistent state.
            //Insert to Elasticsearch
            .each(new Fields("url", "content_html", "title"), new PrepareForElasticSearch(), new Fields("index", "type", "id", "source"))
            .partitionPersist(esStateFactory, new Fields("index", "type", "id", "source"), new ESIndexUpdater<String>(new ESTridentTupleMapper()))
    ;

    //DRPC
    topology.newDRPCStream("search", localDrpc)
            .each(new Fields("args"), new SplitDRPCArgs(), new Fields("query_input"))
            .each(new Fields("query_input"), new BingAutoSuggest(0), new Fields("query_preProcessed"))//TODO return List of expanded query
            .each(new Fields("query_preProcessed"), new PrepareSearchQuery(), new Fields("query", "indices", "types"))
            .groupBy(new Fields("query", "indices", "types"))
            .stateQuery(esStaticState, new Fields("query", "indices", "types"), new QuerySearchIndexQuery(), new Fields("results"))
    ;

    return topology.build();
}
 
Example 5
Project: aeolus   File: FileReaderSpoutITCase.java
@Test(timeout = 30000)
public void test() throws AlreadyAliveException, InvalidTopologyException, IOException {
	Config conf = new Config();
	
	if(System.getProperty("user.dir").endsWith("JUnitLoop")) {
		conf.put(FileReaderSpout.INPUT_FILE_NAME, "../aeolus/queries/lrb/src/test/resources/xway-");
		conf.put(SpoutDataFileOutputBolt.OUTPUT_DIR_NAME, "../aeolus/queries/lrb/src/test/resources");
	} else {
		conf.put(FileReaderSpout.INPUT_FILE_NAME, "src/test/resources/xway-");
		conf.put(SpoutDataFileOutputBolt.OUTPUT_DIR_NAME, "src/test/resources");
	}
	
	LinkedList<String> inputFiles = new LinkedList<String>();
	for(int i = 0; i < 10; ++i) {
		inputFiles.add(i + "-sample.dat");
	}
	conf.put(FileReaderSpout.INPUT_FILE_SUFFIXES, inputFiles);

	TopologyBuilder builder = new TopologyBuilder();
	final int dop = 1 + this.r.nextInt(10);
	builder.setSpout("Spout", new FileReaderSpout(), new Integer(dop));
	SpoutDataFileOutputBolt sink = new SpoutDataFileOutputBolt();
	builder.setBolt("Sink", new TimestampMerger(sink, 0), new Integer(1)).shuffleGrouping("Spout")
		.allGrouping("Spout", TimestampMerger.FLUSH_STREAM_ID);

	LocalCluster cluster = new LocalCluster();
	cluster.submitTopology("LR-SpoutTest", conf, builder.createTopology());
	Utils.sleep(10 * 1000);
	cluster.deactivate("LR-SpoutTest");
	Utils.sleep(1000);
	cluster.killTopology("LR-SpoutTest");
	Utils.sleep(5 * 1000); // give "kill" some time to clean up; otherwise, test might hang and time out
	cluster.shutdown();

	BufferedReader reader = new BufferedReader(new FileReader(
		(String) conf.get(SpoutDataFileOutputBolt.OUTPUT_DIR_NAME) + File.separator + "result.dat"));
	LinkedList<String> result = new LinkedList<String>();
	String line;
	while((line = reader.readLine()) != null) {
		result.add(line);
	}
	reader.close();
	
	LinkedList<String> expectedResult = new LinkedList<String>();
	for(String file : inputFiles) {
		reader = new BufferedReader(new FileReader((String) conf.get(FileReaderSpout.INPUT_FILE_NAME) + file));
		while((line = reader.readLine()) != null) {
			int p1 = line.indexOf(",");
			int p2 = line.indexOf(",", p1 + 1);
			expectedResult.add(line.substring(p1 + 1, p2) + "," + line);
		}
		reader.close();
	}
	Collections.sort(expectedResult);
	expectedResult.add("FLUSH");
	
	Assert.assertEquals(expectedResult, result);
}
 
Example 6
Project: incubator-samoa   File: StormTopologySubmitter.java
public static void main(String[] args) throws IOException {
  Properties props = StormSamoaUtils.getProperties();

  String uploadedJarLocation = props.getProperty(StormJarSubmitter.UPLOADED_JAR_LOCATION_KEY);
  if (uploadedJarLocation == null) {
    logger.error("Invalid properties file. It must have key {}",
        StormJarSubmitter.UPLOADED_JAR_LOCATION_KEY);
    return;
  }

  List<String> tmpArgs = new ArrayList<String>(Arrays.asList(args));
  int numWorkers = StormSamoaUtils.numWorkers(tmpArgs);

  args = tmpArgs.toArray(new String[0]);
  StormTopology stormTopo = StormSamoaUtils.argsToTopology(args);

  Config conf = new Config();
  conf.putAll(Utils.readStormConfig());
  conf.putAll(Utils.readCommandLineOpts());
  conf.setDebug(false);
  conf.setNumWorkers(numWorkers);

  String profilerOption =
      props.getProperty(StormTopologySubmitter.YJP_OPTIONS_KEY);
  if (profilerOption != null) {
    String topoWorkerChildOpts = (String) conf.get(Config.TOPOLOGY_WORKER_CHILDOPTS);
    StringBuilder optionBuilder = new StringBuilder();
    if (topoWorkerChildOpts != null) {
      optionBuilder.append(topoWorkerChildOpts);
      optionBuilder.append(' ');
    }
    optionBuilder.append(profilerOption);
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, optionBuilder.toString());
  }

  Map<String, Object> myConfigMap = new HashMap<String, Object>(conf);
  StringWriter out = new StringWriter();

  try {
    JSONValue.writeJSONString(myConfigMap, out);
  } catch (IOException e) {
    System.out.println("Error in writing JSONString");
    e.printStackTrace();
    return;
  }

  Config config = new Config();
  config.putAll(Utils.readStormConfig());

  NimbusClient nc = NimbusClient.getConfiguredClient(config);
  String topologyName = stormTopo.getTopologyName();
  try {
    System.out.println("Submitting topology with name: "
        + topologyName);
    nc.getClient().submitTopology(topologyName, uploadedJarLocation,
        out.toString(), stormTopo.getStormBuilder().createTopology());
    System.out.println(topologyName + " is successfully submitted");

  } catch (AlreadyAliveException aae) {
    System.out.println("Fail to submit " + topologyName
        + "\nError message: " + aae.get_msg());
  } catch (InvalidTopologyException ite) {
    System.out.println("Invalid topology for " + topologyName);
    ite.printStackTrace();
  } catch (TException te) {
    System.out.println("Texception for " + topologyName);
    te.printStackTrace();
  }
}
 
Example 7
Project: realtime-event-processing   File: URLEventProcessingTopology.java
public static StormTopology buildTopology(Config conf, LocalDRPC localDrpc) {
    TridentTopology topology = new TridentTopology();

    //Kafka Spout
    BrokerHosts zk = new ZkHosts(conf.get(CrawlerConfig.KAFKA_CONSUMER_HOST_NAME) + ":" + conf.get(CrawlerConfig.KAFKA_CONSUMER_HOST_PORT));
    TridentKafkaConfig kafkaConfig = new TridentKafkaConfig(zk, (String) conf.get(CrawlerConfig.KAFKA_TOPIC_NAME));
    kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    //kafkaConfig.ignoreZkOffsets=true;
    OpaqueTridentKafkaSpout spout = new OpaqueTridentKafkaSpout(kafkaConfig);

    //ElasticSearch Persistent State
    Settings esSettings = ImmutableSettings.settingsBuilder()
            .put("storm.elasticsearch.cluster.name", conf.get(CrawlerConfig.ELASTICSEARCH_CLUSTER_NAME))
            .put("storm.elasticsearch.hosts", conf.get(CrawlerConfig.ELASTICSEARCH_HOST_NAME) + ":" + conf.get(CrawlerConfig.ELASTICSEARCH_HOST_PORT))
            .build();
    StateFactory esStateFactory = new ESIndexState.Factory<JSONObject>(new ClientFactory.NodeClient(esSettings.getAsMap()), JSONObject.class);
    TridentState esStaticState = topology.newStaticState(esStateFactory);

    //Topology
    topology.newStream("crawlKafkaSpout", spout).parallelismHint(5)
            //Splits words on receiving from Kafka
            .each(new Fields("str"), new SplitFunction(), new Fields("url", "depth", "task", "user"))
            .each(new Fields("str"), new PrintFilter("Kafka"))
            //Bloom Filter, Filters already crawled URLs
            .each(new Fields("url", "task"), new URLFilter())
            //Download and Parse Webpage
            .each(new Fields("url"), new GetAdFreeWebPage(), new Fields("content_html", "title", "href"))
            //Sending URLs present in the page into the kafka queue.
            .each(new Fields("href", "depth", "task", "user"), new KafkaProducerFilter())
            //Insert to Elasticsearch
            .each(new Fields("url", "content_html", "title", "task", "user"), new PrepareForElasticSearch(), new Fields("index", "type", "id", "source"))
            .partitionPersist(esStateFactory, new Fields("index", "type", "id", "source"), new ESIndexUpdater<String>(new ESTridentTupleMapper()), new Fields())
            ;

    //DRPC
    topology.newDRPCStream("search", localDrpc)
            .each(new Fields("args"), new SplitDRPCArgs(), new Fields("query_input", "task"))
            .each(new Fields("query_input"), new BingAutoSuggest(0), new Fields("query_preProcessed"))
            .each(new Fields("query_preProcessed", "task"), new PrepareSearchQuery(), new Fields("query", "indices", "types"))
            .groupBy(new Fields("query", "indices", "types"))
            .stateQuery(esStaticState, new Fields("query", "indices", "types"), new QuerySearchIndexQuery(), new Fields("results"))
            ;

    return topology.build();
}